path: root/ansible_collections/community/general
Diffstat (limited to '')
-rw-r--r--  ansible_collections/community/general/.azure-pipelines/README.md  9
-rw-r--r--  ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml  413
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh  24
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py  64
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh  28
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.py  105
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh  19
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh  38
-rwxr-xr-x  ansible_collections/community/general/.azure-pipelines/scripts/time-command.py  29
-rw-r--r--  ansible_collections/community/general/.azure-pipelines/templates/coverage.yml  44
-rw-r--r--  ansible_collections/community/general/.azure-pipelines/templates/matrix.yml  60
-rw-r--r--  ansible_collections/community/general/.azure-pipelines/templates/test.yml  50
-rw-r--r--  ansible_collections/community/general/.github/BOTMETA.yml  1416
-rw-r--r--  ansible_collections/community/general/.github/ISSUE_TEMPLATE/bug_report.yml  153
-rw-r--r--  ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml  31
-rw-r--r--  ansible_collections/community/general/.github/ISSUE_TEMPLATE/documentation_report.yml  129
-rw-r--r--  ansible_collections/community/general/.github/ISSUE_TEMPLATE/feature_request.yml  73
-rw-r--r--  ansible_collections/community/general/.github/dependabot.yml  11
-rw-r--r--  ansible_collections/community/general/.github/patchback.yml  9
-rw-r--r--  ansible_collections/community/general/.github/settings.yml  11
-rw-r--r--  ansible_collections/community/general/.github/workflows/ansible-test.yml  240
-rw-r--r--  ansible_collections/community/general/.github/workflows/codeql-analysis.yml  61
-rw-r--r--  ansible_collections/community/general/.github/workflows/reuse.yml  35
-rw-r--r--  ansible_collections/community/general/.gitignore  514
-rw-r--r--  ansible_collections/community/general/.pre-commit-config.yaml  23
-rw-r--r--  ansible_collections/community/general/.reuse/dep5  5
-rw-r--r--  ansible_collections/community/general/CHANGELOG.rst  705
-rw-r--r--  ansible_collections/community/general/CHANGELOG.rst.license  3
-rw-r--r--  ansible_collections/community/general/CONTRIBUTING.md  139
-rw-r--r--  ansible_collections/community/general/COPYING  675
-rw-r--r--  ansible_collections/community/general/FILES.json  25163
-rw-r--r--  ansible_collections/community/general/LICENSES/BSD-2-Clause.txt  8
-rw-r--r--  ansible_collections/community/general/LICENSES/GPL-3.0-or-later.txt  675
-rw-r--r--  ansible_collections/community/general/LICENSES/MIT.txt  9
-rw-r--r--  ansible_collections/community/general/LICENSES/PSF-2.0.txt  48
-rw-r--r--  ansible_collections/community/general/MANIFEST.json  30
-rw-r--r--  ansible_collections/community/general/README.md  143
-rw-r--r--  ansible_collections/community/general/changelogs/.gitignore  5
-rw-r--r--  ansible_collections/community/general/changelogs/changelog.yaml  1426
-rw-r--r--  ansible_collections/community/general/changelogs/changelog.yaml.license  3
-rw-r--r--  ansible_collections/community/general/changelogs/config.yaml  34
-rw-r--r--  ansible_collections/community/general/changelogs/fragments/.keep  0
-rw-r--r--  ansible_collections/community/general/commit-rights.md  80
-rw-r--r--  ansible_collections/community/general/docs/docsite/extra-docs.yml  10
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml  18
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml  18
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml  7
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml  18
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml  7
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml  8
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml  9
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml  9
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml  9
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml  9
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml  14
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml  24
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml  9
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples.yml  54
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2  13
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2  62
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2  7
-rw-r--r--  ansible_collections/community/general/docs/docsite/helper/lists_mergeby/playbook.yml  62
-rw-r--r--  ansible_collections/community/general/docs/docsite/links.yml  27
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide.rst  23
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst  15
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst  82
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst  124
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst  103
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst  297
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_conversions.rst  113
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_creating_identifiers.rst  85
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_paths.rst  19
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_selecting_json_data.rst  149
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_times.rst  89
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_unicode.rst  35
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_versions.rst  39
-rw-r--r--  ansible_collections/community/general/docs/docsite/rst/test_guide.rst  33
-rw-r--r--  ansible_collections/community/general/meta/runtime.yml  4582
-rw-r--r--  ansible_collections/community/general/plugins/action/iptables_state.py  187
-rw-r--r--  ansible_collections/community/general/plugins/action/shutdown.py  213
-rw-r--r--  ansible_collections/community/general/plugins/become/doas.py  127
-rw-r--r--  ansible_collections/community/general/plugins/become/dzdo.py  97
-rw-r--r--  ansible_collections/community/general/plugins/become/ksu.py  121
-rw-r--r--  ansible_collections/community/general/plugins/become/machinectl.py  132
-rw-r--r--  ansible_collections/community/general/plugins/become/pbrun.py  105
-rw-r--r--  ansible_collections/community/general/plugins/become/pfexec.py  105
-rw-r--r--  ansible_collections/community/general/plugins/become/pmrun.py  78
-rw-r--r--  ansible_collections/community/general/plugins/become/sesu.py  92
-rw-r--r--  ansible_collections/community/general/plugins/become/sudosu.py  92
-rw-r--r--  ansible_collections/community/general/plugins/cache/memcached.py  241
-rw-r--r--  ansible_collections/community/general/plugins/cache/pickle.py  69
-rw-r--r--  ansible_collections/community/general/plugins/cache/redis.py  232
-rw-r--r--  ansible_collections/community/general/plugins/cache/yaml.py  66
-rw-r--r--  ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py  118
-rw-r--r--  ansible_collections/community/general/plugins/callback/context_demo.py  55
-rw-r--r--  ansible_collections/community/general/plugins/callback/counter_enabled.py  254
-rw-r--r--  ansible_collections/community/general/plugins/callback/dense.py  501
-rw-r--r--  ansible_collections/community/general/plugins/callback/diy.py  1417
-rw-r--r--  ansible_collections/community/general/plugins/callback/elastic.py  424
-rw-r--r--  ansible_collections/community/general/plugins/callback/hipchat.py  229
-rw-r--r--  ansible_collections/community/general/plugins/callback/jabber.py  120
-rw-r--r--  ansible_collections/community/general/plugins/callback/log_plays.py  125
-rw-r--r--  ansible_collections/community/general/plugins/callback/loganalytics.py  236
-rw-r--r--  ansible_collections/community/general/plugins/callback/logdna.py  210
-rw-r--r--  ansible_collections/community/general/plugins/callback/logentries.py  332
-rw-r--r--  ansible_collections/community/general/plugins/callback/logstash.py  396
-rw-r--r--  ansible_collections/community/general/plugins/callback/mail.py  250
-rw-r--r--  ansible_collections/community/general/plugins/callback/nrdp.py  192
-rw-r--r--  ansible_collections/community/general/plugins/callback/null.py  32
-rw-r--r--  ansible_collections/community/general/plugins/callback/opentelemetry.py  569
-rw-r--r--  ansible_collections/community/general/plugins/callback/say.py  119
-rw-r--r--  ansible_collections/community/general/plugins/callback/selective.py  287
-rw-r--r--  ansible_collections/community/general/plugins/callback/slack.py  253
-rw-r--r--  ansible_collections/community/general/plugins/callback/splunk.py  286
-rw-r--r--  ansible_collections/community/general/plugins/callback/sumologic.py  190
-rw-r--r--  ansible_collections/community/general/plugins/callback/syslog_json.py  129
-rw-r--r--  ansible_collections/community/general/plugins/callback/unixy.py  248
-rw-r--r--  ansible_collections/community/general/plugins/callback/yaml.py  130
-rw-r--r--  ansible_collections/community/general/plugins/connection/chroot.py  210
-rw-r--r--  ansible_collections/community/general/plugins/connection/funcd.py  107
-rw-r--r--  ansible_collections/community/general/plugins/connection/iocage.py  84
-rw-r--r--  ansible_collections/community/general/plugins/connection/jail.py  204
-rw-r--r--  ansible_collections/community/general/plugins/connection/lxc.py  230
-rw-r--r--  ansible_collections/community/general/plugins/connection/lxd.py  162
-rw-r--r--  ansible_collections/community/general/plugins/connection/qubes.py  155
-rw-r--r--  ansible_collections/community/general/plugins/connection/saltstack.py  102
-rw-r--r--  ansible_collections/community/general/plugins/connection/zone.py  201
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/alicloud.py  109
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/attributes.py  93
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/auth_basic.py  32
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/bitbucket.py  44
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py  51
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py  37
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/emc.py  46
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/gitlab.py  32
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/hpe3par.py  35
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/hwc.py  66
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py  38
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/influxdb.py  85
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/ipa.py  76
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/keycloak.py  78
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/ldap.py  84
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/lxca_common.py  44
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/manageiq.py  53
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/nomad.py  52
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oneview.py  80
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/online.py  45
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/opennebula.py  45
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/openswitch.py  85
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle.py  84
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py  26
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py  17
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py  17
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py  23
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py  27
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/pritunl.py  44
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/proxmox.py  65
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/purestorage.py  63
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/rackspace.py  118
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/redis.py  58
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/rundeck.py  32
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/scaleway.py  51
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/scaleway_waitable_resource.py  33
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/utm.py  56
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/vexata.py  54
-rw-r--r--  ansible_collections/community/general/plugins/doc_fragments/xenserver.py  41
-rw-r--r--  ansible_collections/community/general/plugins/filter/counter.py  66
-rw-r--r--  ansible_collections/community/general/plugins/filter/crc32.py  64
-rw-r--r--  ansible_collections/community/general/plugins/filter/dict.py  79
-rw-r--r--  ansible_collections/community/general/plugins/filter/dict_kv.py  104
-rw-r--r--  ansible_collections/community/general/plugins/filter/from_csv.py  122
-rw-r--r--  ansible_collections/community/general/plugins/filter/groupby_as_dict.py  89
-rw-r--r--  ansible_collections/community/general/plugins/filter/hashids.py  98
-rw-r--r--  ansible_collections/community/general/plugins/filter/hashids_decode.yml  43
-rw-r--r--  ansible_collections/community/general/plugins/filter/hashids_encode.yml  43
-rw-r--r--  ansible_collections/community/general/plugins/filter/jc.py  160
-rw-r--r--  ansible_collections/community/general/plugins/filter/json_query.py  147
-rw-r--r--  ansible_collections/community/general/plugins/filter/lists_mergeby.py  192
-rw-r--r--  ansible_collections/community/general/plugins/filter/random_mac.py  96
-rw-r--r--  ansible_collections/community/general/plugins/filter/time.py  149
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_days.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_hours.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_milliseconds.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_minutes.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_months.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_seconds.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_time_unit.yml  89
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_weeks.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/to_years.yml  45
-rw-r--r--  ansible_collections/community/general/plugins/filter/unicode_normalize.py  81
-rw-r--r--  ansible_collections/community/general/plugins/filter/version_sort.py  52
-rw-r--r--  ansible_collections/community/general/plugins/inventory/cobbler.py  287
-rw-r--r--  ansible_collections/community/general/plugins/inventory/gitlab_runners.py  139
-rw-r--r--  ansible_collections/community/general/plugins/inventory/icinga2.py  294
-rw-r--r--  ansible_collections/community/general/plugins/inventory/linode.py  313
-rw-r--r--  ansible_collections/community/general/plugins/inventory/lxd.py  1099
-rw-r--r--  ansible_collections/community/general/plugins/inventory/nmap.py  295
-rw-r--r--  ansible_collections/community/general/plugins/inventory/online.py  263
-rw-r--r--  ansible_collections/community/general/plugins/inventory/opennebula.py  252
-rw-r--r--  ansible_collections/community/general/plugins/inventory/proxmox.py  644
-rw-r--r--  ansible_collections/community/general/plugins/inventory/scaleway.py  344
-rw-r--r--  ansible_collections/community/general/plugins/inventory/stackpath_compute.py  283
-rw-r--r--  ansible_collections/community/general/plugins/inventory/virtualbox.py  287
-rw-r--r--  ansible_collections/community/general/plugins/inventory/xen_orchestra.py  350
-rw-r--r--  ansible_collections/community/general/plugins/lookup/bitwarden.py  164
-rw-r--r--  ansible_collections/community/general/plugins/lookup/cartesian.py  87
-rw-r--r--  ansible_collections/community/general/plugins/lookup/chef_databag.py  106
-rw-r--r--  ansible_collections/community/general/plugins/lookup/collection_version.py  139
-rw-r--r--  ansible_collections/community/general/plugins/lookup/consul_kv.py  193
-rw-r--r--  ansible_collections/community/general/plugins/lookup/credstash.py  144
-rw-r--r--  ansible_collections/community/general/plugins/lookup/cyberarkpassword.py  187
-rw-r--r--  ansible_collections/community/general/plugins/lookup/dependent.py  224
-rw-r--r--  ansible_collections/community/general/plugins/lookup/dig.py  451
-rw-r--r--  ansible_collections/community/general/plugins/lookup/dnstxt.py  115
-rw-r--r--  ansible_collections/community/general/plugins/lookup/dsv.py  146
-rw-r--r--  ansible_collections/community/general/plugins/lookup/etcd.py  169
-rw-r--r--  ansible_collections/community/general/plugins/lookup/etcd3.py  229
-rw-r--r--  ansible_collections/community/general/plugins/lookup/filetree.py  225
-rw-r--r--  ansible_collections/community/general/plugins/lookup/flattened.py  95
-rw-r--r--  ansible_collections/community/general/plugins/lookup/hiera.py  92
-rw-r--r--  ansible_collections/community/general/plugins/lookup/keyring.py  73
-rw-r--r--  ansible_collections/community/general/plugins/lookup/lastpass.py  106
-rw-r--r--  ansible_collections/community/general/plugins/lookup/lmdb_kv.py  125
-rw-r--r--  ansible_collections/community/general/plugins/lookup/manifold.py  280
-rw-r--r--  ansible_collections/community/general/plugins/lookup/merge_variables.py  212
-rw-r--r--  ansible_collections/community/general/plugins/lookup/onepassword.py  625
-rw-r--r--  ansible_collections/community/general/plugins/lookup/onepassword_raw.py  101
-rw-r--r--  ansible_collections/community/general/plugins/lookup/passwordstore.py  494
-rw-r--r--  ansible_collections/community/general/plugins/lookup/random_pet.py  100
-rw-r--r--  ansible_collections/community/general/plugins/lookup/random_string.py  221
-rw-r--r--  ansible_collections/community/general/plugins/lookup/random_words.py  121
-rw-r--r--  ansible_collections/community/general/plugins/lookup/redis.py  117
-rw-r--r--  ansible_collections/community/general/plugins/lookup/revbitspss.py  107
-rw-r--r--  ansible_collections/community/general/plugins/lookup/shelvefile.py  92
-rw-r--r--  ansible_collections/community/general/plugins/lookup/tss.py  299
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/_filelock.py  109
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/_mount.py  54
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/_stormssh.py  258
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py  292
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/btrfs.py  464
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/cloud.py  209
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/cmd_runner.py  319
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/csv.py  70
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/database.py  191
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/deps.py  98
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/dimensiondata.py  332
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/gandi_livedns_api.py  235
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/gconftool2.py  32
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/gitlab.py  123
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/heroku.py  43
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/hwc_utils.py  442
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py  96
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py  2390
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py  77
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py  307
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/influxdb.py  94
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ipa.py  215
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/jenkins.py  35
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/known_hosts.py  183
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ldap.py  131
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/linode.py  23
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/lxd.py  134
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/manageiq.py  470
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/memset.py  144
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/base.py  89
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/deco.py  101
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/exceptions.py  19
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py  205
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/mixins/deprecate_attrs.py  62
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py  59
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/mixins/state.py  40
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py  135
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/mh/module_helper.py  83
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/module_helper.py  20
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py  370
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ocapi_utils.py  502
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/oneandone.py  260
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/onepassword.py  31
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/oneview.py  488
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/online.py  124
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/opennebula.py  349
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py  1965
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/pipx.py  51
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/proxmox.py  147
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/puppet.py  111
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/pure.py  115
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/rax.py  334
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/redfish_utils.py  3251
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/redhat.py  272
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/redis.py  100
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py  80
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/rundeck.py  101
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/saslprep.py  179
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/scaleway.py  397
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py  95
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/ssh.py  21
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py  15
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py  96
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/univention_umc.py  279
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/utm_utils.py  218
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/version.py  22
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/vexata.py  98
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py  520
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/xenserver.py  862
-rw-r--r--  ansible_collections/community/general/plugins/module_utils/xfconf.py  38
-rw-r--r--  ansible_collections/community/general/plugins/modules/aerospike_migrations.py  529
-rw-r--r--  ansible_collections/community/general/plugins/modules/airbrake_deployment.py  169
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_devices.py  377
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_filesystem.py  573
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_inittab.py  255
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_lvg.py  371
-rw-r--r--  ansible_collections/community/general/plugins/modules/aix_lvol.py  347
-rw-r--r--  ansible_collections/community/general/plugins/modules/alerta_customer.py  207
-rw-r--r--  ansible_collections/community/general/plugins/modules/ali_instance.py  1012
-rw-r--r--  ansible_collections/community/general/plugins/modules/ali_instance_info.py  407
-rw-r--r--  ansible_collections/community/general/plugins/modules/alternatives.py  407
-rw-r--r--  ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py  374
-rw-r--r--  ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py  452
-rw-r--r--  ansible_collections/community/general/plugins/modules/apache2_module.py  297
-rw-r--r--  ansible_collections/community/general/plugins/modules/apk.py  378
-rw-r--r--  ansible_collections/community/general/plugins/modules/apt_repo.py  154
-rw-r--r--  ansible_collections/community/general/plugins/modules/apt_rpm.py  272
-rw-r--r--  ansible_collections/community/general/plugins/modules/archive.py  686
-rw-r--r--  ansible_collections/community/general/plugins/modules/atomic_container.py  217
-rw-r--r--  ansible_collections/community/general/plugins/modules/atomic_host.py  105
-rw-r--r--  ansible_collections/community/general/plugins/modules/atomic_image.py  177
-rw-r--r--  ansible_collections/community/general/plugins/modules/awall.py  164
-rw-r--r--  ansible_collections/community/general/plugins/modules/beadm.py  415
-rw-r--r--  ansible_collections/community/general/plugins/modules/bearychat.py  175
-rw-r--r--  ansible_collections/community/general/plugins/modules/bigpanda.py  226
-rw-r--r--  ansible_collections/community/general/plugins/modules/bitbucket_access_key.py  281
-rw-r--r--  ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py  207
-rw-r--r--  ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py  304
-rw-r--r--  ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py  276
-rw-r--r--  ansible_collections/community/general/plugins/modules/bower.py  236
-rw-r--r--  ansible_collections/community/general/plugins/modules/btrfs_info.py  109
-rw-r--r--  ansible_collections/community/general/plugins/modules/btrfs_subvolume.py  682
-rw-r--r--  ansible_collections/community/general/plugins/modules/bundler.py  211
-rw-r--r--  ansible_collections/community/general/plugins/modules/bzr.py  202
-rw-r--r--  ansible_collections/community/general/plugins/modules/campfire.py  162
-rw-r--r--  ansible_collections/community/general/plugins/modules/capabilities.py  188
-rw-r--r--  ansible_collections/community/general/plugins/modules/cargo.py  213
-rw-r--r--  ansible_collections/community/general/plugins/modules/catapult.py  162
-rw-r--r--  ansible_collections/community/general/plugins/modules/circonus_annotation.py  243
-rw-r--r--  ansible_collections/community/general/plugins/modules/cisco_webex.py  197
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_aa_policy.py  353
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_alert_policy.py  536
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_blueprint_package.py  309
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_firewall_policy.py  596
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_group.py  522
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_loadbalancer.py  945
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_modify_server.py  975
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_publicip.py  369
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_server.py  1570
-rw-r--r--  ansible_collections/community/general/plugins/modules/clc_server_snapshot.py  419
-rw-r--r--  ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py  133
-rw-r--r--  ansible_collections/community/general/plugins/modules/cloudflare_dns.py  893
-rw-r--r--  ansible_collections/community/general/plugins/modules/cobbler_sync.py  150
-rw-r--r--  ansible_collections/community/general/plugins/modules/cobbler_system.py  348
-rw-r--r--  ansible_collections/community/general/plugins/modules/composer.py  275
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul.py  635
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_acl.py  691
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_kv.py  336
-rw-r--r--  ansible_collections/community/general/plugins/modules/consul_session.py  307
-rw-r--r--  ansible_collections/community/general/plugins/modules/copr.py  500
-rw-r--r--  ansible_collections/community/general/plugins/modules/cpanm.py  247
-rw-r--r--  ansible_collections/community/general/plugins/modules/cronvar.py  431
-rw-r--r--  ansible_collections/community/general/plugins/modules/crypttab.py  362
-rw-r--r--  ansible_collections/community/general/plugins/modules/datadog_downtime.py  315
-rw-r--r--  ansible_collections/community/general/plugins/modules/datadog_event.py  193
-rw-r--r--  ansible_collections/community/general/plugins/modules/datadog_monitor.py  428
-rw-r--r--  ansible_collections/community/general/plugins/modules/dconf.py  490
-rw-r--r--  ansible_collections/community/general/plugins/modules/deploy_helper.py  535
-rw-r--r--  ansible_collections/community/general/plugins/modules/dimensiondata_network.py  303
-rw-r--r--  ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py  564
-rw-r--r--  ansible_collections/community/general/plugins/modules/discord.py  223
-rw-r--r--  ansible_collections/community/general/plugins/modules/django_manage.py  418
-rw-r--r--  ansible_collections/community/general/plugins/modules/dnf_versionlock.py  355
-rw-r--r--  ansible_collections/community/general/plugins/modules/dnsimple.py  434
-rw-r--r--  ansible_collections/community/general/plugins/modules/dnsimple_info.py  329
-rw-r--r--  ansible_collections/community/general/plugins/modules/dnsmadeeasy.py  724
-rw-r--r--  ansible_collections/community/general/plugins/modules/dpkg_divert.py  369
-rw-r--r--  ansible_collections/community/general/plugins/modules/easy_install.py  206
-rw-r--r--  ansible_collections/community/general/plugins/modules/ejabberd_user.py  195
-rw-r--r--  ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py  309
-rw-r--r--  ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py  181
-rw-r--r--  ansible_collections/community/general/plugins/modules/etcd3.py  261
-rw-r--r--  ansible_collections/community/general/plugins/modules/facter.py  80
-rw-r--r--  ansible_collections/community/general/plugins/modules/filesize.py  492
-rw-r--r--  ansible_collections/community/general/plugins/modules/filesystem.py  606
-rw-r--r--  ansible_collections/community/general/plugins/modules/flatpak.py  350
-rw-r--r--  ansible_collections/community/general/plugins/modules/flatpak_remote.py  273
-rw-r--r--  ansible_collections/community/general/plugins/modules/flowdock.py  205
-rw-r--r--  ansible_collections/community/general/plugins/modules/gandi_livedns.py  193
-rw-r--r--  ansible_collections/community/general/plugins/modules/gconftool2.py  163
-rw-r--r--  ansible_collections/community/general/plugins/modules/gconftool2_info.py  78
-rw-r--r--  ansible_collections/community/general/plugins/modules/gem.py  353
-rw-r--r--  ansible_collections/community/general/plugins/modules/git_config.py  290
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_deploy_key.py  347
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_issue.py  124
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_key.py  250
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_release.py  221
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_repo.py  279
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_webhook.py  303
-rw-r--r--  ansible_collections/community/general/plugins/modules/github_webhook_info.py  179
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_branch.py  183
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py  301
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_group.py  400
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_group_members.py  441
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_group_variable.py  455
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_hook.py  384
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_project.py  678
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_project_badge.py  216
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_project_members.py  449
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_project_variable.py  486
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py  199
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_runner.py  466
-rw-r--r--  ansible_collections/community/general/plugins/modules/gitlab_user.py  691
-rw-r--r--  ansible_collections/community/general/plugins/modules/grove.py  125
-rw-r--r--  ansible_collections/community/general/plugins/modules/gunicorn.py  233
-rw-r--r--  ansible_collections/community/general/plugins/modules/hana_query.py  219
-rw-r--r--  ansible_collections/community/general/plugins/modules/haproxy.py  488
-rw-r--r--  ansible_collections/community/general/plugins/modules/heroku_collaborator.py  138
-rw-r--r--  ansible_collections/community/general/plugins/modules/hg.py  303
-rw-r--r--  ansible_collections/community/general/plugins/modules/hipchat.py  220
-rw-r--r--  ansible_collections/community/general/plugins/modules/homebrew.py  981
-rw-r--r--  ansible_collections/community/general/plugins/modules/homebrew_cask.py  895
-rw-r--r--  ansible_collections/community/general/plugins/modules/homebrew_tap.py  279
-rw-r--r--  ansible_collections/community/general/plugins/modules/homectl.py  658
-rw-r--r--  ansible_collections/community/general/plugins/modules/honeybadger_deployment.py  136
-rw-r--r--  ansible_collections/community/general/plugins/modules/hpilo_boot.py  218
-rw-r--r--  ansible_collections/community/general/plugins/modules/hpilo_info.py  271
-rw-r--r--  ansible_collections/community/general/plugins/modules/hponcfg.py  120
-rw-r--r--  ansible_collections/community/general/plugins/modules/htpasswd.py  286
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py  2142
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_evs_disk.py  1217
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_network_vpc.py  500
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_smn_topic.py  344
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py  884
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py  698
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_port.py  1167
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py  360
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_route.py  443
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py  650
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py  576
-rw-r--r--  ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py  741
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_domain.py  174
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_host.py  131
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py  139
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_pool.py  128
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_vol.py  118
-rw-r--r--  ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py  148
-rw-r--r--  ansible_collections/community/general/plugins/modules/icinga2_feature.py  134
-rw-r--r--  ansible_collections/community/general/plugins/modules/icinga2_host.py  337
-rw-r--r--  ansible_collections/community/general/plugins/modules/idrac_redfish_command.py  246
-rw-r--r--  ansible_collections/community/general/plugins/modules/idrac_redfish_config.py  339
-rw-r--r--  ansible_collections/community/general/plugins/modules/idrac_redfish_info.py  251
-rw-r--r--  ansible_collections/community/general/plugins/modules/ilo_redfish_command.py  175
-rw-r--r--  ansible_collections/community/general/plugins/modules/ilo_redfish_config.py  194
-rw-r--r--  ansible_collections/community/general/plugins/modules/ilo_redfish_info.py  189
-rw-r--r--  ansible_collections/community/general/plugins/modules/imc_rest.py  441
-rw-r--r--  ansible_collections/community/general/plugins/modules/imgadm.py  319
-rw-r--r--  ansible_collections/community/general/plugins/modules/infinity.py  575
-rw-r--r--  ansible_collections/community/general/plugins/modules/influxdb_database.py  149
-rw-r--r--  ansible_collections/community/general/plugins/modules/influxdb_query.py  108
-rw-r--r--  ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py  350
-rw-r--r--  ansible_collections/community/general/plugins/modules/influxdb_user.py  298
-rw-r--r--  ansible_collections/community/general/plugins/modules/influxdb_write.py  103
-rw-r--r--  ansible_collections/community/general/plugins/modules/ini_file.py  490
-rw-r--r--  ansible_collections/community/general/plugins/modules/installp.py  300
-rw-r--r--  ansible_collections/community/general/plugins/modules/interfaces_file.py  416
-rw-r--r--  ansible_collections/community/general/plugins/modules/ip_netns.py  140
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_config.py  369
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py  352
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_dnszone.py  204
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_group.py  342
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_hbacrule.py  362
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_host.py  312
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_hostgroup.py  228
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_otpconfig.py  179
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_otptoken.py  534
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py  260
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_role.py  309
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_service.py  226
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_subca.py  219
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_sudocmd.py  158
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py  186
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_sudorule.py  471
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_user.py  404
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipa_vault.py  256
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipify_facts.py  110
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipinfoio_facts.py  136
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipmi_boot.py  225
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipmi_power.py  277
-rw-r--r--  ansible_collections/community/general/plugins/modules/iptables_state.py  654
-rw-r--r--  ansible_collections/community/general/plugins/modules/ipwcli_dns.py  358
-rw-r--r--  ansible_collections/community/general/plugins/modules/irc.py  311
-rw-r--r--  ansible_collections/community/general/plugins/modules/iso_create.py  305
-rw-r--r--  ansible_collections/community/general/plugins/modules/iso_customize.py  347
-rw-r--r--  ansible_collections/community/general/plugins/modules/iso_extract.py  215
-rw-r--r--  ansible_collections/community/general/plugins/modules/jabber.py  174
-rw-r--r--  ansible_collections/community/general/plugins/modules/java_cert.py  585
-rw-r--r--  ansible_collections/community/general/plugins/modules/java_keystore.py  584
-rw-r--r--  ansible_collections/community/general/plugins/modules/jboss.py  185
-rw-r--r--  ansible_collections/community/general/plugins/modules/jenkins_build.py  297
-rw-r--r--  ansible_collections/community/general/plugins/modules/jenkins_job.py  386
-rw-r--r--  ansible_collections/community/general/plugins/modules/jenkins_job_info.py  262
-rw-r--r--  ansible_collections/community/general/plugins/modules/jenkins_plugin.py  854
-rw-r--r--  ansible_collections/community/general/plugins/modules/jenkins_script.py  206
-rw-r--r--  ansible_collections/community/general/plugins/modules/jira.py  828
-rw-r--r--  ansible_collections/community/general/plugins/modules/kdeconfig.py  277
-rw-r--r--  ansible_collections/community/general/plugins/modules/kernel_blacklist.py  126
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_authentication.py  483
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py  280
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_client.py  984
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py  361
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_clientscope.py  506
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py  285
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py  161
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py  174
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py  456
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_group.py  496
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py  654
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_realm.py  826
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_realm_info.py  138
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_role.py  374
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_user_federation.py  1021
-rw-r--r--  ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py  406
-rw-r--r--  ansible_collections/community/general/plugins/modules/keyring.py  279
-rw-r--r--  ansible_collections/community/general/plugins/modules/keyring_info.py  156
-rw-r--r--  ansible_collections/community/general/plugins/modules/kibana_plugin.py  286
-rw-r--r--  ansible_collections/community/general/plugins/modules/launchd.py  522
-rw-r--r--  ansible_collections/community/general/plugins/modules/layman.py  276
-rw-r--r--  ansible_collections/community/general/plugins/modules/lbu.py  138
-rw-r--r--  ansible_collections/community/general/plugins/modules/ldap_attrs.py  337
-rw-r--r--  ansible_collections/community/general/plugins/modules/ldap_entry.py  286
-rw-r--r--  ansible_collections/community/general/plugins/modules/ldap_passwd.py  151
-rw-r--r--  ansible_collections/community/general/plugins/modules/ldap_search.py  189
-rw-r--r--  ansible_collections/community/general/plugins/modules/librato_annotation.py  175
-rw-r--r--  ansible_collections/community/general/plugins/modules/linode.py  691
-rw-r--r--  ansible_collections/community/general/plugins/modules/linode_v4.py  319
-rw-r--r--  ansible_collections/community/general/plugins/modules/listen_ports_facts.py  428
-rw-r--r--  ansible_collections/community/general/plugins/modules/lldp.py  88
-rw-r--r--  ansible_collections/community/general/plugins/modules/locale_gen.py  243
-rw-r--r--  ansible_collections/community/general/plugins/modules/logentries.py  164
-rw-r--r--  ansible_collections/community/general/plugins/modules/logentries_msg.py  105
-rw-r--r--  ansible_collections/community/general/plugins/modules/logstash_plugin.py  180
-rw-r--r--  ansible_collections/community/general/plugins/modules/lvg.py  338
-rw-r--r--  ansible_collections/community/general/plugins/modules/lvol.py  615
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxc_container.py  1742
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxca_cmms.py  182
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxca_nodes.py  212
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxd_container.py  862
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxd_profile.py  563
-rw-r--r--  ansible_collections/community/general/plugins/modules/lxd_project.py  461
-rw-r--r--  ansible_collections/community/general/plugins/modules/macports.py  326
-rw-r--r--  ansible_collections/community/general/plugins/modules/mail.py  418
-rw-r--r--  ansible_collections/community/general/plugins/modules/make.py  233
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py  313
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_alerts.py  357
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_group.py  642
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_policies.py  202
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_policies_info.py  121
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_provider.py  939
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_tags.py  189
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_tags_info.py  113
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_tenant.py  550
-rw-r--r--  ansible_collections/community/general/plugins/modules/manageiq_user.py  325
-rw-r--r--  ansible_collections/community/general/plugins/modules/mas.py  301
-rw-r--r--  ansible_collections/community/general/plugins/modules/matrix.py  147
-rw-r--r--  ansible_collections/community/general/plugins/modules/mattermost.py  187
-rw-r--r--  ansible_collections/community/general/plugins/modules/maven_artifact.py  762
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_dns_reload.py  194
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_memstore_info.py  180
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_server_info.py  305
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_zone.py  323
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_zone_domain.py  277
-rw-r--r--  ansible_collections/community/general/plugins/modules/memset_zone_record.py  393
-rw-r--r--  ansible_collections/community/general/plugins/modules/mksysb.py  171
-rw-r--r--  ansible_collections/community/general/plugins/modules/modprobe.py  320
-rw-r--r--  ansible_collections/community/general/plugins/modules/monit.py  349
-rw-r--r--  ansible_collections/community/general/plugins/modules/mqtt.py  257
-rw-r--r--  ansible_collections/community/general/plugins/modules/mssql_db.py  243
-rw-r--r--  ansible_collections/community/general/plugins/modules/mssql_script.py  313
-rw-r--r--  ansible_collections/community/general/plugins/modules/nagios.py  1255
-rw-r--r--  ansible_collections/community/general/plugins/modules/netcup_dns.py  296
-rw-r--r--  ansible_collections/community/general/plugins/modules/newrelic_deployment.py  182
-rw-r--r--  ansible_collections/community/general/plugins/modules/nexmo.py  143
-rw-r--r--  ansible_collections/community/general/plugins/modules/nginx_status_info.py  159
-rw-r--r--  ansible_collections/community/general/plugins/modules/nictagadm.py  232
-rw-r--r--  ansible_collections/community/general/plugins/modules/nmcli.py  2504
-rw-r--r--  ansible_collections/community/general/plugins/modules/nomad_job.py  260
-rw-r--r--  ansible_collections/community/general/plugins/modules/nomad_job_info.py  343
-rw-r--r--  ansible_collections/community/general/plugins/modules/nosh.py  559
-rw-r--r--  ansible_collections/community/general/plugins/modules/npm.py  342
-rw-r--r--  ansible_collections/community/general/plugins/modules/nsupdate.py  527
-rw-r--r--  ansible_collections/community/general/plugins/modules/ocapi_command.py  274
-rw-r--r--  ansible_collections/community/general/plugins/modules/ocapi_info.py  224
-rw-r--r--  ansible_collections/community/general/plugins/modules/oci_vcn.py  229
-rw-r--r--  ansible_collections/community/general/plugins/modules/odbc.py  176
-rw-r--r--  ansible_collections/community/general/plugins/modules/office_365_connector_card.py  310
-rw-r--r--  ansible_collections/community/general/plugins/modules/ohai.py  55
-rw-r--r--  ansible_collections/community/general/plugins/modules/omapi_host.py  319
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_host.py  292
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_image.py  414
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_image_info.py  276
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_service.py  759
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_template.py  284
-rw-r--r--  ansible_collections/community/general/plugins/modules/one_vm.py  1725
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py  580
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py  684
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py  1045
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_private_network.py  455
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_public_ip.py  338
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneandone_server.py  704
-rw-r--r--  ansible_collections/community/general/plugins/modules/onepassword_info.py  390
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py  168
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py  252
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py  257
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py  177
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_fc_network.py  131
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py  118
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py  127
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py  116
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py  174
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py  129
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_network_set.py  160
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_network_set_info.py  176
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_san_manager.py  225
-rw-r--r--  ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py  129
-rw-r--r--  ansible_collections/community/general/plugins/modules/online_server_info.py  180
-rw-r--r--  ansible_collections/community/general/plugins/modules/online_user_info.py  78
-rw-r--r--  ansible_collections/community/general/plugins/modules/open_iscsi.py  464
-rw-r--r--  ansible_collections/community/general/plugins/modules/openbsd_pkg.py  664
-rw-r--r--  ansible_collections/community/general/plugins/modules/opendj_backendprop.py  216
-rw-r--r--  ansible_collections/community/general/plugins/modules/openwrt_init.py  191
-rw-r--r--  ansible_collections/community/general/plugins/modules/opkg.py  212
-rw-r--r--  ansible_collections/community/general/plugins/modules/osx_defaults.py  409
-rw-r--r--  ansible_collections/community/general/plugins/modules/ovh_ip_failover.py  269
-rw-r--r--  ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py  320
-rw-r--r--  ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py  163
-rw-r--r--  ansible_collections/community/general/plugins/modules/pacemaker_cluster.py  230
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_device.py  682
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_ip_subnet.py  336
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_project.py  254
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_sshkey.py  278
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_volume.py  331
-rw-r--r--  ansible_collections/community/general/plugins/modules/packet_volume_attachment.py  308
-rw-r--r--  ansible_collections/community/general/plugins/modules/pacman.py  859
-rw-r--r--  ansible_collections/community/general/plugins/modules/pacman_key.py  321
-rw-r--r--  ansible_collections/community/general/plugins/modules/pagerduty.py  288
-rw-r--r--  ansible_collections/community/general/plugins/modules/pagerduty_alert.py  263
-rw-r--r--  ansible_collections/community/general/plugins/modules/pagerduty_change.py  200
-rw-r--r--  ansible_collections/community/general/plugins/modules/pagerduty_user.py  254
-rw-r--r--  ansible_collections/community/general/plugins/modules/pam_limits.py  357
-rw-r--r--  ansible_collections/community/general/plugins/modules/pamd.py  853
-rw-r--r--  ansible_collections/community/general/plugins/modules/parted.py  810
-rw-r--r--  ansible_collections/community/general/plugins/modules/pear.py  327
-rw-r--r--  ansible_collections/community/general/plugins/modules/pids.py  234
-rw-r--r--  ansible_collections/community/general/plugins/modules/pingdom.py  149
-rw-r--r--  ansible_collections/community/general/plugins/modules/pip_package_info.py  156
-rw-r--r--  ansible_collections/community/general/plugins/modules/pipx.py  330
-rw-r--r--  ansible_collections/community/general/plugins/modules/pipx_info.py  212
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkg5.py  186
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkg5_publisher.py  210
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkgin.py  396
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkgng.py  540
-rw-r--r--  ansible_collections/community/general/plugins/modules/pkgutil.py  301
-rw-r--r--  ansible_collections/community/general/plugins/modules/pmem.py  637
-rw-r--r--  ansible_collections/community/general/plugins/modules/portage.py  587
-rw-r--r--  ansible_collections/community/general/plugins/modules/portinstall.py  216
-rw-r--r--  ansible_collections/community/general/plugins/modules/pritunl_org.py  206
-rw-r--r--  ansible_collections/community/general/plugins/modules/pritunl_org_info.py  132
-rw-r--r--  ansible_collections/community/general/plugins/modules/pritunl_user.py  361
-rw-r--r--  ansible_collections/community/general/plugins/modules/pritunl_user_info.py  174
-rw-r--r--  ansible_collections/community/general/plugins/modules/profitbricks.py  667
-rw-r--r--  ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py  266
-rw-r--r--  ansible_collections/community/general/plugins/modules/profitbricks_nic.py  297
-rw-r--r--  ansible_collections/community/general/plugins/modules/profitbricks_volume.py  440
-rw-r--r--  ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py  267
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox.py  826
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_disk.py  767
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_domain_info.py  134
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_group_info.py  144
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_kvm.py  1433
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_nic.py  311
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_snap.py  363
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_storage_info.py  191
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py  185
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_template.py  247
-rw-r--r--  ansible_collections/community/general/plugins/modules/proxmox_user_info.py  257
-rw-r--r--  ansible_collections/community/general/plugins/modules/pubnub_blocks.py  639
-rw-r--r--  ansible_collections/community/general/plugins/modules/pulp_repo.py  743
-rw-r--r--  ansible_collections/community/general/plugins/modules/puppet.py  281
-rw-r--r--  ansible_collections/community/general/plugins/modules/pushbullet.py  197
-rw-r--r--  ansible_collections/community/general/plugins/modules/pushover.py  161
-rw-r--r--  ansible_collections/community/general/plugins/modules/python_requirements_info.py  215
-rw-r--r--  ansible_collections/community/general/plugins/modules/rax.py  903
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cbs.py237
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py228
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb.py268
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb_database.py181
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_cdb_user.py229
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb.py322
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb_nodes.py293
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_clb_ssl.py291
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_dns.py182
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_dns_record.py362
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_facts.py154
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_files.py402
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_files_objects.py558
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_identity.py112
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_keypair.py181
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_meta.py184
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_alarm.py237
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_check.py331
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_entity.py203
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_notification.py182
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py193
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_network.py146
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_queue.py147
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_scaling_group.py441
-rw-r--r--ansible_collections/community/general/plugins/modules/rax_scaling_policy.py294
-rw-r--r--ansible_collections/community/general/plugins/modules/read_csv.py221
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_command.py959
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_config.py444
-rw-r--r--ansible_collections/community/general/plugins/modules/redfish_info.py569
-rw-r--r--ansible_collections/community/general/plugins/modules/redhat_subscription.py1237
-rw-r--r--ansible_collections/community/general/plugins/modules/redis.py335
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_data.py257
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_data_incr.py193
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_data_info.py116
-rw-r--r--ansible_collections/community/general/plugins/modules/redis_info.py240
-rw-r--r--ansible_collections/community/general/plugins/modules/rhevm.py1506
-rw-r--r--ansible_collections/community/general/plugins/modules/rhn_channel.py200
-rw-r--r--ansible_collections/community/general/plugins/modules/rhn_register.py455
-rw-r--r--ansible_collections/community/general/plugins/modules/rhsm_release.py141
-rw-r--r--ansible_collections/community/general/plugins/modules/rhsm_repository.py260
-rw-r--r--ansible_collections/community/general/plugins/modules/riak.py238
-rw-r--r--ansible_collections/community/general/plugins/modules/rocketchat.py250
-rw-r--r--ansible_collections/community/general/plugins/modules/rollbar_deployment.py151
-rw-r--r--ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py180
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py239
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py192
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_job_run.py322
-rw-r--r--ansible_collections/community/general/plugins/modules/rundeck_project.py197
-rw-r--r--ansible_collections/community/general/plugins/modules/runit.py262
-rw-r--r--ansible_collections/community/general/plugins/modules/sap_task_list_execute.py348
-rw-r--r--ansible_collections/community/general/plugins/modules/sapcar_extract.py228
-rw-r--r--ansible_collections/community/general/plugins/modules/say.py99
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_compute.py699
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py217
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container.py412
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_info.py152
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py296
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py143
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_registry.py272
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py142
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_database_backup.py379
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function.py394
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function_info.py151
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py298
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py143
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_image_info.py130
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_ip.py270
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_ip_info.py114
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_lb.py366
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_organization_info.py108
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_private_network.py241
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_security_group.py245
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py118
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py282
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_server_info.py201
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py119
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_sshkey.py179
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_user_data.py179
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_volume.py201
-rw-r--r--ansible_collections/community/general/plugins/modules/scaleway_volume_info.py114
-rw-r--r--ansible_collections/community/general/plugins/modules/sefcontext.py385
-rw-r--r--ansible_collections/community/general/plugins/modules/selinux_permissive.py135
-rw-r--r--ansible_collections/community/general/plugins/modules/selogin.py254
-rw-r--r--ansible_collections/community/general/plugins/modules/sendgrid.py280
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_check.py376
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_client.py269
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_handler.py281
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_silence.py305
-rw-r--r--ansible_collections/community/general/plugins/modules/sensu_subscription.py160
-rw-r--r--ansible_collections/community/general/plugins/modules/seport.py331
-rw-r--r--ansible_collections/community/general/plugins/modules/serverless.py224
-rw-r--r--ansible_collections/community/general/plugins/modules/shutdown.py81
-rw-r--r--ansible_collections/community/general/plugins/modules/sl_vm.py439
-rw-r--r--ansible_collections/community/general/plugins/modules/slack.py521
-rw-r--r--ansible_collections/community/general/plugins/modules/slackpkg.py211
-rw-r--r--ansible_collections/community/general/plugins/modules/smartos_image_info.py119
-rw-r--r--ansible_collections/community/general/plugins/modules/snap.py413
-rw-r--r--ansible_collections/community/general/plugins/modules/snap_alias.py195
-rw-r--r--ansible_collections/community/general/plugins/modules/snmp_facts.py475
-rw-r--r--ansible_collections/community/general/plugins/modules/solaris_zone.py493
-rw-r--r--ansible_collections/community/general/plugins/modules/sorcery.py653
-rw-r--r--ansible_collections/community/general/plugins/modules/spectrum_device.py336
-rw-r--r--ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py536
-rw-r--r--ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py1595
-rw-r--r--ansible_collections/community/general/plugins/modules/ss_3par_cpg.py302
-rw-r--r--ansible_collections/community/general/plugins/modules/ssh_config.py341
-rw-r--r--ansible_collections/community/general/plugins/modules/stackdriver.py223
-rw-r--r--ansible_collections/community/general/plugins/modules/stacki_host.py303
-rw-r--r--ansible_collections/community/general/plugins/modules/statsd.py179
-rw-r--r--ansible_collections/community/general/plugins/modules/statusio_maintenance.py475
-rw-r--r--ansible_collections/community/general/plugins/modules/sudoers.py309
-rw-r--r--ansible_collections/community/general/plugins/modules/supervisorctl.py270
-rw-r--r--ansible_collections/community/general/plugins/modules/svc.py305
-rw-r--r--ansible_collections/community/general/plugins/modules/svr4pkg.py270
-rw-r--r--ansible_collections/community/general/plugins/modules/swdepot.py214
-rw-r--r--ansible_collections/community/general/plugins/modules/swupd.py322
-rw-r--r--ansible_collections/community/general/plugins/modules/syslogger.py198
-rw-r--r--ansible_collections/community/general/plugins/modules/syspatch.py175
-rw-r--r--ansible_collections/community/general/plugins/modules/sysrc.py259
-rw-r--r--ansible_collections/community/general/plugins/modules/sysupgrade.py161
-rw-r--r--ansible_collections/community/general/plugins/modules/taiga_issue.py319
-rw-r--r--ansible_collections/community/general/plugins/modules/telegram.py146
-rw-r--r--ansible_collections/community/general/plugins/modules/terraform.py659
-rw-r--r--ansible_collections/community/general/plugins/modules/timezone.py923
-rw-r--r--ansible_collections/community/general/plugins/modules/twilio.py182
-rw-r--r--ansible_collections/community/general/plugins/modules/typetalk.py136
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_dns_record.py233
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_dns_zone.py248
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_group.py184
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_share.py579
-rw-r--r--ansible_collections/community/general/plugins/modules/udm_user.py573
-rw-r--r--ansible_collections/community/general/plugins/modules/ufw.py606
-rw-r--r--ansible_collections/community/general/plugins/modules/uptimerobot.py157
-rw-r--r--ansible_collections/community/general/plugins/modules/urpmi.py221
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_aaa_group.py239
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py131
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py168
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py109
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_dns_host.py167
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_network_interface_address.py143
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py104
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py355
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_exception.py249
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py286
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py149
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_location.py226
-rw-r--r--ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py130
-rw-r--r--ansible_collections/community/general/plugins/modules/vdo.py781
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_configuration.py204
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_info.py287
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_role.py254
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_schema.py327
-rw-r--r--ansible_collections/community/general/plugins/modules/vertica_user.py393
-rw-r--r--ansible_collections/community/general/plugins/modules/vexata_eg.py216
-rw-r--r--ansible_collections/community/general/plugins/modules/vexata_volume.py203
-rw-r--r--ansible_collections/community/general/plugins/modules/vmadm.py790
-rw-r--r--ansible_collections/community/general/plugins/modules/wakeonlan.py139
-rw-r--r--ansible_collections/community/general/plugins/modules/wdc_redfish_command.py345
-rw-r--r--ansible_collections/community/general/plugins/modules/wdc_redfish_info.py208
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_app.py207
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_db.py203
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_domain.py178
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_mailbox.py146
-rw-r--r--ansible_collections/community/general/plugins/modules/webfaction_site.py217
-rw-r--r--ansible_collections/community/general/plugins/modules/xattr.py247
-rw-r--r--ansible_collections/community/general/plugins/modules/xbps.py351
-rw-r--r--ansible_collections/community/general/plugins/modules/xcc_redfish_command.py795
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_facts.py212
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest.py2033
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest_info.py224
-rw-r--r--ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py275
-rw-r--r--ansible_collections/community/general/plugins/modules/xfconf.py291
-rw-r--r--ansible_collections/community/general/plugins/modules/xfconf_info.py191
-rw-r--r--ansible_collections/community/general/plugins/modules/xfs_quota.py504
-rw-r--r--ansible_collections/community/general/plugins/modules/xml.py996
-rw-r--r--ansible_collections/community/general/plugins/modules/yarn.py408
-rw-r--r--ansible_collections/community/general/plugins/modules/yum_versionlock.py180
-rw-r--r--ansible_collections/community/general/plugins/modules/zfs.py295
-rw-r--r--ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py276
-rw-r--r--ansible_collections/community/general/plugins/modules/zfs_facts.py260
-rw-r--r--ansible_collections/community/general/plugins/modules/znode.py301
-rw-r--r--ansible_collections/community/general/plugins/modules/zpool_facts.py199
-rw-r--r--ansible_collections/community/general/plugins/modules/zypper.py607
-rw-r--r--ansible_collections/community/general/plugins/modules/zypper_repository.py474
-rw-r--r--ansible_collections/community/general/plugins/test/a_module.py72
-rw-r--r--ansible_collections/community/general/tests/.gitignore5
-rw-r--r--ansible_collections/community/general/tests/config.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/requirements.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/aix_devices/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml81
-rw-r--r--ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml130
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alerta_customer/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alerta_customer/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alerta_customer/tasks/main.yml156
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml93
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/subcommands.yml222
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml56
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml54
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_state.yml120
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/files/test.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/tasks/main.yml88
-rw-r--r--ansible_collections/community/general/tests/integration/targets/apache2_module/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml47
-rw-r--r--ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml207
-rw-r--r--ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml52
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/files/sub/subfile.txt0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml145
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tests/broken-link.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tests/core.yml177
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tests/exclusions.yml44
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tests/idempotency.yml144
-rw-r--r--ansible_collections/community/general/tests/integration/targets/archive/tests/remove.yml211
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/defaults/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/main.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_common_tests.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/setup.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml80
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml42
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml99
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml54
-rw-r--r--ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml62
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback/inventory.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml100
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_diy/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml462
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml9
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml101
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/tasks/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/tasks/setup.yml28
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_general.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_version.yml50
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml68
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cmd_runner/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cmd_runner/library/cmd_echo.py55
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cmd_runner/vars/main.yml123
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection/aliases5
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection/test.sh16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml48
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_jail/aliases5
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases5
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases6
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/connection_posix/aliases6
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/connection_posix/test.sh21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/consul/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml177
-rw-r--r--ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml89
-rw-r--r--ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j214
-rw-r--r--ansible_collections/community/general/tests/integration/targets/copr/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/copr/tasks/main.yml160
-rw-r--r--ansible_collections/community/general/tests/integration/targets/copr/vars/main.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cpanm/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cpanm/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cpanm/tasks/main.yml65
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cronvar/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml124
-rw-r--r--ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml158
-rw-r--r--ansible_collections/community/general/tests/integration/targets/discord/README.md20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/discord/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/discord/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/discord/tasks/main.yml69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/aliases15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py6
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py21
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py133
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py28
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/startproj/.keep0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/django_manage/tasks/main.yaml84
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dnf_versionlock/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/install.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml74
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml43
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml291
-rw-r--r--ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml384
-rw-r--r--ansible_collections/community/general/tests/integration/targets/etcd3/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml81
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/defaults/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/basics.yml411
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/errors.yml133
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/floats.yml249
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/main.yml44
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/sparse.yml286
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesize/tasks/symlinks.yml97
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml64
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml119
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/freebsd_setup.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml107
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml59
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml102
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml154
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_counter/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_counter/tasks/main.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_dict/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_dict/tasks/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_from_csv/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_from_csv/tasks/main.yml54
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_from_csv/vars/main.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml49
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/vars/main.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_hashids/aliases6
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/filter_hashids/runme.sh15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_hashids/tasks/main.yml63
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_hashids/vars/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_jc/aliases7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml143
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml169
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/vars/main.yml209
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/tasks/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_random_mac/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml62
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_time/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml115
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/tasks/main.yml44
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/vars/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_version_sort/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/filter_version_sort/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/files/serve.py69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml197
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml64
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml68
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml289
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml206
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml50
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml135
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/defaults/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/create_record.yml69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/record.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/remove_record.yml61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/update_record.yml59
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml213
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml28
-rw-r--r--ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/github_issue/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_branch/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_branch/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_branch/tasks/main.yml69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml129
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml74
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml706
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml77
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml49
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/defaults/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/tasks/main.yml214
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_members/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_members/defaults/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_members/tasks/main.yml124
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml701
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml257
-rw-r--r--ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/sshkey.yml139
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml88
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml45
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml85
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homebrew/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml99
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homebrew_cask/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homebrew_cask/defaults/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homebrew_cask/tasks/main.yml73
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homectl/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/homectl/tasks/main.yml182
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml319
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml113
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml105
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml190
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml155
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml141
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml142
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml159
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml91
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml166
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml152
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/tasks/main.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/tasks/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml143
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/00-basic.yml42
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/01-value.yml592
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/02-values.yml1023
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml44
-rw-r--r--ansible_collections/community/general/tests/integration/targets/interfaces_file/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff_384111
-rw-r--r--ansible_collections/community/general/tests/integration/targets/interfaces_file/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/interfaces_file/tasks/main.yml67
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml320
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml294
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml203
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml115
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml163
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/aliases13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize.yml75
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_add_files.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_delete_files.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_exception.yml71
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_mount.yml39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/main.yml94
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/prepare.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/aliases13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.isobin0 -> 374784 bytes
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml54
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml43
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Alpine.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Archlinux.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Debian.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/files/setupSSLServer.py24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12bin0 -> 2532 bytes
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml116
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_cert/tasks/state_change.yml298
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/defaults/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml43
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/prepare.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/tests.yml313
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jboss/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml238
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jira/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jira/tasks/main.yml111
-rw-r--r--ansible_collections/community/general/tests/integration/targets/jira/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kdeconfig/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kdeconfig/meta/main.yml7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/files/kwriteconf_fake38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/main.yml369
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kernel_blacklist/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kernel_blacklist/files/blacklist7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kernel_blacklist/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/kernel_blacklist/tasks/main.yml111
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/readme.adoc27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/tasks/main.yml234
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_client/README.md17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_client/docker-compose.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_client/tasks/main.yml63
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_client/vars/main.yml61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/README.md16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/docker-compose.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/tasks/main.yml164
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/README.md17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml48
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/vars/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/README.md17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml49
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/vars/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_group/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_group/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_group/readme.adoc27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_group/tasks/main.yml527
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_group/vars/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/tasks/main.yml175
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_role/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_role/tasks/main.yml250
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_role/vars/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/tasks/main.yml425
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/aliases4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml143
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/vars/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keyring/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keyring/tasks/main.yml99
-rw-r--r--ansible_collections/community/general/tests/integration/targets/keyring/vars/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml30
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml30
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml71
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml46
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml115
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml65
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j218
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j218
-rw-r--r--ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ldap_search/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/listen_ports_facts/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml112
-rw-r--r--ansible_collections/community/general/tests/integration/targets/locale_gen/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml99
-rw-r--r--ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json30
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/library/local_module.py33
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.sh20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_dependent/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_dependent/tasks/main.yml183
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_dig/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_dig/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_dig/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml28
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml16
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/aliases6
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_merge_variables/runme.sh13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test.yml174
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test_with_env.yml44
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/vars.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml84
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml130
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml239
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j213
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Alpine.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml122
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_pet/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_pet/dependencies.yml10
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_random_pet/runme.sh11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_pet/test.yml30
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_string/aliases7
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_random_string/runme.sh8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_string/test.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_words/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_words/dependencies.yml10
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/lookup_random_words/runme.sh11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lookup_random_words/test.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml81
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lxd_project/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/lxd_project/tasks/main.yml142
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt26
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml105
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mas/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml158
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml125
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml152
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml235
-rw-r--r--ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/library/mdepfail.py70
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/library/msimple.py78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/library/msimpleda.py66
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/library/mstate.py78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mdepfail.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple.yml85
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple_output_conflict.yml54
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimpleda.yml39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mstate.yml83
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/aliases14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py51
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml99
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml65
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j219
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/Alpine.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/Archlinux.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mqtt/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml147
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mssql_script/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mssql_script/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mssql_script/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/mssql_script/tasks/main.yml246
-rw-r--r--ansible_collections/community/general/tests/integration/targets/nomad/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl400
-rw-r--r--ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml111
-rw-r--r--ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml111
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/tasks/no_bin_links.yml68
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml74
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml158
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_host/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gzbin0 -> 2950 bytes
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml243
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_template/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gzbin0 -> 1069 bytes
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_template/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/one_template/tasks/main.yml246
-rw-r--r--ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml255
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/basic.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/locally_installed_package.yml85
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/main.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/package_urls.yml219
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/reason.yml101
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/remove_nosave.yml74
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pacman/tasks/update_cache.yml27
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pam_limits/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pam_limits/files/test_pam_limits.conf5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pam_limits/tasks/main.yml92
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pamd/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pamd/tasks/main.yml74
-rw-r--r--ansible_collections/community/general/tests/integration/targets/parted/aliases13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/parted/handlers/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/parted/tasks/main.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pids/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pids/files/sleeper.c13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pids/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml120
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pids/templates/obtainpid.sh7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pipx/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pipx/tasks/main.yml316
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pipx_info/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pipx_info/tasks/main.yml140
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml52
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/tasks/freebsd.yml551
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/tasks/install_single_package.yml58
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/tasks/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/tasks/setup-testjail.yml100
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j216
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgng/vars/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgutil/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml117
-rw-r--r--ansible_collections/community/general/tests/integration/targets/proxmox/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml579
-rw-r--r--ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml45
-rw-r--r--ansible_collections/community/general/tests/integration/targets/read_csv/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/read_csv/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml176
-rw-r--r--ansible_collections/community/general/tests/integration/targets/redis_info/aliases10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml48
-rw-r--r--ansible_collections/community/general/tests/integration/targets/rundeck/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/rundeck/defaults/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/rundeck/files/test_job.yaml28
-rw-r--r--ansible_collections/community/general/tests/integration/targets/rundeck/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/rundeck/tasks/main.yml127
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml206
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml76
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml152
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml392
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container/defaults/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container/tasks/main.yml290
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_info/defaults/main.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_info/tasks/main.yml63
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/defaults/main.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/tasks/main.yml255
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/defaults/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/tasks/main.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/tasks/main.yml178
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/tasks/main.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml238
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function/defaults/main.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function/tasks/main.yml284
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_info/defaults/main.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_info/tasks/main.yml62
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/defaults/main.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/tasks/main.yml260
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/defaults/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/tasks/main.yml43
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml449
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml224
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml22
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml139
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml252
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml49
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml87
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml51
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sefcontext/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml233
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_client/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml179
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml129
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml56
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml56
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml56
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml75
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/alpine.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/archlinux.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/README.md73
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/D-Fedora.yml33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/default.yml21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml55
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Fedora.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Ubuntu.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml104
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md144
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/create-repo.sh63
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xzbin0 -> 7352 bytes
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml26
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Alpine.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Archlinux.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml29
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif22
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif4
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml72
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml60
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml60
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_opennebula/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_opennebula/tasks/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml69
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Alpine.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Archlinux.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/CentOS-8.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Darwin.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat-9.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml22
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml257
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml56
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml16
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers/main.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default-cleanup.yml10
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default.yml22
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/main.yml20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/defaults/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/tasks/main.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Alpine.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Archlinux.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Debian.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/RedHat.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/defaults/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/handlers/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Fedora.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.0.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.1.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Ubuntu.yml19
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/Debian.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/RedHat.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/default.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/main.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/nothing.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem24
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml30
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml18
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml107
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j214
-rw-r--r--ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j220
-rw-r--r--ansible_collections/community/general/tests/integration/targets/shutdown/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml93
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap/aliases13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap/tasks/main.yml243
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap_alias/aliases13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap_alias/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/test.yml159
-rw-r--r--ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/tasks/main.yml78
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/files/fake_id_rsa0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/files/ssh_config_test0
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/main.yml245
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/options.yml422
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sudoers/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sudoers/tasks/main.yml279
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py31
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml57
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml140
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml64
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf48
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sysrc/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sysrc/tasks/main.yml343
-rw-r--r--ansible_collections/community/general/tests/integration/targets/sysrc/tasks/setup-testjail.yml73
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/.gitignore8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/main.tf35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/variables.tf62
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/tasks/complex_variables.yml60
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/tasks/main.yml67
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/templates/provider_test/main.tf.j213
-rw-r--r--ansible_collections/community/general/tests/integration/targets/terraform/vars/main.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/test_a_module/aliases5
-rw-r--r--ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/test_a_module/library/local_module.py33
-rwxr-xr-xansible_collections/community/general/tests/integration/targets/test_a_module/runme.sh20
-rw-r--r--ansible_collections/community/general/tests/integration/targets/test_a_module/runme.yml42
-rw-r--r--ansible_collections/community/general/tests/integration/targets/timezone/aliases9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/timezone/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml96
-rw-r--r--ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml612
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/aliases17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml45
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml25
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml406
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml154
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml84
-rw-r--r--ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml58
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml6
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml21
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml72
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml46
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml147
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml184
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml147
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/aliases7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml17
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml14
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml77
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml38
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml42
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml241
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml39
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml51
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml26
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml33
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml34
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml32
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml37
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml36
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml81
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml86
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml35
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml50
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml50
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml41
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml61
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml53
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml85
-rw-r--r--ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yarn/aliases8
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml9
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml23
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml233
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j214
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yum_versionlock/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/yum_versionlock/tasks/main.yml87
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec12
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec.license3
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml15
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml530
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j224
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo11
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml7
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml13
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml40
-rw-r--r--ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml289
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/aliases.json11
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/aliases.json.license3
-rwxr-xr-xansible_collections/community/general/tests/sanity/extra/aliases.py65
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/botmeta.json8
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/botmeta.json.license3
-rwxr-xr-xansible_collections/community/general/tests/sanity/extra/botmeta.py234
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/extra-docs.json13
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/extra-docs.json.license3
-rwxr-xr-xansible_collections/community/general/tests/sanity/extra/extra-docs.py29
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/licenses.json4
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/licenses.json.license3
-rwxr-xr-xansible_collections/community/general/tests/sanity/extra/licenses.py110
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/licenses.py.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json7
-rw-r--r--ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json.license3
-rwxr-xr-xansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py58
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.11.txt28
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.11.txt.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.12.txt21
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.12.txt.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.13.txt21
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.13.txt.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.14.txt23
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.14.txt.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.15.txt23
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.15.txt.license3
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.16.txt23
-rw-r--r--ansible_collections/community/general/tests/sanity/ignore-2.16.txt.license3
-rw-r--r--ansible_collections/community/general/tests/unit/compat/__init__.py0
-rw-r--r--ansible_collections/community/general/tests/unit/compat/builtins.py20
-rw-r--r--ansible_collections/community/general/tests/unit/compat/mock.py109
-rw-r--r--ansible_collections/community/general/tests/unit/compat/unittest.py25
-rw-r--r--ansible_collections/community/general/tests/unit/mock/loader.py103
-rw-r--r--ansible_collections/community/general/tests/unit/mock/path.py12
-rw-r--r--ansible_collections/community/general/tests/unit/mock/procenv.py77
-rw-r--r--ansible_collections/community/general/tests/unit/mock/vault_helper.py29
-rw-r--r--ansible_collections/community/general/tests/unit/mock/yaml_helper.py128
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/conftest.py38
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/helper.py19
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_doas.py85
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py95
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py86
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py85
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py82
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/become/test_sudosu.py51
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py18
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py26
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/callback/test_elastic.py127
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/callback/test_loganalytics.py66
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/callback/test_opentelemetry.py212
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/callback/test_splunk.py64
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py25
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/filter/test_crc32.py16
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd174
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json222
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py31
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_icinga2.py149
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py47
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_lxd.py106
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_opennebula.py342
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py745
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py206
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/inventory/test_xen_orchestra.py211
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_common.py85
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_conftest.py39
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json18
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json18
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json20
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json35
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json85
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json103
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_bitwarden.py160
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_dependent.py45
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py44
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py56
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py175
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py537
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_merge_variables.py135
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py268
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_revbitspss.py44
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py120
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py54
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_scaleway.py126
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py73
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py167
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py40
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py165
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py103
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py632
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_cmd_runner.py374
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_csv.py166
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py143
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py117
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py239
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_ocapi_utils.py54
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_onepassword.py44
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_opennebula.py102
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py57
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py48
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py34
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py70
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py26
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py119
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json73
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json707
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json87
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json771
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json75
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json420
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py75
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py74
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py18
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py183
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py414
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py221
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py176
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py51
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/FakeAnsibleModule.py34
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/FakeXenAPI.py70
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/conftest.py39
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/gitlab.py704
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/hpe_test_utils.py206
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file-README.md27
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt8
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt17
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt17
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt17
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv412
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.exceptions.txt0
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up13
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt0
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up13
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt0
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json21
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv612
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.exceptions.txt0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up | 13
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up | 13
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method | 12
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert | 12
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu | 13
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves | 12
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4 | 7
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up | 7
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up | 7
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6 | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu | 7
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves | 6
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json | 18
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt | 17
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4 | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6 | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert | 7
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json | 21
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up | 62
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice | 62
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup | 62
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4 | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6 | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves | 61
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json | 109
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup | 10
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4 | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6 | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt | 9
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu | 11
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt | 8
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves | 12
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt | 0
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json | 24
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.license | 3
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family | 12
-rw-r--r--  ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family.license | 3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp6
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces8
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com61
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup11
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup.license3
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py557
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/linode_conftest.py87
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/oneview_conftest.py28
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/oneview_module_loader.py36
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/rhn_conftest.py35
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_alerta_customer.py250
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_apache2_module.py24
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_apk.py38
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_archive.py75
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_access_key.py343
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py198
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py193
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py311
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_campfire.py96
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_circonus_annotation.py153
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_cpanm.py293
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_datadog_downtime.py.disabled226
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_dconf.py44
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_discord.py105
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple.py64
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple_info.py111
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2.py116
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2_info.py103
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gem.py143
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_github_repo.py330
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_deploy_key.py109
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_group.py133
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_hook.py104
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_project.py125
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_protected_branch.py83
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_runner.py145
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_user.py184
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_hana_query.py103
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew.py24
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew_cask.py23
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_icinga2_feature.py100
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otpconfig.py407
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otptoken.py496
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_pwpolicy.py614
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_java_keystore.py422
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_build.py224
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_plugin.py192
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_authentication.py623
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client.py150
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py573
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_clientscope.py614
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_identity_provider.py588
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm.py311
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm_info.py122
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_role.py327
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_user_federation.py582
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_linode.py22
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_linode_v4.py379
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_cmms.py101
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_nodes.py103
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_macports.py35
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_maven_artifact.py71
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_modprobe.py485
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_monit.py159
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_nmcli.py4261
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_npm.py262
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_command.py639
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_info.py240
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_one_vm.py60
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_datacenter_info.py80
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_enclosure_info.py137
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network.py392
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py104
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network.py170
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network_info.py61
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network.py168
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py63
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py261
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py63
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set.py187
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set_info.py121
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager.py243
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager_info.py72
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_opkg.py241
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pacman.py1099
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pacman_key.py577
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty.py130
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_alert.py46
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_change.py85
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pamd.py386
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_parted.py346
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pkgin.py145
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pmem.py707
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org.py205
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org_info.py138
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user.py209
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user_info.py161
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_kvm.py20
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_snap.py117
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_tasks_info.py195
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_puppet.py227
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_redhat_subscription.py1337
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data.py278
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_incr.py208
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_info.py114
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_redis_info.py77
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_channel.py147
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_register.py293
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_rhsm_release.py148
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_rpm_ostree_pkg.py108
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_sap_task_list_execute.py91
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_sapcar_extract.py54
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_compute_private_network.py180
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_private_network.py198
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_slack.py203
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_solaris_zone.py116
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ss_3par_cpg.py248
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_statsd.py101
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_sysupgrade.py69
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_terraform.py23
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_ufw.py477
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_command.py911
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_info.py216
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_xcc_redfish_command.py629
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_info.py79
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py299
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf.py312
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf_info.py172
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/utils.py54
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/xenserver_common.py12
-rw-r--r--ansible_collections/community/general/tests/unit/plugins/modules/xenserver_conftest.py76
-rw-r--r--ansible_collections/community/general/tests/unit/requirements.txt46
-rw-r--r--ansible_collections/community/general/tests/unit/requirements.yml7
-rw-r--r--ansible_collections/community/general/tests/utils/constraints.txt59
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/aix.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/alpine.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/fedora.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/freebsd.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/generic.sh21
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/linux-community.sh22
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/linux.sh21
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/macos.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/osx.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/remote.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/rhel.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/sanity.sh45
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/shippable.sh232
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/ubuntu.sh29
-rwxr-xr-xansible_collections/community/general/tests/utils/shippable/units.sh41
2773 files changed, 396930 insertions, 0 deletions
diff --git a/ansible_collections/community/general/.azure-pipelines/README.md b/ansible_collections/community/general/.azure-pipelines/README.md
new file mode 100644
index 000000000..9e8ad7410
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/README.md
@@ -0,0 +1,9 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+## Azure Pipelines Configuration
+
+Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
diff --git a/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
new file mode 100644
index 000000000..07da25589
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/azure-pipelines.yml
@@ -0,0 +1,413 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+trigger:
+ batch: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+pr:
+ autoCancel: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+schedules:
+ - cron: 0 8 * * *
+ displayName: Nightly (main)
+ always: true
+ branches:
+ include:
+ - main
+ - cron: 0 10 * * *
+ displayName: Nightly (active stable branches)
+ always: true
+ branches:
+ include:
+ - stable-7
+ - stable-6
+ - cron: 0 11 * * 0
+ displayName: Weekly (old stable branches)
+ always: true
+ branches:
+ include:
+ - stable-5
+
+variables:
+ - name: checkoutPath
+ value: ansible_collections/community/general
+ - name: coverageBranches
+ value: main
+ - name: pipelinesCoverage
+ value: coverage
+ - name: entryPoint
+ value: tests/utils/shippable/shippable.sh
+ - name: fetchDepth
+ value: 0
+
+resources:
+ containers:
+ - container: default
+ image: quay.io/ansible/azure-pipelines-test-container:3.0.0
+
+pool: Standard
+
+stages:
+### Sanity
+ - stage: Sanity_devel
+ displayName: Sanity devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: devel/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+ - test: extra
+ - stage: Sanity_2_15
+ displayName: Sanity 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.15/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+ - stage: Sanity_2_14
+ displayName: Sanity 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.14/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+ - stage: Sanity_2_13
+ displayName: Sanity 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Test {0}
+ testFormat: 2.13/sanity/{0}
+ targets:
+ - test: 1
+ - test: 2
+ - test: 3
+ - test: 4
+### Units
+ - stage: Units_devel
+ displayName: Units devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: devel/units/{0}/1
+ targets:
+ - test: 2.7
+ - test: 3.6
+ - test: 3.7
+ - test: 3.8
+ - test: 3.9
+ - test: '3.10'
+ - test: '3.11'
+ - stage: Units_2_15
+ displayName: Units 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.15/units/{0}/1
+ targets:
+ - test: 3.5
+ - test: "3.10"
+ - stage: Units_2_14
+ displayName: Units 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.14/units/{0}/1
+ targets:
+ - test: 3.9
+ - stage: Units_2_13
+ displayName: Units 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.13/units/{0}/1
+ targets:
+ - test: 2.7
+ - test: 3.8
+
+## Remote
+ - stage: Remote_devel_extra_vms
+ displayName: Remote devel extra VMs
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: Alpine 3.17
+ test: alpine/3.17
+ # - name: Fedora 37
+ # test: fedora/37
+ - name: Ubuntu 22.04
+ test: ubuntu/22.04
+ groups:
+ - vm
+ - stage: Remote_devel
+ displayName: Remote devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: macOS 13.2
+ test: macos/13.2
+ - name: RHEL 9.1
+ test: rhel/9.1
+ - name: FreeBSD 13.2
+ test: freebsd/13.2
+ - name: FreeBSD 12.4
+ test: freebsd/12.4
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Remote_2_15
+ displayName: Remote 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.15/{0}
+ targets:
+ - name: RHEL 7.9
+ test: rhel/7.9
+ - name: FreeBSD 13.1
+ test: freebsd/13.1
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Remote_2_14
+ displayName: Remote 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/{0}
+ targets:
+ - name: RHEL 9.0
+ test: rhel/9.0
+ - name: FreeBSD 12.3
+ test: freebsd/12.3
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Remote_2_13
+ displayName: Remote 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/{0}
+ targets:
+ - name: macOS 12.0
+ test: macos/12.0
+ - name: RHEL 8.5
+ test: rhel/8.5
+ - name: FreeBSD 13.0
+ test: freebsd/13.0
+ groups:
+ - 1
+ - 2
+ - 3
+
+### Docker
+ - stage: Docker_devel
+ displayName: Docker devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux/{0}
+ targets:
+ - name: Fedora 37
+ test: fedora37
+ - name: openSUSE 15
+ test: opensuse15
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+ - name: Ubuntu 22.04
+ test: ubuntu2204
+ - name: Alpine 3
+ test: alpine3
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Docker_2_15
+ displayName: Docker 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.15/linux/{0}
+ targets:
+ - name: CentOS 7
+ test: centos7
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Docker_2_14
+ displayName: Docker 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/linux/{0}
+ targets:
+ - name: Fedora 36
+ test: fedora36
+ groups:
+ - 1
+ - 2
+ - 3
+ - stage: Docker_2_13
+ displayName: Docker 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/linux/{0}
+ targets:
+ - name: Fedora 35
+ test: fedora35
+ - name: openSUSE 15 py2
+ test: opensuse15py2
+ - name: Alpine 3
+ test: alpine3
+ groups:
+ - 1
+ - 2
+ - 3
+
+### Community Docker
+ - stage: Docker_community_devel
+ displayName: Docker (community images) devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux-community/{0}
+ targets:
+ - name: Debian Bullseye
+ test: debian-bullseye/3.9
+ - name: ArchLinux
+ test: archlinux/3.11
+ - name: CentOS Stream 8
+ test: centos-stream8/3.9
+ groups:
+ - 1
+ - 2
+ - 3
+
+### Generic
+ - stage: Generic_devel
+ displayName: Generic devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: devel/generic/{0}/1
+ targets:
+ - test: 2.7
+ - test: '3.11'
+ - stage: Generic_2_15
+ displayName: Generic 2.15
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.15/generic/{0}/1
+ targets:
+ - test: 3.9
+ - stage: Generic_2_14
+ displayName: Generic 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.14/generic/{0}/1
+ targets:
+ - test: '3.10'
+ - stage: Generic_2_13
+ displayName: Generic 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ nameFormat: Python {0}
+ testFormat: 2.13/generic/{0}/1
+ targets:
+ - test: 3.9
+
+ - stage: Summary
+ condition: succeededOrFailed()
+ dependsOn:
+ - Sanity_devel
+ - Sanity_2_13
+ - Sanity_2_14
+ - Sanity_2_15
+ - Units_devel
+ - Units_2_13
+ - Units_2_14
+ - Units_2_15
+ - Remote_devel_extra_vms
+ - Remote_devel
+ - Remote_2_13
+ - Remote_2_14
+ - Remote_2_15
+ - Docker_devel
+ - Docker_2_13
+ - Docker_2_14
+ - Docker_2_15
+ - Docker_community_devel
+# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+# - Generic_devel
+# - Generic_2_13
+# - Generic_2_14
+# - Generic_2_15
+ jobs:
+ - template: templates/coverage.yml
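For reference: each stage's testFormat above is a format string that templates/matrix.yml expands once per target, and the expanded value is the test name that run-tests.sh later hands to the entry point (tests/utils/shippable/shippable.sh). A minimal Python sketch of that expansion, using the Sanity_devel stage's values (the variable names are illustrative):

    # Expand a stage's testFormat over its targets, as templates/matrix.yml does.
    test_format = 'devel/sanity/{0}'         # testFormat of the Sanity_devel stage
    targets = ['1', '2', '3', '4', 'extra']  # the stage's "test" values

    for target in targets:
        test = test_format.format(target)
        # run-tests.sh ultimately runs: tests/utils/shippable/shippable.sh <test>
        print(test)  # devel/sanity/1 ... devel/sanity/extra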
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 000000000..ca2b19de9
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Aggregate code coverage results for later processing.
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+ # Only analyze coverage if the installed version of ansible-test supports it.
+ # Doing so allows this script to work unmodified for multiple Ansible versions.
+ ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
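The --help probe above is a capability check: the "coverage analyze targets" subcommand only exists in newer ansible-test releases, so the script degrades gracefully on older ones. A hedged Python equivalent of the same probe (assuming ansible-test is on PATH):

    import subprocess

    def supports_analyze_targets() -> bool:
        # Exit status 0 from --help means this ansible-test release
        # supports 'coverage analyze targets generate'.
        result = subprocess.run(
            ['ansible-test', 'coverage', 'analyze', 'targets', 'generate', '--help'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        return result.returncode == 0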
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py b/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
new file mode 100755
index 000000000..3b2fd993d
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/combine-coverage.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Combine coverage data from multiple jobs, keeping the data only from the most recent attempt of each job.
+Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
+The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
+Keep in mind that Azure Pipelines does not enforce unique job display names (it only enforces unique job names).
+It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shutil
+import sys
+
+
+def main():
+ """Main program entry point."""
+ source_directory = sys.argv[1]
+
+ if '/ansible_collections/' in os.getcwd():
+ output_path = "tests/output"
+ else:
+ output_path = "test/results"
+
+ destination_directory = os.path.join(output_path, 'coverage')
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ jobs = {}
+ count = 0
+
+ for name in os.listdir(source_directory):
+ match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
+ label = match.group('label')
+ attempt = int(match.group('attempt'))
+ jobs[label] = max(attempt, jobs.get(label, 0))
+
+ for label, attempt in jobs.items():
+ name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
+ source = os.path.join(source_directory, name)
+ source_files = os.listdir(source)
+
+ for source_file in source_files:
+ source_path = os.path.join(source, source_file)
+ destination_path = os.path.join(destination_directory, source_file + '.' + label)
+ print('"%s" -> "%s"' % (source_path, destination_path))
+ shutil.copyfile(source_path, destination_path)
+ count += 1
+
+ print('Coverage file count: %d' % count)
+ print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
+ print('##vso[task.setVariable variable=outputPath]%s' % output_path)
+
+
+if __name__ == '__main__':
+ main()
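Note that the loop assumes every entry in the source directory matches the regex; the artifact download pattern in templates/coverage.yml only fetches directories named "Coverage ...", so no None check on match is needed. A self-contained demo of the keep-latest-attempt logic, with hypothetical artifact names:

    import re

    names = [
        'Coverage 1 Units devel Python 3.9',
        'Coverage 2 Units devel Python 3.9',  # retry of the same job
        'Coverage 1 Sanity devel Test 1',
    ]

    jobs = {}
    for name in names:
        match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
        label = match.group('label')
        jobs[label] = max(int(match.group('attempt')), jobs.get(label, 0))

    print(jobs)  # {'Units devel Python 3.9': 2, 'Sanity devel Test 1': 1}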
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh b/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh
new file mode 100755
index 000000000..1f4b8e4f1
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/process-results.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Check the test results and set variables for use in later steps.
+
+set -o pipefail -eu
+
+if [[ "$PWD" =~ /ansible_collections/ ]]; then
+ output_path="tests/output"
+else
+ output_path="test/results"
+fi
+
+echo "##vso[task.setVariable variable=outputPath]${output_path}"
+
+if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveTestResults]true"
+fi
+
+if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveBotResults]true"
+fi
+
+if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveCoverageData]true"
+fi
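The ##vso[task.setVariable ...] strings are Azure Pipelines logging commands: any step that prints one exposes that variable to later steps, which is how the haveTestResults/haveBotResults/haveCoverageData conditions in templates/test.yml are driven. The same hand-off can be sketched from Python (the helper name is illustrative):

    def set_pipeline_variable(name: str, value: str) -> None:
        # Azure Pipelines parses this logging command from stdout and
        # makes `name` available to subsequent steps in the job.
        print('##vso[task.setVariable variable=%s]%s' % (name, value))

    set_pipeline_variable('outputPath', 'tests/output')
    set_pipeline_variable('haveTestResults', 'true')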
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.py b/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.py
new file mode 100755
index 000000000..58e32f6d3
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/publish-codecov.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Upload code coverage reports to codecov.io.
+Multiple coverage files from multiple languages are accepted and aggregated after upload.
+Python coverage, as well as PowerShell and Python stubs, can all be uploaded.
+"""
+
+import argparse
+import dataclasses
+import pathlib
+import shutil
+import subprocess
+import tempfile
+import typing as t
+import urllib.request
+
+
+@dataclasses.dataclass(frozen=True)
+class CoverageFile:
+ name: str
+ path: pathlib.Path
+ flags: t.List[str]
+
+
+@dataclasses.dataclass(frozen=True)
+class Args:
+ dry_run: bool
+ path: pathlib.Path
+
+
+def parse_args() -> Args:
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-n', '--dry-run', action='store_true')
+ parser.add_argument('path', type=pathlib.Path)
+
+ args = parser.parse_args()
+
+ # Store arguments in a typed dataclass
+ fields = dataclasses.fields(Args)
+ kwargs = {field.name: getattr(args, field.name) for field in fields}
+
+ return Args(**kwargs)
+
+
+def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
+ processed = []
+ for file in directory.joinpath('reports').glob('coverage*.xml'):
+ name = file.stem.replace('coverage=', '')
+
+ # Get flags from name
+ flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
+ flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
+
+ processed.append(CoverageFile(name, file, flags))
+
+ return tuple(processed)
+
+
+def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
+ for file in files:
+ cmd = [
+ str(codecov_bin),
+ '--name', file.name,
+ '--file', str(file.path),
+ ]
+ for flag in file.flags:
+ cmd.extend(['--flags', flag])
+
+ if dry_run:
+ print(f'DRY-RUN: Would run command: {cmd}')
+ continue
+
+ subprocess.run(cmd, check=True)
+
+
+def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
+ if dry_run:
+ print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
+ return
+
+ with urllib.request.urlopen(url) as resp:
+ with dest.open('w+b') as f:
+ # Read data in chunks rather than all at once
+ shutil.copyfileobj(resp, f, 64 * 1024)
+
+ dest.chmod(flags)
+
+
+def main():
+ args = parse_args()
+ url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
+ with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
+ codecov_bin = pathlib.Path(tmpdir) / 'codecov'
+ download_file(url, codecov_bin, 0o755, args.dry_run)
+
+ files = process_files(args.path)
+ upload_files(codecov_bin, files, args.dry_run)
+
+
+if __name__ == '__main__':
+ main()
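A quick demo of the flag derivation in process_files() above, using hypothetical report stems: each '='-separated component of the stem becomes a codecov flag, with the '-powershell' marker dropped and stub counters trimmed:

    def flags_for(stem: str) -> list:
        name = stem.replace('coverage=', '')
        flags = name.replace('-powershell', '').split('=')
        return [f.split('-')[0] if f.startswith('stub') else f for f in flags]

    print(flags_for('coverage=units=stub-01'))  # ['units', 'stub']
    print(flags_for('coverage=sanity=2.7'))     # ['sanity', '2.7']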
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh b/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
new file mode 100755
index 000000000..c08154b6f
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/report-coverage.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
+
+set -o pipefail -eu
+
+PATH="${PWD}/bin:${PATH}"
+
+if ! ansible-test --help >/dev/null 2>&1; then
+ # Install the devel version of ansible-test for generating code coverage reports.
+ # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
+ # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
+ pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+fi
+
+ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh b/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh
new file mode 100755
index 000000000..2cfdcf61e
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/run-tests.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Configure the test environment and run the tests.
+
+set -o pipefail -eu
+
+entry_point="$1"
+test="$2"
+read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
+
+export COMMIT_MESSAGE
+export COMPLETE
+export COVERAGE
+export IS_PULL_REQUEST
+
+if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
+ IS_PULL_REQUEST=true
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
+else
+ IS_PULL_REQUEST=
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
+fi
+
+COMPLETE=
+COVERAGE=
+
+if [ "${BUILD_REASON}" = "Schedule" ]; then
+ COMPLETE=yes
+
+ if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
+ COVERAGE=yes
+ fi
+fi
+
+"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
diff --git a/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py b/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py
new file mode 100755
index 000000000..85a7c3c17
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/scripts/time-command.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def main():
+ """Main program entry point."""
+ start = time.time()
+
+ sys.stdin.reconfigure(errors='surrogateescape')
+ sys.stdout.reconfigure(errors='surrogateescape')
+
+ for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
+
+
+if __name__ == '__main__':
+ main()
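The prefix is minutes and seconds elapsed since the pipe opened, so a line arriving 75 seconds in is stamped "01:15". For example, using the same formatting expression as the script (sample values only):

    seconds = 75.0
    line = 'sample output line\n'
    print('%02d:%02d %s' % (seconds // 60, seconds % 60, line), end='')
    # -> 01:15 sample output line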
diff --git a/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml b/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
new file mode 100644
index 000000000..3c8841aa2
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/templates/coverage.yml
@@ -0,0 +1,44 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template adds a job for processing code coverage data.
+# It will upload results to Azure Pipelines and codecov.io.
+# Use it from a job stage that completes after all other jobs have completed.
+# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
+
+jobs:
+ - job: Coverage
+ displayName: Code Coverage
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - task: DownloadPipelineArtifact@2
+ displayName: Download Coverage Data
+ inputs:
+ path: coverage/
+ patterns: "Coverage */*=coverage.combined"
+ - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
+ displayName: Combine Coverage Data
+ - bash: .azure-pipelines/scripts/report-coverage.sh
+ displayName: Generate Coverage Report
+ condition: gt(variables.coverageFileCount, 0)
+ - task: PublishCodeCoverageResults@1
+ inputs:
+ codeCoverageTool: Cobertura
+ # Azure Pipelines only accepts a single coverage data file.
+ # That means only Python or PowerShell coverage can be uploaded, but not both.
+ # Set the "pipelinesCoverage" variable to determine which type is uploaded.
+ # Use "coverage" for Python and "coverage-powershell" for PowerShell.
+ summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
+ displayName: Publish to Azure Pipelines
+ condition: gt(variables.coverageFileCount, 0)
+ - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
+ displayName: Publish to codecov.io
+ condition: gt(variables.coverageFileCount, 0)
+ continueOnError: true
diff --git a/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml b/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml
new file mode 100644
index 000000000..487637585
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/templates/matrix.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
+# If this matrix template does not provide the required functionality, consider using the test template directly instead.
+
+parameters:
+ # A required list of dictionaries, one per test target.
+ # Each item in the list must contain a "test" or "name" key.
+ # Both may be provided. If one is omitted, the other will be used.
+ - name: targets
+ type: object
+
+ # An optional list of values which will be used to multiply the targets list into a matrix.
+ # Values can be strings or numbers.
+ - name: groups
+ type: object
+ default: []
+
+ # An optional format string used to generate the job name.
+ # - {0} is the name of an item in the targets list.
+ - name: nameFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to generate the test name.
+ # - {0} is the name of an item in the targets list.
+ - name: testFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to add the group to the job name.
+ # {0} is the formatted name of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: nameGroupFormat
+ type: string
+ default: "{0} - {{1}}"
+
+ # An optional format string used to add the group to the test name.
+ # {0} is the formatted test of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: testGroupFormat
+ type: string
+ default: "{0}/{{1}}"
+
+jobs:
+ - template: test.yml
+ parameters:
+ jobs:
+ - ${{ if eq(length(parameters.groups), 0) }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
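The doubled braces matter because the group variants are expanded in two passes: the outer format() first embeds nameFormat/testFormat and turns '{{1}}' into a live '{1}' placeholder, and the second pass fills in the target and the group. Python's str.format escapes braces the same way, so the expansion can be sketched directly (values are illustrative):

    name_group_format = '{0} - {{1}}'  # the template's default nameGroupFormat
    name_format = 'Python {0}'

    # Pass 1: embed the name format; '{{1}}' becomes '{1}'.
    combined = name_group_format.format(name_format)  # 'Python {0} - {1}'
    # Pass 2: fill in the target name and the group.
    print(combined.format('3.9', 2))                  # 'Python 3.9 - 2'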
diff --git a/ansible_collections/community/general/.azure-pipelines/templates/test.yml b/ansible_collections/community/general/.azure-pipelines/templates/test.yml
new file mode 100644
index 000000000..700cf629d
--- /dev/null
+++ b/ansible_collections/community/general/.azure-pipelines/templates/test.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template uses the provided list of jobs to create one or more test jobs.
+# It can be used directly if needed, or through the matrix template.
+
+parameters:
+ # A required list of dictionaries, one per test job.
+ # Each item in the list must contain a "test" and a "name" key.
+ - name: jobs
+ type: object
+
+jobs:
+ - ${{ each job in parameters.jobs }}:
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/ansible_collections/community/general/.github/BOTMETA.yml b/ansible_collections/community/general/.github/BOTMETA.yml
new file mode 100644
index 000000000..c6379bdcb
--- /dev/null
+++ b/ansible_collections/community/general/.github/BOTMETA.yml
@@ -0,0 +1,1416 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+notifications: true
+automerge: true
+files:
+ plugins/:
+ supershipit: quidame
+ changelogs/: {}
+ changelogs/fragments/:
+ support: community
+ $actions:
+ labels: action
+ $actions/iptables_state.py:
+ maintainers: quidame
+ $actions/shutdown.py:
+ maintainers: nitzmahone samdoran aminvakil
+ $becomes/:
+ labels: become
+ $becomes/doas.py:
+ maintainers: $team_ansible_core
+ $becomes/dzdo.py:
+ maintainers: $team_ansible_core
+ $becomes/ksu.py:
+ maintainers: $team_ansible_core
+ $becomes/machinectl.py:
+ maintainers: $team_ansible_core
+ $becomes/pbrun.py:
+ maintainers: $team_ansible_core
+ $becomes/pfexec.py:
+ maintainers: $team_ansible_core
+ $becomes/pmrun.py:
+ maintainers: $team_ansible_core
+ $becomes/sesu.py:
+ maintainers: nekonyuu
+ $becomes/sudosu.py:
+ maintainers: dagwieers
+ $caches/:
+ labels: cache
+ $caches/memcached.py: {}
+ $caches/pickle.py:
+ maintainers: bcoca
+ $caches/redis.py: {}
+ $caches/yaml.py:
+ maintainers: bcoca
+ $callbacks/:
+ labels: callbacks
+ $callbacks/cgroup_memory_recap.py: {}
+ $callbacks/context_demo.py: {}
+ $callbacks/counter_enabled.py: {}
+ $callbacks/dense.py:
+ maintainers: dagwieers
+ $callbacks/diy.py:
+ maintainers: theque5t
+ $callbacks/elastic.py:
+ keywords: apm observability
+ maintainers: v1v
+ $callbacks/hipchat.py: {}
+ $callbacks/jabber.py: {}
+ $callbacks/log_plays.py: {}
+ $callbacks/loganalytics.py:
+ maintainers: zhcli
+ $callbacks/logdna.py: {}
+ $callbacks/logentries.py: {}
+ $callbacks/logstash.py:
+ maintainers: ujenmr
+ $callbacks/mail.py:
+ maintainers: dagwieers
+ $callbacks/nrdp.py:
+ maintainers: rverchere
+ $callbacks/null.py: {}
+ $callbacks/opentelemetry.py:
+ keywords: opentelemetry observability
+ maintainers: v1v
+ $callbacks/say.py:
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos say
+ maintainers: $team_macos
+ notify: chris-short
+ $callbacks/selective.py: {}
+ $callbacks/slack.py: {}
+ $callbacks/splunk.py: {}
+ $callbacks/sumologic.py:
+ labels: sumologic
+ maintainers: ryancurrah
+ $callbacks/syslog_json.py:
+ maintainers: imjoseangel
+ $callbacks/unixy.py:
+ labels: unixy
+ maintainers: akatch
+ $callbacks/yaml.py: {}
+ $connections/:
+ labels: connections
+ $connections/chroot.py: {}
+ $connections/funcd.py:
+ maintainers: mscherer
+ $connections/iocage.py: {}
+ $connections/jail.py:
+ maintainers: $team_ansible_core
+ $connections/lxc.py: {}
+ $connections/lxd.py:
+ labels: lxd
+ maintainers: mattclay
+ $connections/qubes.py:
+ maintainers: kushaldas
+ $connections/saltstack.py:
+ labels: saltstack
+ maintainers: mscherer
+ $connections/zone.py:
+ maintainers: $team_ansible_core
+ $doc_fragments/:
+ labels: docs_fragments
+ $doc_fragments/hpe3par.py:
+ labels: hpe3par
+ maintainers: farhan7500 gautamphegde
+ $doc_fragments/hwc.py:
+ labels: hwc
+ maintainers: $team_huawei
+ $doc_fragments/nomad.py:
+ maintainers: chris93111
+ $doc_fragments/xenserver.py:
+ labels: xenserver
+ maintainers: bvitnik
+ $filters/counter.py:
+ maintainers: keilr
+ $filters/crc32.py:
+ maintainers: jouir
+ $filters/dict.py:
+ maintainers: felixfontein
+ $filters/dict_kv.py:
+ maintainers: giner
+ $filters/from_csv.py:
+ maintainers: Ajpantuso
+ $filters/groupby_as_dict.py:
+ maintainers: felixfontein
+ $filters/hashids.py:
+ maintainers: Ajpantuso
+ $filters/hashids_decode.yml:
+ maintainers: Ajpantuso
+ $filters/hashids_encode.yml:
+ maintainers: Ajpantuso
+ $filters/jc.py:
+ maintainers: kellyjonbrazil
+ $filters/json_query.py: {}
+ $filters/lists_mergeby.py:
+ maintainers: vbotka
+ $filters/random_mac.py: {}
+ $filters/time.py:
+ maintainers: resmo
+ $filters/to_days.yml:
+ maintainers: resmo
+ $filters/to_hours.yml:
+ maintainers: resmo
+ $filters/to_milliseconds.yml:
+ maintainers: resmo
+ $filters/to_minutes.yml:
+ maintainers: resmo
+ $filters/to_months.yml:
+ maintainers: resmo
+ $filters/to_seconds.yml:
+ maintainers: resmo
+ $filters/to_time_unit.yml:
+ maintainers: resmo
+ $filters/to_weeks.yml:
+ maintainers: resmo
+ $filters/to_years.yml:
+ maintainers: resmo
+ $filters/unicode_normalize.py:
+ maintainers: Ajpantuso
+ $filters/version_sort.py:
+ maintainers: ericzolf
+ $inventories/:
+ labels: inventories
+ $inventories/cobbler.py:
+ maintainers: opoplawski
+ $inventories/gitlab_runners.py:
+ maintainers: morph027
+ $inventories/icinga2.py:
+ maintainers: BongoEADGC6
+ $inventories/linode.py:
+ keywords: linode dynamic inventory script
+ labels: cloud linode
+ maintainers: $team_linode
+ $inventories/lxd.py:
+ maintainers: conloos
+ $inventories/nmap.py: {}
+ $inventories/online.py:
+ maintainers: remyleone
+ $inventories/opennebula.py:
+ keywords: opennebula dynamic inventory script
+ labels: cloud opennebula
+ maintainers: feldsam
+ $inventories/proxmox.py:
+ maintainers: $team_virt ilijamt
+ $inventories/scaleway.py:
+ labels: cloud scaleway
+ maintainers: $team_scaleway
+ $inventories/stackpath_compute.py:
+ maintainers: shayrybak
+ $inventories/virtualbox.py: {}
+ $inventories/xen_orchestra.py:
+ maintainers: ddelnano shinuza
+ $lookups/:
+ labels: lookups
+ $lookups/bitwarden.py:
+ maintainers: lungj
+ $lookups/cartesian.py: {}
+ $lookups/chef_databag.py: {}
+ $lookups/collection_version.py:
+ maintainers: felixfontein
+ $lookups/consul_kv.py: {}
+ $lookups/credstash.py: {}
+ $lookups/cyberarkpassword.py:
+ labels: cyberarkpassword
+ notify: cyberark-bizdev
+ $lookups/dependent.py:
+ maintainers: felixfontein
+ $lookups/dig.py:
+ labels: dig
+ maintainers: jpmens
+ $lookups/dnstxt.py:
+ maintainers: jpmens
+ $lookups/dsv.py:
+ ignore: amigus
+ maintainers: delineaKrehl tylerezimmerman
+ $lookups/etcd.py:
+ maintainers: jpmens
+ $lookups/etcd3.py:
+ maintainers: eric-belhomme
+ $lookups/filetree.py:
+ maintainers: dagwieers
+ $lookups/flattened.py: {}
+ $lookups/hiera.py:
+ maintainers: jparrill
+ $lookups/keyring.py: {}
+ $lookups/lastpass.py: {}
+ $lookups/lmdb_kv.py:
+ maintainers: jpmens
+ $lookups/manifold.py:
+ labels: manifold
+ maintainers: galanoff
+ $lookups/merge_variables.py:
+ maintainers: rlenferink m-a-r-k-e
+ $lookups/onepass:
+ labels: onepassword
+ maintainers: samdoran
+ $lookups/onepassword.py:
+ maintainers: azenk scottsb
+ $lookups/onepassword_raw.py:
+ maintainers: azenk scottsb
+ $lookups/passwordstore.py: {}
+ $lookups/random_pet.py:
+ maintainers: Akasurde
+ $lookups/random_string.py:
+ maintainers: Akasurde
+ $lookups/random_words.py:
+ maintainers: konstruktoid
+ $lookups/redis.py:
+ maintainers: $team_ansible_core jpmens
+ $lookups/revbitspss.py:
+ maintainers: RevBits
+ $lookups/shelvefile.py: {}
+ $lookups/tss.py:
+ ignore: amigus
+ maintainers: delineaKrehl tylerezimmerman
+ $module_utils/:
+ labels: module_utils
+ $module_utils/btrfs.py:
+ maintainers: gnfzdz
+ $module_utils/deps.py:
+ maintainers: russoz
+ $module_utils/gconftool2.py:
+ labels: gconftool2
+ maintainers: russoz
+ $module_utils/gitlab.py:
+ keywords: gitlab source_control
+ labels: gitlab
+ maintainers: $team_gitlab
+ notify: jlozadad
+ $module_utils/hwc_utils.py:
+ keywords: cloud huawei hwc
+ labels: huawei hwc_utils networking
+ maintainers: $team_huawei
+ $module_utils/identity/keycloak/keycloak.py:
+ maintainers: $team_keycloak
+ $module_utils/identity/keycloak/keycloak_clientsecret.py:
+ maintainers: $team_keycloak fynncfchen johncant
+ $module_utils/ipa.py:
+ labels: ipa
+ maintainers: $team_ipa
+ $module_utils/jenkins.py:
+ labels: jenkins
+ maintainers: russoz
+ $module_utils/manageiq.py:
+ labels: manageiq
+ maintainers: $team_manageiq
+ $module_utils/memset.py:
+ labels: cloud memset
+ $module_utils/mh/:
+ labels: module_helper
+ maintainers: russoz
+ $module_utils/module_helper.py:
+ labels: module_helper
+ maintainers: russoz
+ $module_utils/net_tools/pritunl/:
+ maintainers: Lowess
+ $module_utils/oracle/oci_utils.py:
+ labels: cloud
+ maintainers: $team_oracle
+ $module_utils/pipx.py:
+ labels: pipx
+ maintainers: russoz
+ $module_utils/puppet.py:
+ labels: puppet
+ maintainers: russoz
+ $module_utils/pure.py:
+ labels: pure pure_storage
+ maintainers: $team_purestorage
+ $module_utils/redfish_utils.py:
+ labels: redfish_utils
+ maintainers: $team_redfish
+ $module_utils/remote_management/lxca/common.py:
+ maintainers: navalkp prabhosa
+ $module_utils/scaleway.py:
+ labels: cloud scaleway
+ maintainers: $team_scaleway
+ $module_utils/ssh.py:
+ maintainers: russoz
+ $module_utils/storage/hpe3par/hpe3par.py:
+ maintainers: farhan7500 gautamphegde
+ $module_utils/utm_utils.py:
+ labels: utm_utils
+ maintainers: $team_e_spirit
+ $module_utils/wdc_redfish_utils.py:
+ labels: wdc_redfish_utils
+ maintainers: $team_wdc
+ $module_utils/xenserver.py:
+ labels: xenserver
+ maintainers: bvitnik
+ $module_utils/xfconf.py:
+ labels: xfconf
+ maintainers: russoz
+ $modules/aerospike_migrations.py:
+ maintainers: Alb0t
+ $modules/airbrake_deployment.py:
+ ignore: bpennypacker
+ labels: airbrake_deployment
+ maintainers: phumpal
+ $modules/aix:
+ keywords: aix efix lpar wpar
+ labels: aix
+ maintainers: $team_aix
+ $modules/aix_lvol.py:
+ maintainers: adejoux
+ $modules/alerta_customer.py:
+ maintainers: cwollinger
+ $modules/ali_:
+ maintainers: xiaozhu36
+ $modules/alternatives.py:
+ ignore: DavidWittman jiuka
+ labels: alternatives
+ maintainers: mulby
+ $modules/ansible_galaxy_install.py:
+ maintainers: russoz
+ $modules/apache2_mod_proxy.py:
+ maintainers: oboukili
+ $modules/apache2_module.py:
+ ignore: robinro
+ maintainers: berendt n0trax
+ $modules/apk.py:
+ ignore: kbrebanov
+ labels: apk
+ maintainers: tdtrask
+ $modules/apt_repo.py:
+ maintainers: obirvalger
+ $modules/apt_rpm.py:
+ maintainers: evgkrsk
+ $modules/archive.py:
+ maintainers: bendoh
+ $modules/atomic_:
+ maintainers: krsacme
+ $modules/atomic_container.py:
+ maintainers: giuseppe krsacme
+ $modules/awall.py:
+ maintainers: tdtrask
+ $modules/beadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: beadm solaris
+ maintainers: $team_solaris
+ $modules/bearychat.py:
+ maintainers: tonyseek
+ $modules/bigpanda.py:
+ maintainers: hkariti
+ $modules/bitbucket_:
+ maintainers: catcombo
+ $modules/bower.py:
+ maintainers: mwarkentin
+ $modules/btrfs_:
+ maintainers: gnfzdz
+ $modules/bundler.py:
+ maintainers: thoiberg
+ $modules/bzr.py:
+ maintainers: andreparames
+ $modules/campfire.py:
+ maintainers: fabulops
+ $modules/capabilities.py:
+ maintainers: natefoo
+ $modules/cargo.py:
+ maintainers: radek-sprta
+ $modules/catapult.py:
+ maintainers: Jmainguy
+ $modules/circonus_annotation.py:
+ maintainers: NickatEpic
+ $modules/cisco_webex.py:
+ maintainers: drew-russell
+ $modules/clc_:
+ maintainers: clc-runner
+ $modules/cloud_init_data_facts.py:
+ maintainers: resmo
+ $modules/cloudflare_dns.py:
+ labels: cloudflare_dns
+ maintainers: mgruener
+ $modules/cobbler_:
+ maintainers: dagwieers
+ $modules/composer.py:
+ ignore: resmo
+ maintainers: dmtrs
+ $modules/consul:
+ ignore: colin-nolan
+ maintainers: $team_consul
+ $modules/copr.py:
+ maintainers: schlupov
+ $modules/cpanm.py:
+ maintainers: fcuny russoz
+ $modules/cronvar.py:
+ maintainers: dougluce
+ $modules/crypttab.py:
+ maintainers: groks
+ $modules/datadog_downtime.py:
+ maintainers: Datadog
+ $modules/datadog_event.py:
+ ignore: arturaz
+ labels: datadog_event
+ maintainers: n0ts
+ $modules/datadog_monitor.py:
+ ignore: skornehl
+ $modules/dconf.py:
+ maintainers: azaghal
+ $modules/deploy_helper.py:
+ maintainers: ramondelafuente
+ $modules/dimensiondata_network.py:
+ labels: dimensiondata_network
+ maintainers: aimonb tintoy
+ $modules/dimensiondata_vlan.py:
+ maintainers: tintoy
+ $modules/discord.py:
+ maintainers: cwollinger
+ $modules/django_manage.py:
+ ignore: scottanderson42 tastychutney
+ labels: django_manage
+ maintainers: russoz
+ $modules/dnf_versionlock.py:
+ maintainers: moreda
+ $modules/dnsimple.py:
+ maintainers: drcapulet
+ $modules/dnsimple_info.py:
+ maintainers: edhilgendorf
+ $modules/dnsmadeeasy.py:
+ maintainers: briceburg
+ $modules/dpkg_divert.py:
+ maintainers: quidame
+ $modules/easy_install.py:
+ maintainers: mattupstate
+ $modules/ejabberd_user.py:
+ maintainers: privateip
+ $modules/elasticsearch_plugin.py:
+ maintainers: ThePixelDeveloper samdoran
+ $modules/emc_vnx_sg_member.py:
+ maintainers: remixtj
+ $modules/etcd3.py:
+ ignore: vfauth
+ maintainers: evrardjp
+ $modules/facter.py:
+ labels: facter
+ maintainers: $team_ansible_core gamethis
+ $modules/filesize.py:
+ maintainers: quidame
+ $modules/filesystem.py:
+ labels: filesystem
+ maintainers: pilou- abulimov quidame
+ $modules/flatpak.py:
+ maintainers: $team_flatpak
+ $modules/flatpak_remote.py:
+ maintainers: $team_flatpak
+ $modules/flowdock.py:
+ ignore: mcodd
+ $modules/gandi_livedns.py:
+ maintainers: gthiemonge
+ $modules/gconftool2.py:
+ labels: gconftool2
+ maintainers: Akasurde kevensen
+ $modules/gconftool2_info.py:
+ labels: gconftool2
+ maintainers: russoz
+ $modules/gem.py:
+ labels: gem
+ maintainers: $team_ansible_core johanwiren
+ $modules/git_config.py:
+ maintainers: djmattyg007 mgedmin
+ $modules/github_:
+ maintainers: stpierre
+ $modules/github_deploy_key.py:
+ maintainers: bincyber
+ $modules/github_issue.py:
+ maintainers: Akasurde
+ $modules/github_key.py:
+ ignore: erydo
+ labels: github_key
+ maintainers: erydo
+ $modules/github_release.py:
+ maintainers: adrianmoisey
+ $modules/github_repo.py:
+ maintainers: atorrescogollo
+ $modules/gitlab_:
+ keywords: gitlab source_control
+ maintainers: $team_gitlab
+ notify: jlozadad
+ $modules/gitlab_branch.py:
+ maintainers: paytroff
+ $modules/gitlab_project_variable.py:
+ maintainers: markuman
+ $modules/gitlab_runner.py:
+ maintainers: SamyCoenen
+ $modules/gitlab_user.py:
+ maintainers: LennertMertens stgrace
+ $modules/grove.py:
+ maintainers: zimbatm
+ $modules/gunicorn.py:
+ maintainers: agmezr
+ $modules/hana_query.py:
+ maintainers: rainerleber
+ $modules/haproxy.py:
+ maintainers: ravibhure Normo
+ $modules/heroku_collaborator.py:
+ maintainers: marns93
+ $modules/hg.py:
+ maintainers: yeukhon
+ $modules/hipchat.py:
+ maintainers: pb8226 shirou
+ $modules/homebrew.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew macos
+ maintainers: $team_macos andrew-d
+ notify: chris-short
+ $modules/homebrew_cask.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew_ macos
+ maintainers: $team_macos enriclluelles
+ notify: chris-short
+ $modules/homebrew_tap.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: homebrew_ macos
+ maintainers: $team_macos
+ notify: chris-short
+ $modules/homectl.py:
+ maintainers: jameslivulpi
+ $modules/honeybadger_deployment.py:
+ maintainers: stympy
+ $modules/hpilo_:
+ ignore: dagwieers
+ maintainers: haad
+ $modules/hponcfg.py:
+ ignore: dagwieers
+ maintainers: haad
+ $modules/htpasswd.py:
+ labels: htpasswd
+ maintainers: $team_ansible_core
+ $modules/hwc_:
+ keywords: cloud huawei hwc
+ maintainers: $team_huawei huaweicloud
+ $modules/ibm_sa_:
+ maintainers: tzure
+ $modules/icinga2_feature.py:
+ maintainers: nerzhul
+ $modules/icinga2_host.py:
+ maintainers: t794104
+ $modules/idrac_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish
+ $modules/ilo_:
+ ignore: jose-delarosa varini-hp
+ maintainers: $team_redfish
+ $modules/imc_rest.py:
+ labels: cisco
+ maintainers: dagwieers
+ $modules/imgadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/infinity.py:
+ maintainers: MeganLiu
+ $modules/influxdb_:
+ maintainers: kamsz
+ $modules/influxdb_query.py:
+ maintainers: resmo
+ $modules/influxdb_user.py:
+ maintainers: zhhuta
+ $modules/influxdb_write.py:
+ maintainers: resmo
+ $modules/ini_file.py:
+ maintainers: jpmens noseka1
+ $modules/installp.py:
+ keywords: aix efix lpar wpar
+ labels: aix installp
+ maintainers: $team_aix kairoaraujo
+ $modules/interfaces_file.py:
+ labels: interfaces_file
+ maintainers: obourdon hryamzik
+ $modules/ip_netns.py:
+ maintainers: bregman-arie
+ $modules/ipa_:
+ maintainers: $team_ipa
+ $modules/ipa_pwpolicy.py:
+ maintainers: adralioh
+ $modules/ipa_service.py:
+ maintainers: cprh
+ $modules/ipa_vault.py:
+ maintainers: jparrill
+ $modules/ipify_facts.py:
+ maintainers: resmo
+ $modules/ipinfoio_facts.py:
+ maintainers: akostyuk
+ $modules/ipmi_:
+ maintainers: bgaifullin cloudnull
+ $modules/iptables_state.py:
+ maintainers: quidame
+ $modules/ipwcli_dns.py:
+ maintainers: cwollinger
+ $modules/irc.py:
+ maintainers: jpmens sivel
+ $modules/iso_create.py:
+ maintainers: Tomorrow9
+ $modules/iso_customize.py:
+ maintainers: ZouYuhua
+ $modules/iso_extract.py:
+ maintainers: dagwieers jhoekx ribbons
+ $modules/jabber.py:
+ maintainers: bcoca
+ $modules/java_cert.py:
+ maintainers: haad absynth76
+ $modules/java_keystore.py:
+ maintainers: Mogztter quidame
+ $modules/jboss.py:
+ labels: jboss
+ maintainers: $team_jboss jhoekx
+ $modules/jenkins_build.py:
+ maintainers: brettmilford unnecessary-username
+ $modules/jenkins_job.py:
+ maintainers: sermilrod
+ $modules/jenkins_job_info.py:
+ maintainers: stpierre
+ $modules/jenkins_plugin.py:
+ maintainers: jtyr
+ $modules/jenkins_script.py:
+ maintainers: hogarthj
+ $modules/jira.py:
+ ignore: DWSR
+ labels: jira
+ maintainers: Slezhuk tarka pertoft
+ $modules/kdeconfig.py:
+ maintainers: smeso
+ $modules/kernel_blacklist.py:
+ maintainers: matze
+ $modules/keycloak_:
+ maintainers: $team_keycloak
+ $modules/keycloak_authentication.py:
+ maintainers: elfelip Gaetan2907
+ $modules/keycloak_authz_authorization_scope.py:
+ maintainers: mattock
+ $modules/keycloak_client_rolemapping.py:
+ maintainers: Gaetan2907
+ $modules/keycloak_clientscope.py:
+ maintainers: Gaetan2907
+ $modules/keycloak_clientscope_type.py:
+ maintainers: simonpahl
+ $modules/keycloak_clientsecret_info.py:
+ maintainers: fynncfchen johncant
+ $modules/keycloak_clientsecret_regenerate.py:
+ maintainers: fynncfchen johncant
+ $modules/keycloak_group.py:
+ maintainers: adamgoossens
+ $modules/keycloak_identity_provider.py:
+ maintainers: laurpaum
+ $modules/keycloak_realm.py:
+ maintainers: kris2kris
+ $modules/keycloak_realm_info.py:
+ maintainers: fynncfchen
+ $modules/keycloak_role.py:
+ maintainers: laurpaum
+ $modules/keycloak_user_federation.py:
+ maintainers: laurpaum
+ $modules/keycloak_user_rolemapping.py:
+ maintainers: bratwurzt
+ $modules/keyring.py:
+ maintainers: ahussey-redhat
+ $modules/keyring_info.py:
+ maintainers: ahussey-redhat
+ $modules/kibana_plugin.py:
+ maintainers: barryib
+ $modules/launchd.py:
+ maintainers: martinm82
+ $modules/layman.py:
+ maintainers: jirutka
+ $modules/lbu.py:
+ maintainers: kunkku
+ $modules/ldap_attrs.py:
+ maintainers: drybjed jtyr noles
+ $modules/ldap_entry.py:
+ maintainers: jtyr
+ $modules/ldap_passwd.py:
+ maintainers: KellerFuchs jtyr
+ $modules/ldap_search.py:
+ maintainers: eryx12o45 jtyr
+ $modules/librato_annotation.py:
+ maintainers: Sedward
+ $modules/linode:
+ maintainers: $team_linode
+ $modules/linode.py:
+ maintainers: zbal
+ $modules/listen_ports_facts.py:
+ maintainers: ndavison
+ $modules/lldp.py:
+ ignore: andyhky
+ labels: lldp
+ $modules/locale_gen.py:
+ maintainers: AugustusKling
+ $modules/logentries.py:
+ ignore: ivanvanderbyl
+ labels: logentries
+ $modules/logentries_msg.py:
+ maintainers: jcftang
+ $modules/logstash_plugin.py:
+ maintainers: nerzhul
+ $modules/lvg.py:
+ maintainers: abulimov
+ $modules/lvol.py:
+ maintainers: abulimov jhoekx zigaSRC unkaputtbar112
+ $modules/lxc_container.py:
+ maintainers: cloudnull
+ $modules/lxca_:
+ maintainers: navalkp prabhosa
+ $modules/lxd_:
+ ignore: hnakamur
+ $modules/lxd_profile.py:
+ maintainers: conloos
+ $modules/lxd_project.py:
+ maintainers: we10710aa
+ $modules/macports.py:
+ ignore: ryansb
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos macports
+ maintainers: $team_macos jcftang
+ notify: chris-short
+ $modules/mail.py:
+ maintainers: dagwieers
+ $modules/make.py:
+ maintainers: LinusU
+ $modules/manageiq_:
+ labels: manageiq
+ maintainers: $team_manageiq
+ $modules/manageiq_alert_profiles.py:
+ maintainers: elad661
+ $modules/manageiq_alerts.py:
+ maintainers: elad661
+ $modules/manageiq_group.py:
+ maintainers: evertmulder
+ $modules/manageiq_policies_info.py:
+ maintainers: russoz $team_manageiq
+ $modules/manageiq_tags_info.py:
+ maintainers: russoz $team_manageiq
+ $modules/manageiq_tenant.py:
+ maintainers: evertmulder
+ $modules/mas.py:
+ maintainers: lukasbestle mheap
+ $modules/matrix.py:
+ maintainers: jcgruenhage
+ $modules/mattermost.py:
+ maintainers: bjolivot
+ $modules/maven_artifact.py:
+ ignore: chrisisbeef
+ labels: maven_artifact
+ maintainers: tumbl3w33d turb
+ $modules/memset_:
+ ignore: glitchcrab
+ $modules/mksysb.py:
+ labels: aix mksysb
+ maintainers: $team_aix
+ $modules/modprobe.py:
+ ignore: stygstra
+ labels: modprobe
+ maintainers: jdauphant mattjeffery
+ $modules/monit.py:
+ labels: monit
+ maintainers: dstoflet brian-brazil snopoke
+ $modules/mqtt.py:
+ maintainers: jpmens
+ $modules/mssql_db.py:
+ labels: mssql_db
+ maintainers: vedit Jmainguy kenichi-ogawa-1988
+ $modules/mssql_script.py:
+ labels: mssql_script
+ maintainers: kbudde
+ $modules/nagios.py:
+ maintainers: tbielawa tgoetheyn
+ $modules/netcup_dns.py:
+ maintainers: nbuchwitz
+ $modules/newrelic_deployment.py:
+ ignore: mcodd
+ $modules/nexmo.py:
+ maintainers: sivel
+ $modules/nginx_status_info.py:
+ maintainers: resmo
+ $modules/nictagadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris SmithX10
+ $modules/nmcli.py:
+ maintainers: alcamie101
+ $modules/nomad_:
+ maintainers: chris93111
+ $modules/nosh.py:
+ maintainers: tacatac
+ $modules/npm.py:
+ ignore: chrishoffman
+ labels: npm
+ maintainers: shane-walker xcambar
+ $modules/nsupdate.py:
+ maintainers: nerzhul
+ $modules/ocapi_command.py:
+ maintainers: $team_wdc
+ $modules/ocapi_info.py:
+ maintainers: $team_wdc
+ $modules/oci_vcn.py:
+ maintainers: $team_oracle rohitChaware
+ $modules/odbc.py:
+ maintainers: john-westcott-iv
+ $modules/office_365_connector_card.py:
+ maintainers: marc-sensenich
+ $modules/ohai.py:
+ labels: ohai
+ maintainers: $team_ansible_core
+ ignore: mpdehaan
+ $modules/omapi_host.py:
+ maintainers: amasolov nerzhul
+ $modules/one_:
+ maintainers: $team_opennebula
+ $modules/one_host.py:
+ maintainers: rvalle
+ $modules/oneandone_:
+ maintainers: aajdinov edevenport
+ $modules/onepassword_info.py:
+ maintainers: Rylon
+ $modules/oneview_:
+ maintainers: adriane-cardozo fgbulsoni tmiotto
+ $modules/oneview_datacenter_info.py:
+ maintainers: aalexmonteiro madhav-bharadwaj ricardogpsf soodpr
+ $modules/oneview_fc_network.py:
+ maintainers: fgbulsoni
+ $modules/oneview_fcoe_network.py:
+ maintainers: fgbulsoni
+ $modules/online_:
+ maintainers: remyleone
+ $modules/open_iscsi.py:
+ maintainers: srvg
+ $modules/openbsd_pkg.py:
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd openbsd_pkg
+ maintainers: $team_bsd eest
+ $modules/opendj_backendprop.py:
+ maintainers: dj-wasabi
+ $modules/openwrt_init.py:
+ maintainers: agaffney
+ $modules/opkg.py:
+ maintainers: skinp
+ $modules/osx_defaults.py:
+ keywords: brew cask darwin homebrew macosx macports osx
+ labels: macos osx_defaults
+ maintainers: $team_macos notok
+ notify: chris-short
+ $modules/ovh_:
+ maintainers: pascalheraud
+ $modules/ovh_monthly_billing.py:
+ maintainers: fraff
+ $modules/pacemaker_cluster.py:
+ maintainers: matbu
+ $modules/packet_:
+ maintainers: nurfet-becirevic t0mk
+ $modules/packet_device.py:
+ maintainers: baldwinSPC t0mk teebes
+ $modules/packet_sshkey.py:
+ maintainers: t0mk
+ $modules/pacman.py:
+ ignore: elasticdog
+ labels: pacman
+ maintainers: elasticdog indrajitr tchernomax jraby
+ $modules/pacman_key.py:
+ labels: pacman
+ maintainers: grawlinson
+ $modules/pagerduty.py:
+ ignore: bpennypacker
+ labels: pagerduty
+ maintainers: suprememoocow thaumos
+ $modules/pagerduty_alert.py:
+ maintainers: ApsOps
+ $modules/pagerduty_change.py:
+ maintainers: adamvaughan
+ $modules/pagerduty_user.py:
+ maintainers: zanssa
+ $modules/pam_limits.py:
+ ignore: usawa
+ labels: pam_limits
+ maintainers: giovannisciortino
+ $modules/pamd.py:
+ maintainers: kevensen
+ $modules/parted.py:
+ maintainers: ColOfAbRiX jake2184
+ $modules/pear.py:
+ ignore: jle64
+ labels: pear
+ $modules/pids.py:
+ maintainers: saranyasridharan
+ $modules/pingdom.py:
+ maintainers: thaumos
+ $modules/pip_package_info.py:
+ maintainers: bcoca matburt maxamillion
+ $modules/pipx.py:
+ maintainers: russoz
+ $modules/pipx_info.py:
+ maintainers: russoz
+ $modules/pkg5:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: pkg5 solaris
+ maintainers: $team_solaris mavit
+ $modules/pkgin.py:
+ labels: pkgin solaris
+ maintainers: $team_solaris L2G jasperla szinck martinm82
+ $modules/pkgng.py:
+ ignore: bleader
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd pkgng
+ maintainers: $team_bsd bleader
+ $modules/pkgutil.py:
+ labels: pkgutil solaris
+ maintainers: $team_solaris dermute
+ $modules/pmem.py:
+ maintainers: mizumm
+ $modules/portage.py:
+ ignore: sayap
+ labels: portage
+ maintainers: Tatsh wltjr
+ $modules/portinstall.py:
+ ignore: ryansb
+ keywords: doas dragonfly freebsd iocage jail netbsd openbsd opnsense pfsense
+ labels: bsd portinstall
+ maintainers: $team_bsd berenddeboer
+ $modules/pritunl_:
+ maintainers: Lowess
+ $modules/profitbricks:
+ maintainers: baldwinSPC
+ $modules/proxmox:
+ keywords: kvm libvirt proxmox qemu
+ labels: proxmox virt
+ maintainers: $team_virt
+ $modules/proxmox.py:
+ ignore: skvidal
+ maintainers: UnderGreen
+ $modules/proxmox_disk.py:
+ maintainers: castorsky
+ $modules/proxmox_kvm.py:
+ ignore: skvidal
+ maintainers: helldorado
+ $modules/proxmox_nic.py:
+ maintainers: Kogelvis
+ $modules/proxmox_tasks_info:
+ maintainers: paginabianca
+ $modules/proxmox_template.py:
+ ignore: skvidal
+ maintainers: UnderGreen
+ $modules/pubnub_blocks.py:
+ maintainers: parfeon pubnub
+ $modules/pulp_repo.py:
+ maintainers: sysadmind
+ $modules/puppet.py:
+ labels: puppet
+ maintainers: emonty
+ $modules/pushbullet.py:
+ maintainers: willybarro
+ $modules/pushover.py:
+ maintainers: weaselkeeper wopfel
+ $modules/python_requirements_info.py:
+ ignore: ryansb
+ maintainers: willthames
+ $modules/rax:
+ ignore: ryansb sivel
+ $modules/rax.py:
+ maintainers: omgjlk sivel
+ $modules/rax_cbs.py:
+ maintainers: claco
+ $modules/rax_cbs_attachments.py:
+ maintainers: claco
+ $modules/rax_cdb.py:
+ maintainers: jails
+ $modules/rax_cdb_database.py:
+ maintainers: jails
+ $modules/rax_cdb_user.py:
+ maintainers: jails
+ $modules/rax_clb.py:
+ maintainers: claco
+ $modules/rax_clb_nodes.py:
+ maintainers: neuroid
+ $modules/rax_clb_ssl.py:
+ maintainers: smashwilson
+ $modules/rax_files.py:
+ maintainers: angstwad
+ $modules/rax_files_objects.py:
+ maintainers: angstwad
+ $modules/rax_identity.py:
+ maintainers: claco
+ $modules/rax_mon_alarm.py:
+ maintainers: smashwilson
+ $modules/rax_mon_check.py:
+ maintainers: smashwilson
+ $modules/rax_mon_entity.py:
+ maintainers: smashwilson
+ $modules/rax_mon_notification.py:
+ maintainers: smashwilson
+ $modules/rax_mon_notification_plan.py:
+ maintainers: smashwilson
+ $modules/rax_network.py:
+ maintainers: claco omgjlk
+ $modules/rax_queue.py:
+ maintainers: claco
+ $modules/read_csv.py:
+ maintainers: dagwieers
+ $modules/redfish_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish TSKushal
+ $modules/redhat_subscription.py:
+ labels: redhat_subscription
+ maintainers: $team_rhsm
+ ignore: barnabycourt alikins kahowell
+ $modules/redis.py:
+ maintainers: slok
+ $modules/redis_data.py:
+ maintainers: paginabianca
+ $modules/redis_data_incr.py:
+ maintainers: paginabianca
+ $modules/redis_data_info.py:
+ maintainers: paginabianca
+ $modules/redis_info.py:
+ maintainers: levonet
+ $modules/rhevm.py:
+ ignore: skvidal
+ keywords: kvm libvirt proxmox qemu
+ labels: rhevm virt
+ maintainers: $team_virt TimothyVandenbrande
+ $modules/rhn_channel.py:
+ labels: rhn_channel
+ maintainers: vincentvdk alikins $team_rhn
+ $modules/rhn_register.py:
+ labels: rhn_register
+ maintainers: jlaska $team_rhn
+ $modules/rhsm_release.py:
+ maintainers: seandst $team_rhsm
+ $modules/rhsm_repository.py:
+ maintainers: giovannisciortino $team_rhsm
+ $modules/riak.py:
+ maintainers: drewkerrigan jsmartin
+ $modules/rocketchat.py:
+ ignore: ramondelafuente
+ labels: rocketchat
+ maintainers: Deepakkothandan
+ $modules/rollbar_deployment.py:
+ maintainers: kavu
+ $modules/rpm_ostree_pkg.py:
+ maintainers: dustymabe Akasurde
+ $modules/rundeck_acl_policy.py:
+ maintainers: nerzhul
+ $modules/rundeck_job_executions_info.py:
+ maintainers: phsmith
+ $modules/rundeck_job_run.py:
+ maintainers: phsmith
+ $modules/rundeck_project.py:
+ maintainers: nerzhul
+ $modules/runit.py:
+ maintainers: jsumners
+ $modules/sap_task_list_execute:
+ maintainers: rainerleber
+ $modules/sapcar_extract.py:
+ maintainers: RainerLeber
+ $modules/say.py:
+ maintainers: $team_ansible_core
+ ignore: mpdehaan
+ $modules/scaleway_:
+ maintainers: $team_scaleway
+ $modules/scaleway_compute_private_network.py:
+ maintainers: pastral
+ $modules/scaleway_container.py:
+ maintainers: Lunik
+ $modules/scaleway_container_info.py:
+ maintainers: Lunik
+ $modules/scaleway_container_namespace.py:
+ maintainers: Lunik
+ $modules/scaleway_container_namespace_info.py:
+ maintainers: Lunik
+ $modules/scaleway_container_registry.py:
+ maintainers: Lunik
+ $modules/scaleway_container_registry_info.py:
+ maintainers: Lunik
+ $modules/scaleway_database_backup.py:
+ maintainers: guillaume_ro_fr
+ $modules/scaleway_function.py:
+ maintainers: Lunik
+ $modules/scaleway_function_info.py:
+ maintainers: Lunik
+ $modules/scaleway_function_namespace.py:
+ maintainers: Lunik
+ $modules/scaleway_function_namespace_info.py:
+ maintainers: Lunik
+ $modules/scaleway_image_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_ip_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_organization_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_private_network.py:
+ maintainers: pastral
+ $modules/scaleway_security_group.py:
+ maintainers: DenBeke
+ $modules/scaleway_security_group_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_security_group_rule.py:
+ maintainers: DenBeke
+ $modules/scaleway_server_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_snapshot_info.py:
+ maintainers: Spredzy
+ $modules/scaleway_volume.py:
+ ignore: hekonsek
+ labels: scaleway_volume
+ $modules/scaleway_volume_info.py:
+ maintainers: Spredzy
+ $modules/sefcontext.py:
+ maintainers: dagwieers
+ $modules/selinux_permissive.py:
+ maintainers: mscherer
+ $modules/selogin.py:
+ maintainers: bachradsusi dankeder jamescassell
+ $modules/sendgrid.py:
+ maintainers: makaimc
+ $modules/sensu_:
+ maintainers: dmsimard
+ $modules/sensu_check.py:
+ maintainers: andsens
+ $modules/sensu_silence.py:
+ maintainers: smbambling
+ $modules/sensu_subscription.py:
+ maintainers: andsens
+ $modules/seport.py:
+ maintainers: dankeder
+ $modules/serverless.py:
+ ignore: ryansb
+ $modules/shutdown.py:
+ maintainers: nitzmahone samdoran aminvakil
+ $modules/sl_vm.py:
+ maintainers: mcltn
+ $modules/slack.py:
+ maintainers: ramondelafuente
+ $modules/slackpkg.py:
+ maintainers: KimNorgaard
+ $modules/smartos_image_info.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/snap.py:
+ labels: snap
+ maintainers: angristan vcarceler
+ $modules/snap_alias.py:
+ labels: snap
+ maintainers: russoz
+ $modules/snmp_facts.py:
+ maintainers: ogenstad ujwalkomarla
+ $modules/solaris_zone.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris pmarkham
+ $modules/sorcery.py:
+ maintainers: vaygr
+ $modules/spectrum_device.py:
+ maintainers: orgito
+ $modules/spectrum_model_attrs.py:
+ maintainers: tgates81
+ $modules/spotinst_aws_elastigroup.py:
+ maintainers: talzur
+ $modules/ss_3par_cpg.py:
+ maintainers: farhan7500 gautamphegde
+ $modules/ssh_config.py:
+ maintainers: gaqzi Akasurde
+ $modules/stackdriver.py:
+ maintainers: bwhaley
+ $modules/stacki_host.py:
+ labels: stacki_host
+ maintainers: bsanders bbyhuy
+ $modules/statsd.py:
+ maintainers: mamercad
+ $modules/statusio_maintenance.py:
+ maintainers: bhcopeland
+ $modules/sudoers.py:
+ maintainers: JonEllis
+ $modules/supervisorctl.py:
+ maintainers: inetfuture mattupstate
+ $modules/svc.py:
+ maintainers: bcoca
+ $modules/svr4pkg.py:
+ labels: solaris svr4pkg
+ maintainers: $team_solaris brontitall
+ $modules/swdepot.py:
+ keywords: hp-ux
+ labels: hpux swdepot
+ maintainers: $team_hpux melodous
+ $modules/swupd.py:
+ labels: swupd
+ maintainers: hnanni albertomurillo
+ $modules/syslogger.py:
+ maintainers: garbled1
+ $modules/syspatch.py:
+ maintainers: precurse
+ $modules/sysrc.py:
+ maintainers: dlundgren
+ $modules/sysupgrade.py:
+ maintainers: precurse
+ $modules/taiga_issue.py:
+ maintainers: lekum
+ $modules/telegram.py:
+ maintainers: tyouxa loms lomserman
+ $modules/terraform.py:
+ ignore: ryansb
+ maintainers: m-yosefpor rainerleber
+ $modules/timezone.py:
+ maintainers: indrajitr jasperla tmshn
+ $modules/twilio.py:
+ maintainers: makaimc
+ $modules/typetalk.py:
+ maintainers: tksmd
+ $modules/udm_:
+ maintainers: keachi
+ $modules/ufw.py:
+ labels: ufw
+ maintainers: ahtik ovcharenko pyykkis
+ notify: felixfontein
+ $modules/uptimerobot.py:
+ maintainers: nate-kingsley
+ $modules/urpmi.py:
+ maintainers: pmakowski
+ $modules/utm_:
+ keywords: sophos utm
+ maintainers: $team_e_spirit
+ $modules/utm_ca_host_key_cert.py:
+ maintainers: stearz
+ $modules/utm_ca_host_key_cert_info.py:
+ maintainers: stearz
+ $modules/utm_network_interface_address.py:
+ maintainers: steamx
+ $modules/utm_network_interface_address_info.py:
+ maintainers: steamx
+ $modules/utm_proxy_auth_profile.py:
+ keywords: sophos utm
+ maintainers: $team_e_spirit stearz
+ $modules/utm_proxy_exception.py:
+ keywords: sophos utm
+ maintainers: $team_e_spirit RickS-C137
+ $modules/vdo.py:
+ maintainers: rhawalsh bgurney-rh
+ $modules/vertica_:
+ maintainers: dareko
+ $modules/vexata_:
+ maintainers: vexata
+ $modules/vmadm.py:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/wakeonlan.py:
+ maintainers: dagwieers
+ $modules/wdc_:
+ ignore: jose-delarosa
+ maintainers: $team_redfish
+ $modules/wdc_redfish_command.py:
+ maintainers: $team_wdc
+ $modules/wdc_redfish_info.py:
+ maintainers: $team_wdc
+ $modules/webfaction_:
+ maintainers: quentinsf
+ $modules/xattr.py:
+ labels: xattr
+ maintainers: bcoca
+ $modules/xbps.py:
+ maintainers: dinoocch the-maldridge
+ $modules/xcc_:
+ maintainers: panyy3 renxulei
+ $modules/xenserver_:
+ maintainers: bvitnik
+ $modules/xenserver_facts.py:
+ ignore: andyhky ryansb
+ labels: xenserver_facts
+ maintainers: caphrim007 cheese
+ $modules/xfconf.py:
+ labels: xfconf
+ maintainers: russoz jbenden
+ $modules/xfconf_info.py:
+ labels: xfconf
+ maintainers: russoz
+ $modules/xfs_quota.py:
+ maintainers: bushvin
+ $modules/xml.py:
+ ignore: magnus919
+ labels: m:xml xml
+ maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0
+ $modules/yarn.py:
+ ignore: chrishoffman verkaufer
+ $modules/yum_versionlock.py:
+ maintainers: gyptazy aminvakil
+ $modules/zfs:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/zfs.py:
+ maintainers: johanwiren
+ $modules/zfs_delegate_admin.py:
+ maintainers: natefoo
+ $modules/znode.py:
+ maintainers: treyperry
+ $modules/zpool_facts:
+ keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool
+ labels: solaris
+ maintainers: $team_solaris
+ $modules/zypper.py:
+ ignore: dirtyharrycallahan robinro
+ labels: zypper
+ maintainers: $team_suse
+ $modules/zypper_repository.py:
+ ignore: matze
+ labels: zypper
+ maintainers: $team_suse
+ $tests/a_module.py:
+ maintainers: felixfontein
+#########################
+ tests/:
+ labels: tests
+ tests/integration:
+ labels: integration
+ support: community
+ tests/unit/:
+ labels: unit
+ support: community
+ tests/utils/:
+ labels: unit
+ maintainers: gundalow
+macros:
+ actions: plugins/action
+ becomes: plugins/become
+ caches: plugins/cache
+ callbacks: plugins/callback
+ cliconfs: plugins/cliconf
+ connections: plugins/connection
+ doc_fragments: plugins/doc_fragments
+ filters: plugins/filter
+ inventories: plugins/inventory
+ lookups: plugins/lookup
+ module_utils: plugins/module_utils
+ modules: plugins/modules
+ terminals: plugins/terminal
+ tests: plugins/test
+ team_ansible_core:
+ team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross
+ team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo
+ team_consul: sgargan
+ team_cyberark_conjur: jvanderhoof ryanprior
+ team_e_spirit: MatrixCrawler getjack
+ team_flatpak: JayKayy oolongbrothers
+ team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit
+ team_hpux: bcoca davx8342
+ team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2
+ team_ipa: Akasurde Nosmoht fxfitz justchris1
+ team_jboss: Wolfant jairojunior wbrefvem
+ team_keycloak: eikef ndclt mattock
+ team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber
+ team_macos: Akasurde kyleabenson martinm82 danieljaouen indrajitr
+ team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder
+ team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip
+ team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding
+ team_oracle: manojmeda mross22 nalsaber
+ team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16
+ team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt
+ team_rhn: FlossWare alikins barnabycourt vritant
+ team_rhsm: cnsnyder ptoscano
+ team_scaleway: remyleone abarbare
+ team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l
+ team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
+ team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso
+ team_wdc: mikemoerk
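The `macros` block above is what keeps the per-file entries short: path tokens such as `$modules` or `$lookups` expand to plugin directories, and `$team_*` tokens expand to the maintainer lists at the end. A minimal sketch of how one entry resolves once both kinds of macros are applied (illustrative only; the actual expansion happens in whatever bot tooling consumes BOTMETA):

```yaml
# As written above:
#   $modules/zypper.py:
#     labels: zypper
#     maintainers: $team_suse
# After macro expansion:
plugins/modules/zypper.py:
  labels: zypper
  maintainers: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor
```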
diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..bd5030f2c
--- /dev/null
+++ b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,153 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Bug report
+description: Create a report to help us improve
+
+body:
+- type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test whether the latest release and the devel branch are affected.
+ *Complete **all** sections as described; this form is processed automatically.*
+
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: Explain the problem briefly below.
+ placeholder: >-
+ When I try to do X with the collection from the main branch on GitHub, Y
+ breaks in way Z under environment E. Here are all the details I know
+ about this problem...
+ validations:
+ required: true
+
+- type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Bug Report
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ # For smaller collections we could use a multi-select and hardcode the list
+ # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins
+ # Select from list, filter as you type (`mysql` would only show the 3 mysql components)
+ # OR freeform - doesn't seem to be supported in adaptivecards
+ label: Component Name
+ description: >-
+ Write the short name of the module, plugin, task or feature below;
+ *use your best guess if unsure*.
+ placeholder: dnf, apt, yum, pip, user, etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from `ansible-galaxy collection list community.general`
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ If this issue has an example piece of YAML that can help to reproduce this problem, please provide it.
+ This can be a piece of YAML from, e.g., an automation, script, scene or configuration.
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes.
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+
+
+- type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. target OS versions,
+ network device firmware, etc.
+ placeholder: RHEL 8, CentOS Stream, etc.
+ validations:
+ required: false
+
+
+- type: textarea
+ attributes:
+ label: Steps to Reproduce
+ description: |
+ Describe exactly how to reproduce the problem, using a minimal test case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Expected Results
+ description: >-
+ Describe what you expected to happen when running the steps above.
+ placeholder: >-
+ I expected X to happen because I assumed Y, and was surprised
+ that it did not.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Actual Results
+ description: |
+ Describe what actually happened. If possible, run with extra verbosity (`-vvvv`).
+
+ Paste verbatim command output between quotes.
+ value: |
+ ```console (paste below)
+
+ ```
+- type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
+...
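The form above uses GitHub's issue-forms schema: every `body` element declares a `type`, its display `attributes`, and optional `validations`. Note the distinction the template relies on: `value:` pre-fills a field with editable text (used here to seed the fenced console blocks), while `placeholder:` only shows greyed-out hint text. A stripped-down sketch of that schema (a hypothetical form, for illustration only):

```yaml
name: Minimal example form
description: Skeleton of the issue-forms schema used above
body:
  - type: markdown
    attributes:
      value: Static text that is rendered in the form but never submitted.
  - type: textarea
    attributes:
      label: Details
      value: "Pre-filled, editable content"
      placeholder: Hint shown only while the field is empty
    validations:
      required: true
```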
diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..0cc2db058
--- /dev/null
+++ b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser
+blank_issues_enabled: false # default: true
+contact_links:
+- name: Security bug report
+ url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: |
+ Please learn how to report security vulnerabilities here.
+
+ For all security-related bugs, email security@ansible.com
+ instead of using this issue tracker, and you will receive
+ a prompt response.
+
+ For more information, see
+ https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html
+- name: Ansible Code of Conduct
+ url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Be nice to other members of the community.
+- name: Talk to the community
+ url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information
+ about: Please ask and answer usage questions here
+- name: Working groups
+ url: https://github.com/ansible/community/wiki
+ about: Interested in improving a specific area? Become a part of a working group!
+- name: For Enterprise
+ url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections
+ about: Red Hat offers support for the Ansible Automation Platform
diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/documentation_report.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/documentation_report.yml
new file mode 100644
index 000000000..3a2777f20
--- /dev/null
+++ b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/documentation_report.yml
@@ -0,0 +1,129 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Documentation Report
+description: Ask us about docs
+# NOTE: issue body is enabled to allow screenshots
+
+body:
+- type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test whether the latest release and the devel branch are affected.
+ *Complete **all** sections as described; this form is processed automatically.*
+
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: |
+ Explain the problem briefly below, add suggestions to wording or structure.
+
+ **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page?
+ placeholder: >-
+ I was reading the Collection documentation of version X and I'm having
+ problems understanding Y. It would be very helpful if that got
+ rephrased as Z.
+ validations:
+ required: true
+
+- type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Documentation Report
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the rst file, module, plugin, task or
+ feature below; *use your best guess if unsure*.
+ placeholder: mysql_user
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Ansible Version
+ description: >-
+ Paste verbatim output from `ansible --version` between
+ triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible --version
+
+ ```
+ validations:
+ required: false
+
+- type: textarea
+ attributes:
+ label: Community.general Version
+ description: >-
+ Paste verbatim output from `ansible-galaxy collection list community.general`
+ between triple backticks.
+ value: |
+ ```console (paste below)
+ $ ansible-galaxy collection list community.general
+
+ ```
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Configuration
+ description: >-
+ Paste verbatim output from `ansible-config dump --only-changed` between quotes.
+ value: |
+ ```console (paste below)
+ $ ansible-config dump --only-changed
+
+ ```
+ validations:
+ required: false
+
+- type: textarea
+ attributes:
+ label: OS / Environment
+ description: >-
+ Provide all relevant information below, e.g. OS version,
+ browser, etc.
+ placeholder: Fedora 33, Firefox, etc.
+ validations:
+ required: false
+
+- type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how this improves the documentation, e.g. before/after situation or screenshots.
+
+ **Tip:** It's not possible to upload screenshots via this field directly, but you can use the last textarea in this form to attach them.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ placeholder: >-
+ When the improvement is applied, it makes it more straightforward
+ to understand X.
+ validations:
+ required: false
+
+- type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
+...
diff --git a/ansible_collections/community/general/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..9630b67e1
--- /dev/null
+++ b/ansible_collections/community/general/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,73 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Feature request
+description: Suggest an idea for this project
+
+body:
+- type: markdown
+ attributes:
+ value: |
+ ⚠
+ Verify first that your issue is not [already reported on GitHub][issue search].
+ Also test whether the latest release and the devel branch are affected.
+ *Complete **all** sections as described; this form is processed automatically.*
+
+ [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues
+
+
+- type: textarea
+ attributes:
+ label: Summary
+ description: Describe the new feature/improvement briefly below.
+ placeholder: >-
+ I am trying to do X with the collection from the main branch on GitHub and
+ I think that implementing a feature Y would be very helpful for me and
+ every other user of community.general because of Z.
+ validations:
+ required: true
+
+- type: dropdown
+ attributes:
+ label: Issue Type
+ # FIXME: Once GitHub allows defining the default choice, update this
+ options:
+ - Feature Idea
+ validations:
+ required: true
+
+- type: input
+ attributes:
+ label: Component Name
+ description: >-
+ Write the short name of the module, plugin, task or feature below;
+ *use your best guess if unsure*.
+ placeholder: dnf, apt, yum, pip, user, etc.
+ validations:
+ required: true
+
+- type: textarea
+ attributes:
+ label: Additional Information
+ description: |
+ Describe how the feature would be used, why it is needed and what it would solve.
+
+ **HINT:** You can paste https://gist.github.com links for larger files.
+ value: |
+ <!--- Paste example playbooks or commands between quotes below -->
+ ```yaml (paste below)
+
+ ```
+ validations:
+ required: false
+- type: checkboxes
+ attributes:
+ label: Code of Conduct
+ description: |
+ Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first.
+ options:
+ - label: I agree to follow the Ansible Code of Conduct
+ required: true
+...
diff --git a/ansible_collections/community/general/.github/dependabot.yml b/ansible_collections/community/general/.github/dependabot.yml
new file mode 100644
index 000000000..2f4ff900d
--- /dev/null
+++ b/ansible_collections/community/general/.github/dependabot.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
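This keeps the pinned GitHub Actions versions fresh with a weekly check. Dependabot's v2 schema takes one entry per ecosystem, so extending coverage is just a matter of appending to `updates:`; a hypothetical extension (not part of this repository's config) that would also watch Python dependencies:

```yaml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
  # Hypothetical second entry: watch Python requirements files as well.
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "weekly"
```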
diff --git a/ansible_collections/community/general/.github/patchback.yml b/ansible_collections/community/general/.github/patchback.yml
new file mode 100644
index 000000000..5ee7812ed
--- /dev/null
+++ b/ansible_collections/community/general/.github/patchback.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+backport_branch_prefix: patchback/backports/
+backport_label_prefix: backport-
+target_branch_prefix: stable-
+...
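These three prefixes configure the Patchback backport bot. How they combine is easiest to see in a worked example (an inference from the prefix names and from backport PRs commonly seen in Ansible repositories, not something this file spells out): labelling a merged PR `backport-8` selects the target branch `stable-8`, and the bot pushes its backport from a work branch under `patchback/backports/`.

```yaml
# Annotated copy of the config above (comments are editorial, not upstream):
backport_branch_prefix: patchback/backports/  # prefix of the bot's work branches
backport_label_prefix: backport-              # label `backport-8` requests a backport
target_branch_prefix: stable-                 # ... onto the branch `stable-8`
```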
diff --git a/ansible_collections/community/general/.github/settings.yml b/ansible_collections/community/general/.github/settings.yml
new file mode 100644
index 000000000..3e8a5f9ad
--- /dev/null
+++ b/ansible_collections/community/general/.github/settings.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# DO NOT MODIFY
+
+# Settings: https://probot.github.io/apps/settings/
+# Pull settings from https://github.com/ansible-collections/.github/blob/master/.github/settings.yml
+
+_extends: ".github"
diff --git a/ansible_collections/community/general/.github/workflows/ansible-test.yml b/ansible_collections/community/general/.github/workflows/ansible-test.yml
new file mode 100644
index 000000000..8d5809cda
--- /dev/null
+++ b/ansible_collections/community/general/.github/workflows/ansible-test.yml
@@ -0,0 +1,240 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
+# https://github.com/marketplace/actions/ansible-test
+
+name: EOL CI
+on:
+ # Run EOL CI against all pushes (direct commits, including merged PRs) and all pull requests
+ push:
+ branches:
+ - main
+ - stable-*
+ pull_request:
+ # Run EOL CI once per day (at 10:00 UTC)
+ schedule:
+ - cron: '0 10 * * *'
+
+concurrency:
+ # Make sure there is at most one active run per PR, but do not cancel any non-PR runs
+ group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ sanity:
+ name: EOL Sanity (Ⓐ${{ matrix.ansible }})
+ strategy:
+ matrix:
+ ansible:
+ - '2.11'
+ - '2.12'
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ steps:
+ - name: Perform sanity testing
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pull-request-change-detection: 'true'
+ testing-type: sanity
+
+ units:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }})
+ strategy:
+ # As soon as the first unit test fails, cancel the others to free up the CI queue
+ fail-fast: true
+ matrix:
+ ansible:
+ - ''
+ python:
+ - ''
+ exclude:
+ - ansible: ''
+ include:
+ - ansible: '2.11'
+ python: '2.7'
+ - ansible: '2.11'
+ python: '3.5'
+ - ansible: '2.12'
+ python: '2.6'
+ - ansible: '2.12'
+ python: '3.8'
+
+ steps:
+ - name: >-
+ Perform unit testing against
+ Ansible version ${{ matrix.ansible }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pre-test-cmd: >-
+ mkdir -p ../../ansible
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
+ pull-request-change-detection: 'true'
+ target-python-version: ${{ matrix.python }}
+ testing-type: units
+
+ integration:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible:
+ - ''
+ docker:
+ - ''
+ python:
+ - ''
+ target:
+ - ''
+ exclude:
+ - ansible: ''
+ include:
+ # 2.11
+ - ansible: '2.11'
+ docker: fedora32
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.11'
+ docker: fedora32
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.11'
+ docker: fedora32
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.11'
+ docker: fedora33
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.11'
+ docker: fedora33
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.11'
+ docker: fedora33
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.11'
+ docker: alpine3
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.11'
+ docker: alpine3
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.11'
+ docker: alpine3
+ python: ''
+ target: azp/posix/3/
+ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+ # - ansible: '2.11'
+ # docker: default
+ # python: '2.7'
+ # target: azp/generic/1/
+ # - ansible: '2.11'
+ # docker: default
+ # python: '3.5'
+ # target: azp/generic/1/
+ # 2.12
+ - ansible: '2.12'
+ docker: centos6
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.12'
+ docker: centos6
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.12'
+ docker: centos6
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.12'
+ docker: fedora34
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.12'
+ docker: fedora34
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.12'
+ docker: fedora34
+ python: ''
+ target: azp/posix/3/
+ - ansible: '2.12'
+ docker: ubuntu1804
+ python: ''
+ target: azp/posix/1/
+ - ansible: '2.12'
+ docker: ubuntu1804
+ python: ''
+ target: azp/posix/2/
+ - ansible: '2.12'
+ docker: ubuntu1804
+ python: ''
+ target: azp/posix/3/
+ # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled.
+ # - ansible: '2.12'
+ # docker: default
+ # python: '3.8'
+ # target: azp/generic/1/
+
+ steps:
+ - name: >-
+ Perform integration testing against
+ Ansible version ${{ matrix.ansible }}
+ under Python ${{ matrix.python }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: ${{ contains(fromJson('["2.10", "2.11"]'), matrix.ansible) && 'felixfontein/ansible' || 'ansible/ansible' }}
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ docker-image: ${{ matrix.docker }}
+ integration-continue-on-error: 'false'
+ integration-diff: 'false'
+ integration-retry-on-error: 'true'
+ pre-test-cmd: >-
+ mkdir -p ../../ansible
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools
+ pull-request-change-detection: 'true'
+ target: ${{ matrix.target }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: integration
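Two expression idioms recur throughout this workflow. `${{ cond && a || b }}` acts as a ternary, which is safe here because the value chosen when the condition is true is never falsy, and `fromJson()` turns an inline JSON string into a list that `contains()` can search. The empty-string matrix entries exist only as placeholders so that `include:` can add the real combinations while `exclude:` drops the placeholder itself. A minimal standalone illustration of the ternary idiom (a hypothetical workflow, not part of this diff):

```yaml
name: ternary-demo
on: workflow_dispatch
jobs:
  demo:
    # '2.11' is in the JSON list, so this evaluates to 'ubuntu-20.04';
    # a value outside the list would fall through to 'ubuntu-latest'.
    runs-on: >-
      ${{ contains(fromJson('["2.9", "2.10", "2.11"]'), '2.11')
          && 'ubuntu-20.04' || 'ubuntu-latest' }}
    steps:
      - run: echo "runner image chosen by the expression above"
```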
diff --git a/ansible_collections/community/general/.github/workflows/codeql-analysis.yml b/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
new file mode 100644
index 000000000..f7ab9450c
--- /dev/null
+++ b/ansible_collections/community/general/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,61 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: "Code scanning - action"
+
+on:
+ schedule:
+ - cron: '26 19 * * 1'
+
+permissions:
+ contents: read
+
+jobs:
+ CodeQL-Build:
+
+ permissions:
+ actions: read # for github/codeql-action/init to get workflow details
+ contents: read # for actions/checkout to fetch code
+ security-events: write # for github/codeql-action/autobuild to send a status report
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ with:
+ # We must fetch at least the immediate parents so that if this is
+ # a pull request then we can checkout the head.
+ fetch-depth: 2
+
+ # If this run was triggered by a pull request event, then checkout
+ # the head of the pull request instead of the merge commit.
+ - run: git checkout HEAD^2
+ if: ${{ github.event_name == 'pull_request' }}
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v2
+ # Override language selection by uncommenting this and choosing your languages
+ # with:
+ # languages: go, javascript, csharp, python, cpp, java
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v2
+
+ # ℹ️ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v2
diff --git a/ansible_collections/community/general/.github/workflows/reuse.yml b/ansible_collections/community/general/.github/workflows/reuse.yml
new file mode 100644
index 000000000..8467668f1
--- /dev/null
+++ b/ansible_collections/community/general/.github/workflows/reuse.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Verify REUSE
+
+on:
+ push:
+ branches: [main]
+ pull_request_target:
+ types: [opened, synchronize, reopened]
+ branches: [main]
+ # Run CI once per day (at 07:30 UTC)
+ schedule:
+ - cron: '30 7 * * *'
+
+jobs:
+ check:
+ permissions:
+ contents: read
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ ref: ${{ github.event.pull_request.head.sha || '' }}
+
+ - name: Install dependencies
+ run: |
+ pip install reuse
+
+ - name: Check REUSE compliance
+ run: |
+ reuse lint
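The job is easy to reproduce before pushing, since `reuse lint` is exactly what CI runs. For local use, fsfe/reuse-tool also ships a pre-commit hook; a sketch of wiring it up (the pinned `rev` is an assumption, pin whatever release you use):

```yaml
# In .pre-commit-config.yaml:
repos:
  - repo: https://github.com/fsfe/reuse-tool
    rev: v1.1.2   # assumed tag; pin to a real release
    hooks:
      - id: reuse # runs `reuse lint` against the repository
```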
diff --git a/ansible_collections/community/general/.gitignore b/ansible_collections/community/general/.gitignore
new file mode 100644
index 000000000..b7868a9e4
--- /dev/null
+++ b/ansible_collections/community/general/.gitignore
@@ -0,0 +1,514 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Created by https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
+# Edit at https://www.toptal.com/developers/gitignore?templates=vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
+
+### dotenv ###
+.env
+
+### Emacs ###
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+
+# Org-mode
+.org-id-locations
+*_archive
+
+# flymake-mode
+*_flymake.*
+
+# eshell files
+/eshell/history
+/eshell/lastdir
+
+# elpa packages
+/elpa/
+
+# reftex files
+*.rel
+
+# AUCTeX auto folder
+/auto/
+
+# cask packages
+.cask/
+dist/
+
+# Flycheck
+flycheck_*.el
+
+# server auth directory
+/server/
+
+# projectiles files
+.projectile
+
+# directory configuration
+.dir-locals.el
+
+# network security
+/network-security.data
+
+
+### Git ###
+# Created by git for backups. To disable backups in Git:
+# $ git config --global mergetool.keepBackup false
+*.orig
+
+# Created by git when using merge tools for conflicts
+*.BACKUP.*
+*.BASE.*
+*.LOCAL.*
+*.REMOTE.*
+*_BACKUP_*.txt
+*_BASE_*.txt
+*_LOCAL_*.txt
+*_REMOTE_*.txt
+
+### JupyterNotebooks ###
+# gitignore template for Jupyter Notebooks
+# website: http://jupyter.org/
+
+.ipynb_checkpoints
+*/.ipynb_checkpoints/*
+
+# IPython
+profile_default/
+ipython_config.py
+
+# Remove previous ipynb_checkpoints
+# git rm -r .ipynb_checkpoints/
+
+### Linux ###
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### macOS Patch ###
+# iCloud generated files
+*.icloud
+
+### PyCharm+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# SonarLint plugin
+.idea/sonarlint/
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### PyCharm+all Patch ###
+# Ignore everything but code style settings and run configurations
+# that are supposed to be shared within teams.
+
+.idea/*
+
+!.idea/codeStyles
+!.idea/runConfigurations
+
+### pydev ###
+.pydevproject
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+
+# IPython
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Vim ###
+# Swap
+[._]*.s[a-v][a-z]
+# Comment out the next line if you don't need vector files
+!*.svg
+[._]*.sw[a-p]
+[._]s[a-rt-v][a-z]
+[._]ss[a-gi-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+Sessionx.vim
+
+# Temporary
+.netrwhist
+# Auto-generated tag files
+tags
+# Persistent undo
+[._]*.un~
+
+### WebStorm ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+
+# AWS User-specific
+
+# Generated files
+
+# Sensitive or high-churn files
+
+# Gradle
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+
+# Mongo Explorer plugin
+
+# File-based project format
+
+# IntelliJ
+
+# mpeltonen/sbt-idea plugin
+
+# JIRA plugin
+
+# Cursive Clojure plugin
+
+# SonarLint plugin
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+
+# Editor-based Rest Client
+
+# Android studio 3.1+ serialized cache file
+
+### WebStorm Patch ###
+# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
+
+# *.iml
+# modules.xml
+# .idea/misc.xml
+# *.ipr
+
+# Sonarlint plugin
+# https://plugins.jetbrains.com/plugin/7973-sonarlint
+.idea/**/sonarlint/
+
+# SonarQube Plugin
+# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
+.idea/**/sonarIssues.xml
+
+# Markdown Navigator plugin
+# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
+.idea/**/markdown-navigator.xml
+.idea/**/markdown-navigator-enh.xml
+.idea/**/markdown-navigator/
+
+# Cache file creation bug
+# See https://youtrack.jetbrains.com/issue/JBR-2257
+.idea/$CACHE_FILE$
+
+# CodeStream plugin
+# https://plugins.jetbrains.com/plugin/12206-codestream
+.idea/codestream.xml
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# End of https://www.toptal.com/developers/gitignore/api/vim,git,macos,linux,pydev,emacs,dotenv,python,windows,webstorm,pycharm+all,jupyternotebooks
+
+# Integration tests cloud configs
+tests/integration/cloud-config-*.ini
diff --git a/ansible_collections/community/general/.pre-commit-config.yaml b/ansible_collections/community/general/.pre-commit-config.yaml
new file mode 100644
index 000000000..7e3d19094
--- /dev/null
+++ b/ansible_collections/community/general/.pre-commit-config.yaml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: mixed-line-ending
+ args: [--fix=lf]
+ - id: fix-encoding-pragma
+ - id: check-ast
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.9.0
+ hooks:
+ - id: rst-backticks
+ types: [file]
+ files: changelogs/fragments/.*\.(yml|yaml)$
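These hooks run automatically on ``git commit`` once a contributor installs them. A minimal sketch, assuming the stock pre-commit CLI (this step is illustrative, not part of the diff), of running the same checks in a workflow step like the ones above:

    - name: Run pre-commit hooks on all files
      run: |
        pip install pre-commit
        pre-commit run --all-files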
diff --git a/ansible_collections/community/general/.reuse/dep5 b/ansible_collections/community/general/.reuse/dep5
new file mode 100644
index 000000000..0c3745ebf
--- /dev/null
+++ b/ansible_collections/community/general/.reuse/dep5
@@ -0,0 +1,5 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+
+Files: changelogs/fragments/*
+Copyright: Ansible Project
+License: GPL-3.0-or-later
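The dep5 stanza above licenses all changelog fragments in bulk, so individual fragment files need no SPDX headers of their own. A sketch of such a fragment, assuming the antsibull-changelog YAML format from which the CHANGELOG.rst below is generated (file name, module name, and PR number are hypothetical):

    # changelogs/fragments/0000-example-fix.yml (hypothetical)
    bugfixes:
      - some_module - fix a hypothetical bug (https://github.com/ansible-collections/community.general/pull/0000).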
diff --git a/ansible_collections/community/general/CHANGELOG.rst b/ansible_collections/community/general/CHANGELOG.rst
new file mode 100644
index 000000000..9ddf616c3
--- /dev/null
+++ b/ansible_collections/community/general/CHANGELOG.rst
@@ -0,0 +1,705 @@
+===============================
+Community General Release Notes
+===============================
+
+.. contents:: Topics
+
+This changelog describes changes after version 5.0.0.
+
+v6.6.2
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- csv module utils - detects and removes Unicode BOM markers from incoming CSV content (https://github.com/ansible-collections/community.general/pull/6662).
+- gitlab_group - the module passed parameters to the API call even when not set. The module is now filtering out ``None`` values to remediate this (https://github.com/ansible-collections/community.general/pull/6712).
+- ini_file - fix a bug where the inactive options were not used when possible (https://github.com/ansible-collections/community.general/pull/6575).
+- keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries (https://github.com/ansible-collections/community.general/pull/6688).
+- keycloak module utils - the function ``get_user_by_username`` now returns the user representation or ``None``, as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
+
+v6.6.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Minor Changes
+-------------
+
+- dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter that has it (https://github.com/ansible-collections/community.general/pull/6491).
+
+Bugfixes
+--------
+
+- deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
+- nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
+- passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
+- portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008, https://github.com/ansible-collections/community.general/pull/6548).
+- portage - update the logic for generating the emerge command arguments to ensure that ``withbdeps: false`` results in passing an ``n`` argument with the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451, https://github.com/ansible-collections/community.general/pull/6456).
+- proxmox_tasks_info - remove the ``api_user`` + ``api_password`` constraint from ``required_together``, as it caused ``api_password`` to be required even when the API token parameter was used (https://github.com/ansible-collections/community.general/issues/6201).
+- puppet - handling of the ``noop`` parameter was not working at all; this has now been fixed (https://github.com/ansible-collections/community.general/issues/6452, https://github.com/ansible-collections/community.general/issues/6458).
+- terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
+- xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
+- zypper - added handling of zypper exitcode 102. Changed state is set correctly now and rc 102 is still preserved to be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
+
+v6.6.0
+======
+
+Release Summary
+---------------
+
+Bugfix and feature release.
+
+Minor Changes
+-------------
+
+- cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
+- dconf - be forgiving about boolean values: convert them to GVariant booleans automatically (https://github.com/ansible-collections/community.general/pull/6206).
+- dconf - minor refactoring improving parameters and dependencies validation (https://github.com/ansible-collections/community.general/pull/6336).
+- deps module utils - add function ``failed()`` providing the ability to check the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
+- dig lookup plugin - support querying multiple domains, as indicated in the docs (https://github.com/ansible-collections/community.general/pull/6334).
+- gitlab_project - add new option ``topics`` for adding topics to GitLab projects (https://github.com/ansible-collections/community.general/pull/6278).
+- homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
+- idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response (https://github.com/ansible-collections/community.general/issues/5603).
+- ipa_hostgroup - add ``append`` parameter for adding new hosts to existing hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
+- keycloak_authentication - add flow type option to sub flows to allow the creation of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
+- mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
+- nmap inventory plugin - added environment variables for configuring ``address`` and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
+- nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
+- pipx - add ``system_site_packages`` parameter to give application access to system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
+- pipx - ensure ``include_injected`` parameter works with ``state=upgrade`` and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
+- puppet - add new option ``skip_tags`` to exclude certain tagged resources during a puppet agent run or apply (https://github.com/ansible-collections/community.general/pull/6293).
+- terraform - remove the state file check condition and error block, because the native terraform implementation does not raise errors for a non-existent state file (https://github.com/ansible-collections/community.general/pull/6296).
+- udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
+
+Bugfixes
+--------
+
+- archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
+- flatpak - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing Flatpaks because of a parameter which only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
+- icinga2_host - fix the data structure sent to Icinga to make use of host templates and template vars (https://github.com/ansible-collections/community.general/pull/6286).
+- idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob`` to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
+- ini_file - make ``section`` parameter not required so it is possible to pass ``null`` as a value. This was previously only possible due to a bug in ansible-core that has since been fixed (https://github.com/ansible-collections/community.general/pull/6404).
+- keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
+- one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
+- pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade`` (https://github.com/ansible-collections/community.general/pull/6303).
+- redhat_subscription - do not use D-Bus for registering when ``environment`` is specified, so it is possible to specify environment names for registering again, as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
+- redhat_subscription - try to unregister only when already registered when ``force_register`` is specified (https://github.com/ansible-collections/community.general/issues/6258, https://github.com/ansible-collections/community.general/pull/6259).
+- redhat_subscription - use the right D-Bus options for environments when registering a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
+- rhsm_release - make ``release`` parameter not required so it is possible to pass ``null`` as a value. This was previously only possible due to a bug in ansible-core that has since been fixed (https://github.com/ansible-collections/community.general/pull/6401).
+- rundeck module utils - fix errors caused by empty API responses (https://github.com/ansible-collections/community.general/pull/6300).
+- rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices, not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/pull/5887, https://github.com/ansible-collections/community.general/pull/6300).
+- rundeck_project - update the module to use ``module_utils.rundeck`` functions (https://github.com/ansible-collections/community.general/issues/5742, https://github.com/ansible-collections/community.general/pull/6300).
+- snap_alias - module would only recognize snap names containing letters, numbers, or the underscore character, failing to identify valid snap names such as ``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
+
+New Modules
+-----------
+
+- btrfs_info - Query btrfs filesystem info
+- btrfs_subvolume - Manage btrfs subvolumes
+- ilo_redfish_command - Manages Out-Of-Band controllers using Redfish APIs
+- keycloak_authz_authorization_scope - Allows administration of Keycloak client authorization scopes via Keycloak API
+- keycloak_clientscope_type - Set the type of a clientscope in realm or client via Keycloak API
+
+v6.5.0
+======
+
+Release Summary
+---------------
+
+Feature and bugfix release.
+
+Minor Changes
+-------------
+
+- apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters to clear caches, fully upgrade the system, and upgrade kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
+- dconf - parse GVariants for equality comparison when the Python module ``gi.repository`` is available (https://github.com/ansible-collections/community.general/pull/6049).
+- gitlab_runner - allow registering a group runner (https://github.com/ansible-collections/community.general/pull/3935).
+- jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209, https://github.com/ansible-collections/community.general/pull/6210).
+- ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
+- make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
+- nmap inventory plugin - add new option ``open`` for only returning open ports (https://github.com/ansible-collections/community.general/pull/6200).
+- nmap inventory plugin - add new option ``port`` for port specific scan (https://github.com/ansible-collections/community.general/pull/6165).
+- nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
+- nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections (https://github.com/ansible-collections/community.general/issues/6065).
+- openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make the module less dependent on the ``TERM`` environment variable set on the Ansible controller (https://github.com/ansible-collections/community.general/pull/6149).
+- pipx - optional ``install_apps`` parameter added to install applications from injected packages (https://github.com/ansible-collections/community.general/pull/6198).
+- proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
+- redfish_info - adds commands to retrieve the HPE ThermalConfiguration and FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
+- redhat_subscription - credentials (``username``, ``activationkey``, and so on) are required now only if a system needs to be registered, or ``force_register`` is specified (https://github.com/ansible-collections/community.general/pull/5664).
+- redhat_subscription - the registration is done using the D-Bus ``rhsm`` service instead of spawning a ``subscription-manager register`` command, if possible; this avoids passing plain-text credentials as arguments to ``subscription-manager register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
+- ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
+- ssh_config - vendored StormSSH's config parser to avoid having to install StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
+- znode module - optional ``use_tls`` parameter added for encrypted communication (https://github.com/ansible-collections/community.general/issues/6154).
+
+Bugfixes
+--------
+
+- archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
+- gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
+- influxdb_user - fix running in check mode when the user does not exist yet (https://github.com/ansible-collections/community.general/pull/6111).
+- interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
+- jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
+- memset - fix memset urlerror handling (https://github.com/ansible-collections/community.general/pull/6114).
+- nmcli - fixed idempotency issue for bridge connections. Module forced the default value of ``bridge.priority`` onto nmcli if not set; if ``bridge.stp`` is disabled, nmcli ignores it and keeps the default (https://github.com/ansible-collections/community.general/issues/3216, https://github.com/ansible-collections/community.general/issues/4683).
+- nmcli - fixed idempotency issue when module parameters are set to ``may_fail4=false`` and ``method4=disabled``; in this case nmcli ignores the change and keeps its own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
+- nmcli - implemented changing mtu value on vlan interfaces (https://github.com/ansible-collections/community.general/issues/4387).
+- opkg - fixes bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
+- redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not running as root, rather than hanging on an interactive ``console-helper`` prompt; they all interact with ``subscription-manager``, which already requires being run as root (https://github.com/ansible-collections/community.general/issues/734, https://github.com/ansible-collections/community.general/pull/6211).
+- xenorchestra inventory plugin - fix failure to receive objects from server due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
+- yarn - fix ``global=true`` to not fail when ``executable`` was not specified (https://github.com/ansible-collections/community.general/pull/6132).
+- yarn - fixes bug where yarn module tasks would fail when warnings were emitted from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
+
+New Plugins
+-----------
+
+Lookup
+~~~~~~
+
+- merge_variables - merge variables with a certain suffix
+
+New Modules
+-----------
+
+- kdeconfig - Manage KDE configuration files
+
+v6.4.0
+======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- dnsimple - set custom User-Agent for API requests to DNSimple (https://github.com/ansible-collections/community.general/pull/5927).
+- flatpak_remote - add new boolean option ``enabled``. It controls whether the remote is enabled or not (https://github.com/ansible-collections/community.general/pull/5926).
+- gitlab_project - add ``releases_access_level``, ``environments_access_level``, ``feature_flags_access_level``, ``infrastructure_access_level``, ``monitor_access_level``, and ``security_and_compliance_access_level`` options (https://github.com/ansible-collections/community.general/pull/5986).
+- jc filter plugin - added the ability to use parser plugins (https://github.com/ansible-collections/community.general/pull/6043).
+- keycloak_group - add new optional module parameter ``parents`` to properly handle keycloak subgroups (https://github.com/ansible-collections/community.general/pull/5814).
+- keycloak_user_federation - make ``org.keycloak.storage.ldap.mappers.LDAPStorageMapper`` the default value for mappers ``providerType`` (https://github.com/ansible-collections/community.general/pull/5863).
+- ldap modules - add ``xorder_discovery`` option (https://github.com/ansible-collections/community.general/issues/6045, https://github.com/ansible-collections/community.general/pull/6109).
+- lxd_container - add diff and check mode (https://github.com/ansible-collections/community.general/pull/5866).
+- mattermost, rocketchat, slack - replace missing default favicon with docs.ansible.com favicon (https://github.com/ansible-collections/community.general/pull/5928).
+- modprobe - add ``persistent`` option (https://github.com/ansible-collections/community.general/issues/4028, https://github.com/ansible-collections/community.general/pull/542).
+- osx_defaults - include stderr in error messages (https://github.com/ansible-collections/community.general/pull/6011).
+- proxmox - suppress urllib3 ``InsecureRequestWarnings`` when ``validate_certs`` option is ``false`` (https://github.com/ansible-collections/community.general/pull/5931).
+- redfish_command - adding ``EnableSecureBoot`` functionality (https://github.com/ansible-collections/community.general/pull/5899).
+- redfish_command - adding ``VerifyBiosAttributes`` functionality (https://github.com/ansible-collections/community.general/pull/5900).
+- sefcontext - add support for path substitutions (https://github.com/ansible-collections/community.general/issues/1193).
+
+Deprecated Features
+-------------------
+
+- gitlab_runner - the option ``access_level`` will lose its default value in community.general 8.0.0. From that version on, you have to set this option to ``ref_protected`` explicitly if you want a protected runner (https://github.com/ansible-collections/community.general/issues/5925).
+
+Bugfixes
+--------
+
+- cartesian and flattened lookup plugins - adjust to parameter deprecation in ansible-core 2.14's ``listify_lookup_plugin_terms`` helper function (https://github.com/ansible-collections/community.general/pull/6074).
+- cloudflare_dns - fixed the idempotency for SRV DNS records (https://github.com/ansible-collections/community.general/pull/5972).
+- cloudflare_dns - fixed the possibility of setting a root-level SRV DNS record (https://github.com/ansible-collections/community.general/pull/5972).
+- github_webhook - fix always changed state when no secret is provided (https://github.com/ansible-collections/community.general/pull/5994).
+- jenkins_plugin - fix error due to undefined variable when updates file is not downloaded (https://github.com/ansible-collections/community.general/pull/6100).
+- keycloak_client - fix accidental replacement of value for attribute ``saml.signing.private.key`` with ``no_log`` in wrong contexts (https://github.com/ansible-collections/community.general/pull/5934).
+- lxd_* modules, lxd inventory plugin - fix TLS/SSL certificate validation problems by using the correct purpose when creating the TLS context (https://github.com/ansible-collections/community.general/issues/5616, https://github.com/ansible-collections/community.general/pull/6034).
+- nmcli - fix change handling of values specified as an integer 0 (https://github.com/ansible-collections/community.general/pull/5431).
+- nmcli - fix failure to handle WIFI settings when connection type not specified (https://github.com/ansible-collections/community.general/pull/5431).
+- nmcli - fix improper detection of changes to ``wifi.wake-on-wlan`` (https://github.com/ansible-collections/community.general/pull/5431).
+- nmcli - order is significant for lists of addresses (https://github.com/ansible-collections/community.general/pull/6048).
+- onepassword lookup plugin - Changed to ignore errors from "op account get" calls. Previously, errors would prevent auto-signin code from executing (https://github.com/ansible-collections/community.general/pull/5942).
+- terraform and timezone - slight refactoring to avoid linter reporting potentially undefined variables (https://github.com/ansible-collections/community.general/pull/5933).
+- various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.general/pull/5940).
+- yarn - fix ``global=true`` to check for the configured global folder instead of assuming the default (https://github.com/ansible-collections/community.general/pull/5829).
+- yarn - fix ``state=absent`` not working with ``global=true`` when the package does not include a binary (https://github.com/ansible-collections/community.general/pull/5829).
+- yarn - fix ``state=latest`` not working with ``global=true`` (https://github.com/ansible-collections/community.general/issues/5712).
+- zfs_delegate_admin - zfs allow output can now be parsed when uids/gids are not known to the host system (https://github.com/ansible-collections/community.general/pull/5943).
+- zypper - make package managing work on readonly filesystem of openSUSE MicroOS (https://github.com/ansible-collections/community.general/pull/5615).
+
+v6.3.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- apache2_module - add module argument ``warn_mpm_absent`` to control whether warnings are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793).
+- bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694).
+- bitwarden lookup plugin - implement filtering results by ``collection_id`` parameter (https://github.com/ansible-collections/community.general/issues/5849).
+- dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913).
+- gitlab_project - add ``builds_access_level``, ``container_registry_access_level`` and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706).
+- gitlab_runner - add new boolean option ``access_level_on_creation``. It controls whether the value of ``access_level`` is used for runner registration or not. The option ``access_level`` has been ignored on registration so far and was only used on updates (https://github.com/ansible-collections/community.general/issues/5907, https://github.com/ansible-collections/community.general/pull/5908).
+- ilo_redfish_utils module utils - change implementation of DNS Server IP and NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804).
+- ipa_group - allow to add and remove external users with the ``external_user`` option (https://github.com/ansible-collections/community.general/pull/5897).
+- iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844).
+- one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf`` API call (https://github.com/ansible-collections/community.general/pull/5812).
+- opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718).
+- redhat_subscription - adds ``token`` parameter for subscription-manager authentication using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725).
+- snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773).
+- snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486).
+- sudoers - add ``setenv`` parameter to support passing environment variables via sudo (https://github.com/ansible-collections/community.general/pull/5883).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- ModuleHelper module utils - when the module sets output variables named ``msg``, ``exception``, ``output``, ``vars``, or ``changed``, the actual output will prefix those names with ``_`` (underscore symbol) only when they clash with output variables generated by ModuleHelper itself, which only occurs when handling exceptions. Please note that this breaking change does not require a new major release since before this release, it was not possible to add such variables to the output `due to a bug <https://github.com/ansible-collections/community.general/pull/5755>`__ (https://github.com/ansible-collections/community.general/pull/5765).
+
+Deprecated Features
+-------------------
+
+- consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772).
+- gitlab_runner - the default of the new option ``access_level_on_creation`` will change from ``false`` to ``true`` in community.general 7.0.0. This will cause ``access_level`` to be used during runner registration as well, and not only during updates (https://github.com/ansible-collections/community.general/pull/5908).
+
+Bugfixes
+--------
+
+- ModuleHelper - fix bug when adjusting the name of reserved output variables (https://github.com/ansible-collections/community.general/pull/5755).
+- alternatives - support subcommands on Fedora 37, which uses ``follower`` instead of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794).
+- bitwarden lookup plugin - clarify what to do, if the bitwarden vault is not unlocked (https://github.com/ansible-collections/community.general/pull/5811).
+- dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field (https://github.com/ansible-collections/community.general/pull/5914).
+- gem - fix force parameter not being passed to gem command when uninstalling (https://github.com/ansible-collections/community.general/pull/5822).
+- gem - fix hang due to interactive prompt for confirmation on specific version uninstall (https://github.com/ansible-collections/community.general/pull/5751).
+- gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888).
+- keycloak_user_federation - fixes federation creation issue. When a new federation was created and at the same time a default / standard mapper was also changed / updated, the creation process failed, as a variable erroneously set to ``None`` led to a malformed URL request (https://github.com/ansible-collections/community.general/pull/5750).
+- keycloak_user_federation - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing user federations because of a buggy, seemingly superfluous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732).
+- loganalytics callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- logdna callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- logstash callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- nsupdate - fix zone lookup. The SOA record for an existing zone is returned as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817, https://github.com/ansible-collections/community.general/pull/5818).
+- proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803).
+- redfish_utils - removed basic auth HTTP header when performing a GET on the service root resource and when performing a POST to the session collection (https://github.com/ansible-collections/community.general/issues/5886).
+- splunk callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- sumologic callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- syslog_json callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+- terraform - fix ``current`` workspace never getting appended to the ``all`` key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735).
+- terraform - fix ``terraform init`` failure when there are multiple workspaces on the remote backend and when ``default`` workspace is missing by setting ``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when used (https://github.com/ansible-collections/community.general/pull/5735).
+- terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843).
+- xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808).
+
+New Modules
+-----------
+
+- ocapi_command - Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+- ocapi_info - Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+
+v6.2.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- opkg - allow installing a package in a certain version (https://github.com/ansible-collections/community.general/pull/5688).
+- proxmox - added new module parameter ``tags`` for use with PVE 7+ (https://github.com/ansible-collections/community.general/pull/5714).
+- puppet - refactored module to use ``CmdRunner`` for executing ``puppet`` (https://github.com/ansible-collections/community.general/pull/5612).
+- redhat_subscription - add a ``server_proxy_scheme`` parameter to configure the scheme for the proxy server (https://github.com/ansible-collections/community.general/pull/5662).
+- ssh_config - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5720).
+- sudoers - adds ``host`` parameter for setting hostname restrictions in sudoers rules (https://github.com/ansible-collections/community.general/issues/5702).
+
+Deprecated Features
+-------------------
+
+- manageiq_policies - deprecate ``state=list`` in favour of using ``community.general.manageiq_policies_info`` (https://github.com/ansible-collections/community.general/pull/5721).
+- rax - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_cbs - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_cbs_attachments - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_cdb - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_cdb_database - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_cdb_user - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_clb - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_clb_nodes - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_clb_ssl - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_dns - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_dns_record - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_facts - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_files - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_files_objects - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_identity - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_keypair - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_meta - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_mon_alarm - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_mon_check - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_mon_entity - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_mon_notification - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_mon_notification_plan - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_network - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_queue - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_scaling_group - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+- rax_scaling_policy - module relies on the deprecated library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+
+Bugfixes
+--------
+
+- ansible_galaxy_install - set the default to raise an exception if the command's return code is different from zero (https://github.com/ansible-collections/community.general/pull/5680).
+- ansible_galaxy_install - try ``C.UTF-8`` and then fall back to ``en_US.UTF-8`` before failing (https://github.com/ansible-collections/community.general/pull/5680).
+- gitlab_group_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
+- gitlab_project_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
+- lxc_container - fix the arguments of the lxc command which broke the creation and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578).
+- opkg - fix issue that ``force=reinstall`` would not reinstall an existing package (https://github.com/ansible-collections/community.general/pull/5705).
+- proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672).
+- proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672).
+- unixy callback plugin - fix typo introduced when updating to use Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
+
+v6.1.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- cmd_runner module utils - ``cmd_runner_fmt.as_bool()`` can now take an extra parameter specifying the format to use when the value is false (https://github.com/ansible-collections/community.general/pull/5647).
+- gconftool2 - refactor using ``ModuleHelper`` and ``CmdRunner`` (https://github.com/ansible-collections/community.general/pull/5545).
+- java_certs - add more detailed error output when extracting certificate from PKCS12 fails (https://github.com/ansible-collections/community.general/pull/5550).
+- jenkins_plugin - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5565).
+- lxd_project - refactored code out to module utils to clear sanity check (https://github.com/ansible-collections/community.general/pull/5549).
+- nmap inventory plugin - add new options ``udp_scan``, ``icmp_timestamp``, and ``dns_resolve`` for different types of scans (https://github.com/ansible-collections/community.general/pull/5566).
+- rax_scaling_group - refactored out code to the ``rax`` module utils to clear the sanity check (https://github.com/ansible-collections/community.general/pull/5563).
+- redfish_command - add ``PerformRequestedOperations`` command to perform any operations necessary to continue the update flow (https://github.com/ansible-collections/community.general/issues/4276).
+- redfish_command - add ``update_apply_time`` to ``SimpleUpdate`` command (https://github.com/ansible-collections/community.general/issues/3910).
+- redfish_command - add ``update_status`` to output of ``SimpleUpdate`` command to allow a user to monitor the update in progress (https://github.com/ansible-collections/community.general/issues/4276).
+- redfish_info - add ``GetUpdateStatus`` command to check the progress of a previous update request (https://github.com/ansible-collections/community.general/issues/4276).
+- redfish_utils module utils - added PUT (``put_request()``) functionality (https://github.com/ansible-collections/community.general/pull/5490).
+- slack - add option ``prepend_hash`` which allows to control whether a ``#`` is prepended to ``channel_id``. The current behavior (value ``auto``) is to prepend ``#`` unless some specific prefixes are found. That list of prefixes is incomplete, and there does not seem to exist a documented condition on when exactly ``#`` must not be prepended. We recommend to explicitly set ``prepend_hash=always`` or ``prepend_hash=never`` to avoid any ambiguity (https://github.com/ansible-collections/community.general/pull/5629).
+- spotinst_aws_elastigroup - add ``elements`` attribute when missing in ``list`` parameters (https://github.com/ansible-collections/community.general/pull/5553).
+- ssh_config - add ``host_key_algorithms`` option (https://github.com/ansible-collections/community.general/pull/5605).
+- udm_share - added ``elements`` attribute to ``list`` type parameters (https://github.com/ansible-collections/community.general/pull/5557).
+- udm_user - add ``elements`` attribute when missing in ``list`` parameters (https://github.com/ansible-collections/community.general/pull/5559).
+
+Deprecated Features
+-------------------
+
+- The ``sap`` modules ``sapcar_extract``, ``sap_task_list_execute``, and ``hana_query`` will be removed from this collection in community.general 7.0.0 and replaced with redirects to ``community.sap_libs``. If you want to continue using these modules, make sure to also install ``community.sap_libs`` (it is part of the Ansible package) (https://github.com/ansible-collections/community.general/pull/5614).
+
+Bugfixes
+--------
+
+- chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570).
+- cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()`` (https://github.com/ansible-collections/community.general/pull/5538).
+- cmd_runner module utils - formatting arguments with ``cmd_runner_fmt.as_fixed()`` was expecting a non-existent argument (https://github.com/ansible-collections/community.general/pull/5538).
+- keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return values properly (https://github.com/ansible-collections/community.general/pull/5619).
+- keycloak_client_rolemapping - remove only listed mappings with ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5619).
+- proxmox inventory plugin - fix bug while templating when using templates for the ``url``, ``user``, ``password``, ``token_id``, or ``token_secret`` options (https://github.com/ansible-collections/community.general/pull/5640).
+- proxmox inventory plugin - handle tags delimited by semicolon instead of comma, which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602).
+- redhat_subscription - do not ignore ``consumer_name`` and other variables if ``activationkey`` is specified (https://github.com/ansible-collections/community.general/issues/3486, https://github.com/ansible-collections/community.general/pull/5627).
+- redhat_subscription - do not pass arguments to ``subscription-manager register`` for things already configured; now a specified ``rhsm_baseurl`` is properly set for subscription-manager (https://github.com/ansible-collections/community.general/pull/5583).
+- unixy callback plugin - fix plugin to work with ansible-core 2.14 by using Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
+- vdo - now uses ``yaml.safe_load()`` to parse command output instead of the deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()`` without explicitly setting a ``Loader=`` is also an error in PyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632).
+- vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628).
+
+New Modules
+-----------
+
+- gitlab_project_badge - Manage project badges on GitLab Server
+- keycloak_clientsecret_info - Retrieve client secret via Keycloak API
+- keycloak_clientsecret_regenerate - Regenerate Keycloak client secret via Keycloak API
+
+v6.0.1
+======
+
+Release Summary
+---------------
+
+Bugfix release for Ansible 7.0.0.
+
+Bugfixes
+--------
+
+- dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()`` (https://github.com/ansible-collections/community.general/pull/5543).
+- jenkins_build - fix the logical flaw when deleting a Jenkins build (https://github.com/ansible-collections/community.general/pull/5514).
+- one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489).
+- onepassword_raw - add missing parameter to plugin documentation (https://github.com/ansible-collections/community.general/issues/5506).
+- proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5493).
+
+v6.0.0
+======
+
+Release Summary
+---------------
+
+New major release of community.general with lots of bugfixes, new features, some removed deprecated features, and some other breaking changes. Please check the corresponding sections of the changelog for more details.
+
+Major Changes
+-------------
+
+- The internal structure of the collection was changed for modules and action plugins. These no longer live in a directory hierarchy ordered by topic, but instead are now all in a single (flat) directory. This has no impact on users *assuming they did not use internal FQCNs*. These will still work, but result in deprecation warnings. They were never officially supported and thus the redirects are kept as a courtesy, and this is not labelled as a breaking change. Note that, for example, the Ansible VS Code plugin started recommending these internal names. If you followed its recommendation, you will now have to change back to the short names to avoid deprecation warnings, and potential errors in the future as these redirects will be removed in community.general 9.0.0; an illustrative example follows this list (https://github.com/ansible-collections/community.general/pull/5461).
+- newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
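+
+A sketch of the difference between the two naming schemes (the module shown is just an example):
+
+.. code-block:: yaml
+
+    # Deprecated internal FQCN (topic-based path), subject to removal in community.general 9.0.0:
+    - community.general.system.ufw:
+        state: enabled
+
+    # Officially supported short FQCN:
+    - community.general.ufw:
+        state: enabled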
+
+Minor Changes
+-------------
+
+- Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py (https://github.com/ansible-collections/community.general/pull/5065).
+- All software licenses are now in the ``LICENSES/`` directory of the collection root (https://github.com/ansible-collections/community.general/pull/5065, https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080, https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087, https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098, https://github.com/ansible-collections/community.general/pull/5106).
+- ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035).
+- ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()`` for the exception message (https://github.com/ansible-collections/community.general/pull/4755).
+- The collection repository conforms to the `REUSE specification <https://reuse.software/spec/>`__ except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138).
+- ali_instance - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240).
+- ali_instance_info - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240).
+- alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654).
+- alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654).
+- ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates (https://github.com/ansible-collections/community.general/pull/4752).
+- ansible_galaxy_install - refactored module to use ``CmdRunner`` to execute ``ansible-galaxy`` (https://github.com/ansible-collections/community.general/pull/5477).
+- apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976).
+- bitwarden lookup plugin - add option ``search`` to search for other attributes than name (https://github.com/ansible-collections/community.general/pull/5297).
+- cartesian lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return`` to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/4736).
+- cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791).
+- consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996).
+- consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367).
+- consul_session - adds ``token`` parameter for session (https://github.com/ansible-collections/community.general/pull/5193).
+- cpanm - refactored module to use ``CmdRunner`` to execute ``cpanm`` (https://github.com/ansible-collections/community.general/pull/5485).
+- cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
+- credstash lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- dependent lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- dig lookup plugin - add option ``fail_on_error`` to allow stopping execution on lookup failures (https://github.com/ansible-collections/community.general/pull/4973).
+- dig lookup plugin - start using Ansible's configuration manager to parse options. All documented options can now also be passed as lookup parameters; see the example at the end of this list (https://github.com/ansible-collections/community.general/pull/5440).
+- dnstxt lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- filetree lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- flattened lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- gitlab module util - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_deploy_key - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_group - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_group_members - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_group_variable - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_hook - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_hook - minor refactoring (https://github.com/ansible-collections/community.general/pull/5271).
+- gitlab_project - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_project_members - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_project_variable - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_protected_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_runner - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- gitlab_user - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+- hiera lookup plugin - start using Ansible's configuration manager to parse options. The Hiera executable and config file can now also be passed as lookup parameters (https://github.com/ansible-collections/community.general/pull/5440).
+- homebrew, homebrew_tap - added Homebrew on Linux path to defaults (https://github.com/ansible-collections/community.general/pull/5241).
+- hponcfg - refactored module to use ``CmdRunner`` to execute ``hponcfg`` (https://github.com/ansible-collections/community.general/pull/5483).
+- keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible`` (https://github.com/ansible-collections/community.general/issues/5023).
+- keyring lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022).
+- ldap_attrs - allow for DNs to have ``{x}`` prefix on first RDN (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5450).
+- linode inventory plugin - simplify option handling (https://github.com/ansible-collections/community.general/pull/5438).
+- listen_ports_facts - add new ``include_non_listening`` option which adds ``-a`` option to ``netstat`` and ``ss``. This shows both listening and non-listening (for TCP this means established connections) sockets, and returns ``state`` and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762, https://github.com/ansible-collections/community.general/pull/4953).
+- lmdb_kv lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358).
+- machinectl become plugin - can now be used with a password from a user other than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849).
+- machinectl become plugin - combine the success command when building the become command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
+- manifold lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440).
+- maven_artifact - add a new ``unredirected_headers`` option that can be used with ansible-core 2.12 and above. The default value is to not use ``Authorization`` and ``Cookie`` headers on redirects for security reasons. With ansible-core 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812).
+- mksysb - refactored module to use ``CmdRunner`` to execute ``mksysb`` (https://github.com/ansible-collections/community.general/pull/5484).
+- mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
+- nagios - minor refactoring on parameter validation for different actions (https://github.com/ansible-collections/community.general/pull/5239).
+- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
+- nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361).
+- nmcli - add the bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/issues/5148).
+- nmcli - adds the ``vpn`` type and parameter to support VPNs with the service types L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746).
+- nmcli - honor IP options for VPNs (https://github.com/ansible-collections/community.general/pull/5228).
+- onepassword - support version 2 of the OnePassword CLI (https://github.com/ansible-collections/community.general/pull/4728).
+- opentelemetry callback plugin - allow configuring the opentelemetry callback via config file (https://github.com/ansible-collections/community.general/pull/4916).
+- opentelemetry callback plugin - send logs. This can be disabled by setting ``disable_logs=true`` (https://github.com/ansible-collections/community.general/pull/4175).
+- pacman - added parameters ``reason`` and ``reason_for`` to set/change the install reason of packages (https://github.com/ansible-collections/community.general/pull/4956).
+- passwordstore lookup plugin - allow options to be passed as lookup options instead of being part of the term strings (https://github.com/ansible-collections/community.general/pull/5444).
+- passwordstore lookup plugin - allow using alternative password managers by detecting wrapper scripts, allow explicit configuration of pass and gopass backends (https://github.com/ansible-collections/community.general/issues/4766).
+- passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436).
+- pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105).
+- pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085).
+- pipx - module fails faster when ``name`` is missing for states ``upgrade`` and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100).
+- pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
+- pipx module utils - created new module util ``pipx`` providing a ``cmd_runner`` specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085).
+- portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options (https://github.com/ansible-collections/community.general/pull/5349).
+- portage - use Portage's Python module instead of calling the gentoolkit-provided program in a shell (https://github.com/ansible-collections/community.general/pull/5349).
+- proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new groups ``<group_prefix>prelaunch``, ``<group_prefix>paused``. They will be populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723).
+- proxmox inventory plugin - simplify option handling code (https://github.com/ansible-collections/community.general/pull/5437).
+- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
+- proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107).
+- proxmox_snap - add ``unbind`` param to support snapshotting containers with configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
+- puppet - adds ``confdir`` parameter to configure a custom confdir location (https://github.com/ansible-collections/community.general/pull/4740).
+- redfish - added the new commands ``GetVirtualMedia``, ``VirtualMediaInsert`` and ``VirtualMediaEject`` to the Systems category, since the Redfish specification moved the virtualMedia resource location from Manager to System (https://github.com/ansible-collections/community.general/pull/5124).
+- redfish_config - add ``SetSessionService`` to set default session timeout policy (https://github.com/ansible-collections/community.general/issues/5008).
+- redfish_info - add ``GetManagerInventory`` to report list of Manager inventory information (https://github.com/ansible-collections/community.general/issues/4899).
+- seport - added new argument ``local`` (https://github.com/ansible-collections/community.general/pull/5203).
+- snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
+- sudoers - will attempt to validate the proposed sudoers rule using ``visudo`` if it is available; validation can optionally be skipped or made mandatory (https://github.com/ansible-collections/community.general/pull/4794, https://github.com/ansible-collections/community.general/issues/4745).
+- terraform - adds the capability to handle complex variable structures for the ``variables`` parameter in the module. This must be enabled with the new ``complex_vars`` parameter (https://github.com/ansible-collections/community.general/pull/4797).
+- terraform - run ``terraform init`` with ``-no-color`` so as not to mess up the stdout of the task (https://github.com/ansible-collections/community.general/pull/5147).
+- wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059).
+- wdc_redfish_command - add ``PowerModeLow`` and ``PowerModeNormal`` commands for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5145).
+- xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037).
+- xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
+- xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
+- xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674).
+- xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner`` specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776).
+- xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
+- xfconf_info - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
+- znode - add the possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306).
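+
+For example, with the configuration manager in place, dig options such as ``fail_on_error`` can now be passed directly as lookup parameters (a minimal sketch; the domain and record type are illustrative):
+
+.. code-block:: yaml
+
+    - name: Look up TXT records and fail the task on lookup errors
+      ansible.builtin.debug:
+        msg: "{{ lookup('community.general.dig', 'example.com', qtype='TXT', fail_on_error=true) }}"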
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
+- scaleway_container_registry_info - no longer replace ``secret_environment_variables`` in the output by ``SENSITIVE_VALUE`` (https://github.com/ansible-collections/community.general/pull/5497).
+
+Deprecated Features
+-------------------
+
+- ArgFormat module utils - deprecated along with ``CmdMixin``, in favor of the ``cmd_runner_fmt`` module util (https://github.com/ansible-collections/community.general/pull/5370).
+- CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
+- CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
+- CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370).
+- cmd_runner module utils - deprecated ``fmt`` in favor of ``cmd_runner_fmt`` as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777).
+- django_manage - support for Django releases older than 4.1 has been deprecated and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
+- django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate``, which were deprecated in Django a long time ago, will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
+- django_manage - the behavior of "creating the virtual environment when missing" is being deprecated and will be removed in community.general version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5405).
+- gconftool2 - deprecates ``state=get`` in favor of using the module ``gconftool2_info``; see the migration sketch after this list (https://github.com/ansible-collections/community.general/pull/4778).
+- lxc_container - the module will no longer make any effort to support Python 2 (https://github.com/ansible-collections/community.general/pull/5304).
+- newrelic_deployment - ``appname`` and ``environment`` are no longer valid options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341).
+- proxmox - deprecated the current ``unprivileged`` default value; it will be changed to ``true`` in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5224).
+- xfconf - deprecated parameter ``disable_facts``, as since version 4.0.0 it only allows value ``true`` (https://github.com/ansible-collections/community.general/pull/4520).
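+
+The gconftool2 migration mentioned above could look like this (the key is illustrative):
+
+.. code-block:: yaml
+
+    # Deprecated:
+    - community.general.gconftool2:
+        key: /desktop/gnome/interface/font_name
+        state: get
+
+    # Use instead:
+    - community.general.gconftool2_info:
+        key: /desktop/gnome/interface/font_name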
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- bitbucket* modules - ``username`` is no longer an alias of ``workspace``, but of ``user`` (https://github.com/ansible-collections/community.general/pull/5326).
+- gem - the default of the ``norc`` option changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/5326).
+- gitlab_group_members - ``gitlab_group`` must now always contain the full path, and no longer just the name or path (https://github.com/ansible-collections/community.general/pull/5326).
+- keycloak_authentication - the return value ``flow`` has been removed. Use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326).
+- keycloak_group - the return value ``group`` has been removed. Use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326).
+- lxd_container - the default of the ``ignore_volatile_options`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326).
+- mail callback plugin - the ``sender`` option is now required (https://github.com/ansible-collections/community.general/pull/5326).
+- module_helper module utils - remove the ``VarDict`` attribute from ``ModuleHelper``. Import ``VarDict`` from ``ansible_collections.community.general.plugins.module_utils.mh.mixins.vars`` instead (https://github.com/ansible-collections/community.general/pull/5326).
+- proxmox inventory plugin - the default of the ``want_proxmox_nodes_ansible_host`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326).
+- vmadm - the ``debug`` option has been removed. It was not used anyway (https://github.com/ansible-collections/community.general/pull/5326).
+
+Bugfixes
+--------
+
+- Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``.
+- Include ``simplified_bsd.txt`` license file for various module utils, the ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests.
+- alternatives - do not set the priority if the priority was not set by the user (https://github.com/ansible-collections/community.general/pull/4810).
+- alternatives - only pass subcommands when they are specified as module arguments (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804, https://github.com/ansible-collections/community.general/pull/4836).
+- alternatives - when ``subcommands`` is specified, ``link`` must be given for every subcommand. This was already mentioned in the documentation, but not enforced by the code (https://github.com/ansible-collections/community.general/pull/4836).
+- apache2_mod_proxy - avoid crash when reporting inability to parse the balancer_member_page HTML, which was caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111).
+- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz`` (https://github.com/ansible-collections/community.general/pull/5393).
+- cmd_runner module utils - fix bug caused by using the ``command`` variable instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903).
+- consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680).
+- credstash lookup plugin - pass plugin options to credstash for all terms, not just for the first (https://github.com/ansible-collections/community.general/pull/5440).
+- dig lookup plugin - add an option to return an empty result without empty strings, and to return an empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5439, https://github.com/ansible-collections/community.general/issues/5428).
+- dig lookup plugin - fix evaluation of falsy values for boolean parameters ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129).
+- dnsimple_info - correctly report missing library as ``requests`` and not ``another_library`` (https://github.com/ansible-collections/community.general/pull/5111).
+- dnstxt lookup plugin - add an option to return an empty result without empty strings, and to return an empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5457, https://github.com/ansible-collections/community.general/issues/5428).
+- dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911).
+- filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700).
+- filesystem - improve error messages when output cannot be parsed by including newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700).
+- funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111).
+- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
+- iso_create - the module sometimes failed to add folders for Joliet and UDF formats (https://github.com/ansible-collections/community.general/issues/5275).
+- keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241).
+- keyring_info - fix the result from the keyring library never getting returned (https://github.com/ansible-collections/community.general/pull/4964).
+- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error occurred when the LDAP attribute value contained special characters such as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434, https://github.com/ansible-collections/community.general/pull/5435).
+- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute values (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5385).
+- listen_ports_facts - removed a leftover ``EnvironmentError``. The ``else`` clause had wrong indentation; the check is now handled in the ``split_pid_name`` function (https://github.com/ansible-collections/community.general/pull/5202).
+- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
+- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
+- lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886).
+- manageiq_alert_profiles - avoid crash when reporting an unknown profile, which was caused by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111).
+- nmcli - avoid changed status for most cases with VPN connections (https://github.com/ansible-collections/community.general/pull/5126).
+- nmcli - fix error caused by adding undefined module arguments for list options (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813).
+- nmcli - fix error when setting previously unset MAC address, ``gsm.apn`` or ``vpn.data``: current values were being normalized without checking if they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).
+- nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998).
+- nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112).
+- nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed DNS zones (https://github.com/ansible-collections/community.general/issues/4657).
+- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
+- osx_defaults - no longer expand ``~`` in ``value`` to the user's home directory, or expand environment variables (https://github.com/ansible-collections/community.general/issues/5234, https://github.com/ansible-collections/community.general/pull/5243).
+- packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths (https://github.com/ansible-collections/community.general/pull/5111).
+- pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959).
+- passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027).
+- passwordstore lookup plugin - fix password store path detection for gopass (https://github.com/ansible-collections/community.general/pull/4955).
+- pfexec become plugin - remove superfluous quotes preventing exe wrap from working as expected (https://github.com/ansible-collections/community.general/issues/3671, https://github.com/ansible-collections/community.general/pull/3889).
+- pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111).
+- pkgng - fix the case where ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
+- proxmox - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945).
+- proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config string (https://github.com/ansible-collections/community.general/pull/4910).
+- proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816).
+- proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198).
+- proxmox_kvm - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945).
+- proxmox_kvm - fix exception when no ``agent`` argument is specified (https://github.com/ansible-collections/community.general/pull/5194).
+- proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108).
+- proxmox_kvm - replace new condition with proper condition to allow for using ``vmid`` on update (https://github.com/ansible-collections/community.general/pull/5206).
+- rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933).
+- redfish_command - fix the check of whether a virtual media is unmounted to only check for ``inserted=false``, since Supermicro hardware does not clear the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839).
+- redfish_command - the Supermicro Redfish implementation only supports the ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839).
+- redfish_info - fix to ``GetChassisPower`` to correctly report power information when multiple chassis exist, but not all chassis report power information (https://github.com/ansible-collections/community.general/issues/4901).
+- redfish_utils module utils - centralize payload checking when performing modification requests to a Redfish service (https://github.com/ansible-collections/community.general/issues/5210).
+- redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741).
+- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
+- redis* modules - fix call to ``module.fail_json`` when failing because of missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733).
+- slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019).
+- slack - fix message update for channels which start with ``CP``. When ``message-id`` was passed, the update failed for channels starting with ``CP`` because the ``#`` symbol was added before the ``channel_id`` (https://github.com/ansible-collections/community.general/pull/5249).
+- snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475).
+- sudoers - ensure sudoers config files are created with the permissions requested by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814).
+- sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852).
+- tss lookup plugin - add support for the updated Delinea library (https://github.com/DelineaXPM/python-tss-sdk/issues/9, https://github.com/ansible-collections/community.general/pull/5151).
+- virtualbox inventory plugin - skip parsing values with keys that have both a value and nested data. Skip parsing values that are nested more than two keys deep (https://github.com/ansible-collections/community.general/issues/5332, https://github.com/ansible-collections/community.general/pull/5348).
+- xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682).
+- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module from working at all (https://github.com/ansible-collections/community.general/pull/5383).
+- xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999, https://github.com/ansible-collections/community.general/pull/5007).
+- zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, https://github.com/ansible-collections/community.general/pull/4726).
+
+New Plugins
+-----------
+
+Filter
+~~~~~~
+
+- counter - Counts hashable elements in a sequence
+
+Lookup
+~~~~~~
+
+- bitwarden - Retrieve secrets from Bitwarden
+
+New Modules
+-----------
+
+- gconftool2_info - Retrieve GConf configurations
+- iso_customize - Add/remove/change files in ISO file
+- keycloak_user_rolemapping - Allows administration of Keycloak user_rolemapping with the Keycloak API
+- keyring - Set or delete a passphrase using the Operating System's native keyring
+- keyring_info - Get a passphrase using the Operating System's native keyring
+- manageiq_policies_info - Listing of resource policy_profiles in ManageIQ
+- manageiq_tags_info - Retrieve resource tags in ManageIQ
+- pipx_info - Retrieves information about applications installed with pipx
+- proxmox_disk - Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster
+- scaleway_compute_private_network - Scaleway compute - private network management
+- scaleway_container - Scaleway Container management
+- scaleway_container_info - Retrieve information on Scaleway Container
+- scaleway_container_namespace - Scaleway Container namespace management
+- scaleway_container_namespace_info - Retrieve information on Scaleway Container namespace
+- scaleway_container_registry - Scaleway Container registry management module
+- scaleway_container_registry_info - Scaleway Container registry info module
+- scaleway_function - Scaleway Function management
+- scaleway_function_info - Retrieve information on Scaleway Function
+- scaleway_function_namespace - Scaleway Function namespace management
+- scaleway_function_namespace_info - Retrieve information on Scaleway Function namespace
+- wdc_redfish_command - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
+- wdc_redfish_info - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
diff --git a/ansible_collections/community/general/CHANGELOG.rst.license b/ansible_collections/community/general/CHANGELOG.rst.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/CHANGELOG.rst.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/CONTRIBUTING.md b/ansible_collections/community/general/CONTRIBUTING.md
new file mode 100644
index 000000000..358daa5e9
--- /dev/null
+++ b/ansible_collections/community/general/CONTRIBUTING.md
@@ -0,0 +1,139 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# Contributing
+
+We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our contributions and interactions within this repository.
+
+If you are a committer, also refer to the [collection's committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md).
+
+## Issue tracker
+
+Whether you are looking for an opportunity to contribute or you found a bug and already know how to solve it, please go to the [issue tracker](https://github.com/ansible-collections/community.general/issues).
+There you can find feature ideas to implement, reports about bugs to solve, or submit an issue to discuss your idea before implementing it, which can help you choose the right direction at the beginning of your work and potentially save a lot of time and effort.
+Also, somebody may already have started discussing or working on implementing the same or a similar idea,
+so you can cooperate to create a better solution together.
+
+* If you are interested in starting with an easy issue, look for [issues with an `easyfix` label](https://github.com/ansible-collections/community.general/labels/easyfix).
+* Often issues that are waiting for contributors to pick up have [the `waiting_on_contributor` label](https://github.com/ansible-collections/community.general/labels/waiting_on_contributor).
+
+## Open pull requests
+
+Look through currently [open pull requests](https://github.com/ansible-collections/community.general/pulls).
+You can help by reviewing them. Reviews help move pull requests toward being merged; some good pull requests cannot be merged only due to a lack of reviews. And it is always worth saying that good reviews are often more valuable than the pull requests themselves.
+Note that reviewing does not only mean code review, but also offering comments on new interfaces added to existing plugins/modules, interfaces of new plugins/modules, improving language (not everyone is a native English speaker), or testing bugfixes and new features!
+
+Also, consider taking up a valuable, reviewed, but abandoned pull request; after politely asking the original author, you could complete it yourself.
+
+* Try committing your changes with an informative but short commit message.
+* Do not squash your commits and force-push to your branch if not needed. Individual commits make it much easier for reviewers to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge.
+* Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout.
+* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments); a minimal example is shown after this list. (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you should not include one for docs-only changes. If you are not sure, simply do not include one; we will tell you whether one is needed or not :) )
+* Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed.
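+
+A minimal bugfix fragment, placed in `changelogs/fragments/`, could look like this (the file name, module name, and PR number are illustrative):
+
+```.yaml
+# changelogs/fragments/1234-mymodule-fix-crash.yml
+bugfixes:
+  - mymodule - fix crash when an optional parameter is omitted (https://github.com/ansible-collections/community.general/pull/1234).
+```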
+
+You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst).
+
+## Test pull requests
+
+If you want to test a PR locally, refer to [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to do it quickly.
+
+If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix them.
+
+## Run sanity, unit or integration tests locally
+
+You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is:
+
+```.bash
+mkdir -p ~/dev/ansible_collections/community
+git clone https://github.com/ansible-collections/community.general.git ~/dev/ansible_collections/community/general
+cd ~/dev/ansible_collections/community/general
+```
+
+Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+.
+
+The following commands show how to run sanity tests:
+
+```.bash
+# Run sanity tests for all files in the collection:
+ansible-test sanity --docker -v
+
+# Run sanity tests for the given files and directories:
+ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/
+```
+
+The following commands show how to run unit tests:
+
+```.bash
+# Run all unit tests:
+ansible-test units --docker -v
+
+# Run all unit tests for one Python version (a lot faster):
+ansible-test units --docker -v --python 3.8
+
+# Run a specific unit test (for the nmcli module) for one Python version:
+ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py
+```
+
+The following commands show how to run integration tests:
+
+```.bash
+# Run integration tests for the interfaces_files module in a Docker container using the
+# fedora35 operating system image (the supported images depend on your ansible-core version):
+ansible-test integration --docker fedora35 -v interfaces_file
+
+# Run integration tests for the flattened lookup **without any isolation**:
+ansible-test integration -v lookup_flattened
+```
+
+If you are unsure about the integration test target name for a module or plugin, you can take a look in `tests/integration/targets/`. Tests for plugins have the plugin type prepended.
+
+## Creating new modules or plugins
+
+Creating new modules and plugins requires a bit more work than other Pull Requests.
+
+1. Please make sure that your new module or plugin is of interest to a larger audience. Very specialized modules or plugins that
+   can only be used by very few people are better added to more specialized collections.
+
+2. Please do not add more than one plugin/module in one PR, especially if it is the first plugin/module you are contributing.
+ That makes it easier for reviewers, and increases the chance that your PR will get merged. If you plan to contribute a group
+ of plugins/modules (say, more than a module and a corresponding ``_info`` module), please mention that in the first PR. In
+   such cases, you should also consider whether it is better to publish the group of plugins/modules in a new collection.
+
+3. When creating a new module or plugin, please make sure that you follow various guidelines:
+
+ - Follow [development conventions](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_best_practices.html);
+ - Follow [documentation standards](https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html) and
+ the [Ansible style guide](https://docs.ansible.com/ansible/devel/dev_guide/style_guide/index.html#style-guide);
+ - Make sure your modules and plugins are [GPL-3.0-or-later](https://www.gnu.org/licenses/gpl-3.0-standalone.html) licensed
+ (new module_utils can also be [BSD-2-clause](https://opensource.org/licenses/BSD-2-Clause) licensed);
+ - Make sure that new plugins and modules have tests (unit tests, integration tests, or both); it is preferable to have some tests
+ which run in CI.
+
+4. Action plugins need to be accompanied by a module, even if the module file only contains documentation
+ (`DOCUMENTATION`, `EXAMPLES` and `RETURN`). The module must have the same name and directory path in `plugins/modules/`
+   as the action plugin has in `plugins/action/`.
+
+5. Make sure to add a BOTMETA entry for your new module/plugin in `.github/BOTMETA.yml`. Search for other plugins/modules in the
+   same directory to see how entries could look; a minimal sketch is also shown below. You should list all authors either as
+   `maintainers` or under `ignore`. People listed as `maintainers` will be pinged for new issues and PRs that modify the
+   module/plugin or its tests.
+
+ When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it.
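+
+   A minimal sketch of such an entry (the file path and GitHub handle are illustrative; the `$modules` macro is defined at the top of `BOTMETA.yml`):
+
+   ```.yaml
+   files:
+     $modules/my_new_module.py:
+       maintainers: my-github-handle
+   ```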
+
+## pre-commit
+
+To help ensure high-quality contributions, this repository includes a [pre-commit](https://pre-commit.com) configuration which
+corrects and tests for common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks, see
+the [Installation](#installation) section below.
+
+This is optional and not required to contribute to this repository.
+
+### Installation
+
+Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`.
+
+Running the hooks locally is optional. You can trigger them for all files with `pre-commit run --all-files`, or only for a given file with `pre-commit run --files YOUR_FILE`.
diff --git a/ansible_collections/community/general/COPYING b/ansible_collections/community/general/COPYING
new file mode 100644
index 000000000..10926e87f
--- /dev/null
+++ b/ansible_collections/community/general/COPYING
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/ansible_collections/community/general/FILES.json b/ansible_collections/community/general/FILES.json
new file mode 100644
index 000000000..5db6d01e6
--- /dev/null
+++ b/ansible_collections/community/general/FILES.json
@@ -0,0 +1,25163 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/aggregate-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a66c319d17b506827f37213502826713230920d629d63a0af58c445d36aa83b4",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/combine-coverage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49d3567c21d253290b5c075e250b1460ea46c3f33b7d25a8994447824aa19279",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/process-results.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9431997ce33a94d4c5d3b13be6958762087f7fc18785b92a7977f6f55d5b99d5",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/publish-codecov.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31c38e5c2f6021b3762c52c5c55f9fb565586b69e998b36519d3072608564546",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/report-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d61f921311b94d2c00733ba8c1e6fcdb34f6dc0236e99595e57ea54f4811197",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/run-tests.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "237b5604b5f4581824888ef0c2c58594248fa05c46e59b502b525d80c90ffd92",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/time-command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "486dd0a00417773b1a8901b4d411cb82f7e3ffea636ed69163743c4b43bf683a",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64e463d7bc905405658478276ec947084df99b36512a6429cd991d3a8fddf7b2",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/matrix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b082277884eb4e4392647491e6a390fdce58e66f036f00b104cfb35e6d210bc",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21b5fa07d5f0bca65f95e337ce815fd62ffd941e9846a5322fa606eab76ecec5",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e68364750709fae75220652b5837198a1deff224fa37d4147eec37a7bcddd70",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/azure-pipelines.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "569cbda91c80aeddd6a357ea3f766669a76a4547ebbf54b7b32a4d09fe949ce9",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c928fdf2a407181cd2733e99684a11060a3c2dfc4189a5e3c415ebd53f34f562",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97be56767beb3299dc598a5dd116262a426d83758a1e19f525584f3520283ddd",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/documentation_report.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "976b5f4c7544b9438724037369c27fdf474d8aa5fb55af19eb3853581c2ae0fb",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d5c6b512bb5b8031886d043e9121d643c34a445dfc35e7c396f90facd9ae863",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b30123e188ab9c7e77cbc5dfd71585d7ba02ccd2ba07ae702757d5dc7c24bdbb",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/codeql-analysis.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ee0e5baf1022280bfe9bb31af5caa90bc70b3feada2e16c2404f14be8e30e9c",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/reuse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3198de8ad067ccbb07ee9e461632f8ffce2e5eae1c77329ebb1935ed878811f0",
+ "format": 1
+ },
+ {
+ "name": ".github/BOTMETA.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e115baf123a80784ad9779135a969029087ee5104fe7d6f9e66fda6c1ec7e6d",
+ "format": 1
+ },
+ {
+ "name": ".github/dependabot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f314f2fce46d346ef9559fdeae3a55ce6d799bab45a255092446c31e17e6874d",
+ "format": 1
+ },
+ {
+ "name": ".github/patchback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a23e48e2562604318540e6ddcac75213ad2c367258d76fc75914e9b939d380e",
+ "format": 1
+ },
+ {
+ "name": ".github/settings.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33af9908a060da987217ca36e8b960fdd511d5c271e5bf0b6765eb3139fc2a25",
+ "format": 1
+ },
+ {
+ "name": ".reuse",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".reuse/dep5",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d8cb20d72d7e81aaf2e7f0ddce7eacdb3d47a890541717d7fae08a6cab7ebed",
+ "format": 1
+ },
+ {
+ "name": "LICENSES",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "LICENSES/GPL-3.0-or-later.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
+ "format": 1
+ },
+ {
+ "name": "LICENSES/BSD-2-Clause.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f11e51ed1eec39ad21d458ba44d805807a301c17ee9fe39538ccc9e2b280936c",
+ "format": 1
+ },
+ {
+ "name": "LICENSES/MIT.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b85dcd3e453d05982552c52b5fc9e0bdd6d23c6f8e844b984a88af32570b0cc0",
+ "format": 1
+ },
+ {
+ "name": "LICENSES/PSF-2.0.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83b042fc7d6aca0f10d68e45efa56b9bc0a1496608e7e7728fe09d1a534a054a",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddda66bb21960b57f50fff18977daa89afd2d50f7c2ecb9c4535c7a3d20752a2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06567fbbffc7f5357eba89d98577dd40b961bcef14ebb452111d609b1b019ae6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d9b70273177bcc2aac7752be98876e4bea048566ab55f1c1be2ac2e34f7f83b",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-001_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd2eef902441d9c71b84d997508e78803e648c534e75c8b12323e199eeca81d6",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "663a3b0429cd096b10b06038a561f01872a34bc0e611f535abc9e626a474b6a9",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-002_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd2eef902441d9c71b84d997508e78803e648c534e75c8b12323e199eeca81d6",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "852a8518e33c1d8a2144f1835f6a10b0c17fcc13850cf45475ce538a4312171e",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-003_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73003165935630df144177b3cbb78954f7eeaccc3031d694a1e1c2f8b365d99d",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-004_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b04d243241c56ad51cdee8c34a6475af76482801a74a55a269a296122c3be44",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-005_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4541eb704de6d64c4c34c12cc22328e19016c66811e99af22b4be27053d9526",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-006_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "205959e977ba30d49bdfd879f4b7bb2f50b50762361d89b2360cbb419a6af931",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-007_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5517facd2643e52b06c5181df7768b096b2916ac411f7403484f0ad1b7d8ee1",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-008_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a833536f106aebd7a5f737743f96c791302260e72f9393be53bdda0a86a10c9a",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/default-common.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd2eef902441d9c71b84d997508e78803e648c534e75c8b12323e199eeca81d6",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/default-recursive-true.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce747f67c1635a6dfd887c7ebf3ee02c2014b9eced5773b08d747a11fd916a95",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-001.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec664dbed63f2d8f9d7173a44f6f53b8aac3917e4c152082157ae06b2b64b717",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-002.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "174db79b357280e60b4b37b96e77708148894d4d81571fa10d65d9846bbcf365",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-003.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fac7cdbb3fc0b3addddb5ffaa58fcbf241df07195b41ad011f2550df1281fc7",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-004.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7781daf8df6f6e6403fc8bd8ba40a6a5515e24b1ffa96f85b4d3cb2e23d926e",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-005.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c02a2a83fc72b27668c5ce96e0c3feb466ea89047f5fa8bc961260bce0aa97b5",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-006.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9eda979168ded0b39e43a4ae0e02c38cdccecc236604b61cbf80069869e762e3",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-007.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78a002ab23ee4a16c60322ce988af490a5d900131fb621ada85541afee932fdd",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/example-008.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11c87a5bd327951a21baaf28e47d11c0c07e8a12cdcc5326fcd7f8b0d5217d56",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/examples.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe278ca276168ebfc2167cf5ad2e2d4b3cae0d6cdd246326b11620db259e2eb3",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/examples_all.rst.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31f8a0a79c3aebf801b74e0cc7587ea7486fc221a7ab948701211f0178310ace",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27fd7341d931b081d0caa6d9dbeacee7cd788bb5418019fb70e5794293f15582",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/list3.out.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d228735e576466950b3a90b1de44b6a503de957521b3db41a492d742695d8d71",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/helper/lists_mergeby/playbook.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "825f6e9f15549cd02a8fa1cf5a6b6bdbc1dc39ff80ce59f5014af8c0358bee58",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f2cc2cddb28b89a9b7b254377879829d13d9d6ace295643104856b75cbb2955",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_abstract_informations.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9673ab16c175d095503c01855617585ed74a750fe44ca53dd77a11b083712c2e",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "445709ac655c00442e6394a4726886d6365b799cd18674bdeec037bc725cc16e",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88e9098413c34026a6a56d676f1d10c46bbf8de27d6451dad0843e6d5942b201",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_abstract_informations_grouping.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcb3a21f1fc8b9cba37b42a83734bd2d4b6674235751b13de50add2984e9cb21",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a800545d0815aa27a81ee5a6953547cc95f73b892f55f3b1740dfcee17de1a31",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_conversions.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a55a01c7ea069d6cc1ba48873f9539cb75ff84893ca870c8bd57db43302461f1",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_creating_identifiers.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2dedef7bafae9792207e41391205f651edd21b4a0b57c541be82c4075f75226",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_paths.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ba5e31891655d169ad835d65da51eeaebb542305bd2f004c5e275e941d1da1c",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_selecting_json_data.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae8db5288f8fdff26aa7927d6e83ca54e66b8c74caf23589bd09a32987558bd7",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_working_with_times.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e6ea07f337f5d298ee8b5d4a7ab799f960010a85beddb7ffcb88651bbeb3c2d",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_working_with_unicode.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd0c1ac2407c97bb0b513566e17051ab20bc7f8313d68efa380bc308655d9151",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/filter_guide_working_with_versions.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aed3270dc77a60e71ce47a3897b77a6e9ae8636661ccd07d650bd7040fc28359",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/test_guide.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93f17c0d2998ab3fcb6cc656787b36d0703eb364d4f4ed9aa2a120bef3ab0df7",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/extra-docs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e4dff32f91531f5acfe49caddbae8744d8b97c1395e4dff1102d62ab4d02de4",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97935a90656318eede04446a4f83b560820a5cdf4098069f8ffcc88731bc1b01",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1476d80543a98566205d337aa4623bed629ada66da100604e0f9672d6c0d0922",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a53d66f79a6f4c656ca8ce128211b7c091f989ba6b90fd7941f1c5de54513827",
+ "format": 1
+ },
+ {
+ "name": "plugins/action/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "389a1ff6ec5c3894ca807c67b6cdcf189a32a738506ad7a23ba6e4b5ea3ccd51",
+ "format": 1
+ },
+ {
+ "name": "plugins/become",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/become/doas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "395a0783bbbc864885ac063ad61bb643763495de967d2ca5893af9c6119758a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/dzdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6974a6931a4f16796fefa79ac1f1225036b6839b356415f86dabbf29c6a9303c",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/ksu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b7794b44c398e41592294f7019bc41012c871d5072ff6352cd156ac5ff63fb8",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/machinectl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b645d8dedc9920a4f28fcb9f4e6dd6befd9d3185be40f29c05fed563c1d5d1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pbrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f57dd61889827ecfa8b7e7e72f0d89342997783e2f533793d963ac9ab5ccc9b",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pfexec.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ed536b92a9d1cdba5ede804fe4bb3721a253e4cbb4b50e34bc8e3e15cd113d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/pmrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35ea82076e2a5fd92e5db1c99cc47730d5d0fb86d9afbcbcf64751afb74494d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/sesu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d4400f99ce81e7a5d404a429a706113835b06ac6da3164c732d9da956f6295f",
+ "format": 1
+ },
+ {
+ "name": "plugins/become/sudosu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55f6384a02ac0157b3e9c3520928a13ba0a562ec708d24f250a8c871b5975d73",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/memcached.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09d75f1c2e2b453c7bd7aebc5e7b16a987dfa503689ef984a75d7ca67fbdb019",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/pickle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abfbed1fbc0278b761a22cac287a42b2ea14588d7a4b86c1a4a040811b4aaa2a",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26a64d84e51d0d1186fec26495bc6cc2cc3f08afff425cbd5b7f15cf6f74f2c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/cache/yaml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1297dc1fea7932a27670244c6212c9044504a5225e25bb3d6fc0d57ee3db19ef",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/cgroup_memory_recap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "196e22c6129e0beb6535990c5c39af72b49f3790ba885d6329d921d8cbd0792d",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/context_demo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f01159f953b175b6b3904b95dc9ab64155dc62cf3ac2d45b7eec3c14d3b1d875",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/counter_enabled.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd3be70cf9c3355ae3c2eda2768aa295116b6b3f64491ee3398e43ad5f7d19b4",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/dense.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3798c942865cdb68514a9269bc52103903d5fefd4b7991eafcbfea670d750f82",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/diy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fcf28099eb1e17c7d5f15fecbb428859da6a821ac1854eadb6c63896bfdb70f",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/elastic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aae5776261f50ab89a672387845fea6a8304f88000b12d8375c937e02f7226e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/hipchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "987c2a8b697623f30f2de2d4e70b027096190c0a4a6c99703db36ce0b1b9bb63",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/jabber.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c8adad573cc4be386e0fd7e0f70b689e33413aae5836de914701154684020ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/log_plays.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7905af5b8c5f75751ed633dd123bd56ae214a76f73bd5e21947fc28f1ef58d1",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/loganalytics.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41ee07d1e548f4d03475f0001e084645124d77d850e031c17d78d81b5bfc2e31",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logdna.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd3fa190b3089cf5ad73b3ef168d5fb9034aa1ae8152340290b5452b6537934d",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logentries.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9534b6c7d2b5cc4a9ff4cab35199577e1bd02c26ac7f8a276777e8669c809335",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/logstash.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "411199e9f8925c20601b623056b6288f02000815efcb0c25254e69051a826aee",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/mail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ddb19f09ccaec02d0b1e98ed3ab6f80b9479c2e51871e9dd84a9d26eb95aca6",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/nrdp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b1aff2684e6a38243bdb30404fc76508c3c4cac8c1f4d29a2a4cf7cd00b1a46",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/null.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d0e768ead6c718de5e958d0624992cc89447f0cf5f80af1462a7f78f2189572",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/opentelemetry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4ea1bad3bfbd276d03d006730a0168d3a969c545e0ffade75ba53cd1bf4e316",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdc54bf0477494706798fcb6c288c8f4bb9fe70cfd928b5b1fa0a8da066cad75",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/selective.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b4e414d25029a1e14d68eaa4ecb7763f7bf586cffb18c81676db2e9ed70a5bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7401eaba4487eea31250c44426ef10b8279f0b694ee4f8c17a882a36e51cf35",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/splunk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8ca5cfcb34a88feaaedbdb9f44704252a3d29c2838c3829a48080285c077bed",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/sumologic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5bfb9153b1c272983612fe52b199994c3e3d7f06cdeb7924215ae0fc8da17ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/syslog_json.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5880600ec24461f70479f7410db97f122bbdbf5e8bec3231bbb5fdaed3af45d",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/unixy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bbdd306804eec63f89eade94447c24e784e83fcd5204e62eaf4261813e6fc875",
+ "format": 1
+ },
+ {
+ "name": "plugins/callback/yaml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55f2e5bcd2ef0216231384b40ee1024cc827a71e1d79da82cf42208c5a94fa98",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/chroot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8648738b5146b4d4202b393895824ca2817b523f049307a414719cd72b7afcfd",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/funcd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28756792fd00a8fc3a486f16ebcef60330447215d40291c390705937f29c0ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/iocage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46c64a82bf3989da083d66ae9fcd2970da3f933ea2215407f47633ef278be014",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/jail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed55620ca0177cef975bb05d7ea88b0d4c574997ecedc795349c435f6efe7212",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/lxc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3d4201d9dab79834210fbd187f0f7c03ae55ca9da989c75fc99b997fe4f1328",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c698516a8b75ac6bbc3a1d94cbd83c1647e6739bc4401d83035cdd1d382e8ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/qubes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3340048f47c2652566fe7f2e60cdf9a2c886bb14b6ebdccfa4c57fb0ce833c65",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/saltstack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d76e7d96d62845c57831ce71853641034a0d38f7fc744af5abce7ddcca4bb00",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0265ec5385187634ee5ae52d274ce284ea5a45819a611e95d4f1a237aee36d00",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/alicloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c755676691121db240cff2282cff319b9c83fbc442466b641617782cdfaf26b5",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/attributes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c41434e90502ee68001fbf43477ed9124537447be7466c66f29096943ef288fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/auth_basic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44e2dc39beef434001c9d02d7ee65c3ac47c64fdebf8d8b304b957d5e5f00a8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/bitbucket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7521d6cda99a6c6bfb45a488bba864198a9238e4522d172ac9364663f4b24727",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/dimensiondata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ceefadbc09e96b3217066fe970117d94a7e8ad428a16f2ada6ec4f205b2feb7f",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/dimensiondata_wait.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e12eb961245371eeb738735a43a61586dd677259e18bc900362c608fb5afdad0",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/emc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f494e26b7e059222476883669eb84bff4e3bf711c72b5b9098d9ab2cca52d685",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/gitlab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1a610f5bebc33d55c50efe3184598fae8e7583fb59b721b60c67be950e2e665",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/hpe3par.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fb7efee3a8025457b564612555812ec38dc3d1a11d6b8ed8c25ff89e9006a61",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/hwc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "013ee1403ffd53279ce5127f4a474474770551fe8a00262f70092bf627814ae5",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ibm_storage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68e232bf4207388f31008ecf453dbfc70ccc82701b3306f380b775bbab300b20",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/influxdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99f5ebfe593b8be6485f933769c54bfcfdf10824ad5d9e38178445057dc888a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ipa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8f22086deb02c7ecf6905ba2c243e8bf2ae24d8c41d7322ac05d5634266bb3a",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/keycloak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b12e8398192f11de889fab0c344262ced06b7c95af284d02dd29f891d7c9309e",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5b243ba89e49d3f4763a0a5115324e2fa8846753ae497848ccfd8300b03c0a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/lxca_common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2dfb9df82cd901f3235da2432c1cda249d17a29ac8ced7fc53224680fc983800",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/manageiq.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4512fd57646adf145c618184b04e1747bf53e88acf2661c54419bc5877b091e7",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/nomad.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2143922b22f3c4f2e8a8204ad8d8f0300fbb0a7a92f6efa4c4bd02c32a54b8c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oneview.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdcf470d5783d31e0afb6a735796a1017d3a8a21810857c0efd47ed2ecde7d1d",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e838b205f84db6fb238cb5018e7e4a3b0f46953f6163e5a56e0779a74414f24",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "854ecc9b2169728ec65f56cb00f12996b715fc05cea520343b1df1951c2de778",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/openswitch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a339c27247063dcc6678caf5051bb451fceffe23dd1da903963cb2fadf7b93fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b2ec0d6017534a8da4559b983b61cfc305d65d6e1cb6e1be90274da362e731e",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_creatable_resource.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08b2ceae7cb7f166b9cd2bbefbc9994a4e00c88b13ca582e3ba84774340a3ccd",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_display_name_option.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64260dad472862657ee961f79b7453be448ca3fa072cb5ad65a267f9dfe67ed7",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_name_option.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ad3676e31d808a1f6541f72763eda59ac5272ce5b1ccf5af2d8ad149f7f5907",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3dd23c24c76276e6f50d674c63c416050913b2a1eeb985e2bd7c62d6c600348",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/oracle_wait_options.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab3ccc72af0335441b655e7d6c446854f59ace02fe70458c541375f8743a0c1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/pritunl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44037c8b801ec8d86fabde64045c8875acdbf86ec081b4308d443e09a3ccab87",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9be758169d594fee836b038599c65b7979c455de3eb28d99bf064085d27be1ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/purestorage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82da8765cc1b51d71cb49699ed3deb5ed98f6e14a82a580999916899a5cd6b91",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/rackspace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10624f5ce57c7423c959db433f9513a7db8c5e1893582bff54e6b0e3f78c348b",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5a692f155845c4ac614351bbb146c96cf93f7e2de47f72943d1a1231294885",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/rundeck.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a52a74689c469fd23b928aa1a900555b8ec26fdf8d2ed786c15f952319d51df3",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "536c5415d4d73a8e11b74e1a40068266d149333ecb8e0d297cccb66ba4120f30",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/scaleway_waitable_resource.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4a3b7fb28b85ce64af79d83781da5df695d49a6876b5b2df48d9ff5c286e74f",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/utm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40510c5612b11da5dc3bcb3c2b109857cd9d897c2ea5ae6ba8850292e30f4857",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/vexata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7deb6fcfa73cb0e95d2ad54b17cd3c49f34d34b1290f195a309a3b973b2c9da",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/xenserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32f95a4762fc04041dc5f84abc1813b734add74f36dea833a31a2fd827877511",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/counter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd04527cdb0039fc57a451d255a4b23d8e4e75f924e6e32475209aa20c2016f2",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/crc32.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "741ebab408518e97a2b871e5bc71fa0d8f4bc83d3c81d37ca897bd7bbf7a0feb",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/dict.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb5ac62a79cf46de7802145e19d70cedbd372a80abae7c202252ae02be12d081",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/dict_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e7596ccf359e5e7305b07c05a4ae481d609ec2e2f0dfd90fe77ebf264fc2ec5",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/from_csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d7f66858c792e8f5d0b53987aad8d14252baa528b01779e3a8875f295aeebf4",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/groupby_as_dict.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec705d8d9f440c5bf405183c6f95fd30b65d33edd55533ef3c5a5eb10d0673f0",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/hashids.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b47c5dc7c6b91a8c10dbab0a1d5b6277dcc965d0b3b6b23e7e26716c73a7ba5",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/hashids_decode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10d4a64c9948a36cdcf4e6d2ca43061da23c8af53d01ac502fc18b8513cf8a41",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/hashids_encode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae43679d22bf67efb49103d96e6484877338c9a4845f4d790b077725947ab780",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/jc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76ae5c88d50a2333f2758be438cfcd4c4b9a1e91076df4c48edeed101932cbf3",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/json_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b7c9ced4f1477a1196b93a163f3edbd880e6e46966522d144248541730f783d",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/lists_mergeby.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63b1474490930ed608cf35dea2a4e73fa4c80c640c346abae9b82c525b33f932",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/random_mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11d31c3d49d45c995a5251adc719156a7e329df8681c8432cce467549b0d0c39",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/time.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "551bf41b7c11d40606add85d4e2bc7b327951950dbdfc9714a9bd49bdb01bd38",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_days.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d23445f7ad0ca22323568af7e75818b386dd3e0e81adc86b72045f48e3551600",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_hours.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29f218b85c66ccde6b6f422ae9eda6b4dd6c4f732ac6eac11673942135a852bf",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_milliseconds.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2b674758da200d530e4c7a57883d35b0d47216cf7693d7f29d44c84365b18a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_minutes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29f4c579100170a46b7a177f923d48175557da42d51c4105451bf1761c041453",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_months.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "797dd5c490aa4ddf565b48c4ec4f7719704a94a1c7bd56fb6776d1470affe15c",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_seconds.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a5587aa9f512aa4e408a4f4a13b949e25d4fbdc794d57e497557a20fcf2013b",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_time_unit.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e126e417a274e75816c69275917b806a1c259aaa058cc84518a96a4c2084508",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_weeks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04463eb7201b7e414cf32bc3c7d57b656ddac61eb46a631efe1f7d36a189b534",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/to_years.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5326aea9bd9fef4c5c06bcd4ac5ec2e36f5556c4e2bf83fa4e8535049e0255bb",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/unicode_normalize.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd9a4fa4e1cf4ae96b4a43cc29a9c17be2b195b833542a43be65d0da346780d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/filter/version_sort.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f925a599c36e18839d6d89d661ba86e9f885b3ad5a6dca655fe243f6bb59d5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/cobbler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2884871f47b923590017afbfe7677511d9692a41fc11f28285b9ef5178d8a391",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/gitlab_runners.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e7b3bb1505bb24603c85b55ce42dc4176e3a59aab301746e36229b8f38051f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/icinga2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac38a22836ded8fa733b0a9dda9ed63d67a63a603e7dba6764b3ca7df09cb7f4",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9f29484c6ca0940b25e9192a1f1adb2e63970ac5305d97225963f582475bfdf",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e5a52c6e31d407fdbd5127a86642c386e49ffe16a87bd923cab8da60d50ec1e",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/nmap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb2f448c78fe0186248baaeef41f3fbdb57d648eda3c0ed5a2e5cfde482e306a",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5eb0fb5a8a0aee54a1198e84ff550f9d70a962b4a9f30643d08f5779c1578696",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08654f9aa131acaa382b0aed38a44b641f993effc3c3f50c38fc2d13b77802b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7757c5e263bfd80d0fa3d8356fb1658d09f2030649a5332bf553a579fdfbcf90",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87dbfcb3b4239cd3a2cf7718e3caab553def93060065970f39aa070fa988f254",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/stackpath_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccbe9c76dd2114c837bd6a3c504620619633ddf47a6699ea380ee33e2fc755a",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/virtualbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b220b977c1442ca3a654614ce57e7754d9795630bf75a5dde4496c2913f2f40b",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/xen_orchestra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbd0fa9d5b07b04f5f962c614c6459e5aebbbfa0a57f60f67dc9d843092f3b85",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/bitwarden.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "522634e07df8ad975f3250ea21b207b9bd5eaa73da6850f8ad2ca00f6b0ece62",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/cartesian.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3403b01d7b02c804341660e8abddc44b591c218fd23037a1398f422da3c449e",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/chef_databag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "456f263dae27338f2845852fb06d0177367e8486d0b24b4847443314f29bb1d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/collection_version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b5a1ed7cc5304e6d0bd83354961571d808950a4fa29dbd4decca7df8d935776",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/consul_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08d9c175275635d7d51e3e5f060e2f0b0ec048dae84e2f9bc1b478002011cbc7",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/credstash.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b03004d8ef248726684b4f832469fca8d451f8be9096264e0f7ebadb3a7450fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/cyberarkpassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "940e09d29a69fecb2874019cb8a42928d4527be8ac0e27302434864bcb573f46",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dependent.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d1c12d9e1687687c109175c8d8c2ce17947dfd4b632d813b2216ac43731e336",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17a45ded85474b0db603aa78f2140375da36edaffd71c516e9c6a5cb3e800f1a",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dnstxt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6f60f231e06b1777eebc014d2759c2116ecdfee7fd8b65599b34e5a8d5b0fd5",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/dsv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8bc846802cbdd881f4ff48bdbd67d9dc534c56e4985ac8ded3e0b953cd999a76",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/etcd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad620642d507aada986b475aa40d3aef112de77327a4861563d038ad9e8539be",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52e06d25c01f3074ae330aba6a91446640fef7b2a8dacfaf7780830b2928e1cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/filetree.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e93f4a8a98de008d123400187a2ea2ce1fbd6bd2e5847c4b11d955e9b5e509c",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/flattened.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b668474a4d77fd40a59af01f3d6eb778080b674a20de20fb4725162d6da32ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/hiera.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83e7f9c5ab2184e6e5157b68af827f0d0376b5624021e6bd58b5c457f1bc645c",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/keyring.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fec43c4f1ff0e94e3aa5ff1ea38a47f4e2d59009eb4e173acd0e483bdc3d2e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/lastpass.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "471fe8ebec1aa9c9d786a44504f3a378302a59d3b05f501b4b470734a3bf452a",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/lmdb_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3c22e4bb9041b8bd79d520f7d80e3429b26494f6884b71dcbee86711b9ec041",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/manifold.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bff5b4733e9afac2882793397f8b0492b99f1eb6946bff038d670246e8d900fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/merge_variables.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a7e1f8bb4cc16ab1b7a545847fb9f62eb25387e4681734ea5136ead9c5f8ac6",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12df22f733a259022dfe8bf439a3938d85d7d69d3b69dec770aa6e46a10d4a3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/onepassword_raw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0997a0491899015d8d7fe4c3bc9de5f345b579de0c2c258cf50b358b42cf2fb0",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/passwordstore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fac37c01614ecfbde5ef49a93bcfe1218d4b4c010fee52cad93b41af8feffa9",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/random_pet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce8d5b9cd8ae8317a78c487a85db20049438f869519506f76fe35803e716f4ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/random_string.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f81b451b1fbb8d49076bb90d8b40ceaa5200efa7845f1ea43464e56a6de1f3b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/random_words.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2fc8ee0c1b8e5dc8fb1a43e0b94f52db48dcfe83723ad0592cc3029c096504e",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84501859f81f78d622cb37daf1f0752076d8d35dba1ed2bf6485d4e753784c15",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/revbitspss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a486e0540059abfacbc0da7ad6d351e9758d7f53c6e94258f58cdafd5aff3b57",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/shelvefile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c7c4c6480904bd57b42fcff990520a0f110705ca5f226dbdb846414b8f1529d",
+ "format": 1
+ },
+ {
+ "name": "plugins/lookup/tss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0ccc8d25f17ce7d8e8eedddad9c9d1343f26915f3e81a6b8ddbfbd45b6d0a6f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak/keycloak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d2f740ababea369bb587d1415ccd9a21b3886a485ba4668ee24d80e3ec3180",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/identity/keycloak/keycloak_clientsecret.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ccc560628b7842a1632e0d7345e3340d79496452754464d889871685204528a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins/cmd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27ea6b91f5fd744b8b1f7b035ae92c6d83dab8ab021dc95c519cf4c8d4c937e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins/deprecate_attrs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a466625f014edee543016f1949945e79f9c6f675e3c2ebe039d2a63347ce40d3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins/deps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "572dd83d013ea9eca0762f8e0aaf92343b25bce6a867639102975015e5fe397e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins/state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b8aa982effd77ab1c8155fd0315830dad9ce4668c5e3cc716f62a2b4f0e4e3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/mixins/vars.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42321b2e74cdc6f133f06d2f3da04e7e1960abb99db89aa7115edaca45093ad5",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/base.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "581669bb552f09d82dd4855d39bec70c9e73d9eb35a482c9b28cf6c12a928ad5",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/deco.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cc2ae2e5d80912be166f4db149ed1701ed63716885cdd766ce0b3db786c540d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/exceptions.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc4cbbb1e854ab9c1813a4a7275cb01c9675bbd1b81fb8a4a777b92013e0d3ae",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/mh/module_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66325ed2b8f7656c0251e9b5c25b63f2f1c3600359bbe05cae1326cb5e9f4129",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/pritunl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/net_tools/pritunl/api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "276c8f9eac2319776a57a3e46d215467467b9b5f051b448696d07f9dfc322a12",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oracle",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oracle/oci_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98cb44c4b774079019929180f2edd90024f851efcd9c3a7bfdedf6f990af0974",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/lxca",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/remote_management/lxca/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bad7676e1b303f03c1371d4b7f961cd68a0ddbf329ad3ae578406b71416a451e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/source_control",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/source_control/bitbucket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94390a592b98b0245d7dfe28fa0c40ec09b613a7b29ac5647f2ac32a4e3e3d9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/emc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/emc/emc_vnx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcdc4934ef73ba3afe20ec4465315b26da7b0d181272701b859d4818d02821b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/hpe3par",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/hpe3par/hpe3par.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4948a4b9c921b801acd5b3cee30ec5c298a543fa0bc01e9606e04ffe0c8f3104",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_filelock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2603646cf6b8836bf9463fac12303285bbe1c71b81b95210e5c69526b58839f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_mount.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da86be51799ca16fa74fcd61b73e41789f2a42248e0832963b7c2ddb581d94e9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_stormssh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5043afba5520af2aaf27770e7394c672529e45dad295d0d2f003e23ba8c1c562",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/alicloud_ecs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c27a5ba6e76e100af34ee0e3b70591240e41912287c8b4025ab016738f4cb1c9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/btrfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eac9c40bf7e97f4074121acfcf470e0430236d891d0da214425251e88445d88",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/cloud.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40dd033fc9af53f39d0b47c7a925f9d934f486eb786cf0d0683e06d982fc9cce",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/cmd_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1c38eda36d3d59456d730777b0fd54e443bc18b7f9f69adab9bc455d5ed94f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b324abd90f80888cc4b919a4e1249086f145e308cf9b1081643d888708d2515a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a98f22b0a6cebc84175c7c0d90b328f27d30a043827a66df8bf696a81ab265ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/deps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1108ffc323383fd8b06053d3dc8242e332569f9e9a17d0bd4819f18d3c6f924",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/dimensiondata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7929553d9eeecb50d26984c90a105d491aa8a32d945eeb0f8b0ada1b340f21f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gandi_livedns_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77ca8211234201bd301a6728b0e7943ddba5e1c946f8b4c603a1035527fc8ff9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gconftool2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3c3dfdfac5ebea89af206f0e103cf9b7e01e0b8797c9dc4558d393009ab5cad",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/gitlab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3e3fcf55708a48775b5834aeb414df8691b83de920b3acb6d112d88fd725b30",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/heroku.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbda8c10463f77d4d61da5932c65ecc02d8f33c45d91a4de5a3a83c65d0226b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/hwc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c06bb56716060cb481004f3a78d9debbb8c593de003958041c38913c8f78d52e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ibm_sa_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aca525a27aa45ca5801357ff1ef64d450c5a60f594195b5d494188735d90658f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ilo_redfish_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "854a02d4935924d19e8ef87f68b231a019c609b6d69daede43f12f65d258a778",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/influxdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b60218d62295883947047697e6604c8b55ff3e419ed5520b2912e13e3e46d99",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ipa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fbe8b6da9a2d3be12640df88c4ce8907f52706e0175228a8f2e03aacf0f0f60",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/jenkins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "706fc70da922c6d1657faf6d5faa307e7c5c8608a4420368d7061e53fd0905ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/known_hosts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb78405e49c65fb5651f17bbbb60208a237a7cdc63e07cf58fa7f75e46e6b533",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ldap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "882bd750b5018f04ec17023d2bdd8d5a0c9484cff87f457fc47e2a628868decd",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc5fcd782d4fb9fc2c885ff7a193afdc7db1f3c5a3333d76839e9fc2398c6a86",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b29b900addc911e4c600d06da6bbdfc5ed72e360b67fb6fcfcbe69c3e3ff1f21",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/manageiq.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea7eedc9cb9baa248dbddb63fa3df90d3b78a101b5bc91f3bfb09488d92250a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/memset.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2c53131d2efd0bc45cce11439cd11a1ee5b49b30882b83c816102c97be8af6a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f9ecf9c995a26bca5600a3de342fd29dab59b81b9cb1ee7277f7421ce8da203",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ocapi_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dac6655ca02c7b2d48f243e28bfb88156c1cbb6e0b6cd612cf2f2bef93776e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oneandone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4c21b238425d20111bd16433df1cdd527427770c3e73f5f53d2a4786d0f240f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c1b305c0efc442986919109f1ad567d1577ed31af224132deb81cfd2abe6580",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/oneview.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb2d061718d377d3ea0147133a7b86ae9a0446e51cbb497a8e7a96ca907f67f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/online.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "801de5e16b523ee0e6bbfa8a24d7a83d6c1cff4089c352d4b95aa3a4816f5a3b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5880835c474943b634664cdb1001d642923d97c910596567abc3ccc463f74ca4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/pipx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e914c49bce660e3768d1a387197339a2e5813c24303b7b67442a52e43c5c2846",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c6b797f197c68cdd9a14c0a6f492083ca14eeb7798125b7dcd151894181ae4f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/puppet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e1957ab2d41bb194a5f100737467e054580c8e48b4997757c484b99fc02ab27",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/pure.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "997bb241660dc057aee17ac8fef4af503d0ce853563eeec68b097cdbb71fe964",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80bd4c38be7acfedc8542a541c753c87753b249419e06eb20c13884e643408b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/redfish_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bbe905987ac0a4995ecfcb2ef01a8cf3ee644d4d3c03397e3960db3e35800748",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/redhat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a13b858effcfc0989720e95f6cd7ea15b67d6140f9d2273fdddb6b304b4e9731",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d379fa87452d7421be99bacf692df749fda9bece5924da763f80645737c7c754",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/rundeck.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f013a30db68886db62035238eaa846def1945b9e7f990c90a1884d62dc546d48",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc9e78982e5e4ecbd5978b1af537e4b1e9d8a8d3615b999dfd2e295b824d8ec0",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "564f346365d0f82d668e173c01919ea3eb3d0080dcb693fb754d19ea704b7f9c",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/ssh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98adf0a21bd75dc1d0532e1da9988e7d6543e1638cab438728c416052431d3e4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/univention_umc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62f0b4c921241ab2adec4a53e8f1e03b55b5233302ef22357354c4688c020c18",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/utm_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "581a2299ec4c91d93b1af4dbf3af2bed41235c5395a444d899835fd89c7130d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7af185b9a3367d00933f2e1e72202193d1bfca38ee16ed8d9779b778e9eef3b8",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/vexata.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e82f71aaf18b3f30a9a8d845f5227463eb7eccfbca938d6b84903370a86e17a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/wdc_redfish_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6efeb7ca07df481c907d9d462a55fd5e89626301952827a93b962b1c8c332930",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/xenserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "068f5c2a7ae05c898029a4e19774e16f4b7818a3e434d8303568a10d8197296d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aaff7d1d4d4d40b2443974b66b42399de1b7d7980c99bbf2a14b11a5e211f4ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aerospike_migrations.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92e7aedb7f2785edbf7080e3f8160e10d751eab6ab671c54db173bf13c8f9b7d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/airbrake_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61f53ca65da913464b70f15361a8563b23cd449850ddea68acfb7d2ebcff3c43",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_devices.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88a79a5752332e5d5e93e4e301a4849ffce3db6289adfa7ef6f1a684a0ebf002",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13dd76495e56912361966ffc96a3067e8fb0bd70c0343977075765ce59816de7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_inittab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2ea663fea3d28a6779f28c394eef1da5764a6faa1ee036e7150689881a8cbb6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a43a54688f275a53925b5981f91b857fa4bf3e45451e81d16cc08878a4d8c6fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/aix_lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e95f7b27c11751210397c322a9c80f0b2a39a0e1dc2d5ad380b6ca37342ccb39",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/alerta_customer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1d6187135a5e3a61ca9058fd5cc14f3bc1394ecc305ba31c741486d0579fbf7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ali_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64eae2dcbaec70f75b96aa9724f99992b64036437d0094ccd1ab0cd3566b5ca8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ali_instance_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7ba3b6cc1c672200ac8376595e39f53d6548b5ae3fa2c41054d5eac26af26df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/alternatives.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7708e5911a5012631d19f2cf676ad40c4096a9bffad399ada3bdb7d809a4215",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ansible_galaxy_install.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2d972de8ec65fee8262adca0ca7cb4da8bf542f61e6ba5ff50385d542c8f505",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apache2_mod_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17df20a3d0ca2fb0b7652f0a11d7a0a02d6b1b4bc7b357b17e6a77182ef341ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apache2_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53d4683bd5506977fb9b8a0196cc574a0152062a07cb95e637ee0a86826af3e8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b17bc58d0fefe4507ad6a218d6c712dd60e636f124c43e0dab88859074020b8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apt_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45577358980c2d95993c4b1094a1366b2959c0660dce61e94408b032220e8b91",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/apt_rpm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1ca27a3d0002f64d8bf10c82cef83c5b400b2968428b3fd51473423e57e69f3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48196a0adae35bab8227c076060cdab1b0b8eda419366f9c6e49cb0260d0253a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd992ec654187c36c759f00e6bece67c76d5699392ac9acf182cb3a5f20c9eac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3269a9699e34da942ca8de49deadcc437e660a01333ab601084103fb5ec5519",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/atomic_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6111aefe3a8db057a447d3ed38a113eda43872c51b3628a70e3c1df51ef90e8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/awall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "574ddd2d99728477339198d45323fc88c28dbcac72f71ee4e335becd7cf9243d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/beadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6745b17f945e61fecfebfde619dce82782423b0908110407a8748fdba8011a5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bearychat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d35c57bf328848543735956341e5e2a74fd23c1a74e8a9d9f2c4c32edf0c95d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bigpanda.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5ee7f8dcedb96c12df9f5302930259dd8f0a790d8c3dbdf9e29ad76cafb253b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_access_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3497a9aa116023b2062b1972b32167f8d85cb02457d43ea273e624353a2594f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_key_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f3c984da7de315a9f6d79e0c4ab11d9a580447346e27f1727e185e31b26bf02",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_known_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f30db11c9f9350662470bac5b2f6482c03f4c64002c0e6eeae8e4bff65e4db7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bitbucket_pipeline_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c654706e27b80c3f5952c00d518181728ea20aa8d3cf698ba06d22d78280db50",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bower.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b1287b2390e1e37c03131f2a1371b12922a50352c06ff286f2d4553c67f703a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/btrfs_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80d1a25f52699c6a554abc38054025abe03ba58aa57b54821c7d870d1e2cdbbf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/btrfs_subvolume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3988751919cc17180c909332e2dfca5adc78926c7eb5daf870ea7234b30210f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bundler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a58a121c02fb4eefd237f48700275152c3b562821fc20828ff81838956964a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/bzr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55c6cf5c096655df5a7067809846a1ea005cf355cd22bb624b204f16c2f04ae4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/campfire.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cd7b165b8a29cdf99a78d3878391a967de7e78ac1e92ee3e2eb0033cc26a2ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/capabilities.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e228b2a53ec1fe0c9f3dd4a483661cf32ecd00d29f8a5d21c7a9eaebafa5384",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cargo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18613d41168fcdb4dcb40b5058fd3965c886237cb094e81e457d5618fcbeaf59",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/catapult.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b4258db56a0db7d27f8a433c9706164070b283bbfe5278bcfb677040e4f983c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/circonus_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "070337dcd572ade38abb7f14b8f85d17ba2e6d27fd002c2d075fd071e827a0b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cisco_webex.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a9d8d7cc1a241e3e90f4ae44df576f27d77373aa6cf0a924aa48ab69ede585e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_aa_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a5ebc90398f2171b77a354b84443f5704f1afb68b68431b75cff418978477f7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_alert_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "594327c74bd855e4f805292fe50dfc27cfae13bd84b09a0cb085c850fac1c423",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_blueprint_package.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12c46a154ff77b39202bc1aa0097b0cc92d81b9c6078712457b0b15cd19189ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89d8aa1996b14e4326480ae02c1f4ce6a9a19748a48e8a0377cdeac2fa9595a2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a2411b45a8ea3911a4ec58da81c3077c234b1d017f8528ae6fdd349a9f86aa7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_loadbalancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7eafadefa515ef1a18d91239eb5527c35662d0d0d20e243d0e1932ebbd95cff7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_modify_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fec92e3be9468ff8012ee0939adcbed3654786b3d0b4fe32b0699a2aedfbfd10",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_publicip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fd23c63d7fe063c6f5a968c6bcbb458764e272de9a63ee799fe88e2ebd0de05",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f71e092472a711dae840a74e28f2c4bd0fdb5a386cdbda75b31c02aa8542f7df",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/clc_server_snapshot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d20009329ad73b22365078d7e64f1799aaf05edc292fd9f9effbb27ee59f0298",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloud_init_data_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5daf130294f337e03f5ceebcf78ed741ceaac27f9f43c357c5142631221f0ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cloudflare_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e21f7a548347aaa20c7a15ee21e87b9cd8aad634530bfa7158f1b00499a565c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cobbler_sync.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0171f8da37611ebd0d09a66fdce9f2553340bf95c25e88fba7afb5175afd1ba5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cobbler_system.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d0e6981589cfac4f95f42f88d22981b5a229e1f576bf75c0d75be61288294ab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/composer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe4774f6714cbe5664372016499b92f12ca67a7c46ff0577f9fd5746acfed2b5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ad813c447c762bab9336caff71d652d2edb97ca70eb8ce2af221d332b32b8a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_acl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdda1625ec66ff4f70263e2736357b0146928d70e2e8a4a21d24a672aecb4d02",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_kv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ca07e607fbd91d804f06a4253d7dd521a185297fc6dba146fe90592cd77a323",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/consul_session.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "898216ffb17e046caee1a8b76919f23e621301153791a4e7f0f42f79ec1d82ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/copr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc410451fa7014851722b0a561d86135d8994f73c4703d1253053af09d7afa08",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cpanm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dec4310e0a59dd1b38fa2be99a3e1675354257b36b45d421d4731d696d7f0811",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/cronvar.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43e6004fb2a3cfa103644fd98659094d118b525fa95c6edc094c2ad29a2e2b4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/crypttab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e7567a57d63152229ac5d3118d582035e25851b6c6ffaf0f45508c466ee3847",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/datadog_downtime.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea4c54f1167d979736b0c0444b483df2a63937a88a3032b4447e1f8a30380ce2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/datadog_event.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4722641c798040b65cda242e174e774324021f7ffd0d47ca68c84b4febb700cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/datadog_monitor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecee40a0d6ceb1b8912a41b0ff8d7580420a41815e6cb61943424499688f949a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35a674794b7755a49123c47a52f2f1a6508a5aaeb75be0bcc2a5efc0f827a8af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/deploy_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ddee0443e6d949053b71630781ba5776f6eea76c7f8d7d8195e747d816eed9a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dimensiondata_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0c18effde77249db9072d7052c9169eaae33fc5d35e13b9afb5840e26ef02c2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dimensiondata_vlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "215e05484909417e3a2918feafb04c2dbfc96391daaf2bdb3515aa2a2bdeff8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/discord.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b4023894a7ca06b59a8e73dc71ca1f3c1a41746abc70ab00576aa009af14f77",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/django_manage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a181d9f341a0bea7759751f43153e6d3eea34b9fe539e79d464c4b4d4b169c9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnf_versionlock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd4d4e7267438d62b7e61fc58adb1ded77fe7312936c3b160aadd70ad43299d2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnsimple.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a2b8510397393ee00f3417b8ef8852704a14628aa1cc6a1534d8714d6120773",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnsimple_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e354ead3473b1af341c014303d1ecdef39232492d11e0b72d488cb1f2a34ba4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dnsmadeeasy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d028ae81ae5c74b86bfbfbf07923295e306203d5e93aa6753dc80b613896545",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/dpkg_divert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83a4cf59d33a4bbae1235181b07bd4eb83644afd5752b35dd322e7fc7d32285a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/easy_install.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20c091bd3ca00fac3c88de485484a79f6e3fd68fcac6ba0092aa7ee49d43d25",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ejabberd_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dea9018041169ae445b095a329224e358252d2abbce47e6410173dfe2076e318",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/elasticsearch_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07806afbd2d9cac722bfb32e74c28816a303da9545729cc55db24d8b2203f9e3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/emc_vnx_sg_member.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a89bf3410bc5cb100eda22e6424c058c545933d84c962ed4d8a9a16a33c1f77f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cef71555963f9e74970afea3e2c34ec812598e55d1cc65426cc6bd87d5449649",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/facter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fefa49704a0faab55d93c64e9bb3a897240afa7596199fbdeb796e5fc9117f5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/filesize.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccdc039da2e7bc985df6f20dc147f4a6a2b6f3f4c9de71e22fb33b1b5649a47e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/filesystem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf942ef1ee36fa86306681fbf839136a2ff55c9c93feb06bae1c79c4c98c52e8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flatpak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "572543052381333cbacca489d5c03320d309a33adc8d6a2b88b71c1f22ccacae",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flatpak_remote.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7b8984c934cd3edb3f2b1c6747354d84a6a64e6d95084dac92b5c89b03fb7f9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/flowdock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1418e6a06b4d774498cca8d240fe5c853e1c5cf72bd86a12391bd4d40c9bb0b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gandi_livedns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb3f4e63a2ecf0be77f5ed073d69f0b42b4b81d2d96a7d3216df043b80514b5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gconftool2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbbdc41a2418dbd7b480a43f833ca14b63198b43478b8e842368158395429406",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gconftool2_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d720513958b1ce71503a8826fdd50e81c19c1fbbc19ac20f9164fe0841dcf212",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f5a22564c1ccdea0f4b3af94b8b63cc7557080a4aa3db5ab1ec562807c13eec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/git_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e94f13fd2a359baf7697256ce965845fb8e48a57fa76d5dfef45f0e632c8f46",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "137403cd95b2a624146099fc99b18f91ada3486c3889e1b77ab2f80b0e81f796",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4520165c58ac18968044b5494cc93a12b4eb3b347acb56167225f824ae85b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65ed5462dbc99e29eec041be56b0bc58f20fee0b42b7493b5ad0c61440afc8c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbab052066a16dab590f4490b5b0521100588adcef29b4b6b982da95d0c0ae5a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bc1f3d3d2d68c0230bd45530e9b211d636cba360d84b809e652841038a1d3d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_webhook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33456f8f87e5cbded64f859a5cd1e61e8258650016b6824b9c50d827ddcddc09",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/github_webhook_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90770694707b05e7b6ac94c4c271ec155d3b589556f2521022b7a5e72e9dabef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_branch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c6636a6e83f640c7cf14e7041fb9d0595fbed1a633afcd2183c57e1d8214e21",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9b9d88004a4d1f5cf48b1dd9a53049c4c5e8c3d453e8b38e410c2c3e31a0e60",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3356b420cade3e7a5ba52143ccc922c797080b3c1bb98dead09f6797dfc0963",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group_members.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f67e9cf379128eeac3ff3706c0aac0accecafbf92ec79c22dc2209b22fc2cec3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_group_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12f05607537bf35975deeb99dddf3c872677c68be436f65ab6bf9be1e35ff6c0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_hook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a2f7f9ef8577d63951574c861b4b22bed58b26dd03a1c691e08461a722b2af5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a16c658c9c07d087b58ad6d9ed93f6834a0536fce7e251e410969c1dfa0f0be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project_badge.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd2facd211bff8bf2d14465112e09b11b19ecc4b09854888cc811da931d082d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project_members.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae66350b3bdc618ed2acc6e938c484439d83901a1bbea3363c8fcecfbf6205a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_project_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0621f2754563425f573978a5f89059fde433130eeda32c9d3c7ab1de86d61039",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_protected_branch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b01efa7f012007c6c36d65093d010b0f6965e3377e7a58d332376abdec6fae9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44055222db26d6b2838f4cf2caec6273bf2cc62d9497142e70537ae8938fc1bb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gitlab_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74e5e8205c49dcdd5d07acde697f5d47dadb0a3befa44ec167e817593b4d6a3c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/grove.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d5fcf9d5009d954a38a5b7c0c8446ee93be48086f04b2d3410659f7e2fe9d63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/gunicorn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3847d91b81d5b4917d877c3d945a1c6c8a814180c901a37b2a94c699d8b9fcb7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hana_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ed46857b938221cbcccdd8b2a766259d0aff61c7a3374e40c22676d404c733d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/haproxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2474e0d23cc1855af4b4b383ffdc78fd26a473271e487023826cbce1feb79402",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/heroku_collaborator.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33afa89a26ac74c4885e8e2b0f341d4c80280b4d994e2152cd92c50ef6cc7699",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e47d7c753b41a38cad8178d09a79a27ad82b12902ca1695340279e8933c11453",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hipchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feec640748df0de0e9f31df56ff48dc30dd3ebf713cb04d36f2e0dc245a1d48d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba91e275ac3da817b8f29e921435ff079d74f29f071627b368e05c84989316d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew_cask.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f5750acbb0afa275d2564e270c28f6afe1202e337e50a0ea0403fafc5cbedfe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homebrew_tap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab90a789fbb734e0140eb07b1e963e8cdc0aaf42df7395f5f0e41b62add7b671",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/homectl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a267411a289f6c95485b3e6befc889066733592a54748cd0bc0357ce290a14bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/honeybadger_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e8c767f87c5c69d8376bb54d4cd7aa9fad0e85eec532d85f3308781d4373968",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hpilo_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f92c702bc0d15584ac3f2f9a6804b1ebba623f9308c6a37ae92d8d4004903c68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hpilo_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5028ceeb0bb4dbc4cff34e821fda3d731e833538626d21011590552aea63977d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hponcfg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8e6e150c96dad033824e7157cd1a31e079064f8e712af23815455e6377cfd61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/htpasswd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76412e63b0374b59340592bfbeff5870a320845819111cebc8e3e31e5d942672",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_ecs_instance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90f1bb75df403719b8e3e1703b1064a11e0d8e7175eddc02aca8c10ae875195d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_evs_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a018e4492c07de643d42087cb4a705a52ce0c78d1d628882aec415705e9d53a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_network_vpc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b234db61358ee6847f605b94116fceb2a6d699165e2aade5c7f17b973f5e213",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_smn_topic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba47442d9a2834c09328430bd227597fc80f52e8f3ed6cd8f91b2088b404c508",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_eip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b6bb34e76fa010ba9ae5d86d87128ede2e20e1489e694fc7e372a6ce57f4417",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_peering_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "500c7cadf40aa66f1d6bc9b2203722fea6e540e61a28cf852bcf171e92e8a906",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_port.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67fad91e5d81dc4428633b29d340d47bb4dd5b16e946661f557568de714bcd8b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_private_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebfb01db52cbb3869e6324cd718e8a0aa753627d962c41f54f3f8485134b6d05",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_route.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdd8097fb8db7ce573427f4b63875500eb268a4c790bf1186e50e067a34155c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "25768a8fb5d1785090bb273cf6fe78057c09ec5773248c642cb8ed8d8a8c056e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcf0f661b4394f038c001c5a0ac8839eed3879d087c5e3ce8548b768e83ebe8f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/hwc_vpc_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8f7cb86bc537788144014a829c9a19e2187a245431bdb8e34b4707d1811c4aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0769cc9eb205adc8aa07251d5f357bd514aafe264bc02c6bcff515d1dc81a146",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "493c5be37792942566e928f4993c71e44b2d723515930edf8353b35781e0ec24",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_host_ports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a801f1164f720023d28c7fcf9fa3e9af1e220a74da73e48e61e4fc18d0c9bad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_pool.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5ccf7ed8f1af24d0ea954fb0ff187c2a620657cb67a2fecb48336225db65b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_vol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24f1160f1ccfcff2ab1514e8f9c5c1356e69888e9bf7ffab8a2184ec74eb159b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ibm_sa_vol_map.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64a023b62786287097e88f390b4e47bd86167203ff7eccfff7c623330d87cab8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/icinga2_feature.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "648c11df285cccc27f6092474ad8197b474c66a67822b71a011313673e93abc3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/icinga2_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43390079098113985bd140ff26472af7ba5382dc2a91eb93c9eb2a2fc861af6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d18f0ff7b49e43a1ca56730e2c1939336b657da1e3173dde9af2defa6ea4d03c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b6839158d5d6bed95253c9b9df23a36a31b5113dae8cd299bfe147528f1209f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3f93035acdaadc1974f04d9012c7f83e8a4783ed72f2f9818805ac86fa82cba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ilo_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6cc52c3b511b626416ae92dabe3320c0883b692ad9a563110c2e654b916fe0d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ilo_redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feb4815242f5b9dbcefa203e37d696ca50f6a0b7902e377842fa9142517fe4e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ilo_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d453d98d269603d1aaebe4b57e16bb2be4ac472ed39b529641572bcec9433d8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/imc_rest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0b9f8a0f8bcba2420b9fd3106bccf678f82d192464fcc6d9c0aaa0c8c584e27",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/imgadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba579d2844cfb2584ceb7447822e79b6a97f32a67c570e6de95c48acfc4fe952",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/infinity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "908f34a7bc1551288afecf7427f542ff3989faef569609ee7809969907769723",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9642c5169b3ad77918f02aa48eaecf3dc55993d109d3f42dcddceac0346af9a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59d68108945a7a70a822b189834df11fe3310881a3b88c32d84467e1a4d7156b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_retention_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "337879161084745755ca9f194e386ff4fd6ff15d68fecaee0a909d19216a34e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73849db9a2dc5412d9e1d2adc17bcddd994f98b6ef27b1e19c0f9857fbe322be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/influxdb_write.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f956300435269e206c7418bb77e7344c8a81628c048958601b2959216a6130e0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ini_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf2f658550a534e1dbe6f7c3a3d6ce0754fbb77132814b4cffcbb0f1a1a309dc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/installp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9553388ae97679c6404b54478f3beb09bfc616415fca84a2ee0ab7172d94857a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/interfaces_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "578188df20d9c152973c586ec45a173dda8078b841d13c81473bbc6773ba36d1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ip_netns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c8a8262887b27c3515a9367206276ae8b969e39c1f1488b9ea640d7764fbb8c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcffb115c7f4a2a5f0b1cb51c9957ebcb3687c20c9d427036e857b57d1e70cb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_dnsrecord.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36d587bfa5e91a7c32c84135b10ccd4f20351a497db84f15a2ca98087647c43b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_dnszone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "130c2faa7317bd3c61bf1bcf1aa0ca91536069d527266f07f2cb24756149bc1e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55b2cec03a5818e2dd230556217bac0632517158a09f88c02d5ebc90e4b73dc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_hbacrule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82a40310df2e353d9d223145b6a713fe8f84065a3ddd96c7c11176eb90c6944a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21a0543fc685a2fcaa15d14c29a19886cabb006a8cf06108f807bdc58cbc8024",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_hostgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "721de84c76619b0d82fecfa226ca32efb862581e5b8220b82d6c1f40a781ec1c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_otpconfig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fef1d6e6743b961febc84b70451d93d409f04c46542e463600d210753cd5db0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_otptoken.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4bb4948657e723619f1b69a3c1721c41f26daa8c2e2e2ee7e6d700f1d0221ab2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_pwpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed9c231d05370490c4b3bba188c51af23b72347c93999a8562bda66363abf171",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ae008b44ef9c9236a8b75fa39cceeccd6c38564e06d49137b40d0d063851a78",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75ad252be7199215c5d66defd8fe2b779594859aa265a543d539e2bea5277aad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_subca.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad18b2cd692e83aba38d0a72f6b77e95a62a8c0a4315d6f27a18fc7c704a0b8b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudocmd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ed5822c4f494c46433ceba7f6dcab614f425e56fdf0f41ef0ae147905fc2df7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudocmdgroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47a21729909b809ef28ef11c27f260f1e15e1e6b9ee604635c8e332592f7a81b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_sudorule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a75e59ac3871f68e30635e78e4c7e559eaee9ee291df542c7695fafdd65ddb7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a80805d7e93ebe02977b93b2a0a6018f938438c844e2887001f22c3f63f60fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipa_vault.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b953ffad3a2288229cacb4c0f3f6f7a328e8e661e445dae6b848bbce477d025d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipify_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "955573422d24518b6a33dd3f0259cf56d0854b88796f18bc207a023a76f304ba",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipinfoio_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34833fa6c7a1268fc70ba3833e90e2b10c5a18e78a66e7f0ae200d8232b4e2fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipmi_boot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e00a40374e7b43a66a09cc3563b693a0c5a1a77d3111fcba226bb4c4cf8aad59",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipmi_power.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c889bbe8e7d91bd46cae1a9134dc1805f2c288f24545b191502a46c980868f8b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iptables_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e35e80b31daa94e712184454ba6d6d2bc455ff5c32b017d92444a942afcf8748",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ipwcli_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e208f3f019aa041e443a42a63abca695dd9ec4d8b90107e83e15162a8bfac03",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/irc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be120bfc78b6c6f89eec96fe65167fc62a24a2e15ae7a74b55220710c7450a5d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iso_create.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e51604740a940c53ec91965e76637ee0074912282c8ab1396f028852bbfe7644",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iso_customize.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94f55e28d70bd033bc34ee2b7e710a11328db8ff41425bfd6cda0e67f9c8dcbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/iso_extract.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d88a5241865ad0f06052f5edc84e2d6ed7fc02647d29a48db583dbefc998f00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jabber.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4551ce02ceceee45bbb78eccddd4bf5be5b1d72a3ea7ff64f400d02bd4155ada",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/java_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7e28cb732857e4633e9693f1f3d8ee0951198dd0d6045c52758d0903ba27b84",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/java_keystore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96dbf361d11844ac5f75ccd1ccae9b89d08895a681556d8ddd6df94c85c0087c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jboss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a21306311a1caf21814914339c33f55df520682d88cd4c1e98118ecc5e7d558b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_build.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2062703e429dcbc727e43a769ae92dab97e583b3978ea5e6d9b0ae3e9e9c932",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce169c941bf99bb517fe9d2231506d58eec2dcf05f66686dd06b02c90ab5bdac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fae160ec57cb25c854235046b3d472162f5a69ba50be25d0d5ed49064cbcc20",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7d0bc2e1d0b489a489f35215ef019d5c54f079103875413a3573da687af7295",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jenkins_script.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f378fae28794a824333172cedf723120617657460c9bbef2a48ad43bae500fd6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/jira.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4be661192d0623095f417aacbf31a8af09f671f410fbe8cca7486e86cd8a49f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kdeconfig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcc801825b1134d88c34aca85234fb913f2edb8caa0fd79145cf8aaad5604fb1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kernel_blacklist.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9dcc62df8957ad080a0686f2bbab46214bd53bafdd99b1405e1c8d3c8d45595",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_authentication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bdac1872012b3f79ff6ae64558e54d0647b9c1a5ee4620e6e98e36650f73039",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_authz_authorization_scope.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea5f33a5cdf7989817fcec05d117765aa3a9b9504b9302377092f9f8fb1bae3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "786a36a9240fa932746b973582ae45e6feeebc64a064e18517b08f98a7b31d68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_client_rolemapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8504fa2dace6ca2a7ceb56a1005b3e2a93dc80b1043504534681093b593d909a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clientscope.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f29a8133f2cdad9f0e7452a2f9512540b72ca60c0165b8ba2a2c7c902e20344",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clientscope_type.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c76a8ad81dcc5bd47591804ef0b1706329361f34926e9334d8a1f4450653753",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clientsecret_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1f37664f9e841c5adf1a7250ae272559fb4cdc002b74f3bad043a80a39156b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clientsecret_regenerate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e76db17d9239b9d739b6eb232e5c15562e6fae7a661e8d694d45a5a56f95dbfd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_clienttemplate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08aed4c4cdcdd833791170d9347830cd35c280de32731cdd225138be591f0cdf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36775c495fc00d391b8835a1bd2aad0866da36a4bb33993b16755587bfeb34cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_identity_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d22ef8f28abbc9d18f4e6bb14dd1da8555f015cac982787fb6d315fd529b399d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_realm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60bb00520f91eaa275ac527b6232ec2f21a97c41fc24bca90f1e660404d4de93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_realm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ec24f7a583d036b0e3b90511fea46574ad7bbec06f36f4fee45d62c49f17c27",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "099fa1153a347ae9809057feabfa027d27607f48e7f9483ff0c7c419ecf4c60f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_user_federation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91ab78bf22dbc5875e54af1f26f264f88e7c32e8551f03872e0f8c2fdb37e63e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keycloak_user_rolemapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cae7fa40038590477b7f10b4fe54058307f62960d7d79196c93563155a4869dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keyring.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad36fa8002c63a55a753416c392e97421bc4f579132c77c2ef82edccacbece8f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/keyring_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31bbcfbc89baf8641205f581caf95d393ee74f92fe901627bea1b639d992e26f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/kibana_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59d6bb301266fcd13f6cc50ed964d5caaf4df70d5ab933760e65244851314f0a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/launchd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "356ab45e7f1f7f142b1c30977cd08700fa90b744d5ca8082e9c74905103f21fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/layman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0de70649704ed2301401e6515ed8b3aaed22f6d19f4ef864ca8cdea34a5da63f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lbu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d6f52707d9f3ad4fd378ba563a09af443bf1d54db6010e7a44b29c37a883ed6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_attrs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78510608583a757c3bc9912b584cb4c5d829ba6b70aabb908c08a1ca1af796a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_entry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e779d589911e22b4c681ef2df7e4fe2f217b8df2c6122de9ae0b441aa7eb164c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_passwd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50788b0344b4cc48ff0dfcf43c1526f7b29aa6ff9f6588b3d2d2880bb5c3d95c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ldap_search.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "509f169e92f7c26345e77f7d32fca4b8473dab07be80f110e62654b00abbea5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/librato_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ff82b7015248d125005011d38df5bcad4f9deab469d3b1ab0e8d430e63ebe73",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f9e1eeb9ee07644b74f49a20eac83be430214a40c3df052655c1f3be1fa5be5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/linode_v4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fff3788dc38392139b78f5b5f5f5640ead22db632682f8c225095e9be97dc42",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/listen_ports_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bd73959a007555bc3a6aa3381459319d26555afe21fcbb29b7ed7a1502454f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lldp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d3eb31c93da793cd93fefe93b00f9b66f9218c0c4fd9a76e76e843ba4f3a352",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/locale_gen.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f83eb084c5229b122bf26bc735fb6dda550ec38cec746ba023cb32f552e16c8a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logentries.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c998c5266c05eb4260d5d89080f6fdcaaab8065e2168fc5d2e5723b511decbc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logentries_msg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29c84511cf63215f56fc7a50c9b35ed90dd3de4142b4d78522c17c960a892289",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/logstash_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43909fc501a1f6b07039d53843a2cdf16c80fe37c0777aeed77e39d50b62011e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lvg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a20ea201e30b4e89136f651ac54c9f2c43dc49cdee53687973630564fc7a8543",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lvol.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa4568ddc6ae42d2908982e0c5dd7b989a240d6e4e9a6e2062a4f5e23d2d326",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxc_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bca54560f809417f4ee9d1d3e878bb5e6ebc3b95f9874c30b94ba5b730d613d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxca_cmms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2899e07227d3e7cf73409cea6edca07ad2d9c168e4cf3fb7a80882ea94697b55",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxca_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "495d21e44fda18a5fd497590190a70b9e5a0d84c4b7186479122f489c9dbbcf2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxd_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bbbf8c67ca4b6a8d3bd73ee55f377821f38678afc9f10ba0c929f4ddc52aa68e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxd_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89a85fdbd5d521d008e26667eadcb8850f31860ea28fe24c6c0c25364bdd726f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/lxd_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae8453ec8bd7c0cbf1f12ecf63ea9951cd65067219591a17517e5628f8d73b51",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/macports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5583e723feab3467549a14fbdd90e9098570182a8ca413d26a2dda683877cfb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6000a725640848ece04ec174f729e48917e0caf81bda3aab71da87eb529ba66d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/make.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6a2d2294d9c7e1ee0edf9e45a3e3fb0a8df5b4b288df02ec8c6cea4c500e8bd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_alert_profiles.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c6fbaf19ee5230630fdb636d15c257a5442e2e1119a1c7c51801c6f061e9593",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_alerts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3f271bae5ea27a6e0bf8830f75923fd090fe627369541c2241a174fbc0c3198",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b421343af34a9d5a515bf304ca63d3f9fb68e5d7ac2d8473bf4eeba1d9d54f2c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_policies.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78ab36008d5219c0c1882f58add98c8fb8a03eb9d8b19edba97a73bd3b11f00f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_policies_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b1e0e3d1c159b65ad47339aaa59d6c2dab98ee2269cecd7e0ace20aff4dec9e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "398499db7253e6905ee4c0ff208044f6facff0474abd70323d39e83eb198a5dc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_tags.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcfa97c470a511b84140685853c841268e4f6c3823bcf98c28f2bc915af69cc0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_tags_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c77f48b292aadda5e88e5e2d772aeb1c5e63d2c2b82d999035bf8a089040196",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_tenant.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "287d73123e32efde317cb17ed61e274819100a4958aa2246b390c1ed94426441",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/manageiq_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6e36750b0a297e8f06969ef7d7023829ce79fb1342c6fb7dced840c5a9a8fec",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1845b14d27fed8a50d2ba33aa35c4f31575112cd805746ffad0c2a8cb5f46cc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/matrix.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cdbe1e557ec5744a71506b382e5e8d6279451f56d4b63c704d730a525c9cb3b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mattermost.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "052cee31d1efc1facdd72f7f052aa11ecf2e2328ab797538772de9b85e420cd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/maven_artifact.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "215b47621c95e9cb8720635bd7d51ad4becd598b467356f53b0a613d4f201b60",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_dns_reload.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67156adb1ebb682de81595202295ffed3314bf6c42f786cf326b7c235082dba1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_memstore_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "963abdb965c9991ec073a299cfae1c608d69c2330d4d885359799118ae9219fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46880ab646a1161611e2365d2f7c59b2bf178ef0f8fb61f8b08ef9bd2d01dda2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0b008baf1976bbb58f8a1a74ddcbffb38d16ce7f812cb3aff22512cd495f03a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b176e85c5d585b0c9afd3234798cf820310f46c83dd33f61bf59fa3396d6765f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/memset_zone_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60dcfae793c50c6ca8eb2377a46c962ec34df88e5b694f1dd8a0b0b00277e7ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mksysb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "411a53424247600bae0a604ae3fecec02afb97d91283cd389e44310ab8a8d977",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/modprobe.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cfb6e93ccb524a5e73a509801cc081f89b05743665bdbf93eafa8bf19e5e205",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/monit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4527c6ded10d22fd9fbe85227c85583e889a00abef6070b81c305fb3829efc8c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mqtt.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9737efb497e1d65a7e04201eeece3221930f214ffcfca19c5fcd3815d88bddd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mssql_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58f7934c24577e50ff17e6a17c973c0f2ee238585c2e36a7db9da169a88b5df3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/mssql_script.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a84081c1ada3a2365c6222391bcfac644ee43d27b1ac96e709593ff91aaebd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nagios.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67262bdd025c90494cf2cba2e626d9b9407b953e30163e3cafd95f7dc4078211",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/netcup_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e43bdcff828eb8a5f731171c24fb8e37865480ff755cb49bbce04ddcaf36a49b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/newrelic_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63382fef809beba707925b40ef71e7e0991ef61a4b966482e3ee203bdb574613",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nexmo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bdf7a7433942e79e6c62a69c4912d005d7230e9df9956977b14a6624b7fffa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nginx_status_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b59175649ad6e22b14dba2f9beafd46e8cb8b7849e52242f095fd9b935e8a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nictagadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d27604d65e86fc707114f36f0d4045ffb721e648e1fb9374b84500ddce6bce0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nmcli.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73e72cd822c2b4191b29c332b9ba8ed686a0023be4da89b9e578fd549b9b59a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nomad_job.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0000e71629d0423f09de699378fec5a9af5160d18ea1abbd8c46743575f2ddd2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nomad_job_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1849d4448a72a45405c4b06969737075066c6d969e47b1b7c806a9fc48ac1b5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nosh.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27102003c63b13a1dcae92ccf912f8103ed630c9f33b27c40d09cafea6f22e87",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/npm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7087dff9d3c6fc79626ea106b60720dc04fe99d7e88c30e9c9fd83ec6d5f8529",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/nsupdate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58367cf4e6fd72c72a6bd003023d484d7ad3c827a962aa47920389ec43d6a6bf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ocapi_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d62a2cadb602263121a6d48423e58664a2875d3e7f3d936f8fe7035ec43ac9e3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ocapi_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "868c13d527560182f2b8769c31fb5259ea8e9f3ce7cee9dc63b0e65f4d02856d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oci_vcn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9544fc21fab87d851fe3878a7696fb3f01b8229bb347d35f58bc4079568038be",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/odbc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80377d34f6be998e7767a3aadad37f34e2d84ecd803f86e3520230da21e328f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/office_365_connector_card.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30623c47feccd97341741a32c705248135b6615d5af5c4b3d5b7efe0a35d7027",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ohai.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccbb6d0e3c3b8c8d4aa39b0fdb963523fa7e3a07eaedadbc84ea6b9e3e0ca87e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/omapi_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "642193c9f2cbdfc3d46bccc5685da484caec7548ff3e21dc99972ac2c558cddd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4d38114de7384770892120ddd618f13eeff9c1506ed1758d637c88f5fa92267",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2401889a75569b4a76e7520ab5e6926a9c935af99fc01fb618dd8e13dc031fe0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cae82bf8f9c6b70088a7b0dc28b88618bfae78a8d4b2ddb8e9edc1f3b16e674c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d36d93fa6f5709527ac774211b53df76393ed322878376b5096c97a6eef21267",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dedffde8e661c8cd92059444f37fdda8b9d8f859db7b6e45a94877db78a8db1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/one_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eef28a59197d6d0defc65858e43a0f7e89f080adc04f314fc6aead874123f970",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_firewall_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d4d2c4f97da3a4a4b19c508be9a3cc68cb48ad5cbc5bddf43047823fe95569c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_load_balancer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2543504063531517ad213cc14171c8d143dc113b2e283885483ba9a41792788d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_monitoring_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2bd7ff548bd89b760a4d0e1ab0bfb5061e8199f5a566eea750492fb0cbf2f92",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7579750ca046b8e114f4e542c8d087dfa8bae65fd848f4352b3a6ee5708bb82",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_public_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20a161bc781932f3a564ebf7105c0659d56350a5b749a52eba1894707667ea87",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneandone_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc5c8df04b6a65642acc6533a9f895b435c08844519ed5e89c3a35e5315a963b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/onepassword_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2c8cbf0a37cdf4be5ebfbd715166df56bb95ac7f3b16f51547df4e0c843b148",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ead61d33f5b227f8c36d6a543a023a7406bbe8608a1b639e1f360a0c36dae67",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_enclosure_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e2bb44e9658481f1395f466f0a311857a7706cd9c3b6acf92ca5e049b70c997",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_ethernet_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bf9e298685d31e6e3c65e90d310143a5fa73b1bf34d9d56c197f20d4beb2794",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_ethernet_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91dbf75f4ac93983c56f5a00786b42f0c3f8e265928aaa583e414657c6440a33",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fc_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c98a3a0c78e7e0d047b0610f1ddf0fe4cfc9b5fda4dfb5dd4f9213490b2fb62a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fc_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bda33c70e7a21a37afbc964f92991fd9cef909655b45b7763177f87b5ae17b49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fcoe_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3b4739ad5cd6d89823d606b2d9a39b3def0e2967d6f8b04ae7f3fe2a821ed67",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_fcoe_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "732dea52b31191cf263fb95460247631e1d4cbfc720472ef166842232ba252a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_logical_interconnect_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21f72a3f8ea9ae75c8315b7a7b06b969f4b1a07c7da448cb70d54f484f84667b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_logical_interconnect_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5f38d9022dbcba12146d6223b0c82f4ebe7c9b8d307e4fbd01f3699b75d7386",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_network_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba56e78c0e00440905d2b606a307fc3837f9c8f8b083c625f61c99b6c66790a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_network_set_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79c582f5ee67a8fa4c1f07ed6895b4c5ad632b20b7fb60d3f8e49f4fd0f1ff3c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_san_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c6bbd6531cd68ee5480dab844ee81d3952dfb39c8cdfd7f06ee087358c92dbf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/oneview_san_manager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45dca5e38ba6e094cb680adfd28624685d0cbd0e4a47f2cc42b1e811c253772b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89479d50f2d4e4deb2e7625c389df849c57ab99e38de31f7be2fd8b9da94d6f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/online_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5b02e2fbab19238d57c150c63686e8932bef84500b5f38a9bdd9e612d3f81e4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/open_iscsi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f464bbe532ae7e1106e90565ac75c492fcfbabc080e5bdcbd565e6a3c192233c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/openbsd_pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22c4c4c87144f46406339180177a2dff11dde76122f78ab358e32d35065ac2a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/opendj_backendprop.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19850b499955b281a545db40931b4b8b0eea21e8b5cfcad942dd5d1630804a34",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/openwrt_init.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fa03eb5cddc2d8deae385c4bdad8a5e8246fe91046cf9ce6e4b69042e2e64b2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/opkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "601f102a1c8cd8f88ab07b4fa35e785db85afcfa92ef130e62e572a6c923c275",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/osx_defaults.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "362ae9073e6e548c4dad2d5924c3b823b12007674293ed337937df17f0bcefc6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_ip_failover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b9b9566d7d0873f0bae9c4ad68fba0c7459534263ae0b8c729115a5e3eec532",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_ip_loadbalancing_backend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2093559861b59ded9d3fdc8bde9bd48a32d01f99f9c168cce1c83536493cca0e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ovh_monthly_billing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e3cf12e8142224c835005cab6b805414349f5f46a05fbca2458aa381ae6e495",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pacemaker_cluster.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b761cbcb34a0fb34ee91f1961c8240a1850bb2ccd3f956fd4f1e3771302ccc21",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99b81cf01b9e511cb398b70d0ae91a3fa3dc0fd99d09a02b76c78950bfa00949",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_ip_subnet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bd48f1465a85505c88d0238904f073c9c13645d9102a7fa7706248b216cc7bf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c33e79041bdfec0b870a3e869857f56e57ec17b6b9681d987d68e85cda5f9f0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24704cf647370970962dd477bc4c90bf8e503f331ad27bf242b1699737d16cbe",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94e3880ff25a60ab0c75fccbbe8b3fbb0e205e8aa5dfed53747706ad2aab032e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/packet_volume_attachment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29aee868278064f35c4b708fd8331f3b5455708f09c957008e0525c231a4e476",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pacman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b70c839d4fad92b205572f88bcac91c39fc858566bd1b781d7a2c418cf0ce5b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pacman_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8668d6e39f78c145841742a54e7914b9a9639d48a3fc73a74c95cd5fe2b139bb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "827b0403a098b677ad7648e0c0b806de812b4cc9286c9ff4125579ec11211a92",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae2718f2b5d083fb37c02fddd2f2315edc16b9ef831ac37a95ab7a6e2f6e057",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_change.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eccea5f30455ff59c5fb7e0b03e92eeec4c0b39ce8bdbe94c65c51ba1f24a895",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pagerduty_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9c07a48f2cd3b51a5c50717e30334ef853536e5c31031dcf23242a1fd64dac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pam_limits.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d91a2175af8cb34bd9c20ccd042d6c5ce12e39be8faf6c49eab3a2ab7c104f5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pamd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fea2435d3591ec72e08230d6551a2cc08f8d4afb5a063e0b0953abf68d43d438",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/parted.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c66bf9e1617bddad08b0a96a7c1df92828967e6edeb7ce2efc46d624a8bf12b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pear.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60344b890209df445edec4f97458af01dc43e8db984e258a0bcecd9bb2ada464",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pids.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bb378284631bdecab170086c5a5421ccd2eff1b660faff0f3df8b7650ad3e61",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pingdom.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "85a20006d74afb4ad59c952b779e10a2a9bc7b52c62eb5dced146b1952825553",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pip_package_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90bff70ebcfe011f46b95ab1eb2da290b1fb16d671fe56079ab959a24768085c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pipx.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02827e663efc2960091832160ff7ebef6eb8c5909aaf7a211e59bf6f12ef7aa4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pipx_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3cca85f8398db8755cf65c54ecf79d34f036d148cec69bb0cd03f4602f316d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkg5.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd3e53997319e1473d1e4faf830b2bf57c21737a72c99ad022560033aeb2a073",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkg5_publisher.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9dcf64812261306793422daee37ebc6c2660c90563a9d62ebf52fabb3832730",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b742c95e442b624853223f54226001fbbd88268179f841e044c4eb0d9d8f16b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgng.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5640531afdc589158458b5cdf712f4e4a2d67df8129531028190e3d4e4d753d5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pkgutil.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e47da21fba937908855043d154e241452d0deff13b2eac6210905b1701f07b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pmem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7e827132135a87a721efe2d6c9d58ce8a41e185bbc9084bfe813ac8b777c136",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/portage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a9d8d46d4ad616cd560b6933287d90ebf00e36d84482dc3e6e49fa42bb198a3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/portinstall.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0fff5d00287f10a07a7108260e747d3ea02eb79f3f56eba22bb382d9657f5fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pritunl_org.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e72265d0c72d3dd91a0c739eb73c150b5a438edb0debc6e12497cd1b709d9457",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pritunl_org_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aac365855203eac563433ae401771928500f2baead9eaa01728a1bd3dd152530",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pritunl_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65dafbd54661337ac5c244d52ae1e4b80e6aa92d7b78fd3602077b390ec28a25",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pritunl_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29bcc9f395f5e956b34f787eab78f2233139a90db3984716e570ea3e9842d4bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e4c6262c494cfbf82248418fc37b9d12e68fc819d38defc67c1c23e58f12765",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_datacenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a818198efa5b964bed4a2c06eabb42e70daffc64d24b520c8d127790035b550e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd644929dce276dceb3eed2c396f3996e9bbf7905cb8d3333ed4f7da9d84dfe4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32ef48b857e64c0d5c4b555173a9c1f7ac94c630ff3e36a9a7605354989f046b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/profitbricks_volume_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bb70965cedcee4229d3f509b0f80ecde71d59452f787108f799822657058649",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2bfb21760298dc51986b35e7fd97641e16aa50214e5dac43cecc1b29787fdc4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_disk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9d3ee1ffeecdd4061ec6a2d8933fd6617ad2133dea517b724543534440a13a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_domain_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c15c31af9b758101cd6598e626cc8245d01998205e24a36037e71ba212fbbb1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35430f3908046b38ff12572e85bd305b0d477bbde47066fb522ee8984b3dafe4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_kvm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "108e41bcfc715a4afdc3858b2014aa4d6a738a881df534597863f965550f6f2e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_nic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "114afcf1b6bd37a2075b4d9f1725d614da6ca3a43204995ecf155ebb39e33613",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "502dfe87410e493e6133260ec1a6ba45e47b61cf7ff7afc3ec2dfed42bca7c63",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_storage_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebd62128dc3ffc469bb308b2959edc19b852442939d4a4b48114ed51f235d9af",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_tasks_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6f332dbfd66dda7fffc49c1b72872f4a2c9b746e8c8affba649f521f138ddf7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_template.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "747049ec9db4e6b920af301db484d629b52020fbf987c7689e4c0d34cfe429b6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/proxmox_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03bbbbf2c76f4bd992f37ecc135b98aaa5ce5a77c670b4d538ad37701a4d6920",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pubnub_blocks.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50a1f072c611d640c217f7a1c16836f288dc4daf9eb88a58876a75ffb097e6a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pulp_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c210e8698517338f402ebecf40f206fd3c032b9633118fb2c0c623b902c04754",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/puppet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3d32772e05944219bf0211942003088474d1172a03b202ddd30bb1d24e0af25",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pushbullet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc4a405e57542b0d5a3b6f0baa5a9a4f9f9b9b13e8f09756fd390122be801292",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/pushover.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "088758cf1e02e6e01f9e641e44f13f7b0c7a9f333a24b0562512b194f55bc9dc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/python_requirements_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c1bdcb4a1b82fbcda14ecff1073319ab2eb315775536ccff4713f0b8b29295b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "260a3f7830f52a926daeade726cb1b6215443f38a201991d8af5d9c6736a71ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cbs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6df24e7edaa990dd7b49cbb3b6f531d38c1a8d249672619ea44ff6a78d59f45",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cbs_attachments.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c26bb16985ec14e3054fec4586dd05fbd7f60e26f129ef6cf20d101059ba2fc5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e12cf034cdff7a969f042b7f89db3e5fb74a7879d97461d08d32bcce12f3bb22",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88a8df01fcd5e1935d835ff1d9e562ff7ccb0b749a3aa8090dd18131f8f1ba49",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_cdb_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd662b9e594bb429304340db2fbd6b75dcb8e730626c65118c733aef1c7fdf1e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fff8b0f831320066275a2d6a3da52c200617b7263edde722bb022c1250fa6a8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdb205a8704905e0295ba5ac81d491b3a9ac3c286919ee60a313cfa31e880129",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_clb_ssl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab3cccef409b9febcd4721812e80dbfd180d2911c4007b5ba4502f800a9b6372",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_dns.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40d58d336dac0ce8c20947614a2406970f665fa44be14de4e9cbc54fb965e260",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d274d0ec45eb6c0b52f9d7a2eefbd497dc3170e5709524185a2f4acd1e4ee81",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77cdc7820cae18792f79ccc1dcd1796d6524b68125e5b16f557ad1e42a1189c8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12f4bab820a8d7127ba347533b91ef576e23054254fca318200babf024fea94d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_files_objects.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42de8366e999ff5713acce6ac485cbe50b5243bc3c407ef6c6db1db0e347e7d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_identity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29e9b9171bac729414dabd84332886064f91e13c57ffa1dc5431c7ca9f69fe6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_keypair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9da9266facce9e73ca572c9739bf671f82c815a9aef9fcc66f78d97a5055cd86",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_meta.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3ae22fc326d950ab0c23bb5357719891365479820e82aedc366e490ec03b0fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_alarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae89f314313dcaa6301426e1c0568ae49a0f898a1d65f9c932e26d05b68b9fd3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07078968df29c55d784e1b69d824b362d2c929926c64331035a700ef80078d5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_entity.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67d5683bd92879c39fbb3fb7cb30c4c4fc4226606d41feedf0acbcadd54f8907",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_notification.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78695081b337a5513bce3bf7ed25148c78c59475962264fe56a9813eecbff2db",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_mon_notification_plan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03ca5592440ae3e56654578c1663c340642952176039de5123777d78df585225",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66f617814f7229b709e6cd393a18ff0c5e295b95e16395134b689b543bfe051b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_queue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d47ee58bb21549c6cf732396adca95150332637ddccbd647b6ef3a0804b80b3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_scaling_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57ef0775660592e06c446aa1898d976eaec3b4fa4a785dbb1fe9e0b7273987e5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rax_scaling_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e1fe0c9f1d54bc7ab5f4bdceade1059d6776bd3b7cfaec79baf18b37a62b24a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/read_csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3879eedf2f9a57f8782d9f0c37253d9be21882a73c1d745b27108a876b26e1aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b3aae9b595a6be520fa675b75cf63c76096829275f2a47c54e8f337db7a79c3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce450be810df3a98b5fac0bc7b1e2c50b7f1dc5afb87307a09b8145cfb98e523",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e1d11a2b7ddc5f00ac5a34cb56a76048f6e27abe073a2c404089c0d5429d09",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redhat_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb514c4810686b3e2f682a3201772d405fca842ee01a11dcc0dff677806ca51f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01b399785ba7ac0e720a244f922959b466911097abe119058a9b95f40733ea0f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis_data.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcde54fa9b831f37ba88f806b55eb1a5000cf355e85d0dcd46fe8c10a30954a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis_data_incr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56586f157ca36093c57283b9a5720ff33592c602344252e76f4cdf89fa685bb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis_data_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29ef49640182c83f5e72600a360f87ea9d7a0b927f427b1f4a0b2f142dece788",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redis_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55ce6ccc23b568b3b8b3ada05124c4fdbd7c4811ece274680acfd09db65ef116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhevm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8dc25f0a5d69b8497fcdf9be1228e31ba39de028d22735147b46461635ed65f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhn_channel.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9545ba247ae7ecb34f5b9edd18faf9f94d25196899d8ccf701ce3cff319dcd8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhn_register.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a37c00e7e516636e87ee192b4fa56ef75025db675182edd383dcc7ce912b396a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhsm_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c18edd6435ff9eb2face29e1934e1ddc7c214ab2e1ea4038299e434c3f98ff33",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rhsm_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df2a80807ccc35ce842ce711d92b38087419c644614a939f7a5cfa19396059e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/riak.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19bb95370c34da795cb2365588c046ea7f0fe2e5df78d19baa4f04d7bf4b9d9a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rocketchat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6036e4ab63f42cc8c9379697d3e182743bdb5300d6d2819e533215525f70d853",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rollbar_deployment.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd7f5dc67e8eb3e442cd04e29910eb1b68592b2dc6c05e012af354c0098a3990",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rpm_ostree_pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb2559ef1284b23455cbd0c532fef23ec2b680bf1f374e2ca080874062646aef",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_acl_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e632a8b90b5780be4070fb083870b5e749622fe8678d9b26e1ae6893221310bc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_job_executions_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "378f9d4a1b54e31a717c07f1d3e79cdd813e2b7896546f1c2dfac146d797c209",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_job_run.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "500cf6160948209fc5d10116247122ac38c12775d379ba82aa0fc00d2994aa66",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/rundeck_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07f03961f2f2015e02662bc52db3216c9d1d776cf78800de074567602b102daa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/runit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d39edae3ea5ea850f6e59aa8074c40438c3472dcbcfdd4630bbdd1a49e312c41",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sap_task_list_execute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ada516256b26d2db01d01ad9766aa2d2d72f47ecf36a5c2259e9e77163cf60e6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sapcar_extract.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc9fa7332cc7d968685f33f7f0195b9e4e64b4d56082d7aaa28d0e3899973f4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/say.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6360f740833b259d8e482e02e4ebd3e6fb70f96fd7b966225fc9e193884b2a8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff299f1bc19114b13ddfe1a151f8dc22eb213a4b30cc1237e7fce1a045ca03a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_compute_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fdbf4eba9c54e90b0a4580bdaaaafe66c5b3e65307d75974a2333a44eddb202",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e5399eecca5882522e8092b1cdf07366447aead777a9728bc62afdd5ee747ce",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7287e9aa176030192fa1edb2b04e44fc270254f4965d0904c9a866ed01938c21",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container_namespace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ce757a1e9efd759c3eeeeee2e808445b3c37f7a32a74979d721759cada2f840",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container_namespace_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6539a012ef41776e6e79748dd3556394f9e55dc8a29e60bc10e61952922d86b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container_registry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0aec542a806b8414234a9037570a395f59db239834ff2e3e38f175a109abdcf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_container_registry_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b980023c1c86c44e2d4ca2749b13fa21c986f2fa78e419475c08c82ba063c8e7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_database_backup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6729019f0e417f21305f25bfab6318bb6416f8fbc787ce76347df4303534db24",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_function.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be7739acef5106d9f713962ecb326200c0b6cf2cc64120fa6df9283e04d9b3cc",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_function_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ea8cb568b9b874b2abad7615e50fc3d3b09159865933627e3334539216ab6f1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_function_namespace.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd802a0916f572b697b0d4a71b89870ad915551611660dac661e7a68a517e571",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_function_namespace_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe1806928be724c8588ee351105e8dc3b424ecb494f69b8707d9ec273045705",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1641188aaab75f822d9d907ee0e42a65de1a360276c905f1c4ae482b146d4b5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_ip.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df0bed7f0300bb73600f15ad695cb9909dd319f953e4df541d05e0326bd1b05e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_ip_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db31e75c2b4701594848082ef750d3dd954ec1ae6e2cbe31aad0a48af7e47c4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_lb.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c405f1d1f0e8763657353c861b5d99dbb7468c355f4723abbdba3ebdccbe33ac",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_organization_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68e3af9d6594c504b81ecfcc7b5f90b0dc5d823659299a6e963cba7f35c195fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ef0e5f6203c85fb1b770ed3e019e36cd1ff9df293406173a3614a083621f954",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1aeeff5b67ec38125f14bc75895a3046652ef72dee0379baf23be9b10a2c69d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ae8d23b82fca34231ea0c575c681577fabae807ba6cee0c17c30a7c400a8f1f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_security_group_rule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "193d731fe4d4a9874d5b4c4dcbf22110fe03931769b6e2ce169f6cc92b8f195e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_server_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f901597284a5368094bbbb1bba1d97ba8898b924c15ab7ef26ba74ee1aa3817",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_snapshot_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4373e31ca827e0601ad5da6ef1e56ecd43ea21501bb6e45fb142ba0f1dc17e3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_sshkey.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6dfc50e9ce49df289e96f6512aa30f44efbf907cc99f2eb159afcdc78117033",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_user_data.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af50d2abe7778da9d8bbcede626c8e4c8e3a06b1ed262c709282240963f328b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2529f48861c04f84f41c6332abc0de93560e4118a532a240b4174390c5c18cab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/scaleway_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36853aaa6f4ae15f5daaf53f58d3743bda121f6a322115e01e1f4cb80b71808f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sefcontext.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4cc9edd2f77fb303c88c446cd9947d20d80f130ecd26c242a346a2de7463137",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/selinux_permissive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8bf1f27932d66c92b3be50bef20e451e12bced7a388ed22f94314f0221a499e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/selogin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dcf600342048e5c5f65fb591f21b1366615342f8b8aaccc8799024680f33dc9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sendgrid.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec35e403a1b0b5cf24600e9b9b311e8f1c4dfd7cc202dddcf54efbeba113e6d0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_check.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e56acdfd74abe21700a59d282ff3ef0836c4c1973e4187e233396a7ef6cf20c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f1429d29a0cb1efd92ebc5806f3c6fb4359f0b3c430c373bbfd467f46f07eb2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "196271a3fcde7b6cf45477a4710918258e1875773508c865a259db01dce700d1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_silence.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8b271740e4e1e2ef17b2ab9ce3ab2563da9af078124b851b7716b5008f473c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sensu_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "374f4a84a6ba9c29ef217d9e1940a3f3bf207747b2e1d86d3cd093de226c1f93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/seport.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "116033adcfc746053d9b6579cbc8e6468a053034e740081627164e0ea57e12ee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/serverless.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e8e88323c16862a0cb68894799a2389d4d70444890cbc73dadafaae6f8518fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/shutdown.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45e539174be9c8211012f7733e9a360e645dea32126f560070caed5c029bc3a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sl_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a37782760e5b630306e0da41cdd1cd0566153c78bd1ccc6b9ad0ae15157ed35c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29945ecfeee355e8cc00b5cff7bedae410f29f905b189433c1e3d0ca7f025125",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/slackpkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e1f7f606fee691d5ca23334a3a25614083ace1e630116ee28667f8f226728fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/smartos_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5601f8718c9f2c4a2914fd3d89bd50b412b793739b1d3145d9966707b8dfdd98",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b56f9dd2ce887547d6675de578cfdaea1455a72052fdb9db5e6f1a7f72609617",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snap_alias.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7246c438210b143db250dd1d28e8e3ccf7bd7cec88dfd6f2de630193519897a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snmp_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a6e17f2fd377acfac49d130cb27403e7dc524f66555de7536901cf1252c5607",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/solaris_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e40e563faeadea10c511613f9192cadc08a12f6bd106bc72a908f9d306b9044d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sorcery.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94b2cb338fe113a445527fedfaa0b72c60a4431b041c293268937e0484f4b61c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/spectrum_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4a60520fa6819ea2ccf347a55b9cf7cd83efa45a60112fd934080e071eecef3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/spectrum_model_attrs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ecf012b62d84d3d08d333623f81e866354e1a8511cd0fdd9d91f1a50c28a41e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/spotinst_aws_elastigroup.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8a1757291fd3bd65310cc1abfc801b332821f683b125282d35b981857c29b92",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ss_3par_cpg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f530debb47434e7bcb78b0433fec2faf36e8abf1ba0275800706c7b5e95665ff",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ssh_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27d5f076fdf868b0f5685c31b80ebdd947b6766c6395e80c62d9757c569067a9",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/stackdriver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1229f4c3f84fb38de299df4374feab035d45b707b3ba3fcad69895c8f53f70fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/stacki_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6c7a2c9e54b594da6e4e5124785a94e7fe57c511a8fd7053794234a78b84a00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/statsd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95c849c53213ad5834dbdb9b3cda95fe04a958dac354b182f070a3d28f0b16a5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/statusio_maintenance.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a00ca04cc99c19eab82911ac9cf11cb5df77cd29f96287d60dec2179d53a84ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sudoers.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51c0912f16fca1904d687fd0d08667f980fbaf2c6c8d27acbc0781c6729fc11d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/supervisorctl.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2871ebc56d5cf531d5ce28fa3e9bd3a6138f598dac91d6e53c6b4bce00df9c40",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/svc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd1bad04d7dab8d937305c00ff71949a950e2e5ed33222a355f3b8f7176ff741",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/svr4pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6f917e3db5d8719897198e37df5a0ddad5f6c7dfe1c81a0325804c45118b630",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/swdepot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff26ea548d96299c8447de48651445e124eabb5cadf872a3d5f72e0ea78725fa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/swupd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97c4699f49179b56eeeeeeb7e27a357038220ba267d76bb4f983ed3a17c1042f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/syslogger.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e601d766c857b48517158fdabb3680e2164a57e1f0e6688752a5d7f573d6afcf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/syspatch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f1c9c40438ff0ef5fb8b1ae27a76dfa2114cc379c766fd36f2a051d916342e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sysrc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ecb25783bb73e9863206433c90d81244cb131118b0a20d7ca94532a862d1bed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sysupgrade.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4623dec27bfe0bccfbbb52af3f8cb2496667927d01594c3d36cd222d378aea3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/taiga_issue.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "416baafa504366f7803f779bc46be7c12f0befc24ce5947daceebf91187929d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/telegram.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa0e4b4784a6032107646b106cf5c5c73bdfa70461a638d3d88954a6122d9167",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/terraform.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a73aac1add13dd2adf8d7a7bfe0647d89b1b3392f50b5e7e0de751325fb28985",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/timezone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1578903dc0f212f54d19119a34b4cfdd29766d117cdb82730e5892983424c00",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/twilio.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9988af89eefa69655c9212255d6953ce98306363e2a618d5120375c94da32cc2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/typetalk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dee3dbdafe346b6a8413c4c80e20746c3fff8f137e13e411d423885c58236e6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_dns_record.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56ae686f1a5bb18536dfbec4523144053239bc423b96ea36d65dabec29aa4875",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_dns_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd76c1b0a0455eac1d0cf1f12cb663f76d52ddb5ba191b614873fc141dd6e223",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43c88fff6afd4996f9cc7479e9239beec2b6eb392977889980dd2bfb9efc3be0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_share.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1427fe80978879d0b15c6eaab387bc25c92d3ac795716ada938a11221a59308a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/udm_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "444060481fdea92619bd1ed1e3674c2afdf9b827716f0c8c3a28c39424072daf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ufw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8304e0c613c151c775e58ec4ccde4c58c028c55d2d709235fe0c24d9d63d26e7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/uptimerobot.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d6d32ae78db2183b806a7430708343dfd457ae10651b4723ed3598b41dac2aa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/urpmi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e8484d0c8d63292c3517ab7236afe02a5f9d78e00240493e959d02b0e136a5e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_aaa_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d85b4d6538af0bbbfb3dc2bfa4f0ee2c518cd43e7d767071b1e4c01ea3f6e17",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_aaa_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19d9622bb2fff8e0f87f785b479ee2e59f70c40e19b5b4abf62a2d8f435c01fb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_ca_host_key_cert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c0d983f45283018a395bd61226eddf6a69a82eeb73a1c5aea9b72bde05e32c8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_ca_host_key_cert_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "babfddc7f10a7036af3c1bcce3edb880efa8d25edc7356b0bd3365490e13332e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_dns_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ced9f62bf1b230ddd2be5fcdf6f67554ff71e949830d42ef36f6aa6ca6ba4af6",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_network_interface_address.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cefa0b624d3ab10537105af9c8d745bc928e14dce6742768e4c13f915638dcff",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_network_interface_address_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73fb6d0c2d5bc9660ea3421a63feee449242239dbd9bbc6aa355b73f3c606293",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_auth_profile.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6832da2f9c5d4e68f535d23a4d413a3bf00cbfc91ca2dc3a5cbf03e39c7158b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_exception.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e4fe12585805cdb23f1eace5560a6243995327adfba8ddd90a223e3fae4a8ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_frontend.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40ebfe515835b9552e7dcaa2fc1fc7c6333850602674f700873bb75364744a3d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_frontend_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df902c349d6170ba6145a39fe44e82ec618ddec8487e9ca9c30f100a40d35aa2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_location.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c0e8b84a9927f6313f63f140b50ced9e3a3b64d86bdcc00020bf90dbf45b611",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/utm_proxy_location_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d955f1ef06ac3866d417111f251912f96b3e824002b8bb86b6be4b8168b7f24",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce7c3cd23ddcee51bc45c0b7dd2538fdb001d5fcd2c29fce59bb7499ee75672d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_configuration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c0aa1f6db2aa7a08b1f865acf36aeebc0bdb8d8d824ba952cd4eb9b039b2dea",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a46add11638e96d3d5975c3a696ae070003ade4bb9f0cea8092b2e1682f167c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d0a37d3bb220ea341f739ca959a96620103c20dbed5e1c531416814dd00814a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_schema.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bc91a094f43aa44f8e47885a0396ede89297320f1db82aebe93c616199295a0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vertica_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ada565198074caf40a38ee9014070509869e1af8e7c5f0b04721fd26fa60e3a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vexata_eg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "155f1ddcbf3d213ab21c797a2f7759570cd31ad67488b4aa3e99ac681be9c116",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vexata_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4a7d0ec376abbfb2b7462d1b819a58f735c413d580ec0c5f26cc4532e569e5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/vmadm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46a7e37ccbabf791d280c32800a91761fe62c421757eb32be6f17b1a47e55cb8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/wakeonlan.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d3da1108bce0f40628a63ab8291865ecdb4f466b316f0ce920e1bc6c85e0e4c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/wdc_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bdc9969cc4377b14a450b0a48c0236e6335ea5c4c74a3584c44db219ae6255e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/wdc_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78c5b119393dcb7073417b3491352efd917ac2d22a20c8f93889f383a869ff7d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_app.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c48310ad10d6ec9fe163d07cdcb540580ea31e041746ec785e63975156ea3279",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9dd092e1ec2c318c4a70cb7fdb1a5dd7cfd9c56e4eb1af7dda4932b6099ad68",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_domain.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "712f733337be663d0473deb9ed4a1f5001f87500cc592fb44f652bf25d172c93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_mailbox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "582f8918f64d1714629d56f436539fc0b8b0b19ec6c0a72e5659c3130422b4cf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/webfaction_site.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7847deec3494b81cb21f1d6b62d7fffbd940fbec7fb774af9f42f6dc938569b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xattr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e973a3198180d2d467cd67bb0db529d460db3cae0019d3dd2ee11bd3848ba4a7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xbps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7dbdb9bd073b35704b4be41fef7d6cf4a911ac151a42fb875c7357a08a0be063",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xcc_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9ad104cd73bed87ae0d6c595b577fbb31ce360158eb27d6840ba4340ef2aee0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93d1755c39b647d6a3b4d43c298388beb92f3c85a4e25e2b87e1388f4dd11bbf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a4dea9d8882bb70f9360f374d49de96220d60be2a71a0dcf51a2b0649fa8981",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e82b5d7de8d6bbb5ba2061d50d85e770d4bd999ea54b02691f508f448b456323",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xenserver_guest_powerstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b055bf336e88377ae31041d358f7f37c1e4afb6d7fb7f38a8e2250052a713c05",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bec9b6404fa34fbcb581ff80d4e7b56de09c14cb1c89dc01c2370a4cd2b0902a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xfconf_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee767fd48a7bfcf87c9ec27b594ea67d240b9031d4c149412fc507cdbed0d6cb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xfs_quota.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5d599449035f139802ce8f5573be2290b8b7cb05e7b9625bb1c347df010bd62",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/xml.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38a4462c5b51edeb59f0fcb1a21443fe45b29f40c11787467f01fc316555e475",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/yarn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8980711841e64c8a06388eb9fe3652827bd81e5134f4af70d8c596cb2252c6c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/yum_versionlock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ef5cdecb75e8800be5b686dc0a492b00d75a71436f014bf88da207e9b5c6049",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca5769b13392ec15040a0cf9c96bae13bb029831b675d42bc8b89dee33b15e57",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs_delegate_admin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d3c4c5f5761e5c70fc6d47284f7bfb4d8460e4d0a617b479da0540006e8a6b7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zfs_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2386dc166afad9fc10cb60b6f936b400ae784dfe66da80e54027eff74f99773e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/znode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3501c9f3857eff254e298fee210edc0016e15265c69b3df7fd433d09ee8af9b4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zpool_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0cd14b46a4cf43cb5c5df16f4f39b348001a32ec526a10399433e2a9bde3690",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zypper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1dcde23c3dee87675bfd78d3885da676d707551baa6f0fcc45db7c829560344",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/zypper_repository.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03ebe5613a4a9c43bd9c21de4e58b4c4480fd0a3965e008bd68785210dba6141",
+ "format": 1
+ },
+ {
+ "name": "plugins/test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/test/a_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4be86fdf5341ba0e4de5f0aa85984152e905d908430b940538912f343bb86baa",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f81ed172378af976e35df0bb3f04c7dddf2ba0d774ae9e3f9ff5343075bf7e94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_devices/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b248ed7a0ee8d019cf35f6ab4bdae101594a14f44a095837273019935c38dcd2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae8e81aabb3f66ba309397f22c3805cae6bab9055f5ad8649fc30e6c1c7470c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/aix_filesystem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59d54bbb986cfe992a84cb75ebb4e5ec5f808763a81e5c99ca1359cd548ad520",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8d5ff7e6b55da320acfa29e9e267704859583a09f6d6d950236a2e94ea1da21",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alerta_customer/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65bdbe63acb80a27c8bb3eb2a01debf90574458b33e34fd354e91df215aef725",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39826be9d97ffc924db7198c8e0080cc5d961d5ab361ee7ca541d0bc03e4c9dd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/path_is_checked.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77c1ec332fb7b43b6841f6830a1143224170913492a184a2d5d54e024475acf4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/remove_links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e21159af90267bccd19b3fbc5563e6b9fc42c910373c7966306e4b85bd4003",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2e7706417d599bbcd7183442f3f6d945ba2a22ba49b77c54deb059e4a0c25e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/setup_test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b607f1615dd94d58c30db05c788e6c125a58cf252f50f79b02564355bcf227d9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/subcommands.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f323ee51bb88f2422c8b4bfdef45e0a72c2f85c8f0b74b4b9934394e899a2c6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a1f60aa352347b76197f51ac8a0624fae66c00b3d5a22bfc9bb8cbedd1915e7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2717e58bf4fd1cc1a059830f6dff57dea17ac3cd21808b4631c689e7fa6d24ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/tests_set_priority.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f89b31c36f2412b455dd101e8cb42676f7aa4cce429167510ffa5fe9c900e558",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/tasks/tests_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b276306245c53f3520b2197f13db78620e25c63b44505055792d32632a518bf6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates/dummy_alternative",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f0587c18c7a3279322c9240070f267ea672bbdfb9bbf7cb8efbc251297d5a7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/templates/dummy_command",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b7ba449d88db89018b6357dd82d17e7ed88e68f5ab6263c79362bfed28634ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81cd139e850c7970e6989bf7e6680ee2fb9af16df5d94ab17819b7e786852f11",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/Suse-42.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1de17e1af457cf7e62192d4abcc430bb8bbca2f30a7cbd341570cf81af8affe2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9b439d2856cf7f8af2358ef5d566ed63e221c6b0be52c3bb973445fb6ffecc9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/alternatives/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec97cf3b74b81e243562450a157b89183deb51ec5468957a4d33d7fbf1d15b22",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/files/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5215ecd8f6f0a7225a416d77b980f16ff83dceb48958bdd47bd9d9b38df26bd3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db8a8fd9d62a035862276fb8ce6bae558ccfe9ee6a03a498c561272236301307",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ansible_galaxy_install/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16a2fea9aaec069f0eb1971b262817e63bc08b018d6d58bd2d1c6b06415ec3fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2f66a663b603b80a6a29e63cf51328d8bd0b55ec4ed72ff602d4881fac459fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks/actualtest.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f87a33aa6bd1a53628e05dfa008ce665b9d2456c3b2beb019186cc6970cd6b18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfbb311428cd8c601a6c8200434ba69913f2eb1e26390a95c356b17f659f2d71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/apache2_module/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ba9a096b1076645a41880e82e18bbf40244a4e85e9fc56eb22f4977e3f31be9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/sub",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/sub/subfile.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/bar.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8eb3176f63c15f2d2ab042f2afed2e03b21d7c5f124999aec995cbece8a4dc93",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/empty.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/files/foo.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80e606446c23d9f83abad94c74b45e374660ab3490280db8964643ac4edd31d4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6175b7bb274084411df0cd2b275893f1f79b7e3bd2ac8b5eba9e6ec8296d607f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests/broken-link.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3b2e661b5704df83f85a81e27050f3cb898d092abbd0626f832983d3f08d198",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests/core.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9baef7009f93b2218cb5a87822417edac461e52e4db8f4860c40b46910ed1d6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests/exclusions.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd29bdf0293ed3a2966d1277b3768e11dbea2dba5f494ce2f9d138ebc1dfad1b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests/idempotency.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "662de89e1d89c43420e2a7a105603272218459c7a5e15a6e199aff2dc796881b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/tests/remove.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9404b81ec6b1952f02f0b26ce523c2323720d620ab48a1ecb81396e929d6b68f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/archive/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d316226212f51bb3b14ba67060d4ed6df65fba7dab303a0c14fc2b8789ab7fdc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f428085a671e80a7c75b8ebcee48bb9091c8d0b728339cb4048aeb5790a4cb4e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a128afe01e32295f7f3c70212ed4c297678654bd23fa467872e08a075bb3beea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/run_common_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39e27e087bb9e8aac9151b80dba21ebbf3f88f042afce2909bd175481047b368",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77d14934d753c3ec5ae4120682ecc55057658838334264c081d0b4aa7c9a0991",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc74967b76f17bc0a71a1b6ca79ec18b52d8fc76164776d32d3e349229ee9620",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e83bc87ba3c23d13e5bf07c1564f36b2b4a9f0befe9ac4caf0f15d1f2ae9e697",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f8f88b126f765dc84b49be699481eb426790078d6601906c8d62e968babe520",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02d493df04cdc21527e97a98d996eafb156e382915bad50c38e39a8e37c9d597",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80b3d008adac71ac951c812ae33a1019d061610e381c44124b99cd79af83af94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b430e90622ed573c4828d38722b3719a5b4da016e927c06cb421e1ffff7a993",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ffabda833b1da19316fae7f744da001dfdc6b6371203a18bec0d5c7abb60551",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8699f773b553a84295d9a338df68c60a9ae21a1093fcab8332b8b54dff7593b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d5d6d506f17152688ff944f3a6fcb63e41c85845ac8c54ae96dc7ec2e25dc5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22ff2c78ffe79918c57aa651fe7c388fc4ce0c405fd28cc52fae70d463d48f75",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/btrfs_subvolume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ecf77a2c1e3f8e54ea570e1e133d2de539be44f334cc5b266778b4acea793da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4700abae0f0a4e3fd66691dce2edcf1bdec749ef7d4422abd8add862a1799106",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback/inventory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb1a235326b13446e9c3f02b05c74f5a790e7c6f7a870aadc7464a95b5c2ad47",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df5c8bc89a423e0d59b98455865b54395248679adc904198ad318fe1017c0932",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_diy/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8348da0356811dd51f09b2c0ec68b0764d22cae61a09bc309698098584dae45d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935444847dff7cf6ef6476a46c6b5699e240f499b003363925d82e983fc3aefc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/ping_log.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67a656f5e8b09eb8d9f93853c0bb9b622d1c50e088f7d775c1c74620141bada2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_log_plays/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3afef70f4a386bed710bb642b0c0f8290271e6e3ce9f0ce0febedb703562502f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "885caf5bb66ca8a9ac299f39ee4fec39deae7f43e7f31306c8497c4623052aa9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/callback_yaml/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b68d50bda2b14e4db1a0d53b02bea863bd68eff2d043812702e0b9e960023670",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d23bc639ce5ca95d45f7fd6ca30245522170b0289f9208a275ca2ae0fd1bd733",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad0fd64eae29859a9b82e6fe7287afe12de85fefd83e38c866a90df04a396354",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/tasks/test_general.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1d72e921080d466ad57a977d0971cf3a77bbb50d134f5b7ae210a226d6ee65d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/tasks/test_version.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5638a5553923e40c040e7a3200ca89055d17fc412936fad81f55b1839ee317c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cargo/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a5bf4504067f8e89358bffa62577085c58cf290a2fa790234ecdee48b368ad6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6b091960bb2e3d132701a7545e102d9dde29ece7b14559855742c87d2b915b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cloud_init_data_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d204ca2e0dc8eaebde2978d87efe3fe7339cb1346cd37989fcdbc1a21e179cf6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/library/cmd_echo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1861ab39a9364d62a15466de58a3043ddcdd707f711a46f3dcadce314beda487",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9830d306710fa7218869552f7c23e0c62f1c8d5b94111c86204e9e4fe6ead182",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07e0cce6b96cebf03c15af6658c2b02af21d9ef087f9e5b4a4500313baf4a592",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd9cad1f291a0205aefd3eb749f5e5eaae28a97772c01a506605928ef2f4959e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cmd_runner/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c28953826fdc5ca7bcb519d60bc7d1286b038dae761b63e43f6f0a163cc127b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1795c9109c37a127bfb605b3275b1673f59476c3c832979475b3c4f49bb008f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test_connection.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b592195fed688108e3ab4748d9140bab2e28d150e0a4bb5a128f3c11caa7b02",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f560fd765c11ef6e6d01fce8aed19eabcba4f099185fd58d90ba49384a99dbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4acf012c41df2743d5cabe76460e1a91dc9bff25ed203f6baf29194c88639ec9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_chroot/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883a4105498b239c0b1fed67f26f73e995db3d4b639da34e1d0944f158fddaad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f560fd765c11ef6e6d01fce8aed19eabcba4f099185fd58d90ba49384a99dbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_jail/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9de6087a256125670a2c84dbb0e636b38dc4016d8ed2a3bf45e335ad4a0e971b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f560fd765c11ef6e6d01fce8aed19eabcba4f099185fd58d90ba49384a99dbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxc/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05cbe20fd1747bcf95e8979ab526085a4bdb515a028060f3816b52398d60d7c4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f560fd765c11ef6e6d01fce8aed19eabcba4f099185fd58d90ba49384a99dbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0263e0920568d58006d948e60fffbfb6ebb50c6ee0099f04230cae48e97ef416",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_lxd/test_connection.inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "181ddca34b244da718b8697a4b5231f01a518362661120c3f103cb2638681d45",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b04c82c870ffc2902a90819cfd91df99dd6c9584734a2a2e646b1f804238262",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f560fd765c11ef6e6d01fce8aed19eabcba4f099185fd58d90ba49384a99dbdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "027a3fc0208875a1641bf57030f73c5232862df7a69c300c5bdbdddeb1fe6e9e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks/consul_session.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1b518f00b78109f1852c190ba1f870461045e624d839e3e548f2bda45efade6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9f3d40eb77ab944532fbebcdca6fec27274c30206d295a918a12ffa5e42bbc2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/templates/consul_config.hcl.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f039dace9e85cd5cfd886bc61a87d7ceb1fe874f2de7ac8b25a444bce137fc7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/consul/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6493ba925733dedf318732cc11f51baba9de20e040d7c18caa64866dda9004ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44c30a4e8597ecaa3b92b54391a62e9da4dec50e79a77b5ceb74621edf176292",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aae623422d5854f6cf1ec08aca156c9f9dc4d21098ee61f6bb450cfe73328d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/copr/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "909f100d2e592d8a33976bd904b0dd7f15087cf0d27ba3a50218222844a2a5d9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "653dbd5100adc070f54638e9c11add3e9b1164d35754d1309a2057fc4f78e2ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cpanm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa32cb9781948756898d6e4f7972ea93f1b524735ef0a6c613f20a50a55b34b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f39e5a59e4f06522bb8174b960add3eb5f7ada9c839d3cbe8df387904de47d5c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bb022cc3ab2326832e92b366f945147deed3d7984f41bad54caa2217ea455b6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c40b0f7042457619ac57d9c559c473560a815bd933e09cc401679a74b5675d0c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/cronvar/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e001f502217afdf6fb95f6fecf20d071ed8fa78b40f7baac76491fcc12d3172",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "493434966f881b971f76b928e152d569a2540428e14cd6ca5fcd2bf1ba5c0863",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/deploy_helper/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4762e4d5adeeb09c1dbca4b156fc16c19f786f55765669aad0bc9f3b41edac9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70ef5b683f9bfee8216e52e0dab9f28dd1c1223b6c9d2539c14f9bb8451de6a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3bbd2363a408f54c810b848a6faec8ab62b5eda0029e4df0ac164942c9bf4f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f9f7aca6288ceddd5d3c1afea0656b578412b5beeba36514d221177b94b1345",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/discord/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b28b0d30b31e9600c688d7aadf9e17c34cbc81ed689a7eefb23207b056e8a34b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "570ab1f24fb152ccc0f4373a6833196e9ec89c251f843c8ae4e7ff6cf8b2d682",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86f3a10d3c8e4f272a3443a7e1497209b732faabf71ab0237edc520ef4190610",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8509ef1d5453013263323a920562ab1f27d997cec1ccf3edf1a190680ca8a377",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1100c12055b293000e9ef7029acbde54653eb22013ff600e22009c6832f89a6f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/startproj",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/files/base_test/startproj/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/tasks/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acf2895219fc5b757396f9d77d4c878c5b36a574043dd9fbb847937d412725ca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/django_manage/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21a4a6fceba1d357b08843ab08f2cf83d707e4be7375a3ab6368f07b0318f17f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/tasks/install.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cbf056d4c022dca4cba0d7adb4b46046ed332a0c73ff5c84a5bc46312bf8617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7560dd3ee61bc95f910e38a68936001efb1460140bcc4d589dab99e1d2d4b439",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c104d8fd0268256a3016ccef9d6e8b60b2e32639e2b111009e6edfd74634ecd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c729ecea3cf81f1d4988c67031831df9a8c6478a204e7a977d829f597acf9b14",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dnf_versionlock/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f755a442e3b004ad7d328bb2b389ef3997e303fb60d634da920b48800f653567",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "923f6aae3e384df67fad27dfbeb6a66d71065677a07196e71d9409ad5d45975e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bfe55ef8510b3c177de4fab339de9cf563281f233234a0361068ce9c023bb14",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04cc503e5dfa6d8dfb42b9dc2e25e84e8a5c6f911dab1ca6ea8381e66311dd4e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8129c7ce44387530133abf87e4f3805aa664762b17fd53468dfc93a7a2c1e61c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/dpkg_divert/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0091fca388a4ccca0ec5c843132ec34c039a4409823d0eaf20e68d9dc320ff93",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18c639e4a6cfd54ade7901507cf7b621c562c0fca93f300c905895b8f729b82a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66128fcb3eb64d1678cb2b07a9fc26e468cd58d65717aebdfab2303d38035b5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/tasks/run_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1afa57addd04fb922a0f88bcd11250c322a80b66473609fab74cc73ea538833d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/etcd3/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c848396ab50443ff89d6f4ca88ed935253fef1da06ea0832dbcbc593e315a4fc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69755bb7b737bdaf63bf2f544342eafb201466eaba9f0242c53f13dd1a9484a2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/basics.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae9442ca08b3ff58130ccf852fb1e0913be00393be9487614c978883b73634a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/errors.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e82541316502b4a7cd61c0272c88d5ca517be416c31e4e186f5ddae9d9593c6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/floats.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2d3ca5dc862b129f8be9327605c3e20685db3ca47b07b0a5333bd2d15cebc46",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a929dbe25af3e7f54a5df9134397b66ad20bbd98564f07c89cf8c7f39316ac7f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/sparse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7bd1d406b44a6ab39f6dbe8c422a4eca486647160fed701cc79ed36c166df6a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/tasks/symlinks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1cc2dc2e7b3d270b3987c1ce75313326bae6a91fe028056899f77b4de9281d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesize/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c4352287b3da47e11d4603581e8259fbbbdf58bb28f601c938308c512d961cf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "757cbeb22a88c3cdf9157a7391a7ac69f2a7ec1203242f89e326e5d6c96f4b76",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9898c9f42e621f516997436dc8fc4b0631fc3b7f03edd359c9b889ec329eca0f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/create_device.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46e2e8bf430535e412d5314b12fb5cdf7d960d14c9eec992df0e683a5a110015",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/create_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f389f6d321b4294f8b7b6d2a76879557b7321817c9aff668e4da883457d860b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/freebsd_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad7a957dd6bd1873ba8f009cd70ecc8c65af64ae6308fef9aa5fea0020126b92",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b21b03a2123f134c171bf8c291350d5cf34b82c9f70c52866ad6f8bebb45d718",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e418e877926abde2ee1a392261a092101d70424a5d723e3c8713a39975068e38",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/remove_fs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c95c4d03a31ce4f96552667e2ea3e6126dabde80255c48035d05830f345f79fe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abcc4f8bf8d6df58581ff4f19e47a6afed3bcd456a7f04a66687069389c6d8a3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e61366a8fac58d3bedb5dbb3f7752ccc4d5b4b48a825518b87a221ab76df789",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52a28a517500470385a4bc55471c67eaa7c693b775a4e4a72da46d70ac26b128",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filesystem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8644f2ee7190eb0bb4068ea8241f50fd78d67c9ee41c4e81523358278a35f5ec",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_counter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_counter/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_counter/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "380ac4584e420a9c9292f0d3406755785ee8bcb0248d371bb9b64713c135986c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_counter/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ac18c541c08905526ae4a55397dad7f3f7cbed928f259f864d2511ae3af33d4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "370e79ab6fa9ddd49544f28facc1c94fc720419d9ad27b806b1e462c024d451c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b2e9e5929657d110668af9d91a396d5b28efdc93d49e0d53e1bf7203696ed42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_dict_kv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ced129b6a78aee00856c4b913a212d9b0479537b01bff5d72481b5f9c3321d71",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c592c8ea545e468883779b6450bc5e819dc1b68df7098fc6e776d73dd9f017d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_from_csv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c28cd48f5d0d3cbaf14f6da8f3c40309ca928c50690e6149222dea2c55ca872f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f5f4dd5c032eaabe89f03069075df9e2548a9764e018a5f2c7efcc90582b157",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_groupby_as_dict/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "370e79ab6fa9ddd49544f28facc1c94fc720419d9ad27b806b1e462c024d451c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95b2820dcb6905179a2fd0c47c981afd38d514a05fecd0f987186c358b0216e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "232be222855178a531a91a0acbb6764c33769e99acc630b7ccb22ec7192e30a8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56a26177914abff46006f224ea1e4938a5e73223210ff2197ae97b2c10764c3e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_hashids/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24df12656e61c801c531bb61f17742d4154ba9c4dc348bf379e56575318ea2dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6ec45b92da07559477db4bc5622072a194f095c12ca0a73862236952f8c0fb7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e9844ad48a5f30a80790903f1f27f13c1c2676e79c50d706063bdd25e5b1da3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c75e11aa221c85afce1b539c75a876d59f510aa18680cc2054363f912a4024e5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_jc/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d50b4a5d097e257c93cc359dff5b15df18824323f5fa2646a3ad9d477d0d3ff5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db404350b793baa66ee8446013a22d7f7389c70a815ef754ec8691906f6bc427",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43b6d3bca0307d150558c2c450e5610432ca80cacdec499b5cab317dcff07ccb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d550c69028ec48b216c053bb6d7ddb9e0e57fe93874138b09aff2776789444",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73ea6b232ecbbc4b5f5df061de20e1bdd03bd24d0983feaaa21b5b5b7e0eae18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_json_query/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0687a2996f0209a274d955428556ca467899d168d22c1da28a547175ea5a5f9c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63e794f2e180ff2af4a9c30230baea4485c1470ad2493d1959ed6c4686b89bf5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6e346d033d30223e68c8bb8595883862a04a78b08ab413c8a42a7aeb3eba223",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d60e9beba1f5ff37670bc66bfa22b5d71a3c8db15e28aa8d5bac2b027c515863",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51c77e4c8f2f81a16cdb6cf444ae7b2fff714e1707540c7d1a2ba2bd3ed78d97",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_lists_mergeby/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_path_join_shim",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_path_join_shim/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_path_join_shim/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b5b1d83592e2666175539dcf95fa0905942797e0ebd39c559a7e3b49cc36665",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_path_join_shim/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be8d8b0ca4ade5971990fe8ae0d00b52a1e6abd717be6ea768ccb9cb09a0807e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49a2c6af77c2417b5f7bae88305a637a42cb34d4134da1937a85be9c20f1cf83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_random_mac/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d550c69028ec48b216c053bb6d7ddb9e0e57fe93874138b09aff2776789444",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa8082f72f03f85c8abd29e41a2b541d789941822bf4c38fdba5bfde8fec468f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_time/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2117ca3b25f7866115a48450fabc2e9665e7d892926987d136736aac2f6f3b8b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f581bbb483aa0e065fe5ee4f7ee7e4321cff25406a2d67591d5eab14ebe7ab0d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_unicode_normalize/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_version_sort",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_version_sort/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_version_sort/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9be8e3bad35f20b96e30dbb6bc9404465da251ab661bb3b2fb520dc22ca78466",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/filter_version_sort/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2908fcc4953a67530ae1d897c66e847d8281e02299b5563f9c751db06ba10036",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/files/serve.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "771fca7f6c37fed2d511a0ed290c2bad65c5c774bfa4093b619026497677d69a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1ff0681355e34b3734c94e7db87df141639b737b74f654b969235b5d50a6579",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daec12df262fbf5c01327d4ce20cb2c2c92670d222ba6164ea2b205c1e1b6166",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14f68d5120cd310e01b275b1c6faf4bb570c0964eb4e83a1673ad194f51c1f97",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55ee7e3792267ad187d2a59db02da66747324e7e52f254c3cca4cd98b63b4d1a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07cb037e4f5c2d08971d4af2b80d2cd6a03de40e4b41ff02779e84d002131fcd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "476fb83199a049892984acabc904c7d87b5eed4df6bcc322ea583e2bffe185c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1ff0681355e34b3734c94e7db87df141639b737b74f654b969235b5d50a6579",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "252e68b99abc4b36a511efbfbfa6f472cb3f7cd5381ec4258ea58b217dc0047e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee08b7f4253845802c9c5ee2acd49e481fafc0bb4e217fb5abaf5d814af6ea40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc647e575f7f2b048f91c7daf2ed73337a756f7f29e03d0418e2888e02a87490",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a69c5428653dddd8ba696ed199a8bf59b2d6464e594d4bab5438a6dfd5c41ed6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/flatpak_remote/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "476fb83199a049892984acabc904c7d87b5eed4df6bcc322ea583e2bffe185c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e450163317fb1b3d5e5d52a63bc6564569f1e2f751d383e09d924285f6aeb210",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks/create_record.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "446bc960f355726f7738046f5265c5a7d6414157a548f6f7f4c5d59ae8220830",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f36958dc7faf1098c95956481576b67f757323f3fef30112011bca0177d4b8d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks/record.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d38959532bb2557c1e79aeaed7389ffe5e6560dfc23ccba231e3c04cfa96dab2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks/remove_record.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3464b871398c65dfad8aa2b042fb249dbc88818f8405154194926e920dd8ef19",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/tasks/update_record.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a72b53a9dfd67149da89b1f827a0a922fa09fb56835a6b87da00010fc1c40ff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gandi_livedns/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d1fff174d8d3bc6d66b1be0ce2c6a6b5b4ce8c7de4b01332bbacfd91b294554",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "020f2d3391b7333827fb1597b69765ddb2c446f74ae4289940532c9b19f0339f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ae61bc80a88ec0976b1b8024bf13f7367cc5ac58a7cf89ae5ee6b0dfc7c59b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66231a11b2c1e6be2feabd14a1fe91c2caaaacdbb11c03916bbf95e5062361f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "174dba49f0ea40577db0969c2da9beeea44bc524309276e74820fc9fffe700f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gem/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "743c8078dc2facc0d71e8e9a12e1ff72f1d09099ba83676899bdbc6b418104ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/files/gitconfig",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecf5a7fd8075b61a600c5ffeedd56209f6b50ba1cacc3e25b52a7d6ffc930058",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4238c1a4f920c5a4f3424a49e4d4678e2f4059d076b5b8f461b1b6553b810f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/get_set_no_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f3b0fb63801800334cb86fe025b610ec588d37037579469d6c4db894d704769",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/get_set_state_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62935f49e449ccf9d89ab9a6179d0aa62001f419e06b4392255e8ef803d3e309",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/get_set_state_present_file.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "741853733f1628c55e98da1060ed6f3d3ac55f8fc301382d88d31c4df1c20029",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1d8282e63ce7b7eee45065c80c1fe9306d0f5e59f2a6275f3f9b59ac7759565",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20829e018f530b1e465ad35a6202718a5c99f5b1ca13014eb5fba1b8fd667f1f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/set_value_with_tilde.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7df2b83250b9ecddd3adc61e334de7d006fc645f23ea4ea6782134e30178744",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d65b28d74bfbcc78ebf01b8a9ddd6c3939c50303153a84d49b5a3c05f320a78",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup_no_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "969d0059a8251a9e62614dd70a7a86c49a2a137cdbc247b06f9313bb5e5c70c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/setup_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4075b78d45d7d7c218ba7d0ff3fc2662357c85bae28b16fcde52b76a92aa3e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_check_mode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6623b447fc62f44d4fb1351fb1213b15afff627684d00d85a90b745fd38a283",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_no_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "844b3556bb312cfe5bd4afed1c10b377017c436953721748f050c6f56a5fc0da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/tasks/unset_value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e31339e0f76aecbacc8839d3755b7fd21fa1882a8189d5cc79890e45d65e559c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0245a2ed757c1509b132c83a6f272d27d41ab751f93b5765b152a62e0f30778b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/git_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3e3401add1aae4e9d51ea5372aca862480b2b36f02879a8d6a56547710c3598",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcb2e6dc4c94aae4980161550893d24c45bd730ef2942cb3b6e210bea86709e4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de9bf3a4d56ef3d9324400ae5698234ba79e0e22b33f2f05418c478915e92f0b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/github_issue/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32cec593e0676935e5e4a2ddd4ed59aaea210cff8ca9dc31dbf2a254fb87af48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e513ebe996e0a9e65904552f04016f9c10e1984621653f7608125c8d5155ba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1843ca52faf29659fab3087948bf090da3eef0f18703c4262b8408616f62479c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_branch/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65bdbe63acb80a27c8bb3eb2a01debf90574458b33e34fd354e91df215aef725",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "707e5be3c1e2ffc63b2115e91ef805e3747886e9841d7db3be83698742d92fc1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a328fb3dcb3f8644f48c1523fd428f35ec7e304c8faed38ad524e1b3f1238c7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_deploy_key/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b0bfd015e7b0711e70df9c77fa7669ddd494dd343000a6b8e75a048d6837f25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d82ac7987afb46c581c00f0fd7f42ba2f50af67d48aa38e40c66fdfac81f6c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb70910aa5e5b241253c9822a089c87e4a829b5178d56e0d092a7d4db39b0a4e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06cd2558b0b0005bc55c3d9b936da66841131bf06d460b5ec0f2f659cc0b6b1e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_members/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "942282f4b5d149507b70ee6e11c8d1a709abbf14d940b0308c2320a6255b8e1d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_group_variable/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "870c33f6682aa2dac5dc17fa9af323a55bcbf158f7901f7c5c4363259ac0c065",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64baa056ba785cb332e0f55c51ba3d3ca89151056ba0daea8edfc6f656db678d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_hook/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ff624beb09e24803a5268937e22cacb0ea61fddffd22ec030fb9967b6735b87",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ea42554d18a543fb23e452fb71f3420e10e6953f21cbfc9cfdffb74bcaf36b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0c0e1e6ad7ff2767e4b9951d2947e83f118b73e6660e1934b819ae13c1268af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d626402cfa4253a0a7dd84ce002d240a3596e7819aa8793e7572c6d73b43a5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_badge/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c09bf277226549e964ac3fe5bdd2064fe97a99c0afa4791bf26ff93cd3546eba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69b92ff83548344902bb4e26262fbb8dba70330e75858f8ee4781ba3ecd688f5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "598690fe17d1389bfaa9a2c60390f9b23623999426be1a2aeba8483e16ac99ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_members/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b0cbb3deae49a7361e96ecda2ab94eb59d3f3eddce961ef723c5f09f89aecc5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_project_variable/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f2fbf013a2a74f6f947dd601970f7264ac7babe330cb43d1adeb2a42603cf0d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2aefbb3855dfa5e3f0ca00327b819fe231425fb91849f323c3217be1ba3ab16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_runner/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd771b740bf77c230852717da76e09e8fbb14155633e26e46328cf69f78144e7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "709f2c2ed168cd8c5a6f53e495e996e573cf73a4e0e66df30f187cbf7fadee8e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/tasks/sshkey.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7237e3e7041c0a4819b3a6a4664bfe1dfaf437ab29b9e4112d389f6191d9842f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/gitlab_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3447437c5aae0231cf623843756a50ca7e67b07d15fc1a9e1b85c216edff835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/install.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a15cadbd0bd0dd4ec443774831bcc33a9c681b4465c9913aa2200bb7c4c5ccb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10db8542b1a802d8b4e12372a8dc99f80b3fe35e761d5f378483850cd3967e17",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/run-tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef8a980525cf395fa580811578a1de72a5e0792a5cbba88a1592464246609946",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/tasks/uninstall.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5789b918e327bb28d0c9f1cad73259ef0eaa1e978e3b5009047d4663bea02b67",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hg/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bc729d7cb368174f961782c056520badcf7b5f3af57adf5b810c1d4a51e443b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3b0ace4227b32e8e9a7c3a35da793720787e2617f21621f8836d88f5d356b80",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5891f82ed92622416753a31732b6210796f5eb1e4207b0b21c0ac86c3fffc6fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10113f0c29fb19fceb0052c5aa8baf66ef5329cefb84176bc27e2e53bddc4898",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f36f5c7817d7fb5a7076baaf47b0ec87253f5f03e957a41fcb8e351b3855565a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homebrew_cask/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5891f82ed92622416753a31732b6210796f5eb1e4207b0b21c0ac86c3fffc6fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homectl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homectl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homectl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "790cd18464995f8121ae9d47c6e9f353fce7b9644fe9f401deaf6114fe860477",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/homectl/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07576b5ab2023931b1d1a1f7b6b9986c42ec71a6ac26b90397228eb0f7857eb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1344157c3410b856e1c478baef95ec8af2e05467f22c8e4e65669f47f4093f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_ecs_instance/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58700a8647bf3fc00537d49579ea2f713847a252388b89d06adf6632ff50b188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_evs_disk/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24c238c4a8231205ebca90bf907bf4df8a8ace2681fe42cc45ead613f8c39852",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_network_vpc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86c2dce4dc619835d934c316f2240d0935bc1d7da55f38288fda48f56e8335a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_smn_topic/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9a982aa9c822a0ba763910aa2fe1731a89f8339cbcf5a8bfc33c5f00b0a6e02",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_eip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65f860fae3c16ef79abfbb8460f70335468170cd9f52ca2a3e6fed0e7a5aaaf3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_peering_connect/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0272a44db290efc7456888a159875b96dd99e954607e06d33cf64b99fdc99d26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_port/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e3b73cb2cefdb05d66ba45a092c9574a146d327cb8de1ec058dc8c5def20128",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_private_ip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71f57edcb74caa2eac0ae70cead340733dc5f88a5274ae4f390bceac3d91a0fe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_route/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ce2405df48984a19381be834c86e34e6200abe04fe07c0526edf4ae3e49d312",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec3cb54222824c9ee1a272f7aa39e9c61eec7de506bd6045e84aa39351357f2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_security_group_rule/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c069821819cbb4f8d811a069d646426c6c2154871ef307b87cb948b0502e3abf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/hwc_vpc_subnet/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_command",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_command/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_command/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bda2e235dc47588a84a3d30c88a3970be95495a6bfe55ed0b0dc522ccf8b5c95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_command/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5db4b857230496ec51dcf741cd931c3b65584d2a31c52d00f2fd2a610c82a8ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcfdcb841bd6ce9ebdf58bd2454c95ce99b87622be10c8c96d7803067370e7b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ilo_redfish_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4cedb2c9cd170271e4efe0427c84906202812fcd3f083447c416acec0c04ef30",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fd1d436c1db7ef395af664ddc81b61937686feaab0c0e4e60525463f7141d62",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a9ce61dbd8caa01cfaddc9887bd265613f29e9f12b53ee4bb2ce6533254a862",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/influxdb_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dda0ecc84fa42fbfd6effecad156ffabcb80badee69f435937f083682baab30a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/tests/00-basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20eec1e8f2a9fc6a8f90bf28afc7eae22d402f7d5022b3543a05d495b2f3e83d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/tests/01-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "805190da6cbd6ed4738f32460f6163df5c3ebf4cc5c0c00357f85c2e7fab377e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/tests/02-values.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "432e63145dcbf7f12c4d08e01f29e9e2c5264566eb3e72843dfef53282bca95e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/tests/03-encoding.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e33f03a4e2034c6de86abd670aa3a58c340835c6eda15aedafeadf3ef77650ae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d957ff276ed44fd63d2202fdc938551d4379a951cb8b291a5558a1175c482b05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ini_file/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/files/interfaces_ff",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6582882d6556e7dd0b114a296091cddafe1f002bbbd9453f621ecb59ee07b57",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/files/interfaces_ff_3841",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92d8d88084bfa7127cb51c297e5e6a964b8e66a278cfa16b721cffbe7bef6229",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fd3fb5db340b283f4dd25e00f82324cac5d61539c008884ea89378652360a7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/interfaces_file/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "699e17c5739ef99f320dee0fac328af3ee544d9abd1e9acd3a06de2170a164de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c91f7ae6b4d3c05962aaabb8d6cda61df3ca611e7f7413e06223f2ea269eb9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipify_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/00-basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d79019b570d6d780c7e6d0c13f62777e9549b78b462f8eafb9dffad96e19d637",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/01-tables.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db7508c2c90655c84d7d3d61e2d13227dd8b13a569621d6d2110f092c5bb504e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0381b7f53dadf98e4d02e6c1b750e4cb9b0b69f68747a0b5b3fb002910b69ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c349224883470255621679e4fe285c67b1a2538b135b57d75581a8f3404eb40",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iptables_state/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11b1d4c69d3ad44e40411e286464b83c1dbd9235d7a14fec01cf53797d7ecd99",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90676412369b958d9efd29d160d05883e5736f26ce2e88511b475416d1d192fe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ipwcli_dns/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f62cc63e6960d29ccc4d2defc877a9af93b346d0e04b0f191b81256da66a2019",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test_dir",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test_dir/test2.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fe5ca691fac26d85c46909860a2751f389fcdbc09d4a4862ac01c47b233e6ce",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/files/test1.cfg",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fe5ca691fac26d85c46909860a2751f389fcdbc09d4a4862ac01c47b233e6ce",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea80a346c2f550f390c3fc0ec508dffcc2fd226cc3548a63941f4df31e980bbf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7712f4eabf5ece6d817494fab69ecf025816b4a42a8887f0adcd1215324ddc5a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac2a373f1ed8fd80675d045187a215286f18d938b76a330b15fb24933998bf43",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_create/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bad943e25b57511690c80988d8e4695ee980395e52cf9daf6fb93e6b14b2b78d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec5e7720d4db8482d249ff61aeffe809c765b366e3befdb2129c8a3c710edd67",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/iso_customize.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "873c94ab71f376d8dc828a2e233ace7d719b170a5728630aa2de041169f4fd59",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/iso_customize_add_files.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a45ea1f1382dcb42a05b69f57ebad865d9d1b5e97311ce6061eee982f6ba825",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/iso_customize_delete_files.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a849009ba0caa285c3a89e5447c6a86bd760ff54c6093449c0ce09cc8419a27",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/iso_customize_exception.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58d34a05df22d1b52617815e2b2b425a51af42f809bb93a3cb1fa123966366f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/iso_mount.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "469e1bab086047d3bdd8f2cc9b4e18d716b9ee0ad8910cf7b324039def7caa7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3ba14cfce1b971beeb766368cfbd45fce9e01781a31566fe8e98dbfdeacb1c4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9616abbaf2526bcf38a0c1253a2d8d1ab2bc525292bde8733807e962b6ba56c9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_customize/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fa272fd0b848965be407c1029f0091491346121c82ff3f801c5b4a0fd02a067",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/files/test.iso",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c5a1719603516790e3a007e17f28e28fca7eb5ec8d6205692e906d4239fe068",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/files/test.iso.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/7zip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdd28142f75ca82b11d10ad3c26529a12340406816569f912bfa367bf84f3825",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "097ca4d15d29bc84abb0c8debc1859d33bc11cb802a1b59860764622ede4b082",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be208f94453440903ea44feb15799197d78b3bb6918d7a5f15f59d7ab196d04b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4699d12a100092486ebf8daa5b9c3e2e01f0efe7a3ee10039a4525dd4d1079de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "798b727325573fc234fd003a56d9deca387d2a6a244cc9ad4721d85b64f2b516",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "798b727325573fc234fd003a56d9deca387d2a6a244cc9ad4721d85b64f2b516",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5dce2055ba690428372cddfe5b25f452eae5cc3c6075db72eb809aa3bf0ec79",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "798b727325573fc234fd003a56d9deca387d2a6a244cc9ad4721d85b64f2b516",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cad09dbffddfd25b0876f1da5514efa7b239f5e66a96488256943d70c3ac047",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "880987dde7d36b8c04bc748c10b422971248b5b25ebd3328a6549ad4e411e8a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5dce2055ba690428372cddfe5b25f452eae5cc3c6075db72eb809aa3bf0ec79",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/iso_extract/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eeb6e9152c52bd05f9887bb2587664b23db23ddfb846d6ff4dbbf3ee4fa769a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6480d10ad3c099eeebe7fe597e394f94785cec1be70641d9941ef0735cfdd792",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files/setupSSLServer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a70f0ef4ee25e120cacc5a56c6f1c79ea9d4f0d9783b3a20e438f4f256d2e0a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files/testpkcs.p12",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "194ae4f77eeaf175ebefa471eced93551d2b9f0a0018e9bfd0a24cb0acc380da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/files/testpkcs.p12.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdf757041db0797b3c0b0825c274c18cb423fc687460f653edb4e61d70a61c6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da3b11a31bc81d5ebd3a611a673e5b50bf42b8b1590fc68bdfbca4fb1480b1ba",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/tasks/state_change.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "627a1956eaaf4eb22605dfbe350d300843f18788b402c7dfe3c3d3f82894629f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_cert/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8f75e90e2d1d80d3b686b9b835939455728d1769063924648561d883e130434",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54212ab55da55520977b3cf94ae8436ac105efbc80daa9b25f43ff4ab37bb274",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdf757041db0797b3c0b0825c274c18cb423fc687460f653edb4e61d70a61c6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0e4646b9e9d40fe956cb6711e6704074a731684517a24607592446d2da663ce",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6d08be91cc4cfdfe5077bcea2ae63ddb000a65f8dcc04738e28d26e43e0cf97",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c53088b01f27b395cebf1b4eba557e8a6ad47e19baba37ddc375f02f10439a7c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/java_keystore/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8f75e90e2d1d80d3b686b9b835939455728d1769063924648561d883e130434",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3aa4ae63838cc0c455607d4c69c0cad8eff0f5016a1aa3f1f785d29672ff4f5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks/jboss.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d4a4ab8b66ed5e4c653ce0463cd79abf56ea7e1ad23bfcdd0d7be6c7d078999",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43de3cb185df43ea3ce6920e1b307f27a87783f05fa8e6cf21f9c9277b288f34",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jboss/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2668c70a77788ae86e13bfd8b1eb53dac6571314156238a34ef57199694d4620",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79146a96f1505ff4503c4c28918d469401717c3dfadb46ab0d64d32de3893808",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aeb0137db2b12b9f22fca37512529e9703372534f2d61c74f6897347c2a42534",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/jira/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac1f4cc4124bc3e83ad78c9064a2ad4874a6336439a8d1c76297b689ee50d426",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/tasks/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/tasks/files/kwriteconf_fake",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "797de330afec8eaa29dfbae45bc7fe915d680c8475178e7fbb704f0012fcdd97",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1f5906f89905c34bb7f870834de0f1e46e796f90f3335554c8a4038f5c51d82",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kdeconfig/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/files/blacklist",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed306a3abe4ecbbd23e02253d280cbe0908068b079839097d884abd38152b9e1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8ee51d3dbb085a368c392dcec89ffcaa67640ed29a64bcc8dec44a81e4a9b12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/kernel_blacklist/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4762e4d5adeeb09c1dbca4b156fc16c19f786f55765669aad0bc9f3b41edac9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3307c958f1f7c2b4b7da89da647192e39ed31f3ee7f1f1461ab69ec057ce84b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eff28a6b76beccc151aac7d4da652d00566c9e6b9240ee60b90060075f73f072",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_authz_authorization_scope/readme.adoc",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a06315c33a9a4581d3fa90bda3255db1662bad238ecdf131e1a211f7a9153a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38a55d499da32e786f6a6d91c487399bc532aa2d6074e4ff8ed5d69b1e16a21d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f5745bfec883f9a58adcf2038d3bc3b65ef723b9cd28de29acdadfecbc8bffa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e79f52fafe919b9e05d3ad339d992eb6106f44c7f0f04f769f62347a124b709",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_client/docker-compose.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "779896e621ae4fe9bad52c9b65fbacf02f1c9c76ce75589ea4f43e52382bf5d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d33c970969d6aa9536b0f0cca8dfe1a7441a14a58b8e9067fdaf8c1ce32ce695",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce6b622aa25cea60b3c05fbf0638c90aed7f2bdfe219a9e196a261cc61474db7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c14472dabe18ef89aa04479a2c24127db1e113f7eb26bdd385f6398935f19bc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientscope_type/docker-compose.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed55e460834214d0856852081fa0cca7bb4bb66cc5f9b63301593e1ffabce542",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d001d732082eb6226515d318575502351a08852546e51e488cc1995681fe3ce8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e871edcf169516f29d6b633b6bfc27125b1584d59e6f57eaee32f178cb8b7018",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05cd82a85909e6f54dfdca89f6b2ad83502eec1eebeffcf8fba0900ae46634cb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "779896e621ae4fe9bad52c9b65fbacf02f1c9c76ce75589ea4f43e52382bf5d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "185fd97e72b311d53034155c58bef5cf4bcdd7ab100cea95896c0a39d50daeed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e871edcf169516f29d6b633b6bfc27125b1584d59e6f57eaee32f178cb8b7018",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0f8a07ac67aebfc762eef3cad04a6296780bdd287969278273d681f5d9dceb3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "779896e621ae4fe9bad52c9b65fbacf02f1c9c76ce75589ea4f43e52382bf5d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "958e9f1297d51a80ffae62a3a8a020151f16ea627ed027ad72176783e41d0eb8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca3467b6c8a1a45eef3701e9b46853743f63713974c796effe508447650c4347",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_group/readme.adoc",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a06315c33a9a4581d3fa90bda3255db1662bad238ecdf131e1a211f7a9153a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df76d0849e85e9397055fabcdd56433132f08b66abab7a5843782c58ce6a06d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fee747449e0a9a17334a14a0b532a5244609cdf34c0a9f4d16d26e49d70c233d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_identity_provider/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3eb6077a613495dc72c150bc7d266a3fd4545754c34509358fb55d06f4f2b9c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ec512c21b18adafebfb9fe6603537768511a4976713c6e34abfa3e0538e482b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_role/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8bb00cf66e691163574d81e1728774e00bc4c98c59a6aa97b3668184065e701f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf09d06f70e7264ada0ba40687d81d396be3f8591da61cdb1a6a259a4cb68992",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_federation/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "354e86093e98cd5780e19b404df1f3537e08aa0179c90e0980323693e18164b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5989efe1c03d6b2207d441508f19c9c19fee9b017f493c17f087d5bb5fc6d8e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keycloak_user_rolemapping/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "853d92162d1306bed7255c2ce3fdde4e7b76e97c15034a801ace697395b79839",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f483a04081c834b3326fcaeb4d47b5674b794af79b31842c968de92a74b46fea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3de02aeddec4bb471bbd479697204c3f23116b09534560376a9054fa856e1491",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/keyring/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/files/ansible_test_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "880ca3ccad1e17023def883cde5ae37870f00e7014c78538cd1fe3632f79c107",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_reload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60305e7dc1aa5b9de2c2cd369d6e465552f8b502976552516e0b8d7467e3f228",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_restart.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a429dcd60c84aed1dbf373b60d5768a777dea783368d9d4b9eb941e1e0be1d62",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_runatload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5726ebbcd541ebc18733a3274214360adccdf45340639b4d262e2dd316c947f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_start_stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d1df6cda3b61352d9a3d68a9bffbd1868e7630d80c9df37c5678078c4929ee1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_unknown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fc7c1eb9fdc923c12e2bd5bfeebddadf34ea5227382830529b7445cff57865b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/tests/test_unload.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa6acf7c01cf63c501fc00ae38d79b3721cf0eb1cdce581063e18ec0d8201650",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a5da0c6a22c3e9e9305c45cdb21e7d4303e78f4a21e7fc6ee3a6f9794ea17e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e701caa62181ac23419aaadbf48f7d9260521239f6c5fe95633a7b11b6bb7f80",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b530af2411c6afb4282b03e7ee34867cb39cac2a1d59981242662674aa0851b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1539f5b45780990e738a57b4fab871a075fbbfaa3f8e55d076dffa393c3a9c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates/launchd.test.service.plist.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79bf44b35bd25ca49e217c57fcce229ea1acf8b05297af9c6f4dbd24e54568a2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9a6333463f17fef1f6ec3db82857b27ce408193e6708141faa577559a7bcaa2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d5e4b258cdfcc5fe1940e9168d85553f2b2df327ed91a9238e71a50a62ab7e5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/launchd/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7159b79905a050fdec5503643943cd4363b6bf76eb356f19967a10be1b06ffb4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f85824980234f92c6515092d24cf4ed095ef40ae1179b3d5b8e708c48b3b578",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfba8f9dae76e2f7411bd8aedcf06e9fc8119ede9951152c80117625afed9409",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7fe615426b02ea4994ab644d60cd5265b503d101997f502e4e2f58d24e401cd4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ldap_search/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "041e9ba5bf1607ad63a57c38912572d0ac195a19cb4b6ea7d08578e075411cf9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4175607d700d452a760e89d15d27699d44d81072adcedbe0284d4756fc9f842a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/listen_ports_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38c88a07ae5a245859f4ae788d56675ec7bbb1d8be46cb45650562488e4e76a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks/locale_gen.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b94a61252c6d59374173d10a28d4b928e9883a693fc44989c13706f1a0d919f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28aed6856c5df5b7008c1fde1e883d95c4e4de338a2793a49149934c155df67e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/locale_gen/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "deb0b216a5f28d319dc0159ef9a51b469745c4a3fc2de6365c209a5e62198b72",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dc8b5f9231986be7beeb0a9ee73cb3a64a4590966f731e37959b2eeccbdef05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_cartesian/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7df9be441bf519e667744b3a98e3935a9caeed6f0143a6ead07b027ea5ecf09e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "021d1a934f992ea5f85dab1d3f778f3f6f515e9c596e7bb17ec1d1cc21d38657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "376a96ba549cd67e5db868b4db1884b79665333c7a955c95e3bad36684b045d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "021d1a934f992ea5f85dab1d3f778f3f6f515e9c596e7bb17ec1d1cc21d38657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "026622b42fc753d8d459a00a6c3ac8fc40fe5aa0b2945e4cf7308f1485a6aa50",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae891a39f40cf75d5e8a7b43590429702996f10f5854bbfb9076715b54cdb03",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "021d1a934f992ea5f85dab1d3f778f3f6f515e9c596e7bb17ec1d1cc21d38657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "021d1a934f992ea5f85dab1d3f778f3f6f515e9c596e7bb17ec1d1cc21d38657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ed9953c542601a4a97777d6ad1fc3f41783c20dd2320add86c2223cd9af588e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/library/local_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "708c8b792361c641fcbf34bde0b181e21e8e693e5cac5f14e865506ad5f826ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935444847dff7cf6ef6476a46c6b5699e240f499b003363925d82e983fc3aefc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d469b85bf068ebf9541120274ecf32171aa4c1d9bd0f8c4dd0b4e3904a250a1e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_collection_version/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b7f602aa10a1007137ede8a6ecf08797fd19093345b434f78f30a2f895c7768",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dependent",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dependent/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dependent/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "684bd8b5f751c058206e66331114d0f591bd7adf623a61b11375a43489a07a0d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dependent/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5e85f0e230188e33cebf3b8f98571da22922af25128f832be4732f02fe79dd6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c67ba2bf9bbea5928d69537403fc0f3a53d20dd0faa2cdef8854c7395eea4733",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01fb8ad212b7fb68f98868ead0f5257dff2acdbb6520ee272d48782b2ad55775",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_dig/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e53ae1d11ca1e278b274c7720587a0962b38b2967efe20a113cd3f24aff5c16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1bbbab72caba494542522bc30872ccd22f0661832cf8d6678bfeef642f979bb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97efae590b6dc82b8f7c521a8f3b8521109988207a4bd563755f008d2e6834c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "640dfe7703805142b43bb08c034c8d117add13364a269a0158fd224459844ec3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3a758c35a3956b13431d57c60e786532f35da1490f5dd12a686a9b9fd10b323",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e408772b6eb84d938b05cb1a3a84582d736019cc67ff6a0e4bc32fbdf8fc3055",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30389cc1156dd3c2ca8ff4202aa762dea0bdc4c424d040758c6c9d184a435367",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a70bbb27cd32bae0f3f583ce7862ad5741f1c296bb1598a7549280902fd1d96",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bde22b721f1f405091f6f606c75e64d45bd19c7f332cedae97e487ed6e6f0180",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_flattened/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15f11670744ee718a1e0afe11977fc26ed56858a7b679c5754b811632107454e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0756df07f6ddd0e438bceb927e9103d9cec53187b91d1ec1d4c4b8c6fdc23558",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1862d9aa8e441ad20c6b24f107932ef7dbf91e680c74cbaa288b06f949bba717",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da9d0e24e287b1bfad22c7b437c4963f65b1c838924d769648b4c583b7330fc0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1cedf373477c36de601ffdc2d8335d4536fa1d4927376c149a1c7c2a3fe46109",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_lmdb_kv/test_db.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e590fbf070bd6357c3835cb1406fe72fddfa250c225f6b0c74b958116c841189",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e53ae1d11ca1e278b274c7720587a0962b38b2967efe20a113cd3f24aff5c16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ca79153640cdf426ee07f5a1e5c401d958cb713ed543718b87d747d1031dd24",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d4ee51a505dce7011ee5f357d7c295ceaeff969a78d9fa16ce652dcbab76128",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables/test_with_env.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88dc8bd39c3cc30494fa74e35dd1577eef5b52f9f90343ecbad6ed756fd29a2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_merge_variables/vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d539c48bb5c8bec46bc3c04f43271e5094dfb895d20fbbd0c776bf2ce6b80f99",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88e7f9c891557c2137ebd40194f6f57aae720037936c3081cc4af1aa223b34c2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/package.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5132c795bf244e2542cbd59cb8e7b5bf8ca46667ccd6d8156dbb8bd30654946a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6383b76703d241c51fcf8025c9b88afc793eb9a99f762a97d39665b3c78f639a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/tasks/tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd7d796a31bf62076cbcded2135d323a29b210af15493d87434cf27df6144d99",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates/input",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b5f9a20d102714792f1cc5d2eb6b87ae0379b2ce632d3ea1bd983b13a2d819f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates/input.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26ffdf83756ec8037079abbed050f09a31b5954b2a3a1cac8373b7cbdd18977e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e9aa1d312a9b85d1589e280b33357d5dce16844adaf33a66236b80b1bd91c95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e9aa1d312a9b85d1589e280b33357d5dce16844adaf33a66236b80b1bd91c95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4530c1a6110e9bb31b6b0a627173bef483b8557b7a366c96375969cf4893013",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e9aa1d312a9b85d1589e280b33357d5dce16844adaf33a66236b80b1bd91c95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c5dbc5c00fda60424ef5ad246c8c3613ec750a5063d044f3d81883be4ed87eb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7b14168d2c496b639cd51abee4db2467bb375a6c885070d64fbfc94e619c7c1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_passwordstore/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ec0d9677a42bf61ac68caf67be75a4d2e68577acebf9c9836054c538690aca9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_pet",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_pet/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15f11670744ee718a1e0afe11977fc26ed56858a7b679c5754b811632107454e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_pet/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b65bcf0dbda09b4cf9830100e4fae09e6f3dbac17b51b8612c8dfbba20fd80a2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_pet/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da9d0e24e287b1bfad22c7b437c4963f65b1c838924d769648b4c583b7330fc0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_pet/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b07584f9a2316388b3ad3cc46ae6691b3e48b35d8d491510ec50bf99bc45dc2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_string",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_string/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15f11670744ee718a1e0afe11977fc26ed56858a7b679c5754b811632107454e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_string/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f0ff493941869baecb5e482e0006de5805140d56207d4593aeb9445ade8398f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_string/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0022543a8abae8d904dfa6871ed9c81e79dfc2c34fd5c7659015c8a746c87af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_words",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_words/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15f11670744ee718a1e0afe11977fc26ed56858a7b679c5754b811632107454e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_words/dependencies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bd974ed3e258de9e7c7be47aa8cd0a406c09fca560c40460c542be34dcabcb3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_words/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da9d0e24e287b1bfad22c7b437c4963f65b1c838924d769648b4c583b7330fc0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lookup_random_words/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b60c65d1120b651484c74f26829d77c681c88a29756c8c8d6306eaa9dfaac79",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ad0a514f697052105236d6d2c453d275f200360ff3894c62af0f6d8c81fecb8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07a044358f1356bbae010ac78f2b1c7b095fa552553b8fafc9704461fb1f039c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bcc43bcc233cc9210754c593a5b466b88167357af43107126b7bf7cadd93855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_grow_reduce.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9b961a3b59439ab344fb7557f1d47ac35ab9b6a08a6868b6ccc84c1c62d3124",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_indempotency.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cefc560e56a359b4a47fba47837899e88af35df8e5f4bd89501af3bae9f7d3a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/tasks/test_pvresize.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24a67a6028e690611e279795cceb0a13f8cd2e3f4b652a3b63bb335f8f9828f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lvg/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07343d5c5f99d5ceabeb2cbab3c741a14b760b6141bd4fe371725822c0c718cd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lxd_project",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lxd_project/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lxd_project/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d58afeb64471d7e3404160cc08a3143351463743ccd15623c7935efb8431b69",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/lxd_project/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.crt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f41e3cbe1eb99fb9334eeaa7407be6ff0034640578e091105216620502cd9273",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.key",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3ac67d4baed753701596a43f03a8359daf79df64bf3105ae9a4448c3da00149",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/files/smtpserver.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fe2b9adc381f7d2cd894be080292398b115b6ab831efa4a92b920eae7f621ef",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f50538e1bc1f378a161b65870cc530b3880663a2968a4c8241bee083e03ca06",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mail/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4762e4d5adeeb09c1dbca4b156fc16c19f786f55765669aad0bc9f3b41edac9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19cb6f818e0e402b838d921f912959636770f2c1be04ce698d1e0f33d136b6be",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mas/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff68893ba0f3f9f10f1300500628712d449685ba51ae8279d05857624f86fd86",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0e2a26981e6bcb13a9cc5d115b6d22efd59ff3f0d7b06a9b02ad6ea25c24348",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_dns_reload/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9e2c54175ccd50661f988319468fe614663e7860d134d57c06cbf2020bfeffb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_memstore_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55a744607c06ad23a1108f449c24071f72f05472d602604f25f6a4e7f1898832",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_server_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07b07cf6a35c2c8cb172890ce9e842eccca6a2dec743e1ad2363ff99e4207460",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b709b16df6f88904983bdd803bd7d71dacffb9404176ac55355a901bc409682",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94b7d00fa52a9cdd88c31be5c012a2d194741e0c28e5e9530af3c115b01966aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a652272ba9dd0bd71025040abcbb23fc9aa14607fc907b0caa9119d2592e1a8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_domain/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c8a60ac81d0973d78ef4faf2b311844109904b568b7aab3688e954ee1f9ab1f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a023e9ba56610056adcbcb038d65b3943fd368b1a9e4e57762cc1853a17f80e7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/memset_zone_record/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/library/mdepfail.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ccb5898901617cf6976b660ed6025f61d0ae4c73b01beddf1f6946b88c5643c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/library/msimple.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6ed981e0a4a9cc25a1ece50d6d9aa3894ea9678bd9517ff01a47d1314696a6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/library/msimpleda.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "343bdeea204741c195b802a4d81b9f97a3b2af5a3ce01f4ba4bd8d2add2ada5f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/library/mstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feaf0076b48a59004d93f7e9d18b13b919d2a35b3e4d8ede711f9f3ff9c7a510",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f812372f1a9f69fae3f011a259651445eabd26e277b6cfa0c5a5e877a820bfcc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/mdepfail.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75d3a17b73e28b3bf5f10ff3a8c4b3b9258c2e55ef4003f765ae013877ca5a05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/msimple.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87a3441a3f91498b5ce5a6ce8fabe721b839ab449a449050344c20f96c9aa8a3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/msimple_output_conflict.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55bb67066cf155c0ad1a0312f3a48a4defc631b43ca158e98ee292d23819edc7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/msimpleda.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43ae15b499ac1b3cdba565d4c472fe1585bd119bd715303db5971c27c28096dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/tasks/mstate.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f6696d0abe873dbbc72902992f900c68910829d87cbc43b6b5e134c1680902",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/module_helper/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78d1cafe3ed05316d7decb2c54a18c2d9f7e519080a80c5451acb3afd7ec2326",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/files/httpd_echo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "142cc9e2c685c246be588822176e324999d3c583b9b83905e3e00de2f2afc38b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "951b2d2640274aaf81a280636514d7c1cc16a8371388f82f71f4e40b67387929",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/check_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9819e95ee31c131ccea21c85d225cc5621e420a935700075ea0be160063619db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e72f68b30f2a0710dd66f97c501f7d51fe9a5bc8af306b29874a0c01eba50530",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1b8319c85553e197b68aba8a59dfe523bba7f2ef386a8d0af23599a694706b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_errors.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ad918cbbd92e31acacc3582104095562dd5839ea62e87defcc17b7d015705b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_reload_present.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00c99fdad890550478db4b1afa3820d07f7208e46a96c978225fae105faf284a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/tasks/test_state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4e30ebb5e5f1fe0400b6956b376eb77b936b1543bccf9d0938f3bdf4da875b3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/templates/monitrc.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "134ad72821142bcbcbed98a3e2d7567b14f1e4a8227c578459beebd2c5ff2038",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43466cd6283056b889339788ffaed47d6f148f35541f959c77a61b3986e8b52b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43466cd6283056b889339788ffaed47d6f148f35541f959c77a61b3986e8b52b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/CentOS-6.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "826229196349d11c3446d7ab89a6106dc222bf6591f44bb7d1864a29b24630d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43466cd6283056b889339788ffaed47d6f148f35541f959c77a61b3986e8b52b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43466cd6283056b889339788ffaed47d6f148f35541f959c77a61b3986e8b52b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/vars/defaults.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f6eccb7dbd047a0ec190264b79ceec7459fc56bfc3b5e98508fb74cc6d7bdd0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/monit/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6270ea2c23a9e7bf3ead0602decdcab7bf7405a24251b23b5756974402759b57",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c8056fa72b0594ff48774bfa538fcff2890e94f22271d964ca9b76261aaa5d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4b0b76eda6c83b4b323a818323fc70e08e271214e5a159e2a897897a05c5ca9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/tasks/ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edf2bed2d6aa22fb573c5db0c33b03168a6d9fdfbe76b3a46076bb703e7d897c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mqtt/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf60443d1b1d9071fc78208429ae99584ca83e44a99379659676eb811285b7a1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c5ee86c6899d36929b38034bf30ae2b92cbaf8e98995d2da8345a64bc486c5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "600161fe11bca5382381ae0bdfdf1a51cb43f00b7023701ddbde8598dcaa508b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/mssql_script/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5e3e3138b8ce802a7350c9686c1f10c236fd77bdf6a1f82350ce02cd203b39b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/files/job.hcl",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "85afa5cbd38ac2c9f6613d04bd473f9f76bfe9e9d9c463e35ab57ff11f263cf6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "027a3fc0208875a1641bf57030f73c5232862df7a69c300c5bdbdddeb1fe6e9e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e347f46077155dcdddc0754d21a437637c5de5cfcfc9c71daec6c4ce01d6814b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/tasks/nomad_job.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f696a0f7249f5fb52322a496eeaf109d546f930f7224de4cefc2a220b189bc8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/nomad/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e99003f7e70ca6802d437ffc7d4b53706b85def986fa33860eff6b679c0896e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35dfb6bdfdb9176e795b5fe61ed2df04282630b6454d601463ec6fabb32e8e94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd46ccfc9beede94557d534b8afb6c94068ea8524de29d199e46672e002dc1b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/no_bin_links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "232beeba13c45e3a23ced0572426651c1ca6ea08d70bae2abe3740e2516e5fcd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/run.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecea7b93c797cabb39c9ad78fbae784c38c65225db85d76216bc43b8ce991901",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a1d27723cc6bc47a3f9d011ba1d81206c00e25234285161cadbfc51f445e557",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06c4f3d36f38c0ea5c2c9712cca7bb4782365bc1871f9034bd698d1126331b58",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/npm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "759a70689d3b12842b2c835ed719ebe6114aebc6f8b3c1c852859272d9e90cee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab9f60dc7f00c4ed682bc0be306f84a6e06d465309653b3d228ccc39d2245b2c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2127b904b9912c29fa1a8e30ebdeb0c99aab5552ffae6b0b3bd0a334c86b8555",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/install_pyodbc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6d4069013fdf34659f2da69687f3bb112fe7c3b9ded16ff3d5783b6d202ea65",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56ccd675892edef06817b40d8d6be532f7293765f1b52ddbae1f412725336249",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/negative_tests.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebc26aa742f597ad76979f85d144937ad672f73334317eaa9e212d90784f3f7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/tasks/no_pyodbc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cff149b10132867830c04a8c4209430a0dc0e4029b7388a6dc5b7e300ec4af64",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/odbc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46ccdfc640a6941875ebc7366ae736a12058166f9eb0603d1b1851871a4641bd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost/tmp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18dc59bac231e749997e1a5243db69081f63b757fd43c37de017e20d58d010d6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a6def14fadc4f91407fdfff8bf7f40592b5732d57e18f1c12a04d9964bdc7b3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02cb1e273b28df5ba99c3434be90308779ac4c7f4583c905ae133cdda3eb2811",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_host/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d0fdc86dde46235ec0ae6e9ef10406a365d0588c4cb7e2370235e59e54603b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/files/testhost",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/files/testhost/tmp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b303d1a0b4deab46c421512a15476493a895061e9d1f71f49ce03d78484e928",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a6def14fadc4f91407fdfff8bf7f40592b5732d57e18f1c12a04d9964bdc7b3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43e8e34f2465a0077a46d342c24ff0061da74d7795fe1d91e0978cd38296a87e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/one_template/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d0fdc86dde46235ec0ae6e9ef10406a365d0588c4cb7e2370235e59e54603b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bf706da04a49c14a8c6e07827e48d81a00113633a86979e8f5d4abd2300e9b8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/osx_defaults/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5ad036c484e4b95376b2aa53d3f2fd2557e6b2395acff6522f8e7f5f185df83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e35a0a15fded9c8926fec0c8c83e7c2b716d01c41ea9c0c1c81e67474a02b352",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20c593c3a56806ebe60e08cb225b8987ec1ebff763a43624c7a2baba495989a8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/locally_installed_package.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1798920a40f41a51b22500304a3186341db67c93dad97b3aa45448df7c4a300",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d052b4adb40acd4cdbfaf139411b0fb74af245c1fca9126000564ce1ce5d80e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/package_urls.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e319773c59a3742f6b526a54c71d7fd67e1fa42fa9d7efd58b194616317fcfdc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/reason.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eec370ee32a849e61568907fd14bfbc245dfc026eac906188d6c177404a17d78",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/remove_nosave.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8e4738b88af420276caed70a1c76c5789043be60504ba95912085be1ae68d04",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/tasks/update_cache.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "474dd450bc8dbd72476c4770d94eb7a4032c21fdb4ecfb83bb61e3ced2d88eaa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pacman/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "943ad51b564126a7961302e2acb936a1d0708b0f71d127221a6d3dba548aadb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b97e5fae4dede2f95f2e75ff4a57bab4342868bf8c5ed7a060ef5b75be7b3762",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a9a2ca9ed351841209fffbc63fe2125e0728cfcbf84783cc0a297908ccc4e60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pagerduty_user/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits/files/test_pam_limits.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf9f216147d72a809c23265e56fa76cc7afaea78e9cb54b1025058018dfc39f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "523413fc09c7681351ffb58ed92a1a287e8ec04f6a008dfcf6f44bebfd5a8f82",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pam_limits/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f755a442e3b004ad7d328bb2b389ef3997e303fb60d634da920b48800f653567",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pamd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pamd/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pamd/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf1052ec2b4fffc78bfbb8ccc59eadff705b969e01ec894a297d4e1dd935b621",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pamd/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f755a442e3b004ad7d328bb2b389ef3997e303fb60d634da920b48800f653567",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5628dbc2232e704203eed568ffb4f97b1e35d6970cf4b2afc25421a1f1bd446",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad56aadd5175cf91a2f20cb6f3bd8ae33b62ae39ffb985849bc4bd2ca8ebb322",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/parted/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ca995f51aaec9e475005e9919a68681731aa20c7fa1a257a2aa9a258b9f6fcc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/files/sleeper.c",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d4f80aea6bb844dc7cbb71429f8efb05d878ef24b75f14e3b1f1803361b8c7f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbfba12288cabf657465e10ac584985224f3830c7d421342f5815b57ba6027c6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/templates/obtainpid.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "845eea7405e1d7deb8b10655a5688c96a2c4abb0c4861cea000f0e01a8222279",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pids/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935444847dff7cf6ef6476a46c6b5699e240f499b003363925d82e983fc3aefc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb78d7641bd06d9f3a500925c867787c8f63cce145859726b8218e0a0cdc4c31",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0f6bffe899b20d2d6c1b0df7e69da08e7730344bf267c3a80d56c8299af22f5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0947fba5e022d2ca8f47afaed9e3611225264bbbb421ee6347d3a391bd9181a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pipx_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0cf6c820f954829c114e7d0d121752c43fce99ad9709ac40783ac1ca2acb3277",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e1365716e8eab0b1ec9facf0b88b2191426ce275dda0cb2e28f3d0cfa96f80d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks/freebsd.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6e9d3dbb1920dc5a8be607b653e809e0ac967b4289d0ed4fc2cc6b13fb770da",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks/install_single_package.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b71f2e2c454d0875dbd166cac8fb7ed879f364ec26f96f39cc019cb153ab090",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "072b8907971e4cf890cbcb7ca57115a472930875c0cf94409f675f62c2a6cd82",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/tasks/setup-testjail.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf20f67d633202d5b7f0803b805af3f14fc86ef9c3bcadd47f7413d7628556b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/templates/MANIFEST.json.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7723b76e9aa5eec55d6911d8c10c742e99e38820ae0f54781373c458ef7379d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/templates/MANIFEST.json.j2.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a36931f25676c2a6f0efb04da0b8240a2efda4f52b011ca4cca3e18d3064fda",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgng/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd586e26a9f899c57ee09008b55d9e82cb69dcd9459cabec48e834016d29910a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "beb6b1ba4bcb6f1ce6a09a102a7cbbd09b53eec25f44257b2b1721af393c466c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/pkgutil/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f5a9f70c0e835616ded6729c9212820546cf7ff81b5f8dfa049381424dcb0b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e699ba15bc92b1e2ddaf15c5ec2eeaddf21ca3ccc271610434154750043fef5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/proxmox/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4cfd8ebae778f9325ed038c5853cb9ae6de348dc2457311a20a662b225e954d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f34c719324a04787f504f55b7a72219131364785d8765cc01c1313816dfc233",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/python_requirements_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6e718f2d19d55e79184b414f1b2d8ccc71ea37156ec5284d6afb9d62557357c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/read_csv/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f6b609400eeca7ab6608770f1c0e7da36e210d901e5aec612ffc06742a0d327",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "969c1421cde46fda56fecd5e7f4eef7cde677a7d079ed77a173ce61e38897d6c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffc6614fb6eae256d16eb8cdf6b458dd5919815c5b0f3102760c8e87c8fccb4c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/redis_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9022d7efb778d57f353f7795ac905f85765cd5d6b10ce774e909ace17b79a66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "548e41109aa2c6c567845570ea2419ba24243fdfd38a90d6abdd6c028a9e39dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/files/test_job.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fba6f21d1a489b6ff476dad4d6b5c651e25443fe4cfdec6be1a40052cbcfa62f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "839f7cf1f96eaf8c71a46c449b848ed4a90bd4bcae970e55ca82c9378326ad59",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7172c83eef1af7407717e6dcb8252e0dc9ebe652e4efa345cc5fdb46077eaf66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/rundeck/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db4d4d34a62a051efdf2b9bcde0e1cd2845787dbdad30f08698ef60dd28aea36",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eafbdda17602b333791eee836dbd46a63abb6f061f4b7ec06d703b354bc40c93",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/ip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3416ca8a136906ac04e2fa3420c45155f81d2081b245f16afd227dc673d8caf9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7e245db854c1b97e9abfd63e2c2a5bf82902ccac15836efc4fc424594e71e73",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/pagination.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62d53d3e698830c0e76b1a8b42bcb1f3636ba01acb8f377f1a387bce19955517",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/security_group.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70128b62b55334145ec1d4d33d2ce2310c4dafbbed60fa889cf9f2d8b214393a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/tasks/state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9b896076a8fb513905477467344c8d7fa7efc67bfab1fac1a150019319e0a2b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_compute/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfe3528dd2c679e3fe54b77bb60ea2c40496750643358e4beab81472471ba3f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e47239906fe3fb765e201e097b3bba18d384eb838d68f3e32d9796e257e2d8b7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e114726deaf9f927b81738b995af467939215c2d45ed94320dc7d763b9fa2034",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10ccca032371ef23254b89afe85e99156931715adea5030b29be324820f375b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d514530da56e7ffd32a9e4b641949dc953d3e2d0a6d3a950a414ea2510112090",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6d9cfb9e53f44c87234c6bea0e626ebe534a5b58fbfd68b7a1be17ced5a11ac",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6bd94c9beed07c9ac719fcf87c021357d7ebb5b361cbc460f456b1fcc081b9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c3205d18c02a58d8be46af8287819b10b3b7daa3c4e43717f37ec4128a9cfb2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_namespace_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6e529024a7cc9711a0091dbbf323512ef4dbf8d516d3d11f37ccbe721cd20dd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae2465a002101d0e936eca4abd36955e55255312037470434adad7899404cb97",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84ecbc42e059d56104418b05d9e94f3de088c022d3c3e2249b1ab44ff4299689",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "912a08ddaef6721dee6931f5ddda83f5760621df3c79bbcc0e436ec4d7d97b42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_container_registry_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f51510e8e356f0049d3ea3d764826f8a5c3ba6d486b5600fd00ab921eef86ef2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a1525790dff28b22ea5456259074473bc45dbc91a689e8e323cf1f60e3542aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_database_backup/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff163fbfa0b10c3fe884e239a381b926b596b7e85606c02bd5c07009296780eb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d72617f42dc73c07dc5e4744b475fbf2b2987c966dc7a8451302a572a8a964f0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93390e10203a5c98acb3d0d76cbe36596824227c87f0de2e59ca35d06fc51494",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9f132d78f531f46308851c5a7087a19c798ff5fae304a8a6bb196cc71bbb7c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa7e26ce08143dab7d41321010684f996b10af6c42928137a178161a69df7be8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07be064265ac83060c76159d0e3875d4f8ea6acaf773d946b75fef31a09f2d4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6355aab948341ad6c9e1fa5724db5fd43de3f7e138f52f44fe7fd98902806503",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6b037ab7d401b062bc443970a54c646f748aed0ae3bb692da735816b7054421",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_function_namespace_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d84667c51f06829690370d148f035bbd643014deb562759939663f126a9ac18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e657049baf4c4904b741de7b0c597d9aa5921d2874edce0878ea17239e2c638b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_image_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "615c528c8bd4fc7af78372e5289e3a26b679d8dfe858e706018b1bd48456ac7e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72ce4dee499204060f50f4004ee394cb312747e61035d95a61a75c92c983f0e6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14ebc31e2938b9baacd6b105524f7afd36ac68a47726584c135c61c9381885c1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_ip_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ec190eb9a00a132fcb15c0a673cc0e32ee448e6f0fd31d34d69b5e03886f514",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17117d9bc25f4d044aaa71c84433e8d29b96133103c7e6ca6bd2c284e1b9b49f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_lb/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "369c65cf782280880d3ccabc262bb47816a66512e7a691facdd2d2ac7116915b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_organization_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "246d90a6caae3babeb8fa0e78b4bb174712a616b8c26d019154e35883035f198",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cde8758d7abf1ac132d06e5f6c534e1ed422313c55ef45b82e1bad897f13432f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dda5e92c672c34999b52cf34defb55bacf5a07e8c06f1b80ddabd5c499327e30",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3977543393a8bbe0935f0e6cf126d3aea1badb3668e9492659de583dda8c3986",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7298b37ce49ab1a65f8248b589ce7d2213b483cead9013db99c9e5994320163b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_security_group_rule/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49e4c5d7c55fa76eb8ebcc9816223d230a6f1eb038fd02c9ceeacc6d0f0c470d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_server_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08b39422a4294f6dd7beaafb9869db0662efeddf009574b416c4531c6f8541a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_snapshot_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db8ecbacb03549be99b5bd251e9fa10f92a67f0665fc6e8efcf868ae511fee11",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_sshkey/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f8d503330f36a5a3b1225100ad436b756ab80f124ccc79de29c780ccd4fa675",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84483a9289f008ef458607f37ef97c6887c350fc4b55dcf028b4bc8ccc7202d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_user_data/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "246d90a6caae3babeb8fa0e78b4bb174712a616b8c26d019154e35883035f198",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fc4e0d3476e1e11db7b34b105b04edf8a399aeaebd0ded3a20373c9b6f06bd1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a00602b6663fa18a7f993cef164e84cf4881a60d5e8ca2d8b28b3c08d9977e48",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/scaleway_volume_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63d66f8170cc9af70cae5dae45ff0c9225203692deaf7d48047bc2feb6fd84ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc02d9925b974350ce32a31bd42de0232a20819ade498af84b1576d5d3a35d7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/tasks/sefcontext.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27359b2a3636cb3814aabfad4ec6b96d064d523d3fa0a4c1095da649b8e64554",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sefcontext/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8d11de60f89ab63e59bab04a78ddb1ff861bc2e4f901524ac1f06d715a6213b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5401ee848cc5106cf883305bc3a4f227156ebfdaf3d8e4c2cc6360b015dca4d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_client/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "194b69fec1c92fcd0c8edce4102afc841704c71304088a1f933ca840544cb676",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd9bc7279fafc56df80482a7bbb17d6f7758619cd0c00d42948c06da2a3b7136",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/pipe.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a521734df1d8599eb35cb096984cba3a29ab131186cc9eb93480fa8d0613011",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/set.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34028e0b3ceef31acf86d7e51e4c1b1bf7ca1f9e2c92562f5dad46e3c84e0650",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/tcp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f1d02825a6227a97838f28336eff85afa4d9136e2a41088f480b71c78b690ca",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/transport.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6cb2c1167adb04938565b4c302f206575549a4029e4001b688a63aac46ed1a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/tasks/udp.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e877ee544a8273d0c25b6bd26c700a71fc5ae9095cc98e838ccd486fb027245",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sensu_handler/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "194b69fec1c92fcd0c8edce4102afc841704c71304088a1f933ca840544cb676",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f504144cc5fa43ffbad4400cf380b90acf1c15b1f7c0fe28f0e0b91a5d4725f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca80e0ff352fb7efa2678e42bb90ed19cf4722ae4d68cb0328854e2f08698f76",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb5772b6970367b52e97729921b317c60d3b9f0bcf40ec65f98046d45e09441e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdde59c70d492ee4688fdea75e399d64b52c986f339a5dd97cca0d7b79d40cad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8bff779ff97355eb301acf3bd2dfd92696026a1a803631fc8ab91ea551b6f38",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a93d5995a80bd101793bd19bc161df65b7e211bec0e3a35ebbcddb81d43d416b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/freebsd.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20583dd3fd185fa280cc0d658b10f9c6a23475f93689fa7c46bd5208715c6f54",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/redhat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7af16538fb9fde83763ff3048189610f7932d27958a8b5d5b6572d910fc3e9c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_cron/vars/suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "429661898d352a6683731203ec3843fa7e84798f220de35334c4e55d6bd35edf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b610b32a41e77aeb9bf6eae595718f36cf70cf2fabdbbe0d3826553d690de2a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0ad8f1433b5bc0a03934c2501da25f96dc7a671160f097ba7cdd5e0463bfaa7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/D-Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6337d969ec71f720c83e6b2b418a01cb343b5299e5dfdc50fed3a3773e0dbbd0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d60915b2a5756ef2091b3e79cfd22ba7a329ca20fe469289bc1032ea35bba156",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a58ccd0f97b0ed64adaa9a2a23c5f050f6571d943b7148094179eada2e7ac06d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/D-Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa301501918169109bf5400ebf89825cb9e2b4afd9c596f8ef77e18bb6132707",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/D-Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93e48815dca85d1a933b2316c8c74908c8843c39a60c4a484ea88c7b82476be3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be802c30a865736abccf63387165e3c5eaa9f0bbf0da190a0308307a73016a56",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb09ffab7e455855046b8e8ef753c031bcd11a625c879517fa98335d23b15af8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36bebeb527a373625ffb9c38e041143202f9fa4c2249cce1cdc3cc1c872a8c2f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b408a09df425424d1c559c474da32032f0441935ad0eb84246c905d5f6845a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f5e876474ebb8b6ed9a88e27e86a57b42ccd56a21b956645bebd469cd784cb0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a90ba9324f9403b45f08e87910a7d93d4a29f76410574006cc7cd30934d1f4b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/Suse-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff089b10c7b16dfff9a30ff507636075f5345f0e4d7a7529c99d20cef47a6e20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff089b10c7b16dfff9a30ff507636075f5345f0e4d7a7529c99d20cef47a6e20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_etcd3/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9070a07ceeabd5c7eb8a90add506e94e0e15b8b7823a351675c8f4a8fbfdf08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "673cf7c93e0a5773c1aed10345ddeb3cb2fdaac193b311970f0a9f1929c1ddae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/handlers/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e77fd48ba47ab2f7dcfe721efbdb6578bd0b67880abb1ac77e1ec898cf264a83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84e40419ccec22816f56f582d04cc36db0305cd480dda02b925c0b044d5f71e5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/tasks/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "505626870e0efe4638645919a5d14483f3d62ddf0599695600703118264394b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddf28b98714ecd7b408366eb8f34d23ff23eec8b2247393b9432eef7c774999e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_flatpak_remote/create-repo.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c00c220b71d851e34e8271255223f723f41e8d38402593a3e1dff3e53bfdc51f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69674af6a96f776066f1ef3ebd7b065baba7cc223e9740a4779889a73745046b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_gnutar/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23200e72437a9481f60350c7c42a4ba930878cbc713b23971b07c421e4328b60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eaecb40d5d0ed9b369725b1370ea15eb0187c2ed458f739a7fda821038569aad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_influxdb/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "434cc9c4f685ccb8ec8923ad948d5311156f344cfaa6b18b558c4f551d12bcbf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb68a78cb264725dfaa8d3647048f08b136626454fb58e349c909e13b19d4be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3367bea7a11f5f0255b026f1af941bd9f119b280b2307f200e357970af972635",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d6ca1d7f8ecbe06e87ed2dad9fe015fed10aa906b70ace7969d5ce2138c5055",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed6856cf47b91a6b4eb9d12d66bbed74c753b64d8d9d17d4b08898b66b98077b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8345b870e0d770f7befef71644bfe288c2476f5be1fb8af1e72ad1d5fead25f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8835b1f83d2b73f10c9bd61a6397629e1e5e85baed5330e57508ff39dcce7f35",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_java_keytool/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8835b1f83d2b73f10c9bd61a6397629e1e5e85baed5330e57508ff39dcce7f35",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/files/mosquitto.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afbfc9ff0a4e2d0b5d86edabc6649cf264781b8c147397cc67da73713864b3fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a9c25e2dbf30667b3572e9184b750bb8577549d3e015c3241afda1b1ce2d8d9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc3f786a11db7ebc0c861c7eab74a80f5f063cd3665b5568e373fd903d806e37",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06ace2a2298da1a9444883ccc22b7e56031a977e7440fe707cfb10af0c789514",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/initial_config.ldif",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "052e5c4ae036b07264d62bc8323599d8e6cee06bd193e926cb56f26d772bbd2b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/initial_config.ldif.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f093e7bee7f99098d02209f2654dc483fad2768e15ff6391bc7e73904d51309b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e448d91ca0403de001122c60d5fc4eb5976c77278cecd19b7e6fe1ad5c65451",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99e50d897eb686a38ee0e1010814f0c671d9daab5b1af50a28195187d324735e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openldap/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99e50d897eb686a38ee0e1010814f0c671d9daab5b1af50a28195187d324735e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c67ba2bf9bbea5928d69537403fc0f3a53d20dd0faa2cdef8854c7395eea4733",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e82321d582a18efb90f75ff9f7ae2efa524aee68cd162f8bf41d477b2c8004c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_opennebula/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4b0bd97125829cadb1696a344f2586c2de98f6ebef0051a53524951d7b81a8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb68a78cb264725dfaa8d3647048f08b136626454fb58e349c909e13b19d4be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed28d318d9012475c3318e4b37252cc842c7dc5d0213e6900ee6b569c315508d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68a2756984695cd2432fdb3e8ce0ef829664292d2d925100a7e376aac847686c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a36954474212c41700ae90772c2632a5cf1df57fe578926942b2164d3e5148",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/CentOS-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f192f2e320f37af1544faa7e49f4cdcf0ca7f99d204e0cb2473bcce7d7ea6bf9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Darwin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa1d75e627985f9efd57397c5ae7c9a009192657a2ae3d8c596174b023d55ae8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b71bfd59e8959ad7539ff4d28e6d65890e1de5ca7bccdbbdc0076ecf7f02bd5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cba8c06fdcd475495b79f7891df2495ba1895cee894904e905daeb2442c98943",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d75b1c5035a1e9c17de5b28a76e8bbc81c9d52e4dd78592464e279583619a95",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed652013c3229cfeb825abe1976a576fba9b931d8cf072545ceb8e562344e54",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8027a6beb88082e9656efd44f776ea977d9f79c631662b7560e377ea6fac14b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef4b0b90491f629bfd6a84cec74208ed90604faad98794d6f811adebcfeed991",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3098ebb93dfb044fbe50fdd61378dff6b037aa9115e070403693510e2421afb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36d3b1453f9868dd678d75b312481a6abfbd8320955146089b3691b2983da43a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65ef2ba571f52939a5f034e228977b13e567c7960f9e14e570b169e39d1108ad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "603e78d3f6119cb55c32f4421aa791d7cf4e4d836682cb9a0e831bd077081740",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89449cdb450739af22ed463c7f675970581f63cb8f44c14bf495f09b16b3038d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8000b3819e84f8f1af575e137e4f478bc16cef5b0b11867f4d348840ea34bff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/dummy.control.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/files/pg_hba.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d41aa98f14bbfecc1dec3d57e81f6f3a1eb0a073bdd2a6c1a17e3b1e2160e02",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60882f6a10be5873cf18ee0e68aedb3bf29a4402308c4a7a22662631446793fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75a1cd9b1d200df4e80f00510e845d62da5c1be1f0758299db505b01f8f1fb64",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc5c301b5ed551b394ce847534925a9bf9372117a855ac33a464de6dc46418c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3aee0f9bdea81de1e16f1922f67d67cb3bc4865c951efef143693256f1008549",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db78aab35524a2eafb80b6ef66e54ad5aaa7f4c2b4fa4326b5cb38d16e1e83f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cea6e22c6bad45d013cf45045262f06745abb530decaf761ffb035f31d58177f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c0ab751762775160fe2b4b0bfc2332b7e514b75a1fc4e08296c4113e1a3bc20",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cea6e22c6bad45d013cf45045262f06745abb530decaf761ffb035f31d58177f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b7f72dc91d5dfa72c2d3a47440543c75299cec9378c88a026f3fb4d8ad2702f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fa810d50b8e4e1a976419ea5c4472385d159ea81a6f50bedad8718a4155465a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab2d2e003a976b00b143f3ce27660a1ce64a9716827a1cc6b59765215ecc5de0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "491110b92d6ecab060c6e5f7ef6fe8f672637efd131f274c35157b031725f8d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc2c949a635a7e52e53f78078733c27b9c691998768a08adbdf65e3c898da608",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "549868a5927808dcfb2cf1fedfbee4f3c3b255cd4e2de2e2d1fe8f1e638d3572",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f850db8cee7180eb569cb8c01f5e628b8510bf4f4933f7ee7842eca099e2953",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38270c78cc20cc3f0296ad9896bdaeeb83c4e3cd96b84dfe56203b554dd6eec7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32b2e64aa3c3d41acf058c869bf91654ed977f0ba9ef21b58ab3c36c539ffa45",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77bfcc2527a5c084e3ccd9e896d45c98701105d8d1c502fb26e8f9d2c6eca272",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "519a033ab3b2ff416a163d8902e63290b6ada7891378df17d097edb406b224a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab9723d51662572996fdf453f8ca8347f61ed81498be1085136505815b3c1c45",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68839b3f509218170a879e5a8d9545ecc628c02384b4be4dff792c8cc33c8f2b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_postgresql_db/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "85de40d09e7e478bba2da87cc15a471dc4cb40c5843a9a2b50e3987442713657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1cd19ac233929eab4c1cd7c29267a6c8d84ffa98fe81d068d8c821f94481632",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3db5e9f4e537c4c1a3600452c749f3e3b4621a33ca3415d4dcc267c49a037707",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e4212771f6cf156f031b1fe08a6a6fe6ab2903f3b759379b98afad31bae68f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36ca707fde9d3f55992a823dd4335ad3396ddfaa6388ee8ea86483a5424a7aa1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "305fd28ca0ca59a0a964547bd2cb2758f0299c9c753afccae636679bcf0997f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86b29e790c6ae977a080ccc815b48d521927052ae23196fe4a33e8e5aa820f93",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60c77e9e9feaec01032830fa3e185152cb4aacbbf0b17279e6a9c3efb40b4835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb71bb93b6958a495a1486357d097e5bc9ee2071a16dcc17f27780ad8af5ef77",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54730c80c5fa787eea72dac59c88627036aeae96c48e27cbec02f9c1bd49a2d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "594627e64e5cb619d5d2a817b6f94c8750c61f21452ebfe3e85427d59996ec06",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "835cc0c0c74d681d94b5bd3aded394e0cf4776cfe760d2780a2e3e396ece610d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb71bb93b6958a495a1486357d097e5bc9ee2071a16dcc17f27780ad8af5ef77",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default-cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54730c80c5fa787eea72dac59c88627036aeae96c48e27cbec02f9c1bd49a2d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdf7f2ffcd9b74a6fed7806baf4a31e3aa04aada5eecaf71124164ea1f268f7d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "835cc0c0c74d681d94b5bd3aded394e0cf4776cfe760d2780a2e3e396ece610d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a20ef4736752d7a0bebd7d3d4da1b0acb2b42e210b35fad57b6af3493aa5a76",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9402477878b55c4348649e65c5cc0c586307e572ee76c4aba71871e4911463f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21745ef9b0cd80ea670af937a564a78b65052f35491cea2065ecaf850d5480c8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59519ed52da7b404864d8ada2f3c4a9b3773a57acc0c4aa3d1c071e0ad570968",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8560aec9681c6e3f2cab6db359b3f361c935720fb0c82f671ab2cea93ac8fb2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_rundeck/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9222311659385aedd1d9d4ab96160ca0f9f4306648990f6b6372f8e154152bed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c615e4f0c797743cd6d566d5dcacf75ed9b79ff59d499b417545611e7e3bcbe5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d0bf8b9ac9473adf013dfcbebd95090fdc61acba8fea35862ba2776d23aa651",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7385a56479432504996e8bc6a09d885fca3221d1c285990173a7fc1f114e7ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbcd1ecc87781db24708d223b8d65728cb711a00df02364526758c306650abc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbcd1ecc87781db24708d223b8d65728cb711a00df02364526758c306650abc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-9.0.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbcd1ecc87781db24708d223b8d65728cb711a00df02364526758c306650abc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7385a56479432504996e8bc6a09d885fca3221d1c285990173a7fc1f114e7ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7385a56479432504996e8bc6a09d885fca3221d1c285990173a7fc1f114e7ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-RedHat-9.1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbcd1ecc87781db24708d223b8d65728cb711a00df02364526758c306650abc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/D-Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "464c0b689843bfd36b339f208deb2ae1232856ae0054ead4fc6df39f24a4f857",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7385a56479432504996e8bc6a09d885fca3221d1c285990173a7fc1f114e7ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d98e65fd99e9ac255e63f2515172d52f4b7059e59c210e32da0b27d1d7c76aa4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/tasks/nothing.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbcd1ecc87781db24708d223b8d65728cb711a00df02364526758c306650abc4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_snap/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb09ffab7e455855046b8e8ef753c031bcd11a625c879517fa98335d23b15af8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/ca_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "968655920cc97bf5de84a0c52046b430110fe977b2cce46b1eff492dcf3f3f1d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/ca_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd5306c600316fd344a4027361e5a533cfb97461ff928b3550cd0d45b994b4d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/client_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e78c5033fa3e6e26e0bac11e1c66295edb40d761ea0d8c71af75cfa20dc54f53",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/client_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae1b92fb7fbd097804d7b4839024814632f2485b86263bb902da10fe49bc7ae",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/server_certificate.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec5e95d973d4a14e251248670f3d9a85a6caa257324e0645316eb6877f22304d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/files/server_key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e128fb808d4d6ac61949ca188d70ce0be9edf5775c185e4b8802b98bbb16375",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_tls/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff9214467629f45157cb806193a45a93ca74ac60eea34a1c8243a2791d946d08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ad533e4dac319983e0a0b1ed7a3120554465aa19468f3d41a16d4155d321336",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/files/wildfly.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c06679c4c2d7bd100fa8c90060d65bb23d2114a8e8ce70c028844a5b18928f66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1844a0e4177f3d1acddb418c2ebc17f88b8863265ce484e6ad2ac06c7f38cb12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7837577f323d1bc63030836e3e7f3365b16dbe66e5aeb505ed789b5e90de8387",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "566f3fb7ccf763d83dd2da921a519902beeaff8ca1a628c7b405f5bd83792733",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1e64cb85842bc75b876786dda7935b5fdfe56e75033fe59073bf719a5abad1d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12c6d1b9a0cffb27ceb96c0fb0861c486076356d3fa585d612865d48fa5caee1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c481592019395e3fabb39d0b367d382a19d4dd4bf2073e8800dcfe5a907b2556",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/shutdown/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4762e4d5adeeb09c1dbca4b156fc16c19f786f55765669aad0bc9f3b41edac9b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98df429f4ebdd37e193ab4ca32ccc35daaeca869c236e6f3cddefd3a83588a2c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f701425c47a1521abf353d91f5ed172d40d46c8876ea00e44965e6d1d30cab4c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ada89b221f0cfaaed22454ceed17a73789cc5bf2e8c78f35b88c431055231d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98df429f4ebdd37e193ab4ca32ccc35daaeca869c236e6f3cddefd3a83588a2c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6be78ce26878f378d019fe508b55f48c2930b6d1b97dd223bce0962a2c114805",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97879576cc8965cdea8f137d6bde5dfd56d7ac8f694c2d39c9c3f2ae2feb73dc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/snap_alias/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ada89b221f0cfaaed22454ceed17a73789cc5bf2e8c78f35b88c431055231d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/spectrum_model_attrs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/spectrum_model_attrs/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/spectrum_model_attrs/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94ac503112de3e354155314a14d218a0fae20e3889a2099b0f5eccbe78186159",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/spectrum_model_attrs/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efa136c759c51db61ae085d265a6d6402c567e9bb00875292d45dbb00c1ed892",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/files/fake_id_rsa",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/files/ssh_config_test",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "139f9f41166f27a1092e2294d027341017873d4b5a76a34ae8a7e2657fda2b7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2aa641ed281ecfeb5c73c0a569bf364a1adc6d3640362add3e1148c077ef5add",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/tasks/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6663549e6526c05e0359d1c65d2b336e6f748e208e11a6a89c5948277de413c3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ssh_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8053b06325a9c58c6856e1b6f938a87a865ede0f309053a1f7d5fcf4537492ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sudoers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sudoers/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sudoers/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48117b88b9e738d7fb5847da7e5fab3899feb74aa53f3867d5ca7553cf7ba484",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sudoers/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96646c5ab7118c53b8b722f6abe91a7ed4d3eed9be401faefb399d6a8f427c2e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/files/sendProcessStdin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52c4adc7b3b817ecce239a59a57224a1f90f605909acac2b758a63c128ab9ed3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Darwin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168bba692754526a703f2ab6366df613d8eedec7c7725f26245c3ed8089bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168bba692754526a703f2ab6366df613d8eedec7c7725f26245c3ed8089bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168bba692754526a703f2ab6366df613d8eedec7c7725f26245c3ed8089bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168bba692754526a703f2ab6366df613d8eedec7c7725f26245c3ed8089bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc584ec73d1e50d6ba09cc6ca5809ac11da45f03f30df9b05cd463e1a73b7188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc584ec73d1e50d6ba09cc6ca5809ac11da45f03f30df9b05cd463e1a73b7188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc584ec73d1e50d6ba09cc6ca5809ac11da45f03f30df9b05cd463e1a73b7188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc584ec73d1e50d6ba09cc6ca5809ac11da45f03f30df9b05cd463e1a73b7188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_Linux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d7bc784d4fd4c7fdb11fc3fdccad230f106fd7d812a6526ac173dcd98e1a2f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/install_pip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66168bba692754526a703f2ab6366df613d8eedec7c7725f26245c3ed8089bad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8767cd4da7f1782c442c52a7365b37efc00db63a4d5c9cf3ff0574deed3b5fd4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/start_supervisord.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a930ef27805f4a73cbce29c923303e4a39e6dfb2a1884575ef288aa8bf437748",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "721fb12beebf433484cb0e61a8caf1d76f443d6f6d5ea52d3eb3e1f085270c8d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb98a75b5c4117753377c00a0f7078c06da56d1c676aec49a3dd87f006e2c4ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test_start.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c182d55c3754f3a9517f76757744f1fb875515e8bb66dc0b6cd5d01c268ad279",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/test_stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21c08abd747e622e77aec62e8b4e045d8a7b27a4c009722ed60fc3a9204ff50b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b4bcc62b261241bf0fc91b46cb7fc34602b82850b49fb6c65b5b040969405a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc584ec73d1e50d6ba09cc6ca5809ac11da45f03f30df9b05cd463e1a73b7188",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/templates/supervisord.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84fbab462c712e2e75969b786fbb1103b0714e4e3beb4709d882632ebc35b507",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34377b12e2cc4c9aa890453bbd728ec491df702d8062405b47eabea1924121d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/vars/defaults.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8caa40b591ba8b80e66cba4e3780d8ce0977854e9c324bf3abecd560edc210f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/supervisorctl/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27bf9d12997f0efb13b94f9e793e55b09da64c6e31dbf88b209f3f10a7ffa3f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sysrc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sysrc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sysrc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c0245eb91b4a52830cffc76315ce1152667b0b1e9ab8afbf2ae7ab306d250b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sysrc/tasks/setup-testjail.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18a1a0d3947d4b2b5e808323d0fba753d0e9bd3ecf8903daada38d979a01d8aa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/sysrc/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd586e26a9f899c57ee09008b55d9e82cb69dcd9459cabec48e834016d29910a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/files/complex_variables",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/files/complex_variables/main.tf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de08eea8b5873abf446ef5ba19ff9ad7b56e57bd194b679688ba5e5952f77005",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/files/complex_variables/variables.tf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "082cae243f44cce50a73a5e0641d37d462a95fa2287405fb5a8d641731bf1f26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/tasks/complex_variables.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e94e62916074bee8ca2a3d39fd0c93b96af2a57dbe923a12dfd1bd0d61731595",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "432b4599aa787306d4da0bb4d1ad0ddb587fa68f5ab403db3de01689d69c3ba7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/tasks/test_provider_upgrade.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74c57b9325529d1b5130390dca5e8afbd4df4a710ba99f9a80e39852847f6638",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/templates/provider_test",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/templates/provider_test/main.tf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0777f5f501b5e14e2c17cc94b380df94e606887a7e25c82211235c441e98f75f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7de1a026a1729e6f71c8468e81d3e406915744673067602f978ed730adfd53d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f103fbc0cc390c050d07f59389ed327b31141f8c1d9481591231aefcc7d153b4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/terraform/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e140221fda2b25d02e1e62758174b1ca195611d3a07b6f6aeac8e50ed57208df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "021d1a934f992ea5f85dab1d3f778f3f6f515e9c596e7bb17ec1d1cc21d38657",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "376a96ba549cd67e5db868b4db1884b79665333c7a955c95e3bad36684b045d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/library",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/library/local_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "708c8b792361c641fcbf34bde0b181e21e8e693e5cac5f14e865506ad5f826ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "935444847dff7cf6ef6476a46c6b5699e240f499b003363925d82e983fc3aefc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d469b85bf068ebf9541120274ecf32171aa4c1d9bd0f8c4dd0b4e3904a250a1e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/test_a_module/runme.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb9ff9312b0cde4942f978d69e772557e301ebd0f3b25ab8f78bf2638c4d47a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f54963c431f59227cb4725d1a77f2315139be83cd8937b88c103b228248d5736",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6b2ff2abf232879bf610cc4f94de8b093b89afa13d4d7517effac73b18e4d5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/timezone/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "743c8078dc2facc0d71e8e9a12e1ff72f1d09099ba83676899bdbc6b418104ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48030d0e9fc738d6ba9c53168919f2ce38a4295d17c010a0b053ef530192cad1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/global-state.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f5276c99003b56938a62e89a3dea3b37d45edc200043098a0f3a183e7274fae1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c07e2508aa7c67559253f60e80d0ad6b58a7dbef4c0efb3254e15b967a2bda1b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/tests/interface.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a90082f4c7f9c5f797ba06613181f96c5e00b1fae523cdbce1b07b1a6845593f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec7ff9b1eaf406300e477eaf048907908d2f2686f0b0691bc0e7802cb54d5632",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19d6961012b1bcea4d5f68cca8d44d9de709448bf1476ccf3edaa641c1c1ba5d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/ufw/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "084563fba485ef388e28a3fe0f6b4dd1e16d72f44716e33549508f94ead343c7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32247fe03cb3e37a13deb899ee9c5aba72961a795c388d071c9ab781d1141ffa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/wakeonlan/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97aeafca6d46181e78434a9dfbd80a826ba334abe46e42d18b6304d7aca14e3c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fdc141f74a397624981e05814d18898f707315c28bb4802d8eb687e0fff5100",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bfa19c3024fcba582ae0c934106ab0792de055dbaeb7620fbaf2a3ba6ffbace",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f458b5e7ae15edbff188d1bcb394ea1c98399a5e8a7ee087ad915a2680567611",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6617c098447a9afb434779ae2420e233d2b3bc77089badbfd75a1b69ced4401",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41bb14056be44098dacc11a3a525502215db2a6fb48b24b66d31b82b5290e40d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xattr/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "479eff810bfa5538dc0f81ee00e96d60b24db39494574ae33b0c4da524eff5c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c9e113e24f555edafab1df57c053d0b57f04b9baca3f38e39a32dec985a4974",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/gquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1cd74752b24a828f62caec1dceb727c3aef086ac3bac44720e513ba81c09def",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01ba15265bbcc038b56dd37c16f32870c0483d3c5faeef7e9467b0a31fb3d4f2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/pquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a72e46b67db918003bdcbc0a5e2f18747c904a50e14c73748aa1587c752024db",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/tasks/uquota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "545b300424749620c46824971005c5da7b6cc1676d6c3cc0a1738afe7edd17b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xfs_quota/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82463708196884ca388bc4b2508b5a1d07f6364b97dcb8dafcaf3804b5343c3d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b88157804ecb91179f87676a83ba7980af70efe935b17d39c18d05c298f57cf5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36e6ffd3c5397494980ebfe9771ba624b7d920e3ce3d7bb843f364675fbcddb3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-beers.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bedf0325ff7e7d595e5381906c3f47c18bfc4c9f67801674785b08458700032",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11ca6edae518cc531ab425daa9eb93f78f89b9ddd515deabd239e8c7925323d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fa0151a690b0594c7629130921e9f383b01db98d42473de70b5824525bf4a88",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40733765ef4b1ed5195aef3a9311adffd39ab5a264684aedfe13c59e035b5f37",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-elements.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-from-groupvars.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a5032712559b849626aaa6f23cd268bb38922e34278be659b9bb18e06a18213",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-from-groupvars.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertafter.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f12f8469913b495e3138ad3207c4a228eb99c584b016021afff0ebd565033e36",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertafter.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertbefore.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54632a063875c7558eddb674a785edd2ae3d6360d0988912ae3ee3f50c6f0082",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-insertbefore.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac74b656e68e2eafab02f2f8bb74eeba345e3e8c15982bf8aec3b0da18be43e9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61ee8e66fa7b25c8bfd7b37fed789aadc0fcf448a9454f5363d1275cffee36f3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-children-with-attributes.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-element-implicitly.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f51c7ddee9d1cd6e1bd7ab58dfca1bd58d56f1a27bd3bdecc49428a6a58778a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-element-implicitly.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a70cc2c5672f7d12745d33915ae18f4670c7b9f76a99a99149084f39f9cc5c83",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print-only.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36e6ffd3c5397494980ebfe9771ba624b7d920e3ce3d7bb843f364675fbcddb3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print-only.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "824b2a646c1c901e70bccfb7e1ee63721c9e8cee7994133dd166178d53e67065",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-pretty-print.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-attribute.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "638c8e9573a836d7c6b151d357cb4d5a959fbbef60e4297358a8c14aa87d01a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-attribute.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-element.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ce8925fb7ff7ce36c12e9f37abcd9b61bcdc6a37fd4c39b0098d003d3afa940",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-element.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f1d247d5a0db0cf3c51d318e753a9bae46984cc964ab1ba1c26faa463fcef7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-element.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "96d5f3aa563caa66fb127af51f35b41d29c0b210b453beadabd07c7812b5cbb5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-remove-namespaced-element.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "119dcd994362f64aefecb25c3dbee1641637fb01db9dee1302e59cd69b95c52f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc04cf848e913fefcc6325748f27df9d1a28cde08a7445252e6404e17d5c4ff0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-attribute-value.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30ed0c503abddaf122def4f143c783fc865f51e938befb256721ff7f1316d70f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-level.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "046ccd888bd6767af3eeafa06aef413cd3c45644dc7ad70790b64896c8084ff5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-level.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5dcf6091417109d747a24a59316f516336247d2f693e5437bda0e8c52e921e6e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8650cae4a3a3e4ae69c6b9feef9b7cb9361954f0c40a474210bea368236801a4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-children-elements.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-empty.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e14d49417a4d6ea9d46d8b544b7708c7f84e4bf4d75e853736e5acbeadefd75",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-empty.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-unicode.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae2b92cbff7bba1507470be1480491cbe0e41826fd9bd70dc2cd201cb228a96",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value-unicode.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e86bdbd7a2701e59238c6d5b5531bcfa657057234ea18e9a1228b28add01d8b9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-element-value.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82fca1545d380bdd2bce23d6467f5981fab651485f8ef99a7f3970c02002a711",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-element-value.xml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fcbc7ccdb47e5a2a556da77797f40748ea5a481213a59184adf369f5526990c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/results/test-set-namespaced-element-value.xml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e777b6eff1946f9218fdbbed7a94e0572ab20c5824a20a1b2d2f0c829e54e0f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30a11dd84c00b897c4ee386bab4fe923d4717404e94042c860d37dabe71b2888",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de7c6a59092c6b19adfab259df0827fb6dcf8d3067e69d0d7d55efe6a6f5a696",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "633a4e9e90ce5d0b41ecac56bab62fee3e048e2bff987747973fe9bb38b9a266",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-insertafter.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cc9c9204548d75e160465319e64144f5b651f709221c8e98a1bf4b4c2f570b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6820b0d360be9d1bae2fa26c75ca0e40817d13d264b309600223510a776793f7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36deaf43371e7e15f7addb84715d8ef731603527f9175c5b791c56bdfa3dcb6c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb33aafb06dfd587e9fa4d200c1241c94e439dc3f21639ef3292877b51821087",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-element-implicitly.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d01f8db51f345131cd18e0789cab644255aabea42ab36fa58025a74d46323fe3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3113810c293c045272aa8450021fc3c996d31055a58924ddf8026662f8f21060",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-children-elements-xml.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10ef765b7ddd273965f553f1db9847b0c18406cc6333d4d9b8d690edc2255982",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-count-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65ee4c4c5c3cb100476bbaf3fb52662852f7abb375844607cf33f9bdb7ab5159",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-count.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30ab8abc29b11b14d613e7698ad6f65c056231e82eb0aa0b6b8bf50a2f2da0d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "836cc4573a3463d600d4f4aacc24fec631f9dec6a9a189c1a3efe85bf0e86424",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-get-element-content.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d401903989ed7abe4c08a0e42dc50cdd150d3c3bb0099508a686425487960848",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b105fc54d5f808b9d104d2ef48cda65debbf70a5f9013abd712784c3b802eb08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-pretty-print-only.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "517ea072819ec1223665ed6823cdfc384d4095d5658d3434d4588e0435325b60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-pretty-print.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e499aacda525ca6e945a4e495effa8cd4a2d7e856d71e606bb8b67db7c16a24",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0701d891cfc62607bd0e787f6fe4d126d0e6800edaab8a26a1e6597970562420",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-attribute.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a8c930532403e846013fae049952b6fc75d8667c33dca4960e008efa9084c2b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-element-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e6afde34346117e8f01a2a25bfbd3842959582ec681c96ac44796ce945cb52e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-element.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5f0b5323afc66cce1014b1a3a5f704e033a70add49ded1ba7585f60ad788e55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de8373868a7dd93dd1f08369536f40a9bdb78329ceb8739c1d348fa00508a758",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc2b41cd3b970b4f8ba8ca1415623cf724a0de5ecaca478217fadf352f3d7e7c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "780c9e137dfc74b224fe4165e88e0d70465de0d3bb361c6f31e81ad5bcd528a9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d188f81b44df05e13f110424fa370dfecc5203a0f33ba22ac98374deabaaa00c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e864466ef0372aeeb7b286db036da8dec66f362533667a4fa011f1722ea7d66",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-attribute-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4ea36691b2043b62ff606785926237d04f20a5c095d82f81a136f4169c91ea7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements-level.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7edba87536c0c83e9671514e60dcda741a30a6344844d5b9a6e16aaa9533b808",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6ae99ae0df4eef2eb76ee3ba0f6fbbc250230fdbdc64b1b4864ffc61311a59",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fdd41eb7564591f44ef64b3be276f180ba58342a069f75e3e87ac6605a45953",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value-empty.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3404a9c91ce6f35cdc0b3b0ec6abd64873893b8fc65a3dd9584c32c212cb1e12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa7429ab9456eeb9ddf9a06e5c1b175fe1c403e5ccfd14a01ccdf359d18b0a85",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-element-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0415836e328cc85b548227908a31fa9148e3b74830a901aa135caf58d0b0e5b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a08f1adda1c81c6edb808364ac99fcc1774968652bf6ee5cf8b5e5a5619a3976",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c92acc0ea0e3b08fcf0d55af0032290a7f8cdc6706251b93a4b14324f002583",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d613676029eebadb13dbc291ba34f15fca62f8361c9f34daff339c3115b8f26",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/tasks/test-xmlstring.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abe18abc49bf97113a07bb809ec0896b9c8ff825aac99802213b3f7610e0acfb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7127cd471afd66059c83994a29fcbc3938aa74b6665132b13ed59bbb52814ba1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/xml/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ba9a096b1076645a41880e82e18bbf40244a4e85e9fc56eb22f4977e3f31be9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35dfb6bdfdb9176e795b5fe61ed2df04282630b6454d601463ec6fabb32e8e94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8900c16955af498f875b161841c1bd51f4b9e5062f7b4eedb3e303f49b6c7050",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/tasks/run.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f66005139c3acc10061ef420b5b10f96ee8f41d25e0de5f3d1758d23f425c0d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/templates/package.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b48d31a2bdc4c19515df06e8ae8c03d9fed21c13db904d08333e8926f01dcb00",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yarn/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "751ca5b7f886eb441d4852733720f3e833d71defaaf23cbf9887bf2ba432120d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yum_versionlock",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yum_versionlock/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yum_versionlock/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb77ebd2b7f93915695df5ce46a0ab5e6db7a62d0a573ff2d5ce2b5517477696",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/yum_versionlock/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d28262be36c4d29289551e419dddf685cd454cc41340e0f55402a49561dce8c6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/files/empty.spec",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77f59f1c05484da8dd181c8158c7ac48b5540a9a308c5f3872c52960c6317450",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/files/empty.spec.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2644a69c2074ea9e8de059979439af4f3dad4bac1dff5043d11375c93eedf69a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/tasks/zypper.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5cd461848a23694a52831a582cb2813b5a08e65e45235f15bae109345595e922",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/templates/duplicate.spec.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ddbecfb13351acd7fd67e8c1af845f01bd23dc7f519517d4d105a98ea1cb76f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be16e876dd72b54d14783b6c607788c61bfce521cd0610f1e6dd1428a9e14479",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae8ef885a6340b52c5099c1acecb442e1be091818983cfd515d7b8a306e60d12",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9394027c32adbd258e06e9ec92b050e73b6a16943c73b7ab418827eb3c50bf18",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ec747791eb09ed8c4f8a64ce5391b41d3d1c22199e7839aa48478c333786992",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/tasks/zypper_repository.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a31d67e341231b6d6ee63c9b64477ea4e0248d59bbd48a8049c1a7b7374a9b43",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/zypper_repository/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be16e876dd72b54d14783b6c607788c61bfce521cd0610f1e6dd1428a9e14479",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba68aec2cb5dfd3b340b0f3b0d6a34702195d67039c13f8d47d40b1eb1c75c6f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/aliases.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d825699416551c4276f44b44c20aeef37c9b357d7711c55cd15ee12dea293907",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/aliases.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/aliases.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd950d2c31d244eaf28d1822dbc63d273faafdb72c2ea55e22fb07a291483ea6",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/botmeta.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e56a83a42ca5002a300003f6ea560a036c684768e839c228af08ce501ac03b89",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/botmeta.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/botmeta.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f207a3b57027803486aa9e05811fa2efe76ba1d234de9face827b86bc8f48397",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c7fbc8a07fa803ce062387232eb0d0198a311f5347493f06c19a07aa45a9bf6",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fbd87476e9c35e4c5feb31be4aa1e8fc6aebf0de13058e5a267879f741ec0bf",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c111eb62fa6f1b6c8a1260e9ff06c8b76ef8244428c6289969d79678093618f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6a3f81c21d393ed12248ccafda772fb5f22f889c0bfe88f905e145fc052b4ea",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.py.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88f745b5d91e1371369c207e3392877af6f3e1de48fbaca63a728d4dcf79e03c",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0a3780ef0497719277e3dec9c3902572d7904db1a83953e0a46be8275729600",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7c385de70e0910e039428db73963993f69f78b50356722dbd34d1b953bfbdeb",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2348ca7c764b67e002e65d01e00c06a20eac1c2a8f9bf7c6bcb17dd6c35d07f9",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2348ca7c764b67e002e65d01e00c06a20eac1c2a8f9bf7c6bcb17dd6c35d07f9",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a459a712e8f6b2a8aed41fc1db2f16b6c6eea47a7d108deab3d98e3a6a7e54c6",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a459a712e8f6b2a8aed41fc1db2f16b6c6eea47a7d108deab3d98e3a6a7e54c6",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0637ae63c9c7aba292802a108d1f3284def1f6a94171a7d0467c3bacc4725d1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/builtins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a66bf5868ec79d871566ca33e62612c7467681405a2c8aef8a93a768c3deebb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bd66497039ee0b8bb9bbd7e1bade6ed20fd641ad262e99dab53b879e2173d83",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9db3b735dd4bde864e6c9d0f18a5a487d336bb4b60fd6b83088d72b4b05a021c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/loader.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7504fd9d17a53a787f6f7b286765a66052bf6b1d980f7e2800cef8fcee83e627",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/path.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ff3257e5355531f1bdf095252c12bbb804350e26ec7e177d077dce07a3e5ced",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/procenv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55c5bd4fd2671088854a080ddd885871db54b7a15841ceb68526b18d4b2ceca9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/vault_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f407783d54397d692f30ee067e008140ee4e125358d49fba61041a02c8803685",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/mock/yaml_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dffd6219684a294fb9dc8eb15b8fa6bdb2970cd12c68a3a694a7ba685e6be272",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98d19bb390115d83d328a0a17fc244ccdafaec90878fb915ac59f44e226c6bb6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "000efeb2d52da72504992e8156ef51f4e39dc4b842a19da167b4e3b23d215a38",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_doas.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d57a3f611dc71cb417e37b9d80756221d4f0744b339d7f9803b2b038a4f4bc8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_dzdo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67911770018400e0a2943bcdbf9de0940b3a59b305402ace35314022bce11df1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_ksu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1bae391162db2d58413b82fb49af1ff20529b6b90c0b8d6430e8d7552a829f3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_pbrun.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b0d137caa9792728ab96cf9448ecfd02b6ed0ebc7cf29c4ed0bedd21e9f7848",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_pfexec.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61bd25536762dc0f16c81970df0d4e6219998cb6776b077baf341e19371dec4b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/become/test_sudosu.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f28de98d498f1718d3297733c59a16b6e4af5dd8aed96cc3083997922937953c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache/test_memcached.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8103b0181b620f1b736abe52c2978b45f1f429cddc74e5cd51bc86e02639fd56",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/cache/test_redis.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9556dea9cb61113c79abd72b3066a3c03a35269dc8a3f9271c181c225afa224f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/callback",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/callback/test_elastic.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "237bdecbf4407f04cfaeb63cdfb03dadcf71b11e3e3291060e3a108286e32939",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/callback/test_loganalytics.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0edd676b99891b7c36b8afba59662a266df303848a66f67cc3c9d73c8db19fbe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/callback/test_opentelemetry.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98b067c34d9f991de19e01cb0b509a53d7ac43857dfd56bac0b03d34b4ca3322",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/callback/test_splunk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "561373bea8138ef0cc8901b4d9ce1f80ff841f3cec112a4449b89ecc49a235fd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection/test_lxc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8ec27b94b0e0ee8e12ead44e211c3fe591381245349647674afdcc8ab21e19c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/filter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/filter/test_crc32.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2daf095062de17446fc1421326e2f362ba866faa3cc3755e08aca2f8d49936af",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/fixtures/lxd_inventory.atd",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "162213d31385d92e0c3c9eee2f02189646a384b14132e7a3952afc23ffeb33a4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/fixtures/lxd_inventory.atd.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/fixtures/opennebula_inventory.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8c7746f8eec2f71cec741c1a66252cfd70e6ff597224821cb2eb936b432ab95",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/fixtures/opennebula_inventory.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_cobbler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03871abf3d1ca441fcd2080fb3f938a2da8bef56570648c9924c9a7a3488266f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_icinga2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7923583ee86d43e8484a9ea1af5c10b05545d9f6099ae70ef0dabee858897d9c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6d49875a49222175deba629f096b14dd83591f2a9b7b19762382f6d1f4fc101",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_lxd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "888eafab7fd96d148c22688e7a2653da657c31a7dfa94c2354edd1e2c5f876b7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f22548ceb36a3391031e716a39cc86c8886f467029266ed3505d049fa603183c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_proxmox.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec839664b7603f838f6e53564b22316c699e58804612a16760df67e474213bb0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_stackpath_compute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e04efd526b298e72fbe5a87a269f836ac202fde02db136d9482b951061519f67",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_xen_orchestra.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c67aa847aaaef53fe89282aa78e9664d60b3bbd868eaf428e9d961abca20e23",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba565f2dd09d86af6eb549695bf3dc0f53a61cfd1d75658d712aad6b85586ca4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ae3b2ea292cfe8de32a1aa7fb100a44273d3bd39a6c634c275e07763cd5a6034",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "322cb9a44f96218468199bcf9e8b1487999d27665cc002547d2147bd706b34c3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2816c9277e392cf97b5ac8c00016439497ea88f5cef37cfa9eb46c677ea580c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b90ff0ccccedd1d894caf06d281c5c1432516c0d32e5411ab686c255be13dd71",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68e94f8e8262af8ed9fda6ee5145af9bb9bddf179c8d451f9e95406ba4bdd924",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e5224dc439dee9ae03a8c024f2e0434464c463dad0caf2d2bace082be69cb54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04839c2a04633cfff4e774ca25a799a2ee02d73770398c7b3ac375d95d784f4b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/onepassword_conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1264ad544cc4af2407e82659bf4e3f74de25083af53189680f9adff6c392bf7d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_bitwarden.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f498f1ca02e67a8079777760280ba0215767362448e41cf9c4c320eccee0329f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_dependent.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf60fa53681c3c8ea0e1739e3925123fe79eba8ea299c297bb305270eda3f5cc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_dsv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86549a7a5f6f1b7aebeddb498873e80e3e9dd8de8a9a365d6c41b3d854fffd86",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_etcd3.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b32a0b3df786e792f83a150a8ba37759291af583607c68d171620d5793558f1e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_lastpass.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb9f41e3ad50a0cf247ed29a0667b62bf569270dae4d71d1d36e4e8b4f31af34",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_manifold.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2b13a4f39f1e61d2b992008f241e0a94027bb19e246d7b1e511c9ebe7aefa72",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_merge_variables.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7d90bc6b420772235e64e370cdb68787476f016c97f1a331a154cc7edf16664",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "383ab17c21d213629460fa34515aed38aa618e7982671e49282a3657aaba5410",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_revbitspss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "841697fb600318dd51f8b602f705e76e724eb244f2f1de8fb098c8f4dc2db655",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/lookup/test_tss.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a528d5617668fdb9f0d0d4acfc17e26a9028409f2e343b575e7039abdc308d24",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud/test_backoff.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f53abca722ae7e7fd6c4eeefe76fca0f1e3f114b1372eacddba4578a3c8ed00c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/cloud/test_scaleway.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cad880e7e4db3577e3beecff5806613e380cca78b1247b966aec3213edaaa0ba",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc/test_dict_comparison.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a651668d26dee93b1b68a896e68d83eb82b0fbc92c614b95e00a5aac90f9c374",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/hwc/test_hwc_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c939a7d89cc202fc12e368fa4e82c4202c7671466bad41a6733f42c3b59fd6a6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8332b593abec0a31ce6396f374b4042413ea33bd02734dab5a45cfe02b92293c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dd23d6bb73d987a5df4c15364947c679ff154af37a3effb57056d70c2a0d8ba",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/pritunl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4bd168a943eea41f6ba372e3eecdcab84a61ba0a9becb5b5c59003bd3e0b0258",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58e21893fa65459c9869fdbcc9c79299cc01183e3a10cf575cd75a62ff366e58",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61a935bdae44191686b63826996abbf2431834febaa54e4a9e523aec016cdd61",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "728bad77904f8e3d2539810fd0dfcec6bb24621c78406daf4434dd611042da5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b994736a7e80d02c759c7b19977101c0c04ebc1c8460258f5b96f595b9daf037",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92c62d837dcef25a817ac3a9903d6a430b0deb44848d29ab5ac5bb6eafcda526",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a8de78de7ba45268294a48c99a82a957ecb3da299ac9036264308392b14106b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92ed90d23e1fe3de0f2851f35ab29f410769372e882a2c93758a8569c2b7cbca",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f32fb97b3cfdfe8f4ffd5b921a8111aeb264d1e1d8147f72011a7ef102dde5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb3c3e11884101f42cf4d91e286fa3200c6c3ea1f214b7dd58abfc22c46d825c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca8179137adf1114a3498cec0fbcd575477c33673109ce91541a4095154343f8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0400cccb451f6ed812c53cc84b968d35799f84939a338e88fc43a1378ec350ac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfbbab96b504c11a7cd2aeba0e35b3e674f5564c573328f060a44b9b58b17110",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_misc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ab0eef8cb0cc823d93781681cb89dbe649d758e475dc2f64bea821ab4650c16",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ff4fdfc1836c7bac3f0f1db469d1ba5bfdb3d124e464f7309a6237d7ee724a0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1597ba941f58231315eee54df969f863ef179eac2bc506bd6cf5111d8e2431a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ac9a065ebcc225815fb86090bc43736330487639565674685c9c2b6c2e91d75",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_xapi.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df17d30d285c01a4f07cffc083342f6d0b48c624b3e0cb46540c052874f3d572",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4e1592b6f4d49c925b017312ecb05a5aab66ccc6704daa3d2f6df8552873455",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af717e5118174a382596601c960128636baaab39008d7cc43eaf772bf0f028cc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_cmd_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59e3f0ebd7da5145a43fdad5ad47607b980db8c6875091956888e588664d15b3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_csv.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f2903477a4a2484ae521828ee5ff6ef3e83e758099d1b40369b5807e8fa73a8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_database.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f70438169c03e003d31a3fe2e0895eb5b74b63e3153e09d861ee23c004295ece",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_known_hosts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c496ceef78bc45a00d3c0f3054beca18461f3430a816a153e30f4740c3ab31a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_module_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d078f60eecd45ad986d514417ea96ea3dbfe4560bf8be72046b3ad80b9458f7d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_ocapi_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "835e27ea2184bd07df63c50e70643b924986100357eecbd9b3464b454130063b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_onepassword.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2c0347eff6980d51653e4c3cbc202cb4c4763ff8615cbbf3ab3eb1345c19cf6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_opennebula.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ac9fd62ca191e572a92a8aff1c2ee2ac2d1cd8111f7636d58d60c92a9ee9d82",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_saslprep.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54a34c7444326b92fe5e0545105ea6ef9f250415174471a107511e93b1abf9c9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_utm_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ed3af71818b62800ad2adcd2ce85c53bbdf62a8208b5753acfcfb2e3beaf533",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59099684ed5b9a2b4836c4fa8151dd6a36ab0f2e0b8485a1a3974dad44f4e96a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed25c68446849224c5906ed7a79cc442dd629c36b4036592b79c2ed097eebd10",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0ca1de7056030c45807554ff8b46f24dffbde275a736f667a4c995b910ef91a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "812f4a529b868eff6dfbc2662b6bd8f4f3764f7d8366b8b1ac0b5c1216231e62",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6653eeb79ba4c679c224c9c43b4a0bde5075c9795cd5f446d95560c883df1c67",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4dccfc3f80598ea3f2f32f6661b3b5fa6997e6d6a0e9e2f3cc4648505ec7f52",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53abda66ee9a035f805bb82fc7cfae6e0b17f42663158bd6c7af5fa2b90aea88",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "134f64892e64e650bffb29c928ec0ab72e397c122f178417e99cb56fab5c3b2b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acfcc70084877cfb0c89871e02d24ec9711b22085f5f5fbe4ca8a69cf0336dcf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08747ecd380971329c1bfe12df432f00c64dbbcf346f4c14ec799dfba42b2b1f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee8c253d08fbe43f0e89d4c7fbd683c8e6db873ab6842018caffe9a8091def19",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4eaf8c9708b20dab8fc90b8b2b5716167e2bc92c1c1b0638ca82e11323f78199",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5128a54fc416149c84afb10c6b2ea85131466a4e2fc799275bb6866f6b7bfb9d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a136123fffed58a1bdd91a77fe4e777556de80a33bba6671b0b7320f49854b76",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b28c487ed9aa793623a50c353d0aea6a15b56327973e3f0a50dca358589bcfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59099684ed5b9a2b4836c4fa8151dd6a36ab0f2e0b8485a1a3974dad44f4e96a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed25c68446849224c5906ed7a79cc442dd629c36b4036592b79c2ed097eebd10",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0ca1de7056030c45807554ff8b46f24dffbde275a736f667a4c995b910ef91a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "812f4a529b868eff6dfbc2662b6bd8f4f3764f7d8366b8b1ac0b5c1216231e62",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fda07e9a6f93949f6f53ba8b71054114024b9d1d612c4455b1ca5effe630e5e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "529e2e7b36f6ec834edb09878ead526156aa9d5349a5cedc1194796d30c7b7e4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9db4000a5df22bf6923e3c3fae4171698ec097639c4e94297297af729fc0dbe7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e1415f493e0c8f211d0fa5901960437f8b83784a46663de28e336b1b4d040a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22bcc220e7dec90f9bd62ee4116f5c531c85305bd78869e762f54d4ba193319f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07094c32c9b7ad391c0bd692f8025965b306738331bc2bf69988fa7605f129",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee8c253d08fbe43f0e89d4c7fbd683c8e6db873ab6842018caffe9a8091def19",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70b95830220d518dae6662f2e1ca836dd9c8adc1823351048cc53db8c865c33a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5128a54fc416149c84afb10c6b2ea85131466a4e2fc799275bb6866f6b7bfb9d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a136123fffed58a1bdd91a77fe4e777556de80a33bba6671b0b7320f49854b76",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce33823888fb76e34cc4ffdd69776375136a30886bf8dd25c5f6b8315ea0e06e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59099684ed5b9a2b4836c4fa8151dd6a36ab0f2e0b8485a1a3974dad44f4e96a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed25c68446849224c5906ed7a79cc442dd629c36b4036592b79c2ed097eebd10",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a0ca1de7056030c45807554ff8b46f24dffbde275a736f667a4c995b910ef91a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "812f4a529b868eff6dfbc2662b6bd8f4f3764f7d8366b8b1ac0b5c1216231e62",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6564ba760c8328832753ea281bcc8fba04b8cbc9c670a52d4fdce131701184e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8cafee313ee74bba450ddbfdf1ab976a71e97edf9d084f990dfab35fdb47412",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "798069560856e00f2786be4511a3983a9a26b05e779712df8b5412d9359233a9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e1415f493e0c8f211d0fa5901960437f8b83784a46663de28e336b1b4d040a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22bcc220e7dec90f9bd62ee4116f5c531c85305bd78869e762f54d4ba193319f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07094c32c9b7ad391c0bd692f8025965b306738331bc2bf69988fa7605f129",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee8c253d08fbe43f0e89d4c7fbd683c8e6db873ab6842018caffe9a8091def19",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec939b58678650ddb48e42cc8d018b9ed0e215e603c9ed636e0dbc67d4d75616",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a962f83aad6edabe9f2b5a25d0c00f3f903f2a69ca5fe954da851b266d5c04c7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5128a54fc416149c84afb10c6b2ea85131466a4e2fc799275bb6866f6b7bfb9d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a136123fffed58a1bdd91a77fe4e777556de80a33bba6671b0b7320f49854b76",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1df3719c2aa41c23d3935e78e54a26dad2590030784f61bf335ac64f5dd2e2b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7931947554451b1a241d07eacac42d91143414f385e5ed6e99b5c6039d26fb0c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05580b4b870d769e6cf33792346f032b73b408865c51cfa1bd51e230867f8bdf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "695bae4899224d79bab66fe018f61cdb0c945d88dc31baa5045eed88593a7b1d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3eb0f49f59d1779d421ae472897217aa0cbe246a56ba7e611ff435cc8baae39",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e1415f493e0c8f211d0fa5901960437f8b83784a46663de28e336b1b4d040a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22bcc220e7dec90f9bd62ee4116f5c531c85305bd78869e762f54d4ba193319f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07094c32c9b7ad391c0bd692f8025965b306738331bc2bf69988fa7605f129",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb32a11d2175d165ac30d4d96265aa7890de42aad1e4c03fe862db31a9b609f6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecd86f265499aa203e6ee9cf161eef1127d6c166ed6fe8d27772c628d6c8ba46",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "765acfe0c09a64b68f40286196b454e433fa6b0a2a780c1e5c3e2ae8f3c6f28e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07945be2848b249d636ea429313c539ea4c9f921780e1d912b6472561821143c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fad9f187d14f11e0c62b3fec0697bc3829e4474749b58118658ba18c8fcf5c5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d65afd09be4ed2e70dadbbcc3691e8170b1e819256795dfcffb128a41a880d3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9366b1acb6d9d2f5750cf14b97fd9d61fd9a32d7292e3201a82fb3ebbed34647",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "691c3c19b3d8ad7ab347c24c006da07ed165f4f6161216dfb90da0f2ac922768",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f98b88779a53ef7a5c4b2dbcdf2229493bb1b9eff316d9b0fab32e2bf45ca774",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05580b4b870d769e6cf33792346f032b73b408865c51cfa1bd51e230867f8bdf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "695bae4899224d79bab66fe018f61cdb0c945d88dc31baa5045eed88593a7b1d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3eb0f49f59d1779d421ae472897217aa0cbe246a56ba7e611ff435cc8baae39",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e1415f493e0c8f211d0fa5901960437f8b83784a46663de28e336b1b4d040a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22bcc220e7dec90f9bd62ee4116f5c531c85305bd78869e762f54d4ba193319f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07094c32c9b7ad391c0bd692f8025965b306738331bc2bf69988fa7605f129",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee8c253d08fbe43f0e89d4c7fbd683c8e6db873ab6842018caffe9a8091def19",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "765acfe0c09a64b68f40286196b454e433fa6b0a2a780c1e5c3e2ae8f3c6f28e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16edb798abcd4c903a6812211f3b9f3ee149161f86a0036af50ce1df0f7b224a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fad9f187d14f11e0c62b3fec0697bc3829e4474749b58118658ba18c8fcf5c5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4f53dddf4a96187f7318bbc97ed3774b0f66b870a3e1cc0dfc2862832fa516",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a447e55dc6c3d14825e754ff1836a20230b6d99c605647ed9b7208f981dfaf57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3e14c9b11e9a08419fca885ab31abd8e775053b8c2b608037626ecd7ed01595",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f68a4e8b47807998b4faafd014844326622ef227b2fae85afdb982288e59d47b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f19b3b156a10a677a4da66d93d3fe72cd727387de5801f412f593f975a2587",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfe51ee900cfe3dc3229efbc0906a41210a1a2ce90da7721c8b3e753b2cda522",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a26e395e1723d7835d1dc9c170c2b3818b2adc4d9c49651fd6d7641524cb1a15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/interfaces_file-README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2c54932251e815b5c71672d66b4ecab1ac7112bb4a712e8fea3a6fd60aabedf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98f9d66c049cd866da633a5b4e453a756ddfb4e6373aae570cce6e05251b9546",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/FakeAnsibleModule.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92ed90d23e1fe3de0f2851f35ab29f410769372e882a2c93758a8569c2b7cbca",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/FakeXenAPI.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f32fb97b3cfdfe8f4ffd5b921a8111aeb264d1e1d8147f72011a7ef102dde5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bde9b8ecaadc8c84ebe3f48aa1f18cd1324889a328f17ace67b745446ed38003",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/gitlab.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e03e56f1492f532260e19fba71fba8a6fb459fad1d7ab255ec7bdf64a7fe5fb3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/hpe_test_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1505a381b7326c02b6292ba6171a8e0caf05382e985cef82cd3ae834f94afc6a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/linode_conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "499d22d611efaee874324af84a6dd32a7aba6d52eb241bfecef71e4560f3f436",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/oneview_conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "782114173aa2490a678a47a0ab36e9bc69cf61db35de6e88f359836056d28737",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/oneview_module_loader.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "880dc637677c8f8da7c81bac2ab9544e160c885b352e550353d158295a8e9826",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/rhn_conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "301e21dd2101ae0e070888ec4831a42f7740d4b805e8577233de2f3e3df2ceb9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_alerta_customer.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc01b4596d13df367ba9f6f9d449f5c244d2bda90b2b57acd55d679ea650b556",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_apache2_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad72c3a3187d19176efd9f553e8ddbe5787ab71f7103eb176b8ab6e1b180116f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_apk.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d183a0cfbe779a9f6c15e00a0682ceb42d13f7f058de8dc772cbb60e64d8e9ad",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00f1b5dd73927f250dd150b299e84e332d30c0e4872d5c3ae56d9f957c1ab11c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_bitbucket_access_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f25299b30465ec63b465cd89f4cd902309ef43ebe3cb2dfc05765a6f0a8a07de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29fc34442570a9f730fa749b1ef32714e532b4bbf8f767482a5e0bb491277659",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6b1b45b481a7f367d0755fdadf44924ec556b10a4054f045eebe9c762892ffe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d196cb3d85f83c6a11f290bc7cc1fd2a0bbf167b18e18c73a1db00bb61133b2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_campfire.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "366a6acea057887b4540d9e1109a295800fab9702c4c137aff79c309595b9f0f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_circonus_annotation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c9c7f38aa9c3e9091af0cb38730b69491290969248a4766edf571759cb62955",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_cpanm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5eca61cd3da9592d95ddf8330313b450e3cd0cb1d599170a8c5efe7b2f5b470a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_datadog_downtime.py.disabled",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "004ac85e46ae6595468ad9a18d098b9552b0a86ca187397b6086b3680e0d43ae",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_dconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7f096f3dab6ef3321484bd01fbdf8035e0763f162cfe43e970007e4d40730de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_discord.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b8d2ed6fff1ec9b96a1c4fb9498504675cb4f87fcccbb9f5cf0d33cb2772b69",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_dnsimple.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1199b611a25718a42cb1587f29309a30c1c0b38b34234def4103faacbd67d23a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_dnsimple_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d6920c0b0abe6edf0ecdafa076684903fbaae14bd76c562dffc4a530fcbfac23",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gconftool2.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1d6b256e644b88ca86b4064b809e15af3ced724134f6eab575d95624684f0b0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gconftool2_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4c348e06dda55c92674106ddb34f0e1c99d3c2c5699a49cd3b3af00999a17d2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acf58572377785d32f4c0a155410b32188f64296a388d7e055368d8551a2be30",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_github_repo.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b1ce4d62f476dd33f67aff61e57b057e95dd5eabd011babe9fcb50c8d4d8f29",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_deploy_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "992bea4ee28e83d69924864237756290f7900524469a870efd58f623e7c12974",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "144bf6821e62c2e71ea7c11afbae5efcfddf5b8bb3216906a36bd8eaebd6235e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_hook.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "923dfab2678dec755592e95f82d25ef20234ee9445307093fe1d0be5b8181c5b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_project.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77cd3341093a34c0db41f1fa54efac66ca20d4d232f322433a3549916cd8112b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_protected_branch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d3c29f687e45139a7f615c42635c842a9fab7140750d72434b8f077fb10c504",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_runner.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68655a93a62be30c5447a1cbaf84c90cb0ff91e669e9e02ebe075ae5a003af69",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_gitlab_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e9ccb12db59c04cde76fcfe9410378a728d426796d175556c581f70749f972a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_hana_query.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c51841c4450843ab7f3b2445a0ce42b8fb64fdbbe26657dca5aba166a6b60b9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_homebrew.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e69d96b396482e1418d7159092f4985ed1eb5ecf4370e9ee1efabb0b7e970398",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_homebrew_cask.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e58ce089f9a8bde5429e9dbcae4e54a9482730c9a9334ccea0b38eaf367d18e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_icinga2_feature.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0473164336138c750b0e13401ea9c6cc9d33eba551b02cf087e25506143d467",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ipa_otpconfig.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a669fafd98aad99476f5ff21bf23c85b4473dfd02975b144f6198529abfeda9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ipa_otptoken.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2938ebfd66222b52d1ae68081273901992d634e88039e2853309bf540b0bfc11",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ipa_pwpolicy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df5849a5efdf159aa698b817615cec46fef0dee8085405c100027f15038c501b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_java_keystore.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e65dfdf3ae6cdef22bc5e80b211d0ed8de175d0158e2a6d6c109f96455aa976f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_jenkins_build.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a08bf75c34c58585fb721b8d4214b6e96f8bfd3fa1c391ab06af19c62ad7b6fa",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_jenkins_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "16c8710c9e7864f2df2cd968d6294c3bdcaf94eb9201c8738dd69d55e6c18496",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_authentication.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0eb5111b40cbedaecbb64f658563a20c01cbcb494ff5e37bd8378c7fe637403",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "202e69fe36022534e4d36ef63864c2a43c0be5ccccc7126b20e51b4bf0c9dbbd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_client_rolemapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef725bd67a94176b71c2dd50c3a72e20cb30e87ed7cddd35b150f913125d2d9c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_clientscope.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4f687fdae301534f7454ceb84d19f2da08dfe291acae8e7dfdfe0c46619437d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_identity_provider.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9769e9feb1fd0de1e61d09235290b37e958942e1db1a17eb75c59bbf345d9462",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_realm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82d5218800e8141d85d89e263244054cd63d3bb37d96b53487e9b60443a11a60",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_realm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "560cfea146f90782333d2ec8a38b025d4e749a71ef181b11a3efb940d46e345a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_role.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7caddfcf79f22c4edd920a9309247a02d583cd3d5b92f97aae30a2ea852d262f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_keycloak_user_federation.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b399e09b344c3de512a5daca087fc519c3af3a26bc039250ba619925e31e8f61",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_linode.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ff4eed418d555964bba7f10e6ab498ac2f3fda632ea3f154b85aad74bcd2f82",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_linode_v4.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "623053225c0bccee072031c279afd1503596af68fc99b71f2def17386cc0ac87",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_lxca_cmms.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3941ddeb86056bb6d9374cab59c09abe4d231b739e25e6c5887ae007c2b9b925",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_lxca_nodes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d2ad2e7ff0a43193ef972530e99f59afb132e45ae93da057b2bef47c606ad68",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_macports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b33d8714b34307e86daef8ffcc9f7165d72d5bfb55f2665d4a334fee9cc314",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_maven_artifact.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bb8bc06cfa3873f652a041b5363643b7670c4a245ae64612527526d7f3a5939",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_modprobe.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb7183c6b205d2bcfc1b5ea9bc6b263bda61584ed09a112f386af9c47034a8e9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_monit.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7528463c4da674fc24e388af323ee4e1d5875834abb4cea9dd64c0334ee6c806",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_nmcli.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20c5fcfd0e0e12d1fd9ec9a72fb3731b637bb9a75531abb3788c7719c59790a8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_npm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f3cd31f4a5598afb5e772310574ac4ffa10e5d6806dfe531804cb2ac4d4c40a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ocapi_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94950d3748dcc3346daa9627aa02384ab198303cef58d8dfa8fc5da5e094a4a7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ocapi_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d548159f589a57f3300da36d5f92d59b693015878cbb0e60552c2e26f9ba6f8a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_one_vm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02805cc28c3b416191a1a567a80590e3ecfbea5d6284149085c6af0f101a1ead",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_datacenter_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b681f0f6f4594ccb8c73799b9529e833f50f161e154989b82ba4808d05253a2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_enclosure_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0bfe9798731236a3c1cfefea53bd91072b34154b184ba0649a1ccb24fd93f58",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_ethernet_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43d3fe9e48b7c7a7746b30f36c3c2f9dc71246286594ff83a36c098467926418",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_ethernet_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57218303869d1618f0df507443b219e4dba2aa6ec59fbe94c78121258ce29529",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_fc_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8db90e633f4acf9af928c8d0e403d0cf9a64446afbc0e631e06bacfb9e118e1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_fc_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "552d7eb68db54bb5add6488f5a34c324cbb1c2a5a2c9f871fd0867df9493d2c0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_fcoe_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a46c9dc554862d9aeab8b03bdfb1523572b42730a75b4eaad02029afe5b0d66",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_fcoe_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b05db653732f3896a9587f49bfe2e3235acc46e9579e97044f1f6e5f142fd0a9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08e9ca0e985a6c67c12e14ddb7ec69f2958c2393cd4f19acecde9b5b53511bfe",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "636ae4b9c923d8ac5ff149cb42cd23b3cd0289be95b3aedaede5b87eae050aa1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_network_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe27781e4704c35da8f5fcda8e43a05efc2eaaacd4dcdc9c2d42e9b3a4ed44cc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_network_set_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ee125d2a946383162a71ea3ec340aa707b8d182c40d3b0f3ed41713ebefbff6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_san_manager.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "868b447eb8647554693428d392bc32f6823e6310d75cd9ad1278ee830b57f2c8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_oneview_san_manager_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af245295db40bf171a06d15fdfd1b373871dea0113b0124fa999990e5b72f4f1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_opkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40515dbe28269da53c99ed6755eced47af692d04fd704813887cdec759a64e76",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pacman.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "361a2af9786bc238410f555e141d3dc4cd8c8f364befd5c9fefda4c6a6e092c6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pacman_key.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "058ab5f79eeb676ec3d55da9505af8af7fde369bd7bed13fad4f9c47dfe6ef15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pagerduty.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5b9304daa4b3b063e8d38ca0fb203a801b982c41ec9b092c7324230091f243b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pagerduty_alert.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ecb84e28adb10476d71e031112ab241e5d65ba903e0236b01f670a42a2d7f9a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pagerduty_change.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ba550d05d6f86860f1c2736b2333be8a48a9a918c8bd41f4213717050a2e9c7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pamd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d063899ab71e526e93fab7eea5bc4d315eec1022a6f4fd5a9183492f99f93ac",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_parted.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3995abb5abf52e8dcd2d74de978b0cd6a822abfbcf68aa7056f3774a3e94d4d6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pkgin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec1ddfafcc64745b56a64322e1e738e1804ab2818a60f407afa36f4cde5c0cc2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pmem.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "698782732a92e869f5a9318873d0bb1e285bcc2e941b6649b93c6af15bad8dc9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pritunl_org.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a24769d4908f931e89ab8d17c5a3e3d270415b1d96e401271d773939a809271c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pritunl_org_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f55fa3d6e222e8533a41176e6dc62ec25ff4fe5cf6ac2c100541ef85bc8e4b0b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pritunl_user.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b23480c4beb5fbdcd387a7d19aeb9905e641a706d77792631ec322722fb4824",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_pritunl_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ce6de1b74636af026dfd6c138b16ad6fa2017e43a5ab45e3f78321e581165a3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_proxmox_kvm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04e48908fa164cdf8985990322c5de527240f7bff5585d34061e2a45d0796327",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_proxmox_snap.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7ea09c7bf0c5c7e3bea29939ef1a62809f2f71a0cf6c8bd44c387534a8d279b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_proxmox_tasks_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87e2f651fa4bb9a31aae30769b6f457c3239edf4bf0c45b5a42e1c75cf4c0c34",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_puppet.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc1d25520eee8b3f7a076cd16658189dae51eaa84e0140308bf1744ec6d3e590",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redhat_subscription.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b0cf4b34573499cae93cb33bae60249882486335e2ea255ad9f367929924cdd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redis_data.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b268013affa5e0ed5796b3953cc122f96388ed796d72c8d2bbcbeace315d1421",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redis_data_incr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1557311dfff4d3f4d387933c8ba5e5c1c82e8210181dfdfe19a117e4f3fedd96",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redis_data_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c2224dbe9b6fbdf473fac1ef0c3b1b0ac56a449fd3ac44bfdeb080cec2d1882e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redis_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a334e4a805528f22d9c788b25d16f18482bb6cbb11b448590be2981497514b76",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_rhn_channel.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "023e31dac7a2cbac6efd0928f433fc83573b48cb78c150e90c898534f01643ab",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_rhn_register.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fbdf8cb999367cac2bb1cacb7e0e47b886e47aa0172323f8dc79e15425e909b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_rhsm_release.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "644c8f2b28c3b76e6278dcfba81a7ab528950d9621c76d59b09e0b0a459d9cf2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_rpm_ostree_pkg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9034acb1f847865d6bbba4fbe0ec2b7a49a08fd7d59042bdbd79cdef06716a17",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_sap_task_list_execute.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bdc5b3e4764b387eca5d8e2f13a22df0dd5f0af65180c98b59e5d6b5233cd156",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_sapcar_extract.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c91b80fc5550b96998de44ba16994cebfd5f09340928eca3608dc4fd23ddfc3f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_scaleway_compute_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c421141d861007f3a63f158c96bb162a0a0151e3f25168be32b35494a6d3fb4c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_scaleway_private_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb660a5f32ef6d680c4f1677c22457f24cc938acbea9ac3e1e8828a0ba2402d4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_slack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad7fbf9ae278156ff410db7361e0c5ded24ca1c4d5ddd149d885e07717b2f031",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_solaris_zone.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbda3c21edeeda3d09633c30e7fb4a612c08e89bdefe1f72be98be88e7cfe9bd",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ss_3par_cpg.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb8ee4d8037fdbe28516d268b8f68d89967851f397848bdf057bf8a328c10892",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_statsd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "513bbb43dde8b278e10fb45ac16e3e818b91d23d9c46bf35da44d9eeeb168552",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_sysupgrade.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34ed144eb932f4bceadee5622cacbb29f6e6375256fd45b88b0eff5e60e9a629",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_terraform.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12d0d08b9b914c0f6551e420908cd1bd684ea98ae438c9d01217728dbe114e40",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ufw.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "345000b759f7320f48d24376d2f64faf5a3e02876be24b94838c9b7da31b0d24",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_wdc_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8b81990c9949a836a755c0ed8a8aac9e7a5c5a2e28b498f0a26bb00ac90f3de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_wdc_redfish_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d57ddd0536bf8f3df19812b178141b1b12449e31a564642427893022603b2ca3",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_xcc_redfish_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09ef6f20d50df2d3c367112bf4fdb85ec40220f0952139a70840726750f35212",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_xenserver_guest_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d45a9b748f88f04c266986b58bc74c382a634e21119411e20512e4b44dcd624d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_xenserver_guest_powerstate.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6576429e9fc3a0031752529cb8a09b5cc131dfb0dc1aea9cb48f7cf70bfe79c7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_xfconf.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e50b18406f4bebb6640964a9a5413f69d4ff698efce29abeb3728d59c6acea89",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_xfconf_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adb44156b0f1f43c30b0620d97e77fe43dc9559b3bffadf2cea82ff995e9017d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95a8b0caa2b77f95bbfc00332b35c0281a5fae447435d72cb75c512bb54667a0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/xenserver_common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "056f3eda58e0f2b9719b6361ca11cba2ba551ddf7824456644d768d3fc6f3965",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/xenserver_conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c1e1266ad454f4c513ea1043828f019e84a914a601a395f7c59193d6881ffcb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b783ac041b809d1250b2b7c45d8c1979b78e043e2b1ea4df0522bbac747cb06",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "999b6bb4a4234b1f38abb63cbd57d40af2d93bcff2260ab088e09157a055abaf",
+ "format": 1
+ },
+ {
+ "name": "tests/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/aix.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/alpine.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/fedora.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/freebsd.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/macos.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/osx.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/rhel.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/ubuntu.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/generic.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2dae415e9b5a6135b2f6b82a647f65f54d44fcae90363e58cb1b01796a327d01",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux-community.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a94ce1b7e6ceae1382a8e786545fc0378f0c8eb0c09204da18b1d8e319446850",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73ac21c284e9957f631402b0465e409033a2f68086764b278e3289b0f03ecf48",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/remote.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab698807cf7f8bf7aea1388fd23f523d5e96846d9d81698feafd83bafdd6da39",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/sanity.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "41e394cc80c02fb2d40e00ccba0654e667bdb323a1f5cb5658fe61d4a83d2bbe",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/shippable.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54ce4f452428c1c57cbc0e8bfdd03a958370f3c8df25f248cf5902cd1c5d0d34",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/units.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8a32b28e0d8ce42083e3e8db4d264529412a635abce0c989ee837000a80238",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/constraints.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d07bc7e664261cae0a8ea453132fb629b5e491ad923e4f18a4adefbc63f49cd",
+ "format": 1
+ },
+ {
+ "name": "tests/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58cfc1cc2436abdda80f8c752f3157c649bd38c86379f93c653dc0f7f1deb766",
+ "format": 1
+ },
+ {
+ "name": "tests/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "498d46cf08b5abb09880088fa8fd142315f7f775e94cdc7d41364ed20fc2cd65",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6aef8926f289080d8a3ab9cbf9dddcd411a1e2d2e4ccd2c138aa056805ebfb77",
+ "format": 1
+ },
+ {
+ "name": ".pre-commit-config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0acbe42d25a24bf2ca023947755c7e4a9c695c2e12a6ae909698d86d6f7d2e35",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e1236c2e69e23203c68ca9b688de63ac0079e88280d5c50e5647c2ce418ad83",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b00274eb3feefd31dfdefd9f17afe6e2d1e3627d3e2ff6614bc646189af7b40b",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5b28f24a1fee9d7f4028ad4c9bbcd239390e742fce58d7a72920ea2e706c13a",
+ "format": 1
+ },
+ {
+ "name": "commit-rights.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57c66bff089b6e2f30c3e2f38febd24fc641607d7836d4068ad2d34c8b7f5643",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
\ No newline at end of file
diff --git a/ansible_collections/community/general/LICENSES/BSD-2-Clause.txt b/ansible_collections/community/general/LICENSES/BSD-2-Clause.txt
new file mode 100644
index 000000000..6810e04e3
--- /dev/null
+++ b/ansible_collections/community/general/LICENSES/BSD-2-Clause.txt
@@ -0,0 +1,8 @@
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/ansible_collections/community/general/LICENSES/GPL-3.0-or-later.txt b/ansible_collections/community/general/LICENSES/GPL-3.0-or-later.txt
new file mode 100644
index 000000000..10926e87f
--- /dev/null
+++ b/ansible_collections/community/general/LICENSES/GPL-3.0-or-later.txt
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
diff --git a/ansible_collections/community/general/LICENSES/MIT.txt b/ansible_collections/community/general/LICENSES/MIT.txt
new file mode 100644
index 000000000..2071b23b0
--- /dev/null
+++ b/ansible_collections/community/general/LICENSES/MIT.txt
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/ansible_collections/community/general/LICENSES/PSF-2.0.txt b/ansible_collections/community/general/LICENSES/PSF-2.0.txt
new file mode 100644
index 000000000..35acd7fb5
--- /dev/null
+++ b/ansible_collections/community/general/LICENSES/PSF-2.0.txt
@@ -0,0 +1,48 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/ansible_collections/community/general/MANIFEST.json b/ansible_collections/community/general/MANIFEST.json
new file mode 100644
index 000000000..fa90daa61
--- /dev/null
+++ b/ansible_collections/community/general/MANIFEST.json
@@ -0,0 +1,30 @@
+{
+ "collection_info": {
+ "namespace": "community",
+ "name": "general",
+ "version": "6.6.2",
+ "authors": [
+ "Ansible (https://github.com/ansible)"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "community"
+ ],
+ "description": null,
+ "license": [],
+ "license_file": "COPYING",
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/community.general",
+ "documentation": "https://docs.ansible.com/ansible/latest/collections/community/general/",
+ "homepage": "https://github.com/ansible-collections/community.general",
+ "issues": "https://github.com/ansible-collections/community.general/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4af3ab11a47cb58399cf5036c03082a247092a36ad50e2e415f17890dad1ae0",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/community/general/README.md b/ansible_collections/community/general/README.md
new file mode 100644
index 000000000..dc5db2944
--- /dev/null
+++ b/ansible_collections/community/general/README.md
@@ -0,0 +1,143 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# Community General Collection
+
+[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-6)](https://dev.azure.com/ansible/community.general/_build?definitionId=31)
+[![EOL CI](https://github.com/ansible-collections/community.general/workflows/EOL%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.general/actions)
+[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general)
+
+This repository contains the `community.general` Ansible Collection. The collection is part of the Ansible package and includes many modules and plugins supported by the Ansible community that are not part of more specialized community collections.
+
+You can find [documentation for this collection on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+
+Please note that this collection does **not** support Windows targets. The only plugins in this collection that might support Windows targets are connection plugins, and any that do will explicitly say so in their documentation.
+
+## Code of Conduct
+
+We follow the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) in all our interactions within this project.
+
+If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint.
+
+## Tested with Ansible
+
+Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases.
+
+Parts of this collection will not work with ansible-core 2.11 on Python 3.12+.
+
+## External requirements
+
+Some modules and plugins require external libraries. Please check the requirements for each plugin or module you use in the documentation to find out which requirements are needed.
+
+## Included content
+
+Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/).
+
+## Using this collection
+
+This collection is shipped with the Ansible package. If you have the Ansible package installed, no further action is required.
+
+If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool:
+
+ ansible-galaxy collection install community.general
+
+You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
+
+```yaml
+collections:
+- name: community.general
+```
+
+Note that if you install the collection manually, it will not be upgraded automatically when you upgrade the Ansible package. To upgrade the collection to the latest available version, run the following command:
+
+```bash
+ansible-galaxy collection install community.general --upgrade
+```
+
+You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general):
+
+```bash
+ansible-galaxy collection install community.general:==X.Y.Z
+```
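+
+A version can also be pinned in a `requirements.yml` file. A minimal sketch (the version range shown is purely illustrative):
+
+```yaml
+collections:
+  - name: community.general
+    version: ">=6.0.0,<7.0.0"
+```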
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
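+
+Once the collection is available, its content is referenced by fully qualified collection name (FQCN). For example, a task using the collection's `ini_file` module might look like this (the path and values here are illustrative only; see the module documentation for real options):
+
+```yaml
+- name: Ensure a setting in an INI file
+  community.general.ini_file:
+    path: /etc/example/app.ini
+    section: main
+    option: enabled
+    value: "true"
+```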
+
+## Contributing to this collection
+
+The content of this collection is made by good people just like you, a community of individuals collaborating on making the world better through developing automation software.
+
+We are actively accepting new contributors.
+
+All types of contributions are very welcome.
+
+Not sure where to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-6/CONTRIBUTING.md)!
+
+The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-6/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals.
+
+You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+
+Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-6/CONTRIBUTING.md).
+
+### Running tests
+
+See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#testing-collections).
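+
+As a quick orientation, sanity and unit tests are usually run with `ansible-test` from the collection's root directory (an illustrative invocation assuming Docker is available; the linked guide is authoritative):
+
+```bash
+ansible-test sanity --docker -v
+ansible-test units --docker -v
+```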
+
+## Collection maintenance
+
+To learn how to maintain this collection or become a maintainer, refer to:
+
+* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-6/commit-rights.md).
+* [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst).
+
+It is necessary for maintainers of this collection to be subscribed to:
+
+* The collection itself (the `Watch` button → `All Activity` in the upper right corner of the repository's homepage).
+* The "Changes Impacting Collection Contributors and Maintainers" [issue](https://github.com/ansible-collections/overview/issues/45).
+
+They should also be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn).
+
+## Communication
+
+We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed.
+
+Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat).
+
+We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us.
+
+For more information about communities, meetings, and agendas, see the [Community Wiki](https://github.com/ansible/community/wiki/Community).
+
+For more information about communication, refer to Ansible's [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html).
+
+## Publishing a New Version
+
+See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection.
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-6/CHANGELOG.rst).
+
+## Roadmap
+
+In general, we plan to release a major version every six months, and minor versions every two months. Major versions can contain breaking changes, while minor versions only contain new features and bugfixes.
+
+See [this issue](https://github.com/ansible-collections/community.general/issues/582) for information on releasing, versioning, and deprecation.
+
+## More information
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+
+## Licensing
+
+This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.
+
+See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-6/COPYING) for the full text.
+
+Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/PSF-2.0.txt).
+
+All files have a machine-readable `SPDX-License-Identifier:` comment denoting their respective license(s), or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).
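+
+For illustration, such a header typically looks like the following at the top of a file (this is the common pattern used across the repository, not a specific file's header):
+
+```
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+```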
diff --git a/ansible_collections/community/general/changelogs/.gitignore b/ansible_collections/community/general/changelogs/.gitignore
new file mode 100644
index 000000000..3d7ad8262
--- /dev/null
+++ b/ansible_collections/community/general/changelogs/.gitignore
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+/.plugin-cache.yaml
diff --git a/ansible_collections/community/general/changelogs/changelog.yaml b/ansible_collections/community/general/changelogs/changelog.yaml
new file mode 100644
index 000000000..3b6438f62
--- /dev/null
+++ b/ansible_collections/community/general/changelogs/changelog.yaml
@@ -0,0 +1,1426 @@
+ancestor: 5.0.0
+releases:
+ 6.0.0:
+ changes:
+ breaking_changes:
+ - scaleway_container_registry_info - no longer replace ``secret_environment_variables``
+ in the output by ``SENSITIVE_VALUE`` (https://github.com/ansible-collections/community.general/pull/5497).
+ bugfixes:
+ - iso_create - the module sometimes failed to add folders for Joliet and UDF
+ formats (https://github.com/ansible-collections/community.general/issues/5275).
+ - ldap_attrs - fix a bug which caused a ``Bad search filter`` error. The error
+ was occurring when the LDAP attribute value contained special characters such
+ as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434,
+ https://github.com/ansible-collections/community.general/pull/5435).
+ - snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475).
+ minor_changes:
+ - ansible_galaxy_install - refactored module to use ``CmdRunner`` to execute
+ ``ansible-galaxy`` (https://github.com/ansible-collections/community.general/pull/5477).
+ - cpanm - refactored module to use ``CmdRunner`` to execute ``cpanm`` (https://github.com/ansible-collections/community.general/pull/5485).
+ - hponcfg - refactored module to use ``CmdRunner`` to execute ``hponcfg`` (https://github.com/ansible-collections/community.general/pull/5483).
+ - ldap_attrs - allow for DNs to have ``{x}`` prefix on first RDN (https://github.com/ansible-collections/community.general/issues/977,
+ https://github.com/ansible-collections/community.general/pull/5450).
+ - mksysb - refactored module to use ``CmdRunner`` to execute ``mksysb`` (https://github.com/ansible-collections/community.general/pull/5484).
+ - onepassword - support version 2 of the OnePassword CLI (https://github.com/ansible-collections/community.general/pull/4728).
+ release_summary: New major release of community.general with lots of bugfixes,
+ new features, the removal of some deprecated features, and some other breaking changes.
+ Please check the corresponding sections of the changelog for more details.
+ fragments:
+ - 4728-onepassword-v2.yml
+ - 5435-escape-ldap-param.yml
+ - 5450-allow-for-xordered-dns.yaml
+ - 5468-iso-create-not-add-folders.yml
+ - 5475-snap-option-value-whitespace.yml
+ - 5477-ansible-galaxy-install-cmd-runner.yml
+ - 5483-hponcfg-cmd-runner.yml
+ - 5484-mksysb-cmd-runner.yml
+ - 5485-cpanm-cmd-runner.yml
+ - 5497-scaleway-filtering.yml
+ - 6.0.0.yml
+ modules:
+ - description: Scaleway Container management
+ name: scaleway_container
+ namespace: ''
+ - description: Retrieve information on Scaleway Container
+ name: scaleway_container_info
+ namespace: ''
+ - description: Scaleway Container namespace management
+ name: scaleway_container_namespace
+ namespace: ''
+ - description: Retrieve information on Scaleway Container namespace
+ name: scaleway_container_namespace_info
+ namespace: ''
+ - description: Scaleway Function management
+ name: scaleway_function
+ namespace: ''
+ - description: Retrieve information on Scaleway Function
+ name: scaleway_function_info
+ namespace: ''
+ release_date: '2022-11-07'
+ 6.0.0-a1:
+ changes:
+ breaking_changes:
+ - newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
+ bugfixes:
+ - Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``.
+ - Include ``simplified_bsd.txt`` license file for various module utils, the
+ ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests.
+ - alternatives - do not set the priority if the priority was not set by the
+ user (https://github.com/ansible-collections/community.general/pull/4810).
+ - alternatives - only pass subcommands when they are specified as module arguments
+ (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804,
+ https://github.com/ansible-collections/community.general/pull/4836).
+ - alternatives - when ``subcommands`` is specified, ``link`` must be given for
+ every subcommand. This was already mentioned in the documentation, but not
+ enforced by the code (https://github.com/ansible-collections/community.general/pull/4836).
+ - apache2_mod_proxy - avoid crash when reporting inability to parse balancer_member_page
+ HTML caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111).
+ - archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz``
+ (https://github.com/ansible-collections/community.general/pull/5393).
+ - cmd_runner module utils - fix bug caused by using the ``command`` variable
+ instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903).
+ - consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680).
+ - credstash lookup plugin - pass plugin options to credstash for all terms,
+ not just for the first (https://github.com/ansible-collections/community.general/pull/5440).
+ - dig lookup plugin - add option to return empty result without empty strings,
+ and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5439,
+ https://github.com/ansible-collections/community.general/issues/5428).
+ - dig lookup plugin - fix evaluation of falsy values for boolean parameters
+ ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129).
+ - dnsimple_info - correctly report missing library as ``requests`` and not ``another_library``
+ (https://github.com/ansible-collections/community.general/pull/5111).
+ - dnstxt lookup plugin - add option to return empty result without empty strings,
+ and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5457,
+ https://github.com/ansible-collections/community.general/issues/5428).
+ - dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911).
+ - filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700).
+ - filesystem - improve error messages when output cannot be parsed by including
+ newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700).
+ - funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111).
+ - ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307).
+ - keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241).
+ - keyring_info - fix the result from the keyring library never getting returned
+ (https://github.com/ansible-collections/community.general/pull/4964).
+ - ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute
+ values (https://github.com/ansible-collections/community.general/issues/977,
+ https://github.com/ansible-collections/community.general/pull/5385).
+ - listen_ports_facts - removed leftover ``EnvironmentError``. The ``else``
+ clause was wrongly indented. The check is now handled in the ``split_pid_name``
+ function (https://github.com/ansible-collections/community.general/pull/5202).
+ - locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281).
+ - lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304).
+ - lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``.
+ This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886).
+ - manageiq_alert_profiles - avoid crash when reporting unknown profile caused
+ by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111).
+ - nmcli - avoid changed status for most cases with VPN connections (https://github.com/ansible-collections/community.general/pull/5126).
+ - nmcli - fix error caused by adding undefined module arguments for list options
+ (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813).
+ - 'nmcli - fix error when setting previously unset MAC address, ``gsm.apn``
+ or ``vpn.data``: current values were being normalized without checking if
+ they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).'
+ - nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998).
+ - nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112).
+ - nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed
+ DNS zones (https://github.com/ansible-collections/community.general/issues/4657).
+ - opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed
+ support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342).
+ - osx_defaults - no longer expand ``~`` in ``value`` to the user's home directory,
+ or expand environment variables (https://github.com/ansible-collections/community.general/issues/5234,
+ https://github.com/ansible-collections/community.general/pull/5243).
+ - packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths
+ (https://github.com/ansible-collections/community.general/pull/5111).
+ - pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959).
+ - passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027).
+ - passwordstore lookup plugin - fix password store path detection for gopass
+ (https://github.com/ansible-collections/community.general/pull/4955).
+ - pfexec become plugin - remove superfluous quotes preventing exe wrap from working
+ as expected (https://github.com/ansible-collections/community.general/issues/3671,
+ https://github.com/ansible-collections/community.general/pull/3889).
+ - pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111).
+ - pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363).
+ - proxmox - fix error handling when getting VM by name when ``state=absent``
+ (https://github.com/ansible-collections/community.general/pull/4945).
+ - proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config
+ string (https://github.com/ansible-collections/community.general/pull/4910).
+ - proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816).
+ - proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198).
+ - proxmox_kvm - fix error handling when getting VM by name when ``state=absent``
+ (https://github.com/ansible-collections/community.general/pull/4945).
+ - proxmox_kvm - fix exception when no ``agent`` argument is specified (https://github.com/ansible-collections/community.general/pull/5194).
+ - proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108).
+ - proxmox_kvm - replace new condition with proper condition to allow for using
+ ``vmid`` on update (https://github.com/ansible-collections/community.general/pull/5206).
+ - rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933).
+ - redfish_command - fix the check whether a virtual media is unmounted to just check
+ for ``Inserted == false``; this is needed because Supermicro hardware does not clear
+ the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839).
+ - redfish_command - the Supermicro Redfish implementation only supports the
+ ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert``
+ and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected``
+ or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839).
+ - redfish_info - fix to ``GetChassisPower`` to correctly report power information
+ when multiple chassis exist, but not all chassis report power information
+ (https://github.com/ansible-collections/community.general/issues/4901).
+ - redfish_utils module utils - centralize payload checking when performing modification
+ requests to a Redfish service (https://github.com/ansible-collections/community.general/issues/5210/).
+ - redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741).
+ - redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313).
+ - redis* modules - fix call to ``module.fail_json`` when failing because of
+ missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733).
+ - slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection
+ by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019).
+ - slack - fix message update for channels which start with ``CP``. When ``message-id``
+ was passed it failed for channels which started with ``CP`` because the ``#``
+ symbol was added before the ``channel_id`` (https://github.com/ansible-collections/community.general/pull/5249).
+ - sudoers - ensure sudoers config files are created with the permissions requested
+ by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814).
+ - 'sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852).'
+ - tss lookup plugin - adding support for updated Delinea library (https://github.com/DelineaXPM/python-tss-sdk/issues/9,
+ https://github.com/ansible-collections/community.general/pull/5151).
+ - virtualbox inventory plugin - skip parsing values with keys that have both
+ a value and nested data. Skip parsing values that are nested more than two
+ keys deep (https://github.com/ansible-collections/community.general/issues/5332,
+ https://github.com/ansible-collections/community.general/pull/5348).
+ - xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia
+ resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682).
+ - xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module
+ from working at all (https://github.com/ansible-collections/community.general/pull/5383).
+ - xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999,
+ https://github.com/ansible-collections/community.general/pull/5007).
+ - zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707,
+ https://github.com/ansible-collections/community.general/pull/4726).
+ deprecated_features:
+ - ArgFormat module utils - deprecated along ``CmdMixin``, in favor of the ``cmd_runner_fmt``
+ module util (https://github.com/ansible-collections/community.general/pull/5370).
+ - CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util
+ (https://github.com/ansible-collections/community.general/pull/5370).
+ - CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module
+ util (https://github.com/ansible-collections/community.general/pull/5370).
+ - CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner``
+ module util (https://github.com/ansible-collections/community.general/pull/5370).
+ - cmd_runner module utils - deprecated ``fmt`` in favor of ``cmd_runner_fmt``
+ as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777).
+ - django_manage - support for Django releases older than 4.1 has been deprecated
+ and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
+ - django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate``
+ that have been deprecated in Django long time ago will be removed in community.general
+ 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400).
+ - django_manage - the behavior of "creating the virtual environment when missing"
+ is being deprecated and will be removed in community.general version 9.0.0
+ (https://github.com/ansible-collections/community.general/pull/5405).
+ - gconftool2 - deprecates ``state=get`` in favor of using the module ``gconftool2_info``
+ (https://github.com/ansible-collections/community.general/pull/4778).
+ - lxc_container - the module will no longer make any effort to support Python
+ 2 (https://github.com/ansible-collections/community.general/pull/5304).
+ - newrelic_deployment - ``appname`` and ``environment`` are no longer valid
+ options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341).
+ - proxmox - deprecated the current ``unprivileged`` default value; it will be
+ changed to ``true`` in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5224).
+ - xfconf - deprecated parameter ``disable_facts``, as since version 4.0.0 it
+ only allows value ``true`` (https://github.com/ansible-collections/community.general/pull/4520).
+ major_changes:
+ - The internal structure of the collection was changed for modules and action
+ plugins. These no longer live in a directory hierarchy ordered by topic, but
+ instead are now all in a single (flat) directory. This has no impact on users
+ *assuming they did not use internal FQCNs*. These will still work, but result
+ in deprecation warnings. They were never officially supported and thus the
+ redirects are kept as a courtesy, and this is not labelled as a breaking change.
+ Note that, for example, the Ansible VS Code plugin started recommending these
+ internal names. If you followed its recommendation, you will now have to change
+ back to the short names to avoid deprecation warnings, and potential errors
+ in the future as these redirects will be removed in community.general 9.0.0
+ (https://github.com/ansible-collections/community.general/pull/5461).
+ - newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341).
+ minor_changes:
+ - Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py
+ (https://github.com/ansible-collections/community.general/pull/5065).
+ - All software licenses are now in the ``LICENSES/`` directory of the collection
+ root (https://github.com/ansible-collections/community.general/pull/5065,
+ https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080,
+ https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087,
+ https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098,
+ https://github.com/ansible-collections/community.general/pull/5106).
+ - ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035).
+ - ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()``
+ for the exception message (https://github.com/ansible-collections/community.general/pull/4755).
+ - The collection repository conforms to the `REUSE specification <https://reuse.software/spec/>`__
+ except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138).
+ - ali_instance - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240).
+ - ali_instance_info - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5240).
+ - alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654).
+ - alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654).
+ - ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates
+ (https://github.com/ansible-collections/community.general/pull/4752).
+ - apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976).
+ - bitwarden lookup plugin - add option ``search`` to search for attributes other
+ than name (https://github.com/ansible-collections/community.general/pull/5297).
+ - cartesian lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return``
+ to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True``
+ (https://github.com/ansible-collections/community.general/pull/4736).
+ - cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791).
+ - consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996).
+ - consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367).
+ - consul_session - adds ``token`` parameter for session (https://github.com/ansible-collections/community.general/pull/5193).
+ - cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
+ modules (https://github.com/ansible-collections/community.general/pull/4674).
+ - credstash lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - dependent lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - dig lookup plugin - add option ``fail_on_error`` to allow stopping execution
+ on lookup failures (https://github.com/ansible-collections/community.general/pull/4973).
+ - dig lookup plugin - start using Ansible's configuration manager to parse options.
+ All documented options can now also be passed as lookup parameters (https://github.com/ansible-collections/community.general/pull/5440).
+ - dnstxt lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - filetree lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - flattened lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - gitlab module util - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_deploy_key - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_group - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_group_members - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_group_variable - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_hook - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_hook - minor refactoring (https://github.com/ansible-collections/community.general/pull/5271).
+ - gitlab_project - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_project_members - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_project_variable - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_protected_branch - minor refactor when checking for installed dependency
+ (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_runner - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - gitlab_user - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259).
+ - hiera lookup plugin - start using Ansible's configuration manager to parse
+ options. The Hiera executable and config file can now also be passed as lookup
+ parameters (https://github.com/ansible-collections/community.general/pull/5440).
+ - homebrew, homebrew_tap - added Homebrew on Linux path to defaults (https://github.com/ansible-collections/community.general/pull/5241).
+ - keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible``
+ (https://github.com/ansible-collections/community.general/issues/5023).
+ - keyring lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022).
+ - linode inventory plugin - simplify option handling (https://github.com/ansible-collections/community.general/pull/5438).
+ - listen_ports_facts - add new ``include_non_listening`` option which adds ``-a``
+ option to ``netstat`` and ``ss``. This shows both listening and non-listening
+ (for TCP this means established connections) sockets, and returns ``state``
+ and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762,
+ https://github.com/ansible-collections/community.general/pull/4953).
+ - lmdb_kv lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358).
+ - machinectl become plugin - can now be used with a password from a user other
+ than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849).
+ - machinectl become plugin - combine the success command when building the become
+ command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287).
+ - manifold lookup plugin - start using Ansible's configuration manager to parse
+ options (https://github.com/ansible-collections/community.general/pull/5440).
+ - maven_artifact - add a new ``unredirected_headers`` option that can be used
+ with ansible-core 2.12 and above. The default value is to not use ``Authorization``
+ and ``Cookie`` headers on redirects for security reasons. With ansible-core
+ 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812).
+ - mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
+ modules (https://github.com/ansible-collections/community.general/pull/4674).
+ - nagios - minor refactoring on parameter validation for different actions (https://github.com/ansible-collections/community.general/pull/5239).
+ - netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301).
+ - nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361).
+ - nmcli - add bond option ``xmit_hash_policy`` to bond options (https://github.com/ansible-collections/community.general/issues/5148).
+ - nmcli - adds ``vpn`` type and parameter for supporting VPN with service type
+ L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746).
+ - nmcli - honor IP options for VPNs (https://github.com/ansible-collections/community.general/pull/5228).
+ - opentelemetry callback plugin - allow configuring the opentelemetry callback
+ via config file (https://github.com/ansible-collections/community.general/pull/4916).
+ - opentelemetry callback plugin - send logs. This can be disabled by setting
+ ``disable_logs=true`` (https://github.com/ansible-collections/community.general/pull/4175).
+ - pacman - added parameters ``reason`` and ``reason_for`` to set/change the
+ install reason of packages (https://github.com/ansible-collections/community.general/pull/4956).
+ - passwordstore lookup plugin - allow options to be passed as lookup options
+ instead of being part of the term strings (https://github.com/ansible-collections/community.general/pull/5444).
+ - passwordstore lookup plugin - allow using alternative password managers by
+ detecting wrapper scripts, allow explicit configuration of pass and gopass
+ backends (https://github.com/ansible-collections/community.general/issues/4766).
+ - passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436).
+ - pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105).
+ - pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085).
+ - pipx - module fails faster when ``name`` is missing for states ``upgrade``
+ and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100).
+ - pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
+ modules (https://github.com/ansible-collections/community.general/pull/4674).
+ - pipx module utils - created new module util ``pipx`` providing a ``cmd_runner``
+ specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085).
+ - portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options
+ (https://github.com/ansible-collections/community.general/pull/5349).
+ - portage - use Portage's python module instead of calling the gentoolkit-provided
+ program in a shell (https://github.com/ansible-collections/community.general/pull/5349).
+ - proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new
+ groups ``<group_prefix>prelaunch``, ``<group_prefix>paused``. They will be
+ populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and
+ only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723).
+ - proxmox inventory plugin - simplify option handling code (https://github.com/ansible-collections/community.general/pull/5437).
+ - proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to
+ standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274).
+ - proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107).
+ - proxmox_snap - add ``unbind`` param to support snapshotting containers with
+ configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274).
+ - puppet - adds ``confdir`` parameter to configure a custom confdir location
+ (https://github.com/ansible-collections/community.general/pull/4740).
+ - redfish - added new commands GetVirtualMedia, VirtualMediaInsert and VirtualMediaEject
+ to the Systems category, because Redfish spec changes moved the virtualMedia
+ resource location from Manager to System (https://github.com/ansible-collections/community.general/pull/5124).
+ - redfish_config - add ``SetSessionService`` to set default session timeout
+ policy (https://github.com/ansible-collections/community.general/issues/5008).
+ - redfish_info - add ``GetManagerInventory`` to report list of Manager inventory
+ information (https://github.com/ansible-collections/community.general/issues/4899).
+ - seport - added new argument ``local`` (https://github.com/ansible-collections/community.general/pull/5203).
+ - snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
+ modules (https://github.com/ansible-collections/community.general/pull/4674).
+ - sudoers - will attempt to validate the proposed sudoers rule using ``visudo``
+ if it is available; validation can optionally be skipped or made mandatory (https://github.com/ansible-collections/community.general/pull/4794,
+ https://github.com/ansible-collections/community.general/issues/4745).
+ - terraform - adds capability to handle complex variable structures for ``variables``
+ parameter in the module. This must be enabled with the new ``complex_vars``
+ parameter (https://github.com/ansible-collections/community.general/pull/4797).
+ - terraform - run ``terraform init`` with ``-no-color`` to avoid cluttering the
+ stdout of the task (https://github.com/ansible-collections/community.general/pull/5147).
+ - wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands
+ for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059).
+ - wdc_redfish_command - add ``PowerModeLow`` and ``PowerModeNormal`` commands
+ for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5145).
+ - xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037).
+ - xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
+ - xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975).
+ - xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived
+ modules (https://github.com/ansible-collections/community.general/pull/4674).
+ - xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner``
+ specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776).
+ - xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776).
+ - xfconf_info - use ``do_raise()`` instead of defining custom exception class
+ (https://github.com/ansible-collections/community.general/pull/4975).
+ - znode - add the possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306).
+ release_summary: This is a pre-release for the upcoming 6.0.0 major release.
+ The main objective of this pre-release is to make it possible to test the
+ large structural changes by flattening the directory structure. See the corresponding
+ entry in the changelog for details.
+ removed_features:
+ - bitbucket* modules - ``username`` is no longer an alias of ``workspace``,
+ but of ``user`` (https://github.com/ansible-collections/community.general/pull/5326).
+ - gem - the default of the ``norc`` option changed from ``false`` to ``true``
+ (https://github.com/ansible-collections/community.general/pull/5326).
+ - gitlab_group_members - ``gitlab_group`` must now always contain the full path,
+ and no longer just the name or path (https://github.com/ansible-collections/community.general/pull/5326).
+ - keycloak_authentication - the return value ``flow`` has been removed. Use
+ ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326).
+ - keycloak_group - the return value ``group`` has been removed. Use ``end_state``
+ instead (https://github.com/ansible-collections/community.general/pull/5326).
+ - lxd_container - the default of the ``ignore_volatile_options`` option changed
+ from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326).
+ - mail callback plugin - the ``sender`` option is now required (https://github.com/ansible-collections/community.general/pull/5326).
+ - module_helper module utils - remove the ``VarDict`` attribute from ``ModuleHelper``.
+ Import ``VarDict`` from ``ansible_collections.community.general.plugins.module_utils.mh.mixins.vars``
+ instead (https://github.com/ansible-collections/community.general/pull/5326).
+ - proxmox inventory plugin - the default of the ``want_proxmox_nodes_ansible_host``
+ option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326).
+ - vmadm - the ``debug`` option has been removed. It was not used anyway (https://github.com/ansible-collections/community.general/pull/5326).
+ fragments:
+ - 3671-illumos-pfexec.yml
+ - 4175-opentelemetry_logs.yml
+ - 4520-xfconf-deprecate-disable-facts.yml
+ - 4654-alternatives-add-subcommands.yml
+ - 4674-use-mh-raise.yaml
+ - 4682-compatibility-virtualmedia-resource-location.yaml
+ - 4700-code-changes.yml
+ - 4712-consul-bugfix.yaml
+ - 4719-fix-keycloak-realm.yaml
+ - 4724-proxmox-qemu-extend.yaml
+ - 4726-zfs.yml
+ - 4733-redis-fail.yml
+ - 4736-cmd-runner-skip-if-check.yml
+ - 4740-puppet-feature.yaml
+ - 4746-add-vpn-support-nmcli.yaml
+ - 4752-ansible-galaxy-install-mh-updates.yml
+ - 4755-mhexception-improvement.yml
+ - 4776-xfconf-cmd-runner.yaml
+ - 4777-cmd-runner-deprecate-fmt.yaml
+ - 4778-gconftool2-deprecate-state-get.yaml
+ - 4780-passwordstore-wrapper-compat.yml
+ - 4791-cmd-runner-callable.yaml
+ - 4794-sudoers-validation.yml
+ - 4797-terraform-complex-variables.yml
+ - 4809-redhat_subscription-unsubscribe.yaml
+ - 4810-alternatives-bug.yml
+ - 4812-expose-unredirected-headers.yml
+ - 4813-fix-nmcli-convert-list.yaml
+ - 4814-sudoers-file-permissions.yml
+ - 4816-proxmox-fix-extended-status.yaml
+ - 4836-alternatives.yml
+ - 4839-fix-VirtualMediaInsert-Supermicro.yml
+ - 4849-add-password-prompt-support-for-machinectl.yml
+ - 4852-sudoers-state-absent.yml
+ - 4886-fix-lxd-inventory-hostname.yml
+ - 4899-add-GetManagerInventory-for-redfish_info.yml
+ - 4901-fix-redfish-chassispower.yml
+ - 4903-cmdrunner-bugfix.yaml
+ - 4910-fix-for-agent-enabled.yml
+ - 4911-dsv-honor-tld-option.yml
+ - 4916-opentelemetry-ini-options.yaml
+ - 4933-fix-rax-clb-nodes.yaml
+ - 4945-fix-get_vm-int-parse-handling.yaml
+ - 4953-listen-ports-facts-extend-output.yaml
+ - 4955-fix-path-detection-for-gopass.yaml
+ - 4956-pacman-install-reason.yaml
+ - 4959-pacman-fix-url-packages-name.yaml
+ - 4964-fix-keyring-info.yml
+ - 4973-introduce-dig-lookup-argument.yaml
+ - 4975-xfconf-use-do-raise.yaml
+ - 4976-apk-add-support-for-a-custom-world-file.yaml
+ - 4996-consul-session-ttl.yml
+ - 4998-nmcli-fix-int-options-idempotence.yml
+ - 4999-xfconf-bool.yml
+ - 5008-addSetSessionService.yml
+ - 5019-slack-support-more-groups.yml
+ - 5022-lastpass-lookup-cleanup.yml
+ - 5023-http-agent-param-keycloak.yml
+ - 5027-fix-returnall-for-gopass.yaml
+ - 5035-mh-base-verbosity.yaml
+ - 5037-xfconf-add-cmd-output.yaml
+ - 5059-wdc_redfish_command-indicator-leds.yml
+ - 5085-pipx-use-cmd-runner.yaml
+ - 5100-pipx-req-if.yaml
+ - 5105-pipx-state-latest.yaml
+ - 5107-proxmox-agent-argument.yaml
+ - 5108-proxmox-node-name-condition.yml
+ - 5111-fixes.yml
+ - 5112-fix-nsupdate-ns-entry.yaml
+ - 5124-compatibility-virtualmedia-resource-location.yaml
+ - 5126-nmcli-remove-diffs.yml
+ - 5129-dig-boolean-params-fix.yml
+ - 5145-wdc-redfish-enclosure-power-state.yml
+ - 5147-terraform-init-no-color.yml
+ - 5149-nmcli-bond-option.yml
+ - 5151-add-delinea-support-tss-lookup.yml
+ - 5193-consul-session-token.yaml
+ - 5194-fix-proxmox-agent-exception.yaml
+ - 5198-proxmox.yml
+ - 5202-bugfix-environmentError-wrong-indentation.yaml
+ - 5203-seport-add-local-argument.yaml
+ - 5206-proxmox-conditional-vmid.yml
+ - 5210-redfish_utils-cleanup-of-configuration-logic-and-oem-checks.yaml
+ - 5224-proxmox-unprivileged-default.yaml
+ - 5228-nmcli-ip-options.yaml
+ - 5239-nagios-refactor.yaml
+ - 5240-unused-imports.yaml
+ - 5241-homebrew-add-linux-path.yaml
+ - 5243-osx-defaults-expand-user-flags.yml
+ - 5249-add-new-channel-prefix.yml
+ - 5259-gitlab-imports.yaml
+ - 5271-gitlab_hook-refactor.yaml
+ - 5274-proxmox-snap-container-with-mountpoints.yml
+ - 5280-lxc_container-py3.yaml
+ - 5282-locale_gen.yaml
+ - 5287-machinectl-become-success.yml
+ - 5291-fix-nmcli-error-when-setting-unset-mac-address.yaml
+ - 5297-bitwarden-add-search-field.yml
+ - 5301-netcup_dnsapi-timeout.yml
+ - 5306-add-options-for-authentication.yml
+ - 5307-ini_file-lint.yaml
+ - 5313-fix-redhat_subscription-idempotency-pool_ids.yml
+ - 5341-newrelic-v2-api-changes.yml
+ - 5342-opentelemetry_bug_fix_opentelemetry-api-1.13.yml
+ - 5348-fix-vbox-deeply-nested-hostvars.yml
+ - 5349-drop-gentoolkit-more-knobs.yml
+ - 5358-lxc-container-refactor.yml
+ - 5361-nmcli-add-infiniband-transport-mode.yaml
+ - 5367-consul-refactor.yaml
+ - 5369-pkgng-fix-update-all.yaml
+ - 5370-mh-cmdmixin-deprecation.yaml
+ - 5377-nsupdate-ns-records-with-bind.yml
+ - 5383-xenserver_facts.yml
+ - 5385-search_s-based-_is_value_present.yaml
+ - 5393-archive.yml
+ - 5400-django-manage-deprecations.yml
+ - 5404-django-manage-venv-deprecation.yml
+ - 5436-passwordstore-errors.yml
+ - 5437-proxmox.yml
+ - 5438-linode.yml
+ - 5439-dig-return-empty-result.yml
+ - 5444-passwordstore-options.yml
+ - 5457-dnstxt-empty.yml
+ - 6.0.0-a1.yml
+ - deprecation-removals.yml
+ - licenses-2.yml
+ - licenses.yml
+ - lookup-options.yml
+ - psf-license.yml
+ - simplified-bsd-license.yml
+ - unflatmap.yml
+ modules:
+ - description: Retrieve GConf configurations
+ name: gconftool2_info
+ namespace: ''
+ - description: Add/remove/change files in ISO file
+ name: iso_customize
+ namespace: ''
+ - description: Allows administration of Keycloak user_rolemapping with the Keycloak
+ API
+ name: keycloak_user_rolemapping
+ namespace: ''
+ - description: Set or delete a passphrase using the Operating System's native
+ keyring
+ name: keyring
+ namespace: ''
+ - description: Get a passphrase using the Operating System's native keyring
+ name: keyring_info
+ namespace: ''
+ - description: Listing of resource policy_profiles in ManageIQ
+ name: manageiq_policies_info
+ namespace: ''
+ - description: Retrieve resource tags in ManageIQ
+ name: manageiq_tags_info
+ namespace: ''
+ - description: Retrieves information about applications installed with pipx
+ name: pipx_info
+ namespace: ''
+ - description: Management of a disk of a Qemu (KVM) VM in a Proxmox VE cluster
+ name: proxmox_disk
+ namespace: ''
+ - description: Scaleway compute - private network management
+ name: scaleway_compute_private_network
+ namespace: ''
+ - description: Scaleway Container registry management module
+ name: scaleway_container_registry
+ namespace: ''
+ - description: Scaleway Container registry info module
+ name: scaleway_container_registry_info
+ namespace: ''
+ - description: Scaleway Function namespace management
+ name: scaleway_function_namespace
+ namespace: ''
+ - description: Retrieve information on Scaleway Function namespace
+ name: scaleway_function_namespace_info
+ namespace: ''
+ - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish
+ APIs
+ name: wdc_redfish_command
+ namespace: ''
+ - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish
+ APIs
+ name: wdc_redfish_info
+ namespace: ''
+ plugins:
+ filter:
+ - description: Counts hashable elements in a sequence
+ name: counter
+ namespace: null
+ lookup:
+ - description: Retrieve secrets from Bitwarden
+ name: bitwarden
+ namespace: null
+ release_date: '2022-11-02'
+ 6.0.1:
+ changes:
+ bugfixes:
+ - dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()``
+ (https://github.com/ansible-collections/community.general/pull/5543).
+ - jenkins_build - fix the logical flaw when deleting a Jenkins build (https://github.com/ansible-collections/community.general/pull/5514).
+ - one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489).
+ - onepassword_raw - add missing parameter to plugin documentation (https://github.com/ansible-collections/community.general/issues/5506).
+ - proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492,
+ https://github.com/ansible-collections/community.general/pull/5493).
+ release_summary: Bugfix release for Ansible 7.0.0.
+ fragments:
+ - 5489-nonetype-in-get-vm-by-label.yml
+ - 5493-proxmox.yml
+ - 5506-onepassword_raw-missing-param.yml
+ - 5514-fix-logical-flaw-when-deleting-jenkins-build.yml
+ - 5543-dependent-template.yml
+ - 6.0.1.yml
+ release_date: '2022-11-15'
+ 6.1.0:
+ changes:
+ bugfixes:
+ - chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``.
+ This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570).
+ - cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()``
+ (https://github.com/ansible-collections/community.general/pull/5538).
+ - cmd_runner module utils - the formatting function ``cmd_runner_fmt.as_fixed()``
+ was expecting a non-existing argument (https://github.com/ansible-collections/community.general/pull/5538).
+ - keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return
+ values properly (https://github.com/ansible-collections/community.general/pull/5619).
+ - keycloak_client_rolemapping - remove only listed mappings with ``state=absent``
+ (https://github.com/ansible-collections/community.general/pull/5619).
+ - proxmox inventory plugin - fix bug while templating when using templates for
+ the ``url``, ``user``, ``password``, ``token_id``, or ``token_secret`` options
+ (https://github.com/ansible-collections/community.general/pull/5640).
+ - proxmox inventory plugin - handle tags delimited by semicolon instead of comma,
+ which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602).
+ - redhat_subscription - do not ignore ``consumer_name`` and other variables
+ if ``activationkey`` is specified (https://github.com/ansible-collections/community.general/issues/3486,
+ https://github.com/ansible-collections/community.general/pull/5627).
+ - redhat_subscription - do not pass arguments to ``subscription-manager register``
+ for things already configured; now a specified ``rhsm_baseurl`` is properly
+ set for subscription-manager (https://github.com/ansible-collections/community.general/pull/5583).
+ - unixy callback plugin - fix plugin to work with ansible-core 2.14 by using
+ Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
+ - vdo - now uses ``yaml.safe_load()`` to parse command output instead of the
+ deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()``
+ without explicitly setting a ``Loader=`` is also an error in PyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632).
+ - vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628).
+ deprecated_features:
+ - The ``sap`` modules ``sapcar_extract``, ``sap_task_list_execute``, and ``hana_query``
+ will be removed from this collection in community.general 7.0.0 and replaced
+ with redirects to ``community.sap_libs``. If you want to continue using these
+ modules, make sure to also install ``community.sap_libs`` (it is part of the
+ Ansible package) (https://github.com/ansible-collections/community.general/pull/5614).
+ minor_changes:
+ - cmd_runner module utils - ``cmd_runner_fmt.as_bool()`` can now take an extra
+ parameter to format when the value is false (https://github.com/ansible-collections/community.general/pull/5647).
+ - gconftool2 - refactor using ``ModuleHelper`` and ``CmdRunner`` (https://github.com/ansible-collections/community.general/pull/5545).
+ - java_certs - add more detailed error output when extracting certificate from
+ PKCS12 fails (https://github.com/ansible-collections/community.general/pull/5550).
+ - jenkins_plugin - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5565).
+ - lxd_project - refactored code out to module utils to clear sanity check (https://github.com/ansible-collections/community.general/pull/5549).
+ - nmap inventory plugin - add new options ``udp_scan``, ``icmp_timestamp``,
+ and ``dns_resolve`` for different types of scans (https://github.com/ansible-collections/community.general/pull/5566).
+ - rax_scaling_group - refactored out code to the ``rax`` module utils to clear
+ the sanity check (https://github.com/ansible-collections/community.general/pull/5563).
+ - redfish_command - add ``PerformRequestedOperations`` command to perform any
+ operations necessary to continue the update flow (https://github.com/ansible-collections/community.general/issues/4276).
+ - redfish_command - add ``update_apply_time`` to ``SimpleUpdate`` command (https://github.com/ansible-collections/community.general/issues/3910).
+ - redfish_command - add ``update_status`` to output of ``SimpleUpdate`` command
+ to allow a user to monitor the update in progress (https://github.com/ansible-collections/community.general/issues/4276).
+ - redfish_info - add ``GetUpdateStatus`` command to check the progress of a
+ previous update request (https://github.com/ansible-collections/community.general/issues/4276).
+ - redfish_utils module utils - added PUT (``put_request()``) functionality (https://github.com/ansible-collections/community.general/pull/5490).
+ - slack - add option ``prepend_hash`` which allows one to control whether a ``#``
+ is prepended to ``channel_id``. The current behavior (value ``auto``) is to
+ prepend ``#`` unless some specific prefixes are found. That list of prefixes
+ is incomplete, and there does not seem to exist a documented condition on
+ when exactly ``#`` must not be prepended. We recommend explicitly setting ``prepend_hash=always``
+ or ``prepend_hash=never`` to avoid any ambiguity (https://github.com/ansible-collections/community.general/pull/5629).
+ - spotinst_aws_elastigroup - add ``elements`` attribute when missing in ``list``
+ parameters (https://github.com/ansible-collections/community.general/pull/5553).
+ - ssh_config - add ``host_key_algorithms`` option (https://github.com/ansible-collections/community.general/pull/5605).
+ - udm_share - added ``elements`` attribute to ``list`` type parameters (https://github.com/ansible-collections/community.general/pull/5557).
+ - udm_user - add ``elements`` attribute when missing in ``list`` parameters
+ (https://github.com/ansible-collections/community.general/pull/5559).
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 3910-redfish-add-operation-apply-time-to-simple-update.yml
+ - 4276-redfish-command-updates-for-full-simple-update-workflow.yml
+ - 5490-adding-put-functionality.yml
+ - 5538-cmd-runner-as-fixed.yml
+ - 5545-gconftool-cmd-runner.yml
+ - 5549-lxd-project-sanity.yml
+ - 5550-java_certs-not-enough-info-on-error.yml
+ - 5553-spotinst-aws-elasticgroup-sanity.yml
+ - 5557-udm-share-sanity.yml
+ - 5559-udm-user-sanity.yml
+ - 5563-rax-scaling-group-sanity.yml
+ - 5565-jenkins-plugin-sanity.yml
+ - 5566-additional-flags-nmap.yml
+ - 5570-chroot-plugin-fix-default-inventory_hostname.yml
+ - 5583-redhat_subscription-subscribe-parameters.yaml
+ - 5601-unixy-callback-use-config-manager.yml
+ - 5602-proxmox-tags.yml
+ - 5605-ssh-config-add-host-key-algorithms.yaml
+ - 5619-keycloak-improvements.yml
+ - 5627-redhat_subscription-subscribe-parameters-2.yaml
+ - 5628-fix-vmadm-off-by-one.yml
+ - 5629-add-prepend-hash-option-for-channel-id.yml
+ - 5632-vdo-Use-yaml-safe-load-instead-of-yaml-load.yml
+ - 5640-fix-typo-proxmox-inventory.yml
+ - 5647-cmd-runner-as-bool-false.yml
+ - 6.1.0.yml
+ - sap-removal.yml
+ modules:
+ - description: Manage project badges on GitLab Server
+ name: gitlab_project_badge
+ namespace: ''
+ - description: Retrieve client secret via Keycloak API
+ name: keycloak_clientsecret_info
+ namespace: ''
+ - description: Regenerate Keycloak client secret via Keycloak API
+ name: keycloak_clientsecret_regenerate
+ namespace: ''
+ release_date: '2022-12-06'
+ 6.2.0:
+ changes:
+ bugfixes:
+ - ansible_galaxy_install - set default to raise an exception if the command's
+ return code is different from zero (https://github.com/ansible-collections/community.general/pull/5680).
+ - ansible_galaxy_install - try ``C.UTF-8`` and then fall back to ``en_US.UTF-8``
+ before failing (https://github.com/ansible-collections/community.general/pull/5680).
+ - gitlab_group_variables - fix accidentally dropping variables when GitLab introduced
+ new properties (https://github.com/ansible-collections/community.general/pull/5667).
+ - gitlab_project_variables - fix accidentally dropping variables when GitLab
+ introduced new properties (https://github.com/ansible-collections/community.general/pull/5667).
+ - lxc_container - fix the arguments of the lxc command which broke the creation
+ and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578).
+ - opkg - fix issue where ``force=reinstall`` would not reinstall an existing
+ package (https://github.com/ansible-collections/community.general/pull/5705).
+ - proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492,
+ https://github.com/ansible-collections/community.general/pull/5672).
+ - proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492,
+ https://github.com/ansible-collections/community.general/pull/5672).
+ - unixy callback plugin - fix typo introduced when updating to use Ansible's
+ configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600).
+ deprecated_features:
+ - manageiq_policies - deprecate ``state=list`` in favour of using ``community.general.manageiq_policies_info``
+ (https://github.com/ansible-collections/community.general/pull/5721).
+ - rax - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_cbs - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_cbs_attachments - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_cdb - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_cdb_database - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_cdb_user - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_clb - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_clb_nodes - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_clb_ssl - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_dns - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_dns_record - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_facts - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_files - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_files_objects - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_identity - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_keypair - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_meta - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_mon_alarm - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_mon_check - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_mon_entity - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_mon_notification - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_mon_notification_plan - module relies on the deprecated library ``pyrax``.
+ Unless maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_network - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_queue - module relies on the deprecated library ``pyrax``. Unless maintainers
+ step up to work on the module, it will be marked as deprecated in community.general
+ 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_scaling_group - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ - rax_scaling_policy - module relies on the deprecated library ``pyrax``. Unless
+ maintainers step up to work on the module, it will be marked as deprecated
+ in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733).
+ minor_changes:
+ - opkg - allow installing a package in a certain version (https://github.com/ansible-collections/community.general/pull/5688).
+ - proxmox - added new module parameter ``tags`` for use with PVE 7+ (https://github.com/ansible-collections/community.general/pull/5714).
+ - puppet - refactored module to use ``CmdRunner`` for executing ``puppet`` (https://github.com/ansible-collections/community.general/pull/5612).
+ - redhat_subscription - add a ``server_proxy_scheme`` parameter to configure
+ the scheme for the proxy server (https://github.com/ansible-collections/community.general/pull/5662).
+ - ssh_config - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5720).
+ - sudoers - adds ``host`` parameter for setting hostname restrictions in sudoers
+ rules (https://github.com/ansible-collections/community.general/issues/5702).
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 5612-puppet-cmd-runner.yml
+ - 5659-fix-lxc_container-command.yml
+ - 5662-redhat_subscription-server_proxy_scheme.yaml
+ - 5666-gitlab-variables.yml
+ - 5672-proxmox.yml
+ - 5680-ansible_galaxy_install-fx-locale.yaml
+ - 5688-opkg-module-install-certain-version.yml
+ - 5703-sudoers-host-support.yml
+ - 5705-opkg-fix-force-reinstall.yml
+ - 5714-proxmox-lxc-tag-support.yml
+ - 5720-ssh_config-plugin-sanity.yml
+ - 5721-manageiq-policies-deprecate-list-state.yaml
+ - 5733-rax-deprecation-notice.yml
+ - 5744-unixy-callback-fix-config-manager-typo.yml
+ - 6.2.0.yml
+ release_date: '2023-01-04'
+ 6.3.0:
+ changes:
+ breaking_changes:
+ - 'ModuleHelper module utils - when the module sets output variables named ``msg``,
+ ``exception``, ``output``, ``vars``, or ``changed``, the actual output will
+ prefix those names with ``_`` (underscore symbol) only when they clash with
+ output variables generated by ModuleHelper itself, which only occurs when
+ handling exceptions. Please note that this breaking change does not require
+ a new major release since, before this release, it was not possible to add
+ such variables to the output `due to a bug <https://github.com/ansible-collections/community.general/pull/5755>`__
+ (https://github.com/ansible-collections/community.general/pull/5765).
+
+ '
+ bugfixes:
+ - ModuleHelper - fix bug when adjusting the name of reserved output variables
+ (https://github.com/ansible-collections/community.general/pull/5755).
+ - alternatives - support subcommands on Fedora 37, which uses ``follower`` instead
+ of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794).
+ - bitwarden lookup plugin - clarify what to do if the bitwarden vault is not
+ unlocked (https://github.com/ansible-collections/community.general/pull/5811).
+ - dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field
+ (https://github.com/ansible-collections/community.general/pull/5914).
+ - gem - fix force parameter not being passed to gem command when uninstalling
+ (https://github.com/ansible-collections/community.general/pull/5822).
+ - gem - fix hang due to interactive prompt for confirmation on specific version
+ uninstall (https://github.com/ansible-collections/community.general/pull/5751).
+ - gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888).
+ - keycloak_user_federation - fixes a federation creation issue. When a new federation
+ was created and, at the same time, a default/standard mapper was also changed
+ or updated, the creation process failed, as a badly set ``None`` variable led
+ to a malformed URL request (https://github.com/ansible-collections/community.general/pull/5750).
+ - 'keycloak_user_federation - fixes idempotency detection issues. In some cases
+ the module could fail to properly detect already existing user federations
+ because of a buggy, seemingly superfluous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732).
+
+ '
+ - loganalytics callback plugin - adjust type of callback to ``notification``,
+ it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - logdna callback plugin - adjust type of callback to ``notification``, it was
+ incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - logstash callback plugin - adjust type of callback to ``notification``, it
+ was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - nsupdate - fix zone lookup. The SOA record for an existing zone is returned
+ as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817,
+ https://github.com/ansible-collections/community.general/pull/5818).
+ - proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803).
+ - redfish_utils - removed basic auth HTTP header when performing a GET on the
+ service root resource and when performing a POST to the session collection
+ (https://github.com/ansible-collections/community.general/issues/5886).
+ - splunk callback plugin - adjust type of callback to ``notification``, it was
+ incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - sumologic callback plugin - adjust type of callback to ``notification``, it
+ was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - syslog_json callback plugin - adjust type of callback to ``notification``,
+ it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761).
+ - terraform - fix ``current`` workspace never getting appended to the ``all``
+ key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735).
+ - terraform - fix ``terraform init`` failure when there are multiple workspaces
+ on the remote backend and when ``default`` workspace is missing by setting
+ ``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when
+ used (https://github.com/ansible-collections/community.general/pull/5735).
+ - terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843).
+ - xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808).
+ deprecated_features:
+ - consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772).
+ - gitlab_runner - the default of the new option ``access_level_on_creation``
+ will change from ``false`` to ``true`` in community.general 7.0.0. This will
+ cause ``access_level`` to be used during runner registration as well, and
+ not only during updates (https://github.com/ansible-collections/community.general/pull/5908).
+ minor_changes:
+ - apache2_module - add module argument ``warn_mpm_absent`` to control whether
+ warnings are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793).
+ - bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694).
+ - bitwarden lookup plugin - implement filtering results by ``collection_id``
+ parameter (https://github.com/ansible-collections/community.general/issues/5849).
+ - dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913).
+ - gitlab_project - add ``builds_access_level``, ``container_registry_access_level``
+ and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706).
+ - gitlab_runner - add new boolean option ``access_level_on_creation``. It controls
+ whether the value of ``access_level`` is used for runner registration or not.
+ The option ``access_level`` has been ignored on registration so far and was
+ only used on updates (https://github.com/ansible-collections/community.general/issues/5907,
+ https://github.com/ansible-collections/community.general/pull/5908).
+ - ilo_redfish_utils module utils - change implementation of DNS Server IP and
+ NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804).
+ - ipa_group - allow adding and removing external users with the ``external_user``
+ option (https://github.com/ansible-collections/community.general/pull/5897).
+ - iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844).
+ - one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf``
+ API call (https://github.com/ansible-collections/community.general/pull/5812).
+ - opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718).
+ - redhat_subscription - adds ``token`` parameter for subscription-manager authentication
+ using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725).
+ - snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773).
+ - snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486).
+ - sudoers - add ``setenv`` parameter to support passing environment variables
+ via sudo (https://github.com/ansible-collections/community.general/pull/5883).
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 5486-snap-alias-cmd-runner.yml
+ - 5694-add-custom-fields-to-bitwarden.yml
+ - 5706-add-builds-forks-container-registry.yml
+ - 5718-opkg-refactor.yaml
+ - 5725-redhat_subscription-add-red-hat-api-token.yml
+ - 5732-bugfix-keycloak-userfed-idempotency.yml
+ - 5735-terraform-init-fix-when-default-workspace-doesnt-exists.yaml
+ - 5750-bugfixing-keycloak-usrfed-fail-when-update-default-mapper-simultaneously.yml
+ - 5751-gem-fix-uninstall-hang.yml
+ - 5755-mh-fix-output-conflict.yml
+ - 5761-callback-types.yml
+ - 5765-mh-lax-output-conflict.yml
+ - 5772-consul-deprecate-params-when-absent.yml
+ - 5773-snap-mh-execute.yml
+ - 5793-apache2-module-npm-warnings.yml
+ - 5794-alternatives-fedora37.yml
+ - 5803-proxmox-read-timeout.yml
+ - 5804-minor-changes-to-hpe-ilo-collection.yml
+ - 5808-xml-children-parameter-does-not-exist.yml
+ - 5811-clarify-bitwarden-error.yml
+ - 5812-implement-updateconf-api-call.yml
+ - 5818-nsupdate-fix-zone-lookup.yml
+ - 5822-gem-uninstall-force.yml
+ - 5843-terraform-validate-no-color.yml
+ - 5844-iptables-state-refactor.yml
+ - 5851-lookup-bitwarden-add-filter-by-collection-id-parameter.yml
+ - 5883-sudoers-add-support-for-setenv-parameter.yml
+ - 5886-redfish-correct-basic-auth-usage-on-session-creation.yml
+ - 5888-update-key-title.yml
+ - 5897-ipa_group-add-external-users.yml
+ - 5907-fix-gitlab_runner-not-idempotent.yml
+ - 5913-dig-caa.yml
+ - 5914-dig-dnskey.yml
+ - 6.3.0.yml
+ modules:
+ - description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+ name: ocapi_command
+ namespace: ''
+ - description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+ name: ocapi_info
+ namespace: ''
+ release_date: '2023-01-31'
+ 6.4.0:
+ changes:
+ bugfixes:
+ - cartesian and flattened lookup plugins - adjust to parameter deprecation in
+ ansible-core 2.14's ``listify_lookup_plugin_terms`` helper function (https://github.com/ansible-collections/community.general/pull/6074).
+ - cloudflare_dns - fixed the idempotency for SRV DNS records (https://github.com/ansible-collections/community.general/pull/5972).
+ - cloudflare_dns - fixed the possibility of setting a root-level SRV DNS record
+ (https://github.com/ansible-collections/community.general/pull/5972).
+ - github_webhook - fix always changed state when no secret is provided (https://github.com/ansible-collections/community.general/pull/5994).
+ - jenkins_plugin - fix error due to undefined variable when updates file is
+ not downloaded (https://github.com/ansible-collections/community.general/pull/6100).
+ - keycloak_client - fix accidental replacement of value for attribute ``saml.signing.private.key``
+ with ``no_log`` in wrong contexts (https://github.com/ansible-collections/community.general/pull/5934).
+ - lxd_* modules, lxd inventory plugin - fix TLS/SSL certificate validation problems
+ by using the correct purpose when creating the TLS context (https://github.com/ansible-collections/community.general/issues/5616,
+ https://github.com/ansible-collections/community.general/pull/6034).
+ - nmcli - fix change handling of values specified as an integer 0 (https://github.com/ansible-collections/community.general/pull/5431).
+ - nmcli - fix failure to handle WIFI settings when connection type not specified
+ (https://github.com/ansible-collections/community.general/pull/5431).
+ - nmcli - fix improper detection of changes to ``wifi.wake-on-wlan`` (https://github.com/ansible-collections/community.general/pull/5431).
+ - nmcli - order is significant for lists of addresses (https://github.com/ansible-collections/community.general/pull/6048).
+ - onepassword lookup plugin - changed to ignore errors from ``op account get``
+ calls. Previously, errors would prevent the auto-signin code from executing (https://github.com/ansible-collections/community.general/pull/5942).
+ - terraform and timezone - slight refactoring to avoid linter reporting potentially
+ undefined variables (https://github.com/ansible-collections/community.general/pull/5933).
+ - various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.general/pull/5940).
+ - yarn - fix ``global=true`` to check for the configured global folder instead
+ of assuming the default (https://github.com/ansible-collections/community.general/pull/5829).
+ - yarn - fix ``state=absent`` not working with ``global=true`` when the package
+ does not include a binary (https://github.com/ansible-collections/community.general/pull/5829).
+ - yarn - fix ``state=latest`` not working with ``global=true`` (https://github.com/ansible-collections/community.general/issues/5712).
+ - zfs_delegate_admin - zfs allow output can now be parsed when uids/gids are
+ not known to the host system (https://github.com/ansible-collections/community.general/pull/5943).
+ - zypper - make package managing work on readonly filesystem of openSUSE MicroOS
+ (https://github.com/ansible-collections/community.general/pull/5615).
+ deprecated_features:
+ - gitlab_runner - the option ``access_level`` will lose its default value in
+ community.general 8.0.0. From that version on, you will have to set this option
+ to ``ref_protected`` explicitly if you want a protected runner (https://github.com/ansible-collections/community.general/issues/5925); a sketch follows this entry.
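+ # A minimal sketch of registering a protected runner with ``access_level``
+ # set explicitly, as the deprecation notice above recommends; the URL and
+ # token variable are placeholders:
+ #
+ #   - name: Register a protected GitLab runner
+ #     community.general.gitlab_runner:
+ #       api_url: https://gitlab.example.com/
+ #       registration_token: "{{ runner_registration_token }}"
+ #       description: protected-runner
+ #       access_level: ref_protected
+ #       state: present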
+ minor_changes:
+ - dnsimple - set custom User-Agent for API requests to DNSimple (https://github.com/ansible-collections/community.general/pull/5927).
+ - flatpak_remote - add new boolean option ``enabled``. It controls whether
+ the remote is enabled (https://github.com/ansible-collections/community.general/pull/5926).
+ - gitlab_project - add ``releases_access_level``, ``environments_access_level``,
+ ``feature_flags_access_level``, ``infrastructure_access_level``, ``monitor_access_level``,
+ and ``security_and_compliance_access_level`` options (https://github.com/ansible-collections/community.general/pull/5986).
+ - jc filter plugin - added the ability to use parser plugins (https://github.com/ansible-collections/community.general/pull/6043).
+ - keycloak_group - add new optional module parameter ``parents`` to properly
+ handle keycloak subgroups (https://github.com/ansible-collections/community.general/pull/5814).
+ - keycloak_user_federation - make ``org.keycloak.storage.ldap.mappers.LDAPStorageMapper``
+ the default value for mappers ``providerType`` (https://github.com/ansible-collections/community.general/pull/5863).
+ - ldap modules - add ``xorder_discovery`` option (https://github.com/ansible-collections/community.general/issues/6045,
+ https://github.com/ansible-collections/community.general/pull/6109).
+ - lxd_container - add diff and check mode (https://github.com/ansible-collections/community.general/pull/5866).
+ - mattermost, rocketchat, slack - replace missing default favicon with docs.ansible.com
+ favicon (https://github.com/ansible-collections/community.general/pull/5928).
+ - modprobe - add ``persistent`` option (https://github.com/ansible-collections/community.general/issues/4028,
+ https://github.com/ansible-collections/community.general/pull/542); a sketch follows this list.
+ - osx_defaults - include stderr in error messages (https://github.com/ansible-collections/community.general/pull/6011).
+ - proxmox - suppress urllib3 ``InsecureRequestWarnings`` when ``validate_certs``
+ option is ``false`` (https://github.com/ansible-collections/community.general/pull/5931).
+ - redfish_command - adding ``EnableSecureBoot`` functionality (https://github.com/ansible-collections/community.general/pull/5899).
+ - redfish_command - adding ``VerifyBiosAttributes`` functionality (https://github.com/ansible-collections/community.general/pull/5900).
+ - sefcontext - add support for path substitutions (https://github.com/ansible-collections/community.general/issues/1193).
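+ # A minimal sketch of the new modprobe ``persistent`` option; the module
+ # name ``dummy`` is only an example:
+ #
+ #   - name: Load the dummy module and keep it loaded across reboots
+ #     community.general.modprobe:
+ #       name: dummy
+ #       state: present
+ #       persistent: present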
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 4028-modprobe-persistent-option.yml
+ - 5431-nmcli-wifi.yml
+ - 5615-zypper-transactional-update.yml
+ - 5814-support-keycloak-subgroups.yml
+ - 5829-fix-yarn-global.yml
+ - 5830-sefcontext-path-subs.yml
+ - 5863-providerType-defaulted-keycloak_userfed-mappers.yml
+ - 5866-lxd_container-diff-and-check-mode.yml
+ - 5899-adding-enablesecureboot-functionality-to-redfish-config.yml
+ - 5900-adding-verifybiosattribute-fucntionality-to-redfish-command.yml
+ - 5915-suppress-urllib3-insecure-request-warnings.yml
+ - 5925-align_gitlab_runner_access_level_default_with_gitlab.yml
+ - 5926-flatpak-remote-enabled.yml
+ - 5927-set-user-agent-dnsimple.yml
+ - 5928-fix-favicon-url.yml
+ - 5933-linting.yml
+ - 5934-fix-keycloak-sanitize_cr.yml
+ - 5942-onepassword-ignore-errors-from-op-account-get.yml
+ - 5943-zfs_delegate_admin-fix-zfs-allow-cannot-parse-unknown-uid-gid.yml
+ - 5972-cloudflare-dns-srv-record.yml
+ - 5985-add-new-gitlab-api-features.yml
+ - 5994-github-webhook-secret.yml
+ - 6.4.0.yml
+ - 6011-osx-defaults-errors.yml
+ - 6034-lxd-tls.yml
+ - 6043-jc_plugin_parser_support.yml
+ - 6045-xorder-discovery.yml
+ - 6048-nmcli-addres-order.yml
+ - 6074-loader_in_listify.yml.yml
+ - 6100-jenkins_plugin.yml
+ - remove-unneeded-imports.yml
+ release_date: '2023-02-27'
+ 6.5.0:
+ changes:
+ bugfixes:
+ - archive - avoid deprecated exception class on Python 3 (https://github.com/ansible-collections/community.general/pull/6180).
+ - gitlab_runner - fix ``KeyError`` on runner creation and update (https://github.com/ansible-collections/community.general/issues/6112).
+ - influxdb_user - fix running in check mode when the user does not exist yet
+ (https://github.com/ansible-collections/community.general/pull/6111).
+ - interfaces_file - fix reading options in lines not starting with a space (https://github.com/ansible-collections/community.general/issues/6120).
+ - jail connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``.
+ This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/6118).
+ - memset - fix ``URLError`` handling (https://github.com/ansible-collections/community.general/pull/6114).
+ - nmcli - fixed idempotency issue for bridge connections. The module forced
+ the default value of ``bridge.priority`` onto nmcli if not set; if ``bridge.stp``
+ is disabled, nmcli ignores it and keeps the default (https://github.com/ansible-collections/community.general/issues/3216,
+ https://github.com/ansible-collections/community.general/issues/4683).
+ - nmcli - fixed idempotency issue when module parameters are set to ``may_fail4=false``
+ and ``method4=disabled``; in this case nmcli ignores the change and keeps its
+ own default value ``yes`` (https://github.com/ansible-collections/community.general/pull/6106).
+ - nmcli - implemented changing the MTU value on VLAN interfaces (https://github.com/ansible-collections/community.general/issues/4387).
+ - opkg - fixes a bug when using ``update_cache=true`` (https://github.com/ansible-collections/community.general/issues/6004).
+ - redhat_subscription, rhsm_release, rhsm_repository - cleanly fail when not
+ running as root, rather than hanging on an interactive ``console-helper``
+ prompt; they all interact with ``subscription-manager``, which already requires
+ running as root (https://github.com/ansible-collections/community.general/issues/734,
+ https://github.com/ansible-collections/community.general/pull/6211).
+ - xenorchestra inventory plugin - fix failure to receive objects from server
+ due to not checking the id of the response (https://github.com/ansible-collections/community.general/pull/6227).
+ - yarn - fix ``global=true`` to not fail when ``executable`` was not specified
+ (https://github.com/ansible-collections/community.general/pull/6132).
+ - yarn - fixes a bug where yarn module tasks would fail when warnings were emitted
+ from Yarn. The ``yarn.list`` method was not filtering out warnings (https://github.com/ansible-collections/community.general/issues/6127).
+ minor_changes:
+ - apt_rpm - adds ``clean``, ``dist_upgrade`` and ``update_kernel`` parameters
+ for clearing caches, fully upgrading the system, and upgrading kernel packages (https://github.com/ansible-collections/community.general/pull/5867).
+ - dconf - parse GVariants for equality comparison when the Python module ``gi.repository``
+ is available (https://github.com/ansible-collections/community.general/pull/6049).
+ - gitlab_runner - allow registering group runners (https://github.com/ansible-collections/community.general/pull/3935).
+ - jira - add worklog functionality (https://github.com/ansible-collections/community.general/issues/6209,
+ https://github.com/ansible-collections/community.general/pull/6210).
+ - ldap modules - add ``ca_path`` option (https://github.com/ansible-collections/community.general/pull/6185).
+ - make - add ``command`` return value to the module output (https://github.com/ansible-collections/community.general/pull/6160).
+ - nmap inventory plugin - add new option ``open`` for returning only open ports
+ (https://github.com/ansible-collections/community.general/pull/6200).
+ - nmap inventory plugin - add new option ``port`` for port-specific scans (https://github.com/ansible-collections/community.general/pull/6165);
+ a configuration sketch follows this list.
+ - nmcli - add ``default`` and ``default-or-eui64`` to the list of valid choices
+ for ``addr_gen_mode6`` parameter (https://github.com/ansible-collections/community.general/pull/5974).
+ - nmcli - add support for ``team.runner-fast-rate`` parameter for ``team`` connections
+ (https://github.com/ansible-collections/community.general/issues/6065).
+ - openbsd_pkg - set ``TERM`` to ``'dumb'`` in ``execute_command()`` to make
+ the module less dependent on the ``TERM`` environment variable set on the Ansible
+ controller (https://github.com/ansible-collections/community.general/pull/6149).
+ - pipx - optional ``install_apps`` parameter added to install applications from
+ injected packages (https://github.com/ansible-collections/community.general/pull/6198).
+ - proxmox_kvm - add new ``archive`` parameter. This is needed to create a VM
+ from an archive (backup) (https://github.com/ansible-collections/community.general/pull/6159).
+ - redfish_info - adds commands to retrieve the HPE ThermalConfiguration and
+ FanPercentMinimum settings from iLO (https://github.com/ansible-collections/community.general/pull/6208).
+ - redhat_subscription - credentials (``username``, ``activationkey``, and so
+ on) are required now only if a system needs to be registered, or ``force_register``
+ is specified (https://github.com/ansible-collections/community.general/pull/5664).
+ - redhat_subscription - the registration is done using the D-Bus ``rhsm`` service
+ instead of spawning a ``subscription-manager register`` command, if possible;
+ this avoids passing plain-text credentials as arguments to ``subscription-manager
+ register``, which can be seen while that command runs (https://github.com/ansible-collections/community.general/pull/6122).
+ - ssh_config - add ``proxyjump`` option (https://github.com/ansible-collections/community.general/pull/5970).
+ - ssh_config - vendored StormSSH's config parser to avoid having to install
+ StormSSH to use the module (https://github.com/ansible-collections/community.general/pull/6117).
+ - znode module - optional ``use_tls`` parameter added for encrypted communication
+ (https://github.com/ansible-collections/community.general/issues/6154).
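+ # A configuration sketch for the new nmap inventory plugin options ``port``
+ # and ``open``; the file name, address range, and port list are examples:
+ #
+ #   # nmap.yml
+ #   plugin: community.general.nmap
+ #   address: 192.168.0.0/24
+ #   port: 22,80,443
+ #   open: true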
+ release_summary: Feature and bugfix release.
+ fragments:
+ - 3216-nmcli-bridge-idempotency-fix.yml
+ - 3935-add-gitlab-group-runner.yml
+ - 4387-nmcli-mtu-for-vlan-connection-fix.yml
+ - 5664-redhat_subscription-credentials-when-needed.yaml
+ - 5867-apt_rpm-add-clean-and-upgrade.yml
+ - 5970-add-proxyjump-option-to-ssh-config.yml
+ - 5974-nmcli_add_new_addr_gen_mode6_options.yml
+ - 6.5.0.yml
+ - 6049-dconf-strings.yml
+ - 6065-nmcli-add-runner-fast-rate-option.yml
+ - 6106-nmcli-ipv4-mayfail-idempotency-fix.yml
+ - 6111-influxdb_user-check-mode.yaml
+ - 6112-fix_key_error_in_gitlab_runner_creation_update.yml
+ - 6114-memset-add-url-error-handling.yml
+ - 6117-remove-stormssh-depend.yml
+ - 6118-jail-plugin-fix-default-inventory_hostname.yml
+ - 6119-opkg-update.yaml
+ - 6122-redhat_subscription-subscribe-via-dbus.yaml
+ - 6127-yarn-ignore-warnings.yml
+ - 6131-fix-interfaces_file-for-no-leading-spaces.yml
+ - 6138-fix-yarn-global.yml
+ - 6149-openbsd_pkg-term.yml
+ - 6154-znode-optional-tls.yml
+ - 6158-create-proxmox-vm-from-archive.yml
+ - 6160-add-command-make-output.yml
+ - 6165-nmap-port.yml
+ - 6180-replace-deprecated-badzipfile.yml
+ - 6198-pipx-inject-install-apps.yml
+ - 6200-adding-open-option-to-nmap.yml
+ - 6208-hpe-thermal-fan-percent.yaml
+ - 6210-add-worklog-functionality-to-jira.yml
+ - 6211-rhsm-require-root.yml
+ - 6227-xen-orchestra-check-response-id.yml
+ - xxxx-ldap-ca-cert-file.yml
+ modules:
+ - description: Manage KDE configuration files
+ name: kdeconfig
+ namespace: ''
+ plugins:
+ lookup:
+ - description: merge variables with a certain suffix
+ name: merge_variables
+ namespace: null
+ release_date: '2023-03-27'
+ 6.6.0:
+ changes:
+ bugfixes:
+ - archive - reduce RAM usage by generating CRC32 checksum over chunks (https://github.com/ansible-collections/community.general/pull/6274).
+ - flatpak - fixes idempotency detection issues. In some cases the module could
+ fail to properly detect already existing Flatpaks because of a parameter which
+ only checks the installed apps (https://github.com/ansible-collections/community.general/pull/6289).
+ - icinga2_host - fix the data structure sent to Icinga to make use of host templates
+ and template vars (https://github.com/ansible-collections/community.general/pull/6286).
+ - idrac_redfish_command - allow user to specify ``resource_id`` for ``CreateBiosConfigJob``
+ to specify an exact manager (https://github.com/ansible-collections/community.general/issues/2090).
+ - ini_file - make ``section`` parameter not required so it is possible to pass
+ ``null`` as a value. In the past this was only possible due to a bug in ansible-core
+ that has since been fixed (https://github.com/ansible-collections/community.general/pull/6404).
+ - keycloak - improve error messages (https://github.com/ansible-collections/community.general/pull/6318).
+ - one_vm - fix syntax error when creating VMs with a more complex template (https://github.com/ansible-collections/community.general/issues/6225).
+ - pipx - fixed handling of ``install_deps=true`` with ``state=latest`` and ``state=upgrade``
+ (https://github.com/ansible-collections/community.general/pull/6303).
+ - redhat_subscription - do not use D-Bus for registering when ``environment``
+ is specified, so it is again possible to specify environment names for registering,
+ as the D-Bus APIs work only with IDs (https://github.com/ansible-collections/community.general/pull/6319).
+ - redhat_subscription - when ``force_register`` is specified, try to unregister
+ only if already registered (https://github.com/ansible-collections/community.general/issues/6258,
+ https://github.com/ansible-collections/community.general/pull/6259).
+ - redhat_subscription - use the right D-Bus options for environments when registering
+ a CentOS Stream 8 system and using ``environment`` (https://github.com/ansible-collections/community.general/pull/6275).
+ - rhsm_release - make ``release`` parameter not required so it is possible to
+ pass ``null`` as a value. In the past this was only possible due to a bug
+ in ansible-core that has since been fixed (https://github.com/ansible-collections/community.general/pull/6401).
+ - rundeck module utils - fix errors caused by empty API responses (https://github.com/ansible-collections/community.general/pull/6300).
+ - rundeck_acl_policy - fix ``TypeError - byte indices must be integers or slices,
+ not str`` error caused by empty API response. Update the module to use ``module_utils.rundeck``
+ functions (https://github.com/ansible-collections/community.general/pull/5887,
+ https://github.com/ansible-collections/community.general/pull/6300).
+ - rundeck_project - update the module to use ``module_utils.rundeck`` functions
+ (https://github.com/ansible-collections/community.general/issues/5742, https://github.com/ansible-collections/community.general/pull/6300).
+ - snap_alias - the module would only recognize snap names containing letters,
+ numbers, or underscores, failing to identify valid snap names such as
+ ``lxd.lxc`` (https://github.com/ansible-collections/community.general/pull/6361).
+ minor_changes:
+ - cpanm - minor change, use feature from ``ModuleHelper`` (https://github.com/ansible-collections/community.general/pull/6385).
+ - 'dconf - be forgiving about boolean values: convert them to GVariant booleans
+ automatically (https://github.com/ansible-collections/community.general/pull/6206).'
+ - dconf - minor refactoring improving parameters and dependencies validation
+ (https://github.com/ansible-collections/community.general/pull/6336).
+ - deps module utils - add function ``failed()`` providing the ability to check
+ the dependency check result without triggering an exception (https://github.com/ansible-collections/community.general/pull/6383).
+ - dig lookup plugin - support querying multiple domains in one lookup call,
+ as indicated in the documentation (https://github.com/ansible-collections/community.general/pull/6334);
+ a sketch follows this list.
+ - gitlab_project - add new option ``topics`` for adding topics to GitLab projects
+ (https://github.com/ansible-collections/community.general/pull/6278).
+ - homebrew_cask - allows passing ``--greedy`` option to ``upgrade_all`` (https://github.com/ansible-collections/community.general/pull/6267).
+ - idrac_redfish_command - add ``job_id`` to ``CreateBiosConfigJob`` response
+ (https://github.com/ansible-collections/community.general/issues/5603).
+ - ipa_hostgroup - add ``append`` parameter for adding new hosts to existing
+ hostgroups without changing existing hostgroup members (https://github.com/ansible-collections/community.general/pull/6203).
+ - keycloak_authentication - add flow type option to sub flows to allow the creation
+ of 'form-flow' sub flows like in Keycloak's built-in registration flow (https://github.com/ansible-collections/community.general/pull/6318).
+ - mksysb - improved the output of the module in case of errors (https://github.com/ansible-collections/community.general/issues/6263).
+ - nmap inventory plugin - added environment variables for configuring ``address``
+ and ``exclude`` (https://github.com/ansible-collections/community.general/issues/6351).
+ - nmcli - add ``macvlan`` connection type (https://github.com/ansible-collections/community.general/pull/6312).
+ - pipx - add ``system_site_packages`` parameter to give application access to
+ system-wide packages (https://github.com/ansible-collections/community.general/pull/6308).
+ - pipx - ensure ``include_injected`` parameter works with ``state=upgrade``
+ and ``state=latest`` (https://github.com/ansible-collections/community.general/pull/6212).
+ - puppet - add new option ``skip_tags`` to exclude certain tagged resources
+ during a ``puppet agent`` or ``puppet apply`` run (https://github.com/ansible-collections/community.general/pull/6293).
+ - terraform - remove the state file check condition and error block, because
+ the native implementation of Terraform does not raise an error for a non-existent
+ state file (https://github.com/ansible-collections/community.general/pull/6296).
+ - udm_dns_record - minor refactor to the code (https://github.com/ansible-collections/community.general/pull/6382).
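+ # A sketch of querying multiple domains in a single dig lookup call, per the
+ # entry above; the domain names are examples:
+ #
+ #   - name: Resolve two names at once
+ #     ansible.builtin.debug:
+ #       msg: "{{ lookup('community.general.dig', 'example.com.', 'example.org.') }}"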
+ release_summary: Bugfix and feature release.
+ fragments:
+ - 2090-idrac-redfish-resource-id-fix.yml
+ - 5603-redfish-idrac-job-id-in-response.yml
+ - 6.6.0.yml
+ - 6199-archive-generate-checksum-in-chunks.yml
+ - 6203-add-append-option-to-ipa-hostgroup.yml
+ - 6206-dconf-booleans.yml
+ - 6212-pipx-include-injected.yml
+ - 6259-redhat_subscription-fix-force.yaml
+ - 6267-homebrew-cask-upgrade-all-greedy.yml
+ - 6269-mksysb-output.yml
+ - 6275-redhat_subscription-fix-environments-centos.yaml
+ - 6277-add-topics-gitlab-project.yml
+ - 6286-icinga2_host-template-and-template-vars.yml
+ - 6289-bugfix-flatpak-check-if-already-installed.yml
+ - 6293-add-puppet-skip-tags-option.yaml
+ - 6294-fix-one_vm-instantiation.yml
+ - 6296-LanceNero-Terraform_statefile_check.yml
+ - 6300-rundeck-modules-fixes-and-improvements.yml
+ - 6303-pipx-fix-state-latest-and-add-system-site-packages.yml
+ - 6308-pipx-add-system-site-packages.yml
+ - 6312-nmcli-add-macvlan-connection-type.yml
+ - 6318-add-form-flow.yml
+ - 6319-redhat_subscription-fix-environment-parameter.yaml
+ - 6334-dig-support-multiple-domains.yml
+ - 6336-dconf-refactor.yml
+ - 6351-support-env-variables-to-nmap-dynamic-inventoiry.yaml
+ - 6361-snap-alias-regex-bugfix.yml
+ - 6382-udm-dns-record-refactor.yml
+ - 6383-deps-failed.yml
+ - 6385-cpan-mh-feat.yml
+ - 6401-rhsm_release-required.yml
+ - 6404-ini_file-section.yml
+ modules:
+ - description: Query btrfs filesystem info
+ name: btrfs_info
+ namespace: ''
+ - description: Manage btrfs subvolumes
+ name: btrfs_subvolume
+ namespace: ''
+ - description: Manages Out-Of-Band controllers using Redfish APIs
+ name: ilo_redfish_command
+ namespace: ''
+ - description: Allows administration of Keycloak client authorization scopes via
+ Keycloak API
+ name: keycloak_authz_authorization_scope
+ namespace: ''
+ - description: Set the type of aclientscope in realm or client via Keycloak API
+ name: keycloak_clientscope_type
+ namespace: ''
+ release_date: '2023-04-24'
+ 6.6.1:
+ changes:
+ bugfixes:
+ - deps module utils - do not fail when dependency cannot be found (https://github.com/ansible-collections/community.general/pull/6479).
+ - nmcli - fix bond option ``xmit_hash_policy`` (https://github.com/ansible-collections/community.general/pull/6527).
+ - passwordstore lookup plugin - make compatible with ansible-core 2.16 (https://github.com/ansible-collections/community.general/pull/6447).
+ - portage - fix ``changed_use`` and ``newuse`` not triggering rebuilds (https://github.com/ansible-collections/community.general/issues/6008,
+ https://github.com/ansible-collections/community.general/pull/6548).
+ - 'portage - update the logic for generating the emerge command arguments to
+ ensure that ``withbdeps: false`` results in passing an ``n`` argument with
+ the ``--with-bdeps`` emerge flag (https://github.com/ansible-collections/community.general/issues/6451,
+ https://github.com/ansible-collections/community.general/pull/6456).'
+ - proxmox_tasks_info - remove ``api_user`` + ``api_password`` constraint from
+ ``required_together``, as it caused ``api_password`` to be required even when
+ the API token parameters are used (https://github.com/ansible-collections/community.general/issues/6201).
+ - puppet - handling of the ``noop`` parameter was not working at all; this has
+ now been fixed (https://github.com/ansible-collections/community.general/issues/6452,
+ https://github.com/ansible-collections/community.general/issues/6458).
+ - terraform - fix broken ``warn()`` call (https://github.com/ansible-collections/community.general/pull/6497).
+ - xfs_quota - in case of a project quota, the call to ``xfs_quota`` did not
+ initialize/reset the project (https://github.com/ansible-collections/community.general/issues/5143).
+ - zypper - added handling of zypper exit code 102. The changed state is now
+ set correctly, and rc 102 is still preserved so it can be evaluated by the playbook (https://github.com/ansible-collections/community.general/pull/6534).
+ minor_changes:
+ - dconf - if ``gi.repository.GLib`` is missing, try to respawn in a Python interpreter
+ that has it (https://github.com/ansible-collections/community.general/pull/6491).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 5143-fix-xfs-quota-project-init.yml
+ - 6.6.1.yml
+ - 6456-fix-portage-withbdeps-false.yml
+ - 6458-puppet-noop.yml
+ - 6491-dconf-respawn.yml
+ - 6497-terraform-fix.yml
+ - 6527-nmcli-bond-fix-xmit_hash_policy.yml
+ - 6534-zypper-exitcode-102-handled.yaml
+ - 6548-portage-changed_use-newuse.yml
+ - 6554-proxmox-tasks-info-fix-required-password.yaml
+ - deps.yml
+ - passwordstore-lock.yml
+ release_date: '2023-05-22'
+ 6.6.2:
+ changes:
+ bugfixes:
+ - csv module utils - detects and removes Unicode BOM markers from incoming CSV
+ content (https://github.com/ansible-collections/community.general/pull/6662).
+ - gitlab_group - the module passed parameters to the API call even when not
+ set. The module now filters out ``None`` values to remediate this (https://github.com/ansible-collections/community.general/pull/6712).
+ - ini_file - fix a bug where the inactive options were not used when possible
+ (https://github.com/ansible-collections/community.general/pull/6575).
+ - keycloak module utils - fix ``is_struct_included`` handling of lists of lists/dictionaries
+ (https://github.com/ansible-collections/community.general/pull/6688).
+ - keycloak module utils - the function ``get_user_by_username`` now returns the
+ user representation or ``None`` as stated in the documentation (https://github.com/ansible-collections/community.general/pull/6758).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 6.6.2.yml
+ - 6568-fix-get-user-by-username-in-keycloak-module-utils.yml
+ - 6662-csv-bom.yml
+ - 6688-is-struct-included-bug-in-keycloak-py.yml
+ - 6712-gitlab_group-filtered-for-none-values.yml
+ - ini_file-use-inactive-options-when-possible.yml
+ release_date: '2023-06-19'
diff --git a/ansible_collections/community/general/changelogs/changelog.yaml.license b/ansible_collections/community/general/changelogs/changelog.yaml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/changelogs/changelog.yaml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/changelogs/config.yaml b/ansible_collections/community/general/changelogs/config.yaml
new file mode 100644
index 000000000..52e101e11
--- /dev/null
+++ b/ansible_collections/community/general/changelogs/config.yaml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+flatmap: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Community General
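With this configuration, antsibull-changelog assembles the release notes from
fragment files placed in changelogs/fragments/ and keyed by the section names
listed above. A minimal sketch of such a fragment (filename and content are
hypothetical):

    # changelogs/fragments/1234-some-module-fix.yml
    bugfixes:
      - some_module - fix a user-visible problem (https://github.com/ansible-collections/community.general/pull/1234).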
diff --git a/ansible_collections/community/general/changelogs/fragments/.keep b/ansible_collections/community/general/changelogs/fragments/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/changelogs/fragments/.keep
diff --git a/ansible_collections/community/general/commit-rights.md b/ansible_collections/community/general/commit-rights.md
new file mode 100644
index 000000000..196565eca
--- /dev/null
+++ b/ansible_collections/community/general/commit-rights.md
@@ -0,0 +1,80 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+Committers Guidelines for community.general
+===========================================
+
+This document is based on the [Ansible committer guidelines](https://github.com/ansible/ansible/blob/b57444af14062ec96e0af75fdfc2098c74fe2d9a/docs/docsite/rst/community/committer_guidelines.rst) ([latest version](https://docs.ansible.com/ansible/devel/community/committer_guidelines.html)).
+
+These are the guidelines for people with commit privileges on the Ansible Community General Collection GitHub repository. Please read the guidelines before you commit.
+
+These guidelines apply to everyone. At the same time, this is NOT a process document. So just use good judgment. You have been given commit access because we trust your judgment.
+
+That said, use the trust wisely.
+
+If you abuse the trust and break components and builds, and so on, the trust level falls and you may be asked not to commit or you may lose your commit privileges.
+
+Our workflow on GitHub
+----------------------
+
+As a committer, you may already know this, but our workflow forms a lot of our team policies. Please ensure you are aware of the following workflow steps:
+
+* Fork the repository upon which you want to do some work to your own personal repository
+* Work on the specific branch upon which you need to commit
+* Create a Pull Request back to the collection repository and await reviews
+* Adjust code as necessary based on the comments provided
+* Ask someone from the other committers to do a final review and merge
+
+Sometimes, committers merge their own pull requests. This section is a set of guidelines. If you are changing a comma in a doc or making a very minor change, you can use your best judgment. This is another trust thing. The process is critical for any major change, but for little things or getting something done quickly, use your best judgment and make sure people on the team are aware of your work.
+
+Roles
+-----
+* Release managers: Merge pull requests to `stable-X` branches, create tags to do releases.
+* Committers: Fine to do PRs for most things, but we should have a timebox. Hanging PRs may merge on the judgment of these devs.
+* Module maintainers: Module maintainers own specific modules and have indirect commit access through the current module PR mechanisms. This is primarily [ansibullbot](https://github.com/ansibullbot)'s `shipit` mechanism.
+
+General rules
+-------------
+Individuals with direct commit access to this collection repository are entrusted with powers that allow them to do a broad variety of things--probably more than we can write down. Rather than rules, treat these as general *guidelines*; individuals with this power are expected to use their best judgment.
+
+* Do NOTs:
+
+ - Do not commit directly.
+ - Do not merge your own PRs. Someone else should have a chance to review and approve the PR merge. You have a small amount of leeway here for very minor changes.
+ - Do not forget about non-standard / alternate environments. Consider the alternatives. Yes, people have bad/unusual/strange environments (like binaries from multiple init systems installed), but they are the ones who need us the most.
+ - Do not drag your community team members down. Discuss the technical merits of any pull requests you review. Avoid negativity and personal comments. For more guidance on being a good community member, read the [Ansible Community Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
+ - Do not forget about the maintenance burden. High-maintenance features may not be worth adding.
+ - Do not break playbooks. Always keep backwards compatibility in mind.
+ - Do not forget to keep it simple. Complexity breeds all kinds of problems.
+ - Do not merge to branches other than `main`, especially not to `stable-X`, if you do not have explicit permission to do so.
+ - Do not create tags. Tags are used in the release process, and should only be created by the people responsible for managing the stable branches.
+
+* Do:
+
+  - Squash commits and avoid merges whenever possible; use GitHub's squash commits or cherry-pick if needed (bisect thanks you).
+ - Be active. Committers who have no activity on the project (through merges, triage, commits, and so on) will have their permissions suspended.
+ - Consider backwards compatibility (goes back to "do not break existing playbooks").
+  - Write tests. PRs with tests are reviewed with higher priority than PRs that should include tests but do not. While not all changes require tests, be sure to add them for bug fixes or functionality changes.
+  - Discuss with other committers, especially when you are unsure of something.
+ - Document! If your PR is a new feature or a change to behavior, make sure you've updated all associated documentation or have notified the right people to do so.
+  - Consider scope; sometimes a fix can be generalized.
+  - Keep it simple, so things stay maintainable, debuggable, and intelligible.
+
+Committers are expected to continue to follow the same community and contribution guidelines followed by the rest of the Ansible community.
+
+
+People
+------
+
+Individuals who have been asked to become a part of this group have generally been contributing in significant ways to the community.general collection for some time. Should they agree, they are requested to add their names and GitHub IDs to this file, in the section below, through a pull request. Doing so indicates that these individuals agree to act in the ways that their fellow committers trust that they will act.
+
+| Name | GitHub ID | IRC Nick | Other |
+| ------------------- | -------------------- | ------------------ | -------------------- |
+| Alexei Znamensky | russoz | russoz | |
+| Andrew Klychkov | andersson007 | andersson007_ | |
+| Andrew Pantuso | Ajpantuso | ajpantuso | |
+| Felix Fontein | felixfontein | felixfontein | |
+| John R Barker | gundalow | gundalow | |
diff --git a/ansible_collections/community/general/docs/docsite/extra-docs.yml b/ansible_collections/community/general/docs/docsite/extra-docs.yml
new file mode 100644
index 000000000..2171031ac
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/extra-docs.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+sections:
+ - title: Guides
+ toctree:
+ - filter_guide
+ - test_guide
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml
new file mode 100644
index 000000000..fd874e5c9
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-common.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: foo
+ extra: true
+ - name: bar
+ extra: false
+ - name: meh
+ extra: true
+
+list2:
+ - name: foo
+ path: /foo
+ - name: baz
+ path: /baz
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml
new file mode 100644
index 000000000..0cf6a9b8a
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 1. Merge two lists by common attribute 'name'
+ include_vars:
+ dir: example-001_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-001.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
new file mode 100644
index 000000000..fd874e5c9
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: foo
+ extra: true
+ - name: bar
+ extra: false
+ - name: meh
+ extra: true
+
+list2:
+ - name: foo
+ path: /foo
+ - name: baz
+ path: /baz
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml
new file mode 100644
index 000000000..0604feccb
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-001_vars/list3.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ list1|
+ community.general.lists_mergeby(list2, 'name') }}"
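With the list1 and list2 values from default-common.yml above, merging by
``name`` is expected to yield roughly the following (entries with the same
name are combined, and the result is sorted by the merge attribute; a sketch,
not the rendered example-001.out):

    list3:
      - extra: false
        name: bar
      - name: baz
        path: /baz
      - extra: true
        name: foo
        path: /foo
      - extra: true
        name: meh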
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml
new file mode 100644
index 000000000..5e6e0315d
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 2. Merge two lists by common attribute 'name'
+ include_vars:
+ dir: example-002_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-002.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
new file mode 100644
index 000000000..fd874e5c9
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: foo
+ extra: true
+ - name: bar
+ extra: false
+ - name: meh
+ extra: true
+
+list2:
+ - name: foo
+ path: /foo
+ - name: baz
+ path: /baz
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml
new file mode 100644
index 000000000..8ad752407
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-002_vars/list3.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml
new file mode 100644
index 000000000..2f93ab8a2
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 3. Merge recursive by 'name', replace lists (default)
+ include_vars:
+ dir: example-003_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-003.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml
new file mode 100644
index 000000000..d5374eece
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-003_vars/list3.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true) }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml
new file mode 100644
index 000000000..3ef067faf
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 4. Merge recursive by 'name', keep lists
+ include_vars:
+ dir: example-004_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-004.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
new file mode 100644
index 000000000..a054ea1e7
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='keep') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml
new file mode 100644
index 000000000..57e7a779d
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 5. Merge recursive by 'name', append lists
+ include_vars:
+ dir: example-005_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-005.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
new file mode 100644
index 000000000..3480bf658
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='append') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml
new file mode 100644
index 000000000..41fc88e49
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 6. Merge recursive by 'name', prepend lists
+ include_vars:
+ dir: example-006_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-006.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
new file mode 100644
index 000000000..97513b559
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='prepend') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml
new file mode 100644
index 000000000..3de715844
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 7. Merge recursive by 'name', append lists 'remove present'
+ include_vars:
+ dir: example-007_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-007.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
new file mode 100644
index 000000000..cb51653b4
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='append_rp') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml
new file mode 100644
index 000000000..e33828bf9
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 8. Merge recursive by 'name', prepend lists 'remove present'
+ include_vars:
+ dir: example-008_vars
+- debug:
+ var: list3
+ when: debug|d(false)|bool
+- template:
+ src: list3.out.j2
+ dest: example-008.out
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
new file mode 100644
index 000000000..133c8f2ae
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
new file mode 100644
index 000000000..af7001fc4
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='prepend_rp') }}"
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples.yml
new file mode 100644
index 000000000..83b985084
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples.yml
@@ -0,0 +1,54 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+examples:
+ - label: 'In the example below the lists are merged by the attribute ``name``:'
+ file: example-001_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-001.out
+ lang: 'yaml'
+ - label: 'It is possible to use a list of lists as an input of the filter:'
+ file: example-002_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces the same result as in the previous example:'
+ file: example-002.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=replace`` (default):'
+ file: example-003_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-003.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=keep``:'
+ file: example-004_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-004.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=append``:'
+ file: example-005_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-005.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=prepend``:'
+ file: example-006_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-006.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=append_rp``:'
+ file: example-007_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-007.out
+ lang: 'yaml'
+ - label: 'Example ``list_merge=prepend_rp``:'
+ file: example-008_vars/list3.yml
+ lang: 'yaml+jinja'
+ - label: 'This produces:'
+ file: example-008.out
+ lang: 'yaml'
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
new file mode 100644
index 000000000..95a0fafdd
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
@@ -0,0 +1,13 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+{% for i in examples %}
+{{ i.label }}
+
+.. code-block:: {{ i.lang }}
+
+ {{ lookup('file', i.file)|indent(2) }}
+
+{% endfor %}
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
new file mode 100644
index 000000000..71d0d5da6
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
@@ -0,0 +1,62 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Merging lists of dictionaries
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter.
+
+.. note:: The output of the examples in this section is produced by the YAML callback plugin, which generates "Ansible output that can be quite a bit easier to read than the default JSON formatting". See :ref:`the documentation for the community.general.yaml callback plugin <ansible_collections.community.general.yaml_callback>`.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+ {{ lookup('file', 'default-common.yml')|indent(2) }}
+
+{% for i in examples[0:2] %}
+{{ i.label }}
+
+.. code-block:: {{ i.lang }}
+
+ {{ lookup('file', i.file)|indent(2) }}
+
+{% endfor %}
+
+.. versionadded:: 2.0.0
+
+{% for i in examples[2:4] %}
+{{ i.label }}
+
+.. code-block:: {{ i.lang }}
+
+ {{ lookup('file', i.file)|indent(2) }}
+
+{% endfor %}
+
+The filter also accepts two optional parameters, ``recursive`` and ``list_merge``, available since community.general 4.4.0. These parameters are only supported with ansible-base 2.10 or ansible-core, not with Ansible 2.9.
+
+**recursive**
+ A boolean, defaulting to ``False``. Determines whether ``community.general.lists_mergeby`` recursively merges nested hashes. Note: it does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
+
+**list_merge**
+ A string; its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists.
+
+The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. The functionality of these parameters is exactly the same as in the ``combine`` filter. See :ref:`Combining hashes/dictionaries <combine_filter>` to learn the details of these options.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+ {{ lookup('file', 'default-recursive-true.yml')|indent(2) }}
+
+{% for i in examples[4:16] %}
+{{ i.label }}
+
+.. code-block:: {{ i.lang }}
+
+ {{ lookup('file', i.file)|indent(2) }}
+
+{% endfor %}
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2 b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2
new file mode 100644
index 000000000..b51f6b868
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/list3.out.j2
@@ -0,0 +1,7 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+list3:
+{{ list3|to_nice_yaml(indent=0) }}
diff --git a/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/playbook.yml b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/playbook.yml
new file mode 100644
index 000000000..793d23348
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/helper/lists_mergeby/playbook.yml
@@ -0,0 +1,62 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# 1) Run all examples and create example-XXX.out
+# shell> ansible-playbook playbook.yml -e examples=true
+#
+# 2) Optionally, for testing, create examples_all.rst
+# shell> ansible-playbook playbook.yml -e examples_all=true
+#
+# 3) Create the docs reST files
+# shell> ansible-playbook playbook.yml -e merging_lists_of_dictionaries=true
+#
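+# 4) Optionally, run a single example by its tag; the blocks below are
+# tagged t001..t008. For example
+# shell> ansible-playbook playbook.yml -e examples=true --tags t003
+#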
+# Notes:
+# * Use YAML callback, e.g. set ANSIBLE_STDOUT_CALLBACK=community.general.yaml
+# * Use sphinx-view to render and review the reST files
+# shell> sphinx-view <path_to_helper>/examples_all.rst
+# * Proofread the completed docs *.rst files and copy them into the
+# directory rst.
+# * Then delete the *.rst and *.out files from this directory. Do not
+# add the *.rst and *.out files in this directory to version control.
+#
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# community.general/docs/docsite/helper/lists_mergeby/playbook.yml
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+
+ - block:
+ - import_tasks: example-001.yml
+ tags: t001
+ - import_tasks: example-002.yml
+ tags: t002
+ - import_tasks: example-003.yml
+ tags: t003
+ - import_tasks: example-004.yml
+ tags: t004
+ - import_tasks: example-005.yml
+ tags: t005
+ - import_tasks: example-006.yml
+ tags: t006
+ - import_tasks: example-007.yml
+ tags: t007
+ - import_tasks: example-008.yml
+ tags: t008
+ when: examples|d(false)|bool
+
+ - block:
+ - include_vars: examples.yml
+ - template:
+ src: examples_all.rst.j2
+ dest: examples_all.rst
+ when: examples_all|d(false)|bool
+
+ - block:
+ - include_vars: examples.yml
+ - template:
+ src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2
+ dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
+ when: merging_lists_of_dictionaries|d(false)|bool
diff --git a/ansible_collections/community/general/docs/docsite/links.yml b/ansible_collections/community/general/docs/docsite/links.yml
new file mode 100644
index 000000000..bd954c409
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/links.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+edit_on_github:
+ repository: ansible-collections/community.general
+ branch: main
+ path_prefix: ''
+
+extra_links:
+ - description: Submit a bug report
+ url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml
+ - description: Request a feature
+ url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=feature_request.yml
+
+communication:
+ matrix_rooms:
+ - topic: General usage and support questions
+ room: '#users:ansible.im'
+ irc_channels:
+ - topic: General usage and support questions
+ network: Libera
+ channel: '#ansible'
+ mailing_lists:
+ - topic: Ansible Project List
+ url: https://groups.google.com/g/ansible-project
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst
new file mode 100644
index 000000000..1c6468dde
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide.rst
@@ -0,0 +1,23 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.filter_guide:
+
+community.general Filter Guide
+==============================
+
+The :ref:`community.general collection <plugins_in_community.general>` offers several useful filter plugins.
+
+.. toctree::
+ :maxdepth: 2
+
+ filter_guide_paths
+ filter_guide_abstract_informations
+ filter_guide_working_with_times
+ filter_guide_working_with_versions
+ filter_guide_creating_identifiers
+ filter_guide_conversions
+ filter_guide_selecting_json_data
+ filter_guide_working_with_unicode
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst
new file mode 100644
index 000000000..8f997f163
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations.rst
@@ -0,0 +1,15 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Abstract transformations
+------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ filter_guide_abstract_informations_dictionaries
+ filter_guide_abstract_informations_grouping
+ filter_guide_abstract_informations_merging_lists_of_dictionaries
+ filter_guide_abstract_informations_counting_elements_in_sequence
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst
new file mode 100644
index 000000000..dcadd5a79
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst
@@ -0,0 +1,82 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Counting elements in a sequence
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values.
+
+.. code-block:: yaml+jinja
+
+ - name: Count character occurrences in a string
+ debug:
+ msg: "{{ 'abccbaabca' | community.general.counter }}"
+
+ - name: Count items in a list
+ debug:
+ msg: "{{ ['car', 'car', 'bike', 'plane', 'bike'] | community.general.counter }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Count character occurrences in a string] ********************************************
+ ok: [localhost] => {
+ "msg": {
+ "a": 4,
+ "b": 3,
+ "c": 3
+ }
+ }
+
+ TASK [Count items in a list] **************************************************************
+ ok: [localhost] => {
+ "msg": {
+ "bike": 2,
+ "car": 2,
+ "plane": 1
+ }
+ }
+
+This plugin is useful for selecting resources based on current allocation:
+
+.. code-block:: yaml+jinja
+
+ - name: Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks
+ debug:
+ msg: >-
+ {{
+ ( disks | dict2items | map(attribute='value.adapter') | list
+ | community.general.counter | dict2items
+ | rejectattr('value', '>=', 4) | sort(attribute='value') | first
+ ).key
+ }}
+ vars:
+ disks:
+ sda:
+ adapter: scsi_1
+ sdb:
+ adapter: scsi_1
+ sdc:
+ adapter: scsi_1
+ sdd:
+ adapter: scsi_1
+ sde:
+ adapter: scsi_2
+ sdf:
+ adapter: scsi_3
+ sdg:
+ adapter: scsi_3
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Get ID of SCSI controller(s) with less than 4 disks attached and choose the one with the least disks]
+ ok: [localhost] => {
+ "msg": "scsi_2"
+ }
+
+.. versionadded:: 4.3.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
new file mode 100644
index 000000000..840bd1542
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst
@@ -0,0 +1,124 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Dictionaries
+^^^^^^^^^^^^
+
+You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``:
+
+.. code-block:: yaml+jinja
+
+ - name: Create a single-entry dictionary
+ debug:
+ msg: "{{ myvar | community.general.dict_kv('thatsmyvar') }}"
+ vars:
+ myvar: myvalue
+
+ - name: Create a list of dictionaries where the 'server' field is taken from a list
+ debug:
+ msg: >-
+ {{ myservers | map('community.general.dict_kv', 'server')
+ | map('combine', common_config) }}
+ vars:
+ common_config:
+ type: host
+ database: all
+ myservers:
+ - server1
+ - server2
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Create a single-entry dictionary] **************************************************
+ ok: [localhost] => {
+ "msg": {
+ "thatsmyvar": "myvalue"
+ }
+ }
+
+ TASK [Create a list of dictionaries where the 'server' field is taken from a list] *******
+ ok: [localhost] => {
+ "msg": [
+ {
+ "database": "all",
+ "server": "server1",
+ "type": "host"
+ },
+ {
+ "database": "all",
+ "server": "server2",
+ "type": "host"
+ }
+ ]
+ }
+
+.. versionadded:: 2.0.0
+
+If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, ``dict`` is a global function and not a filter, so it cannot be used with ``map``. For that, the ``community.general.dict`` filter can be used:
+
+.. code-block:: yaml+jinja
+
+ - name: Create a dictionary with the dict function
+ debug:
+ msg: "{{ dict([[1, 2], ['a', 'b']]) }}"
+
+ - name: Create a dictionary with the community.general.dict filter
+ debug:
+ msg: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
+
+ - name: Create a list of dictionaries with map and the community.general.dict filter
+ debug:
+ msg: >-
+ {{ values | map('zip', ['k1', 'k2', 'k3'])
+ | map('map', 'reverse')
+ | map('community.general.dict') }}
+ vars:
+ values:
+ - - foo
+ - 23
+ - a
+ - - bar
+ - 42
+ - b
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Create a dictionary with the dict function] ****************************************
+ ok: [localhost] => {
+ "msg": {
+ "1": 2,
+ "a": "b"
+ }
+ }
+
+ TASK [Create a dictionary with the community.general.dict filter] ************************
+ ok: [localhost] => {
+ "msg": {
+ "1": 2,
+ "a": "b"
+ }
+ }
+
+ TASK [Create a list of dictionaries with map and the community.general.dict filter] ******
+ ok: [localhost] => {
+ "msg": [
+ {
+ "k1": "foo",
+ "k2": 23,
+ "k3": "a"
+ },
+ {
+ "k1": "bar",
+ "k2": 42,
+ "k3": "b"
+ }
+ ]
+ }
+
+.. versionadded:: 3.0.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst
new file mode 100644
index 000000000..2cea7f9ba
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst
@@ -0,0 +1,103 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Grouping
+^^^^^^^^
+
+If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows you to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries in which the selected attribute equals ``grouper``. If you know that for every ``grouper`` there will be at most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary.
+
+One example is ``ansible_facts.mounts``, which is a list of dictionaries, each with a ``device`` element indicating the device that is mounted. Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the corresponding mount information:
+
+.. code-block:: yaml+jinja
+
+ - name: Output mount facts grouped by device name
+ debug:
+ var: ansible_facts.mounts | community.general.groupby_as_dict('device')
+
+ - name: Output mount facts grouped by mount point
+ debug:
+ var: ansible_facts.mounts | community.general.groupby_as_dict('mount')
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Output mount facts grouped by device name] ******************************************
+ ok: [localhost] => {
+ "ansible_facts.mounts | community.general.groupby_as_dict('device')": {
+ "/dev/sda1": {
+ "block_available": 2000,
+ "block_size": 4096,
+ "block_total": 2345,
+ "block_used": 345,
+ "device": "/dev/sda1",
+ "fstype": "ext4",
+ "inode_available": 500,
+ "inode_total": 512,
+ "inode_used": 12,
+ "mount": "/boot",
+ "options": "rw,relatime,data=ordered",
+ "size_available": 56821,
+ "size_total": 543210,
+ "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
+ },
+ "/dev/sda2": {
+ "block_available": 1234,
+ "block_size": 4096,
+ "block_total": 12345,
+ "block_used": 11111,
+ "device": "/dev/sda2",
+ "fstype": "ext4",
+ "inode_available": 1111,
+ "inode_total": 1234,
+ "inode_used": 123,
+ "mount": "/",
+ "options": "rw,relatime",
+ "size_available": 42143,
+ "size_total": 543210,
+ "uuid": "abcdef01-2345-6789-0abc-def012345678"
+ }
+ }
+ }
+
+ TASK [Output mount facts grouped by mount point] ******************************************
+ ok: [localhost] => {
+ "ansible_facts.mounts | community.general.groupby_as_dict('mount')": {
+ "/": {
+ "block_available": 1234,
+ "block_size": 4096,
+ "block_total": 12345,
+ "block_used": 11111,
+ "device": "/dev/sda2",
+ "fstype": "ext4",
+ "inode_available": 1111,
+ "inode_total": 1234,
+ "inode_used": 123,
+ "mount": "/",
+ "options": "rw,relatime",
+ "size_available": 42143,
+ "size_total": 543210,
+ "uuid": "bdf50b7d-4859-40af-8665-c637ee7a7808"
+ },
+ "/boot": {
+ "block_available": 2000,
+ "block_size": 4096,
+ "block_total": 2345,
+ "block_used": 345,
+ "device": "/dev/sda1",
+ "fstype": "ext4",
+ "inode_available": 500,
+ "inode_total": 512,
+ "inode_used": 12,
+ "mount": "/boot",
+ "options": "rw,relatime,data=ordered",
+ "size_available": 56821,
+ "size_total": 543210,
+ "uuid": "ab31cade-d9c1-484d-8482-8a4cbee5241a"
+ }
+ }
+ }
+
+.. versionadded:: 3.0.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
new file mode 100644
index 000000000..9b56e98d7
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst
@@ -0,0 +1,297 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Merging lists of dictionaries
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter.
+
+.. note:: The output of the examples in this section is produced by the YAML callback plugin, which generates "Ansible output that can be quite a bit easier to read than the default JSON formatting". See :ref:`the documentation for the community.general.yaml callback plugin <ansible_collections.community.general.yaml_callback>`.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+ list1:
+ - name: foo
+ extra: true
+ - name: bar
+ extra: false
+ - name: meh
+ extra: true
+
+ list2:
+ - name: foo
+ path: /foo
+ - name: baz
+ path: /baz
+
+In the example below the lists are merged by the attribute ``name``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ list1|
+ community.general.lists_mergeby(list2, 'name') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - extra: false
+ name: bar
+ - name: baz
+ path: /baz
+ - extra: true
+ name: foo
+ path: /foo
+ - extra: true
+ name: meh
+
+
+.. versionadded:: 2.0.0
+
+It is possible to use a list of lists as an input of the filter:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name') }}"
+
+This produces the same result as in the previous example:
+
+.. code-block:: yaml
+
+ list3:
+ - extra: false
+ name: bar
+ - name: baz
+ path: /baz
+ - extra: true
+ name: foo
+ path: /foo
+ - extra: true
+ name: meh
+
+
+The filter also accepts two optional parameters, ``recursive`` and ``list_merge``, available since community.general 4.4.0. These parameters are only supported with ansible-base 2.10 or ansible-core, not with Ansible 2.9.
+
+**recursive**
+ A boolean, defaulting to ``False``. Determines whether ``community.general.lists_mergeby`` recursively merges nested hashes. Note: it does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``.
+
+**list_merge**
+ A string; its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists.
+
+The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. The functionality of these parameters is exactly the same as in the ``combine`` filter. See :ref:`Combining hashes/dictionaries <combine_filter>` to learn the details of these options.
+
+Let us use the lists below in the following examples:
+
+.. code-block:: yaml
+
+ list1:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+ list2:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
+
+Example ``list_merge=replace`` (default):
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true) }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - patch_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 3
+ - 4
+ - 4
+ - key: value
+
+Example ``list_merge=keep``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='keep') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - default_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 1
+ - 1
+ - 2
+ - 3
+
+Example ``list_merge=append``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='append') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - default_value
+ - patch_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 3
+ - 4
+ - 4
+ - key: value
+
+Example ``list_merge=prepend``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='prepend') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - patch_value
+ - default_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 3
+ - 4
+ - 4
+ - key: value
+ - 1
+ - 1
+ - 2
+ - 3
+
+Example ``list_merge=append_rp``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='append_rp') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - default_value
+ - patch_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 4
+ - key: value
+
+Example ``list_merge=prepend_rp``:
+
+.. code-block:: yaml+jinja
+
+ list3: "{{ [list1, list2]|
+ community.general.lists_mergeby('name',
+ recursive=true,
+ list_merge='prepend_rp') }}"
+
+This produces:
+
+.. code-block:: yaml
+
+ list3:
+ - name: myname01
+ param01:
+ list:
+ - patch_value
+ - default_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 3
+ - 4
+ - 4
+ - key: value
+ - 1
+ - 1
+ - 2
+
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_conversions.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_conversions.rst
new file mode 100644
index 000000000..78970c17b
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_conversions.rst
@@ -0,0 +1,113 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Conversions
+-----------
+
+Parsing CSV files
+^^^^^^^^^^^^^^^^^
+
+Ansible offers the :ref:`community.general.read_csv module <ansible_collections.community.general.read_csv_module>` to read CSV files. Sometimes you need to parse CSV data that is already contained in a string instead. For this, the ``from_csv`` filter exists.
+
+.. code-block:: yaml+jinja
+
+ - name: "Parse CSV from string"
+ debug:
+ msg: "{{ csv_string | community.general.from_csv }}"
+ vars:
+ csv_string: |
+ foo,bar,baz
+ 1,2,3
+ you,this,then
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Parse CSV from string] **************************************************************
+ ok: [localhost] => {
+ "msg": [
+ {
+ "bar": "2",
+ "baz": "3",
+ "foo": "1"
+ },
+ {
+ "bar": "this",
+ "baz": "then",
+ "foo": "you"
+ }
+ ]
+ }
+
+The ``from_csv`` filter has several keyword arguments to control its behavior:
+
+:dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored.
+:fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names.
+:delimiter: Sets the delimiter to use. Default depends on the dialect used.
+:skipinitialspace: Set to ``true`` to ignore space directly after the delimiter. Default depends on the dialect used (usually ``false``).
+:strict: Set to ``true`` to error out on invalid CSV input.
+
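+For example, to parse CSV text that has no header line, you can pass the column names through ``fieldnames``. A minimal sketch (the column names and sample data are made up for illustration):
+
+.. code-block:: yaml+jinja
+
+  - name: Parse semicolon-separated CSV without a header line
+    debug:
+      msg: "{{ csv_string | community.general.from_csv(delimiter=';', fieldnames=['name', 'value']) }}"
+    vars:
+      csv_string: |
+        foo;1
+        bar;2
+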
+.. versionadded:: 3.0.0
+
+Converting to JSON
+^^^^^^^^^^^^^^^^^^
+
+`JC <https://pypi.org/project/jc/>`_ is a CLI tool and Python library which allows you to interpret the output of various CLI programs as JSON. It is also available as a filter in community.general. This filter needs the `jc Python library <https://pypi.org/project/jc/>`_ installed on the controller.
+
+.. code-block:: yaml+jinja
+
+ - name: Run 'ls' to list files in /
+ command: ls /
+ register: result
+
+ - name: Parse the ls output
+ debug:
+ msg: "{{ result.stdout | community.general.jc('ls') }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Run 'ls' to list files in /] ********************************************************
+ changed: [localhost]
+
+ TASK [Parse the ls output] ****************************************************************
+ ok: [localhost] => {
+ "msg": [
+ {
+ "filename": "bin"
+ },
+ {
+ "filename": "boot"
+ },
+ {
+ "filename": "dev"
+ },
+ {
+ "filename": "etc"
+ },
+ {
+ "filename": "home"
+ },
+ {
+ "filename": "lib"
+ },
+ {
+ "filename": "proc"
+ },
+ {
+ "filename": "root"
+ },
+ {
+ "filename": "run"
+ },
+ {
+ "filename": "tmp"
+ }
+ ]
+ }
+
+.. versionadded:: 2.0.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_creating_identifiers.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_creating_identifiers.rst
new file mode 100644
index 000000000..af0a8b7ba
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_creating_identifiers.rst
@@ -0,0 +1,85 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Creating identifiers
+--------------------
+
+The following filters allow you to create identifiers.
+
+Hashids
+^^^^^^^
+
+`Hashids <https://hashids.org/>`_ allow you to convert sequences of integers to short, unique string identifiers. This filter needs the `hashids Python library <https://pypi.org/project/hashids/>`_ installed on the controller.
+
+.. code-block:: yaml+jinja
+
+ - name: "Create hashid"
+ debug:
+ msg: "{{ [1234, 5, 6] | community.general.hashids_encode }}"
+
+ - name: "Decode hashid"
+ debug:
+ msg: "{{ 'jm2Cytn' | community.general.hashids_decode }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Create hashid] **********************************************************************
+ ok: [localhost] => {
+ "msg": "jm2Cytn"
+ }
+
+ TASK [Decode hashid] **********************************************************************
+ ok: [localhost] => {
+ "msg": [
+ 1234,
+ 5,
+ 6
+ ]
+ }
+
+The hashids filters accept keyword arguments to allow fine-tuning the hashids generated:
+
+:salt: String to use as salt when hashing.
+:alphabet: String of 16 or more unique characters to produce a hash.
+:min_length: Minimum length of hash produced.
+
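+For example, a minimal sketch using the ``salt`` and ``min_length`` arguments (the salt value is made up for illustration):
+
+.. code-block:: yaml+jinja
+
+  - name: Create a salted hashid with a minimum length of 10
+    debug:
+      msg: "{{ [1234, 5, 6] | community.general.hashids_encode(salt='my secret salt', min_length=10) }}"
+
+Note that ``hashids_decode`` only restores the original integers when it is called with the same keyword arguments that were used for encoding.
+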
+.. versionadded:: 3.0.0
+
+Random MACs
+^^^^^^^^^^^
+
+You can use the ``random_mac`` filter to complete a partial `MAC address <https://en.wikipedia.org/wiki/MAC_address>`_ to a random 6-byte MAC address.
+
+.. code-block:: yaml+jinja
+
+ - name: "Create a random MAC starting with ff:"
+ debug:
+ msg: "{{ 'FF' | community.general.random_mac }}"
+
+ - name: "Create a random MAC starting with 00:11:22:"
+ debug:
+ msg: "{{ '00:11:22' | community.general.random_mac }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Create a random MAC starting with ff:] **********************************************
+ ok: [localhost] => {
+ "msg": "ff:69:d3:78:7f:b4"
+ }
+
+ TASK [Create a random MAC starting with 00:11:22:] ****************************************
+ ok: [localhost] => {
+ "msg": "00:11:22:71:5d:3b"
+ }
+
+You can also initialize the random number generator from a seed to create random-but-idempotent MAC addresses:
+
+.. code-block:: yaml+jinja
+
+ "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_paths.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_paths.rst
new file mode 100644
index 000000000..dac893145
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_paths.rst
@@ -0,0 +1,19 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Paths
+-----
+
+The ``path_join`` filter has been added in ansible-base 2.10. If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9.
+
+.. code-block:: yaml+jinja
+
+ # ansible-base 2.10 or newer:
+ path: "{{ ('/etc', path, 'subdir', file) | path_join }}"
+
+ # Also works with Ansible 2.9:
+ path: "{{ ('/etc', path, 'subdir', file) | community.general.path_join }}"
+
+.. versionadded:: 3.0.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_selecting_json_data.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_selecting_json_data.rst
new file mode 100644
index 000000000..d8de07b92
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_selecting_json_data.rst
@@ -0,0 +1,149 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.json_query_filter:
+
+Selecting JSON data: JSON queries
+---------------------------------
+
+To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure.
+
+.. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples <http://jmespath.org/examples.html>`_.
+
+Consider this data structure:
+
+.. code-block:: json
+
+ {
+ "domain_definition": {
+ "domain": {
+ "cluster": [
+ {
+ "name": "cluster1"
+ },
+ {
+ "name": "cluster2"
+ }
+ ],
+ "server": [
+ {
+ "name": "server11",
+ "cluster": "cluster1",
+ "port": "8080"
+ },
+ {
+ "name": "server12",
+ "cluster": "cluster1",
+ "port": "8090"
+ },
+ {
+ "name": "server21",
+ "cluster": "cluster2",
+ "port": "9080"
+ },
+ {
+ "name": "server22",
+ "cluster": "cluster2",
+ "port": "9090"
+ }
+ ],
+ "library": [
+ {
+ "name": "lib1",
+ "target": "cluster1"
+ },
+ {
+ "name": "lib2",
+ "target": "cluster2"
+ }
+ ]
+ }
+ }
+ }
+
+To extract all clusters from this structure, you can use the following query:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all cluster names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
+
+To extract all server names:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all server names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
+
+To extract ports from cluster1:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all ports from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+ vars:
+ server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
+
+.. note:: You can use a variable to make the query more readable.
+
+To print out the ports from cluster1 in a comma-separated string:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all ports from cluster1 as a string
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
+
+.. note:: In the example above, quoting literals using backticks avoids escaping quotes and maintains readability.
+
+You can use YAML `single quote escaping <https://yaml.org/spec/current.html#id2534365>`_:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all ports from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
+
+.. note:: Escaping single quotes within single quotes in YAML is done by doubling the single quote.
+
+To get a hash map with all ports and names of a cluster:
+
+.. code-block:: yaml+jinja
+
+ - name: Display all server ports and names from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+ vars:
+ server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}"
+
+To extract ports from all servers whose name starts with 'server1':
+
+.. code-block:: yaml+jinja
+
+ - name: Display all ports from cluster1
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+ vars:
+ server_name_query: "domain.server[?starts_with(name,'server1')].port"
+
+To extract ports from all servers whose name contains 'server1':
+
+.. code-block:: yaml+jinja
+
+ - name: Display all ports from cluster1
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+ vars:
+ server_name_query: "domain.server[?contains(name,'server1')].port"
+
+.. note:: When using ``starts_with`` and ``contains``, you have to use the ``to_json | from_json`` filter combination for correct parsing of the data structure.
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_times.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_times.rst
new file mode 100644
index 000000000..dc68f2a2e
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_times.rst
@@ -0,0 +1,89 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Working with times
+------------------
+
+The ``to_time_unit`` filter allows you to convert a time given as a human-readable string to a specific unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours corresponding to 4 hours, 30 minutes and 12 seconds.
+
+There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used:
+
+.. list-table:: Units
+ :widths: 25 25 25 25
+ :header-rows: 1
+
+ * - Unit name
+ - Unit value in seconds
+ - Unit strings for filter
+ - Shorthand filter
+ * - Millisecond
+ - 1/1000 second
+ - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds``
+ - ``to_milliseconds``
+ * - Second
+ - 1 second
+ - ``s``, ``sec``, ``secs``, ``second``, ``seconds``
+ - ``to_seconds``
+ * - Minute
+ - 60 seconds
+ - ``m``, ``min``, ``mins``, ``minute``, ``minutes``
+ - ``to_minutes``
+ * - Hour
+ - 60*60 seconds
+ - ``h``, ``hour``, ``hours``
+ - ``to_hours``
+ * - Day
+ - 24*60*60 seconds
+ - ``d``, ``day``, ``days``
+ - ``to_days``
+ * - Week
+ - 7*24*60*60 seconds
+ - ``w``, ``week``, ``weeks``
+ - ``to_weeks``
+ * - Month
+ - 30*24*60*60 seconds
+ - ``mo``, ``month``, ``months``
+ - ``to_months``
+ * - Year
+ - 365*24*60*60 seconds
+ - ``y``, ``year``, ``years``
+ - ``to_years``
+
+Note that months and years use a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be passed to ``to_time_unit`` and to all shorthand filters.
+
+.. code-block:: yaml+jinja
+
+ - name: Convert string to seconds
+ debug:
+ msg: "{{ '30h 20m 10s 123ms' | community.general.to_time_unit('seconds') }}"
+
+ - name: Convert string to hours
+ debug:
+ msg: "{{ '30h 20m 10s 123ms' | community.general.to_hours }}"
+
+ - name: Convert string to years (using 365.25 days == 1 year)
+ debug:
+ msg: "{{ '400d 15h' | community.general.to_years(year=365.25) }}"
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Convert string to seconds] **********************************************************
+ ok: [localhost] => {
+ "msg": "109210.123"
+ }
+
+ TASK [Convert string to hours] ************************************************************
+ ok: [localhost] => {
+ "msg": "30.336145277778"
+ }
+
+ TASK [Convert string to years (using 365.25 days == 1 year)] ******************************
+ ok: [localhost] => {
+ "msg": "1.096851471595"
+ }
+
+.. versionadded:: 0.2.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_unicode.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_unicode.rst
new file mode 100644
index 000000000..2e5a67f8f
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_unicode.rst
@@ -0,0 +1,35 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Working with Unicode
+---------------------
+
+`Unicode <https://unicode.org/main.html>`_ makes it possible to produce two strings which may be visually equivalent but are composed of distinctly different characters/character sequences. To address this, ``Unicode`` defines `normalization forms <https://unicode.org/reports/tr15/>`_ which avoid these distinctions by choosing a unique character sequence for a given visual representation.
+
+You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks.
+
+.. code-block:: yaml+jinja
+
+ - name: Compare Unicode representations
+ debug:
+ msg: "{{ with_combining_character | community.general.unicode_normalize == without_combining_character }}"
+ vars:
+ with_combining_character: "{{ 'Mayagu\u0308ez' }}"
+ without_combining_character: Mayagüez
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Compare Unicode representations] ********************************************************
+ ok: [localhost] => {
+ "msg": true
+ }
+
+The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string.
+
+:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference <https://unicode.org/reports/tr15/>`_ for more information.
+
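+For example, a minimal sketch normalizing a string to the decomposed compatibility (NFKD) form:
+
+.. code-block:: yaml+jinja
+
+  - name: Normalize to the NFKD form
+    debug:
+      msg: "{{ 'Mayagüez' | community.general.unicode_normalize(form='NFKD') }}"
+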
+.. versionadded:: 3.7.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_versions.rst b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_versions.rst
new file mode 100644
index 000000000..2488427b7
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/filter_guide_working_with_versions.rst
@@ -0,0 +1,39 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+Working with versions
+---------------------
+
+If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter:
+
+.. code-block:: yaml+jinja
+
+ - name: Sort list by version number
+ debug:
+ var: ansible_versions | community.general.version_sort
+ vars:
+ ansible_versions:
+ - '2.8.0'
+ - '2.11.0'
+ - '2.7.0'
+ - '2.10.0'
+ - '2.9.0'
+
+This produces:
+
+.. code-block:: ansible-output
+
+ TASK [Sort list by version number] ********************************************************
+ ok: [localhost] => {
+ "ansible_versions | community.general.version_sort": [
+ "2.7.0",
+ "2.8.0",
+ "2.9.0",
+ "2.10.0",
+ "2.11.0"
+ ]
+ }
+
+.. versionadded:: 2.2.0
diff --git a/ansible_collections/community/general/docs/docsite/rst/test_guide.rst b/ansible_collections/community/general/docs/docsite/rst/test_guide.rst
new file mode 100644
index 000000000..b0b7885f9
--- /dev/null
+++ b/ansible_collections/community/general/docs/docsite/rst/test_guide.rst
@@ -0,0 +1,33 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.general.docsite.test_guide:
+
+community.general Test (Plugin) Guide
+=====================================
+
+The :ref:`community.general collection <plugins_in_community.general>` currently offers one test plugin.
+
+.. contents:: Topics
+
+Feature Tests
+-------------
+
+The ``a_module`` test allows you to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use it to ensure ahead of time that required modules are available.
+
+.. code-block:: yaml+jinja
+
+ - name: Make sure that community.aws.route53 is available
+ assert:
+ that:
+ - >
+ 'community.aws.route53' is community.general.a_module
+
+ - name: Make sure that community.general.does_not_exist is not a module or action plugin
+ assert:
+ that:
+ - "'community.general.does_not_exist' is not community.general.a_module"
+
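+For example, a role could use the test to fail early with a clear message (a hypothetical sketch; the collection and module names are only illustrative):
+
+.. code-block:: yaml+jinja
+
+  - name: Fail early when a required module is missing
+    fail:
+      msg: This role requires the community.aws collection.
+    when: "'community.aws.route53' is not community.general.a_module"
+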
+.. versionadded:: 4.0.0
diff --git a/ansible_collections/community/general/meta/runtime.yml b/ansible_collections/community/general/meta/runtime.yml
new file mode 100644
index 000000000..98a46f62d
--- /dev/null
+++ b/ansible_collections/community/general/meta/runtime.yml
@@ -0,0 +1,4582 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+requires_ansible: '>=2.11.0'
+plugin_routing:
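+ # Each entry below maps a legacy plugin name to its new location via
+ # 'redirect'. Entries may also carry a 'deprecation' notice
+ # (removal_version and warning_text) or, for plugins that were removed,
+ # a 'tombstone' marker.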
+ connection:
+ docker:
+ redirect: community.docker.docker
+ oc:
+ redirect: community.okd.oc
+ lookup:
+ gcp_storage_file:
+ redirect: community.google.gcp_storage_file
+ hashi_vault:
+ redirect: community.hashi_vault.hashi_vault
+ nios:
+ redirect: infoblox.nios_modules.nios_lookup
+ nios_next_ip:
+ redirect: infoblox.nios_modules.nios_next_ip
+ nios_next_network:
+ redirect: infoblox.nios_modules.nios_next_network
+ modules:
+ database.aerospike.aerospike_migrations:
+ redirect: community.general.aerospike_migrations
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aerospike_migrations
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.airbrake_deployment:
+ redirect: community.general.airbrake_deployment
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.airbrake_deployment
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.aix_devices:
+ redirect: community.general.aix_devices
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aix_devices
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.aix_filesystem:
+ redirect: community.general.aix_filesystem
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aix_filesystem
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.aix_inittab:
+ redirect: community.general.aix_inittab
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aix_inittab
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.aix_lvg:
+ redirect: community.general.aix_lvg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aix_lvg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.aix_lvol:
+ redirect: community.general.aix_lvol
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.aix_lvol
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.alerta_customer:
+ redirect: community.general.alerta_customer
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.alerta_customer
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.alicloud.ali_instance:
+ redirect: community.general.ali_instance
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ali_instance
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ ali_instance_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.ali_instance_info instead.
+ cloud.alicloud.ali_instance_info:
+ redirect: community.general.ali_instance_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ali_instance_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.alternatives:
+ redirect: community.general.alternatives
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.alternatives
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.ansible_galaxy_install:
+ redirect: community.general.ansible_galaxy_install
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ansible_galaxy_install
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.apache2_mod_proxy:
+ redirect: community.general.apache2_mod_proxy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.apache2_mod_proxy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.apache2_module:
+ redirect: community.general.apache2_module
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.apache2_module
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.apk:
+ redirect: community.general.apk
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.apk
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.apt_repo:
+ redirect: community.general.apt_repo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.apt_repo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.apt_rpm:
+ redirect: community.general.apt_rpm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.apt_rpm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.archive:
+ redirect: community.general.archive
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.archive
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.atomic.atomic_container:
+ redirect: community.general.atomic_container
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.atomic_container
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.atomic.atomic_host:
+ redirect: community.general.atomic_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.atomic_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.atomic.atomic_image:
+ redirect: community.general.atomic_image
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.atomic_image
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.awall:
+ redirect: community.general.awall
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.awall
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.beadm:
+ redirect: community.general.beadm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.beadm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.bearychat:
+ redirect: community.general.bearychat
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bearychat
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.bigpanda:
+ redirect: community.general.bigpanda
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bigpanda
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.bitbucket.bitbucket_access_key:
+ redirect: community.general.bitbucket_access_key
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bitbucket_access_key
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.bitbucket.bitbucket_pipeline_key_pair:
+ redirect: community.general.bitbucket_pipeline_key_pair
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_key_pair
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.bitbucket.bitbucket_pipeline_known_host:
+ redirect: community.general.bitbucket_pipeline_known_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_known_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.bitbucket.bitbucket_pipeline_variable:
+ redirect: community.general.bitbucket_pipeline_variable
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_variable
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.bower:
+ redirect: community.general.bower
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bower
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.bundler:
+ redirect: community.general.bundler
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bundler
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.bzr:
+ redirect: community.general.bzr
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.bzr
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.campfire:
+ redirect: community.general.campfire
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.campfire
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.capabilities:
+ redirect: community.general.capabilities
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.capabilities
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.cargo:
+ redirect: community.general.cargo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cargo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.catapult:
+ redirect: community.general.catapult
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.catapult
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.circonus_annotation:
+ redirect: community.general.circonus_annotation
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.circonus_annotation
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cisco_spark:
+ redirect: community.general.cisco_webex
+ notification.cisco_spark:
+ redirect: community.general.cisco_webex
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cisco_webex
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.cisco_webex:
+ redirect: community.general.cisco_webex
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cisco_webex
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_aa_policy:
+ redirect: community.general.clc_aa_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_aa_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_alert_policy:
+ redirect: community.general.clc_alert_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_alert_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_blueprint_package:
+ redirect: community.general.clc_blueprint_package
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_blueprint_package
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_firewall_policy:
+ redirect: community.general.clc_firewall_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_firewall_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_group:
+ redirect: community.general.clc_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_loadbalancer:
+ redirect: community.general.clc_loadbalancer
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_loadbalancer
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_modify_server:
+ redirect: community.general.clc_modify_server
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_modify_server
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_publicip:
+ redirect: community.general.clc_publicip
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_publicip
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_server:
+ redirect: community.general.clc_server
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_server
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.centurylink.clc_server_snapshot:
+ redirect: community.general.clc_server_snapshot
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.clc_server_snapshot
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.cloud_init_data_facts:
+ redirect: community.general.cloud_init_data_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cloud_init_data_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.cloudflare_dns:
+ redirect: community.general.cloudflare_dns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cloudflare_dns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.cobbler.cobbler_sync:
+ redirect: community.general.cobbler_sync
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cobbler_sync
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.cobbler.cobbler_system:
+ redirect: community.general.cobbler_system
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cobbler_system
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.composer:
+ redirect: community.general.composer
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.composer
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.consul.consul:
+ redirect: community.general.consul
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.consul
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.consul.consul_acl:
+ redirect: community.general.consul_acl
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.consul_acl
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.consul.consul_kv:
+ redirect: community.general.consul_kv
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.consul_kv
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.consul.consul_session:
+ redirect: community.general.consul_session
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.consul_session
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.copr:
+ redirect: community.general.copr
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.copr
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.cpanm:
+ redirect: community.general.cpanm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cpanm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.cronvar:
+ redirect: community.general.cronvar
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.cronvar
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.crypttab:
+ redirect: community.general.crypttab
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.crypttab
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.datadog.datadog_downtime:
+ redirect: community.general.datadog_downtime
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.datadog_downtime
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.datadog.datadog_event:
+ redirect: community.general.datadog_event
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.datadog_event
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.datadog.datadog_monitor:
+ redirect: community.general.datadog_monitor
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.datadog_monitor
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.dconf:
+ redirect: community.general.dconf
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dconf
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.deploy_helper:
+ redirect: community.general.deploy_helper
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.deploy_helper
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.dimensiondata.dimensiondata_network:
+ redirect: community.general.dimensiondata_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dimensiondata_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.dimensiondata.dimensiondata_vlan:
+ redirect: community.general.dimensiondata_vlan
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dimensiondata_vlan
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.discord:
+ redirect: community.general.discord
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.discord
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.django_manage:
+ redirect: community.general.django_manage
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.django_manage
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.dnf_versionlock:
+ redirect: community.general.dnf_versionlock
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dnf_versionlock
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.dnsimple:
+ redirect: community.general.dnsimple
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dnsimple
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.dnsimple_info:
+ redirect: community.general.dnsimple_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dnsimple_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.dnsmadeeasy:
+ redirect: community.general.dnsmadeeasy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dnsmadeeasy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ docker_compose:
+ redirect: community.docker.docker_compose
+ docker_config:
+ redirect: community.docker.docker_config
+ docker_container:
+ redirect: community.docker.docker_container
+ docker_container_info:
+ redirect: community.docker.docker_container_info
+ docker_host_info:
+ redirect: community.docker.docker_host_info
+ docker_image:
+ redirect: community.docker.docker_image
+ docker_image_facts:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use community.docker.docker_image_info instead.
+ docker_image_info:
+ redirect: community.docker.docker_image_info
+ docker_login:
+ redirect: community.docker.docker_login
+ docker_network:
+ redirect: community.docker.docker_network
+ docker_network_info:
+ redirect: community.docker.docker_network_info
+ docker_node:
+ redirect: community.docker.docker_node
+ docker_node_info:
+ redirect: community.docker.docker_node_info
+ docker_prune:
+ redirect: community.docker.docker_prune
+ docker_secret:
+ redirect: community.docker.docker_secret
+ docker_service:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use community.docker.docker_compose instead.
+ docker_stack:
+ redirect: community.docker.docker_stack
+ docker_stack_info:
+ redirect: community.docker.docker_stack_info
+ docker_stack_task_info:
+ redirect: community.docker.docker_stack_task_info
+ docker_swarm:
+ redirect: community.docker.docker_swarm
+ docker_swarm_info:
+ redirect: community.docker.docker_swarm_info
+ docker_swarm_service:
+ redirect: community.docker.docker_swarm_service
+ docker_swarm_service_info:
+ redirect: community.docker.docker_swarm_service_info
+ docker_volume:
+ redirect: community.docker.docker_volume
+ docker_volume_info:
+ redirect: community.docker.docker_volume_info
+ system.dpkg_divert:
+ redirect: community.general.dpkg_divert
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.dpkg_divert
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.easy_install:
+ redirect: community.general.easy_install
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.easy_install
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.ejabberd_user:
+ redirect: community.general.ejabberd_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ejabberd_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.elasticsearch_plugin:
+ redirect: community.general.elasticsearch_plugin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.elasticsearch_plugin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.emc.emc_vnx_sg_member:
+ redirect: community.general.emc_vnx_sg_member
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.emc_vnx_sg_member
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.etcd3:
+ redirect: community.general.etcd3
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.etcd3
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.facter:
+ redirect: community.general.facter
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.facter
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.filesize:
+ redirect: community.general.filesize
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.filesize
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.filesystem:
+ redirect: community.general.filesystem
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.filesystem
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.flatpak:
+ redirect: community.general.flatpak
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.flatpak
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.flatpak_remote:
+ redirect: community.general.flatpak_remote
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.flatpak_remote
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.flowdock:
+ redirect: community.general.flowdock
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.flowdock
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ foreman:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the modules from the theforeman.foreman collection instead.
+ net_tools.gandi_livedns:
+ redirect: community.general.gandi_livedns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gandi_livedns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ gc_storage:
+ redirect: community.google.gc_storage
+ gcdns_record:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_dns_resource_record_set instead.
+ gcdns_zone:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_dns_managed_zone instead.
+ gce:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_instance instead.
+ gce_eip:
+ redirect: community.google.gce_eip
+ gce_img:
+ redirect: community.google.gce_img
+ gce_instance_template:
+ redirect: community.google.gce_instance_template
+ gce_labels:
+ redirect: community.google.gce_labels
+ gce_lb:
+ redirect: community.google.gce_lb
+ gce_mig:
+ redirect: community.google.gce_mig
+ gce_net:
+ redirect: community.google.gce_net
+ gce_pd:
+ redirect: community.google.gce_pd
+ gce_snapshot:
+ redirect: community.google.gce_snapshot
+ gce_tag:
+ redirect: community.google.gce_tag
+ system.gconftool2:
+ redirect: community.general.gconftool2
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gconftool2
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.gconftool2_info:
+ redirect: community.general.gconftool2_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gconftool2_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ gcp_backend_service:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_backend_service instead.
+ gcp_forwarding_rule:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_forwarding_rule or google.cloud.gcp_compute_global_forwarding_rule
+ instead.
+ gcp_healthcheck:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_health_check, google.cloud.gcp_compute_http_health_check
+ or google.cloud.gcp_compute_https_health_check instead.
+ gcp_target_proxy:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_target_http_proxy instead.
+ gcp_url_map:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_compute_url_map instead.
+ gcpubsub:
+ redirect: community.google.gcpubsub
+ gcpubsub_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.google.gcpubsub_info instead.
+ gcpubsub_info:
+ redirect: community.google.gcpubsub_info
+ gcspanner:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance
+ instead.
+ packaging.language.gem:
+ redirect: community.general.gem
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gem
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.git_config:
+ redirect: community.general.git_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.git_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_deploy_key:
+ redirect: community.general.github_deploy_key
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_deploy_key
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ github_hooks:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use community.general.github_webhook and community.general.github_webhook_info
+ instead.
+ source_control.github.github_issue:
+ redirect: community.general.github_issue
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_issue
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_key:
+ redirect: community.general.github_key
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_key
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_release:
+ redirect: community.general.github_release
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_release
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_repo:
+ redirect: community.general.github_repo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_repo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_webhook:
+ redirect: community.general.github_webhook
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_webhook
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.github.github_webhook_info:
+ redirect: community.general.github_webhook_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.github_webhook_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_branch:
+ redirect: community.general.gitlab_branch
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_branch
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_deploy_key:
+ redirect: community.general.gitlab_deploy_key
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_deploy_key
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_group:
+ redirect: community.general.gitlab_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_group_members:
+ redirect: community.general.gitlab_group_members
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_group_members
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_group_variable:
+ redirect: community.general.gitlab_group_variable
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_group_variable
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_hook:
+ redirect: community.general.gitlab_hook
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_hook
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_project:
+ redirect: community.general.gitlab_project
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_project
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_project_members:
+ redirect: community.general.gitlab_project_members
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_project_members
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_project_variable:
+ redirect: community.general.gitlab_project_variable
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_project_variable
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_protected_branch:
+ redirect: community.general.gitlab_protected_branch
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_protected_branch
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_runner:
+ redirect: community.general.gitlab_runner
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_runner
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ source_control.gitlab.gitlab_user:
+ redirect: community.general.gitlab_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gitlab_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.grove:
+ redirect: community.general.grove
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.grove
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.gunicorn:
+ redirect: community.general.gunicorn
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.gunicorn
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.saphana.hana_query:
+ redirect: community.general.hana_query
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hana_query
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.haproxy:
+ redirect: community.general.haproxy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.haproxy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.heroku.heroku_collaborator:
+ redirect: community.general.heroku_collaborator
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.heroku_collaborator
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ hetzner_failover_ip:
+ redirect: community.hrobot.failover_ip
+ hetzner_failover_ip_info:
+ redirect: community.hrobot.failover_ip_info
+ hetzner_firewall:
+ redirect: community.hrobot.firewall
+ hetzner_firewall_info:
+ redirect: community.hrobot.firewall_info
+ source_control.hg:
+ redirect: community.general.hg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.hipchat:
+ redirect: community.general.hipchat
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hipchat
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.homebrew:
+ redirect: community.general.homebrew
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.homebrew
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.homebrew_cask:
+ redirect: community.general.homebrew_cask
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.homebrew_cask
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.homebrew_tap:
+ redirect: community.general.homebrew_tap
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.homebrew_tap
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.homectl:
+ redirect: community.general.homectl
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.homectl
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.honeybadger_deployment:
+ redirect: community.general.honeybadger_deployment
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.honeybadger_deployment
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.hpilo.hpilo_boot:
+ redirect: community.general.hpilo_boot
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hpilo_boot
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ hpilo_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.hpilo_info instead.
+ remote_management.hpilo.hpilo_info:
+ redirect: community.general.hpilo_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hpilo_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.hpilo.hponcfg:
+ redirect: community.general.hponcfg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hponcfg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.htpasswd:
+ redirect: community.general.htpasswd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.htpasswd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_ecs_instance:
+ redirect: community.general.hwc_ecs_instance
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_ecs_instance
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_evs_disk:
+ redirect: community.general.hwc_evs_disk
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_evs_disk
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_network_vpc:
+ redirect: community.general.hwc_network_vpc
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_network_vpc
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_smn_topic:
+ redirect: community.general.hwc_smn_topic
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_smn_topic
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_eip:
+ redirect: community.general.hwc_vpc_eip
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_eip
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_peering_connect:
+ redirect: community.general.hwc_vpc_peering_connect
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_peering_connect
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_port:
+ redirect: community.general.hwc_vpc_port
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_port
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_private_ip:
+ redirect: community.general.hwc_vpc_private_ip
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_private_ip
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_route:
+ redirect: community.general.hwc_vpc_route
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_route
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_security_group:
+ redirect: community.general.hwc_vpc_security_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_security_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_security_group_rule:
+ redirect: community.general.hwc_vpc_security_group_rule
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_security_group_rule
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.huawei.hwc_vpc_subnet:
+ redirect: community.general.hwc_vpc_subnet
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.hwc_vpc_subnet
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_domain:
+ redirect: community.general.ibm_sa_domain
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_domain
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_host:
+ redirect: community.general.ibm_sa_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_host_ports:
+ redirect: community.general.ibm_sa_host_ports
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_host_ports
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_pool:
+ redirect: community.general.ibm_sa_pool
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_pool
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_vol:
+ redirect: community.general.ibm_sa_vol
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_vol
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.ibm.ibm_sa_vol_map:
+ redirect: community.general.ibm_sa_vol_map
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ibm_sa_vol_map
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.icinga2_feature:
+ redirect: community.general.icinga2_feature
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.icinga2_feature
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.icinga2_host:
+ redirect: community.general.icinga2_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.icinga2_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ idrac_firmware:
+ redirect: dellemc.openmanage.idrac_firmware
+ remote_management.redfish.idrac_redfish_command:
+ redirect: community.general.idrac_redfish_command
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.idrac_redfish_command
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.idrac_redfish_config:
+ redirect: community.general.idrac_redfish_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.idrac_redfish_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ idrac_redfish_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.idrac_redfish_info instead.
+ remote_management.redfish.idrac_redfish_info:
+ redirect: community.general.idrac_redfish_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.idrac_redfish_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ idrac_server_config_profile:
+ redirect: dellemc.openmanage.idrac_server_config_profile
+ remote_management.redfish.ilo_redfish_config:
+ redirect: community.general.ilo_redfish_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ilo_redfish_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.ilo_redfish_info:
+ redirect: community.general.ilo_redfish_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ilo_redfish_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.imc.imc_rest:
+ redirect: community.general.imc_rest
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.imc_rest
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.smartos.imgadm:
+ redirect: community.general.imgadm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.imgadm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.infinity.infinity:
+ redirect: community.general.infinity
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.infinity
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.influxdb.influxdb_database:
+ redirect: community.general.influxdb_database
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.influxdb_database
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.influxdb.influxdb_query:
+ redirect: community.general.influxdb_query
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.influxdb_query
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.influxdb.influxdb_retention_policy:
+ redirect: community.general.influxdb_retention_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.influxdb_retention_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.influxdb.influxdb_user:
+ redirect: community.general.influxdb_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.influxdb_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.influxdb.influxdb_write:
+ redirect: community.general.influxdb_write
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.influxdb_write
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.ini_file:
+ redirect: community.general.ini_file
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ini_file
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.installp:
+ redirect: community.general.installp
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.installp
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.interfaces_file:
+ redirect: community.general.interfaces_file
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.interfaces_file
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ip_netns:
+ redirect: community.general.ip_netns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ip_netns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_config:
+ redirect: community.general.ipa_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_dnsrecord:
+ redirect: community.general.ipa_dnsrecord
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_dnsrecord
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_dnszone:
+ redirect: community.general.ipa_dnszone
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_dnszone
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_group:
+ redirect: community.general.ipa_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_hbacrule:
+ redirect: community.general.ipa_hbacrule
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_hbacrule
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_host:
+ redirect: community.general.ipa_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_hostgroup:
+ redirect: community.general.ipa_hostgroup
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_hostgroup
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_otpconfig:
+ redirect: community.general.ipa_otpconfig
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_otpconfig
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_otptoken:
+ redirect: community.general.ipa_otptoken
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_otptoken
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_pwpolicy:
+ redirect: community.general.ipa_pwpolicy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_pwpolicy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_role:
+ redirect: community.general.ipa_role
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_role
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_service:
+ redirect: community.general.ipa_service
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_service
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_subca:
+ redirect: community.general.ipa_subca
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_subca
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_sudocmd:
+ redirect: community.general.ipa_sudocmd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_sudocmd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_sudocmdgroup:
+ redirect: community.general.ipa_sudocmdgroup
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_sudocmdgroup
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_sudorule:
+ redirect: community.general.ipa_sudorule
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_sudorule
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_user:
+ redirect: community.general.ipa_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.ipa.ipa_vault:
+ redirect: community.general.ipa_vault
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipa_vault
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ipify_facts:
+ redirect: community.general.ipify_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipify_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ipinfoio_facts:
+ redirect: community.general.ipinfoio_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipinfoio_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.ipmi.ipmi_boot:
+ redirect: community.general.ipmi_boot
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipmi_boot
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.ipmi.ipmi_power:
+ redirect: community.general.ipmi_power
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipmi_power
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.iptables_state:
+ redirect: community.general.iptables_state
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.iptables_state
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ipwcli_dns:
+ redirect: community.general.ipwcli_dns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ipwcli_dns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.irc:
+ redirect: community.general.irc
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.irc
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.iso_create:
+ redirect: community.general.iso_create
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.iso_create
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.iso_extract:
+ redirect: community.general.iso_extract
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.iso_extract
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.iso_customize:
+ redirect: community.general.iso_customize
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.iso_customize
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.jabber:
+ redirect: community.general.jabber
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jabber
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.java_cert:
+ redirect: community.general.java_cert
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.java_cert
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.java_keystore:
+ redirect: community.general.java_keystore
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.java_keystore
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jboss:
+ redirect: community.general.jboss
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jboss
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jenkins_build:
+ redirect: community.general.jenkins_build
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jenkins_build
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jenkins_job:
+ redirect: community.general.jenkins_job
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jenkins_job
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ jenkins_job_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.jenkins_job_info instead.
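+ # Illustrative sketch (comment only): unlike a redirect, a tombstone makes
+ # the removed name fail hard, with warning_text as the error message. A task
+ # such as
+ #
+ #   - community.general.jenkins_job_facts:
+ #
+ # has errored out since community.general 3.0.0; the replacement named in
+ # the warning is used like (option value hypothetical):
+ #
+ #   - community.general.jenkins_job_info:
+ #       name: some-job
+ #     register: job_result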
+ web_infrastructure.jenkins_job_info:
+ redirect: community.general.jenkins_job_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jenkins_job_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jenkins_plugin:
+ redirect: community.general.jenkins_plugin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jenkins_plugin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jenkins_script:
+ redirect: community.general.jenkins_script
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jenkins_script
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.jira:
+ redirect: community.general.jira
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.jira
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ katello:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the modules from the theforeman.foreman collection instead.
+ system.kernel_blacklist:
+ redirect: community.general.kernel_blacklist
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.kernel_blacklist
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_authentication:
+ redirect: community.general.keycloak_authentication
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_authentication
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_client:
+ redirect: community.general.keycloak_client
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_client
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_client_rolemapping:
+ redirect: community.general.keycloak_client_rolemapping
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_client_rolemapping
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_clientscope:
+ redirect: community.general.keycloak_clientscope
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_clientscope
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_clienttemplate:
+ redirect: community.general.keycloak_clienttemplate
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_clienttemplate
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_group:
+ redirect: community.general.keycloak_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_identity_provider:
+ redirect: community.general.keycloak_identity_provider
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_identity_provider
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_realm:
+ redirect: community.general.keycloak_realm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_realm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_realm_info:
+ redirect: community.general.keycloak_realm_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_realm_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_role:
+ redirect: community.general.keycloak_role
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_role
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_user_federation:
+ redirect: community.general.keycloak_user_federation
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_user_federation
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.keycloak.keycloak_user_rolemapping:
+ redirect: community.general.keycloak_user_rolemapping
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keycloak_user_rolemapping
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.keyring:
+ redirect: community.general.keyring
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keyring
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.keyring_info:
+ redirect: community.general.keyring_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.keyring_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.kibana_plugin:
+ redirect: community.general.kibana_plugin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.kibana_plugin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ kubevirt_cdi_upload:
+ redirect: community.kubevirt.kubevirt_cdi_upload
+ kubevirt_preset:
+ redirect: community.kubevirt.kubevirt_preset
+ kubevirt_pvc:
+ redirect: community.kubevirt.kubevirt_pvc
+ kubevirt_rs:
+ redirect: community.kubevirt.kubevirt_rs
+ kubevirt_template:
+ redirect: community.kubevirt.kubevirt_template
+ kubevirt_vm:
+ redirect: community.kubevirt.kubevirt_vm
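+ # Illustrative sketch (comment only): bare redirects such as the kubevirt_*
+ # block above carry no deprecation; they transparently resolve the old
+ # community.general name to the new collection, which has to be installed
+ # separately, for example:
+ #
+ #   ansible-galaxy collection install community.kubevirt
+ #
+ # after which community.general.kubevirt_vm keeps resolving to
+ # community.kubevirt.kubevirt_vm.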
+ system.launchd:
+ redirect: community.general.launchd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.launchd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.layman:
+ redirect: community.general.layman
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.layman
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.lbu:
+ redirect: community.general.lbu
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lbu
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ ldap_attr:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.ldap_attrs instead.
+ net_tools.ldap.ldap_attrs:
+ redirect: community.general.ldap_attrs
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ldap_attrs
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ldap.ldap_entry:
+ redirect: community.general.ldap_entry
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ldap_entry
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ldap.ldap_passwd:
+ redirect: community.general.ldap_passwd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ldap_passwd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.ldap.ldap_search:
+ redirect: community.general.ldap_search
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ldap_search
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.librato_annotation:
+ redirect: community.general.librato_annotation
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.librato_annotation
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.linode.linode:
+ redirect: community.general.linode
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.linode
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.linode.linode_v4:
+ redirect: community.general.linode_v4
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.linode_v4
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.listen_ports_facts:
+ redirect: community.general.listen_ports_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.listen_ports_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.lldp:
+ redirect: community.general.lldp
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lldp
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.locale_gen:
+ redirect: community.general.locale_gen
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.locale_gen
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.logentries:
+ redirect: community.general.logentries
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.logentries
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.logentries_msg:
+ redirect: community.general.logentries_msg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.logentries_msg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ logicmonitor:
+ tombstone:
+ removal_version: 1.0.0
+ warning_text: The logicmonitor module is no longer maintained and the
+ API used has been disabled in 2017.
+ logicmonitor_facts:
+ tombstone:
+ removal_version: 1.0.0
+ warning_text: The logicmonitor_facts module is no longer maintained and the
+ API used has been disabled in 2017.
+ monitoring.logstash_plugin:
+ redirect: community.general.logstash_plugin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.logstash_plugin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.lvg:
+ redirect: community.general.lvg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lvg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.lvol:
+ redirect: community.general.lvol
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lvol
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.lxc.lxc_container:
+ redirect: community.general.lxc_container
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxc_container
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.lxca.lxca_cmms:
+ redirect: community.general.lxca_cmms
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxca_cmms
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.lxca.lxca_nodes:
+ redirect: community.general.lxca_nodes
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxca_nodes
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.lxd.lxd_container:
+ redirect: community.general.lxd_container
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxd_container
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.lxd.lxd_profile:
+ redirect: community.general.lxd_profile
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxd_profile
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.lxd.lxd_project:
+ redirect: community.general.lxd_project
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.lxd_project
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.macports:
+ redirect: community.general.macports
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.macports
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.mail:
+ redirect: community.general.mail
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mail
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.make:
+ redirect: community.general.make
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.make
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_alert_profiles:
+ redirect: community.general.manageiq_alert_profiles
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_alert_profiles
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_alerts:
+ redirect: community.general.manageiq_alerts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_alerts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_group:
+ redirect: community.general.manageiq_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_policies:
+ redirect: community.general.manageiq_policies
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_policies
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_policies_info:
+ redirect: community.general.manageiq_policies_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_policies_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_provider:
+ redirect: community.general.manageiq_provider
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_provider
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_tags:
+ redirect: community.general.manageiq_tags
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_tags
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_tags_info:
+ redirect: community.general.manageiq_tags_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_tags_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_tenant:
+ redirect: community.general.manageiq_tenant
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_tenant
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.manageiq.manageiq_user:
+ redirect: community.general.manageiq_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.manageiq_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.mas:
+ redirect: community.general.mas
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mas
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.matrix:
+ redirect: community.general.matrix
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.matrix
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.mattermost:
+ redirect: community.general.mattermost
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mattermost
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.maven_artifact:
+ redirect: community.general.maven_artifact
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.maven_artifact
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.memset.memset_dns_reload:
+ redirect: community.general.memset_dns_reload
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_dns_reload
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ memset_memstore_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.memset_memstore_info instead.
+ cloud.memset.memset_memstore_info:
+ redirect: community.general.memset_memstore_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_memstore_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ memset_server_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.memset_server_info instead.
+ cloud.memset.memset_server_info:
+ redirect: community.general.memset_server_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_server_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.memset.memset_zone:
+ redirect: community.general.memset_zone
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_zone
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.memset.memset_zone_domain:
+ redirect: community.general.memset_zone_domain
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_zone_domain
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.memset.memset_zone_record:
+ redirect: community.general.memset_zone_record
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.memset_zone_record
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.mksysb:
+ redirect: community.general.mksysb
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mksysb
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.modprobe:
+ redirect: community.general.modprobe
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.modprobe
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.monit:
+ redirect: community.general.monit
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.monit
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.mqtt:
+ redirect: community.general.mqtt
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mqtt
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.mssql.mssql_db:
+ redirect: community.general.mssql_db
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mssql_db
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.mssql.mssql_script:
+ redirect: community.general.mssql_script
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.mssql_script
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ na_cdot_aggregate:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_aggregate instead.
+ na_cdot_license:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_license instead.
+ na_cdot_lun:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_lun instead.
+ na_cdot_qtree:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_qtree instead.
+ na_cdot_svm:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_svm instead.
+ na_cdot_user:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_user instead.
+ na_cdot_user_role:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_user_role instead.
+ na_cdot_volume:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.ontap.na_ontap_volume instead.
+ na_ontap_gather_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use netapp.ontap.na_ontap_info instead.
+ monitoring.nagios:
+ redirect: community.general.nagios
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nagios
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.netcup_dns:
+ redirect: community.general.netcup_dns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.netcup_dns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.newrelic_deployment:
+ redirect: community.general.newrelic_deployment
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.newrelic_deployment
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.nexmo:
+ redirect: community.general.nexmo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nexmo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ nginx_status_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.nginx_status_info instead.
+ web_infrastructure.nginx_status_info:
+ redirect: community.general.nginx_status_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nginx_status_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.smartos.nictagadm:
+ redirect: community.general.nictagadm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nictagadm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ nios_a_record:
+ redirect: infoblox.nios_modules.nios_a_record
+ nios_aaaa_record:
+ redirect: infoblox.nios_modules.nios_aaaa_record
+ nios_cname_record:
+ redirect: infoblox.nios_modules.nios_cname_record
+ nios_dns_view:
+ redirect: infoblox.nios_modules.nios_dns_view
+ nios_fixed_address:
+ redirect: infoblox.nios_modules.nios_fixed_address
+ nios_host_record:
+ redirect: infoblox.nios_modules.nios_host_record
+ nios_member:
+ redirect: infoblox.nios_modules.nios_member
+ nios_mx_record:
+ redirect: infoblox.nios_modules.nios_mx_record
+ nios_naptr_record:
+ redirect: infoblox.nios_modules.nios_naptr_record
+ nios_network:
+ redirect: infoblox.nios_modules.nios_network
+ nios_network_view:
+ redirect: infoblox.nios_modules.nios_network_view
+ nios_nsgroup:
+ redirect: infoblox.nios_modules.nios_nsgroup
+ nios_ptr_record:
+ redirect: infoblox.nios_modules.nios_ptr_record
+ nios_srv_record:
+ redirect: infoblox.nios_modules.nios_srv_record
+ nios_txt_record:
+ redirect: infoblox.nios_modules.nios_txt_record
+ nios_zone:
+ redirect: infoblox.nios_modules.nios_zone
+ net_tools.nmcli:
+ redirect: community.general.nmcli
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nmcli
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.nomad.nomad_job:
+ redirect: community.general.nomad_job
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nomad_job
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.nomad.nomad_job_info:
+ redirect: community.general.nomad_job_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nomad_job_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.nosh:
+ redirect: community.general.nosh
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nosh
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.npm:
+ redirect: community.general.npm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.npm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.nsupdate:
+ redirect: community.general.nsupdate
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.nsupdate
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oracle.oci_vcn:
+ redirect: community.general.oci_vcn
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oci_vcn
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.odbc:
+ redirect: community.general.odbc
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.odbc
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.office_365_connector_card:
+ redirect: community.general.office_365_connector_card
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.office_365_connector_card
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.ohai:
+ redirect: community.general.ohai
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ohai
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.omapi_host:
+ redirect: community.general.omapi_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.omapi_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ ome_device_info:
+ redirect: dellemc.openmanage.ome_device_info
+ cloud.opennebula.one_host:
+ redirect: community.general.one_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.opennebula.one_image:
+ redirect: community.general.one_image
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_image
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ one_image_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.one_image_info instead.
+ cloud.opennebula.one_image_info:
+ redirect: community.general.one_image_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_image_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.opennebula.one_service:
+ redirect: community.general.one_service
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_service
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.opennebula.one_template:
+ redirect: community.general.one_template
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_template
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.opennebula.one_vm:
+ redirect: community.general.one_vm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.one_vm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_firewall_policy:
+ redirect: community.general.oneandone_firewall_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_firewall_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_load_balancer:
+ redirect: community.general.oneandone_load_balancer
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_load_balancer
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_monitoring_policy:
+ redirect: community.general.oneandone_monitoring_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_monitoring_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_private_network:
+ redirect: community.general.oneandone_private_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_private_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_public_ip:
+ redirect: community.general.oneandone_public_ip
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_public_ip
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.oneandone.oneandone_server:
+ redirect: community.general.oneandone_server
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneandone_server
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ onepassword_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.onepassword_info instead.
+ identity.onepassword_info:
+ redirect: community.general.onepassword_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.onepassword_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_datacenter_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_datacenter_info instead.
+ remote_management.oneview.oneview_datacenter_info:
+ redirect: community.general.oneview_datacenter_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_datacenter_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_enclosure_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_enclosure_info instead.
+ remote_management.oneview.oneview_enclosure_info:
+ redirect: community.general.oneview_enclosure_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_enclosure_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_ethernet_network:
+ redirect: community.general.oneview_ethernet_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_ethernet_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_ethernet_network_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_ethernet_network_info instead.
+ remote_management.oneview.oneview_ethernet_network_info:
+ redirect: community.general.oneview_ethernet_network_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_ethernet_network_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_fc_network:
+ redirect: community.general.oneview_fc_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_fc_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_fc_network_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_fc_network_info instead.
+ remote_management.oneview.oneview_fc_network_info:
+ redirect: community.general.oneview_fc_network_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_fc_network_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_fcoe_network:
+ redirect: community.general.oneview_fcoe_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_fcoe_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_fcoe_network_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_fcoe_network_info instead.
+ remote_management.oneview.oneview_fcoe_network_info:
+ redirect: community.general.oneview_fcoe_network_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_fcoe_network_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_logical_interconnect_group:
+ redirect: community.general.oneview_logical_interconnect_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_logical_interconnect_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_logical_interconnect_group_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_logical_interconnect_group_info
+ instead.
+ remote_management.oneview.oneview_logical_interconnect_group_info:
+ redirect: community.general.oneview_logical_interconnect_group_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_logical_interconnect_group_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_network_set:
+ redirect: community.general.oneview_network_set
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_network_set
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_network_set_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_network_set_info instead.
+ remote_management.oneview.oneview_network_set_info:
+ redirect: community.general.oneview_network_set_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_network_set_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.oneview.oneview_san_manager:
+ redirect: community.general.oneview_san_manager
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_san_manager
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ oneview_san_manager_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.oneview_san_manager_info instead.
+ remote_management.oneview.oneview_san_manager_info:
+ redirect: community.general.oneview_san_manager_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.oneview_san_manager_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ online_server_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.online_server_info instead.
+ cloud.online.online_server_info:
+ redirect: community.general.online_server_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.online_server_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ online_user_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.online_user_info instead.
+ cloud.online.online_user_info:
+ redirect: community.general.online_user_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.online_user_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.open_iscsi:
+ redirect: community.general.open_iscsi
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.open_iscsi
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.openbsd_pkg:
+ redirect: community.general.openbsd_pkg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.openbsd_pkg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ identity.opendj.opendj_backendprop:
+ redirect: community.general.opendj_backendprop
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.opendj_backendprop
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.openwrt_init:
+ redirect: community.general.openwrt_init
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.openwrt_init
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.opkg:
+ redirect: community.general.opkg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.opkg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.osx_defaults:
+ redirect: community.general.osx_defaults
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.osx_defaults
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.ovh.ovh_ip_failover:
+ redirect: community.general.ovh_ip_failover
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ovh_ip_failover
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.ovh.ovh_ip_loadbalancing_backend:
+ redirect: community.general.ovh_ip_loadbalancing_backend
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ovh_ip_loadbalancing_backend
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.ovh.ovh_monthly_billing:
+ redirect: community.general.ovh_monthly_billing
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ovh_monthly_billing
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ ovirt:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_vm instead.
+ ovirt_affinity_label_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_affinity_label_info instead.
+ ovirt_api_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_api_info instead.
+ ovirt_cluster_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_cluster_info instead.
+ ovirt_datacenter_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_datacenter_info instead.
+ ovirt_disk_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_disk_info instead.
+ ovirt_event_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_event_info instead.
+ ovirt_external_provider_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_external_provider_info instead.
+ ovirt_group_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_group_info instead.
+ ovirt_host_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_host_info instead.
+ ovirt_host_storage_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_host_storage_info instead.
+ ovirt_network_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_network_info instead.
+ ovirt_nic_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_nic_info instead.
+ ovirt_permission_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_permission_info instead.
+ ovirt_quota_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_quota_info instead.
+ ovirt_scheduling_policy_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_scheduling_policy_info instead.
+ ovirt_snapshot_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_snapshot_info instead.
+ ovirt_storage_domain_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_storage_domain_info instead.
+ ovirt_storage_template_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_storage_template_info instead.
+ ovirt_storage_vm_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_storage_vm_info instead.
+ ovirt_tag_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_tag_info instead.
+ ovirt_template_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_template_info instead.
+ ovirt_user_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_user_info instead.
+ ovirt_vm_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_vm_info instead.
+ ovirt_vmpool_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead.
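+ # Illustrative sketch (comment only): the ovirt_*_facts tombstones above
+ # follow the Ansible 2.9 rename of *_facts modules to *_info. The *_info
+ # replacements live in ovirt.ovirt and return results via register instead
+ # of injecting host facts, roughly (option name hypothetical):
+ #
+ #   - ovirt.ovirt.ovirt_vm_info:
+ #       pattern: name=myvm
+ #     register: vm_info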
+ clustering.pacemaker_cluster:
+ redirect: community.general.pacemaker_cluster
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pacemaker_cluster
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_device:
+ redirect: community.general.packet_device
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_device
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_ip_subnet:
+ redirect: community.general.packet_ip_subnet
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_ip_subnet
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_project:
+ redirect: community.general.packet_project
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_project
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_sshkey:
+ redirect: community.general.packet_sshkey
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_sshkey
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_volume:
+ redirect: community.general.packet_volume
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_volume
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.packet.packet_volume_attachment:
+ redirect: community.general.packet_volume_attachment
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.packet_volume_attachment
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pacman:
+ redirect: community.general.pacman
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pacman
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pacman_key:
+ redirect: community.general.pacman_key
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pacman_key
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.pagerduty:
+ redirect: community.general.pagerduty
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pagerduty
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.pagerduty_alert:
+ redirect: community.general.pagerduty_alert
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pagerduty_alert
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.pagerduty_change:
+ redirect: community.general.pagerduty_change
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pagerduty_change
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.pagerduty_user:
+ redirect: community.general.pagerduty_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pagerduty_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.pam_limits:
+ redirect: community.general.pam_limits
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pam_limits
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.pamd:
+ redirect: community.general.pamd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pamd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.parted:
+ redirect: community.general.parted
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.parted
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.pear:
+ redirect: community.general.pear
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pear
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.pids:
+ redirect: community.general.pids
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pids
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.pingdom:
+ redirect: community.general.pingdom
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pingdom
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.pip_package_info:
+ redirect: community.general.pip_package_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pip_package_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.pipx:
+ redirect: community.general.pipx
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pipx
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.pipx_info:
+ redirect: community.general.pipx_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pipx_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pkg5:
+ redirect: community.general.pkg5
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pkg5
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pkg5_publisher:
+ redirect: community.general.pkg5_publisher
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pkg5_publisher
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pkgin:
+ redirect: community.general.pkgin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pkgin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pkgng:
+ redirect: community.general.pkgng
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pkgng
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pkgutil:
+ redirect: community.general.pkgutil
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pkgutil
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.pmem.pmem:
+ redirect: community.general.pmem
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pmem
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.portage:
+ redirect: community.general.portage
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.portage
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.portinstall:
+ redirect: community.general.portinstall
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.portinstall
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ postgresql_copy:
+ redirect: community.postgresql.postgresql_copy
+ postgresql_db:
+ redirect: community.postgresql.postgresql_db
+ postgresql_ext:
+ redirect: community.postgresql.postgresql_ext
+ postgresql_idx:
+ redirect: community.postgresql.postgresql_idx
+ postgresql_info:
+ redirect: community.postgresql.postgresql_info
+ postgresql_lang:
+ redirect: community.postgresql.postgresql_lang
+ postgresql_membership:
+ redirect: community.postgresql.postgresql_membership
+ postgresql_owner:
+ redirect: community.postgresql.postgresql_owner
+ postgresql_pg_hba:
+ redirect: community.postgresql.postgresql_pg_hba
+ postgresql_ping:
+ redirect: community.postgresql.postgresql_ping
+ postgresql_privs:
+ redirect: community.postgresql.postgresql_privs
+ postgresql_publication:
+ redirect: community.postgresql.postgresql_publication
+ postgresql_query:
+ redirect: community.postgresql.postgresql_query
+ postgresql_schema:
+ redirect: community.postgresql.postgresql_schema
+ postgresql_sequence:
+ redirect: community.postgresql.postgresql_sequence
+ postgresql_set:
+ redirect: community.postgresql.postgresql_set
+ postgresql_slot:
+ redirect: community.postgresql.postgresql_slot
+ postgresql_subscription:
+ redirect: community.postgresql.postgresql_subscription
+ postgresql_table:
+ redirect: community.postgresql.postgresql_table
+ postgresql_tablespace:
+ redirect: community.postgresql.postgresql_tablespace
+ postgresql_user:
+ redirect: community.postgresql.postgresql_user
+ postgresql_user_obj_stat_info:
+ redirect: community.postgresql.postgresql_user_obj_stat_info
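+ # Unlike the deprecated internal names elsewhere in this file, the
+ # postgresql_* entries above are plain redirects with no deprecation block:
+ # these modules moved to the community.postgresql collection, so the old
+ # community.general names are forwarded silently. A task sketch, assuming a
+ # reachable PostgreSQL server and a hypothetical database name; both
+ # spellings resolve to the same module:
+ #
+ # - name: Ensure a database exists (old name, resolved through this redirect)
+ #   community.general.postgresql_db:
+ #     name: example_db
+ #     state: present
+ #
+ # - name: Equivalent task using the new collection directly
+ #   community.postgresql.postgresql_db:
+ #     name: example_db
+ #     state: present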
+ net_tools.pritunl.pritunl_org:
+ redirect: community.general.pritunl_org
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pritunl_org
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.pritunl.pritunl_org_info:
+ redirect: community.general.pritunl_org_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pritunl_org_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.pritunl.pritunl_user:
+ redirect: community.general.pritunl_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pritunl_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.pritunl.pritunl_user_info:
+ redirect: community.general.pritunl_user_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pritunl_user_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.profitbricks.profitbricks:
+ redirect: community.general.profitbricks
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.profitbricks
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.profitbricks.profitbricks_datacenter:
+ redirect: community.general.profitbricks_datacenter
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.profitbricks_datacenter
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.profitbricks.profitbricks_nic:
+ redirect: community.general.profitbricks_nic
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.profitbricks_nic
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.profitbricks.profitbricks_volume:
+ redirect: community.general.profitbricks_volume
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.profitbricks_volume
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.profitbricks.profitbricks_volume_attachments:
+ redirect: community.general.profitbricks_volume_attachments
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.profitbricks_volume_attachments
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox:
+ redirect: community.general.proxmox
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_disk:
+ redirect: community.general.proxmox_disk
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_disk
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_domain_info:
+ redirect: community.general.proxmox_domain_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_domain_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_group_info:
+ redirect: community.general.proxmox_group_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_group_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_kvm:
+ redirect: community.general.proxmox_kvm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_kvm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_nic:
+ redirect: community.general.proxmox_nic
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_nic
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_snap:
+ redirect: community.general.proxmox_snap
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_snap
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_storage_info:
+ redirect: community.general.proxmox_storage_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_storage_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_tasks_info:
+ redirect: community.general.proxmox_tasks_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_tasks_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_template:
+ redirect: community.general.proxmox_template
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_template
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.proxmox_user_info:
+ redirect: community.general.proxmox_user_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.proxmox_user_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.pubnub.pubnub_blocks:
+ redirect: community.general.pubnub_blocks
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pubnub_blocks
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.pulp_repo:
+ redirect: community.general.pulp_repo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pulp_repo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.puppet:
+ redirect: community.general.puppet
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.puppet
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ purefa_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use purestorage.flasharray.purefa_info instead.
+ purefb_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use purestorage.flashblade.purefb_info instead.
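+ # A tombstone is harder than a redirect: the name is not forwarded, and any
+ # task still using it fails immediately with the warning_text above. The fix
+ # is a rename in the playbook; a sketch, with connection parameters (array
+ # URL, API token) omitted as they depend on the target FlashArray:
+ #
+ # - name: Fails since community.general 3.0.0
+ #   community.general.purefa_facts:
+ #
+ # - name: Working replacement from the purestorage.flasharray collection
+ #   purestorage.flasharray.purefa_info: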
+ notification.pushbullet:
+ redirect: community.general.pushbullet
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pushbullet
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.pushover:
+ redirect: community.general.pushover
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.pushover
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ python_requirements_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.python_requirements_info instead.
+ system.python_requirements_info:
+ redirect: community.general.python_requirements_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.python_requirements_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax:
+ redirect: community.general.rax
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_cbs:
+ redirect: community.general.rax_cbs
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_cbs
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_cbs_attachments:
+ redirect: community.general.rax_cbs_attachments
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_cbs_attachments
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_cdb:
+ redirect: community.general.rax_cdb
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_cdb
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_cdb_database:
+ redirect: community.general.rax_cdb_database
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_cdb_database
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_cdb_user:
+ redirect: community.general.rax_cdb_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_cdb_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_clb:
+ redirect: community.general.rax_clb
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_clb
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_clb_nodes:
+ redirect: community.general.rax_clb_nodes
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_clb_nodes
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_clb_ssl:
+ redirect: community.general.rax_clb_ssl
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_clb_ssl
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_dns:
+ redirect: community.general.rax_dns
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_dns
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_dns_record:
+ redirect: community.general.rax_dns_record
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_dns_record
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_facts:
+ redirect: community.general.rax_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_files:
+ redirect: community.general.rax_files
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_files
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_files_objects:
+ redirect: community.general.rax_files_objects
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_files_objects
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_identity:
+ redirect: community.general.rax_identity
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_identity
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_keypair:
+ redirect: community.general.rax_keypair
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_keypair
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_meta:
+ redirect: community.general.rax_meta
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_meta
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_mon_alarm:
+ redirect: community.general.rax_mon_alarm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_mon_alarm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_mon_check:
+ redirect: community.general.rax_mon_check
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_mon_check
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_mon_entity:
+ redirect: community.general.rax_mon_entity
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_mon_entity
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_mon_notification:
+ redirect: community.general.rax_mon_notification
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_mon_notification
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_mon_notification_plan:
+ redirect: community.general.rax_mon_notification_plan
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_mon_notification_plan
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_network:
+ redirect: community.general.rax_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_queue:
+ redirect: community.general.rax_queue
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_queue
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_scaling_group:
+ redirect: community.general.rax_scaling_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_scaling_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.rackspace.rax_scaling_policy:
+ redirect: community.general.rax_scaling_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rax_scaling_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.read_csv:
+ redirect: community.general.read_csv
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.read_csv
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.redfish_command:
+ redirect: community.general.redfish_command
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redfish_command
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.redfish_config:
+ redirect: community.general.redfish_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redfish_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ redfish_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.redfish_info instead.
+ remote_management.redfish.redfish_info:
+ redirect: community.general.redfish_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redfish_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.redhat_subscription:
+ redirect: community.general.redhat_subscription
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redhat_subscription
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.redis:
+ redirect: community.general.redis
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redis
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.redis_data:
+ redirect: community.general.redis_data
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redis_data
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.redis_data_incr:
+ redirect: community.general.redis_data_incr
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redis_data_incr
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.redis_data_info:
+ redirect: community.general.redis_data_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redis_data_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.redis_info:
+ redirect: community.general.redis_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.redis_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.rhevm:
+ redirect: community.general.rhevm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rhevm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.rhn_channel:
+ redirect: community.general.rhn_channel
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rhn_channel
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.rhn_register:
+ redirect: community.general.rhn_register
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rhn_register
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.rhsm_release:
+ redirect: community.general.rhsm_release
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rhsm_release
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.rhsm_repository:
+ redirect: community.general.rhsm_repository
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rhsm_repository
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.misc.riak:
+ redirect: community.general.riak
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.riak
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.rocketchat:
+ redirect: community.general.rocketchat
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rocketchat
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.rollbar_deployment:
+ redirect: community.general.rollbar_deployment
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rollbar_deployment
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.rpm_ostree_pkg:
+ redirect: community.general.rpm_ostree_pkg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rpm_ostree_pkg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.rundeck_acl_policy:
+ redirect: community.general.rundeck_acl_policy
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rundeck_acl_policy
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.rundeck_job_executions_info:
+ redirect: community.general.rundeck_job_executions_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rundeck_job_executions_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.rundeck_job_run:
+ redirect: community.general.rundeck_job_run
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rundeck_job_run
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.rundeck_project:
+ redirect: community.general.rundeck_project
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.rundeck_project
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.runit:
+ redirect: community.general.runit
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.runit
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.sap_task_list_execute:
+ redirect: community.general.sap_task_list_execute
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sap_task_list_execute
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.sapcar_extract:
+ redirect: community.general.sapcar_extract
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sapcar_extract
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.say:
+ redirect: community.general.say
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.say
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_compute:
+ redirect: community.general.scaleway_compute
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_compute
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_compute_private_network:
+ redirect: community.general.scaleway_compute_private_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_compute_private_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_container_registry:
+ redirect: community.general.scaleway_container_registry
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_container_registry
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_container_registry_info:
+ redirect: community.general.scaleway_container_registry_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_container_registry_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_database_backup:
+ redirect: community.general.scaleway_database_backup
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_database_backup
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_function_namespace:
+ redirect: community.general.scaleway_function_namespace
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_function_namespace
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_function_namespace_info:
+ redirect: community.general.scaleway_function_namespace_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_function_namespace_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_image_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_image_info instead.
+ cloud.scaleway.scaleway_image_info:
+ redirect: community.general.scaleway_image_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_image_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_ip:
+ redirect: community.general.scaleway_ip
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_ip
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_ip_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_ip_info instead.
+ cloud.scaleway.scaleway_ip_info:
+ redirect: community.general.scaleway_ip_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_ip_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_lb:
+ redirect: community.general.scaleway_lb
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_lb
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_organization_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_organization_info instead.
+ cloud.scaleway.scaleway_organization_info:
+ redirect: community.general.scaleway_organization_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_organization_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_private_network:
+ redirect: community.general.scaleway_private_network
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_private_network
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_security_group:
+ redirect: community.general.scaleway_security_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_security_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_security_group_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_security_group_info instead.
+ cloud.scaleway.scaleway_security_group_info:
+ redirect: community.general.scaleway_security_group_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_security_group_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_security_group_rule:
+ redirect: community.general.scaleway_security_group_rule
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_security_group_rule
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_server_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_server_info instead.
+ cloud.scaleway.scaleway_server_info:
+ redirect: community.general.scaleway_server_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_server_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_snapshot_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_snapshot_info instead.
+ cloud.scaleway.scaleway_snapshot_info:
+ redirect: community.general.scaleway_snapshot_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_snapshot_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_sshkey:
+ redirect: community.general.scaleway_sshkey
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_sshkey
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_user_data:
+ redirect: community.general.scaleway_user_data
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_user_data
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.scaleway.scaleway_volume:
+ redirect: community.general.scaleway_volume
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_volume
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ scaleway_volume_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.scaleway_volume_info instead.
+ cloud.scaleway.scaleway_volume_info:
+ redirect: community.general.scaleway_volume_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.scaleway_volume_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.sefcontext:
+ redirect: community.general.sefcontext
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sefcontext
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.selinux_permissive:
+ redirect: community.general.selinux_permissive
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.selinux_permissive
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.selogin:
+ redirect: community.general.selogin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.selogin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.sendgrid:
+ redirect: community.general.sendgrid
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sendgrid
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.sensu.sensu_check:
+ redirect: community.general.sensu_check
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sensu_check
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.sensu.sensu_client:
+ redirect: community.general.sensu_client
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sensu_client
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.sensu.sensu_handler:
+ redirect: community.general.sensu_handler
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sensu_handler
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.sensu.sensu_silence:
+ redirect: community.general.sensu_silence
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sensu_silence
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.sensu.sensu_subscription:
+ redirect: community.general.sensu_subscription
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sensu_subscription
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.seport:
+ redirect: community.general.seport
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.seport
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.serverless:
+ redirect: community.general.serverless
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.serverless
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ sf_account_manager:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.elementsw.na_elementsw_account instead.
+ sf_check_connections:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.elementsw.na_elementsw_check_connections instead.
+ sf_snapshot_schedule_manager:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.elementsw.na_elementsw_snapshot_schedule instead.
+ sf_volume_access_group_manager:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.elementsw.na_elementsw_access_group instead.
+ sf_volume_manager:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use netapp.elementsw.na_elementsw_volume instead.
+ system.shutdown:
+ redirect: community.general.shutdown
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.shutdown
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.softlayer.sl_vm:
+ redirect: community.general.sl_vm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sl_vm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.slack:
+ redirect: community.general.slack
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.slack
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.slackpkg:
+ redirect: community.general.slackpkg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.slackpkg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ smartos_image_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.smartos_image_info instead.
+ cloud.smartos.smartos_image_info:
+ redirect: community.general.smartos_image_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.smartos_image_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.snap:
+ redirect: community.general.snap
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.snap
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.snap_alias:
+ redirect: community.general.snap_alias
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.snap_alias
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ net_tools.snmp_facts:
+ redirect: community.general.snmp_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.snmp_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.solaris_zone:
+ redirect: community.general.solaris_zone
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.solaris_zone
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.sorcery:
+ redirect: community.general.sorcery
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sorcery
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.spectrum_device:
+ redirect: community.general.spectrum_device
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.spectrum_device
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.spectrum_model_attrs:
+ redirect: community.general.spectrum_model_attrs
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.spectrum_model_attrs
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.spotinst.spotinst_aws_elastigroup:
+ redirect: community.general.spotinst_aws_elastigroup
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.spotinst_aws_elastigroup
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.hpe3par.ss_3par_cpg:
+ redirect: community.general.ss_3par_cpg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ss_3par_cpg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.ssh_config:
+ redirect: community.general.ssh_config
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ssh_config
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.stackdriver:
+ redirect: community.general.stackdriver
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.stackdriver
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.stacki.stacki_host:
+ redirect: community.general.stacki_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.stacki_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.statsd:
+ redirect: community.general.statsd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.statsd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.statusio_maintenance:
+ redirect: community.general.statusio_maintenance
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.statusio_maintenance
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.sudoers:
+ redirect: community.general.sudoers
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sudoers
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.supervisorctl:
+ redirect: community.general.supervisorctl
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.supervisorctl
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.svc:
+ redirect: community.general.svc
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.svc
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.svr4pkg:
+ redirect: community.general.svr4pkg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.svr4pkg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.swdepot:
+ redirect: community.general.swdepot
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.swdepot
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.swupd:
+ redirect: community.general.swupd
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.swupd
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.syslogger:
+ redirect: community.general.syslogger
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.syslogger
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.syspatch:
+ redirect: community.general.syspatch
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.syspatch
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.sysrc:
+ redirect: community.general.sysrc
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sysrc
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.sysupgrade:
+ redirect: community.general.sysupgrade
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.sysupgrade
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.taiga_issue:
+ redirect: community.general.taiga_issue
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.taiga_issue
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.telegram:
+ redirect: community.general.telegram
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.telegram
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.terraform:
+ redirect: community.general.terraform
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.terraform
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.timezone:
+ redirect: community.general.timezone
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.timezone
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.twilio:
+ redirect: community.general.twilio
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.twilio
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ notification.typetalk:
+ redirect: community.general.typetalk
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.typetalk
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.univention.udm_dns_record:
+ redirect: community.general.udm_dns_record
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.udm_dns_record
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.univention.udm_dns_zone:
+ redirect: community.general.udm_dns_zone
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.udm_dns_zone
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.univention.udm_group:
+ redirect: community.general.udm_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.udm_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.univention.udm_share:
+ redirect: community.general.udm_share
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.udm_share
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.univention.udm_user:
+ redirect: community.general.udm_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.udm_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.ufw:
+ redirect: community.general.ufw
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.ufw
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ monitoring.uptimerobot:
+ redirect: community.general.uptimerobot
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.uptimerobot
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.urpmi:
+ redirect: community.general.urpmi
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.urpmi
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_aaa_group:
+ redirect: community.general.utm_aaa_group
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_aaa_group
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_aaa_group_info:
+ redirect: community.general.utm_aaa_group_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_aaa_group_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_ca_host_key_cert:
+ redirect: community.general.utm_ca_host_key_cert
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_ca_host_key_cert
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_ca_host_key_cert_info:
+ redirect: community.general.utm_ca_host_key_cert_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_ca_host_key_cert_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_dns_host:
+ redirect: community.general.utm_dns_host
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_dns_host
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_network_interface_address:
+ redirect: community.general.utm_network_interface_address
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_network_interface_address
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_network_interface_address_info:
+ redirect: community.general.utm_network_interface_address_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_network_interface_address_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_auth_profile:
+ redirect: community.general.utm_proxy_auth_profile
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_auth_profile
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_exception:
+ redirect: community.general.utm_proxy_exception
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_exception
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_frontend:
+ redirect: community.general.utm_proxy_frontend
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_frontend
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_frontend_info:
+ redirect: community.general.utm_proxy_frontend_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_frontend_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_location:
+ redirect: community.general.utm_proxy_location
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_location
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ web_infrastructure.sophos_utm.utm_proxy_location_info:
+ redirect: community.general.utm_proxy_location_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.utm_proxy_location_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.vdo:
+ redirect: community.general.vdo
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vdo
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.vertica.vertica_configuration:
+ redirect: community.general.vertica_configuration
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vertica_configuration
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ vertica_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.vertica_info instead.
+ database.vertica.vertica_info:
+ redirect: community.general.vertica_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vertica_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.vertica.vertica_role:
+ redirect: community.general.vertica_role
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vertica_role
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.vertica.vertica_schema:
+ redirect: community.general.vertica_schema
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vertica_schema
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ database.vertica.vertica_user:
+ redirect: community.general.vertica_user
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vertica_user
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.vexata.vexata_eg:
+ redirect: community.general.vexata_eg
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vexata_eg
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.vexata.vexata_volume:
+ redirect: community.general.vexata_volume
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vexata_volume
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.smartos.vmadm:
+ redirect: community.general.vmadm
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.vmadm
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.wakeonlan:
+ redirect: community.general.wakeonlan
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.wakeonlan
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.wdc_redfish_command:
+ redirect: community.general.wdc_redfish_command
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.wdc_redfish_command
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.redfish.wdc_redfish_info:
+ redirect: community.general.wdc_redfish_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.wdc_redfish_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.webfaction.webfaction_app:
+ redirect: community.general.webfaction_app
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.webfaction_app
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.webfaction.webfaction_db:
+ redirect: community.general.webfaction_db
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.webfaction_db
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.webfaction.webfaction_domain:
+ redirect: community.general.webfaction_domain
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.webfaction_domain
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.webfaction.webfaction_mailbox:
+ redirect: community.general.webfaction_mailbox
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.webfaction_mailbox
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.webfaction.webfaction_site:
+ redirect: community.general.webfaction_site
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.webfaction_site
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.xattr:
+ redirect: community.general.xattr
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xattr
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.xbps:
+ redirect: community.general.xbps
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xbps
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ remote_management.lenovoxcc.xcc_redfish_command:
+ redirect: community.general.xcc_redfish_command
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xcc_redfish_command
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.misc.xenserver_facts:
+ redirect: community.general.xenserver_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xenserver_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.xenserver.xenserver_guest:
+ redirect: community.general.xenserver_guest
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xenserver_guest
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ xenserver_guest_facts:
+ tombstone:
+ removal_version: 3.0.0
+ warning_text: Use community.general.xenserver_guest_info instead.
+ cloud.xenserver.xenserver_guest_info:
+ redirect: community.general.xenserver_guest_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xenserver_guest_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ cloud.xenserver.xenserver_guest_powerstate:
+ redirect: community.general.xenserver_guest_powerstate
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xenserver_guest_powerstate
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.xfconf:
+ redirect: community.general.xfconf
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xfconf
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.xfconf_info:
+ redirect: community.general.xfconf_info
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xfconf_info
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.xfs_quota:
+ redirect: community.general.xfs_quota
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xfs_quota
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ files.xml:
+ redirect: community.general.xml
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.xml
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.language.yarn:
+ redirect: community.general.yarn
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.yarn
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.yum_versionlock:
+ redirect: community.general.yum_versionlock
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.yum_versionlock
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.zfs.zfs:
+ redirect: community.general.zfs
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zfs
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.zfs.zfs_delegate_admin:
+ redirect: community.general.zfs_delegate_admin
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zfs_delegate_admin
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.zfs.zfs_facts:
+ redirect: community.general.zfs_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zfs_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ clustering.znode:
+ redirect: community.general.znode
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.znode
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ storage.zfs.zpool_facts:
+ redirect: community.general.zpool_facts
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zpool_facts
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.zypper:
+ redirect: community.general.zypper
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zypper
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ packaging.os.zypper_repository:
+ redirect: community.general.zypper_repository
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.zypper_repository
+ modules. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ doc_fragments:
+ _gcp:
+ redirect: community.google._gcp
+ docker:
+ redirect: community.docker.docker
+ hetzner:
+ redirect: community.hrobot.robot
+ kubevirt_common_options:
+ redirect: community.kubevirt.kubevirt_common_options
+ kubevirt_vm_options:
+ redirect: community.kubevirt.kubevirt_vm_options
+ nios:
+ redirect: infoblox.nios_modules.nios
+ postgresql:
+ redirect: community.postgresql.postgresql
+ module_utils:
+ docker.common:
+ redirect: community.docker.common
+ docker.swarm:
+ redirect: community.docker.swarm
+ gcdns:
+ redirect: community.google.gcdns
+ gce:
+ redirect: community.google.gce
+ gcp:
+ redirect: community.google.gcp
+ hetzner:
+ redirect: community.hrobot.robot
+ kubevirt:
+ redirect: community.kubevirt.kubevirt
+ net_tools.nios.api:
+ redirect: infoblox.nios_modules.api
+ postgresql:
+ redirect: community.postgresql.postgresql
+ remote_management.dellemc.dellemc_idrac:
+ redirect: dellemc.openmanage.dellemc_idrac
+ remote_management.dellemc.ome:
+ redirect: dellemc.openmanage.ome
+ callback:
+ actionable:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
+ = no' and 'display_ok_hosts = no' options.
+ full_skip:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_skipped_hosts
+ = no' option.
+ osx_say:
+ redirect: community.general.say
+ stderr:
+ tombstone:
+ removal_version: 2.0.0
+ warning_text: Use the 'default' callback plugin with 'display_failed_stderr
+ = yes' option.
+ inventory:
+ docker_machine:
+ redirect: community.docker.docker_machine
+ docker_swarm:
+ redirect: community.docker.docker_swarm
+ kubevirt:
+ redirect: community.kubevirt.kubevirt
+ filter:
+ path_join:
+ # The ansible.builtin.path_join filter has been added in ansible-base 2.10.
+ # Since plugin routing is only available since ansible-base 2.10, this
+ # redirect will be used for ansible-base 2.10 or later. This was mostly
+ # relevant before community.general 5.0.0, when community.general also
+ # supported Ansible 2.9. Back then, the included path_join filter was used
+ # for Ansible 2.9 or earlier. Now we will only keep the redirect until we
+ # eventually deprecate and then remove it.
+ redirect: ansible.builtin.path_join
+ action:
+ system.iptables_state:
+ redirect: community.general.iptables_state
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.iptables_state
+ action. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
+ system.shutdown:
+ redirect: community.general.shutdown
+ deprecation:
+ removal_version: 9.0.0
+ warning_text: You are using an internal name to access the community.general.shutdown
+ action. This has never been supported or documented, and will stop working
+ in community.general 9.0.0.
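In practice these runtime.yml entries mean a category-prefixed internal name keeps resolving only until community.general 9.0.0. A minimal playbook sketch of the migration, using community.general.ufw as an illustrative example:

    # Deprecated internal name, resolved only through the redirect above:
    - name: Enable ufw
      community.general.system.ufw:
        state: enabled

    # Supported FQCN, unaffected by the 9.0.0 removal:
    - name: Enable ufw
      community.general.ufw:
        state: enabled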
diff --git a/ansible_collections/community/general/plugins/action/iptables_state.py b/ansible_collections/community/general/plugins/action/iptables_state.py
new file mode 100644
index 000000000..f59a7298b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/action/iptables_state.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.action import ActionBase
+from ansible.errors import AnsibleActionFail, AnsibleConnectionFailure
+from ansible.utils.vars import merge_hash
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class ActionModule(ActionBase):
+
+ # Keep internal params away from user interactions
+ _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait'))
+ DEFAULT_SUDOABLE = True
+
+ MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = (
+ "This module doesn't support async>0 and poll>0 when its 'state' param "
+ "is set to 'restored'. To enable its rollback feature (that needs the "
+ "module to run asynchronously on the remote), please set task attribute "
+ "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+ MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = (
+ "Attempts to restore iptables state without rollback in case of mistake "
+ "may lead the ansible controller to loose access to the hosts and never "
+ "regain it before fixing firewall rules through a serial console, or any "
+ "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and "
+ "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) "
+ "(recommended).")
+ MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = (
+ "You attempt to restore iptables state with rollback in case of mistake, "
+ "but with settings that will lead this rollback to happen AFTER that the "
+ "controller will reach its own timeout. Please set task attribute 'poll' "
+ "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than "
+ "'ansible_timeout' (=%s) (recommended).")
+
+ def _async_result(self, async_status_args, task_vars, timeout):
+ '''
+ Retrieve results of the asynchronous task, and display them in place of
+ the async wrapper results (those with the ansible_job_id key).
+ '''
+ async_status = self._task.copy()
+ async_status.args = async_status_args
+ async_status.action = 'ansible.builtin.async_status'
+ async_status.async_val = 0
+ async_action = self._shared_loader_obj.action_loader.get(
+ async_status.action, task=async_status, connection=self._connection,
+ play_context=self._play_context, loader=self._loader, templar=self._templar,
+ shared_loader_obj=self._shared_loader_obj)
+
+ if async_status.args['mode'] == 'cleanup':
+ return async_action.run(task_vars=task_vars)
+
+ # At least one iteration is required, even if timeout is 0.
+ for dummy in range(max(1, timeout)):
+ async_result = async_action.run(task_vars=task_vars)
+ if async_result.get('finished', 0) == 1:
+ break
+ time.sleep(min(1, timeout))
+
+ return async_result
+
+ def run(self, tmp=None, task_vars=None):
+
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ if not result.get('skipped'):
+
+ # FUTURE: better to let _execute_module calculate this internally?
+ wrap_async = self._task.async_val and not self._connection.has_native_async
+
+ # Set short names for values we'll have to compare or reuse
+ task_poll = self._task.poll
+ task_async = self._task.async_val
+ check_mode = self._play_context.check_mode
+ max_timeout = self._connection._play_context.timeout
+ module_args = self._task.args
+
+ if module_args.get('state', None) == 'restored':
+ if not wrap_async:
+ if not check_mode:
+ display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % (
+ task_poll,
+ task_async,
+ max_timeout))
+ elif task_poll:
+ raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % (
+ task_poll,
+ task_async,
+ max_timeout))
+ else:
+ if task_async > max_timeout and not check_mode:
+ display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % (
+ task_poll,
+ task_async,
+ max_timeout))
+
+ # inject the async directory based on the shell option into the
+ # module args
+ async_dir = self.get_shell_option('async_dir', default="~/.ansible_async")
+
+ # Bind the loop max duration to consistent values on both
+ # remote and local sides (if not the same, make the loop
+ # longer on the controller); and set a backup file path.
+ module_args['_timeout'] = task_async
+ module_args['_back'] = '%s/iptables.state' % async_dir
+ async_status_args = dict(mode='status')
+ confirm_cmd = 'rm -f %s' % module_args['_back']
+ starter_cmd = 'touch %s.starter' % module_args['_back']
+ remaining_time = max(task_async, max_timeout)
+
+ # do work!
+ result = merge_hash(result, self._execute_module(module_args=module_args, task_vars=task_vars, wrap_async=wrap_async))
+
+ # Then the 3-step "go ahead or rollback":
+ # 1. Catch early errors of the module (in asynchronous task) if any.
+ # Touch a file on the target to signal the module to process now.
+ # 2. Reset connection to ensure a persistent one will not be reused.
+ # 3. Confirm the restored state by removing the backup on the remote.
+ # Retrieve the results of the asynchronous task to return them.
+ if '_back' in module_args:
+ async_status_args['jid'] = result.get('ansible_job_id', None)
+ if async_status_args['jid'] is None:
+ raise AnsibleActionFail("Unable to get 'ansible_job_id'.")
+
+ # Catch early errors due to missing mandatory option, bad
+ # option type/value, missing required system command, etc.
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, 0))
+
+ # The module knows not to run the main iptables-restore command
+ # before finding (and deleting) the 'starter' cookie on the host,
+ # so the previous query will not hit the ssh timeout.
+ dummy = self._low_level_execute_command(starter_cmd, sudoable=self.DEFAULT_SUDOABLE)
+
+ # As the main command has not yet been executed on the target, here
+ # 'finished' means 'failed before the main command was executed'.
+ if not result['finished']:
+ try:
+ self._connection.reset()
+ except AttributeError:
+ pass
+
+ for dummy in range(max_timeout):
+ time.sleep(1)
+ remaining_time -= 1
+ # - AnsibleConnectionFailure covers rejected requests (i.e.
+ # by rules with '--jump REJECT')
+ # - ansible_timeout is able to cover dropped requests (due
+ # to a rule or policy DROP) if not lower than async_val.
+ try:
+ dummy = self._low_level_execute_command(confirm_cmd, sudoable=self.DEFAULT_SUDOABLE)
+ break
+ except AnsibleConnectionFailure:
+ continue
+
+ result = merge_hash(result, self._async_result(async_status_args, task_vars, remaining_time))
+
+ # Clean up async-related fields and internal params
+ for key in ('ansible_job_id', 'results_file', 'started', 'finished'):
+ if result.get(key):
+ del result[key]
+
+ if result.get('invocation', {}).get('module_args'):
+ for key in ('_back', '_timeout', '_async_dir', 'jid'):
+ if result['invocation']['module_args'].get(key):
+ del result['invocation']['module_args'][key]
+
+ async_status_args['mode'] = 'cleanup'
+ dummy = self._async_result(async_status_args, task_vars, 0)
+
+ if not wrap_async:
+ # remove a temporary path we created
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+
+ return result
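The MSG_* warnings above translate into one recommended task shape: with state=restored, run the module asynchronously and keep poll at 0 so the rollback can fire if connectivity is lost. A minimal sketch under those assumptions (the path is illustrative):

    - name: Restore iptables state, rolling back automatically on lost access
      community.general.iptables_state:
        state: restored
        path: /run/iptables.apply
      async: "{{ ansible_timeout }}"  # >2 and not greater than ansible_timeout
      poll: 0                         # poll > 0 with async > 0 fails in run() above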
diff --git a/ansible_collections/community/general/plugins/action/shutdown.py b/ansible_collections/community/general/plugins/action/shutdown.py
new file mode 100644
index 000000000..c2860f1d6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/action/shutdown.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Amin Vakil <info@aminvakil.com>
+# Copyright (c) 2016-2018, Matt Davis <mdavis@ansible.com>
+# Copyright (c) 2018, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.collections import is_string
+from ansible.plugins.action import ActionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class TimedOutException(Exception):
+ pass
+
+
+class ActionModule(ActionBase):
+ TRANSFERS_FILES = False
+ _VALID_ARGS = frozenset((
+ 'msg',
+ 'delay',
+ 'search_paths'
+ ))
+
+ DEFAULT_CONNECT_TIMEOUT = None
+ DEFAULT_PRE_SHUTDOWN_DELAY = 0
+ DEFAULT_SHUTDOWN_MESSAGE = 'Shutdown initiated by Ansible'
+ DEFAULT_SHUTDOWN_COMMAND = 'shutdown'
+ DEFAULT_SHUTDOWN_COMMAND_ARGS = '-h {delay_min} "{message}"'
+ DEFAULT_SUDOABLE = True
+
+ SHUTDOWN_COMMANDS = {
+ 'alpine': 'poweroff',
+ 'vmkernel': 'halt',
+ }
+
+ SHUTDOWN_COMMAND_ARGS = {
+ 'alpine': '',
+ 'void': '-h +{delay_min} "{message}"',
+ 'freebsd': '-h +{delay_sec}s "{message}"',
+ 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS,
+ 'macosx': '-h +{delay_min} "{message}"',
+ 'openbsd': '-h +{delay_min} "{message}"',
+ 'solaris': '-y -g {delay_sec} -i 5 "{message}"',
+ 'sunos': '-y -g {delay_sec} -i 5 "{message}"',
+ 'vmkernel': '-d {delay_sec}',
+ 'aix': '-Fh',
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ActionModule, self).__init__(*args, **kwargs)
+
+ @property
+ def delay(self):
+ return self._check_delay('delay', self.DEFAULT_PRE_SHUTDOWN_DELAY)
+
+ def _check_delay(self, key, default):
+ """Ensure that the value is positive or zero"""
+ value = int(self._task.args.get(key, default))
+ if value < 0:
+ value = 0
+ return value
+
+ def _get_value_from_facts(self, variable_name, distribution, default_value):
+ """Get dist+version specific args first, then distribution, then family, lastly use default"""
+ attr = getattr(self, variable_name)
+ value = attr.get(
+ distribution['name'] + distribution['version'],
+ attr.get(
+ distribution['name'],
+ attr.get(
+ distribution['family'],
+ getattr(self, default_value))))
+ return value
+
+ def get_shutdown_command_args(self, distribution):
+ args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS')
+ # Convert seconds to minutes. If less than 60, it becomes 0.
+ delay_sec = self.delay
+ shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE)
+ return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message)
+
+ def get_distribution(self, task_vars):
+ # FIXME: only execute the module if we don't already have the facts we need
+ distribution = {}
+ display.debug('{action}: running setup module to get distribution'.format(action=self._task.action))
+ module_output = self._execute_module(
+ task_vars=task_vars,
+ module_name='ansible.legacy.setup',
+ module_args={'gather_subset': 'min'})
+ try:
+ if module_output.get('failed', False):
+ raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format(
+ to_native(module_output['module_stdout']).strip(),
+ to_native(module_output['module_stderr']).strip()))
+ distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower()
+ distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0])
+ distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower())
+ display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution))
+ return distribution
+ except KeyError as ke:
+ raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0]))
+
+ def get_shutdown_command(self, task_vars, distribution):
+ shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND')
+ default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
+ search_paths = self._task.args.get('search_paths', default_search_paths)
+
+ # FIXME: switch all this to user arg spec validation methods when they are available
+ # Convert bare strings to a list
+ if is_string(search_paths):
+ search_paths = [search_paths]
+
+ # Error if we didn't get a list
+ err_msg = "'search_paths' must be a string or flat list of strings, got {0}"
+ try:
+ incorrect_type = any(not is_string(x) for x in search_paths)
+ if not isinstance(search_paths, list) or incorrect_type:
+ raise TypeError
+ except TypeError:
+ raise AnsibleError(err_msg.format(search_paths))
+
+ display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format(
+ action=self._task.action,
+ command=shutdown_bin,
+ paths=search_paths))
+ find_result = self._execute_module(
+ task_vars=task_vars,
+ # prevent collection search by calling with ansible.legacy (still allows library/ override of find)
+ module_name='ansible.legacy.find',
+ module_args={
+ 'paths': search_paths,
+ 'patterns': [shutdown_bin],
+ 'file_type': 'any'
+ }
+ )
+
+ full_path = [x['path'] for x in find_result['files']]
+ if not full_path:
+ raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths))
+ self._shutdown_command = full_path[0]
+ return self._shutdown_command
+
+ def perform_shutdown(self, task_vars, distribution):
+ result = {}
+ shutdown_result = {}
+ shutdown_command = self.get_shutdown_command(task_vars, distribution)
+ shutdown_command_args = self.get_shutdown_command_args(distribution)
+ shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args)
+
+ self.cleanup(force=True)
+ try:
+ display.vvv("{action}: shutting down server...".format(action=self._task.action))
+ display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec))
+ if self._play_context.check_mode:
+ shutdown_result['rc'] = 0
+ else:
+ shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE)
+ except AnsibleConnectionFailure as e:
+ # If the connection is closed too quickly due to the system being shut down, carry on
+ display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e)))
+ shutdown_result['rc'] = 0
+
+ if shutdown_result['rc'] != 0:
+ result['failed'] = True
+ result['shutdown'] = False
+ result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format(
+ stdout=to_native(shutdown_result['stdout'].strip()),
+ stderr=to_native(shutdown_result['stderr'].strip()))
+ return result
+
+ result['failed'] = False
+ result['shutdown_command'] = shutdown_command_exec
+ return result
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ # If running with a local connection, fail so we don't shut down ourselves
+ if self._connection.transport == 'local' and (not self._play_context.check_mode):
+ msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action)
+ return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg}
+
+ if task_vars is None:
+ task_vars = {}
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+
+ if result.get('skipped', False) or result.get('failed', False):
+ return result
+
+ distribution = self.get_distribution(task_vars)
+
+ # Initiate shutdown
+ shutdown_result = self.perform_shutdown(task_vars, distribution)
+
+ if shutdown_result['failed']:
+ result = shutdown_result
+ return result
+
+ result['shutdown'] = True
+ result['changed'] = True
+ result['shutdown_command'] = shutdown_result['shutdown_command']
+
+ return result
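A brief usage sketch of this action (argument values are illustrative); delay is given in seconds and converted per platform by get_shutdown_command_args:

    - name: Shut down the host after a one-minute delay
      community.general.shutdown:
        delay: 60
        msg: Maintenance window starting
        search_paths:
          - /sbin
          - /usr/local/sbin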
diff --git a/ansible_collections/community/general/plugins/become/doas.py b/ansible_collections/community/general/plugins/become/doas.py
new file mode 100644
index 000000000..69e730aad
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/doas.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: doas
+ short_description: Do As user
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the doas utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: doas_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_doas_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DOAS_USER
+ become_exe:
+ description: Doas executable
+ default: doas
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: doas_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_doas_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DOAS_EXE
+ become_flags:
+ description: Options to pass to doas
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: doas_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_doas_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DOAS_FLAGS
+ become_pass:
+ description: password for doas prompt
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_doas_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DOAS_PASS
+ ini:
+ - section: doas_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built-in one
+ default: []
+ ini:
+ - section: doas_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_doas_prompt_l10n
+ env:
+ - name: ANSIBLE_DOAS_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.doas'
+
+ # messages for detecting prompted password issues
+ fail = ('Permission denied',)
+ missing = ('Authorization required',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ # FIXME: more accurate would be: 'doas (%s@' % remote_user
+ # however become plugins don't have that information currently
+ b_prompts = [to_bytes(p) for p in self.get_option('prompt_l10n')] or [br'doas \(', br'Password:']
+ b_prompt = b"|".join(b_prompts)
+
+ return bool(re.match(b_prompt, b_output))
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ self.prompt = True
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if not self.get_option('become_pass') and '-n' not in flags:
+ flags += ' -n'
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ success_cmd = self._build_success_command(cmd, shell, noexe=True)
+ executable = getattr(shell, 'executable', shell.SHELL_FAMILY)
+
+ return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd)
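A minimal sketch of selecting this plugin from a play (the host group is illustrative). Note that without a become password the code above appends -n, so the matching doas rule on the target must be configured with nopass:

    - hosts: openbsd_hosts
      become: true
      become_method: community.general.doas
      become_user: root
      tasks:
        - ansible.builtin.command: id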
diff --git a/ansible_collections/community/general/plugins/become/dzdo.py b/ansible_collections/community/general/plugins/become/dzdo.py
new file mode 100644
index 000000000..a358e84e3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/dzdo.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: dzdo
+ short_description: Centrify's Direct Authorize
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the dzdo utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: dzdo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_dzdo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_DZDO_USER
+ become_exe:
+ description: Dzdo executable
+ default: dzdo
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: dzdo_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_dzdo_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_DZDO_EXE
+ become_flags:
+ description: Options to pass to dzdo
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: dzdo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_dzdo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_DZDO_FLAGS
+ become_pass:
+ description: Password for dzdo
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_dzdo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_DZDO_PASS
+ ini:
+ - section: dzdo_become_plugin
+ key: password
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.dzdo'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ if self.get_option('become_pass'):
+ self.prompt = '[dzdo via ansible, key=%s] password:' % self._id
+ flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt)
+
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+
+ return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)])
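Since build_become_command only installs the unique -p prompt when a password is supplied, a typical inventory-level sketch looks like this (the file name and vaulted variable are hypothetical):

    # host_vars/centrify_host.yml
    ansible_become: true
    ansible_become_method: community.general.dzdo
    ansible_become_user: oracle
    ansible_become_pass: "{{ vault_dzdo_password }}"  # enables the '-p "<prompt>"' branch above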
diff --git a/ansible_collections/community/general/plugins/become/ksu.py b/ansible_collections/community/general/plugins/become/ksu.py
new file mode 100644
index 000000000..fa2f66864
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/ksu.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: ksu
+ short_description: Kerberos substitute user
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the ksu utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: ksu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_ksu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_KSU_USER
+ required: true
+ become_exe:
+ description: ksu executable
+ default: ksu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: ksu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_ksu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_KSU_EXE
+ become_flags:
+ description: Options to pass to ksu
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: ksu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_ksu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_KSU_FLAGS
+ become_pass:
+ description: ksu password
+ required: false
+ vars:
+ - name: ansible_ksu_pass
+ - name: ansible_become_pass
+ - name: ansible_become_password
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_KSU_PASS
+ ini:
+ - section: ksu_become_plugin
+ key: password
+ prompt_l10n:
+ description:
+ - List of localized strings to match for prompt detection
+ - If empty we'll use the built-in one
+ default: []
+ ini:
+ - section: ksu_become_plugin
+ key: localized_prompts
+ vars:
+ - name: ansible_ksu_prompt_l10n
+ env:
+ - name: ANSIBLE_KSU_PROMPT_L10N
+'''
+
+import re
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.ksu'
+
+ # messages for detecting prompted password issues
+ fail = ('Password incorrect',)
+ missing = ('No password given',)
+
+ def check_password_prompt(self, b_output):
+ ''' checks if the expected password prompt exists in b_output '''
+
+ prompts = self.get_option('prompt_l10n') or ["Kerberos password for .*@.*:"]
+ b_prompt = b"|".join(to_bytes(p) for p in prompts)
+
+ return bool(re.match(b_prompt, b_output))
+
+ def build_become_command(self, cmd, shell):
+
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ # Prompt handling for ``ksu`` is more complicated; this
+ # is used to satisfy the connection plugin
+ self.prompt = True
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell))
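A usage sketch (the localized prompt is hypothetical); prompt_l10n entries are treated as regular expressions and matched against the remote output, just like the built-in 'Kerberos password for .*@.*:' pattern:

    - name: Run a command as root via Kerberos su
      ansible.builtin.command: id
      become: true
      become_method: community.general.ksu
      become_user: root
      vars:
        ansible_ksu_prompt_l10n:
          - 'Kerberos-Passwort für .*@.*:'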
diff --git a/ansible_collections/community/general/plugins/become/machinectl.py b/ansible_collections/community/general/plugins/become/machinectl.py
new file mode 100644
index 000000000..461a3f635
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/machinectl.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: machinectl
+ short_description: Systemd's machinectl privilege escalation
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the machinectl utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: machinectl_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_machinectl_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_MACHINECTL_USER
+ become_exe:
+ description: Machinectl executable
+ default: machinectl
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: machinectl_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_machinectl_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_MACHINECTL_EXE
+ become_flags:
+ description: Options to pass to machinectl
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: machinectl_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_machinectl_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_MACHINECTL_FLAGS
+ become_pass:
+ description: Password for machinectl
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_machinectl_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_MACHINECTL_PASS
+ ini:
+ - section: machinectl_become_plugin
+ key: password
+ notes:
+ - When not using this plugin with user C(root), it only works correctly with a polkit rule that alters
+ the behaviour of machinectl. The rule must make machinectl prompt directly for the user's credentials,
+ provided the user is allowed to perform the action (take a look at the examples section).
+ If such a rule is not present, the plugin only works when used as the root user,
+ because machinectl then shows no further prompt.
+'''
+
+EXAMPLES = r'''
+# A polkit rule needed to use the module with a non-root user.
+# See the Notes section for details.
+60-machinectl-fast-user-auth.rules: |
+ polkit.addRule(function(action, subject) {
+ if(action.id == "org.freedesktop.machine1.host-shell" && subject.isInGroup("wheel")) {
+ return polkit.Result.AUTH_SELF_KEEP;
+ }
+ });
+'''
+
+from re import compile as re_compile
+
+from ansible.plugins.become import BecomeBase
+from ansible.module_utils._text import to_bytes
+
+
+ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m'))
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.machinectl'
+
+ prompt = 'Password: '
+ fail = ('==== AUTHENTICATION FAILED ====',)
+ success = ('==== AUTHENTICATION COMPLETE ====',)
+
+ @staticmethod
+ def remove_ansi_codes(line):
+ return ansi_color_codes.sub(b"", line)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell))
+
+ def check_success(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_success(b_output)
+
+ def check_incorrect_password(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_incorrect_password(b_output)
+
+ def check_missing_password(self, b_output):
+ b_output = self.remove_ansi_codes(b_output)
+ return super().check_missing_password(b_output)
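Tying the polkit rule from EXAMPLES to the options above, a sketch for a non-root login user in the wheel group (the host group is illustrative):

    - hosts: workstations
      become: true
      become_method: community.general.machinectl
      become_user: root   # needs the polkit rule from EXAMPLES unless the login user is root
      tasks:
        - ansible.builtin.command: whoami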
diff --git a/ansible_collections/community/general/plugins/become/pbrun.py b/ansible_collections/community/general/plugins/become/pbrun.py
new file mode 100644
index 000000000..7d1437191
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/pbrun.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: pbrun
+ short_description: PowerBroker run
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pbrun utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description: User you 'become' to execute the task
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pbrun_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pbrun_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PBRUN_USER
+ become_exe:
+ description: pbrun executable
+ default: pbrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pbrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pbrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PBRUN_EXE
+ become_flags:
+ description: Options to pass to pbrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pbrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pbrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PBRUN_FLAGS
+ become_pass:
+ description: Password for pbrun
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pbrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PBRUN_PASS
+ ini:
+ - section: pbrun_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command pbrun calls in 'shell -c' or not
+ default: false
+ type: bool
+ ini:
+ - section: pbrun_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pbrun_wrap_execution
+ env:
+ - name: ANSIBLE_PBRUN_WRAP_EXECUTION
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pbrun'
+
+ prompt = 'Password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become_exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ become_user = self.get_option('become_user')
+ user = '-u %s' % (become_user) if become_user else ''
+ noexe = not self.get_option('wrap_exe')
+
+ return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)])
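A sketch exercising the wrap_exe toggle documented above; enabling it makes _build_success_command wrap the command in 'shell -c' (task values are illustrative):

    - name: Run a command through PowerBroker
      ansible.builtin.command: id
      become: true
      become_method: community.general.pbrun
      become_user: root
      vars:
        ansible_pbrun_wrap_execution: true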
diff --git a/ansible_collections/community/general/plugins/become/pfexec.py b/ansible_collections/community/general/plugins/become/pfexec.py
new file mode 100644
index 000000000..392ee961f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/pfexec.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: pfexec
+ short_description: profile based execution
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pfexec utility.
+ author: Ansible Core Team
+ options:
+ become_user:
+ description:
+ - User you 'become' to execute the task
+ - This plugin ignores this setting, as pfexec uses its own C(exec_attr) to figure this out,
+ but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: pfexec_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_pfexec_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_PFEXEC_USER
+ become_exe:
+ description: pfexec executable
+ default: pfexec
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pfexec_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pfexec_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PFEXEC_EXE
+ become_flags:
+ description: Options to pass to pfexec
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pfexec_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pfexec_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PFEXEC_FLAGS
+ become_pass:
+ description: pfexec password
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pfexec_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PFEXEC_PASS
+ ini:
+ - section: pfexec_become_plugin
+ key: password
+ wrap_exe:
+ description: Toggle to wrap the command pfexec calls in 'shell -c' or not
+ default: false
+ type: bool
+ ini:
+ - section: pfexec_become_plugin
+ key: wrap_execution
+ vars:
+ - name: ansible_pfexec_wrap_execution
+ env:
+ - name: ANSIBLE_PFEXEC_WRAP_EXECUTION
+ notes:
+ - This plugin ignores I(become_user), as pfexec uses its own C(exec_attr) to figure this out.
+'''
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pfexec'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ exe = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ noexe = not self.get_option('wrap_exe')
+ return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe))
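Because pfexec derives rights from the invoking user's exec_attr profile, a sketch only needs to select the method (the service name is illustrative):

    - name: Restart a service under an RBAC execution profile
      ansible.builtin.command: svcadm restart ssh
      become: true
      become_method: community.general.pfexec
      # become_user stays at its default of root, used only for Ansible's bookkeeping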
diff --git a/ansible_collections/community/general/plugins/become/pmrun.py b/ansible_collections/community/general/plugins/become/pmrun.py
new file mode 100644
index 000000000..74b633f09
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/pmrun.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: pmrun
+ short_description: Privilege Manager run
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the pmrun utility.
+ author: Ansible Core Team
+ options:
+ become_exe:
+ description: pmrun executable
+ default: pmrun
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: pmrun_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_pmrun_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_PMRUN_EXE
+ become_flags:
+ description: Options to pass to pmrun
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: pmrun_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_pmrun_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_PMRUN_FLAGS
+ become_pass:
+ description: pmrun password
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_pmrun_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_PMRUN_PASS
+ ini:
+ - section: pmrun_become_plugin
+ key: password
+ notes:
+ - This plugin ignores the supplied I(become_user) and uses pmrun's own configuration to select the user.
+'''
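+
+# A minimal usage sketch; the host group and task are illustrative. pmrun picks
+# the target user from its own policy, so this plugin offers no become_user
+# option.
+EXAMPLES = '''
+- name: Run a command through pmrun (illustrative)
+  hosts: managed_hosts
+  become: true
+  become_method: community.general.pmrun
+  tasks:
+    - name: Show the effective user
+      ansible.builtin.command: id
+'''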
+
+from ansible.plugins.become import BecomeBase
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.pmrun'
+ prompt = 'Enter UPM user password:'
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell)))
diff --git a/ansible_collections/community/general/plugins/become/sesu.py b/ansible_collections/community/general/plugins/become/sesu.py
new file mode 100644
index 000000000..5958c1bfc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/sesu.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: sesu
+ short_description: CA Privileged Access Manager
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the sesu utility.
+ author: ansible (@nekonyuu)
+ options:
+ become_user:
+ description: User you 'become' to execute the task.
+ default: ''
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sesu_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sesu_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SESU_USER
+ become_exe:
+ description: sesu executable
+ default: sesu
+ ini:
+ - section: privilege_escalation
+ key: become_exe
+ - section: sesu_become_plugin
+ key: executable
+ vars:
+ - name: ansible_become_exe
+ - name: ansible_sesu_exe
+ env:
+ - name: ANSIBLE_BECOME_EXE
+ - name: ANSIBLE_SESU_EXE
+ become_flags:
+ description: Options to pass to sesu
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sesu_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sesu_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SESU_FLAGS
+ become_pass:
+ description: Password to pass to sesu
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sesu_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SESU_PASS
+ ini:
+ - section: sesu_become_plugin
+ key: password
+'''
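+
+# A minimal usage sketch; the host group, user and task are illustrative:
+EXAMPLES = '''
+- name: Run a command as another user through sesu (illustrative)
+  hosts: managed_hosts
+  become: true
+  become_method: community.general.sesu
+  become_user: appuser
+  tasks:
+    - name: Show the effective user
+      ansible.builtin.command: id
+'''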
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.sesu'
+
+ prompt = 'Please enter your password:'
+ fail = missing = ('Sorry, try again with sesu.',)
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ become = self.get_option('become_exe')
+
+ flags = self.get_option('become_flags')
+ user = self.get_option('become_user')
+ return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell))
diff --git a/ansible_collections/community/general/plugins/become/sudosu.py b/ansible_collections/community/general/plugins/become/sudosu.py
new file mode 100644
index 000000000..60bb2aa51
--- /dev/null
+++ b/ansible_collections/community/general/plugins/become/sudosu.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: sudosu
+ short_description: Run tasks using sudo su -
+ description:
+ - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined.
+ author:
+ - Dag Wieers (@dagwieers)
+ version_added: 2.4.0
+ options:
+ become_user:
+ description: User you 'become' to execute the task.
+ default: root
+ ini:
+ - section: privilege_escalation
+ key: become_user
+ - section: sudo_become_plugin
+ key: user
+ vars:
+ - name: ansible_become_user
+ - name: ansible_sudo_user
+ env:
+ - name: ANSIBLE_BECOME_USER
+ - name: ANSIBLE_SUDO_USER
+ become_flags:
+ description: Options to pass to C(sudo).
+ default: -H -S -n
+ ini:
+ - section: privilege_escalation
+ key: become_flags
+ - section: sudo_become_plugin
+ key: flags
+ vars:
+ - name: ansible_become_flags
+ - name: ansible_sudo_flags
+ env:
+ - name: ANSIBLE_BECOME_FLAGS
+ - name: ANSIBLE_SUDO_FLAGS
+ become_pass:
+ description: Password to pass to C(sudo).
+ required: false
+ vars:
+ - name: ansible_become_password
+ - name: ansible_become_pass
+ - name: ansible_sudo_pass
+ env:
+ - name: ANSIBLE_BECOME_PASS
+ - name: ANSIBLE_SUDO_PASS
+ ini:
+ - section: sudo_become_plugin
+ key: password
+"""
+
+
+from ansible.plugins.become import BecomeBase
+
+
+class BecomeModule(BecomeBase):
+
+ name = 'community.general.sudosu'
+
+ # messages for detecting prompted password issues
+ fail = ('Sorry, try again.',)
+ missing = ('Sorry, a password is required to run sudo', 'sudo: a password is required')
+
+ def build_become_command(self, cmd, shell):
+ super(BecomeModule, self).build_become_command(cmd, shell)
+
+ if not cmd:
+ return cmd
+
+ becomecmd = 'sudo'
+
+ flags = self.get_option('become_flags') or ''
+ prompt = ''
+ if self.get_option('become_pass'):
+ self.prompt = '[sudo via ansible, key=%s] password:' % self._id
+ if flags: # this could be simplified, but kept as is for now for backwards string matching
+ flags = flags.replace('-n', '')
+ prompt = '-p "%s"' % (self.prompt)
+
+ user = self.get_option('become_user') or ''
+ if user:
+ user = '%s' % (user)
+
+ return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)])
diff --git a/ansible_collections/community/general/plugins/cache/memcached.py b/ansible_collections/community/general/plugins/cache/memcached.py
new file mode 100644
index 000000000..0bc5256b3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/cache/memcached.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2014, Brian Coca, Josh Drake, et al
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: memcached
+ short_description: Use memcached DB for cache
+ description:
+ - This cache uses JSON-formatted, per-host records saved in memcached.
+ requirements:
+ - memcache (python lib)
+ options:
+ _uri:
+ description:
+ - List of connection information for the memcached DBs
+ default: ['127.0.0.1:11211']
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
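+
+# A minimal ansible.cfg sketch wiring this plugin up as the fact cache
+# (connection values are illustrative):
+#
+# [defaults]
+# fact_caching = community.general.memcached
+# fact_caching_connection = 127.0.0.1:11211
+# fact_caching_prefix = ansible_facts
+# fact_caching_timeout = 86400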
+
+import collections
+import os
+import time
+from multiprocessing import Lock
+from itertools import chain
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common._collections_compat import MutableSet
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ import memcache
+ HAS_MEMCACHE = True
+except ImportError:
+ HAS_MEMCACHE = False
+
+display = Display()
+
+
+class ProxyClientPool(object):
+ """
+ Memcached connection pooling for thread/fork safety. Inspired by py-redis
+ connection pool.
+
+ Available connections are maintained in a deque and released in a FIFO manner.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.max_connections = kwargs.pop('max_connections', 1024)
+ self.connection_args = args
+ self.connection_kwargs = kwargs
+ self.reset()
+
+ def reset(self):
+ self.pid = os.getpid()
+ self._num_connections = 0
+ self._available_connections = collections.deque(maxlen=self.max_connections)
+ self._locked_connections = set()
+ self._lock = Lock()
+
+ def _check_safe(self):
+ if self.pid != os.getpid():
+ with self._lock:
+ if self.pid == os.getpid():
+ # bail out - another thread already acquired the lock
+ return
+ self.disconnect_all()
+ self.reset()
+
+ def get_connection(self):
+ self._check_safe()
+ try:
+ connection = self._available_connections.popleft()
+ except IndexError:
+ connection = self.create_connection()
+ self._locked_connections.add(connection)
+ return connection
+
+ def create_connection(self):
+ if self._num_connections >= self.max_connections:
+ raise RuntimeError("Too many memcached connections")
+ self._num_connections += 1
+ return memcache.Client(*self.connection_args, **self.connection_kwargs)
+
+ def release_connection(self, connection):
+ self._check_safe()
+ self._locked_connections.remove(connection)
+ self._available_connections.append(connection)
+
+ def disconnect_all(self):
+ for conn in chain(self._available_connections, self._locked_connections):
+ conn.disconnect_all()
+
+ def __getattr__(self, name):
+ def wrapped(*args, **kwargs):
+ return self._proxy_client(name, *args, **kwargs)
+ return wrapped
+
+ def _proxy_client(self, name, *args, **kwargs):
+ conn = self.get_connection()
+
+ try:
+ return getattr(conn, name)(*args, **kwargs)
+ finally:
+ self.release_connection(conn)
+
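+# Illustrative use of the pool: plain attribute access borrows a connection,
+# forwards the call, and returns the connection to the deque afterwards, so a
+# caller can treat the pool like a single memcache.Client:
+#
+# pool = ProxyClientPool(['127.0.0.1:11211'], debug=0)
+# pool.set('some_key', 'some_value')  # proxied through _proxy_client()
+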
+
+class CacheModuleKeys(MutableSet):
+ """
+ A set subclass that keeps track of insertion time and persists
+ the set in memcached.
+ """
+ PREFIX = 'ansible_cache_keys'
+
+ def __init__(self, cache, *args, **kwargs):
+ self._cache = cache
+ self._keyset = dict(*args, **kwargs)
+
+ def __contains__(self, key):
+ return key in self._keyset
+
+ def __iter__(self):
+ return iter(self._keyset)
+
+ def __len__(self):
+ return len(self._keyset)
+
+ def add(self, value):
+ self._keyset[value] = time.time()
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def discard(self, value):
+ # discard() must not raise when the value is absent (MutableSet contract)
+ if value in self._keyset:
+ del self._keyset[value]
+ self._cache.set(self.PREFIX, self._keyset)
+
+ def remove_by_timerange(self, s_min, s_max):
+ for k in list(self._keyset.keys()):
+ t = self._keyset[k]
+ if s_min < t < s_max:
+ del self._keyset[k]
+ self._cache.set(self.PREFIX, self._keyset)
+
+
+class CacheModule(BaseCacheModule):
+
+ def __init__(self, *args, **kwargs):
+ connection = ['127.0.0.1:11211']
+
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ connection = self.get_option('_uri')
+ self._timeout = self.get_option('_timeout')
+ self._prefix = self.get_option('_prefix')
+
+ if not HAS_MEMCACHE:
+ raise AnsibleError("python-memcached is required for the memcached fact cache")
+
+ self._cache = {}
+ self._db = ProxyClientPool(connection, debug=0)
+ self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or [])
+
+ def _make_key(self, key):
+ return "{0}{1}".format(self._prefix, key)
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._keys.remove_by_timerange(0, expiry_age)
+
+ def get(self, key):
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the keyset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = value
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+ self._db.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
+ self._cache[key] = value
+ self._keys.add(key)
+
+ def keys(self):
+ self._expire_keys()
+ return list(iter(self._keys))
+
+ def contains(self, key):
+ self._expire_keys()
+ return key in self._keys
+
+ def delete(self, key):
+ # guard the local dict: get() calls delete() for keys that were never
+ # cached locally, which would otherwise raise KeyError here
+ if key in self._cache:
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._keys.discard(key)
+
+ def flush(self):
+ for key in self.keys():
+ self.delete(key)
+
+ def copy(self):
+ # CacheModuleKeys provides no copy(); return a plain dict of all cached
+ # entries instead, mirroring the redis cache plugin's copy()
+ return dict((k, self.get(k)) for k in self.keys())
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
diff --git a/ansible_collections/community/general/plugins/cache/pickle.py b/ansible_collections/community/general/plugins/cache/pickle.py
new file mode 100644
index 000000000..06b673921
--- /dev/null
+++ b/ansible_collections/community/general/plugins/cache/pickle.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Brian Coca
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: pickle
+ short_description: Pickle formatted files.
+ description:
+ - This cache uses Python's pickle serialization format, in per-host files, saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: true
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+'''
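+
+# A minimal ansible.cfg sketch (the path is illustrative):
+#
+# [defaults]
+# fact_caching = community.general.pickle
+# fact_caching_connection = /tmp/ansible_fact_cache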
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from ansible.module_utils.six import PY3
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by pickle files.
+ """
+
+ def _load(self, filepath):
+ # Pickle is a binary format
+ with open(filepath, 'rb') as f:
+ if PY3:
+ return pickle.load(f, encoding='bytes')
+ else:
+ return pickle.load(f)
+
+ def _dump(self, value, filepath):
+ with open(filepath, 'wb') as f:
+ # Use pickle protocol 2 which is compatible with Python 2.3+.
+ pickle.dump(value, f, protocol=2)
diff --git a/ansible_collections/community/general/plugins/cache/redis.py b/ansible_collections/community/general/plugins/cache/redis.py
new file mode 100644
index 000000000..8c0621717
--- /dev/null
+++ b/ansible_collections/community/general/plugins/cache/redis.py
@@ -0,0 +1,232 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2014, Brian Coca, Josh Drake, et al
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: redis
+ short_description: Use Redis DB for cache
+ description:
+ - This cache uses JSON-formatted, per-host records saved in Redis.
+ requirements:
+ - redis>=2.4.5 (python lib)
+ options:
+ _uri:
+ description:
+ - A colon separated string of connection information for Redis.
+ - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme).
+ - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme).
+ - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0.
+ required: true
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the DB entries
+ default: ansible_facts
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _keyset_name:
+ description: User defined name for the cache keyset.
+ default: ansible_cache_keys
+ env:
+ - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME
+ ini:
+ - key: fact_caching_redis_keyset_name
+ section: defaults
+ version_added: 1.3.0
+ _sentinel_service_name:
+ description: The redis sentinel service name (or referenced as cluster name).
+ env:
+ - name: ANSIBLE_CACHE_REDIS_SENTINEL
+ ini:
+ - key: fact_caching_redis_sentinel
+ section: defaults
+ version_added: 1.3.0
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
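+
+# A minimal ansible.cfg sketch; the connection strings are illustrative and
+# follow the _uri formats documented above (the sentinel service name included):
+#
+# [defaults]
+# fact_caching = community.general.redis
+# fact_caching_connection = localhost:6379:0:changeme
+# # TLS in transit:
+# # fact_caching_connection = tls://localhost:6379:0:changeme
+# # redis sentinel (requires redis>=2.9.0):
+# # fact_caching_connection = localhost:26379;localhost2:26379;0:changeme
+# # fact_caching_redis_sentinel = mymaster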
+
+import re
+import time
+import json
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
+from ansible.plugins.cache import BaseCacheModule
+from ansible.utils.display import Display
+
+try:
+ from redis import StrictRedis, VERSION
+ HAS_REDIS = True
+except ImportError:
+ HAS_REDIS = False
+
+display = Display()
+
+
+class CacheModule(BaseCacheModule):
+ """
+ A caching module backed by redis.
+
+ Keys are maintained in a zset with their score being the timestamp
+ when they are inserted. This allows for the usage of 'zremrangebyscore'
+ to expire keys. This mechanism is used, rather than a pattern-matched 'scan',
+ for performance.
+ """
+ _sentinel_service_name = None
+ re_url_conn = re.compile(r'^([^:]+|\[[^]]+\]):(\d+):(\d+)(?::(.*))?$')
+ re_sent_conn = re.compile(r'^(.*):(\d+)$')
+
+ def __init__(self, *args, **kwargs):
+ uri = ''
+
+ super(CacheModule, self).__init__(*args, **kwargs)
+ if self.get_option('_uri'):
+ uri = self.get_option('_uri')
+ self._timeout = float(self.get_option('_timeout'))
+ self._prefix = self.get_option('_prefix')
+ self._keys_set = self.get_option('_keyset_name')
+ self._sentinel_service_name = self.get_option('_sentinel_service_name')
+
+ if not HAS_REDIS:
+ raise AnsibleError("The 'redis' python module (version 2.4.5 or newer) is required for the redis fact cache, 'pip install redis'")
+
+ self._cache = {}
+ kw = {}
+
+ # tls connection
+ tlsprefix = 'tls://'
+ if uri.startswith(tlsprefix):
+ kw['ssl'] = True
+ uri = uri[len(tlsprefix):]
+
+ # redis sentinel connection
+ if self._sentinel_service_name:
+ self._db = self._get_sentinel_connection(uri, kw)
+ # normal connection
+ else:
+ connection = self._parse_connection(self.re_url_conn, uri)
+ self._db = StrictRedis(*connection, **kw)
+
+ display.vv('Redis connection: %s' % self._db)
+
+ @staticmethod
+ def _parse_connection(re_patt, uri):
+ match = re_patt.match(uri)
+ if not match:
+ raise AnsibleError("Unable to parse connection string")
+ return match.groups()
+
+ def _get_sentinel_connection(self, uri, kw):
+ """
+ get sentinel connection details from _uri
+ """
+ try:
+ from redis.sentinel import Sentinel
+ except ImportError:
+ raise AnsibleError("The 'redis' python module (version 2.9.0 or newer) is required to use redis sentinel.")
+
+ if ';' not in uri:
+ raise AnsibleError('_uri does not have sentinel syntax.')
+
+ # format: "localhost:26379;localhost2:26379;0:changeme"
+ connections = uri.split(';')
+ connection_args = connections.pop(-1)
+ if len(connection_args) > 0: # handle the case where no db number is given
+ connection_args = connection_args.split(':')
+ kw['db'] = connection_args.pop(0)
+ try:
+ kw['password'] = connection_args.pop(0)
+ except IndexError:
+ pass # password is optional
+
+ sentinels = [self._parse_connection(self.re_sent_conn, shost) for shost in connections]
+ display.vv('\nUsing redis sentinels: %s' % sentinels)
+ scon = Sentinel(sentinels, **kw)
+ try:
+ return scon.master_for(self._sentinel_service_name, socket_timeout=0.2)
+ except Exception as exc:
+ raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc))
+
+ def _make_key(self, key):
+ return self._prefix + key
+
+ def get(self, key):
+
+ if key not in self._cache:
+ value = self._db.get(self._make_key(key))
+ # guard against the key not being removed from the zset;
+ # this could happen in cases where the timeout value is changed
+ # between invocations
+ if value is None:
+ self.delete(key)
+ raise KeyError
+ self._cache[key] = json.loads(value, cls=AnsibleJSONDecoder)
+
+ return self._cache.get(key)
+
+ def set(self, key, value):
+
+ value2 = json.dumps(value, cls=AnsibleJSONEncoder, sort_keys=True, indent=4)
+ if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
+ self._db.setex(self._make_key(key), int(self._timeout), value2)
+ else:
+ self._db.set(self._make_key(key), value2)
+
+ if VERSION[0] == 2:
+ self._db.zadd(self._keys_set, time.time(), key)
+ else:
+ self._db.zadd(self._keys_set, {key: time.time()})
+ self._cache[key] = value
+
+ def _expire_keys(self):
+ if self._timeout > 0:
+ expiry_age = time.time() - self._timeout
+ self._db.zremrangebyscore(self._keys_set, 0, expiry_age)
+
+ def keys(self):
+ self._expire_keys()
+ return self._db.zrange(self._keys_set, 0, -1)
+
+ def contains(self, key):
+ self._expire_keys()
+ return (self._db.zrank(self._keys_set, key) is not None)
+
+ def delete(self, key):
+ if key in self._cache:
+ del self._cache[key]
+ self._db.delete(self._make_key(key))
+ self._db.zrem(self._keys_set, key)
+
+ def flush(self):
+ for key in list(self.keys()):
+ self.delete(key)
+
+ def copy(self):
+ # TODO: there is probably a better way to do this in redis
+ ret = dict([(k, self.get(k)) for k in self.keys()])
+ return ret
+
+ def __getstate__(self):
+ return dict()
+
+ def __setstate__(self, data):
+ self.__init__()
diff --git a/ansible_collections/community/general/plugins/cache/yaml.py b/ansible_collections/community/general/plugins/cache/yaml.py
new file mode 100644
index 000000000..3a5ddf3e6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/cache/yaml.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Brian Coca
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: yaml
+ short_description: YAML formatted files.
+ description:
+ - This cache uses YAML-formatted, per-host files saved to the filesystem.
+ author: Brian Coca (@bcoca)
+ options:
+ _uri:
+ required: true
+ description:
+ - Path in which the cache plugin will save the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_CONNECTION
+ ini:
+ - key: fact_caching_connection
+ section: defaults
+ _prefix:
+ description: User defined prefix to use when creating the files
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_PREFIX
+ ini:
+ - key: fact_caching_prefix
+ section: defaults
+ _timeout:
+ default: 86400
+ description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire
+ env:
+ - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
+ ini:
+ - key: fact_caching_timeout
+ section: defaults
+ type: integer
+'''
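+
+# A minimal ansible.cfg sketch (the path is illustrative):
+#
+# [defaults]
+# fact_caching = community.general.yaml
+# fact_caching_connection = /tmp/ansible_fact_cache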
+
+
+import codecs
+
+import yaml
+
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.cache import BaseFileCacheModule
+
+
+class CacheModule(BaseFileCacheModule):
+ """
+ A caching module backed by yaml files.
+ """
+
+ def _load(self, filepath):
+ with codecs.open(filepath, 'r', encoding='utf-8') as f:
+ return AnsibleLoader(f).get_single_data()
+
+ def _dump(self, value, filepath):
+ with codecs.open(filepath, 'w', encoding='utf-8') as f:
+ yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
diff --git a/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py b/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
new file mode 100644
index 000000000..ccdbcc9cf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/cgroup_memory_recap.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: cgroup_memory_recap
+ type: aggregate
+ requirements:
+ - whitelist in configuration
+ - cgroups
+ short_description: Profiles maximum memory usage of tasks and full execution using cgroups
+ description:
+ - This is an Ansible callback plugin that uses cgroups to profile the maximum memory usage of Ansible and of individual tasks, and displays a recap at the end.
+ notes:
+ - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...).
+ - This cgroup should only be used by ansible to get accurate results.
+ - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile).
+ options:
+ max_mem_file:
+ required: true
+ description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes).
+ env:
+ - name: CGROUP_MAX_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: max_mem_file
+ cur_mem_file:
+ required: true
+ description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes).
+ env:
+ - name: CGROUP_CUR_MEM_FILE
+ ini:
+ - section: callback_cgroupmemrecap
+ key: cur_mem_file
+'''
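+
+# A minimal end-to-end sketch based on the notes above; the cgroup name, user
+# and playbook are illustrative (the enabling key is callback_whitelist on
+# older Ansible releases, callbacks_enabled on newer ones):
+#
+# $ sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile
+# $ CGROUP_MAX_MEM_FILE=/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes \
+#   CGROUP_CUR_MEM_FILE=/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes \
+#   cgexec -g memory:ansible_profile ansible-playbook site.yml
+#
+# with ansible.cfg containing:
+#
+# [defaults]
+# callback_whitelist = community.general.cgroup_memory_recap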
+
+import time
+import threading
+
+from ansible.plugins.callback import CallbackBase
+
+
+class MemProf(threading.Thread):
+ """Python thread for recording memory usage"""
+ def __init__(self, path, obj=None):
+ threading.Thread.__init__(self)
+ self.obj = obj
+ self.path = path
+ self.results = []
+ self.running = True
+
+ def run(self):
+ while self.running:
+ with open(self.path) as f:
+ val = f.read()
+ self.results.append(int(val.strip()) / 1024 / 1024)
+ time.sleep(0.001)
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.cgroup_memory_recap'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display)
+
+ self._task_memprof = None
+
+ self.task_results = []
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.cgroup_max_file = self.get_option('max_mem_file')
+ self.cgroup_current_file = self.get_option('cur_mem_file')
+
+ with open(self.cgroup_max_file, 'w+') as f:
+ f.write('0')
+
+ def _profile_memory(self, obj=None):
+ prev_task = None
+ results = None
+ try:
+ self._task_memprof.running = False
+ results = self._task_memprof.results
+ prev_task = self._task_memprof.obj
+ except AttributeError:
+ pass
+
+ if obj is not None:
+ self._task_memprof = MemProf(self.cgroup_current_file, obj=obj)
+ self._task_memprof.start()
+
+ if results is not None:
+ self.task_results.append((prev_task, max(results)))
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._profile_memory(task)
+
+ def v2_playbook_on_stats(self, stats):
+ self._profile_memory()
+
+ with open(self.cgroup_max_file) as f:
+ max_results = int(f.read().strip()) / 1024 / 1024
+
+ self._display.banner('CGROUP MEMORY RECAP')
+ self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results)
+
+ for task, memory in self.task_results:
+ self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory))
diff --git a/ansible_collections/community/general/plugins/callback/context_demo.py b/ansible_collections/community/general/plugins/callback/context_demo.py
new file mode 100644
index 000000000..b9558fc06
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/context_demo.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: context_demo
+ type: aggregate
+ short_description: demo callback that adds play/task context
+ description:
+ - Displays some play and task context along with normal output.
+ - This is mostly for demo purposes.
+ requirements:
+ - whitelist in configuration
+'''
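+
+# To try the demo, enable it next to the regular stdout callback; the key is
+# callback_whitelist on older Ansible releases, callbacks_enabled on newer:
+#
+# [defaults]
+# callback_whitelist = community.general.context_demo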
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ This is a very trivial example of how any callback function can get at play and task objects.
+ play will be None for runner invocations, and task will be None for 'setup' invocations.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'community.general.context_demo'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, *args, **kwargs):
+ super(CallbackModule, self).__init__(*args, **kwargs)
+ self.task = None
+ self.play = None
+
+ def v2_on_any(self, *args, **kwargs):
+ self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task))
+
+ self._display.display(" --- ARGS ")
+ for i, a in enumerate(args):
+ self._display.display(' %s: %s' % (i, a))
+
+ self._display.display(" --- KWARGS ")
+ for k in kwargs:
+ self._display.display(' %s: %s' % (k, kwargs[k]))
+
+ def v2_playbook_on_play_start(self, play):
+ self.play = play
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
diff --git a/ansible_collections/community/general/plugins/callback/counter_enabled.py b/ansible_collections/community/general/plugins/callback/counter_enabled.py
new file mode 100644
index 000000000..27adc97a6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/counter_enabled.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ivan Aragones Muniesa <ivan.aragones.muniesa@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+'''
+ Counter enabled Ansible callback plugin (See DOCUMENTATION for more information)
+'''
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: counter_enabled
+ type: stdout
+ short_description: adds counters to the output items (tasks and hosts/task)
+ description:
+ - Use this callback when you need a kind of progress bar in large environments.
+ - You will know how many tasks the playbook has to run, and which one is currently running.
+ - You will know how many hosts may run a task, and which of them are currently running it.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled))
+'''
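+
+# Enabled as in the requirements above; the counters then show up in the task
+# banners and per-host lines, roughly as below (values are illustrative, the
+# shape follows the format strings in this module):
+#
+# [defaults]
+# stdout_callback = community.general.counter_enabled
+#
+# TASK 1/12 [Gathering Facts] ****************************************
+# ok: 1/4 [host1]
+# ok: 2/4 [host2]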
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+from ansible.playbook.task_include import TaskInclude
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.counter_enabled'
+
+ _task_counter = 1
+ _task_total = 0
+ _host_counter = 1
+ _host_total = 0
+ _current_batch_total = 0
+ _previous_batch_total = 0
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self._playbook = ""
+ self._play = ""
+
+ def _all_vars(self, host=None, task=None):
+ # host and task need to be specified in case 'magic variables' (host vars, group vars, etc)
+ # need to be loaded as well
+ return self._play.get_variable_manager().get_vars(
+ play=self._play,
+ host=host,
+ task=task
+ )
+
+ def v2_playbook_on_start(self, playbook):
+ self._playbook = playbook
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if not name:
+ msg = u"play"
+ else:
+ msg = u"PLAY [%s]" % name
+
+ self._play = play
+
+ self._display.banner(msg)
+ self._play = play
+
+ self._previous_batch_total = self._current_batch_total
+ self._current_batch_total = self._previous_batch_total + len(self._all_vars()['vars']['ansible_play_batch'])
+ self._host_total = len(self._all_vars()['vars']['ansible_play_hosts_all'])
+ self._task_total = len(self._play.get_tasks()[0])
+ self._task_counter = 1
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ stat = stats.summarize(host)
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat),
+ colorize(u'ok', stat['ok'], C.COLOR_OK),
+ colorize(u'changed', stat['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', stat['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', stat['rescued'], C.COLOR_OK),
+ colorize(u'ignored', stat['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u"%s : %s %s %s %s %s %s" % (
+ hostcolor(host, stat, False),
+ colorize(u'ok', stat['ok'], None),
+ colorize(u'changed', stat['changed'], None),
+ colorize(u'unreachable', stat['unreachable'], None),
+ colorize(u'failed', stat['failures'], None),
+ colorize(u'rescued', stat['rescued'], None),
+ colorize(u'ignored', stat['ignored'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
+
+ # print custom stats
+ if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom:
+ # fallback on constants for inherited plugins missing docs
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ args = ''
+ # args can be specified as no_log in several places: in the task or in
+ # the argument spec. We can check whether the task is no_log but the
+ # argument spec can't be because that is only run on the target
+ # machine and we haven't run it there yet at this time.
+ #
+ # So we give people a config option to affect display of the args so
+ # that they can secure this if they feel that their stdout is insecure
+ # (shoulder surfing, logging stdout straight to a file, etc).
+ if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+ args = ' %s' % args
+ self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+ if self._display.verbosity >= 2:
+ path = task.get_path()
+ if path:
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+ self._host_counter = self._previous_batch_total
+ self._task_counter += 1
+
+ def v2_runner_on_ok(self, result):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if isinstance(result._task, TaskInclude):
+ return
+ elif result._result.get('changed', False):
+ if delegated_vars:
+ msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_CHANGED
+ else:
+ if delegated_vars:
+ msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+ else:
+ msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ color = C.COLOR_OK
+
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ self._clean_results(result._result, result._task.action)
+
+ if self._run_is_verbose(result):
+ msg += " => %s" % (self._dump_results(result._result),)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+
+ self._host_counter += 1
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ self._handle_exception(result._result)
+ self._handle_warnings(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+
+ else:
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+ else:
+ self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
+
+ if ignore_errors:
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
+
+ def v2_runner_on_skipped(self, result):
+ self._host_counter += 1
+
+ if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
+
+ self._clean_results(result._result, result._task.action)
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+ if self._run_is_verbose(result):
+ msg += " => %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_runner_on_unreachable(self, result):
+ self._host_counter += 1
+
+ if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
+ self._print_task_banner(result._task)
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
+ else:
+ self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total,
+ result._host.get_name(), self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
diff --git a/ansible_collections/community/general/plugins/callback/dense.py b/ansible_collections/community/general/plugins/callback/dense.py
new file mode 100644
index 000000000..490705fd2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/dense.py
@@ -0,0 +1,501 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Dag Wieers <dag@wieers.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: dense
+type: stdout
+short_description: minimal stdout output
+extends_documentation_fragment:
+- default_callback
+description:
+- When in verbose mode it will act the same as the default callback.
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- set as stdout in configuration
+'''
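+
+# A minimal ansible.cfg sketch, per the requirements above:
+#
+# [defaults]
+# stdout_callback = community.general.dense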
+
+HAS_OD = False
+try:
+ from collections import OrderedDict
+ HAS_OD = True
+except ImportError:
+ pass
+
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+from ansible.utils.color import colorize, hostcolor
+from ansible.utils.display import Display
+
+import sys
+
+display = Display()
+
+
+# Design goals:
+#
+# + On screen there should only be relevant stuff
+# - How far are we ? (during run, last line)
+# - What issues occurred
+# - What changes occurred
+# - Diff output (in diff-mode)
+#
+# + If verbosity increases, act as default output
+# So that users can easily switch to default for troubleshooting
+#
+# + Rewrite the output during processing
+# - We use the cursor to indicate where in the task we are.
+# Output after the prompt is the output of the previous task.
+# - If we would clear the line at the start of a task, there would often
+# be no information at all, so we leave it until it gets updated
+#
+# + Use the same color-conventions of Ansible
+#
+# + Ensure the verbose output (-v) is also dense.
+# Remove information that is not essential (eg. timestamps, status)
+
+
+# TODO:
+#
+# + Properly test for terminal capabilities, and fall back to default
+# + Modify Ansible mechanism so we don't need to use sys.stdout directly
+# + Find an elegant solution for progress bar line wrapping
+
+
+# FIXME: Importing constants as C simply does not work, beats me :-/
+# from ansible import constants as C
+class C:
+ COLOR_HIGHLIGHT = 'white'
+ COLOR_VERBOSE = 'blue'
+ COLOR_WARN = 'bright purple'
+ COLOR_ERROR = 'red'
+ COLOR_DEBUG = 'dark gray'
+ COLOR_DEPRECATE = 'purple'
+ COLOR_SKIP = 'cyan'
+ COLOR_UNREACHABLE = 'bright red'
+ COLOR_OK = 'green'
+ COLOR_CHANGED = 'yellow'
+
+
+# Taken from Dstat
+class vt100:
+ black = '\033[0;30m'
+ darkred = '\033[0;31m'
+ darkgreen = '\033[0;32m'
+ darkyellow = '\033[0;33m'
+ darkblue = '\033[0;34m'
+ darkmagenta = '\033[0;35m'
+ darkcyan = '\033[0;36m'
+ gray = '\033[0;37m'
+
+ darkgray = '\033[1;30m'
+ red = '\033[1;31m'
+ green = '\033[1;32m'
+ yellow = '\033[1;33m'
+ blue = '\033[1;34m'
+ magenta = '\033[1;35m'
+ cyan = '\033[1;36m'
+ white = '\033[1;37m'
+
+ blackbg = '\033[40m'
+ redbg = '\033[41m'
+ greenbg = '\033[42m'
+ yellowbg = '\033[43m'
+ bluebg = '\033[44m'
+ magentabg = '\033[45m'
+ cyanbg = '\033[46m'
+ whitebg = '\033[47m'
+
+ reset = '\033[0;0m'
+ bold = '\033[1m'
+ reverse = '\033[2m'
+ underline = '\033[4m'
+
+ clear = '\033[2J'
+# clearline = '\033[K'
+ clearline = '\033[2K'
+ save = '\033[s'
+ restore = '\033[u'
+ save_all = '\0337'
+ restore_all = '\0338'
+ linewrap = '\033[7h'
+ nolinewrap = '\033[7l'
+
+ up = '\033[1A'
+ down = '\033[1B'
+ right = '\033[1C'
+ left = '\033[1D'
+
+
+colors = dict(
+ ok=vt100.darkgreen,
+ changed=vt100.darkyellow,
+ skipped=vt100.darkcyan,
+ ignored=vt100.cyanbg + vt100.red,
+ failed=vt100.darkred,
+ unreachable=vt100.red,
+)
+
+states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ This is the dense callback interface, where screen real estate is still valued.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'dense'
+
+ def __init__(self):
+
+ # From CallbackModule
+ self._display = display
+
+ if HAS_OD:
+
+ self.disabled = False
+ self.super_ref = super(CallbackModule, self)
+ self.super_ref.__init__()
+
+ # Attributes to remove from results for more density
+ self.removed_attributes = (
+ # 'changed',
+ 'delta',
+ # 'diff',
+ 'end',
+ 'failed',
+ 'failed_when_result',
+ 'invocation',
+ 'start',
+ 'stdout_lines',
+ )
+
+ # Initiate data structures
+ self.hosts = OrderedDict()
+ self.keep = False
+ self.shown_title = False
+ self.count = dict(play=0, handler=0, task=0)
+ self.type = 'foo'
+
+ # Start immediately on the first line
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
+ self.disabled = True
+
+ def __del__(self):
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+
+ def _add_host(self, result, status):
+ name = result._host.get_name()
+
+ # Add a new status in case a failed task is ignored
+ if status == 'failed' and result._task.ignore_errors:
+ status = 'ignored'
+
+ # Check if we have to update an existing state (when looping over items)
+ if name not in self.hosts:
+ self.hosts[name] = dict(state=status)
+ elif states.index(self.hosts[name]['state']) < states.index(status):
+ self.hosts[name]['state'] = status
+
+ # Store delegated hostname, if needed
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ self.hosts[name]['delegate'] = delegated_vars['ansible_host']
+
+ # Print progress bar
+ self._display_progress(result)
+
+# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
+# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._result.get('diff', False)):
+ # Ensure that tasks with changes/failures stay on-screen
+ if status in ['changed', 'failed', 'unreachable']:
+ self.keep = True
+
+ if self._display.verbosity == 1:
+ # Print task title, if needed
+ self._display_task_banner()
+ self._display_results(result, status)
+
+ def _clean_results(self, result):
+ # Remove non-essential attributes
+ for attr in self.removed_attributes:
+ if attr in result:
+ del result[attr]
+
+ # Remove empty attributes (list, dict, str)
+ for attr in result.copy():
+ if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
+ if not result[attr]:
+ del result[attr]
+
+ def _handle_exceptions(self, result):
+ if 'exception' in result:
+ # Remove the exception from the result so it's not shown every time
+ del result['exception']
+
+ if self._display.verbosity == 1:
+ return "An exception occurred during task execution. To see the full traceback, use -vvv."
+
+ def _display_progress(self, result=None):
+ # Always rewrite the complete line
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
+ sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ # Print out each host in its own status-color
+ for name in self.hosts:
+ sys.stdout.write(' ')
+ if self.hosts[name].get('delegate', None):
+ sys.stdout.write(self.hosts[name]['delegate'] + '>')
+ sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
+ sys.stdout.flush()
+
+# if result._result.get('diff', False):
+# sys.stdout.write('\n' + vt100.linewrap)
+ sys.stdout.write(vt100.linewrap)
+
+# self.keep = True
+
+ def _display_task_banner(self):
+ if not self.shown_title:
+ self.shown_title = True
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+ sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ def _display_results(self, result, status):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ self._clean_results(result._result)
+
+ dump = ''
+ if result._task.action == 'include':
+ return
+ elif status == 'ok':
+ return
+ elif status == 'ignored':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'failed':
+ dump = self._handle_exceptions(result._result)
+ elif status == 'unreachable':
+ dump = result._result['msg']
+
+ if not dump:
+ dump = self._dump_results(result._result)
+
+ if result._task.loop and 'results' in result._result:
+ self._process_items(result)
+ else:
+ sys.stdout.write(colors[status] + status + ': ')
+
+ delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ if delegated_vars:
+ sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
+ else:
+ sys.stdout.write(result._host.get_name())
+
+ sys.stdout.write(': ' + dump + '\n')
+ sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ if status == 'changed':
+ self._handle_warnings(result._result)
+
+ def v2_playbook_on_play_start(self, play):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
+
+ # Reset at the start of each play
+ self.keep = False
+ self.count.update(dict(handler=0, task=0))
+ self.count['play'] += 1
+ self.play = play
+
+ # Write the next play on screen IN UPPERCASE, and make it permanent
+ name = play.get_name().strip()
+ if not name:
+ name = 'unnamed'
+ sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ # Do not clear line, since we want to retain the previous output
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
+
+ # Reset at the start of each task
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'task'
+
+ # Enumerate task if not setup (task names are too long for dense output)
+ if task.get_name() != 'setup':
+ self.count['task'] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ # Leave the previous task on screen (as it has changes/errors)
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
+
+ # Reset at the start of each handler
+ self.keep = False
+ self.shown_title = False
+ self.hosts = OrderedDict()
+ self.task = task
+ self.type = 'handler'
+
+ # Enumerate handler if not setup (handler names may be too long for dense output)
+ if task.get_name() != 'setup':
+ self.count[self.type] += 1
+
+ # Write the next task on screen (behind the prompt is the previous output)
+ sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
+ sys.stdout.write(vt100.reset)
+ sys.stdout.flush()
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ # TBD
+ sys.stdout.write('cleanup.')
+ sys.stdout.flush()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._add_host(result, 'failed')
+
+ def v2_runner_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ def v2_runner_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_runner_on_unreachable(self, result):
+ self._add_host(result, 'unreachable')
+
+ def v2_runner_on_include(self, included_file):
+ pass
+
+ def v2_runner_on_file_diff(self, result, diff):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_runner_on_file_diff(result, diff)
+ sys.stdout.write(vt100.reset)
+
+ def v2_on_file_diff(self, result):
+ sys.stdout.write(vt100.bold)
+ self.super_ref.v2_on_file_diff(result)
+ sys.stdout.write(vt100.reset)
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_ok(self, result):
+ self.v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_ok(self, result):
+ if result._result.get('changed', False):
+ self._add_host(result, 'changed')
+ else:
+ self._add_host(result, 'ok')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_failed(self, result):
+ self.v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._add_host(result, 'failed')
+
+ # Old definition in v2.0
+ def v2_playbook_item_on_skipped(self, result):
+ self.v2_runner_item_on_skipped(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._add_host(result, 'skipped')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+ self.keep = False
+
+ sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ def v2_playbook_on_include(self, included_file):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ if self._display.verbosity == 0 and self.keep:
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ else:
+ sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
+
+ # In normal mode screen output should be sufficient, summary is redundant
+ if self._display.verbosity == 0:
+ return
+
+ sys.stdout.write(vt100.bold + vt100.underline)
+ sys.stdout.write('SUMMARY')
+
+ sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
+ sys.stdout.flush()
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+ self._display.display(
+ u"%s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN),
+ ),
+ screen_only=True
+ )
+
+
+# When using -vv or higher, or when OrderedDict is unavailable, fall back to the default callback
+if display.verbosity >= 2 or not HAS_OD:
+ CallbackModule = CallbackModule_default
diff --git a/ansible_collections/community/general/plugins/callback/diy.py b/ansible_collections/community/general/plugins/callback/diy.py
new file mode 100644
index 000000000..75b3f4e24
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/diy.py
@@ -0,0 +1,1417 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Trevor Highfill <trevor.highfill@outlook.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: diy
+ type: stdout
+ short_description: Customize the output
+ version_added: 0.2.0
+ description:
+ - Callback plugin that allows you to supply your own custom callback templates to be output.
+ author: Trevor Highfill (@theque5t)
+ extends_documentation_fragment:
+ - default_callback
+ notes:
+    - Uses the C(default) callback plugin output when a custom callback message (C(msg)) is not provided.
+    - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options.
+      The dictionary is only available in that templating context; it is not a variable accessible from the other
+      execution contexts, such as the playbook, play, or task.
+    - An option can only be set through its variable input if that variable was set in a context that is available to the
+      respective callback.
+      Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output
+      the top level variable names available to the callback.
+    - Each option value is rendered as a template before being evaluated, which allows an option to be used dynamically. For example,
+      C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}").
+ - "**Condition** for all C(msg) options:
+ if value C(is None or omit),
+ then the option is not being used.
+ **Effect**: use of the C(default) callback plugin for output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is not greater than 0),
+ then the option is being used without output.
+ **Effect**: suppress output"
+ - "**Condition** for all C(msg) options:
+ if value C(is not None and not omit and length is greater than 0),
+ then the option is being used with output.
+ **Effect**: render value as template and output"
+ - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan),
+ C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)"
+ seealso:
+ - name: default – default Ansible screen output
+ description: The official documentation on the B(default) callback plugin.
+ link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html
+ requirements:
+ - set as stdout_callback in configuration
+ options:
+ on_any_msg:
+ description: Output to be used for callback on_any.
+ ini:
+ - section: callback_diy
+ key: on_any_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG
+ vars:
+ - name: ansible_callback_diy_on_any_msg
+ type: str
+
+ on_any_msg_color:
+ description:
+ - Output color to be used for I(on_any_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_any_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_any_msg_color
+ type: str
+
+ runner_on_failed_msg:
+ description: Output to be used for callback runner_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg
+ type: str
+
+ runner_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_failed_msg_color
+ type: str
+
+ runner_on_ok_msg:
+ description: Output to be used for callback runner_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg
+ type: str
+
+ runner_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_ok_msg_color
+ type: str
+
+ runner_on_skipped_msg:
+ description: Output to be used for callback runner_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg
+ type: str
+
+ runner_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_skipped_msg_color
+ type: str
+
+ runner_on_unreachable_msg:
+ description: Output to be used for callback runner_on_unreachable.
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg
+ type: str
+
+ runner_on_unreachable_msg_color:
+ description:
+ - Output color to be used for I(runner_on_unreachable_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_unreachable_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_unreachable_msg_color
+ type: str
+
+ playbook_on_start_msg:
+ description: Output to be used for callback playbook_on_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg
+ type: str
+
+ playbook_on_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_start_msg_color
+ type: str
+
+ playbook_on_notify_msg:
+ description: Output to be used for callback playbook_on_notify.
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg
+ type: str
+
+ playbook_on_notify_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_notify_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_notify_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_notify_msg_color
+ type: str
+
+ playbook_on_no_hosts_matched_msg:
+ description: Output to be used for callback playbook_on_no_hosts_matched.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg
+ type: str
+
+ playbook_on_no_hosts_matched_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_matched_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_matched_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color
+ type: str
+
+ playbook_on_no_hosts_remaining_msg:
+ description: Output to be used for callback playbook_on_no_hosts_remaining.
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg
+ type: str
+
+ playbook_on_no_hosts_remaining_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_no_hosts_remaining_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_no_hosts_remaining_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color
+ type: str
+
+ playbook_on_task_start_msg:
+ description: Output to be used for callback playbook_on_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg
+ type: str
+
+ playbook_on_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_task_start_msg_color
+ type: str
+
+ playbook_on_handler_task_start_msg:
+ description: Output to be used for callback playbook_on_handler_task_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg
+ type: str
+
+ playbook_on_handler_task_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_handler_task_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_handler_task_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color
+ type: str
+
+ playbook_on_vars_prompt_msg:
+ description: Output to be used for callback playbook_on_vars_prompt.
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg
+ type: str
+
+ playbook_on_vars_prompt_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_vars_prompt_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_vars_prompt_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color
+ type: str
+
+ playbook_on_play_start_msg:
+ description: Output to be used for callback playbook_on_play_start.
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg
+ type: str
+
+ playbook_on_play_start_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_play_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_play_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_play_start_msg_color
+ type: str
+
+ playbook_on_stats_msg:
+ description: Output to be used for callback playbook_on_stats.
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg
+ type: str
+
+ playbook_on_stats_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_stats_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_stats_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_stats_msg_color
+ type: str
+
+ on_file_diff_msg:
+ description: Output to be used for callback on_file_diff.
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg
+ type: str
+
+ on_file_diff_msg_color:
+ description:
+ - Output color to be used for I(on_file_diff_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: on_file_diff_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_on_file_diff_msg_color
+ type: str
+
+ playbook_on_include_msg:
+ description: Output to be used for callback playbook_on_include.
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg
+ type: str
+
+ playbook_on_include_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_include_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_include_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_include_msg_color
+ type: str
+
+ runner_item_on_ok_msg:
+ description: Output to be used for callback runner_item_on_ok.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg
+ type: str
+
+ runner_item_on_ok_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_ok_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_ok_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_ok_msg_color
+ type: str
+
+ runner_item_on_failed_msg:
+ description: Output to be used for callback runner_item_on_failed.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg
+ type: str
+
+ runner_item_on_failed_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_failed_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_failed_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_failed_msg_color
+ type: str
+
+ runner_item_on_skipped_msg:
+ description: Output to be used for callback runner_item_on_skipped.
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg
+ type: str
+
+ runner_item_on_skipped_msg_color:
+ description:
+ - Output color to be used for I(runner_item_on_skipped_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_item_on_skipped_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_item_on_skipped_msg_color
+ type: str
+
+ runner_retry_msg:
+ description: Output to be used for callback runner_retry.
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg
+ type: str
+
+ runner_retry_msg_color:
+ description:
+ - Output color to be used for I(runner_retry_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_retry_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_retry_msg_color
+ type: str
+
+ runner_on_start_msg:
+ description: Output to be used for callback runner_on_start.
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg
+ type: str
+
+ runner_on_start_msg_color:
+ description:
+ - Output color to be used for I(runner_on_start_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_start_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_start_msg_color
+ type: str
+
+ runner_on_no_hosts_msg:
+ description: Output to be used for callback runner_on_no_hosts.
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg
+ type: str
+
+ runner_on_no_hosts_msg_color:
+ description:
+ - Output color to be used for I(runner_on_no_hosts_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: runner_on_no_hosts_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_runner_on_no_hosts_msg_color
+ type: str
+
+ playbook_on_setup_msg:
+ description: Output to be used for callback playbook_on_setup.
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg
+ type: str
+
+ playbook_on_setup_msg_color:
+ description:
+ - Output color to be used for I(playbook_on_setup_msg).
+ - Template should render a L(valid color value,#notes).
+ ini:
+ - section: callback_diy
+ key: playbook_on_setup_msg_color
+ env:
+ - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR
+ vars:
+ - name: ansible_callback_diy_playbook_on_setup_msg_color
+ type: str
+'''
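+
+# Editorial sketch (not part of the original plugin): the three C(msg) states
+# described in the notes above, reduced to a standalone helper. The omit value
+# here is a plain sentinel standing in for Ansible's real omit placeholder;
+# all names are illustrative only.
+def _msg_state_example(msg, omit='__omit_place_holder__'):
+    if msg is None or msg == omit:
+        return 'default'    # option not used: fall back to the default callback
+    if len(msg) == 0:
+        return 'suppress'   # option used without output
+    return 'render'         # option used: render msg as a template and display it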
+
+EXAMPLES = r'''
+ansible.cfg: >
+ # Enable plugin
+ [defaults]
+ stdout_callback=community.general.diy
+
+ [callback_diy]
+ # Output when playbook starts
+ playbook_on_start_msg="DIY output(via ansible.cfg): playbook example: {{ ansible_callback_diy.playbook.file_name }}"
+ playbook_on_start_msg_color=yellow
+
+ # Comment out to allow default plugin output
+ # playbook_on_play_start_msg="PLAY: starting play {{ ansible_callback_diy.play.name }}"
+
+ # Accept on_skipped_msg or ansible_callback_diy_runner_on_skipped_msg as input vars
+ # If neither are supplied, omit the option
+ runner_on_skipped_msg="{{ on_skipped_msg | default(ansible_callback_diy_runner_on_skipped_msg) | default(omit) }}"
+
+ # Newline after every callback
+ # on_any_msg='{{ " " | join("\n") }}'
+
+playbook.yml: >
+ ---
+ - name: "Default plugin output: play example"
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Default plugin output
+ ansible.builtin.debug:
+ msg: default plugin output
+
+ - name: Override from play vars
+ hosts: localhost
+ gather_facts: false
+ vars:
+ ansible_connection: local
+ green: "\e[0m\e[38;5;82m"
+ yellow: "\e[0m\e[38;5;11m"
+ bright_purple: "\e[0m\e[38;5;105m"
+ cyan: "\e[0m\e[38;5;51m"
+ green_bg_black_fg: "\e[0m\e[48;5;40m\e[38;5;232m"
+ yellow_bg_black_fg: "\e[0m\e[48;5;226m\e[38;5;232m"
+ purple_bg_white_fg: "\e[0m\e[48;5;57m\e[38;5;255m"
+ cyan_bg_black_fg: "\e[0m\e[48;5;87m\e[38;5;232m"
+ magenta: "\e[38;5;198m"
+ white: "\e[0m\e[38;5;255m"
+ ansible_callback_diy_playbook_on_play_start_msg: "\n{{green}}DIY output(via play vars): play example: {{magenta}}{{ansible_callback_diy.play.name}}\n\n"
+ ansible_callback_diy_playbook_on_task_start_msg: "DIY output(via play vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: cyan
+ ansible_callback_diy_playbook_on_stats_msg: |+2
+ CUSTOM STATS
+ ==============================
+ {% for key in ansible_callback_diy.stats | sort %}
+ {% if ansible_callback_diy.stats[key] %}
+ {% if key == 'ok' %}
+ {% set color_one = lookup('vars','green_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','green') %}
+ {% elif key == 'changed' %}
+ {% set color_one = lookup('vars','yellow_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','yellow') %}
+ {% elif key == 'processed' %}
+ {% set color_one = lookup('vars','purple_bg_white_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','bright_purple') %}
+ {% elif key == 'skipped' %}
+ {% set color_one = lookup('vars','cyan_bg_black_fg') %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% set color_two = lookup('vars','cyan') %}
+ {% else %}
+ {% set color_one = "" %}
+ {% set prefix = "" %}
+ {% set suffix = "" %}
+ {% set color_two = "" %}
+ {% endif %}
+ {{ color_one }}{{ "%s%s%s" | format(prefix,key,suffix) }}{{ color_two }}: {{ ansible_callback_diy.stats[key] | to_nice_yaml }}
+ {% endif %}
+ {% endfor %}
+
+ tasks:
+ - name: Custom banner with default plugin result output
+ ansible.builtin.debug:
+ msg: "default plugin output: result example"
+
+ - name: Override from task vars
+ ansible.builtin.debug:
+ msg: "example {{ two }}"
+ changed_when: true
+ vars:
+ white_fg_red_bg: "\e[0m\e[48;5;1m"
+ two: "{{ white_fg_red_bg }} 2 "
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright magenta
+ ansible_callback_diy_runner_on_ok_msg: "DIY output(via task vars): result example: \n{{ ansible_callback_diy.result.output.msg }}\n"
+ ansible_callback_diy_runner_on_ok_msg_color: "{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"
+
+ - name: Suppress output
+ ansible.builtin.debug:
+ msg: i should not be displayed
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ ansible_callback_diy_runner_on_ok_msg: ""
+
+ - name: Using alias vars (see ansible.cfg)
+ ansible.builtin.debug:
+ msg:
+ when: false
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ""
+ on_skipped_msg: "DIY output(via task vars): skipped example:\n\e[0m\e[38;5;4m\u25b6\u25b6 {{ ansible_callback_diy.result.task.name }}\n"
+ on_skipped_msg_color: white
+
+ - name: Just stdout
+ ansible.builtin.command: echo some stdout
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\n"
+ ansible_callback_diy_runner_on_ok_msg: "{{ ansible_callback_diy.result.output.stdout }}\n"
+
+ - name: Multiline output
+ ansible.builtin.debug:
+ msg: "{{ multiline }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ multiline: "line\nline\nline"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ some
+ {{ ansible_callback_diy.result.output.msg }}
+ output
+
+ ansible_callback_diy_playbook_on_task_start_msg_color: bright blue
+
+ - name: Indentation
+ ansible.builtin.debug:
+ msg: "{{ item.msg }}"
+ with_items:
+ - { indent: 1, msg: one., color: red }
+ - { indent: 2, msg: two.., color: yellow }
+ - { indent: 3, msg: three..., color: bright yellow }
+ vars:
+ ansible_callback_diy_runner_item_on_ok_msg: "{{ ansible_callback_diy.result.output.msg | indent(item.indent, True) }}"
+ ansible_callback_diy_runner_item_on_ok_msg_color: "{{ item.color }}"
+ ansible_callback_diy_runner_on_ok_msg: "GO!!!"
+ ansible_callback_diy_runner_on_ok_msg_color: bright green
+
+ - name: Using lookup and template as file
+ ansible.builtin.shell: "echo {% raw %}'output from {{ file_name }}'{% endraw %} > {{ file_name }}"
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ file_name: diy_file_template_example
+ ansible_callback_diy_runner_on_ok_msg: "{{ lookup('template', file_name) }}"
+
+ - name: 'Look at top level vars available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for var in (ansible_callback_diy.top_level_var_names|reject('match','vars|ansible_callback_diy.*')) | sort %}
+ {{ green }}{{ var }}:
+ {{ white }}{{ lookup('vars', var) }}
+
+ {% endfor %}
+ ansible_callback_diy_runner_on_ok_msg_color: white
+
+ - name: 'Look at event data available to the "runner_on_ok" callback'
+ ansible.builtin.debug:
+ msg: ''
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: "\nDIY output(via task vars): task example: {{ ansible_callback_diy.task.name }}"
+ ansible_callback_diy_runner_on_ok_msg: |+2
+ {% for key in ansible_callback_diy | sort %}
+ {{ green }}{{ key }}:
+ {{ white }}{{ ansible_callback_diy[key] }}
+
+ {% endfor %}
+'''
+
+import sys
+from contextlib import contextmanager
+from ansible.template import Templar
+from ansible.vars.manager import VariableManager
+from ansible.plugins.callback.default import CallbackModule as Default
+from ansible.module_utils.common.text.converters import to_text
+
+
+class DummyStdout(object):
+ def flush(self):
+ pass
+
+ def write(self, b):
+ pass
+
+ def writelines(self, l):
+ pass
+
+
+class CallbackModule(Default):
+ """
+ Callback plugin that allows you to supply your own custom callback templates to be output.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.diy'
+
+ DIY_NS = 'ansible_callback_diy'
+
+    @contextmanager
+    def _suppress_stdout(self, enabled):
+        saved_stdout = sys.stdout
+        if enabled:
+            sys.stdout = DummyStdout()
+        try:
+            yield
+        finally:
+            # Restore stdout even if the wrapped callback raises.
+            sys.stdout = saved_stdout
+
+ def _get_output_specification(self, loader, variables):
+ _ret = {}
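+        # Derive the callback name from the caller via frame introspection:
+        # for example v2_runner_on_ok becomes 'runner_on_ok', which maps to
+        # the 'runner_on_ok_msg' and 'runner_on_ok_msg_color' options.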
+ _calling_method = sys._getframe(1).f_code.co_name
+ _callback_type = (_calling_method[3:] if _calling_method[:3] == "v2_" else _calling_method)
+ _callback_options = ['msg', 'msg_color']
+
+ for option in _callback_options:
+ _option_name = '%s_%s' % (_callback_type, option)
+ _option_template = variables.get(
+ self.DIY_NS + "_" + _option_name,
+ self.get_option(_option_name)
+ )
+ _ret.update({option: self._template(
+ loader=loader,
+ template=_option_template,
+ variables=variables
+ )})
+
+ _ret.update({'vars': variables})
+
+ return _ret
+
+ def _using_diy(self, spec):
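+        # "Using DIY" means msg was set and not omitted; an empty string still
+        # counts as in use (its output is then suppressed in _output()).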
+ return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit'])
+
+ def _parent_has_callback(self):
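+        # Same frame trick as in _get_output_specification(): check whether the
+        # parent class implements the hook we were called from.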
+ return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name)
+
+ def _template(self, loader, template, variables):
+ _templar = Templar(loader=loader, variables=variables)
+ return _templar.template(
+ template,
+ preserve_trailing_newlines=True,
+ convert_data=False,
+ escape_backslashes=True
+ )
+
+ def _output(self, spec, stderr=False):
+ _msg = to_text(spec['msg'])
+ if len(_msg) > 0:
+ self._display.display(msg=_msg, color=spec['msg_color'], stderr=stderr)
+
+ def _get_vars(self, playbook, play=None, host=None, task=None, included_file=None,
+ handler=None, result=None, stats=None, remove_attr_ref_loop=True):
+ def _get_value(obj, attr=None, method=None):
+ if attr:
+ return getattr(obj, attr, getattr(obj, "_" + attr, None))
+
+ if method:
+ _method = getattr(obj, method)
+ return _method()
+
+        def _remove_attr_ref_loop(obj, attributes):
+            _loop_var = getattr(obj, 'loop_control', None)
+            _loop_var = (_loop_var or 'item')
+
+            # Build a new list rather than calling remove() while iterating,
+            # which would silently skip the element after each removal.
+            return [attr for attr in attributes
+                    if str(_loop_var) not in str(_get_value(obj=obj, attr=attr))]
+
+ class CallbackDIYDict(dict):
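+            # Returning self keeps this namespace out of deep copies made while
+            # building variables; it can hold references back into the live run.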
+ def __deepcopy__(self, memo):
+ return self
+
+ _ret = {}
+
+ _variable_manager = VariableManager(loader=playbook.get_loader())
+
+ _all = _variable_manager.get_vars()
+ if play:
+ _all = play.get_variable_manager().get_vars(
+ play=play,
+ host=(host if host else getattr(result, '_host', None)),
+ task=(handler if handler else task)
+ )
+ _ret.update(_all)
+
+ _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()}))
+
+ _ret[self.DIY_NS].update({'playbook': {}})
+ _playbook_attributes = ['entries', 'file_name', 'basedir']
+
+ for attr in _playbook_attributes:
+ _ret[self.DIY_NS]['playbook'].update({attr: _get_value(obj=playbook, attr=attr)})
+
+ if play:
+ _ret[self.DIY_NS].update({'play': {}})
+ _play_attributes = ['any_errors_fatal', 'become', 'become_flags', 'become_method',
+ 'become_user', 'check_mode', 'collections', 'connection',
+ 'debugger', 'diff', 'environment', 'fact_path', 'finalized',
+ 'force_handlers', 'gather_facts', 'gather_subset',
+ 'gather_timeout', 'handlers', 'hosts', 'ignore_errors',
+ 'ignore_unreachable', 'included_conditional', 'included_path',
+ 'max_fail_percentage', 'module_defaults', 'name', 'no_log',
+ 'only_tags', 'order', 'port', 'post_tasks', 'pre_tasks',
+ 'remote_user', 'removed_hosts', 'roles', 'run_once', 'serial',
+ 'skip_tags', 'squashed', 'strategy', 'tags', 'tasks', 'uuid',
+ 'validated', 'vars_files', 'vars_prompt']
+
+ for attr in _play_attributes:
+ _ret[self.DIY_NS]['play'].update({attr: _get_value(obj=play, attr=attr)})
+
+ if host:
+ _ret[self.DIY_NS].update({'host': {}})
+ _host_attributes = ['name', 'uuid', 'address', 'implicit']
+
+ for attr in _host_attributes:
+ _ret[self.DIY_NS]['host'].update({attr: _get_value(obj=host, attr=attr)})
+
+ if task:
+ _ret[self.DIY_NS].update({'task': {}})
+ _task_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'loop', 'loop_control', 'loop_with',
+ 'module_defaults', 'name', 'no_log', 'notify', 'parent', 'poll',
+ 'port', 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ # remove arguments that reference a loop var because they cause templating issues in
+ # callbacks that do not have the loop context(e.g. playbook_on_task_start)
+ if task.loop and remove_attr_ref_loop:
+ _task_attributes = _remove_attr_ref_loop(obj=task, attributes=_task_attributes)
+
+ for attr in _task_attributes:
+ _ret[self.DIY_NS]['task'].update({attr: _get_value(obj=task, attr=attr)})
+
+ if included_file:
+ _ret[self.DIY_NS].update({'included_file': {}})
+ _included_file_attributes = ['args', 'filename', 'hosts', 'is_role', 'task']
+
+ for attr in _included_file_attributes:
+ _ret[self.DIY_NS]['included_file'].update({attr: _get_value(
+ obj=included_file,
+ attr=attr
+ )})
+
+ if handler:
+ _ret[self.DIY_NS].update({'handler': {}})
+ _handler_attributes = ['action', 'any_errors_fatal', 'args', 'async', 'async_val',
+ 'become', 'become_flags', 'become_method', 'become_user',
+ 'changed_when', 'check_mode', 'collections', 'connection',
+ 'debugger', 'delay', 'delegate_facts', 'delegate_to', 'diff',
+ 'environment', 'failed_when', 'finalized', 'ignore_errors',
+ 'ignore_unreachable', 'listen', 'loop', 'loop_control',
+ 'loop_with', 'module_defaults', 'name', 'no_log',
+ 'notified_hosts', 'notify', 'parent', 'poll', 'port',
+ 'register', 'remote_user', 'retries', 'role', 'run_once',
+ 'squashed', 'tags', 'untagged', 'until', 'uuid', 'validated',
+ 'when']
+
+ if handler.loop and remove_attr_ref_loop:
+ _handler_attributes = _remove_attr_ref_loop(obj=handler,
+ attributes=_handler_attributes)
+
+ for attr in _handler_attributes:
+ _ret[self.DIY_NS]['handler'].update({attr: _get_value(obj=handler, attr=attr)})
+
+ _ret[self.DIY_NS]['handler'].update({'is_host_notified': handler.is_host_notified(host)})
+
+ if result:
+ _ret[self.DIY_NS].update({'result': {}})
+ _result_attributes = ['host', 'task', 'task_name']
+
+ for attr in _result_attributes:
+ _ret[self.DIY_NS]['result'].update({attr: _get_value(obj=result, attr=attr)})
+
+ _result_methods = ['is_changed', 'is_failed', 'is_skipped', 'is_unreachable']
+
+ for method in _result_methods:
+ _ret[self.DIY_NS]['result'].update({method: _get_value(obj=result, method=method)})
+
+ _ret[self.DIY_NS]['result'].update({'output': getattr(result, '_result', None)})
+
+ _ret.update(result._result)
+
+ if stats:
+ _ret[self.DIY_NS].update({'stats': {}})
+ _stats_attributes = ['changed', 'custom', 'dark', 'failures', 'ignored',
+ 'ok', 'processed', 'rescued', 'skipped']
+
+ for attr in _stats_attributes:
+ _ret[self.DIY_NS]['stats'].update({attr: _get_value(obj=stats, attr=attr)})
+
+ _ret[self.DIY_NS].update({'top_level_var_names': list(_ret.keys())})
+
+ return _ret
+
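+    # Every v2_* hook below follows the same pattern: build the output
+    # specification for the event, emit the rendered DIY template when one is
+    # set, then call the parent (default) callback with stdout suppressed so
+    # the two outputs do not interleave.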
+ def v2_on_any(self, *args, **kwargs):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_any(*args, **kwargs)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec, stderr=(not ignore_errors))
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+
+ def v2_runner_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_ok(result)
+
+ def v2_runner_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_skipped(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_unreachable(result)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_poll(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_ok(self, result):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_runner_on_async_failed(self, result):
+ pass
+
+ def v2_runner_item_on_ok(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_ok(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_failed(result)
+
+ def v2_runner_item_on_skipped(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result,
+ remove_attr_ref_loop=False
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_item_on_skipped(result)
+
+ def v2_runner_retry(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_retry(result)
+
+ def v2_runner_on_start(self, host, task):
+ self._diy_host = host
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_runner_on_start(host, task)
+
+ def v2_playbook_on_start(self, playbook):
+ self._diy_playbook = playbook
+ self._diy_loader = self._diy_playbook.get_loader()
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+
+ def v2_playbook_on_notify(self, handler, host):
+ self._diy_handler = handler
+ self._diy_host = host
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ host=self._diy_host,
+ handler=self._diy_handler
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_notify(handler, host)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_matched()
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_no_hosts_remaining()
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_task_start(task, is_conditional)
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._diy_task = task
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_handler_task_start(task)
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None,
+ confirm=False, salt_size=None, salt=None, default=None,
+ unsafe=None):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._diy_spec['vars']
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_vars_prompt(
+ varname, private, prompt, encrypt,
+ confirm, salt_size, salt, default,
+ unsafe
+ )
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ pass
+
+ # not implemented as the call to this is not implemented yet
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ pass
+
+ def v2_playbook_on_play_start(self, play):
+ self._diy_play = play
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_play_start(play)
+
+ def v2_playbook_on_stats(self, stats):
+ self._diy_stats = stats
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ stats=self._diy_stats
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_stats(stats)
+
+ def v2_playbook_on_include(self, included_file):
+ self._diy_included_file = included_file
+
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_included_file._task,
+ included_file=self._diy_included_file
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_playbook_on_include(included_file)
+
+ def v2_on_file_diff(self, result):
+ self._diy_spec = self._get_output_specification(
+ loader=self._diy_loader,
+ variables=self._get_vars(
+ playbook=self._diy_playbook,
+ play=self._diy_play,
+ task=self._diy_task,
+ result=result
+ )
+ )
+
+ if self._using_diy(spec=self._diy_spec):
+ self._output(spec=self._diy_spec)
+
+ if self._parent_has_callback():
+ with self._suppress_stdout(enabled=self._using_diy(spec=self._diy_spec)):
+ super(CallbackModule, self).v2_on_file_diff(result)
diff --git a/ansible_collections/community/general/plugins/callback/elastic.py b/ansible_collections/community/general/plugins/callback/elastic.py
new file mode 100644
index 000000000..37526c155
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/elastic.py
@@ -0,0 +1,424 @@
+# Copyright (c) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
+ name: elastic
+ type: notification
+ short_description: Create distributed traces for each Ansible task in Elastic APM
+ version_added: 3.8.0
+ description:
+ - This callback creates distributed traces for each Ansible task in Elastic APM.
+ - You can configure the plugin with environment variables.
+ - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html).
+ options:
+ hide_task_arguments:
+ default: false
+ type: bool
+ description:
+ - Hide the arguments for a task.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
+ apm_service_name:
+ default: ansible
+ type: str
+ description:
+ - The service name resource attribute.
+ env:
+ - name: ELASTIC_APM_SERVICE_NAME
+ apm_server_url:
+ type: str
+ description:
+        - The URL of the APM server.
+ env:
+ - name: ELASTIC_APM_SERVER_URL
+ apm_secret_token:
+ type: str
+ description:
+        - Use the APM secret token.
+ env:
+ - name: ELASTIC_APM_SECRET_TOKEN
+ apm_api_key:
+ type: str
+ description:
+        - Use the APM API key.
+ env:
+ - name: ELASTIC_APM_API_KEY
+ apm_verify_server_cert:
+ default: true
+ type: bool
+ description:
+        - Verify the SSL certificate when using an HTTPS connection.
+ env:
+ - name: ELASTIC_APM_VERIFY_SERVER_CERT
+ traceparent:
+ type: str
+ description:
+ - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
+ env:
+ - name: TRACEPARENT
+ requirements:
+ - elastic-apm (Python library)
+'''
+
+
+EXAMPLES = '''
+examples: |
+ Enable the plugin in ansible.cfg:
+ [defaults]
+ callbacks_enabled = community.general.elastic
+
+  Set the environment variables:
+  export ELASTIC_APM_SERVER_URL=<your APM server URL>
+ export ELASTIC_APM_SERVICE_NAME=your_service_name
+ export ELASTIC_APM_API_KEY=your_APM_API_KEY
+'''
+
+import getpass
+import socket
+import time
+import uuid
+
+from collections import OrderedDict
+from os.path import basename
+
+from ansible.errors import AnsibleError, AnsibleRuntimeError
+from ansible.module_utils.six import raise_from
+from ansible.plugins.callback import CallbackBase
+
+try:
+ from elasticapm import Client, capture_span, trace_parent_from_string, instrument, label
+except ImportError as imp_exc:
+ ELASTIC_LIBRARY_IMPORT_ERROR = imp_exc
+else:
+ ELASTIC_LIBRARY_IMPORT_ERROR = None
+
+
+class TaskData:
+ """
+ Data about an individual task.
+ """
+
+ def __init__(self, uuid, name, path, play, action, args):
+ self.uuid = uuid
+ self.name = name
+ self.path = path
+ self.play = play
+ self.host_data = OrderedDict()
+ self.start = time.time()
+ self.action = action
+ self.args = args
+
+ def add_host(self, host):
+ if host.uuid in self.host_data:
+ if host.status == 'included':
+ # concatenate task include output from multiple items
+ host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ else:
+ return
+
+ self.host_data[host.uuid] = host
+
+
+class HostData:
+ """
+ Data about an individual host.
+ """
+
+ def __init__(self, uuid, name, status, result):
+ self.uuid = uuid
+ self.name = name
+ self.status = status
+ self.result = result
+ self.finish = time.time()
+
+
+class ElasticSource(object):
+ def __init__(self, display):
+ self.ansible_playbook = ""
+ self.ansible_version = None
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ try:
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+        except Exception:
+ self.ip_address = None
+ self.user = getpass.getuser()
+
+ self._display = display
+
+ def start_task(self, tasks_data, hide_task_arguments, play_name, task):
+ """ record the start of a task for one or more hosts """
+
+ uuid = task._uuid
+
+ if uuid in tasks_data:
+ return
+
+ name = task.get_name().strip()
+ path = task.get_path()
+ action = task.action
+ args = None
+
+ if not task.no_log and not hide_task_arguments:
+ args = ', '.join(('%s=%s' % a for a in task.args.items()))
+
+ tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
+
+ def finish_task(self, tasks_data, status, result):
+ """ record the results of a task for a single host """
+
+ task_uuid = result._task._uuid
+
+ if hasattr(result, '_host') and result._host is not None:
+ host_uuid = result._host._uuid
+ host_name = result._host.name
+ else:
+ host_uuid = 'include'
+ host_name = 'include'
+
+ task = tasks_data[task_uuid]
+
+ if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = result._task_fields['args'].get('_ansible_version')
+
+ task.add_host(HostData(host_uuid, host_name, status, result))
+
+ def generate_distributed_traces(self, tasks_data, status, end_time, traceparent, apm_service_name,
+ apm_server_url, apm_verify_server_cert, apm_secret_token, apm_api_key):
+ """ generate distributed traces from the collected TaskData and HostData """
+
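+        # A single APM transaction spans the whole session, timed from the
+        # first recorded task to end_time; each (task, host) pair then becomes
+        # one span inside it.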
+ tasks = []
+ parent_start_time = None
+ for task_uuid, task in tasks_data.items():
+ if parent_start_time is None:
+ parent_start_time = task.start
+ tasks.append(task)
+
+ apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key)
+ if apm_cli:
+ instrument() # Only call this once, as early as possible.
+ if traceparent:
+ parent = trace_parent_from_string(traceparent)
+ apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time)
+ else:
+ apm_cli.begin_transaction("Session", start=parent_start_time)
+ # Populate trace metadata attributes
+ if self.ansible_version is not None:
+ label(ansible_version=self.ansible_version)
+ label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user)
+ if self.ip_address is not None:
+ label(ansible_host_ip=self.ip_address)
+
+ for task_data in tasks:
+ for host_uuid, host_data in task_data.host_data.items():
+ self.create_span_data(apm_cli, task_data, host_data)
+
+ apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time)
+
+ def create_span_data(self, apm_cli, task_data, host_data):
+ """ create the span with the given TaskData and HostData """
+
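+        # Map the Ansible result onto the span outcome: ok/included count as
+        # success, failed becomes failure (with a captured exception below),
+        # and skipped is recorded as unknown.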
+ name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+
+ message = "success"
+ status = "success"
+ enriched_error_message = None
+ if host_data.status == 'included':
+ rc = 0
+ else:
+ res = host_data.result._result
+ rc = res.get('rc', 0)
+ if host_data.status == 'failed':
+ message = self.get_error_message(res)
+ enriched_error_message = self.enrich_error_message(res)
+ status = "failure"
+ elif host_data.status == 'skipped':
+ if 'skip_reason' in res:
+ message = res['skip_reason']
+ else:
+ message = 'skipped'
+ status = "unknown"
+
+ with capture_span(task_data.name,
+ start=task_data.start,
+ span_type="ansible.task.run",
+ duration=host_data.finish - task_data.start,
+ labels={"ansible.task.args": task_data.args,
+ "ansible.task.message": message,
+ "ansible.task.module": task_data.action,
+ "ansible.task.name": name,
+ "ansible.task.result": rc,
+ "ansible.task.host.name": host_data.name,
+ "ansible.task.host.status": host_data.status}) as span:
+ span.outcome = status
+ if 'failure' in status:
+ exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message))
+ apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True)
+
+ def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key):
+ if apm_server_url:
+ return Client(service_name=apm_service_name,
+ server_url=apm_server_url,
+                          verify_server_cert=apm_verify_server_cert,
+ secret_token=apm_secret_token,
+ api_key=apm_api_key,
+ use_elastic_traceparent_header=True,
+ debug=True)
+
+ @staticmethod
+ def get_error_message(result):
+ if result.get('exception') is not None:
+ return ElasticSource._last_line(result['exception'])
+ return result.get('msg', 'failed')
+
+ @staticmethod
+ def _last_line(text):
+ lines = text.strip().split('\n')
+ return lines[-1]
+
+ @staticmethod
+ def enrich_error_message(result):
+ message = result.get('msg', 'failed')
+ exception = result.get('exception')
+ stderr = result.get('stderr')
+ return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback creates distributed traces with Elastic APM.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.elastic'
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.hide_task_arguments = None
+ self.apm_service_name = None
+ self.ansible_playbook = None
+ self.traceparent = False
+ self.play_name = None
+ self.tasks_data = None
+ self.errors = 0
+ self.disabled = False
+
+ if ELASTIC_LIBRARY_IMPORT_ERROR:
+ raise_from(
+                AnsibleError('The `elastic-apm` Python library must be installed to use this plugin'),
+ ELASTIC_LIBRARY_IMPORT_ERROR)
+
+ self.tasks_data = OrderedDict()
+
+ self.elastic = ElasticSource(display=self._display)
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys,
+ var_options=var_options,
+ direct=direct)
+
+ self.hide_task_arguments = self.get_option('hide_task_arguments')
+
+ self.apm_service_name = self.get_option('apm_service_name')
+ if not self.apm_service_name:
+ self.apm_service_name = 'ansible'
+
+ self.apm_server_url = self.get_option('apm_server_url')
+ self.apm_secret_token = self.get_option('apm_secret_token')
+ self.apm_api_key = self.get_option('apm_api_key')
+ self.apm_verify_server_cert = self.get_option('apm_verify_server_cert')
+ self.traceparent = self.get_option('traceparent')
+
+ def v2_playbook_on_start(self, playbook):
+ self.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_play_start(self, play):
+ self.play_name = play.get_name()
+
+ def v2_runner_on_no_hosts(self, task):
+ self.elastic.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.elastic.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self.elastic.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.elastic.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.errors += 1
+ self.elastic.finish_task(
+ self.tasks_data,
+ 'failed',
+ result
+ )
+
+ def v2_runner_on_ok(self, result):
+ self.elastic.finish_task(
+ self.tasks_data,
+ 'ok',
+ result
+ )
+
+ def v2_runner_on_skipped(self, result):
+ self.elastic.finish_task(
+ self.tasks_data,
+ 'skipped',
+ result
+ )
+
+ def v2_playbook_on_include(self, included_file):
+ self.elastic.finish_task(
+ self.tasks_data,
+ 'included',
+ included_file
+ )
+
+ def v2_playbook_on_stats(self, stats):
+ if self.errors == 0:
+ status = "success"
+ else:
+ status = "failure"
+ self.elastic.generate_distributed_traces(
+ self.tasks_data,
+ status,
+ time.time(),
+ self.traceparent,
+ self.apm_service_name,
+ self.apm_server_url,
+ self.apm_verify_server_cert,
+ self.apm_secret_token,
+ self.apm_api_key
+ )
+
+ def v2_runner_on_async_failed(self, result, **kwargs):
+ self.errors += 1
diff --git a/ansible_collections/community/general/plugins/callback/hipchat.py b/ansible_collections/community/general/plugins/callback/hipchat.py
new file mode 100644
index 000000000..3e10b69e7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/hipchat.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2014, Matt Martz <matt@sivel.net>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: hipchat
+ type: notification
+ requirements:
+    - whitelisting in configuration.
+ - prettytable (python lib)
+ short_description: post task events to hipchat
+ description:
+ - This callback plugin sends status updates to a HipChat channel during playbook execution.
+ - Before 2.4 only environment variables were available for configuring this plugin.
+ options:
+ token:
+ description: HipChat API token for v1 or v2 API.
+ required: true
+ env:
+ - name: HIPCHAT_TOKEN
+ ini:
+ - section: callback_hipchat
+ key: token
+ api_version:
+ description: HipChat API version, v1 or v2.
+ required: false
+ default: v1
+ env:
+ - name: HIPCHAT_API_VERSION
+ ini:
+ - section: callback_hipchat
+ key: api_version
+ room:
+ description: HipChat room to post in.
+ default: ansible
+ env:
+ - name: HIPCHAT_ROOM
+ ini:
+ - section: callback_hipchat
+ key: room
+ from:
+      description: Name to post as.
+ default: ansible
+ env:
+ - name: HIPCHAT_FROM
+ ini:
+ - section: callback_hipchat
+ key: from
+ notify:
+      description: Add the notify flag to important messages.
+ type: bool
+ default: true
+ env:
+ - name: HIPCHAT_NOTIFY
+ ini:
+ - section: callback_hipchat
+ key: notify
+
+'''
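+
+# A minimal sketch of enabling this callback via ansible.cfg, assuming the
+# option names documented above; the token value is a placeholder:
+#
+#   [defaults]
+#   callback_whitelist = community.general.hipchat
+#
+#   [callback_hipchat]
+#   token = <hipchat API token>
+#   api_version = v2
+#   room = ansible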
+
+import os
+import json
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url
+
+
+class CallbackModule(CallbackBase):
+ """This is an example ansible callback plugin that sends status
+ updates to a HipChat channel during playbook execution.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.hipchat'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ API_V1_URL = 'https://api.hipchat.com/v1/rooms/message'
+ API_V2_URL = 'https://api.hipchat.com/v2/'
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not installed. '
+ 'Disabling the HipChat callback plugin.')
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.token = self.get_option('token')
+ self.api_version = self.get_option('api_version')
+ self.from_name = self.get_option('from')
+ self.allow_notify = self.get_option('notify')
+ self.room = self.get_option('room')
+
+ if self.token is None:
+ self.disabled = True
+ self._display.warning('HipChat token could not be loaded. The HipChat '
+ 'token can be provided using the `HIPCHAT_TOKEN` '
+ 'environment variable.')
+
+ # Pick the request handler.
+ if self.api_version == 'v2':
+ self.send_msg = self.send_msg_v2
+ else:
+ self.send_msg = self.send_msg_v1
+
+ def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'}
+
+ body = {}
+ body['room_id'] = self.room
+ body['from'] = self.from_name[:15] # max length is 15
+ body['message'] = msg
+ body['message_format'] = msg_format
+ body['color'] = color
+ body['notify'] = self.allow_notify and notify
+
+ data = json.dumps(body)
+ url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room)
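+        # e.g. https://api.hipchat.com/v2/room/ansible/notification for the
+        # default room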
+ try:
+ response = open_url(url, data=data, headers=headers, method='POST')
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False):
+ """Method for sending a message to HipChat"""
+
+ params = {}
+ params['room_id'] = self.room
+ params['from'] = self.from_name[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['notify'] = int(self.allow_notify and notify)
+
+ url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token))
+ try:
+ response = open_url(url, data=urlencode(params))
+ return response.read()
+ except Exception as ex:
+ self._display.warning('Could not submit message to hipchat: {0}'.format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+
+ self.play = play
+ name = play.name
+ # This block sends information about a playbook when it starts
+ # The playbook object is not immediately available at
+ # playbook_on_start so we grab it via the play
+ #
+ # Displays info about playbook being started by a person on an
+ # inventory, as well as Tags, Skip Tags and Limits
+ if not self.printed_playbook:
+ self.playbook_name, dummy = os.path.splitext(os.path.basename(self.play.playbook.filename))
+ host_list = self.play.playbook.inventory.host_list
+ inventory = os.path.basename(os.path.realpath(host_list))
+ self.send_msg("%s: Playbook initiated by %s against %s" %
+ (self.playbook_name,
+ self.play.playbook.remote_user,
+ inventory), notify=True)
+ self.printed_playbook = True
+ subset = self.play.playbook.inventory._subset
+ skip_tags = self.play.playbook.skip_tags
+ self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" %
+ (self.playbook_name,
+ ', '.join(self.play.playbook.only_tags),
+ ', '.join(skip_tags) if skip_tags else None,
+ ', '.join(subset) if subset else subset))
+
+ # This is where we actually say we are starting a play
+ self.send_msg("%s: Starting play: %s" %
+ (self.playbook_name, name))
+
+ def playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures']])
+
+ self.send_msg("%s: Playbook complete" % self.playbook_name,
+ notify=True)
+
+ if failures or unreachable:
+ color = 'red'
+ self.send_msg("%s: Failures detected" % self.playbook_name,
+ color=color, notify=True)
+ else:
+ color = 'green'
+
+ self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color)
diff --git a/ansible_collections/community/general/plugins/callback/jabber.py b/ansible_collections/community/general/plugins/callback/jabber.py
new file mode 100644
index 000000000..d2d00496d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/jabber.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 maxn nikolaev.makc@gmail.com
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: jabber
+ type: notification
+ short_description: post task events to a jabber server
+ description:
+    - The chatty part of ChatOps, with a Jabber server as the target.
+    - This callback plugin sends status updates to a Jabber/XMPP recipient during playbook execution.
+ requirements:
+ - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy))
+ options:
+ server:
+      description: Connection info for the Jabber server.
+ required: true
+ env:
+ - name: JABBER_SERV
+ user:
+      description: Jabber user to authenticate as.
+ required: true
+ env:
+ - name: JABBER_USER
+ password:
+      description: Password for the user on the Jabber server.
+ required: true
+ env:
+ - name: JABBER_PASS
+ to:
+      description: Chat identifier that will receive the message.
+ required: true
+ env:
+ - name: JABBER_TO
+'''
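+
+# A minimal sketch of the environment this callback expects, using the
+# variables documented above; all values are placeholders:
+#
+#   export JABBER_SERV=jabber.example.com
+#   export JABBER_USER=ansible@jabber.example.com
+#   export JABBER_PASS=secret
+#   export JABBER_TO=ops@jabber.example.com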
+
+import os
+
+HAS_XMPP = True
+try:
+ import xmpp
+except ImportError:
+ HAS_XMPP = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.jabber'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_XMPP:
+ self._display.warning("The required python xmpp library (xmpppy) is not installed. "
+ "pip install git+https://github.com/ArchipelProject/xmpppy")
+ self.disabled = True
+
+ self.serv = os.getenv('JABBER_SERV')
+ self.j_user = os.getenv('JABBER_USER')
+ self.j_pass = os.getenv('JABBER_PASS')
+ self.j_to = os.getenv('JABBER_TO')
+
+        self.debug = ''
+
+        # all four connection settings are required
+        if not all((self.serv, self.j_user, self.j_pass, self.j_to)):
+            self.disabled = True
+            self._display.warning('Jabber CallBack wants the JABBER_SERV, JABBER_USER, JABBER_PASS and JABBER_TO environment variables')
+
+ def send_msg(self, msg):
+ """Send message"""
+ jid = xmpp.JID(self.j_user)
+ client = xmpp.Client(self.serv, debug=[])
+ client.connect(server=(self.serv, 5222))
+ client.auth(jid.getNode(), self.j_pass, resource=jid.getResource())
+ message = xmpp.Message(self.j_to, msg)
+ message.setAttr('type', 'chat')
+ client.send(message)
+ client.disconnect()
+
+ def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
+ self.debug = self._dump_results(result._result)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.task = task
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Playbook and play start messages"""
+ self.play = play
+ name = play.name
+ self.send_msg("Ansible starting play: %s" % (name))
+
+ def playbook_on_stats(self, stats):
+        name = self.play.name
+ hosts = sorted(stats.processed.keys())
+ failures = False
+ unreachable = False
+ for h in hosts:
+ s = stats.summarize(h)
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ if failures or unreachable:
+ out = self.debug
+ self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out))
+ else:
+ out = self.debug
+ self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out))
diff --git a/ansible_collections/community/general/plugins/callback/log_plays.py b/ansible_collections/community/general/plugins/callback/log_plays.py
new file mode 100644
index 000000000..e99054e17
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/log_plays.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: log_plays
+ type: notification
+ short_description: write playbook output to log file
+ description:
+ - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory.
+ requirements:
+ - Whitelist in configuration
+    - A C(/var/log/ansible/hosts) directory writeable by the user executing Ansible on the controller
+ options:
+ log_folder:
+ default: /var/log/ansible/hosts
+ description: The folder where log files will be created.
+ env:
+ - name: ANSIBLE_LOG_FOLDER
+ ini:
+ - section: callback_log_plays
+ key: log_folder
+'''
+
+import os
+import time
+import json
+
+from ansible.utils.path import makedirs_safe
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+# NOTE: in Ansible 1.2 or later general logging is available without
+# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
+# or log_path in the DEFAULTS section of your ansible configuration
+# file. This callback is an example of per hosts logging for those
+# that want it.
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs playbook results, per host, in /var/log/ansible/hosts
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.log_plays'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ TIME_FORMAT = "%b %d %Y %H:%M:%S"
+ MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n"
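+    # a rendered line looks like (illustrative values):
+    #   Jan 01 2024 12:00:00 - site.yml - install packages - apt - OK - {"changed": false}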
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.log_folder = self.get_option("log_folder")
+
+ if not os.path.exists(self.log_folder):
+ makedirs_safe(self.log_folder)
+
+ def log(self, result, category):
+ data = result._result
+ if isinstance(data, MutableMapping):
+ if '_ansible_verbose_override' in data:
+ # avoid logging extraneous data
+ data = 'omitted'
+ else:
+ data = data.copy()
+ invocation = data.pop('invocation', None)
+ data = json.dumps(data, cls=AnsibleJSONEncoder)
+ if invocation is not None:
+ data = json.dumps(invocation) + " => %s " % data
+
+ path = os.path.join(self.log_folder, result._host.get_name())
+ now = time.strftime(self.TIME_FORMAT, time.localtime())
+
+ msg = to_bytes(
+ self.MSG_FORMAT
+ % dict(
+ now=now,
+ playbook=self.playbook,
+ task_name=result._task.name,
+ task_action=result._task.action,
+ category=category,
+ data=data,
+ )
+ )
+ with open(path, "ab") as fd:
+ fd.write(msg)
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self.log(result, 'FAILED')
+
+ def v2_runner_on_ok(self, result):
+ self.log(result, 'OK')
+
+ def v2_runner_on_skipped(self, result):
+ self.log(result, 'SKIPPED')
+
+ def v2_runner_on_unreachable(self, result):
+ self.log(result, 'UNREACHABLE')
+
+ def v2_runner_on_async_failed(self, result):
+ self.log(result, 'ASYNC_FAILED')
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook._file_name
+
+    def v2_playbook_on_import_for_host(self, result, imported_file):
+        # log() accepts only (result, category)
+        self.log(result, 'IMPORTED')
+
+    def v2_playbook_on_not_import_for_host(self, result, missing_file):
+        self.log(result, 'NOTIMPORTED')
diff --git a/ansible_collections/community/general/plugins/callback/loganalytics.py b/ansible_collections/community/general/plugins/callback/loganalytics.py
new file mode 100644
index 000000000..fbcdc6f89
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/loganalytics.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: loganalytics
+ type: notification
+ short_description: Posts task results to Azure Log Analytics
+ author: "Cyrus Li (@zhcli) <cyrus1006@gmail.com>"
+ description:
+    - This callback plugin will post task results, in JSON format, to an Azure Log Analytics workspace.
+    - Credits to the authors of the Splunk callback plugin.
+ version_added: "2.4.0"
+ requirements:
+ - Whitelisting this callback plugin.
+    - An Azure Log Analytics workspace has been established.
+ options:
+ workspace_id:
+ description: Workspace ID of the Azure log analytics workspace.
+ required: true
+ env:
+ - name: WORKSPACE_ID
+ ini:
+ - section: callback_loganalytics
+ key: workspace_id
+ shared_key:
+ description: Shared key to connect to Azure log analytics workspace.
+ required: true
+ env:
+ - name: WORKSPACE_SHARED_KEY
+ ini:
+ - section: callback_loganalytics
+ key: shared_key
+'''
+
+EXAMPLES = '''
+examples: |
+ Whitelist the plugin in ansible.cfg:
+ [defaults]
+ callback_whitelist = community.general.loganalytics
+ Set the environment variable:
+ export WORKSPACE_ID=01234567-0123-0123-0123-01234567890a
+ export WORKSPACE_SHARED_KEY=dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
+ Or configure the plugin in ansible.cfg in the callback_loganalytics block:
+ [callback_loganalytics]
+ workspace_id = 01234567-0123-0123-0123-01234567890a
+ shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==
+'''
+
+import hashlib
+import hmac
+import base64
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class AzureLogAnalyticsSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.user = getpass.getuser()
+ self.extra_vars = ""
+
+ def __build_signature(self, date, workspace_id, shared_key, content_length):
+ # Build authorisation signature for Azure log analytics API call
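+        # e.g. a 100-byte payload dated "Mon, 01 Jan 2024 00:00:00 GMT"
+        # (illustrative values) yields the string-to-sign
+        #   POST\n100\napplication/json\nx-ms-date:Mon, 01 Jan 2024 00:00:00 GMT\n/api/logs
+        # which is HMAC-SHA256-signed with the base64-decoded shared key and
+        # wrapped as "SharedKey <workspace_id>:<base64 hash>"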
+ sigs = "POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format(
+ str(content_length), date)
+ utf8_sigs = sigs.encode('utf-8')
+ decoded_shared_key = base64.b64decode(shared_key)
+ hmac_sha256_sigs = hmac.new(
+ decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest()
+ encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8')
+ signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash)
+ return signature
+
+ def __build_workspace_url(self, workspace_id):
+ return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id)
+
+ def __rfc1123date(self):
+ return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
+
+ def send_event(self, workspace_id, shared_key, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
+ data['timestamp'] = self.__rfc1123date()
+ data['host'] = self.host
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ # Removing args since it can contain sensitive data
+ if 'args' in data['ansible_task']:
+ data['ansible_task'].pop('args')
+ data['ansible_result'] = result._result
+ if 'content' in data['ansible_result']:
+ data['ansible_result'].pop('content')
+
+ # Adding extra vars info
+ data['extra_vars'] = self.extra_vars
+
+ # Preparing the playbook logs as JSON format and send to Azure log analytics
+ jsondata = json.dumps({'event': data}, cls=AnsibleJSONEncoder, sort_keys=True)
+ content_length = len(jsondata)
+ rfc1123date = self.__rfc1123date()
+ signature = self.__build_signature(rfc1123date, workspace_id, shared_key, content_length)
+ workspace_url = self.__build_workspace_url(workspace_id)
+
+ open_url(
+ workspace_url,
+ jsondata,
+ headers={
+ 'content-type': 'application/json',
+ 'Authorization': signature,
+ 'Log-Type': 'ansible_playbook',
+ 'x-ms-date': rfc1123date
+ },
+ method='POST'
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'loganalytics'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.workspace_id = None
+ self.shared_key = None
+ self.loganalytics = AzureLogAnalyticsSource()
+
+ def _seconds_since_start(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+ self.workspace_id = self.get_option('workspace_id')
+ self.shared_key = self.get_option('shared_key')
+
+ def v2_playbook_on_play_start(self, play):
+ vm = play.get_variable_manager()
+ extra_vars = vm.extra_vars
+ self.loganalytics.extra_vars = extra_vars
+
+ def v2_playbook_on_start(self, playbook):
+ self.loganalytics.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.loganalytics.send_event(
+ self.workspace_id,
+ self.shared_key,
+ 'OK',
+ result,
+ self._seconds_since_start(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.loganalytics.send_event(
+ self.workspace_id,
+ self.shared_key,
+ 'SKIPPED',
+ result,
+ self._seconds_since_start(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.loganalytics.send_event(
+ self.workspace_id,
+ self.shared_key,
+ 'FAILED',
+ result,
+ self._seconds_since_start(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.loganalytics.send_event(
+ self.workspace_id,
+ self.shared_key,
+ 'FAILED',
+ result,
+ self._seconds_since_start(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.loganalytics.send_event(
+ self.workspace_id,
+ self.shared_key,
+ 'UNREACHABLE',
+ result,
+ self._seconds_since_start(result)
+ )
diff --git a/ansible_collections/community/general/plugins/callback/logdna.py b/ansible_collections/community/general/plugins/callback/logdna.py
new file mode 100644
index 000000000..fc9a81ac8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/logdna.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Samir Musali <samir.musali@logdna.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: logdna
+ type: notification
+ short_description: Sends playbook logs to LogDNA
+ description:
+ - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)).
+ requirements:
+ - LogDNA Python Library (U(https://github.com/logdna/python))
+ - whitelisting in configuration
+ options:
+ conf_key:
+ required: true
+ description: LogDNA Ingestion Key.
+ type: string
+ env:
+ - name: LOGDNA_INGESTION_KEY
+ ini:
+ - section: callback_logdna
+ key: conf_key
+ plugin_ignore_errors:
+ required: false
+      description: Whether or not to ignore errors when a task fails.
+ type: boolean
+ env:
+ - name: ANSIBLE_IGNORE_ERRORS
+ ini:
+ - section: callback_logdna
+ key: plugin_ignore_errors
+ default: false
+ conf_hostname:
+ required: false
+ description: Alternative Host Name; the current host name by default.
+ type: string
+ env:
+ - name: LOGDNA_HOSTNAME
+ ini:
+ - section: callback_logdna
+ key: conf_hostname
+ conf_tags:
+ required: false
+ description: Tags.
+ type: string
+ env:
+ - name: LOGDNA_TAGS
+ ini:
+ - section: callback_logdna
+ key: conf_tags
+ default: ansible
+'''
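+
+# A minimal sketch of enabling this callback via ansible.cfg, assuming the
+# option names documented above; the key value is a placeholder:
+#
+#   [defaults]
+#   callback_whitelist = community.general.logdna
+#
+#   [callback_logdna]
+#   conf_key = <LogDNA ingestion key>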
+
+import logging
+import json
+import socket
+from uuid import getnode
+from ansible.plugins.callback import CallbackBase
+from ansible.parsing.ajson import AnsibleJSONEncoder
+
+try:
+ from logdna import LogDNAHandler
+ HAS_LOGDNA = True
+except ImportError:
+ HAS_LOGDNA = False
+
+
+# Getting MAC Address of system:
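+# (uuid.getnode() returns a 48-bit integer; it is zero-padded to 12 hex
+# digits and split into pairs, e.g. 0x001622aabbcc -> "00:16:22:aa:bb:cc")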
+def get_mac():
+ mac = "%012x" % getnode()
+ return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2))))
+
+
+# Getting hostname of system:
+def get_hostname():
+ return str(socket.gethostname()).split('.local', 1)[0]
+
+
+# Getting IP of system:
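+# (falls back to the UDP-connect trick: connect() on a datagram socket sends
+# no packets, it only makes the kernel pick the outbound interface, whose
+# address getsockname() then reports)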
+def get_ip():
+ try:
+ return socket.gethostbyname(get_hostname())
+ except Exception:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ try:
+ s.connect(('10.255.255.255', 1))
+ IP = s.getsockname()[0]
+ except Exception:
+ IP = '127.0.0.1'
+ finally:
+ s.close()
+ return IP
+
+
+# Is it JSON?
+def isJSONable(obj):
+ try:
+ json.dumps(obj, sort_keys=True, cls=AnsibleJSONEncoder)
+ return True
+ except Exception:
+ return False
+
+
+# LogDNA Callback Module:
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 0.1
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.logdna'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+
+ self.disabled = True
+ self.playbook_name = None
+ self.playbook = None
+ self.conf_key = None
+ self.plugin_ignore_errors = None
+ self.conf_hostname = None
+ self.conf_tags = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.conf_key = self.get_option('conf_key')
+ self.plugin_ignore_errors = self.get_option('plugin_ignore_errors')
+ self.conf_hostname = self.get_option('conf_hostname')
+ self.conf_tags = self.get_option('conf_tags')
+ self.mac = get_mac()
+ self.ip = get_ip()
+
+ if self.conf_hostname is None:
+ self.conf_hostname = get_hostname()
+
+ self.conf_tags = self.conf_tags.split(',')
+
+ if HAS_LOGDNA:
+ self.log = logging.getLogger('logdna')
+ self.log.setLevel(logging.INFO)
+ self.options = {'hostname': self.conf_hostname, 'mac': self.mac, 'index_meta': True}
+ self.log.addHandler(LogDNAHandler(self.conf_key, self.options))
+ self.disabled = False
+ else:
+ self.disabled = True
+            self._display.warning('The LogDNA Python package is not installed. Install it with `pip install logdna`.')
+
+    def metaIndexing(self, meta):
+        # drop any values that cannot be serialized to JSON
+        invalidKeys = [key for key, value in meta.items() if not isJSONable(value)]
+        if invalidKeys:
+            for key in invalidKeys:
+                del meta[key]
+            meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys)
+        return meta
+
+ def sanitizeJSON(self, data):
+ try:
+ return json.loads(json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder))
+ except Exception:
+ return {'warnings': ['JSON Formatting Issue', json.dumps(data, sort_keys=True, cls=AnsibleJSONEncoder)]}
+
+ def flush(self, log, options):
+ if HAS_LOGDNA:
+ self.log.info(json.dumps(log), options)
+
+ def sendLog(self, host, category, logdata):
+ options = {'app': 'ansible', 'meta': {'playbook': self.playbook_name, 'host': host, 'category': category}}
+ logdata['info'].pop('invocation', None)
+ warnings = logdata['info'].pop('warnings', None)
+ if warnings is not None:
+ self.flush({'warn': warnings}, options)
+ self.flush(logdata, options)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.playbook_name = playbook._file_name
+
+ def v2_playbook_on_stats(self, stats):
+ result = dict()
+ for host in stats.processed.keys():
+ result[host] = stats.summarize(host)
+ self.sendLog(self.conf_hostname, 'STATS', {'info': self.sanitizeJSON(result)})
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ if self.plugin_ignore_errors:
+ ignore_errors = self.plugin_ignore_errors
+ self.sendLog(host, 'FAILED', {'info': self.sanitizeJSON(res), 'ignore_errors': ignore_errors})
+
+ def runner_on_ok(self, host, res):
+ self.sendLog(host, 'OK', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_unreachable(self, host, res):
+ self.sendLog(host, 'UNREACHABLE', {'info': self.sanitizeJSON(res)})
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_FAILED', {'info': self.sanitizeJSON(res), 'job_id': jid})
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.sendLog(host, 'ASYNC_OK', {'info': self.sanitizeJSON(res), 'job_id': jid})
diff --git a/ansible_collections/community/general/plugins/callback/logentries.py b/ansible_collections/community/general/plugins/callback/logentries.py
new file mode 100644
index 000000000..22322a4df
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/logentries.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: logentries
+ type: notification
+ short_description: Sends events to Logentries
+ description:
+ - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes.
+    - Before 2.4, if you wanted to use an ini configuration, the file had to be placed in the same directory as this plugin and named C(logentries.ini).
+ - In 2.4 and above you can just put it in the main Ansible configuration file.
+ requirements:
+ - whitelisting in configuration
+ - certifi (Python library)
+ - flatdict (Python library), if you want to use the 'flatten' option
+ options:
+ api:
+ description: URI to the Logentries API.
+ env:
+ - name: LOGENTRIES_API
+ default: data.logentries.com
+ ini:
+ - section: callback_logentries
+ key: api
+ port:
+ description: HTTP port to use when connecting to the API.
+ env:
+ - name: LOGENTRIES_PORT
+ default: 80
+ ini:
+ - section: callback_logentries
+ key: port
+ tls_port:
+ description: Port to use when connecting to the API when TLS is enabled.
+ env:
+ - name: LOGENTRIES_TLS_PORT
+ default: 443
+ ini:
+ - section: callback_logentries
+ key: tls_port
+ token:
+ description: The logentries C(TCP token).
+ env:
+ - name: LOGENTRIES_ANSIBLE_TOKEN
+ required: true
+ ini:
+ - section: callback_logentries
+ key: token
+ use_tls:
+ description:
+ - Toggle to decide whether to use TLS to encrypt the communications with the API server.
+ env:
+ - name: LOGENTRIES_USE_TLS
+ default: false
+ type: boolean
+ ini:
+ - section: callback_logentries
+ key: use_tls
+ flatten:
+ description: Flatten complex data structures into a single dictionary with complex keys.
+ type: boolean
+ default: false
+ env:
+ - name: LOGENTRIES_FLATTEN
+ ini:
+ - section: callback_logentries
+ key: flatten
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+
+ [defaults]
+ callback_whitelist = community.general.logentries
+
+ Either set the environment variables
+ export LOGENTRIES_API=data.logentries.com
+ export LOGENTRIES_PORT=10000
+ export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
+
+ Or in the main Ansible config file
+ [callback_logentries]
+ api = data.logentries.com
+ port = 10000
+ tls_port = 20000
+ use_tls = no
+ token = dd21fc88-f00a-43ff-b977-e3a4233c53af
+ flatten = False
+'''
+
+import os
+import socket
+import random
+import time
+import uuid
+
+try:
+ import certifi
+ HAS_CERTIFI = True
+except ImportError:
+ HAS_CERTIFI = False
+
+try:
+ import flatdict
+ HAS_FLATDICT = True
+except ImportError:
+ HAS_FLATDICT = False
+
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.callback import CallbackBase
+
+# Todo:
+# * Better formatting of output before sending out to logentries data/api nodes.
+
+
+class PlainTextSocketAppender(object):
+ def __init__(self, display, LE_API='data.logentries.com', LE_PORT=80, LE_TLS_PORT=443):
+
+ self.LE_API = LE_API
+ self.LE_PORT = LE_PORT
+ self.LE_TLS_PORT = LE_TLS_PORT
+ self.MIN_DELAY = 0.1
+ self.MAX_DELAY = 10
+ # Error message displayed when an incorrect Token has been detected
+ self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n"
+ # Unicode Line separator character \u2028
+ self.LINE_SEP = u'\u2028'
+
+ self._display = display
+ self._conn = None
+
+ def open_connection(self):
+ self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._conn.connect((self.LE_API, self.LE_PORT))
+
+ def reopen_connection(self):
+ self.close_connection()
+
+ root_delay = self.MIN_DELAY
+ while True:
+ try:
+ self.open_connection()
+ return
+ except Exception as e:
+ self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e))
+
+ root_delay *= 2
+ if root_delay > self.MAX_DELAY:
+ root_delay = self.MAX_DELAY
+
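+            # the delay doubles with jitter: roughly 0.1-0.2s, 0.2-0.4s, ...,
+            # capped at 10-20s per retry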
+ wait_for = root_delay + random.uniform(0, root_delay)
+
+ try:
+ self._display.vvvv("sleeping %s before retry" % wait_for)
+ time.sleep(wait_for)
+ except KeyboardInterrupt:
+ raise
+
+ def close_connection(self):
+ if self._conn is not None:
+ self._conn.close()
+
+ def put(self, data):
+ # Replace newlines with Unicode line separator
+ # for multi-line events
+ data = to_text(data, errors='surrogate_or_strict')
+ multiline = data.replace(u'\n', self.LINE_SEP)
+ multiline += u"\n"
+ # Send data, reconnect if needed
+ while True:
+ try:
+ self._conn.send(to_bytes(multiline, errors='surrogate_or_strict'))
+ except socket.error:
+ self.reopen_connection()
+ continue
+ break
+
+ self.close_connection()
+
+
+try:
+ import ssl
+ HAS_SSL = True
+except ImportError: # for systems without TLS support.
+ SocketAppender = PlainTextSocketAppender
+ HAS_SSL = False
+else:
+
+ class TLSSocketAppender(PlainTextSocketAppender):
+ def open_connection(self):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock = ssl.wrap_socket(
+ sock=sock,
+ keyfile=None,
+ certfile=None,
+ server_side=False,
+ cert_reqs=ssl.CERT_REQUIRED,
+ ssl_version=getattr(
+ ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
+ ca_certs=certifi.where(),
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, )
+ sock.connect((self.LE_API, self.LE_TLS_PORT))
+ self._conn = sock
+
+ SocketAppender = TLSSocketAppender
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.logentries'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ # TODO: allow for alternate posting methods (REST/UDP/agent/etc)
+ super(CallbackModule, self).__init__()
+
+ # verify dependencies
+ if not HAS_SSL:
+ self._display.warning("Unable to import ssl module. Will send over port 80.")
+
+ if not HAS_CERTIFI:
+ self.disabled = True
+ self._display.warning('The `certifi` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self.le_jobid = str(uuid.uuid4())
+
+ # FIXME: make configurable, move to options
+ self.timeout = 10
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ # get options
+ try:
+ self.api_url = self.get_option('api')
+ self.api_port = self.get_option('port')
+ self.api_tls_port = self.get_option('tls_port')
+ self.use_tls = self.get_option('use_tls')
+ self.flatten = self.get_option('flatten')
+ except KeyError as e:
+ self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e))
+ self.disabled = True
+
+ try:
+ self.token = self.get_option('token')
+        except KeyError:
+ self._display.warning('Logentries token was not provided, this is required for this callback to operate, disabling')
+ self.disabled = True
+
+ if self.flatten and not HAS_FLATDICT:
+ self.disabled = True
+ self._display.warning('You have chosen to flatten and the `flatdict` python module is not installed.\nDisabling the Logentries callback plugin.')
+
+ self._initialize_connections()
+
+ def _initialize_connections(self):
+
+ if not self.disabled:
+ if self.use_tls:
+ self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port))
+ self._appender = TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port)
+ else:
+ self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port))
+ self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port)
+ self._appender.reopen_connection()
+
+ def emit_formatted(self, record):
+ if self.flatten:
+ results = flatdict.FlatDict(record)
+ self.emit(self._dump_results(results))
+ else:
+ self.emit(self._dump_results(record))
+
+ def emit(self, record):
+ msg = record.rstrip('\n')
+ msg = "{0} {1}".format(self.token, msg)
+ self._appender.put(msg)
+ self._display.vvvv("Sent event to logentries")
+
+ def _set_info(self, host, res):
+ return {'le_jobid': self.le_jobid, 'hostname': host, 'results': res}
+
+ def runner_on_ok(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'OK'
+ self.emit_formatted(results)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ results = self._set_info(host, res)
+ results['status'] = 'FAILED'
+ self.emit_formatted(results)
+
+ def runner_on_skipped(self, host, item=None):
+ results = self._set_info(host, item)
+ del results['results']
+ results['status'] = 'SKIPPED'
+ self.emit_formatted(results)
+
+ def runner_on_unreachable(self, host, res):
+ results = self._set_info(host, res)
+ results['status'] = 'UNREACHABLE'
+ self.emit_formatted(results)
+
+ def runner_on_async_failed(self, host, res, jid):
+ results = self._set_info(host, res)
+ results['jid'] = jid
+ results['status'] = 'ASYNC_FAILED'
+ self.emit_formatted(results)
+
+ def v2_playbook_on_play_start(self, play):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['started_by'] = os.getlogin()
+ if play.name:
+ results['play'] = play.name
+ results['hosts'] = play.hosts
+ self.emit_formatted(results)
+
+ def playbook_on_stats(self, stats):
+ """ close connection """
+ self._appender.close_connection()
diff --git a/ansible_collections/community/general/plugins/callback/logstash.py b/ansible_collections/community/general/plugins/callback/logstash.py
new file mode 100644
index 000000000..144e1f991
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/logstash.py
@@ -0,0 +1,396 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Yevhen Khmelenko <ujenmr@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ author: Yevhen Khmelenko (@ujenmr)
+ name: logstash
+ type: notification
+ short_description: Sends events to Logstash
+ description:
+ - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash).
+ requirements:
+ - whitelisting in configuration
+ - logstash (Python library)
+ options:
+ server:
+ description: Address of the Logstash server.
+ env:
+ - name: LOGSTASH_SERVER
+ ini:
+ - section: callback_logstash
+ key: server
+ version_added: 1.0.0
+ default: localhost
+ port:
+ description: Port on which logstash is listening.
+ env:
+ - name: LOGSTASH_PORT
+ ini:
+ - section: callback_logstash
+ key: port
+ version_added: 1.0.0
+ default: 5000
+ type:
+ description: Message type.
+ env:
+ - name: LOGSTASH_TYPE
+ ini:
+ - section: callback_logstash
+ key: type
+ version_added: 1.0.0
+ default: ansible
+ pre_command:
+      description: Command to execute before the run; its output is added to the C(ansible_pre_command_output) Logstash field.
+ version_added: 2.0.0
+ ini:
+ - section: callback_logstash
+ key: pre_command
+ env:
+ - name: LOGSTASH_PRE_COMMAND
+ format_version:
+ description: Logging format.
+ type: str
+ version_added: 2.0.0
+ ini:
+ - section: callback_logstash
+ key: format_version
+ env:
+ - name: LOGSTASH_FORMAT_VERSION
+ default: v1
+ choices:
+ - v1
+ - v2
+
+'''
+
+EXAMPLES = r'''
+ansible.cfg: |
+ # Enable Callback plugin
+ [defaults]
+ callback_whitelist = community.general.logstash
+
+ [callback_logstash]
+ server = logstash.example.com
+ port = 5000
+ pre_command = git rev-parse HEAD
+ type = ansible
+
+11-input-tcp.conf: |
+ # Enable Logstash TCP Input
+ input {
+ tcp {
+ port => 5000
+ codec => json
+ add_field => { "[@metadata][beat]" => "notify" }
+ add_field => { "[@metadata][type]" => "ansible" }
+ }
+ }
+'''
+
+import os
+import json
+from ansible import context
+import socket
+import uuid
+import logging
+from datetime import datetime
+
+try:
+ import logstash
+ HAS_LOGSTASH = True
+except ImportError:
+ HAS_LOGSTASH = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.logstash'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ if not HAS_LOGSTASH:
+ self.disabled = True
+            self._display.warning("The required python-logstash/python3-logstash library is not installed. "
+                                  "Install it with `pip install python-logstash` for Python 2 "
+                                  "or `pip install python3-logstash` for Python 3.")
+
+ self.start_time = datetime.utcnow()
+
+ def _init_plugin(self):
+ if not self.disabled:
+ self.logger = logging.getLogger('python-logstash-logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handler = logstash.TCPLogstashHandler(
+ self.ls_server,
+ self.ls_port,
+ version=1,
+ message_type=self.ls_type
+ )
+
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+ self.session = str(uuid.uuid4())
+ self.errors = 0
+
+ self.base_data = {
+ 'session': self.session,
+ 'host': self.hostname
+ }
+
+ if self.ls_pre_command is not None:
+ self.base_data['ansible_pre_command_output'] = os.popen(
+ self.ls_pre_command).read()
+
+ if context.CLIARGS is not None:
+ self.base_data['ansible_checkmode'] = context.CLIARGS.get('check')
+ self.base_data['ansible_tags'] = context.CLIARGS.get('tags')
+ self.base_data['ansible_skip_tags'] = context.CLIARGS.get('skip_tags')
+ self.base_data['inventory'] = context.CLIARGS.get('inventory')
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.ls_server = self.get_option('server')
+ self.ls_port = int(self.get_option('port'))
+ self.ls_type = self.get_option('type')
+ self.ls_pre_command = self.get_option('pre_command')
+ self.ls_format_version = self.get_option('format_version')
+
+ self._init_plugin()
+
+ def v2_playbook_on_start(self, playbook):
+ data = self.base_data.copy()
+ data['ansible_type'] = "start"
+ data['status'] = "OK"
+ data['ansible_playbook'] = playbook._file_name
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info(
+ "START PLAYBOOK | %s", data['ansible_playbook'], extra=data
+ )
+ else:
+ self.logger.info("ansible start", extra=data)
+
+ def v2_playbook_on_stats(self, stats):
+ end_time = datetime.utcnow()
+ runtime = end_time - self.start_time
+ summarize_stat = {}
+ for host in stats.processed.keys():
+ summarize_stat[host] = stats.summarize(host)
+
+ if self.errors == 0:
+ status = "OK"
+ else:
+ status = "FAILED"
+
+ data = self.base_data.copy()
+ data['ansible_type'] = "finish"
+ data['status'] = status
+ data['ansible_playbook_duration'] = runtime.total_seconds()
+ data['ansible_result'] = json.dumps(summarize_stat) # deprecated field
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info(
+ "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data
+ )
+ else:
+ self.logger.info("ansible stats", extra=data)
+
+ def v2_playbook_on_play_start(self, play):
+ self.play_id = str(play._uuid)
+
+        self.play_name = play.name
+
+ data = self.base_data.copy()
+ data['ansible_type'] = "start"
+ data['status'] = "OK"
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info("START PLAY | %s", self.play_name, extra=data)
+ else:
+ self.logger.info("ansible play", extra=data)
+
+    def v2_playbook_on_task_start(self, task, is_conditional):
+        '''
+        Tasks and handler tasks are dealt with here
+        '''
+        self.task_id = str(task._uuid)
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
+
+ data = self.base_data.copy()
+ if task_name == 'setup':
+ data['ansible_type'] = "setup"
+ data['status'] = "OK"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_facts'] = self._dump_results(result._result)
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info(
+ "SETUP FACTS | %s", self._dump_results(result._result), extra=data
+ )
+ else:
+ self.logger.info("ansible facts", extra=data)
+ else:
+ if 'changed' in result._result.keys():
+ data['ansible_changed'] = result._result['changed']
+ else:
+ data['ansible_changed'] = False
+
+ data['ansible_type'] = "task"
+ data['status'] = "OK"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_task_id'] = self.task_id
+ data['ansible_result'] = self._dump_results(result._result)
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info(
+ "TASK OK | %s | RESULT | %s",
+ task_name, self._dump_results(result._result), extra=data
+ )
+ else:
+ self.logger.info("ansible ok", extra=data)
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
+
+ data = self.base_data.copy()
+ data['ansible_type'] = "task"
+ data['status'] = "SKIPPED"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_task_id'] = self.task_id
+ data['ansible_result'] = self._dump_results(result._result)
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info("TASK SKIPPED | %s", task_name, extra=data)
+ else:
+ self.logger.info("ansible skipped", extra=data)
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ data = self.base_data.copy()
+ data['ansible_type'] = "import"
+ data['status'] = "IMPORTED"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['imported_file'] = imported_file
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info("IMPORT | %s", imported_file, extra=data)
+ else:
+ self.logger.info("ansible import", extra=data)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ data = self.base_data.copy()
+ data['ansible_type'] = "import"
+ data['status'] = "NOT IMPORTED"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['imported_file'] = missing_file
+
+ if (self.ls_format_version == "v2"):
+ self.logger.info("NOT IMPORTED | %s", missing_file, extra=data)
+ else:
+ self.logger.info("ansible import", extra=data)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
+
+ data = self.base_data.copy()
+ if 'changed' in result._result.keys():
+ data['ansible_changed'] = result._result['changed']
+ else:
+ data['ansible_changed'] = False
+
+ data['ansible_type'] = "task"
+ data['status'] = "FAILED"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_task_id'] = self.task_id
+ data['ansible_result'] = self._dump_results(result._result)
+
+ self.errors += 1
+ if (self.ls_format_version == "v2"):
+ self.logger.error(
+ "TASK FAILED | %s | HOST | %s | RESULT | %s",
+ task_name, self.hostname,
+ self._dump_results(result._result), extra=data
+ )
+ else:
+ self.logger.error("ansible failed", extra=data)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
+
+ data = self.base_data.copy()
+ data['ansible_type'] = "task"
+ data['status'] = "UNREACHABLE"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_task_id'] = self.task_id
+ data['ansible_result'] = self._dump_results(result._result)
+
+ self.errors += 1
+ if (self.ls_format_version == "v2"):
+ self.logger.error(
+ "UNREACHABLE | %s | HOST | %s | RESULT | %s",
+ task_name, self.hostname,
+ self._dump_results(result._result), extra=data
+ )
+ else:
+ self.logger.error("ansible unreachable", extra=data)
+
+ def v2_runner_on_async_failed(self, result, **kwargs):
+ task_name = str(result._task).replace('TASK: ', '').replace('HANDLER: ', '')
+
+ data = self.base_data.copy()
+ data['ansible_type'] = "task"
+ data['status'] = "FAILED"
+ data['ansible_host'] = result._host.name
+ data['ansible_play_id'] = self.play_id
+ data['ansible_play_name'] = self.play_name
+ data['ansible_task'] = task_name
+ data['ansible_task_id'] = self.task_id
+ data['ansible_result'] = self._dump_results(result._result)
+
+ self.errors += 1
+ if (self.ls_format_version == "v2"):
+ self.logger.error(
+ "ASYNC FAILED | %s | HOST | %s | RESULT | %s",
+ task_name, self.hostname,
+ self._dump_results(result._result), extra=data
+ )
+ else:
+ self.logger.error("ansible async", extra=data)
diff --git a/ansible_collections/community/general/plugins/callback/mail.py b/ansible_collections/community/general/plugins/callback/mail.py
new file mode 100644
index 000000000..9e8314baf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/mail.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: mail
+type: notification
+short_description: Sends failure events via email
+description:
+- This callback will report failures via email.
+author:
+- Dag Wieers (@dagwieers)
+requirements:
+- whitelisting in configuration
+options:
+ mta:
+ description:
+    - Mail Transfer Agent, the server that accepts SMTP connections.
+ type: str
+ env:
+ - name: SMTPHOST
+ ini:
+ - section: callback_mail
+ key: smtphost
+ default: localhost
+ mtaport:
+ description:
+ - Mail Transfer Agent Port.
+    - Port on which the SMTP server listens.
+ type: int
+ ini:
+ - section: callback_mail
+ key: smtpport
+ default: 25
+ to:
+ description:
+ - Mail recipient.
+ type: list
+ elements: str
+ ini:
+ - section: callback_mail
+ key: to
+ default: [root]
+ sender:
+ description:
+ - Mail sender.
+ - This is required since community.general 6.0.0.
+ type: str
+ required: true
+ ini:
+ - section: callback_mail
+ key: sender
+ cc:
+ description:
+ - CC'd recipients.
+ type: list
+ elements: str
+ ini:
+ - section: callback_mail
+ key: cc
+ bcc:
+ description:
+ - BCC'd recipients.
+ type: list
+ elements: str
+ ini:
+ - section: callback_mail
+ key: bcc
+'''
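+
+# A minimal sketch of enabling this callback via ansible.cfg, assuming the
+# option names documented above; addresses and host are placeholders:
+#
+#   [defaults]
+#   callback_whitelist = community.general.mail
+#
+#   [callback_mail]
+#   sender = ansible@example.com
+#   to = ops@example.com
+#   smtphost = mail.example.com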
+
+import json
+import os
+import re
+import email.utils
+import smtplib
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ ''' This Ansible callback plugin mails errors to interested parties. '''
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.mail'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.sender = None
+ self.to = 'root'
+ self.smtphost = os.getenv('SMTPHOST', 'localhost')
+ self.smtpport = 25
+ self.cc = None
+ self.bcc = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.sender = self.get_option('sender')
+ self.to = self.get_option('to')
+ self.smtphost = self.get_option('mta')
+ self.smtpport = self.get_option('mtaport')
+ self.cc = self.get_option('cc')
+ self.bcc = self.get_option('bcc')
+
+ def mail(self, subject='Ansible error mail', body=None):
+ if body is None:
+ body = subject
+
+ smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
+
+        sender_address = email.utils.parseaddr(self.sender)
+        # default each recipient list to empty so the address handling below
+        # works even when a recipient class is not configured
+        to_addresses = email.utils.getaddresses(self.to) if self.to else []
+        cc_addresses = email.utils.getaddresses(self.cc) if self.cc else []
+        bcc_addresses = email.utils.getaddresses(self.bcc) if self.bcc else []
+
+ content = 'Date: %s\n' % email.utils.formatdate()
+ content += 'From: %s\n' % email.utils.formataddr(sender_address)
+ if self.to:
+ content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses])
+ if self.cc:
+ content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses])
+ content += 'Message-ID: %s\n' % email.utils.make_msgid()
+ content += 'Subject: %s\n\n' % subject.strip()
+ content += body
+
+ addresses = to_addresses
+ if self.cc:
+ addresses += cc_addresses
+ if self.bcc:
+ addresses += bcc_addresses
+
+        if not addresses:
+            self._display.warning('No receiver has been specified for the mail callback plugin.')
+            smtp.quit()
+            return
+
+ smtp.sendmail(self.sender, [address for name, address in addresses], to_bytes(content))
+
+ smtp.quit()
+
+ def subject_msg(self, multiline, failtype, linenr):
+ return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
+
+ def indent(self, multiline, indent=8):
+ return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
+
+ def body_blob(self, multiline, texttype):
+ ''' Turn some text output in a well-indented block for sending in a mail body '''
+ intro = 'with the following %s:\n\n' % texttype
+ blob = ''
+ for line in multiline.strip('\r\n').splitlines():
+ blob += '%s\n' % line
+ return intro + self.indent(blob) + '\n'
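+    # e.g. body_blob('boom', 'error output') returns (illustrative):
+    #   'with the following error output:\n\n        boom\n\n'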
+
+ def mail_result(self, result, failtype):
+ host = result._host.get_name()
+ if not self.sender:
+ self.sender = '"Ansible: %s" <root>' % host
+
+ # Add subject
+ if self.itembody:
+ subject = self.itemsubject
+ elif result._result.get('failed_when_result') is True:
+ subject = "Failed due to 'failed_when' condition"
+ elif result._result.get('msg'):
+ subject = self.subject_msg(result._result['msg'], failtype, 0)
+ elif result._result.get('stderr'):
+ subject = self.subject_msg(result._result['stderr'], failtype, -1)
+ elif result._result.get('stdout'):
+ subject = self.subject_msg(result._result['stdout'], failtype, -1)
+ elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ subject = self.subject_msg(result._result['exception'], failtype, -1)
+ else:
+ subject = '%s: %s' % (failtype, result._task.name or result._task.action)
+
+ # Make playbook name visible (e.g. in Outlook/Gmail condensed view)
+ body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
+ if result._task.name:
+ body += 'Task: %s\n' % result._task.name
+ body += 'Module: %s\n' % result._task.action
+ body += 'Host: %s\n' % host
+ body += '\n'
+
+ # Add task information (as much as possible)
+ body += 'The following task failed:\n\n'
+ if 'invocation' in result._result:
+ body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
+ elif result._task.name:
+ body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
+ else:
+ body += self.indent('%s\n' % result._task.action)
+ body += '\n'
+
+ # Add item / message
+ if self.itembody:
+ body += self.itembody
+ elif result._result.get('failed_when_result') is True:
+ body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
+ elif result._result.get('msg'):
+ body += self.body_blob(result._result['msg'], 'message')
+
+ # Add stdout / stderr / exception / warnings / deprecations
+ if result._result.get('stdout'):
+ body += self.body_blob(result._result['stdout'], 'standard output')
+ if result._result.get('stderr'):
+ body += self.body_blob(result._result['stderr'], 'error output')
+ if result._result.get('exception'): # Unrelated exceptions are added to output :-/
+ body += self.body_blob(result._result['exception'], 'exception')
+        if result._result.get('warnings'):
+            for i in range(len(result._result.get('warnings'))):
+                body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
+        if result._result.get('deprecations'):
+            for i in range(len(result._result.get('deprecations'))):
+                body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
+
+ body += 'and a complete dump of the error:\n\n'
+ body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
+
+ self.mail(subject=subject, body=body)
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook = playbook
+ self.itembody = ''
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors:
+ return
+
+ self.mail_result(result, 'Failed')
+
+ def v2_runner_on_unreachable(self, result):
+ self.mail_result(result, 'Unreachable')
+
+ def v2_runner_on_async_failed(self, result):
+ self.mail_result(result, 'Async failure')
+
+ def v2_runner_item_on_failed(self, result):
+ # Pass item information to task failure
+ self.itemsubject = result._result['msg']
+ self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
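
For reference, everything above is driven by the plugin's options; a minimal
ansible.cfg sketch for enabling and configuring the mail callback (addresses
are illustrative, and the `to`, `mta` and `mtaport` options read in
set_options() are configured the same way):

    [defaults]
    callback_whitelist = community.general.mail

    [callback_mail]
    sender = "Ansible runner" <ansible@example.com>
    cc = oncall@example.com
    bcc = audit@example.com
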
diff --git a/ansible_collections/community/general/plugins/callback/nrdp.py b/ansible_collections/community/general/plugins/callback/nrdp.py
new file mode 100644
index 000000000..c16a3c7be
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/nrdp.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Remi Verchere <remi@verchere.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: nrdp
+ type: notification
+ author: "Remi VERCHERE (@rverchere)"
+    short_description: Post task results to a Nagios server through NRDP
+    description:
+        - This callback sends playbook results to Nagios.
+        - Nagios must use NRDP to receive passive events.
+ - The passive check is sent to a dedicated host/service for Ansible.
+ options:
+ url:
+ description: URL of the nrdp server.
+ required: true
+ env:
+                - name: NRDP_URL
+ ini:
+ - section: callback_nrdp
+ key: url
+ type: string
+ validate_certs:
+ description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.)
+ env:
+ - name: NRDP_VALIDATE_CERTS
+ ini:
+ - section: callback_nrdp
+ key: validate_nrdp_certs
+ - section: callback_nrdp
+ key: validate_certs
+ type: boolean
+ default: false
+ aliases: [ validate_nrdp_certs ]
+ token:
+ description: Token to be allowed to push nrdp events.
+ required: true
+ env:
+ - name: NRDP_TOKEN
+ ini:
+ - section: callback_nrdp
+ key: token
+ type: string
+ hostname:
+ description: Hostname where the passive check is linked to.
+ required: true
+ env:
+                - name: NRDP_HOSTNAME
+ ini:
+ - section: callback_nrdp
+ key: hostname
+ type: string
+ servicename:
+ description: Service where the passive check is linked to.
+ required: true
+ env:
+                - name: NRDP_SERVICENAME
+ ini:
+ - section: callback_nrdp
+ key: servicename
+ type: string
+'''
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ '''
+ send ansible-playbook to Nagios server using nrdp protocol
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.nrdp'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ # Nagios states
+ OK = 0
+ WARNING = 1
+ CRITICAL = 2
+ UNKNOWN = 3
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ self.printed_playbook = False
+ self.playbook_name = None
+ self.play = None
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+ if not self.url.endswith('/'):
+ self.url += '/'
+ self.token = self.get_option('token')
+ self.hostname = self.get_option('hostname')
+ self.servicename = self.get_option('servicename')
+ self.validate_nrdp_certs = self.get_option('validate_certs')
+
+        # All four options are required; disable the plugin when any is missing.
+        if None in (self.url, self.token, self.hostname, self.servicename):
+            self._display.warning("NRDP callback requires the NRDP_URL,"
+                                  " NRDP_TOKEN, NRDP_HOSTNAME and"
+                                  " NRDP_SERVICENAME environment variables"
+                                  " (or the matching ansible.cfg settings)."
+                                  " The NRDP callback plugin is disabled.")
+ self.disabled = True
+
+ def _send_nrdp(self, state, msg):
+ '''
+        nrdp service check sends XMLDATA like this:
+ <?xml version='1.0'?>
+ <checkresults>
+ <checkresult type='service'>
+ <hostname>somehost</hostname>
+ <servicename>someservice</servicename>
+ <state>1</state>
+ <output>WARNING: Danger Will Robinson!|perfdata</output>
+ </checkresult>
+ </checkresults>
+ '''
+ xmldata = "<?xml version='1.0'?>\n"
+ xmldata += "<checkresults>\n"
+ xmldata += "<checkresult type='service'>\n"
+ xmldata += "<hostname>%s</hostname>\n" % self.hostname
+ xmldata += "<servicename>%s</servicename>\n" % self.servicename
+ xmldata += "<state>%d</state>\n" % state
+ xmldata += "<output>%s</output>\n" % msg
+ xmldata += "</checkresult>\n"
+ xmldata += "</checkresults>\n"
+
+ body = {
+ 'cmd': 'submitcheck',
+ 'token': self.token,
+ 'XMLDATA': to_bytes(xmldata)
+ }
+
+ try:
+ response = open_url(self.url,
+ data=urlencode(body),
+ method='POST',
+ validate_certs=self.validate_nrdp_certs)
+ return response.read()
+ except Exception as ex:
+ self._display.warning("NRDP callback cannot send result {0}".format(ex))
+
+ def v2_playbook_on_play_start(self, play):
+ '''
+ Display Playbook and play start messages
+ '''
+ self.play = play
+
+ def v2_playbook_on_stats(self, stats):
+ '''
+ Display info about playbook statistics
+ '''
+ name = self.play
+ gstats = ""
+ hosts = sorted(stats.processed.keys())
+ critical = warning = 0
+ for host in hosts:
+ stat = stats.summarize(host)
+            # Implicit string concatenation avoids embedding the line
+            # continuation whitespace in the perfdata output.
+            gstats += ("'%s_ok'=%d '%s_changed'=%d "
+                       "'%s_unreachable'=%d '%s_failed'=%d ") % \
+                (host, stat['ok'], host, stat['changed'],
+                 host, stat['unreachable'], host, stat['failures'])
+ # Critical when failed tasks or unreachable host
+ critical += stat['failures']
+ critical += stat['unreachable']
+ # Warning when changed tasks
+ warning += stat['changed']
+
+ msg = "%s | %s" % (name, gstats)
+ if critical:
+ # Send Critical
+ self._send_nrdp(self.CRITICAL, msg)
+ elif warning:
+ # Send Warning
+ self._send_nrdp(self.WARNING, msg)
+ else:
+ # Send OK
+ self._send_nrdp(self.OK, msg)
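
Since every option above also exposes an environment variable, a quick way to
try the plugin is to whitelist it and export the four required values (the
URL and token below are placeholders):

    [defaults]
    callback_whitelist = community.general.nrdp

    export NRDP_URL=https://nagios.example.com/nrdp/
    export NRDP_TOKEN=s3cr3t-token
    export NRDP_HOSTNAME=ansible-passive
    export NRDP_SERVICENAME=ansible-playbooks
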
diff --git a/ansible_collections/community/general/plugins/callback/null.py b/ansible_collections/community/general/plugins/callback/null.py
new file mode 100644
index 000000000..f53a24294
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/null.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: 'null'
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: Don't display stuff to screen
+ description:
+        - This callback prevents outputting events to the screen.
+'''
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+    This callback won't print messages to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.null'
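
Because this is a stdout callback, it is enabled by selecting it as the main
display callback rather than by whitelisting; a minimal ansible.cfg sketch:

    [defaults]
    stdout_callback = community.general.null
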
diff --git a/ansible_collections/community/general/plugins/callback/opentelemetry.py b/ansible_collections/community/general/plugins/callback/opentelemetry.py
new file mode 100644
index 000000000..e00e1d71a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/opentelemetry.py
@@ -0,0 +1,569 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Victor Martinez (@v1v) <VictorMartinezRubio@gmail.com>
+ name: opentelemetry
+ type: notification
+ short_description: Create distributed traces with OpenTelemetry
+ version_added: 3.7.0
+ description:
+ - This callback creates distributed traces for each Ansible task with OpenTelemetry.
+ - You can configure the OpenTelemetry exporter and SDK with environment variables.
+ - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html).
+ - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables).
+ options:
+ hide_task_arguments:
+ default: false
+ type: bool
+ description:
+ - Hide the arguments for a task.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS
+ ini:
+ - section: callback_opentelemetry
+ key: hide_task_arguments
+ version_added: 5.3.0
+ enable_from_environment:
+ type: str
+ description:
+ - Whether to enable this callback only if the given environment variable exists and it is set to C(true).
+        - This is handy when you use Configuration as Code and want to send distributed traces
+          only when running in CI rather than when running Ansible locally.
+        - To do so, it evaluates the given I(enable_from_environment) value as an environment variable
+          and enables this plugin if that variable is set to C(true).
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT
+ ini:
+ - section: callback_opentelemetry
+ key: enable_from_environment
+ version_added: 5.3.0
+ version_added: 3.8.0
+ otel_service_name:
+ default: ansible
+ type: str
+ description:
+ - The service name resource attribute.
+ env:
+ - name: OTEL_SERVICE_NAME
+ ini:
+ - section: callback_opentelemetry
+ key: otel_service_name
+ version_added: 5.3.0
+ traceparent:
+ default: None
+ type: str
+ description:
+ - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header).
+ env:
+ - name: TRACEPARENT
+ disable_logs:
+ default: false
+ type: bool
+ description:
+ - Disable sending logs.
+ env:
+ - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS
+ ini:
+ - section: callback_opentelemetry
+ key: disable_logs
+ version_added: 5.8.0
+ requirements:
+ - opentelemetry-api (Python library)
+ - opentelemetry-exporter-otlp (Python library)
+ - opentelemetry-sdk (Python library)
+'''
+
+
+EXAMPLES = '''
+examples: |
+ Enable the plugin in ansible.cfg:
+ [defaults]
+ callbacks_enabled = community.general.opentelemetry
+ [callback_opentelemetry]
+ enable_from_environment = ANSIBLE_OPENTELEMETRY_ENABLED
+
+ Set the environment variable:
+ export OTEL_EXPORTER_OTLP_ENDPOINT=<your endpoint (OTLP/HTTP)>
+ export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token"
+ export OTEL_SERVICE_NAME=your_service_name
+ export ANSIBLE_OPENTELEMETRY_ENABLED=true
+'''
+
+import getpass
+import os
+import socket
+import sys
+import time
+import uuid
+
+from collections import OrderedDict
+from os.path import basename
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import raise_from
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.plugins.callback import CallbackBase
+
+try:
+ from opentelemetry import trace
+ from opentelemetry.trace import SpanKind
+ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+ from opentelemetry.sdk.resources import SERVICE_NAME, Resource
+ from opentelemetry.trace.status import Status, StatusCode
+ from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import (
+ BatchSpanProcessor
+ )
+
+ # Support for opentelemetry-api <= 1.12
+ try:
+ from opentelemetry.util._time import _time_ns
+ except ImportError as imp_exc:
+ OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
+ else:
+ OTEL_LIBRARY_TIME_NS_ERROR = None
+
+except ImportError as imp_exc:
+ OTEL_LIBRARY_IMPORT_ERROR = imp_exc
+ OTEL_LIBRARY_TIME_NS_ERROR = imp_exc
+else:
+ OTEL_LIBRARY_IMPORT_ERROR = None
+
+
+if sys.version_info >= (3, 7):
+ time_ns = time.time_ns
+elif not OTEL_LIBRARY_TIME_NS_ERROR:
+ time_ns = _time_ns
+else:
+ def time_ns():
+ # Support versions older than 3.7 with opentelemetry-api > 1.12
+ return int(time.time() * 1e9)
+
+
+class TaskData:
+ """
+ Data about an individual task.
+ """
+
+ def __init__(self, uuid, name, path, play, action, args):
+ self.uuid = uuid
+ self.name = name
+ self.path = path
+ self.play = play
+ self.host_data = OrderedDict()
+ self.start = time_ns()
+ self.action = action
+ self.args = args
+ self.dump = None
+
+ def add_host(self, host):
+ if host.uuid in self.host_data:
+ if host.status == 'included':
+ # concatenate task include output from multiple items
+ host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
+ else:
+ return
+
+ self.host_data[host.uuid] = host
+
+
+class HostData:
+ """
+ Data about an individual host.
+ """
+
+ def __init__(self, uuid, name, status, result):
+ self.uuid = uuid
+ self.name = name
+ self.status = status
+ self.result = result
+ self.finish = time_ns()
+
+
+class OpenTelemetrySource(object):
+ def __init__(self, display):
+ self.ansible_playbook = ""
+ self.ansible_version = None
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ try:
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ except Exception as e:
+ self.ip_address = None
+ self.user = getpass.getuser()
+
+ self._display = display
+
+ def traceparent_context(self, traceparent):
+ carrier = dict()
+ carrier['traceparent'] = traceparent
+ return TraceContextTextMapPropagator().extract(carrier=carrier)
+
+ def start_task(self, tasks_data, hide_task_arguments, play_name, task):
+ """ record the start of a task for one or more hosts """
+
+ uuid = task._uuid
+
+ if uuid in tasks_data:
+ return
+
+ name = task.get_name().strip()
+ path = task.get_path()
+ action = task.action
+ args = None
+
+ if not task.no_log and not hide_task_arguments:
+ args = task.args
+
+ tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args)
+
+ def finish_task(self, tasks_data, status, result, dump):
+ """ record the results of a task for a single host """
+
+ task_uuid = result._task._uuid
+
+ if hasattr(result, '_host') and result._host is not None:
+ host_uuid = result._host._uuid
+ host_name = result._host.name
+ else:
+ host_uuid = 'include'
+ host_name = 'include'
+
+ task = tasks_data[task_uuid]
+
+ if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = result._task_fields['args'].get('_ansible_version')
+
+ task.dump = dump
+ task.add_host(HostData(host_uuid, host_name, status, result))
+
+ def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs):
+ """ generate distributed traces from the collected TaskData and HostData """
+
+ tasks = []
+ parent_start_time = None
+ for task_uuid, task in tasks_data.items():
+ if parent_start_time is None:
+ parent_start_time = task.start
+ tasks.append(task)
+
+ trace.set_tracer_provider(
+ TracerProvider(
+ resource=Resource.create({SERVICE_NAME: otel_service_name})
+ )
+ )
+
+ processor = BatchSpanProcessor(OTLPSpanExporter())
+
+ trace.get_tracer_provider().add_span_processor(processor)
+
+ tracer = trace.get_tracer(__name__)
+
+ with tracer.start_as_current_span(ansible_playbook, context=self.traceparent_context(traceparent),
+ start_time=parent_start_time, kind=SpanKind.SERVER) as parent:
+ parent.set_status(status)
+ # Populate trace metadata attributes
+ if self.ansible_version is not None:
+ parent.set_attribute("ansible.version", self.ansible_version)
+ parent.set_attribute("ansible.session", self.session)
+ parent.set_attribute("ansible.host.name", self.host)
+ if self.ip_address is not None:
+ parent.set_attribute("ansible.host.ip", self.ip_address)
+ parent.set_attribute("ansible.host.user", self.user)
+ for task in tasks:
+ for host_uuid, host_data in task.host_data.items():
+ with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span:
+ self.update_span_data(task, host_data, span, disable_logs)
+
+ def update_span_data(self, task_data, host_data, span, disable_logs):
+ """ update the span with the given TaskData and HostData """
+
+ name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
+
+ message = 'success'
+ res = {}
+ rc = 0
+ status = Status(status_code=StatusCode.OK)
+ if host_data.status != 'included':
+ # Support loops
+ if 'results' in host_data.result._result:
+ if host_data.status == 'failed':
+ message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action)
+ enriched_error_message = self.enrich_error_message_from_results(host_data.result._result['results'], task_data.action)
+ else:
+ res = host_data.result._result
+ rc = res.get('rc', 0)
+ if host_data.status == 'failed':
+ message = self.get_error_message(res)
+ enriched_error_message = self.enrich_error_message(res)
+
+ if host_data.status == 'failed':
+ status = Status(status_code=StatusCode.ERROR, description=message)
+ # Record an exception with the task message
+ span.record_exception(BaseException(enriched_error_message))
+ elif host_data.status == 'skipped':
+ message = res['skip_reason'] if 'skip_reason' in res else 'skipped'
+ status = Status(status_code=StatusCode.UNSET)
+ elif host_data.status == 'ignored':
+ status = Status(status_code=StatusCode.UNSET)
+
+ span.set_status(status)
+ if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action:
+ names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys())
+ values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values())
+            self.set_span_attribute(span, "ansible.task.args.name", names)
+            self.set_span_attribute(span, "ansible.task.args.value", values)
+ self.set_span_attribute(span, "ansible.task.module", task_data.action)
+ self.set_span_attribute(span, "ansible.task.message", message)
+ self.set_span_attribute(span, "ansible.task.name", name)
+ self.set_span_attribute(span, "ansible.task.result", rc)
+ self.set_span_attribute(span, "ansible.task.host.name", host_data.name)
+ self.set_span_attribute(span, "ansible.task.host.status", host_data.status)
+        # This allows enriching the service map
+ self.add_attributes_for_service_map_if_possible(span, task_data)
+ # Send logs
+ if not disable_logs:
+ span.add_event(task_data.dump)
+ span.end(end_time=host_data.finish)
+
+ def set_span_attribute(self, span, attributeName, attributeValue):
+ """ update the span attribute with the given attribute and value if not None """
+
+ if span is None and self._display is not None:
+ self._display.warning('span object is None. Please double check if that is expected.')
+ else:
+ if attributeValue is not None:
+ span.set_attribute(attributeName, attributeValue)
+
+ def add_attributes_for_service_map_if_possible(self, span, task_data):
+ """Update the span attributes with the service that the task interacted with, if possible."""
+
+ redacted_url = self.parse_and_redact_url_if_possible(task_data.args)
+ if redacted_url:
+ self.set_span_attribute(span, "http.url", redacted_url.geturl())
+
+ @staticmethod
+ def parse_and_redact_url_if_possible(args):
+ """Parse and redact the url, if possible."""
+
+ try:
+ parsed_url = urlparse(OpenTelemetrySource.url_from_args(args))
+ except ValueError:
+ return None
+
+ if OpenTelemetrySource.is_valid_url(parsed_url):
+ return OpenTelemetrySource.redact_user_password(parsed_url)
+ return None
+
+ @staticmethod
+ def url_from_args(args):
+ # the order matters
+ url_args = ("url", "api_url", "baseurl", "repo", "server_url", "chart_repo_url", "registry_url", "endpoint", "uri", "updates_url")
+ for arg in url_args:
+ if args is not None and args.get(arg):
+ return args.get(arg)
+ return ""
+
+ @staticmethod
+ def redact_user_password(url):
+ return url._replace(netloc=url.hostname) if url.password else url
+
+ @staticmethod
+ def is_valid_url(url):
+ if all([url.scheme, url.netloc, url.hostname]):
+ return "{{" not in url.hostname
+ return False
+
+ @staticmethod
+ def transform_ansible_unicode_to_str(value):
+ parsed_url = urlparse(str(value))
+ if OpenTelemetrySource.is_valid_url(parsed_url):
+ return OpenTelemetrySource.redact_user_password(parsed_url).geturl()
+ return str(value)
+
+ @staticmethod
+ def get_error_message(result):
+ if result.get('exception') is not None:
+ return OpenTelemetrySource._last_line(result['exception'])
+ return result.get('msg', 'failed')
+
+ @staticmethod
+ def get_error_message_from_results(results, action):
+ for result in results:
+ if result.get('failed', False):
+ return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result))
+
+ @staticmethod
+ def _last_line(text):
+ lines = text.strip().split('\n')
+ return lines[-1]
+
+ @staticmethod
+ def enrich_error_message(result):
+ message = result.get('msg', 'failed')
+ exception = result.get('exception')
+ stderr = result.get('stderr')
+ return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr)
+
+ @staticmethod
+ def enrich_error_message_from_results(results, action):
+ message = ""
+ for result in results:
+ if result.get('failed', False):
+ message = ('{0}({1}) - {2}\n{3}').format(action, result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message)
+ return message
+
+
+class CallbackModule(CallbackBase):
+ """
+ This callback creates distributed traces.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.opentelemetry'
+ CALLBACK_NEEDS_ENABLED = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.hide_task_arguments = None
+ self.disable_logs = None
+ self.otel_service_name = None
+ self.ansible_playbook = None
+ self.play_name = None
+ self.tasks_data = None
+ self.errors = 0
+ self.disabled = False
+ self.traceparent = False
+
+ if OTEL_LIBRARY_IMPORT_ERROR:
+ raise_from(
+                AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` and `opentelemetry-sdk` Python libraries must be installed to use this plugin'),
+ OTEL_LIBRARY_IMPORT_ERROR)
+
+ self.tasks_data = OrderedDict()
+
+ self.opentelemetry = OpenTelemetrySource(display=self._display)
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys,
+ var_options=var_options,
+ direct=direct)
+
+ environment_variable = self.get_option('enable_from_environment')
+ if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true':
+ self.disabled = True
+ self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. "
+ "Disabling the `opentelemetry` callback plugin.".format(environment_variable))
+
+ self.hide_task_arguments = self.get_option('hide_task_arguments')
+
+ self.disable_logs = self.get_option('disable_logs')
+
+ self.otel_service_name = self.get_option('otel_service_name')
+
+ if not self.otel_service_name:
+ self.otel_service_name = 'ansible'
+
+ # See https://github.com/open-telemetry/opentelemetry-specification/issues/740
+ self.traceparent = self.get_option('traceparent')
+
+ def v2_playbook_on_start(self, playbook):
+ self.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_play_start(self, play):
+ self.play_name = play.get_name()
+
+ def v2_runner_on_no_hosts(self, task):
+ self.opentelemetry.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.opentelemetry.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self.opentelemetry.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.opentelemetry.start_task(
+ self.tasks_data,
+ self.hide_task_arguments,
+ self.play_name,
+ task
+ )
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if ignore_errors:
+ status = 'ignored'
+ else:
+ status = 'failed'
+ self.errors += 1
+
+ self.opentelemetry.finish_task(
+ self.tasks_data,
+ status,
+ result,
+ self._dump_results(result._result)
+ )
+
+ def v2_runner_on_ok(self, result):
+ self.opentelemetry.finish_task(
+ self.tasks_data,
+ 'ok',
+ result,
+ self._dump_results(result._result)
+ )
+
+ def v2_runner_on_skipped(self, result):
+ self.opentelemetry.finish_task(
+ self.tasks_data,
+ 'skipped',
+ result,
+ self._dump_results(result._result)
+ )
+
+ def v2_playbook_on_include(self, included_file):
+ self.opentelemetry.finish_task(
+ self.tasks_data,
+ 'included',
+ included_file,
+ ""
+ )
+
+ def v2_playbook_on_stats(self, stats):
+ if self.errors == 0:
+ status = Status(status_code=StatusCode.OK)
+ else:
+ status = Status(status_code=StatusCode.ERROR)
+ self.opentelemetry.generate_distributed_traces(
+ self.otel_service_name,
+ self.ansible_playbook,
+ self.tasks_data,
+ status,
+ self.traceparent,
+ self.disable_logs
+ )
+
+ def v2_runner_on_async_failed(self, result, **kwargs):
+ self.errors += 1
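
The traceparent option above accepts a standard W3C Trace Context header, so
a playbook run can be attached to a trace started by another system. A
sketch using the example trace ID from the W3C specification (the playbook
name is illustrative):

    export TRACEPARENT=00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
    ANSIBLE_CALLBACKS_ENABLED=community.general.opentelemetry ansible-playbook site.yml
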
diff --git a/ansible_collections/community/general/plugins/callback/say.py b/ansible_collections/community/general/plugins/callback/say.py
new file mode 100644
index 000000000..005725a22
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/say.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: say
+ type: notification
+ requirements:
+ - whitelisting in configuration
+ - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program
+ short_description: notify using software speech synthesizer
+ description:
+ - This plugin will use the C(say) or C(espeak) program to "speak" about play events.
+ notes:
+      - In Ansible 2.8, this callback was renamed from C(osx_say) to M(community.general.say).
+'''
+
+import platform
+import subprocess
+import os
+
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ makes Ansible much more exciting.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.say'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ self.FAILED_VOICE = None
+ self.REGULAR_VOICE = None
+ self.HAPPY_VOICE = None
+ self.LASER_VOICE = None
+
+ try:
+ self.synthesizer = get_bin_path('say')
+ if platform.system() != 'Darwin':
+                # The 'say' binary is available, but it might be the GNUstep tool, which does not support the 'voice' parameter
+ self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system())
+ else:
+ self.FAILED_VOICE = 'Zarvox'
+ self.REGULAR_VOICE = 'Trinoids'
+ self.HAPPY_VOICE = 'Cellos'
+ self.LASER_VOICE = 'Princess'
+ except ValueError:
+ try:
+ self.synthesizer = get_bin_path('espeak')
+ self.FAILED_VOICE = 'klatt'
+ self.HAPPY_VOICE = 'f5'
+ self.LASER_VOICE = 'whisper'
+ except ValueError:
+ self.synthesizer = None
+
+        # The plugin disables itself if neither 'say' nor 'espeak' is present;
+        # Ansible will not call any callback if disabled is set to True.
+ if not self.synthesizer:
+ self.disabled = True
+ self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__))
+
+ def say(self, msg, voice):
+ cmd = [self.synthesizer, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ subprocess.call(cmd)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_ok(self, host, res):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_skipped(self, host, item=None):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_unreachable(self, host, res):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def runner_on_async_ok(self, host, res, jid):
+ self.say("pew", self.LASER_VOICE)
+
+ def runner_on_async_failed(self, host, res, jid):
+ self.say("Failure on host %s" % host, self.FAILED_VOICE)
+
+ def playbook_on_start(self):
+ self.say("Running Playbook", self.REGULAR_VOICE)
+
+ def playbook_on_notify(self, host, handler):
+ self.say("pew", self.LASER_VOICE)
+
+ def playbook_on_task_start(self, name, is_conditional):
+ if not is_conditional:
+ self.say("Starting task: %s" % name, self.REGULAR_VOICE)
+ else:
+ self.say("Notifying task: %s" % name, self.REGULAR_VOICE)
+
+ def playbook_on_setup(self):
+ self.say("Gathering facts", self.REGULAR_VOICE)
+
+ def playbook_on_play_start(self, name):
+ self.say("Starting play: %s" % name, self.HAPPY_VOICE)
+
+ def playbook_on_stats(self, stats):
+ self.say("Play complete", self.HAPPY_VOICE)
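
Like the other notification callbacks in this batch, say must be whitelisted
before Ansible loads it; the binary check in __init__() then picks say,
falls back to espeak, or disables the plugin:

    [defaults]
    callback_whitelist = community.general.say
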
diff --git a/ansible_collections/community/general/plugins/callback/selective.py b/ansible_collections/community/general/plugins/callback/selective.py
new file mode 100644
index 000000000..526975bd2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/selective.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Fastly, inc 2016
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: selective
+ type: stdout
+ requirements:
+ - set as main display callback
+ short_description: only print certain tasks
+ description:
+ - This callback only prints tasks that have been tagged with C(print_action) or that have failed.
+        This allows operators to focus only on the tasks that provide value.
+      - A C(.) is printed in place of each task that is not printed.
+ - If you increase verbosity all tasks are printed.
+ options:
+ nocolor:
+ default: false
+ description: This setting allows suppressing colorizing output.
+ env:
+ - name: ANSIBLE_NOCOLOR
+ - name: ANSIBLE_SELECTIVE_DONT_COLORIZE
+ ini:
+ - section: defaults
+ key: nocolor
+ type: boolean
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug: msg="This will not be printed"
+ - ansible.builtin.debug: msg="But this will"
+ tags: [print_action]
+"""
+
+import difflib
+
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.module_utils.common.text.converters import to_text
+
+try:
+ codeCodes = C.COLOR_CODES
+except AttributeError:
+ # This constant was moved to ansible.constants in
+ # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67
+ # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions,
+ # we include from the original location.
+ from ansible.utils.color import codeCodes
+
+
+DONT_COLORIZE = False
+COLORS = {
+ 'normal': '\033[0m',
+ 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]),
+ 'bold': '\033[1m',
+ 'not_so_bold': '\033[1m\033[34m',
+ 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]),
+ 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]),
+ 'endc': '\033[0m',
+ 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]),
+}
+
+
+def dict_diff(prv, nxt):
+ """Return a dict of keys that differ with another config object."""
+ keys = set(list(prv.keys()) + list(nxt.keys()))
+ result = {}
+ for k in keys:
+ if prv.get(k) != nxt.get(k):
+ result[k] = (prv.get(k), nxt.get(k))
+ return result
+
+
+def colorize(msg, color):
+ """Given a string add necessary codes to format the string."""
+ if DONT_COLORIZE:
+ return msg
+ else:
+ return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc'])
+
+
+class CallbackModule(CallbackBase):
+ """selective.py callback plugin."""
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.selective'
+
+ def __init__(self, display=None):
+ """selective.py callback plugin."""
+ super(CallbackModule, self).__init__(display)
+ self.last_skipped = False
+ self.last_task_name = None
+ self.printed_last_task = False
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ global DONT_COLORIZE
+ DONT_COLORIZE = self.get_option('nocolor')
+
+ def _print_task(self, task_name=None):
+ if task_name is None:
+ task_name = self.last_task_name
+
+ if not self.printed_last_task:
+ self.printed_last_task = True
+ line_length = 120
+ if self.last_skipped:
+ print()
+ msg = colorize("# {0} {1}".format(task_name,
+ '*' * (line_length - len(task_name))), 'bold')
+ print(msg)
+
+ def _indent_text(self, text, indent_level):
+ lines = text.splitlines()
+ result_lines = []
+        for line in lines:
+            result_lines.append("{0}{1}".format(' ' * indent_level, line))
+ return '\n'.join(result_lines)
+
+ def _print_diff(self, diff, indent_level):
+ if isinstance(diff, dict):
+ try:
+ diff = '\n'.join(difflib.unified_diff(diff['before'].splitlines(),
+ diff['after'].splitlines(),
+ fromfile=diff.get('before_header',
+ 'new_file'),
+ tofile=diff['after_header']))
+ except AttributeError:
+ diff = dict_diff(diff['before'], diff['after'])
+ if diff:
+ diff = colorize(str(diff), 'changed')
+ print(self._indent_text(diff, indent_level + 4))
+
+ def _print_host_or_item(self, host_or_item, changed, msg, diff, is_host, error, stdout, stderr):
+ if is_host:
+ indent_level = 0
+ name = colorize(host_or_item.name, 'not_so_bold')
+ else:
+ indent_level = 4
+ if isinstance(host_or_item, dict):
+ if 'key' in host_or_item.keys():
+ host_or_item = host_or_item['key']
+ name = colorize(to_text(host_or_item), 'bold')
+
+ if error:
+ color = 'failed'
+ change_string = colorize('FAILED!!!', color)
+ else:
+ color = 'changed' if changed else 'ok'
+ change_string = colorize("changed={0}".format(changed), color)
+
+ msg = colorize(msg, color)
+
+ line_length = 120
+ spaces = ' ' * (40 - len(name) - indent_level)
+ line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string)
+
+ if len(msg) < 50:
+ line += ' -- {0}'.format(msg)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+ print(self._indent_text(msg, indent_level + 4))
+
+ if diff:
+ self._print_diff(diff, indent_level)
+ if stdout:
+ stdout = colorize(stdout, 'failed')
+ print(self._indent_text(stdout, indent_level + 4))
+ if stderr:
+ stderr = colorize(stderr, 'failed')
+ print(self._indent_text(stderr, indent_level + 4))
+
+ def v2_playbook_on_play_start(self, play):
+ """Run on start of the play."""
+ pass
+
+ def v2_playbook_on_task_start(self, task, **kwargs):
+ """Run when a task starts."""
+ self.last_task_name = task.get_name()
+ self.printed_last_task = False
+
+ def _print_task_result(self, result, error=False, **kwargs):
+ """Run when a task finishes correctly."""
+
+ if 'print_action' in result._task.tags or error or self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+ msg = to_text(result._result.get('msg', '')) or\
+ to_text(result._result.get('reason', ''))
+
+ stderr = [result._result.get('exception', None),
+ result._result.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(result._host,
+ result._result.get('changed', False),
+ msg,
+ result._result.get('diff', None),
+ is_host=True,
+ error=error,
+ stdout=result._result.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ if 'results' in result._result:
+ for r in result._result['results']:
+ failed = 'failed' in r and r['failed']
+
+ stderr = [r.get('exception', None), r.get('module_stderr', None)]
+ stderr = "\n".join([e for e in stderr if e]).strip()
+
+ self._print_host_or_item(r['item'],
+ r.get('changed', False),
+ to_text(r.get('msg', '')),
+ r.get('diff', None),
+ is_host=False,
+ error=failed,
+ stdout=r.get('module_stdout', None),
+ stderr=stderr.strip(),
+ )
+ else:
+ self.last_skipped = True
+ print('.', end="")
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics."""
+ print()
+ self.printed_last_task = False
+ self._print_task('STATS')
+
+ hosts = sorted(stats.processed.keys())
+ for host in hosts:
+ s = stats.summarize(host)
+
+ if s['failures'] or s['unreachable']:
+ color = 'failed'
+ elif s['changed']:
+ color = 'changed'
+ else:
+ color = 'ok'
+
+ msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format(
+ host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored'])
+ print(colorize(msg, color))
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ """Run when a task is skipped."""
+ if self._display.verbosity > 1:
+ self._print_task()
+ self.last_skipped = False
+
+ line_length = 120
+ spaces = ' ' * (31 - len(result._host.name) - 4)
+
+ line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'),
+ spaces,
+ colorize("skipped", 'skipped'),)
+
+ reason = result._result.get('skipped_reason', '') or \
+ result._result.get('skip_reason', '')
+ if len(reason) < 50:
+ line += ' -- {0}'.format(reason)
+ print("{0} {1}---------".format(line, '-' * (line_length - len(line))))
+ else:
+ print("{0} {1}".format(line, '-' * (line_length - len(line))))
+            print(self._indent_text(reason, 8))
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self._print_task_result(result, error=False, **kwargs)
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self._print_task_result(result, error=True, **kwargs)
+
+ v2_playbook_on_handler_task_start = v2_playbook_on_task_start
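
Since selective replaces the default stdout display, it is enabled through
stdout_callback rather than a whitelist; combined with the EXAMPLES block
above, a minimal setup is:

    [defaults]
    stdout_callback = community.general.selective

    # optional: suppress colors (equivalent to the nocolor option)
    export ANSIBLE_SELECTIVE_DONT_COLORIZE=true
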
diff --git a/ansible_collections/community/general/plugins/callback/slack.py b/ansible_collections/community/general/plugins/callback/slack.py
new file mode 100644
index 000000000..e9b84bbb3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/slack.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2014-2015, Matt Martz <matt@sivel.net>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: slack
+ type: notification
+ requirements:
+ - whitelist in configuration
+      - whitelisting in configuration
+ short_description: Sends play events to a Slack channel
+ description:
+ - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.
+ - Before Ansible 2.4 only environment variables were available for configuring this plugin.
+ options:
+ webhook_url:
+ required: true
+ description: Slack Webhook URL.
+ env:
+ - name: SLACK_WEBHOOK_URL
+ ini:
+ - section: callback_slack
+ key: webhook_url
+ channel:
+ default: "#ansible"
+ description: Slack room to post in.
+ env:
+ - name: SLACK_CHANNEL
+ ini:
+ - section: callback_slack
+ key: channel
+ username:
+ description: Username to post as.
+ env:
+ - name: SLACK_USERNAME
+ default: ansible
+ ini:
+ - section: callback_slack
+ key: username
+ validate_certs:
+ description: Validate the SSL certificate of the Slack server for HTTPS URLs.
+ env:
+ - name: SLACK_VALIDATE_CERTS
+ ini:
+ - section: callback_slack
+ key: validate_certs
+ default: true
+ type: bool
+'''
+
+import json
+import os
+import uuid
+
+from ansible import context
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.urls import open_url
+from ansible.plugins.callback import CallbackBase
+
+try:
+ import prettytable
+ HAS_PRETTYTABLE = True
+except ImportError:
+ HAS_PRETTYTABLE = False
+
+
+class CallbackModule(CallbackBase):
+ """This is an ansible callback plugin that sends status
+ updates to a Slack channel during playbook execution.
+ """
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.slack'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+
+ super(CallbackModule, self).__init__(display=display)
+
+ if not HAS_PRETTYTABLE:
+ self.disabled = True
+ self._display.warning('The `prettytable` python module is not '
+ 'installed. Disabling the Slack callback '
+ 'plugin.')
+
+ self.playbook_name = None
+
+ # This is a 6 character identifier provided with each message
+        # This is a 6-character identifier provided with each message.
+        # It makes it easier to correlate messages when more than one
+        # playbook is running simultaneously.
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.webhook_url = self.get_option('webhook_url')
+ self.channel = self.get_option('channel')
+ self.username = self.get_option('username')
+ self.show_invocation = (self._display.verbosity > 1)
+ self.validate_certs = self.get_option('validate_certs')
+
+ if self.webhook_url is None:
+ self.disabled = True
+ self._display.warning('Slack Webhook URL was not provided. The '
+ 'Slack Webhook URL can be provided using '
+ 'the `SLACK_WEBHOOK_URL` environment '
+ 'variable.')
+
+ def send_msg(self, attachments):
+ headers = {
+ 'Content-type': 'application/json',
+ }
+
+ payload = {
+ 'channel': self.channel,
+ 'username': self.username,
+ 'attachments': attachments,
+ 'parse': 'none',
+ 'icon_url': ('https://cdn2.hubspot.net/hub/330046/'
+ 'file-449187601-png/ansible_badge.png'),
+ }
+
+ data = json.dumps(payload)
+ self._display.debug(data)
+ self._display.debug(self.webhook_url)
+ try:
+ response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs,
+ headers=headers)
+ return response.read()
+ except Exception as e:
+ self._display.warning(u'Could not submit message to Slack: %s' %
+ to_text(e))
+
+ def v2_playbook_on_start(self, playbook):
+ self.playbook_name = os.path.basename(playbook._file_name)
+
+ title = [
+ '*Playbook initiated* (_%s_)' % self.guid
+ ]
+
+ invocation_items = []
+ if context.CLIARGS and self.show_invocation:
+ tags = context.CLIARGS['tags']
+ skip_tags = context.CLIARGS['skip_tags']
+ extra_vars = context.CLIARGS['extra_vars']
+ subset = context.CLIARGS['subset']
+ inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']]
+
+ invocation_items.append('Inventory: %s' % ', '.join(inventory))
+ if tags and tags != ['all']:
+ invocation_items.append('Tags: %s' % ', '.join(tags))
+ if skip_tags:
+ invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))
+ if subset:
+ invocation_items.append('Limit: %s' % subset)
+ if extra_vars:
+ invocation_items.append('Extra Vars: %s' %
+ ' '.join(extra_vars))
+
+ title.append('by *%s*' % context.CLIARGS['remote_user'])
+
+ title.append('\n\n*%s*' % self.playbook_name)
+ msg_items = [' '.join(title)]
+ if invocation_items:
+ msg_items.append('```\n%s\n```' % '\n'.join(invocation_items))
+
+ msg = '\n'.join(msg_items)
+
+ attachments = [{
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }]
+
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_play_start(self, play):
+ """Display Play start messages"""
+
+ name = play.name or 'Play name not specified (%s)' % play._uuid
+ msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name)
+ attachments = [
+ {
+ 'fallback': msg,
+ 'text': msg,
+ 'color': 'warning',
+ 'mrkdwn_in': ['text', 'fallback', 'fields'],
+ }
+ ]
+ self.send_msg(attachments=attachments)
+
+ def v2_playbook_on_stats(self, stats):
+ """Display info about playbook statistics"""
+
+ hosts = sorted(stats.processed.keys())
+
+ t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',
+ 'Failures', 'Rescued', 'Ignored'])
+
+ failures = False
+ unreachable = False
+
+ for h in hosts:
+ s = stats.summarize(h)
+
+ if s['failures'] > 0:
+ failures = True
+ if s['unreachable'] > 0:
+ unreachable = True
+
+ t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',
+ 'failures', 'rescued', 'ignored']])
+
+ attachments = []
+ msg_items = [
+ '*Playbook Complete* (_%s_)' % self.guid
+ ]
+ if failures or unreachable:
+ color = 'danger'
+ msg_items.append('\n*Failed!*')
+ else:
+ color = 'good'
+ msg_items.append('\n*Success!*')
+
+ msg_items.append('```\n%s\n```' % t)
+
+ msg = '\n'.join(msg_items)
+
+ attachments.append({
+ 'fallback': msg,
+ 'fields': [
+ {
+ 'value': msg
+ }
+ ],
+ 'color': color,
+ 'mrkdwn_in': ['text', 'fallback', 'fields']
+ })
+
+ self.send_msg(attachments=attachments)
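
A minimal configuration sketch for the Slack callback, using the ini keys
defined above (the webhook URL is a placeholder; real URLs are issued by
Slack when an incoming webhook is created):

    [defaults]
    callback_whitelist = community.general.slack

    [callback_slack]
    webhook_url = https://hooks.slack.com/services/T000/B000/XXXX
    channel = #ansible
    username = ansible
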
diff --git a/ansible_collections/community/general/plugins/callback/splunk.py b/ansible_collections/community/general/plugins/callback/splunk.py
new file mode 100644
index 000000000..67ad944d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/splunk.py
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: splunk
+ type: notification
+ short_description: Sends task result events to Splunk HTTP Event Collector
+ author: "Stuart Hirst (!UNKNOWN) <support@convergingdata.com>"
+ description:
+ - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
+ - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/).
+ - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
+ requirements:
+ - Whitelisting this callback plugin
+      - 'Create an HTTP Event Collector in Splunk'
+ - 'Define the URL and token in C(ansible.cfg)'
+ options:
+ url:
+ description: URL to the Splunk HTTP collector source.
+ env:
+ - name: SPLUNK_URL
+ ini:
+ - section: callback_splunk
+ key: url
+ authtoken:
+ description: Token to authenticate the connection to the Splunk HTTP collector.
+ env:
+ - name: SPLUNK_AUTHTOKEN
+ ini:
+ - section: callback_splunk
+ key: authtoken
+ validate_certs:
+      description: Whether to validate certificates for connections to HEC. It is not recommended to set this to
+        C(false) except when you are sure that nobody can intercept the connection
+        between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks!
+ env:
+ - name: SPLUNK_VALIDATE_CERTS
+ ini:
+ - section: callback_splunk
+ key: validate_certs
+ type: bool
+ default: true
+ version_added: '1.0.0'
+ include_milliseconds:
+ description: Whether to include milliseconds as part of the generated timestamp field in the event
+ sent to the Splunk HTTP collector.
+ env:
+ - name: SPLUNK_INCLUDE_MILLISECONDS
+ ini:
+ - section: callback_splunk
+ key: include_milliseconds
+ type: bool
+ default: false
+ version_added: 2.0.0
+ batch:
+ description:
+ - Correlation ID which can be set across multiple playbook executions.
+ env:
+ - name: SPLUNK_BATCH
+ ini:
+ - section: callback_splunk
+ key: batch
+ type: str
+ version_added: 3.3.0
+'''
+
+EXAMPLES = '''
+examples: >
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.splunk
+ Set the environment variable
+ export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
+ Set the ansible.cfg variable in the callback_splunk block
+ [callback_splunk]
+ url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
+ authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SplunkHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, authtoken, validate_certs, include_milliseconds, batch, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ if batch is not None:
+ data['batch'] = batch
+ data['status'] = state
+
+ if include_milliseconds:
+ time_format = '%Y-%m-%d %H:%M:%S.%f +0000'
+ else:
+ time_format = '%Y-%m-%d %H:%M:%S +0000'
+
+ data['timestamp'] = datetime.utcnow().strftime(time_format)
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+        # This wraps the json payload in an outer json event needed by Splunk
+ jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True)
+ jsondata = '{"event":' + jsondata + "}"
+
+ open_url(
+ url,
+ jsondata,
+ headers={
+ 'Content-type': 'application/json',
+ 'Authorization': 'Splunk ' + authtoken
+ },
+ method='POST',
+ validate_certs=validate_certs
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.splunk'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.authtoken = None
+ self.validate_certs = None
+ self.include_milliseconds = None
+ self.batch = None
+ self.splunk = SplunkHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys,
+ var_options=var_options,
+ direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Splunk HTTP collector source URL was '
+ 'not provided. The Splunk HTTP collector '
+ 'source URL can be provided using the '
+ '`SPLUNK_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.authtoken = self.get_option('authtoken')
+
+ if self.authtoken is None:
+ self.disabled = True
+            self._display.warning('Splunk HTTP collector requires an authentication '
+ 'token. The Splunk HTTP collector '
+ 'authentication token can be provided using the '
+ '`SPLUNK_AUTHTOKEN` environment variable or '
+ 'in the ansible.cfg file.')
+
+ self.validate_certs = self.get_option('validate_certs')
+
+ self.include_milliseconds = self.get_option('include_milliseconds')
+
+ self.batch = self.get_option('batch')
+
+ def v2_playbook_on_start(self, playbook):
+ self.splunk.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ self.include_milliseconds,
+ self.batch,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ self.include_milliseconds,
+ self.batch,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ self.include_milliseconds,
+ self.batch,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ self.include_milliseconds,
+ self.batch,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.splunk.send_event(
+ self.url,
+ self.authtoken,
+ self.validate_certs,
+ self.include_milliseconds,
+ self.batch,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
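
For reference, send_event() wraps each payload in the envelope expected by
Splunk's HTTP Event Collector; the request body looks roughly like this
(field values are illustrative, and the full field set matches the data
dict built above):

    {"event": {
        "uuid": "a2b3c4d5-...",
        "session": "9f8e7d6c-...",
        "status": "OK",
        "timestamp": "2023-01-01 00:00:00 +0000",
        "host": "controller01",
        "ansible_host": "web01",
        "ansible_playbook": "site.yml",
        "runtime": 0.42
    }}
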
diff --git a/ansible_collections/community/general/plugins/callback/sumologic.py b/ansible_collections/community/general/plugins/callback/sumologic.py
new file mode 100644
index 000000000..998081c35
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/sumologic.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: sumologic
+type: notification
+short_description: Sends task result events to Sumologic
+author: "Ryan Currah (@ryancurrah)"
+description:
+ - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source.
+requirements:
+ - Whitelisting this callback plugin
+ - 'Create an HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator
+ of C("timestamp": "(.*)")'
+options:
+ url:
+ description: URL to the Sumologic HTTP collector source.
+ env:
+ - name: SUMOLOGIC_URL
+ ini:
+ - section: callback_sumologic
+ key: url
+'''
+
+EXAMPLES = '''
+examples: |
+ To enable, add this to your ansible.cfg file in the defaults block
+ [defaults]
+ callback_whitelist = community.general.sumologic
+
+ Set the environment variable
+ export SUMOLOGIC_URL=https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+
+ Set the ansible.cfg variable in the callback_sumologic block
+ [callback_sumologic]
+ url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp==
+'''
+
+import json
+import uuid
+import socket
+import getpass
+
+from datetime import datetime
+from os.path import basename
+
+from ansible.module_utils.urls import open_url
+from ansible.parsing.ajson import AnsibleJSONEncoder
+from ansible.plugins.callback import CallbackBase
+
+
+class SumologicHTTPCollectorSource(object):
+ def __init__(self):
+ self.ansible_check_mode = False
+ self.ansible_playbook = ""
+ self.ansible_version = ""
+ self.session = str(uuid.uuid4())
+ self.host = socket.gethostname()
+ self.ip_address = socket.gethostbyname(socket.gethostname())
+ self.user = getpass.getuser()
+
+ def send_event(self, url, state, result, runtime):
+ if result._task_fields['args'].get('_ansible_check_mode') is True:
+ self.ansible_check_mode = True
+
+ if result._task_fields['args'].get('_ansible_version'):
+ self.ansible_version = \
+ result._task_fields['args'].get('_ansible_version')
+
+ if result._task._role:
+ ansible_role = str(result._task._role)
+ else:
+ ansible_role = None
+
+ if 'args' in result._task_fields:
+ del result._task_fields['args']
+
+ data = {}
+ data['uuid'] = result._task._uuid
+ data['session'] = self.session
+ data['status'] = state
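+ # matches the custom timestamp format (yyyy-MM-dd HH:mm:ss ZZZZ) that the
+ # DOCUMENTATION above asks you to configure on the collector source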
+ data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S '
+ '+0000')
+ data['host'] = self.host
+ data['ip_address'] = self.ip_address
+ data['user'] = self.user
+ data['runtime'] = runtime
+ data['ansible_version'] = self.ansible_version
+ data['ansible_check_mode'] = self.ansible_check_mode
+ data['ansible_host'] = result._host.name
+ data['ansible_playbook'] = self.ansible_playbook
+ data['ansible_role'] = ansible_role
+ data['ansible_task'] = result._task_fields
+ data['ansible_result'] = result._result
+
+ open_url(
+ url,
+ data=json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True),
+ headers={
+ 'Content-type': 'application/json',
+ 'X-Sumo-Host': data['ansible_host']
+ },
+ method='POST'
+ )
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.sumologic'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self, display=None):
+ super(CallbackModule, self).__init__(display=display)
+ self.start_datetimes = {} # Collect task start times
+ self.url = None
+ self.sumologic = SumologicHTTPCollectorSource()
+
+ def _runtime(self, result):
+ return (
+ datetime.utcnow() -
+ self.start_datetimes[result._task._uuid]
+ ).total_seconds()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ self.url = self.get_option('url')
+
+ if self.url is None:
+ self.disabled = True
+ self._display.warning('Sumologic HTTP collector source URL was '
+ 'not provided. The Sumologic HTTP collector '
+ 'source URL can be provided using the '
+ '`SUMOLOGIC_URL` environment variable or '
+ 'in the ansible.cfg file.')
+
+ def v2_playbook_on_start(self, playbook):
+ self.sumologic.ansible_playbook = basename(playbook._file_name)
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self.start_datetimes[task._uuid] = datetime.utcnow()
+
+ def v2_runner_on_ok(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'OK',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_skipped(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'SKIPPED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def runner_on_async_failed(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'FAILED',
+ result,
+ self._runtime(result)
+ )
+
+ def v2_runner_on_unreachable(self, result, **kwargs):
+ self.sumologic.send_event(
+ self.url,
+ 'UNREACHABLE',
+ result,
+ self._runtime(result)
+ )
diff --git a/ansible_collections/community/general/plugins/callback/syslog_json.py b/ansible_collections/community/general/plugins/callback/syslog_json.py
new file mode 100644
index 000000000..0f5ec4d0d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/syslog_json.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: syslog_json
+ type: notification
+ requirements:
+ - whitelist in configuration
+ short_description: sends JSON events to syslog
+ description:
+ - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format.
+ - Before Ansible 2.9, only environment variables were available for configuration.
+ options:
+ server:
+ description: Syslog server that will receive the event.
+ env:
+ - name: SYSLOG_SERVER
+ default: localhost
+ ini:
+ - section: callback_syslog_json
+ key: syslog_server
+ port:
+ description: Port on which the syslog server is listening.
+ env:
+ - name: SYSLOG_PORT
+ default: 514
+ ini:
+ - section: callback_syslog_json
+ key: syslog_port
+ facility:
+ description: Syslog facility to log as.
+ env:
+ - name: SYSLOG_FACILITY
+ default: user
+ ini:
+ - section: callback_syslog_json
+ key: syslog_facility
+ setup:
+ description: Log setup tasks.
+ env:
+ - name: ANSIBLE_SYSLOG_SETUP
+ type: bool
+ default: true
+ ini:
+ - section: callback_syslog_json
+ key: syslog_setup
+ version_added: 4.5.0
+'''
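+
+# A short usage sketch; the server, port, and facility values below are
+# placeholders, not values mandated by the plugin.
+EXAMPLES = '''
+examples: |
+  To enable, add this to your ansible.cfg file in the defaults block
+    [defaults]
+    callback_whitelist = community.general.syslog_json
+
+  Then point the plugin at your syslog server, for example via environment variables
+    export SYSLOG_SERVER=syslog.example.com
+    export SYSLOG_PORT=514
+    export SYSLOG_FACILITY=daemon
+'''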
+
+import logging
+import logging.handlers
+
+import socket
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+ """
+ logs ansible-playbook and ansible runs to a syslog server in json format
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'community.general.syslog_json'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+
+ super(CallbackModule, self).__init__()
+
+ def set_options(self, task_keys=None, var_options=None, direct=None):
+
+ super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
+
+ syslog_host = self.get_option("server")
+ syslog_port = int(self.get_option("port"))
+ syslog_facility = self.get_option("facility")
+
+ self.logger = logging.getLogger('ansible logger')
+ self.logger.setLevel(logging.DEBUG)
+
+ self.handler = logging.handlers.SysLogHandler(
+ address=(syslog_host, syslog_port),
+ facility=syslog_facility
+ )
+ self.logger.addHandler(self.handler)
+ self.hostname = socket.gethostname()
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ res = result._result
+ host = result._host.get_name()
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def v2_runner_on_ok(self, result):
+ res = result._result
+ host = result._host.get_name()
+ if result._task.action != "gather_facts" or self.get_option("setup"):
+ self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def v2_runner_on_skipped(self, result):
+ host = result._host.get_name()
+ self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
+
+ def v2_runner_on_unreachable(self, result):
+ res = result._result
+ host = result._host.get_name()
+ self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def v2_runner_on_async_failed(self, result):
+ res = result._result
+ host = result._host.get_name()
+ jid = result._result.get('ansible_job_id')
+ self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ host = result._host.get_name()
+ self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ host = result._host.get_name()
+ self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
diff --git a/ansible_collections/community/general/plugins/callback/unixy.py b/ansible_collections/community/general/plugins/callback/unixy.py
new file mode 100644
index 000000000..02a2e46ba
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/unixy.py
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Allyson Bowles <@akatch>
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: unixy
+ type: stdout
+ author: Allyson Bowles (@akatch)
+ short_description: condensed Ansible output
+ description:
+ - Consolidated Ansible output in the style of LINUX/UNIX startup logs.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
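+
+# A minimal ansible.cfg sketch to satisfy the "set as stdout" requirement above:
+#
+#   [defaults]
+#   stdout_callback = community.general.unixy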
+
+from os.path import basename
+from ansible import constants as C
+from ansible import context
+from ansible.module_utils.common.text.converters import to_text
+from ansible.utils.color import colorize, hostcolor
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
+
+
+class CallbackModule(CallbackModule_default):
+
+ '''
+ Design goals:
+ - Print consolidated output that looks like a *NIX startup log
+ - Defaults should avoid displaying unnecessary information wherever possible
+
+ TODOs:
+ - Only display task names if the task runs on at least one host
+ - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line)
+ - Consolidate stats display
+ - Display whether run is in --check mode
+ - Don't show play name if no hosts found
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.unixy'
+
+ def _run_is_verbose(self, result):
+ return ((self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result)
+
+ def _get_task_display_name(self, task):
+ self.task_display_name = None
+ display_name = task.get_name().strip().split(" : ")
+
+ task_display_name = display_name[-1]
+ if task_display_name.startswith("include"):
+ return
+ else:
+ self.task_display_name = task_display_name
+
+ def _preprocess_result(self, result):
+ self.delegated_vars = result._result.get('_ansible_delegated_vars', None)
+ self._handle_exception(result._result, use_stderr=self.get_option('display_failed_stderr'))
+ self._handle_warnings(result._result)
+
+ def _process_result_output(self, result, msg):
+ task_host = result._host.get_name()
+ task_result = "%s %s" % (task_host, msg)
+
+ if self._run_is_verbose(result):
+ task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4))
+ return task_result
+
+ if self.delegated_vars:
+ task_delegate_host = self.delegated_vars['ansible_host']
+ task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg)
+
+ if result._result.get('msg') and result._result.get('msg') != "All items completed":
+ task_result += " | msg: " + to_text(result._result.get('msg'))
+
+ if result._result.get('stdout'):
+ task_result += " | stdout: " + result._result.get('stdout')
+
+ if result._result.get('stderr'):
+ task_result += " | stderr: " + result._result.get('stderr')
+
+ return task_result
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s..." % self.task_display_name)
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._get_task_display_name(task)
+ if self.task_display_name is not None:
+ self._display.display("%s (via handler)... " % self.task_display_name)
+
+ def v2_playbook_on_play_start(self, play):
+ name = play.get_name().strip()
+ if name and play.hosts:
+ msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts))
+ else:
+ msg = u"---"
+
+ self._display.display(msg)
+
+ def v2_runner_on_skipped(self, result, ignore_errors=False):
+ if self.get_option('display_skipped_hosts'):
+ self._preprocess_result(result)
+ display_color = C.COLOR_SKIP
+ msg = "skipped"
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ else:
+ return
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ self._preprocess_result(result)
+ display_color = C.COLOR_ERROR
+ msg = "failed"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
+
+ def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK):
+ self._preprocess_result(result)
+
+ result_was_changed = ('changed' in result._result and result._result['changed'])
+ if result_was_changed:
+ msg = "done"
+ item_value = self._get_item_label(result._result)
+ if item_value:
+ msg += " | item: %s" % (item_value,)
+ display_color = C.COLOR_CHANGED
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+ elif self.get_option('display_ok_hosts'):
+ task_result = self._process_result_output(result, msg)
+ self._display.display(" " + task_result, display_color)
+
+ def v2_runner_item_on_skipped(self, result):
+ self.v2_runner_on_skipped(result)
+
+ def v2_runner_item_on_failed(self, result):
+ self.v2_runner_on_failed(result)
+
+ def v2_runner_item_on_ok(self, result):
+ self.v2_runner_on_ok(result)
+
+ def v2_runner_on_unreachable(self, result):
+ self._preprocess_result(result)
+
+ msg = "unreachable"
+ display_color = C.COLOR_UNREACHABLE
+ task_result = self._process_result_output(result, msg)
+
+ self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr'))
+
+ def v2_on_file_diff(self, result):
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff'] and res.get('changed', False):
+ diff = self._get_diff(res['diff'])
+ if diff:
+ self._display.display(diff)
+ elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
+ diff = self._get_diff(result._result['diff'])
+ if diff:
+ self._display.display(diff)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.display("\n- Play recap -", screen_only=True)
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ # TODO how else can we display these?
+ t = stats.summarize(h)
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR),
+ colorize(u'rescued', t['rescued'], C.COLOR_OK),
+ colorize(u'ignored', t['ignored'], C.COLOR_WARN)),
+ screen_only=True
+ )
+
+ self._display.display(u" %s : %s %s %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None),
+ colorize(u'rescued', t['rescued'], None),
+ colorize(u'ignored', t['ignored'], None)),
+ log_only=True
+ )
+ if stats.custom and self.get_option('show_custom_stats'):
+ self._display.banner("CUSTOM STATS: ")
+ # per host
+ # TODO: come up with 'pretty format'
+ for k in sorted(stats.custom.keys()):
+ if k == '_run':
+ continue
+ self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
+
+ # print per run custom stats
+ if '_run' in stats.custom:
+ self._display.display("", screen_only=True)
+ self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
+ self._display.display("", screen_only=True)
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display(" No hosts found!", color=C.COLOR_DEBUG)
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR)
+
+ def v2_playbook_on_start(self, playbook):
+ # TODO display whether this run is happening in check mode
+ self._display.display("Executing playbook %s" % basename(playbook._file_name))
+
+ # show CLI arguments
+ if self._display.verbosity > 3:
+ if context.CLIARGS.get('args'):
+ self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
+ color=C.COLOR_VERBOSE, screen_only=True)
+
+ for argument in (a for a in context.CLIARGS if a != 'args'):
+ val = context.CLIARGS[argument]
+ if val:
+ self._display.vvvv('%s: %s' % (argument, val))
+
+ def v2_runner_retry(self, result):
+ msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries'])
+ if self._run_is_verbose(result):
+ msg += "Result was: %s" % self._dump_results(result._result)
+ self._display.display(msg, color=C.COLOR_DEBUG)
diff --git a/ansible_collections/community/general/plugins/callback/yaml.py b/ansible_collections/community/general/plugins/callback/yaml.py
new file mode 100644
index 000000000..ae2c8f881
--- /dev/null
+++ b/ansible_collections/community/general/plugins/callback/yaml.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: yaml
+ type: stdout
+ short_description: YAML-ized Ansible screen output
+ description:
+ - Ansible output that can be quite a bit easier to read than the
+ default JSON formatting.
+ extends_documentation_fragment:
+ - default_callback
+ requirements:
+ - set as stdout in configuration
+'''
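+
+# A minimal ansible.cfg sketch to satisfy the "set as stdout" requirement above:
+#
+#   [defaults]
+#   stdout_callback = community.general.yaml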
+
+import yaml
+import json
+import re
+import string
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.parsing.yaml.dumper import AnsibleDumper
+from ansible.plugins.callback import strip_internal_keys, module_response_deepcopy
+from ansible.plugins.callback.default import CallbackModule as Default
+
+
+# from http://stackoverflow.com/a/15423007/115478
+def should_use_block(value):
+ """Returns true if string should be in block format"""
+ for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
+ if c in value:
+ return True
+ return False
+
+
+class MyDumper(AnsibleDumper):
+ def represent_scalar(self, tag, value, style=None):
+ """Uses block style for multi-line strings"""
+ if style is None:
+ if should_use_block(value):
+ style = '|'
+ # we care more about readability than accuracy, so...
+ # ...no trailing space
+ value = value.rstrip()
+ # ...and non-printable characters
+ value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0)
+ # ...tabs prevent blocks from expanding
+ value = value.expandtabs()
+ # ...and odd bits of whitespace
+ value = re.sub(r'[\x0b\x0c\r]', '', value)
+ # ...as does trailing space
+ value = re.sub(r' +\n', '\n', value)
+ else:
+ style = self.default_style
+ node = yaml.representer.ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+
+class CallbackModule(Default):
+
+ """
+ Variation of the Default output which uses nicely readable YAML instead
+ of JSON for printing results.
+ """
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+ CALLBACK_NAME = 'community.general.yaml'
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
+ if result.get('_ansible_no_log', False):
+ return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
+
+ # All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
+ abridged_result = strip_internal_keys(module_response_deepcopy(result))
+
+ # remove invocation unless specifically wanting it
+ if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
+ del abridged_result['invocation']
+
+ # remove diff information from screen output
+ if self._display.verbosity < 3 and 'diff' in result:
+ del abridged_result['diff']
+
+ # remove exception from screen output
+ if 'exception' in abridged_result:
+ del abridged_result['exception']
+
+ dumped = ''
+
+ # put changed and skipped into a header line
+ if 'changed' in abridged_result:
+ dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
+ del abridged_result['changed']
+
+ if 'skipped' in abridged_result:
+ dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
+ del abridged_result['skipped']
+
+ # if we already have stdout, we don't need stdout_lines
+ if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
+ abridged_result['stdout_lines'] = '<omitted>'
+
+ # if we already have stderr, we don't need stderr_lines
+ if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
+ abridged_result['stderr_lines'] = '<omitted>'
+
+ if abridged_result:
+ dumped += '\n'
+ dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False))
+
+ # indent by a couple of spaces
+ dumped = '\n '.join(dumped.split('\n')).rstrip()
+ return dumped
+
+ def _serialize_diff(self, diff):
+ return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
diff --git a/ansible_collections/community/general/plugins/connection/chroot.py b/ansible_collections/community/general/plugins/connection/chroot.py
new file mode 100644
index 000000000..ef6d5566d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/chroot.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Maykel Moya (!UNKNOWN) <mmoya@speedyrails.com>
+ name: chroot
+ short_description: Interact with local chroot
+ description:
+ - Run commands or put/fetch files to an existing chroot on the Ansible controller.
+ options:
+ remote_addr:
+ description:
+ - The path of the chroot you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ executable:
+ description:
+ - User-specified executable shell
+ ini:
+ - section: defaults
+ key: executable
+ env:
+ - name: ANSIBLE_EXECUTABLE
+ vars:
+ - name: ansible_executable
+ default: /bin/sh
+ chroot_exe:
+ description:
+ - User-specified chroot binary
+ ini:
+ - section: chroot_connection
+ key: exe
+ env:
+ - name: ANSIBLE_CHROOT_EXE
+ vars:
+ - name: ansible_chroot_exe
+ default: chroot
+'''
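+
+# A minimal inventory sketch; the chroot path below is a placeholder:
+#
+#   [chroots]
+#   /srv/chroot/bullseye ansible_connection=community.general.chroot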
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.basic import is_executable
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ """ Local chroot based connections """
+
+ transport = 'community.general.chroot'
+ has_pipelining = True
+ # su currently has an undiagnosed issue with calculating the file
+ # checksums (so copy, for instance, doesn't work right)
+ # Have to look into that before re-enabling this
+ has_tty = False
+
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.chroot = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("chroot connection requires running as root")
+
+ # we're running as root on the local system so do some
+ # trivial checks for ensuring 'host' is actually a chroot'able dir
+ if not os.path.isdir(self.chroot):
+ raise AnsibleError("%s is not a directory" % self.chroot)
+
+ chrootsh = os.path.join(self.chroot, 'bin/sh')
+ # Want to check for a usable bourne shell inside the chroot.
+ # is_executable() == True is sufficient. For symlinks it
+ # gets really complicated really fast. So we punt on finding that
+ # out. As long as it's a symlink we assume that it will work
+ if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
+ raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
+
+ def _connect(self):
+ """ connect to the chroot """
+ if os.path.isabs(self.get_option('chroot_exe')):
+ self.chroot_cmd = self.get_option('chroot_exe')
+ else:
+ try:
+ self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ """ run a command on the chroot. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+ compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ """
+ executable = self.get_option('executable')
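+ # builds e.g. ['chroot', '/srv/chroot/bullseye', '/bin/sh', '-c', '<cmd>']
+ # (the path shown is illustrative; chroot_cmd and executable come from options)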
+ local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
+
+ display.vvv("EXEC %s" % local_cmd, host=self.chroot)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ run a command on the chroot """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return p.returncode, stdout, stderr
+
+ @staticmethod
+ def _prefix_login_path(remote_path):
+ """ Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ """
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ transfer a file from local to chroot """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from chroot to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("chroot connection requires dd command in the chroot")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ """ terminate the connection; nothing to do here """
+ super(Connection, self).close()
+ self._connected = False
diff --git a/ansible_collections/community/general/plugins/connection/funcd.py b/ansible_collections/community/general/plugins/connection/funcd.py
new file mode 100644
index 000000000..9f37f791d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/funcd.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@mscherer) <misc@zarb.org>
+ name: funcd
+ short_description: Use funcd to connect to target
+ description:
+ - This transport permits you to use Ansible over Func.
+ - For people who have already set up func and wish to play with Ansible,
+ this permits a gradual move to Ansible without having to completely redo the network setup.
+ options:
+ remote_addr:
+ description:
+ - The name of the func minion to connect to.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_func_host
+'''
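+
+# A minimal inventory sketch; the hostname below is a placeholder:
+#
+#   [minions]
+#   minion1.example.com ansible_connection=community.general.funcd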
+
+HAVE_FUNC = False
+try:
+ import func.overlord.client as fc
+ HAVE_FUNC = True
+except ImportError:
+ pass
+
+import os
+import tempfile
+import shutil
+
+from ansible.errors import AnsibleError
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ """ Func-based connections """
+
+ has_pipelining = False
+
+ def __init__(self, runner, host, port, *args, **kwargs):
+ self.runner = runner
+ self.host = host
+ # port is unused; traffic goes over func
+ self.port = port
+ self.client = None
+
+ def connect(self, port=None):
+ if not HAVE_FUNC:
+ raise AnsibleError("func is not installed")
+
+ self.client = fc.Client(self.host)
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ run a command on the remote minion """
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ # totally ignores privilege escalation
+ display.vvv("EXEC %s" % cmd, host=self.host)
+ p = self.client.command.run(cmd)[self.host]
+ return p[0], p[1], p[2]
+
+ @staticmethod
+ def _normalize_path(path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ """ transfer a file from local to remote """
+
+ out_path = self._normalize_path(out_path, '/')
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self.client.local.copyfile.send(in_path, out_path)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from remote to local """
+
+ in_path = self._normalize_path(in_path, '/')
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ # need to use a tmp dir due to the difference in semantics between
+ # getfile (which takes a directory as destination) and fetch_file,
+ # which takes a file directly
+ tmpdir = tempfile.mkdtemp(prefix="func_ansible")
+ self.client.local.getfile.get(in_path, tmpdir)
+ shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)), out_path)
+ shutil.rmtree(tmpdir)
+
+ def close(self):
+ """ terminate the connection; nothing to do here """
+ pass
diff --git a/ansible_collections/community/general/plugins/connection/iocage.py b/ansible_collections/community/general/plugins/connection/iocage.py
new file mode 100644
index 000000000..2e2a6f093
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/iocage.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Based on jail.py
+# (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# (c) 2016, Stephan Lohse <dev-github@ploek.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Stephan Lohse (!UNKNOWN) <dev-github@ploek.org>
+ name: iocage
+ short_description: Run tasks in iocage jails
+ description:
+ - Run commands or put/fetch files to an existing iocage jail
+ options:
+ remote_addr:
+ description:
+ - Path to the jail
+ vars:
+ - name: ansible_host
+ - name: ansible_iocage_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_iocage_user
+'''
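+
+# A minimal inventory sketch; the jail name below is a placeholder:
+#
+#   [jails]
+#   myjail ansible_connection=community.general.iocage ansible_iocage_user=root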
+
+import subprocess
+
+from ansible_collections.community.general.plugins.connection.jail import Connection as Jail
+from ansible.module_utils.common.text.converters import to_native
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(Jail):
+ """ Local iocage based connections """
+
+ transport = 'community.general.iocage'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ self.ioc_jail = play_context.remote_addr
+
+ self.iocage_cmd = Jail._search_executable('iocage')
+
+ jail_uuid = self.get_jail_uuid()
+
+ kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid)
+
+ display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
+ iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
+ host=kwargs[Jail.modified_jailname_key])
+
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ def get_jail_uuid(self):
+ p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ stdout, stderr = p.communicate()
+
+ if stdout is not None:
+ stdout = to_native(stdout)
+
+ if stderr is not None:
+ stderr = to_native(stderr)
+
+ # otherwise p.returncode would not be set
+ p.wait()
+
+ if p.returncode != 0:
+ raise AnsibleError(u"iocage returned an error: {0}".format(stdout))
+
+ return stdout.strip('\n')
diff --git a/ansible_collections/community/general/plugins/connection/jail.py b/ansible_collections/community/general/plugins/connection/jail.py
new file mode 100644
index 000000000..3a3edd4b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/jail.py
@@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+# Based on local.py by Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py by Maykel Moya <mmoya@speedyrails.com>
+# Copyright (c) 2013, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ name: jail
+ short_description: Run tasks in jails
+ description:
+ - Run commands or put/fetch files to an existing jail
+ options:
+ remote_addr:
+ description:
+ - Path to the jail
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_jail_host
+ remote_user:
+ description:
+ - User to execute as inside the jail
+ vars:
+ - name: ansible_user
+ - name: ansible_jail_user
+'''
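+
+# A minimal inventory sketch; the jail name below is a placeholder:
+#
+#   [jails]
+#   testjail ansible_connection=community.general.jail ansible_jail_user=root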
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ """ Local BSD Jail based connections """
+
+ modified_jailname_key = 'conn_jail_name'
+
+ transport = 'community.general.jail'
+ # Pipelining may work. Someone needs to test by setting this to True and
+ # having pipelining=True in their ansible.cfg
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.jail = self._play_context.remote_addr
+ if self.modified_jailname_key in kwargs:
+ self.jail = kwargs[self.modified_jailname_key]
+
+ if os.geteuid() != 0:
+ raise AnsibleError("jail connection requires running as root")
+
+ self.jls_cmd = self._search_executable('jls')
+ self.jexec_cmd = self._search_executable('jexec')
+
+ if self.jail not in self.list_jails():
+ raise AnsibleError("incorrect jail name %s" % self.jail)
+
+ @staticmethod
+ def _search_executable(executable):
+ try:
+ return get_bin_path(executable)
+ except ValueError:
+ raise AnsibleError("%s command not found in PATH" % executable)
+
+ def list_jails(self):
+ p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ stdout, stderr = p.communicate()
+
+ return to_text(stdout, errors='surrogate_or_strict').split()
+
+ def _connect(self):
+ """ connect to the jail; nothing to do here """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ """ run a command on the jail. This is only needed for implementing
+ put_file() get_file() so that we don't have to read the whole file
+ into memory.
+
+ compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ """
+
+ local_cmd = [self.jexec_cmd]
+ set_env = ''
+
+ if self._play_context.remote_user is not None:
+ local_cmd += ['-U', self._play_context.remote_user]
+ # update HOME since -U does not update the jail environment
+ set_env = 'HOME=~' + self._play_context.remote_user + ' '
+
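+ # resulting command looks like, e.g.:
+ #   ['jexec', '-U', 'ansible', 'testjail', '/bin/sh', '-c', 'HOME=~ansible <cmd>']
+ # (the user, jail name, and shell here are illustrative)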
+ local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd]
+
+ display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ run a command on the jail """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return p.returncode, stdout, stderr
+
+ @staticmethod
+ def _prefix_login_path(remote_path):
+ """ Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ """
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ transfer a file from local to jail """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from jail to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr)))
+
+ def close(self):
+ """ terminate the connection; nothing to do here """
+ super(Connection, self).close()
+ self._connected = False
diff --git a/ansible_collections/community/general/plugins/connection/lxc.py b/ansible_collections/community/general/plugins/connection/lxc.py
new file mode 100644
index 000000000..adf3eec1c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/lxc.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Joerg Thalheim <joerg@higgsboson.tk>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Joerg Thalheim (!UNKNOWN) <joerg@higgsboson.tk>
+ name: lxc
+ short_description: Run tasks in lxc containers via lxc python library
+ description:
+ - Run commands or put/fetch files to an existing lxc container using the lxc python library
+ options:
+ remote_addr:
+ description:
+ - Container identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_lxc_host
+ executable:
+ default: /bin/sh
+ description:
+ - Shell executable
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxc_executable
+'''
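+
+# A minimal inventory sketch; the container name below is a placeholder:
+#
+#   [containers]
+#   mycontainer ansible_connection=community.general.lxc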
+
+import os
+import shutil
+import traceback
+import select
+import fcntl
+import errno
+
+HAS_LIBLXC = False
+try:
+ import lxc as _lxc
+ HAS_LIBLXC = True
+except ImportError:
+ pass
+
+from ansible import errors
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ Local lxc based connections """
+
+ transport = 'community.general.lxc'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.container_name = self._play_context.remote_addr
+ self.container = None
+
+ def _connect(self):
+ """ connect to the lxc; nothing to do here """
+ super(Connection, self)._connect()
+
+ if not HAS_LIBLXC:
+ msg = "lxc bindings for python2 are not installed"
+ raise errors.AnsibleError(msg)
+
+ if self.container:
+ return
+
+ self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name)
+ self.container = _lxc.Container(self.container_name)
+ if self.container.state == "STOPPED":
+ raise errors.AnsibleError("%s is not running" % self.container_name)
+
+ @staticmethod
+ def _communicate(pid, in_data, stdin, stdout, stderr):
+ buf = {stdout: [], stderr: []}
+ read_fds = [stdout, stderr]
+ if in_data:
+ write_fds = [stdin]
+ else:
+ write_fds = []
+ while len(read_fds) > 0 or len(write_fds) > 0:
+ try:
+ ready_reads, ready_writes, dummy = select.select(read_fds, write_fds, [])
+ except select.error as e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+ for fd in ready_writes:
+ in_data = in_data[os.write(fd, in_data):]
+ if len(in_data) == 0:
+ write_fds.remove(fd)
+ for fd in ready_reads:
+ data = os.read(fd, 32768)
+ if not data:
+ read_fds.remove(fd)
+ buf[fd].append(data)
+
+ (pid, returncode) = os.waitpid(pid, 0)
+
+ return returncode, b"".join(buf[stdout]), b"".join(buf[stderr])
+
+ def _set_nonblocking(self, fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+ return fd
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ run a command on the chroot """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ # python2-lxc needs bytes. python3-lxc needs text.
+ executable = to_native(self._play_context.executable, errors='surrogate_or_strict')
+ local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')]
+
+ read_stdout, write_stdout = None, None
+ read_stderr, write_stderr = None, None
+ read_stdin, write_stdin = None, None
+
+ try:
+ read_stdout, write_stdout = os.pipe()
+ read_stderr, write_stderr = os.pipe()
+
+ kwargs = {
+ 'stdout': self._set_nonblocking(write_stdout),
+ 'stderr': self._set_nonblocking(write_stderr),
+ 'env_policy': _lxc.LXC_ATTACH_CLEAR_ENV
+ }
+
+ if in_data:
+ read_stdin, write_stdin = os.pipe()
+ kwargs['stdin'] = self._set_nonblocking(read_stdin)
+
+ self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name)
+ pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs)
+ if pid == -1:
+ msg = "failed to attach to container %s" % self.container_name
+ raise errors.AnsibleError(msg)
+
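+ # os.close() returns None, so these assignments also mark the
+ # descriptors as closed for the cleanup loop in the finally block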
+ write_stdout = os.close(write_stdout)
+ write_stderr = os.close(write_stderr)
+ if read_stdin:
+ read_stdin = os.close(read_stdin)
+
+ return self._communicate(pid,
+ in_data,
+ write_stdin,
+ read_stdout,
+ read_stderr)
+ finally:
+ fds = [read_stdout,
+ write_stdout,
+ read_stderr,
+ write_stderr,
+ read_stdin,
+ write_stdin]
+ for fd in fds:
+ if fd:
+ os.close(fd)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to lxc '''
+ super(Connection, self).put_file(in_path, out_path)
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ if not os.path.exists(in_path):
+ msg = "file or module does not exist: %s" % in_path
+ raise errors.AnsibleFileNotFound(msg)
+ try:
+ src_file = open(in_path, "rb")
+ except IOError:
+ traceback.print_exc()
+ raise errors.AnsibleError("failed to open input file to %s" % in_path)
+ try:
+ def write_file(args):
+ with open(out_path, 'wb+') as dst_file:
+ shutil.copyfileobj(src_file, dst_file)
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file to %s" % out_path
+ raise errors.AnsibleError(msg)
+ finally:
+ src_file.close()
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from lxc to local '''
+ super(Connection, self).fetch_file(in_path, out_path)
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name)
+ in_path = to_bytes(in_path, errors='surrogate_or_strict')
+ out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ try:
+ dst_file = open(out_path, "wb")
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to open output file %s" % out_path
+ raise errors.AnsibleError(msg)
+ try:
+ def write_file(args):
+ try:
+ with open(in_path, 'rb') as src_file:
+ shutil.copyfileobj(src_file, dst_file)
+ finally:
+ # this is needed in the lxc child process
+ # to flush internal python buffers
+ dst_file.close()
+ try:
+ self.container.attach_wait(write_file, None)
+ except IOError:
+ traceback.print_exc()
+ msg = "failed to transfer file from %s to %s" % (in_path, out_path)
+ raise errors.AnsibleError(msg)
+ finally:
+ dst_file.close()
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ super(Connection, self).close()
+ self._connected = False
diff --git a/ansible_collections/community/general/plugins/connection/lxd.py b/ansible_collections/community/general/plugins/connection/lxd.py
new file mode 100644
index 000000000..affb87dfd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/lxd.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016 Matt Clay <matt@mystile.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Matt Clay (@mattclay) <matt@mystile.com>
+ name: lxd
+ short_description: Run tasks in lxc containers via lxc CLI
+ description:
+ - Run commands or put/fetch files to an existing lxc container using the lxc CLI
+ options:
+ remote_addr:
+ description:
+ - Container identifier.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_lxd_host
+ executable:
+ description:
+ - shell to use for execution inside container
+ default: /bin/sh
+ vars:
+ - name: ansible_executable
+ - name: ansible_lxd_executable
+ remote:
+ description:
+ - Name of the LXD remote to use.
+ default: local
+ vars:
+ - name: ansible_lxd_remote
+ version_added: 2.0.0
+ project:
+ description:
+ - Name of the LXD project to use.
+ vars:
+ - name: ansible_lxd_project
+ version_added: 2.0.0
+'''
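+
+# A minimal inventory sketch; the container, remote, and project names are placeholders:
+#
+#   [containers]
+#   mycontainer ansible_connection=community.general.lxd ansible_lxd_remote=local ansible_lxd_project=default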
+
+import os
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase
+
+
+class Connection(ConnectionBase):
+ """ lxd based connections """
+
+ transport = 'community.general.lxd'
+ has_pipelining = True
+ default_user = 'root'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ try:
+ self._lxc_cmd = get_bin_path("lxc")
+ except ValueError:
+ raise AnsibleError("lxc command not found in PATH")
+
+ if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
+ self._display.warning('lxd does not support remote_user, using container default: root')
+
+ def _connect(self):
+ """connect to lxd (nothing to do here) """
+ super(Connection, self)._connect()
+
+ if not self._connected:
+ self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self.get_option('remote_addr'))
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ """ execute a command on the lxd host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ self._display.vvv(u"EXEC {0}".format(cmd), host=self.get_option('remote_addr'))
+
+ local_cmd = [self._lxc_cmd]
+ if self.get_option("project"):
+ local_cmd.extend(["--project", self.get_option("project")])
+ local_cmd.extend([
+ "exec",
+ "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")),
+ "--",
+ self.get_option("executable"), "-c", cmd
+ ])
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+ in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = process.communicate(in_data)
+
+ stdout = to_text(stdout)
+ stderr = to_text(stderr)
+
+ if stderr == "error: Container is not running.\n":
+ raise AnsibleConnectionFailure("container not running: %s" % self.get_option('remote_addr'))
+
+ if stderr == "error: not found\n":
+ raise AnsibleConnectionFailure("container not found: %s" % self.get_option('remote_addr'))
+
+ return process.returncode, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ put a file from local to lxd """
+ super(Connection, self).put_file(in_path, out_path)
+
+ self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+
+ if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
+
+ local_cmd = [self._lxc_cmd]
+ if self.get_option("project"):
+ local_cmd.extend(["--project", self.get_option("project")])
+ local_cmd.extend([
+ "file", "push",
+ in_path,
+ "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path)
+ ])
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from lxd to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr'))
+
+ local_cmd = [self._lxc_cmd]
+ if self.get_option("project"):
+ local_cmd.extend(["--project", self.get_option("project")])
+ local_cmd.extend([
+ "file", "pull",
+ "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path),
+ out_path
+ ])
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ process.communicate()
+
+ def close(self):
+ """ close the connection (nothing to do here) """
+ super(Connection, self).close()
+
+ self._connected = False
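+
+# Illustrative sketch (not part of the plugin) of the argv assembled by
+# exec_command() above; the container name "web1" and project "dev" are
+# hypothetical.
+def _example_lxc_exec_argv(remote, addr, cmd, executable='/bin/sh', project=None):
+ argv = ['lxc']
+ if project:
+ argv.extend(['--project', project])
+ argv.extend(['exec', '%s:%s' % (remote, addr), '--', executable, '-c', cmd])
+ return argv
+
+# _example_lxc_exec_argv('local', 'web1', 'uname -a', project='dev') returns
+# ['lxc', '--project', 'dev', 'exec', 'local:web1', '--', '/bin/sh', '-c', 'uname -a']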
diff --git a/ansible_collections/community/general/plugins/connection/qubes.py b/ansible_collections/community/general/plugins/connection/qubes.py
new file mode 100644
index 000000000..25594e952
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/qubes.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+# Based on the buildah connection plugin
+# Copyright (c) 2017 Ansible Project
+# 2018 Kushal Das
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+#
+# Written by: Kushal Das (https://github.com/kushaldas)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ name: qubes
+ short_description: Interact with an existing QubesOS AppVM
+
+ description:
+ - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools.
+
+ author: Kushal Das (@kushaldas)
+
+
+ options:
+ remote_addr:
+ description:
+ - VM name.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ remote_user:
+ description:
+ - The user to execute as inside the VM.
+ default: The C(user) account, which is the default user in Qubes OS.
+ vars:
+ - name: ansible_user
+# keyword:
+# - name: hosts
+'''
+
+import subprocess
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.plugins.connection import ConnectionBase, ensure_connect
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# this _has to be_ named Connection
+class Connection(ConnectionBase):
+ """This is a connection plugin for qubes: it uses qubes-run-vm binary to interact with the containers."""
+
+ # String used to identify this Connection class from other classes
+ transport = 'community.general.qubes'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self._remote_vmname = self._play_context.remote_addr
+ self._connected = False
+ # Default username in Qubes
+ self.user = "user"
+ if self._play_context.remote_user:
+ self.user = self._play_context.remote_user
+
+ def _qubes(self, cmd=None, in_data=None, shell="qubes.VMShell"):
+ """run qvm-run executable
+
+ :param cmd: cmd string for remote system
+ :param in_data: data passed to qvm-run's stdin
+ :return: return code, stdout, stderr
+ """
+ display.vvvv("CMD: ", cmd)
+ if not cmd.endswith("\n"):
+ cmd = cmd + "\n"
+ local_cmd = []
+
+ # For dom0
+ local_cmd.extend(["qvm-run", "--pass-io", "--service"])
+ if self.user != "user":
+ # Means we have a remote_user value
+ local_cmd.extend(["-u", self.user])
+
+ local_cmd.append(self._remote_vmname)
+
+ local_cmd.append(shell)
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ display.vvvv("Local cmd: ", local_cmd)
+
+ display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ # Here we are writing the actual command to the remote bash
+ p.stdin.write(to_bytes(cmd, errors='surrogate_or_strict'))
+ stdout, stderr = p.communicate(input=in_data)
+ return p.returncode, stdout, stderr
+
+ def _connect(self):
+ """No persistent connection is being maintained."""
+ super(Connection, self)._connect()
+ self._connected = True
+
+ @ensure_connect
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """Run specified command in a running QubesVM """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.vvvv("CMD IS: %s" % cmd)
+
+ rc, stdout, stderr = self._qubes(cmd)
+
+ display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr))
+ return rc, stdout, stderr
+
+ def put_file(self, in_path, out_path):
+ """ Place a local file located in 'in_path' inside VM at 'out_path' """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ with open(in_path, "rb") as fobj:
+ source_data = fobj.read()
+
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell")
+ # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and
+ # hope it will have appropriate permissions
+ if retcode == 127:
+ retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data)
+
+ if retcode != 0:
+ raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path))
+
+ def fetch_file(self, in_path, out_path):
+ """Obtain file specified via 'in_path' from the container and place it at 'out_path' """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname)
+
+ # We are running in dom0
+ cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)]
+ with open(out_path, "wb") as fobj:
+ p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj)
+ p.communicate()
+ if p.returncode != 0:
+ raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path))
+
+ def close(self):
+ """ Closing the connection """
+ super(Connection, self).close()
+ self._connected = False
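+
+# Illustrative sketch (not part of the plugin) of the argv built by _qubes()
+# above; the VM name "work" and user "user2" are hypothetical. The task
+# command itself is then written to the child's stdin.
+def _example_qvm_run_argv(vmname, user='user', shell='qubes.VMShell'):
+ argv = ['qvm-run', '--pass-io', '--service']
+ if user != 'user':
+ argv.extend(['-u', user])
+ argv.extend([vmname, shell])
+ return argv
+
+# _example_qvm_run_argv('work', user='user2') returns
+# ['qvm-run', '--pass-io', '--service', '-u', 'user2', 'work', 'qubes.VMShell']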
diff --git a/ansible_collections/community/general/plugins/connection/saltstack.py b/ansible_collections/community/general/plugins/connection/saltstack.py
new file mode 100644
index 000000000..1dbc7296c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/saltstack.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# Based on func.py
+# Copyright (c) 2014, Michael Scherer <misc@zarb.org>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Michael Scherer (@mscherer) <misc@zarb.org>
+ name: saltstack
+ short_description: Allow ansible to piggyback on salt minions
+ description:
+ - This allows you to use existing Saltstack infrastructure to connect to targets.
+'''
+
+import os
+import base64
+
+from ansible import errors
+from ansible.plugins.connection import ConnectionBase
+
+HAVE_SALTSTACK = False
+try:
+ import salt.client as sc
+ HAVE_SALTSTACK = True
+except ImportError:
+ pass
+
+
+class Connection(ConnectionBase):
+ """ Salt-based connections """
+
+ has_pipelining = False
+ # while the name of the product is salt, naming this module salt causes
+ # trouble with module imports
+ transport = 'community.general.saltstack'
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+ self.host = self._play_context.remote_addr
+
+ def _connect(self):
+ if not HAVE_SALTSTACK:
+ raise errors.AnsibleError("saltstack is not installed")
+
+ self.client = sc.LocalClient()
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ run a command on the remote minion """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ if in_data:
+ raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ self._display.vvv("EXEC %s" % cmd, host=self.host)
+ # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
+ res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
+ if self.host not in res:
+ raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
+
+ p = res[self.host]
+ return p['retcode'], p['stdout'], p['stderr']
+
+ @staticmethod
+ def _normalize_path(path, prefix):
+ if not path.startswith(os.path.sep):
+ path = os.path.join(os.path.sep, path)
+ normpath = os.path.normpath(path)
+ return os.path.join(prefix, normpath[1:])
+
+ def put_file(self, in_path, out_path):
+ """ transfer a file from local to remote """
+
+ super(Connection, self).put_file(in_path, out_path)
+
+ out_path = self._normalize_path(out_path, '/')
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ with open(in_path, 'rb') as in_fh:
+ content = in_fh.read()
+ self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path])
+
+ # TODO test it
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from remote to local """
+
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path = self._normalize_path(in_path, '/')
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
+ with open(out_path, 'wb') as out_fh:
+ out_fh.write(content)
+
+ def close(self):
+ """ terminate the connection; nothing to do here """
+ pass
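+
+# Illustration (not part of the plugin) of what _normalize_path() above does;
+# the paths are hypothetical:
+# Connection._normalize_path('tmp/foo', '/') -> '/tmp/foo'
+# Connection._normalize_path('/etc/../etc/hosts', '/') -> '/etc/hosts'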
diff --git a/ansible_collections/community/general/plugins/connection/zone.py b/ansible_collections/community/general/plugins/connection/zone.py
new file mode 100644
index 000000000..34827c7e3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/connection/zone.py
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 -*-
+# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
+# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
+# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Ansible Core Team
+ name: zone
+ short_description: Run tasks in a zone instance
+ description:
+ - Run commands or put/fetch files to an existing zone
+ options:
+ remote_addr:
+ description:
+ - Zone identifier
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_zone_host
+'''
+
+import os
+import os.path
+import subprocess
+import traceback
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ """ Local zone based connections """
+
+ transport = 'community.general.zone'
+ has_pipelining = True
+ has_tty = False
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.zone = self._play_context.remote_addr
+
+ if os.geteuid() != 0:
+ raise AnsibleError("zone connection requires running as root")
+
+ self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
+ self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
+
+ if self.zone not in self.list_zones():
+ raise AnsibleError("incorrect zone name %s" % self.zone)
+
+ @staticmethod
+ def _search_executable(executable):
+ try:
+ return get_bin_path(executable)
+ except ValueError:
+ raise AnsibleError("%s command not found in PATH" % executable)
+
+ def list_zones(self):
+ process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ zones = []
+ for line in process.stdout.readlines():
+ # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
+ s = to_text(line).split(':')  # stdout yields bytes; decode before splitting
+ if s[1] != 'global':
+ zones.append(s[1])
+
+ return zones
+
+ def get_zone_path(self):
+ # solaris10vm# zoneadm -z cswbuild list -p
+ # -:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
+ process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ path = to_text(process.stdout.readlines()[0]).split(':')[3]
+ return path + '/root'
+
+ def _connect(self):
+ """ connect to the zone; nothing to do here """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
+ self._connected = True
+
+ def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
+ """ run a command on the zone. This is only needed for implementing
+ put_file() and fetch_file() so that we don't have to read the whole file
+ into memory.
+
+ compared to exec_command() it loses some niceties like being able to
+ return the process's exit code immediately.
+ """
+ # NOTE: zlogin invokes a shell (just like ssh does) so we do not pass
+ # this through /bin/sh -c here. Instead it goes through the shell
+ # that zlogin selects.
+ local_cmd = [self.zlogin_cmd, self.zone, cmd]
+ local_cmd = [to_bytes(c) for c in local_cmd]
+
+ display.vvv("EXEC %s" % (local_cmd), host=self.zone)
+ p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ return p
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ run a command on the zone """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ p = self._buffered_exec_command(cmd)
+
+ stdout, stderr = p.communicate(in_data)
+ return p.returncode, stdout, stderr
+
+ def _prefix_login_path(self, remote_path):
+ """ Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ """
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ transfer a file from local to zone """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
+
+ out_path = shlex_quote(self._prefix_login_path(out_path))
+ try:
+ with open(in_path, 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ try:
+ p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
+ except OSError:
+ raise AnsibleError("jail connection requires dd command in the jail")
+ try:
+ stdout, stderr = p.communicate()
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+ except IOError:
+ raise AnsibleError("file or module does not exist at: %s" % in_path)
+
+ def fetch_file(self, in_path, out_path):
+ """ fetch a file from zone to local """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
+
+ in_path = shlex_quote(self._prefix_login_path(in_path))
+ try:
+ p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
+ except OSError:
+ raise AnsibleError("zone connection requires dd command in the zone")
+
+ with open(out_path, 'wb+') as out_file:
+ try:
+ chunk = p.stdout.read(BUFSIZE)
+ while chunk:
+ out_file.write(chunk)
+ chunk = p.stdout.read(BUFSIZE)
+ except Exception:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ def close(self):
+ """ terminate the connection; nothing to do here """
+ super(Connection, self).close()
+ self._connected = False
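+
+# Standalone sketch (not part of the plugin) of the parsing done by
+# list_zones() above on `zoneadm list -ip` output; the sample line is
+# hypothetical.
+def _example_parse_zoneadm_line(line):
+ # Fields are id:name:state:path:uuid:brand:ip-type; field 1 is the zone name.
+ return line.split(':')[1]
+
+# _example_parse_zoneadm_line('1:work:running:/zones/work:3126dc59:native:shared')
+# returns 'work'.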
diff --git a/ansible_collections/community/general/plugins/doc_fragments/alicloud.py b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
new file mode 100644
index 000000000..f464e178c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/alicloud.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Alicloud only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ alicloud_access_key:
+ description:
+ - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY),
+ C(ALICLOUD_ACCESS_KEY_ID) will be used instead.
+ aliases: ['access_key_id', 'access_key']
+ type: str
+ alicloud_secret_key:
+ description:
+ - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY),
+ C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead.
+ aliases: ['secret_access_key', 'secret_key']
+ type: str
+ alicloud_region:
+ description:
+ - The Alibaba Cloud region to use. If not specified then the value of environment variable
+ C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead.
+ aliases: ['region', 'region_id']
+ required: true
+ type: str
+ alicloud_security_token:
+ description:
+ - The Alibaba Cloud security token. If not specified then the value of environment variable
+ C(ALICLOUD_SECURITY_TOKEN) will be used instead.
+ aliases: ['security_token']
+ type: str
+ alicloud_assume_role:
+ description:
+ - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials.
+ - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name),
+ I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy)
+ type: dict
+ aliases: ['assume_role']
+ alicloud_assume_role_arn:
+ description:
+ - The Alibaba Cloud role_arn, the ARN of the role to assume. If set to an empty string, no role
+ switching is performed and Ansible executes with the provided credentials. It supports the
+ environment variable C(ALICLOUD_ASSUME_ROLE_ARN).
+ aliases: ['assume_role_arn']
+ type: str
+ alicloud_assume_role_session_name:
+ description:
+ - The Alibaba Cloud session_name, the session name to use when assuming the role. If omitted,
+ C(ansible) is passed to the AssumeRole call as the session name. It supports the environment
+ variable C(ALICLOUD_ASSUME_ROLE_SESSION_NAME).
+ aliases: ['assume_role_session_name']
+ type: str
+ alicloud_assume_role_session_expiration:
+ description:
+ - The Alibaba Cloud session_expiration, the time after which the established session for assuming
+ the role expires. Valid value range: 900-3600 seconds. Defaults to 3600 (in this case Alicloud
+ uses its own default value). It supports the environment variable C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION).
+ aliases: ['assume_role_session_expiration']
+ type: int
+ ecs_role_name:
+ description:
+ - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control'
+ section of the Alibaba Cloud console.
+ - If you are running Ansible from an ECS instance with an attached RAM role, Ansible will access the
+ metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/<ecs_role_name>) to obtain the STS
+ credential. This is the preferred approach when running in ECS, as it avoids hard-coding
+ credentials; instead they are leased on the fly by Ansible, which reduces the chance of leakage.
+ aliases: ['role_name']
+ type: str
+ profile:
+ description:
+ - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the
+ ALICLOUD_PROFILE environment variable.
+ type: str
+ shared_credentials_file:
+ description:
+ - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE
+ environment variable.
+ - If this is not set and a profile is specified, ~/.aliyun/config.json will be used.
+ type: str
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+notes:
+ - If parameters are not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID),
+ C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY),
+ C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID),
+ C(ALICLOUD_SECURITY_TOKEN),
+ C(ALICLOUD_ECS_ROLE_NAME),
+ C(ALICLOUD_SHARED_CREDENTIALS_FILE),
+ C(ALICLOUD_PROFILE),
+ C(ALICLOUD_ASSUME_ROLE_ARN),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_NAME),
+ C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION),
+ - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can typically be used to specify the
+ Alicloud region, when required, but this can also be configured in the footmark config file.
+'''
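+
+# Illustrative sketch (not part of the collection) of the access-key fallback
+# order described in the notes above; the helper name is hypothetical.
+def _example_lookup_access_key(module_param=None):
+ import os
+ # A module parameter wins, then ALICLOUD_ACCESS_KEY, then ALICLOUD_ACCESS_KEY_ID.
+ return (module_param
+ or os.environ.get('ALICLOUD_ACCESS_KEY')
+ or os.environ.get('ALICLOUD_ACCESS_KEY_ID'))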
diff --git a/ansible_collections/community/general/plugins/doc_fragments/attributes.py b/ansible_collections/community/general/plugins/doc_fragments/attributes.py
new file mode 100644
index 000000000..9b8488e0a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/attributes.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options: {}
+attributes:
+ check_mode:
+ description: Can run in C(check_mode) and return changed status prediction without modifying target.
+ diff_mode:
+ description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+'''
+
+ PLATFORM = r'''
+options: {}
+attributes:
+ platform:
+ description: Target OS/families that can be operated against.
+ support: N/A
+'''
+
+ # Should be used together with the standard fragment
+ INFO_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+'''
+
+ CONN = r'''
+options: {}
+attributes:
+ become:
+ description: Is usable alongside C(become) keywords.
+ connection:
+ description: Uses the target's configured connection information to execute code on it.
+ delegation:
+ description: Can be used in conjunction with C(delegate_to) and related keywords.
+'''
+
+ FACTS = r'''
+options: {}
+attributes:
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
+'''
+
+ # Should be used together with the standard fragment and the FACTS fragment
+ FACTS_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
+'''
+
+ FILES = r'''
+options: {}
+attributes:
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
+'''
+
+ FLOW = r'''
+options: {}
+attributes:
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+ async:
+ description: Supports being used with the C(async) keyword.
+'''
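+
+# Modules pull these fragments in through extends_documentation_fragment in
+# their DOCUMENTATION. For example (hypothetical module), the standard and
+# INFO_MODULE variants would be combined as:
+#
+# extends_documentation_fragment:
+# - community.general.attributes
+# - community.general.attributes.info_module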
diff --git a/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py b/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
new file mode 100644
index 000000000..674fb1e9a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/auth_basic.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The resolvable endpoint for the API.
+ type: str
+ api_username:
+ description:
+ - The username to use for authentication against the API.
+ type: str
+ api_password:
+ description:
+ - The password to use for authentication against the API.
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to validate SSL certs when supplying an HTTPS endpoint.
+ type: bool
+ default: true
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
new file mode 100644
index 000000000..703bb412a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/bitbucket.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ client_id:
+ description:
+ - The OAuth consumer key.
+ - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used.
+ type: str
+ client_secret:
+ description:
+ - The OAuth consumer secret.
+ - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used.
+ type: str
+ user:
+ description:
+ - The username.
+ - If not set the environment variable C(BITBUCKET_USERNAME) will be used.
+ - I(username) is an alias of I(user) since community.general 6.0.0. It was an alias of I(workspace) before.
+ type: str
+ version_added: 4.0.0
+ aliases: [ username ]
+ password:
+ description:
+ - The App password.
+ - If not set the environment variable C(BITBUCKET_PASSWORD) will be used.
+ type: str
+ version_added: 4.0.0
+notes:
+ - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth.
+ - Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords.
+ - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence.
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
new file mode 100644
index 000000000..f8372431e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ region:
+ description:
+ - The target region.
+ - Regions are defined in the Apache libcloud project in [libcloud/common/dimensiondata.py].
+ - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html).
+ - Note that the default value "na" stands for "North America".
+ - The module prepends 'dd-' to the region choice.
+ type: str
+ default: na
+ mcp_user:
+ description:
+ - The username used to authenticate to the CloudControl API.
+ - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata).
+ type: str
+ mcp_password:
+ description:
+ - The password used to authenticate to the CloudControl API.
+ - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata).
+ - Required if I(mcp_user) is specified.
+ type: str
+ location:
+ description:
+ - The target datacenter.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on private instances of the CloudControl API that use self-signed certificates.
+ type: bool
+ default: true
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
new file mode 100644
index 000000000..d37152839
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/dimensiondata_wait.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, Dimension Data
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+
+class ModuleDocFragment(object):
+
+ # Dimension Data ("wait-for-completion" parameters) doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ wait:
+ description:
+ - Should we wait for the task to complete before moving on to the next.
+ type: bool
+ default: false
+ wait_time:
+ description:
+ - The maximum amount of time (in seconds) to wait for the task to complete.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 600
+ wait_poll_interval:
+ description:
+ - The amount of time (in seconds) to wait between checks for task completion.
+ - Only applicable if I(wait=true).
+ type: int
+ default: 2
+ '''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/emc.py b/ansible_collections/community/general/plugins/doc_fragments/emc.py
new file mode 100644
index 000000000..e9e57a2c1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/emc.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Luca Lorenzetto (@remix_tj) <lorenzetto.luca@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details.
+requirements:
+ - See respective platform section for more details.
+notes:
+ - Ansible modules are available for EMC VNX.
+'''
+
+ # Documentation fragment for VNX (emc_vnx)
+ EMC_VNX = r'''
+options:
+ sp_address:
+ description:
+ - Address of the SP of target/secondary storage.
+ type: str
+ required: true
+ sp_user:
+ description:
+ - Username for accessing SP.
+ type: str
+ default: sysadmin
+ sp_password:
+ description:
+ - Password for accessing SP.
+ type: str
+ default: sysadmin
+requirements:
+ - An EMC VNX Storage device.
+ - Ansible 2.7.
+ - storops (0.5.10 or greater). Install using 'pip install storops'.
+notes:
+ - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform.
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/gitlab.py b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
new file mode 100644
index 000000000..705a93c02
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/gitlab.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+requirements:
+ - requests (Python library U(https://pypi.org/project/requests/))
+
+options:
+ api_token:
+ description:
+ - GitLab access token with API permissions.
+ type: str
+ api_oauth_token:
+ description:
+ - GitLab OAuth token for logging in.
+ type: str
+ version_added: 4.2.0
+ api_job_token:
+ description:
+ - GitLab CI job token for logging in.
+ type: str
+ version_added: 4.2.0
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py b/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
new file mode 100644
index 000000000..606a2502a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/hpe3par.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HPE 3PAR doc fragment
+ DOCUMENTATION = '''
+options:
+ storage_system_ip:
+ description:
+ - The storage system IP address.
+ type: str
+ required: true
+ storage_system_password:
+ description:
+ - The storage system password.
+ type: str
+ required: true
+ storage_system_username:
+ description:
+ - The storage system user name.
+ type: str
+ required: true
+
+requirements:
+ - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk).
+ - WSAPI service should be enabled on the 3PAR storage array.
+notes:
+ '''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/hwc.py b/ansible_collections/community/general/plugins/doc_fragments/hwc.py
new file mode 100644
index 000000000..d3cebb6db
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/hwc.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Huawei Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # HWC doc fragment.
+ DOCUMENTATION = '''
+options:
+ identity_endpoint:
+ description:
+ - The Identity authentication URL.
+ type: str
+ required: true
+ user:
+ description:
+ - The user name to login with (currently only user names are
+ supported, and not user IDs).
+ type: str
+ required: true
+ password:
+ description:
+ - The password to login with.
+ type: str
+ required: true
+ domain:
+ description:
+ - The name of the Domain to scope to (Identity v3).
+ (currently only domain names are supported, and not domain IDs).
+ type: str
+ required: true
+ project:
+ description:
+ - The name of the Tenant (Identity v2) or Project (Identity v3).
+ (currently only project names are supported, and not
+ project IDs).
+ type: str
+ required: true
+ region:
+ description:
+ - The region to which the project belongs.
+ type: str
+ id:
+ description:
+ - The ID of the resource to be managed.
+ type: str
+notes:
+ - For authentication, you can set identity_endpoint using the
+ C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable.
+ - For authentication, you can set user using the
+ C(ANSIBLE_HWC_USER) env variable.
+ - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env
+ variable.
+ - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env
+ variable.
+ - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env
+ variable.
+ - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable.
+ - Environment variables values will only be used if the playbook values are
+ not set.
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py b/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
new file mode 100644
index 000000000..ff38c3fc7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/ibm_storage.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # ibm_storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ username:
+ description:
+ - Management user on the spectrum accelerate storage system.
+ type: str
+ required: true
+ password:
+ description:
+ - Password for username on the spectrum accelerate storage system.
+ type: str
+ required: true
+ endpoints:
+ description:
+ - The hostname or management IP of Spectrum Accelerate storage system.
+ type: str
+ required: true
+notes:
+ - This module requires pyxcli python library.
+ Use 'pip install pyxcli' in order to get pyxcli.
+requirements:
+ - python >= 2.7
+ - pyxcli
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/influxdb.py b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
new file mode 100644
index 000000000..6aedd5ad3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/influxdb.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for influxdb modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address on which InfluxDB server is listening.
+ - Since Ansible 2.5, defaulted to localhost.
+ type: str
+ default: localhost
+ username:
+ description:
+ - Username that will be used to authenticate against InfluxDB server.
+ - Alias C(login_username) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_username ]
+ password:
+ description:
+ - Password that will be used to authenticate against InfluxDB server.
+ - Alias C(login_password) added in Ansible 2.5.
+ type: str
+ default: root
+ aliases: [ login_password ]
+ port:
+ description:
+ - The port on which the InfluxDB server is listening.
+ type: int
+ default: 8086
+ path:
+ description:
+ - The path on which the InfluxDB server is accessible.
+ - Only available when using python-influxdb >= 5.1.0.
+ type: str
+ default: ''
+ version_added: '0.2.0'
+ validate_certs:
+ description:
+ - If set to C(false), the SSL certificates will not be validated.
+ - This should only be set to C(false) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ ssl:
+ description:
+ - Use https instead of http to connect to InfluxDB server.
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Number of seconds Requests will wait for the client to establish a connection.
+ type: int
+ retries:
+ description:
+ - Number of retries client will try before aborting.
+ - C(0) indicates try until success.
+ - Only available when using python-influxdb >= 4.1.0.
+ type: int
+ default: 3
+ use_udp:
+ description:
+ - Use UDP to connect to InfluxDB server.
+ type: bool
+ default: false
+ udp_port:
+ description:
+ - UDP port to connect to InfluxDB server.
+ type: int
+ default: 4444
+ proxies:
+ description:
+ - HTTP(S) proxy to use for Requests to connect to InfluxDB server.
+ type: dict
+ default: {}
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ipa.py b/ansible_collections/community/general/plugins/doc_fragments/ipa.py
new file mode 100644
index 000000000..5051c5539
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/ipa.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-18, Ansible Project
+# Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Parameters for FreeIPA/IPA modules
+ DOCUMENTATION = r'''
+options:
+ ipa_port:
+ description:
+ - Port of FreeIPA / IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead.
+ - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: int
+ default: 443
+ ipa_host:
+ description:
+ - IP or hostname of IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead.
+ - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server.
+ - The relevant entry needed in FreeIPA is the 'ipa-ca' entry.
+ - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: ipa.example.com
+ ipa_user:
+ description:
+ - Administrative account used on IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead.
+ - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ default: admin
+ ipa_pass:
+ description:
+ - Password of administrative user.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead.
+ - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA.
+ - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server.
+ - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate.
+ - If GSSAPI is not available, the usage of 'ipa_pass' is required.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ ipa_prot:
+ description:
+ - Protocol used by IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead.
+ - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set.
+ - Environment variable fallback mechanism is added in Ansible 2.5.
+ type: str
+ choices: [ http, https ]
+ default: https
+ validate_certs:
+ description:
+ - This only applies if C(ipa_prot) is I(https).
+ - If set to C(false), the SSL certificates will not be validated.
+ - This should only be set to C(false) on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ ipa_timeout:
+ description:
+ - Specifies idle timeout (in seconds) for the connection.
+ - For bulk operations, you may want to increase this in order to avoid timeout from IPA server.
+ - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead.
+ - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set.
+ type: int
+ default: 10
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/keycloak.py b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
new file mode 100644
index 000000000..5d79fad7c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/keycloak.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
+
+ auth_client_id:
+ description:
+ - OpenID Connect I(client_id) to authenticate to the API with.
+ type: str
+ default: admin-cli
+
+ auth_realm:
+ description:
+ - Keycloak realm name to authenticate to for API access.
+ type: str
+
+ auth_client_secret:
+ description:
+ - Client Secret to use in conjunction with I(auth_client_id) (if required).
+ type: str
+
+ auth_username:
+ description:
+ - Username to authenticate for API access with.
+ type: str
+ aliases:
+ - username
+
+ auth_password:
+ description:
+ - Password to authenticate for API access with.
+ type: str
+ aliases:
+ - password
+
+ token:
+ description:
+ - Authentication token for Keycloak API.
+ type: str
+ version_added: 3.0.0
+
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: true
+
+ connection_timeout:
+ description:
+ - Controls the HTTP connection timeout period (in seconds) for the Keycloak API.
+ type: int
+ default: 10
+ version_added: 4.5.0
+ http_agent:
+ description:
+ - Configures the HTTP User-Agent header.
+ type: str
+ default: Ansible
+ version_added: 5.4.0
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/ldap.py b/ansible_collections/community/general/plugins/doc_fragments/ldap.py
new file mode 100644
index 000000000..b321c75eb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/ldap.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard LDAP documentation fragment
+ DOCUMENTATION = r'''
+options:
+ bind_dn:
+ description:
+ - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default.
+ - If this is blank, we'll use an anonymous bind.
+ type: str
+ bind_pw:
+ description:
+ - The password to use with I(bind_dn).
+ type: str
+ default: ''
+ ca_path:
+ description:
+ - Set the path to PEM file with CA certs.
+ type: path
+ version_added: "6.5.0"
+ dn:
+ required: true
+ description:
+ - The DN of the entry to add or remove.
+ type: str
+ referrals_chasing:
+ choices: [disabled, anonymous]
+ default: anonymous
+ type: str
+ description:
+ - Set the referrals chasing behavior.
+ - C(anonymous) follows referrals anonymously. This is the default behavior.
+ - C(disabled) disables referrals chasing. This sets C(OPT_REFERRALS) to off.
+ version_added: 2.0.0
+ server_uri:
+ description:
+ - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields.
+ - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location.
+ - Note that when using multiple URIs you cannot determine to which URI your client gets connected.
+ - For URIs containing additional fields, particularly when using commas, behavior is undefined.
+ type: str
+ default: ldapi:///
+ start_tls:
+ description:
+ - If true, we'll use the START_TLS LDAP extension.
+ type: bool
+ default: false
+ validate_certs:
+ description:
+ - If set to C(false), SSL certificates will not be validated.
+ - This should only be used on sites using self-signed certificates.
+ type: bool
+ default: true
+ sasl_class:
+ description:
+ - The class to use for SASL authentication.
+ - Possible choices are C(external), C(gssapi).
+ type: str
+ choices: ['external', 'gssapi']
+ default: external
+ version_added: "2.0.0"
+ xorder_discovery:
+ description:
+ - Set the behavior on how to process Xordered DNs.
+ - C(enable) will perform a C(ONELEVEL) search below the superior RDN to find the matching DN.
+ - C(disable) will always use the DN unmodified (as passed by the I(dn) parameter).
+ - C(auto) will only perform a search if the first RDN does not contain an index number (C({x})).
+ - Possible choices are C(enable), C(auto), C(disable).
+ type: str
+ choices: ['enable', 'auto', 'disable']
+ default: auto
+ version_added: "6.4.0"
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py b/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
new file mode 100644
index 000000000..b5e7d7294
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/lxca_common.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Standard Pylxca documentation fragment
+ DOCUMENTATION = r'''
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+
+options:
+ login_user:
+ description:
+ - The username for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ login_password:
+ description:
+ - The password for use in HTTP basic authentication.
+ type: str
+ required: true
+
+ auth_url:
+ description:
+ - The full HTTPS web address of the LXCA server.
+ type: str
+ required: true
+
+requirements:
+ - pylxca
+
+notes:
+ - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca).
+ - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca).
+ - Check mode is not supported.
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/manageiq.py b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
new file mode 100644
index 000000000..030d68238
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/manageiq.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard ManageIQ documentation fragment
+ DOCUMENTATION = r'''
+options:
+ manageiq_connection:
+ description:
+ - ManageIQ connection configuration information.
+ required: false
+ type: dict
+ suboptions:
+ url:
+ description:
+ - ManageIQ environment URL. Defaults to the C(MIQ_URL) environment variable if set; otherwise it is required.
+ type: str
+ required: false
+ username:
+ description:
+ - ManageIQ username. Defaults to the C(MIQ_USERNAME) environment variable if set; otherwise required if no token is passed in.
+ type: str
+ password:
+ description:
+ - ManageIQ password. Defaults to the C(MIQ_PASSWORD) environment variable if set; otherwise required if no token is passed in.
+ type: str
+ token:
+ description:
+ - ManageIQ token. Defaults to the C(MIQ_TOKEN) environment variable if set; otherwise required if no username or password is passed in.
+ type: str
+ validate_certs:
+ description:
+ - Whether SSL certificates should be verified for HTTPS requests. Defaults to C(true).
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ ca_cert:
+ description:
+ - The path to a CA bundle file or directory with certificates. Defaults to C(None).
+ type: str
+ aliases: [ ca_bundle_path ]
+
+requirements:
+ - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)'
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/nomad.py b/ansible_collections/community/general/plugins/doc_fragments/nomad.py
new file mode 100644
index 000000000..b19404e83
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/nomad.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - FQDN of Nomad server.
+ required: true
+ type: str
+ use_ssl:
+ description:
+ - Use TLS/SSL connection.
+ type: bool
+ default: true
+ timeout:
+ description:
+ - Timeout (in seconds) for the request to Nomad.
+ type: int
+ default: 5
+ validate_certs:
+ description:
+ - Enable TLS/SSL certificate validation.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - Path of certificate for TLS/SSL.
+ type: path
+ client_key:
+ description:
+ - Path of certificate's private key for TLS/SSL.
+ type: path
+ namespace:
+ description:
+ - Namespace for Nomad.
+ type: str
+ token:
+ description:
+ - ACL token for authentication.
+ type: str
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oneview.py b/ansible_collections/community/general/plugins/doc_fragments/oneview.py
new file mode 100644
index 000000000..54288e51f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oneview.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # OneView doc fragment
+ DOCUMENTATION = r'''
+options:
+ config:
+ description:
+ - Path to a .json configuration file containing the OneView client configuration.
+ The configuration file is optional and, when used, should be present on the host running the Ansible commands.
+ If the file path is not provided, the configuration will be loaded from environment variables.
+ For links to example configuration files and how to use the environment variables, see the notes section.
+ type: path
+ api_version:
+ description:
+ - OneView API Version.
+ type: int
+ image_streamer_hostname:
+ description:
+ - IP address or hostname for the HPE Image Streamer REST API.
+ type: str
+ hostname:
+ description:
+ - IP address or hostname for the appliance.
+ type: str
+ username:
+ description:
+ - Username for API authentication.
+ type: str
+ password:
+ description:
+ - Password for API authentication.
+ type: str
+
+requirements:
+ - python >= 2.7.9
+
+notes:
+ - "A sample configuration file for the config parameter can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
+ - "Check how to use environment variables for configuration at:
+ U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
+ - "Additional Playbooks for the HPE OneView Ansible modules can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
+ - "The OneView API version used will directly affect returned and expected fields in resources.
+ Information on setting the desired API version can be found at:
+ U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)"
+ '''
+
+ VALIDATEETAG = r'''
+options:
+ validate_etag:
+ description:
+ - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
+ for the resource matches the ETag provided in the data.
+ type: bool
+ default: true
+'''
+
+ FACTSPARAMS = r'''
+options:
+ params:
+ description:
+ - List of parameters to delimit, filter and sort the list of resources.
+ - "Allowed params:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(filter): A general filter/query string to narrow the list of items returned.
+ - C(sort): The sort order of the returned data set."
+ type: dict
+'''
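+
+# Usage sketch (editorial illustration) for the I(params) option documented in
+# FACTSPARAMS; the module name and values are assumptions:
+#
+#   - name: Return a filtered, sorted page of resources
+#     community.general.oneview_fc_network_info:
+#       params:
+#         start: 0
+#         count: 3
+#         sort: name:ascending
+#         filter: fabricType=FabricAttach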
diff --git a/ansible_collections/community/general/plugins/doc_fragments/online.py b/ansible_collections/community/general/plugins/doc_fragments/online.py
new file mode 100644
index 000000000..d7e13765b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/online.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Online OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Online API URL.
+ type: str
+ default: 'https://api.online.net'
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Online API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ validate_certs:
+ description:
+ - Validate SSL certs of the Online API.
+ type: bool
+ default: true
+notes:
+ - Also see the API documentation on U(https://console.online.net/en/api/).
+ - If C(api_token) is not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN).
+ - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL)
+ environment variable.
+'''
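+
+# Usage sketch (editorial illustration): the token can also be taken from one
+# of the environment variables listed in the notes, for example:
+#
+#   - name: Gather Online server information
+#     community.general.online_server_info:
+#       api_token: "{{ lookup('ansible.builtin.env', 'ONLINE_API_TOKEN') }}"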
diff --git a/ansible_collections/community/general/plugins/doc_fragments/opennebula.py b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
new file mode 100644
index 000000000..0fc323271
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/opennebula.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # OpenNebula common documentation
+ DOCUMENTATION = r'''
+options:
+ api_url:
+ description:
+ - The endpoint URL of the XMLRPC server.
+ - If not specified then the value of the ONE_URL environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_endpoint
+ api_username:
+ description:
+ - The name of the user for XMLRPC authentication.
+ - If not specified then the value of the ONE_USERNAME environment variable, if any, is used.
+ type: str
+ api_password:
+ description:
+ - The password or token for XMLRPC authentication.
+ - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used.
+ type: str
+ aliases:
+ - api_token
+ validate_certs:
+ description:
+ - Whether to validate the SSL certificates or not.
+ - This parameter is ignored if the C(PYTHONHTTPSVERIFY) environment variable is used.
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - Time to wait for the desired state to be reached before timeout, in seconds.
+ type: int
+ default: 300
+'''
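+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Deploy a VM through the OpenNebula XMLRPC endpoint
+#     community.general.one_vm:
+#       api_url: http://one.example.com:2633/RPC2
+#       api_username: oneadmin
+#       api_password: "{{ one_password }}"
+#       template_name: my_template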
diff --git a/ansible_collections/community/general/plugins/doc_fragments/openswitch.py b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
new file mode 100644
index 000000000..9d5f0be74
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/openswitch.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Peter Sprygada <psprygada@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard openswitch documentation fragment
+ DOCUMENTATION = r'''
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport. Note this argument
+ does not affect the SSH transport.
+ type: str
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(rest). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task (cli=22, http=80, https=443). Note
+ this argument does not affect the SSH transport.
+ type: int
+ default: 0
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. This value is used to authenticate
+ either the CLI login or the eAPI authentication depending on which
+ transport is used. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
+ type: str
+ password:
+ description:
+ - Specifies the password to use to authenticate the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(rest) transports. Note this argument does not affect the SSH
+ transport. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
+ type: str
+ timeout:
+ description:
+ - Specifies the timeout in seconds for communicating with the network device
+ for either connecting or sending commands. If the timeout is
+ exceeded before the operation is completed, the module will error.
+ type: int
+ default: 10
+ ssh_keyfile:
+ description:
+ - Specifies the SSH key to use to authenticate the connection to
+ the remote device. This argument is only used for the I(cli)
+ transports. If the value is not specified in the task, the value of
+ environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
+ type: path
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over ssh, cli or REST.
+ required: true
+ type: str
+ choices: [ cli, rest, ssh ]
+ default: ssh
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL when set to C(true). This only
+ applies when the I(transport) argument is configured as I(rest); for any
+ other transport this value is ignored.
+ type: bool
+ default: true
+ provider:
+ description:
+ - Convenience method that allows all I(openswitch) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ type: dict
+'''
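+
+# Usage sketch (editorial illustration): passing all transport arguments at
+# once through the I(provider) dictionary described above:
+#
+#   vars:
+#     rest_provider:
+#       host: "{{ inventory_hostname }}"
+#       transport: rest
+#       username: netop
+#       password: "{{ ops_password }}"
+#       use_ssl: true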
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle.py b/ansible_collections/community/general/plugins/doc_fragments/oracle.py
new file mode 100644
index 000000000..9ca4706ba
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ requirements:
+ - "python >= 2.7"
+ - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io)
+ notes:
+ - For OCI python sdk configuration, please refer to
+ U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html)
+ options:
+ config_file_location:
+ description:
+ - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable,
+ if any, is used. Otherwise, defaults to ~/.oci/config.
+ type: str
+ config_profile_name:
+ description:
+ - The profile to load from the config file referenced by C(config_file_location). If not set, then the
+ value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the
+ "DEFAULT" profile in C(config_file_location).
+ default: "DEFAULT"
+ type: str
+ api_user:
+ description:
+ - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the
+ value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user
+ is not specified through a configuration file (See C(config_file_location)). To get the user's OCID,
+ please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_fingerprint:
+ description:
+ - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT
+ environment variable, if any, is used. This option is required if the key fingerprint is not
+ specified through a configuration file (See C(config_file_location)). To get the key pair's
+ fingerprint value please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm).
+ type: str
+ api_user_key_file:
+ description:
+ - Full path and filename of the private key (in PEM format). If not set, then the value of the
+ OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is
+ not specified through a configuration file (See C(config_file_location)). If the key is encrypted
+ with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided.
+ type: path
+ api_user_key_pass_phrase:
+ description:
+ - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then
+ the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the
+ key passphrase is not specified through a configuration file (See C(config_file_location)).
+ type: str
+ auth_type:
+ description:
+ - The type of authentication to use for making API requests. By default C(auth_type="api_key") based
+ authentication is performed and the API key (see I(api_user_key_file)) in your config file will be
+ used. If this I(auth_type) module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE
+ environment variable, if any, is used. Use C(auth_type="instance_principal") to use instance
+ principal based authentication when running Ansible playbooks within an OCI compute instance.
+ choices: ['api_key', 'instance_principal']
+ default: 'api_key'
+ type: str
+ tenancy:
+ description:
+ - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is
+ used. This option is required if the tenancy OCID is not specified through a configuration file
+ (See C(config_file_location)). To get the tenancy OCID, please refer
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm)
+ type: str
+ region:
+ description:
+ - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the
+ value of the OCI_REGION variable, if any, is used. This option is required if the region is
+ not specified through a configuration file (See C(config_file_location)). Please refer to
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information
+ on OCI regions.
+ type: str
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
new file mode 100644
index 000000000..529381919
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_creatable_resource.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ force_create:
+ description: Whether to attempt non-idempotent creation of a resource. By default, resource creation is an
+ idempotent operation and does not create the resource if it already exists. Setting this option
+ to C(true) forcefully creates a copy of the resource, even if it already exists. This option is
+ mutually exclusive with I(key_by).
+ default: false
+ type: bool
+ key_by:
+ description: The list of comma-separated attributes of this resource which should be used to uniquely
+ identify an instance of the resource. By default, all the attributes of a resource except
+ I(freeform_tags) are used to uniquely identify a resource.
+ type: list
+ elements: str
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
new file mode 100644
index 000000000..eae5f4459
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_display_name_option.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ display_name:
+ description: Use I(display_name) along with the other options to return only resources that match the given
+ display name exactly.
+ type: str
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
new file mode 100644
index 000000000..362071f94
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_name_option.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ name:
+ description: Use I(name) along with the other options to return only resources that match the given name
+ exactly.
+ type: str
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
new file mode 100644
index 000000000..3789dbe91
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_tags.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ defined_tags:
+ description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more
+ information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ freeform_tags:
+ description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name,
+ type, or namespace. For more information, see
+ U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
+ type: dict
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
new file mode 100644
index 000000000..ce7ea776e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/oracle_wait_options.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = """
+ options:
+ wait:
+ description: Whether to wait for create or delete operation to complete.
+ default: true
+ type: bool
+ wait_timeout:
+ description: Time, in seconds, to wait when I(wait=true).
+ default: 1200
+ type: int
+ wait_until:
+ description: The lifecycle state to wait for the resource to transition into when I(wait=true). By default,
+ when I(wait=true), create operations wait for the resource to reach the applicable one of the
+ ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/RUNNING lifecycle states, and delete operations wait for
+ it to reach the DELETED/DETACHED/TERMINATED lifecycle state.
+ type: str
+ """
diff --git a/ansible_collections/community/general/plugins/doc_fragments/pritunl.py b/ansible_collections/community/general/plugins/doc_fragments/pritunl.py
new file mode 100644
index 000000000..51ab979b5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/pritunl.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r"""
+options:
+ pritunl_url:
+ type: str
+ required: true
+ description:
+ - URL and port of the Pritunl server on which the API is enabled.
+
+ pritunl_api_token:
+ type: str
+ required: true
+ description:
+ - API Token of a Pritunl admin user.
+ - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication.
+
+ pritunl_api_secret:
+ type: str
+ required: true
+ description:
+ - API Secret found in Administrators > USERNAME > API Secret.
+
+ validate_certs:
+ type: bool
+ required: false
+ default: true
+ description:
+ - Whether certificates should be validated or not.
+ - This should never be set to C(false), except if you are very sure that
+ your connection to the server cannot be subject to a man-in-the-middle
+ attack.
+"""
diff --git a/ansible_collections/community/general/plugins/doc_fragments/proxmox.py b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
new file mode 100644
index 000000000..e39af4f3a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/proxmox.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for Proxmox VE modules
+ DOCUMENTATION = r'''
+options:
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ type: str
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ type: str
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ type: str
+ api_token_id:
+ description:
+ - Specify the token ID.
+ type: str
+ version_added: 1.3.0
+ api_token_secret:
+ description:
+ - Specify the token secret.
+ type: str
+ version_added: 1.3.0
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+ - This should only be used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: false
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+ SELECTION = r'''
+options:
+ vmid:
+ description:
+ - Specifies the instance ID.
+ - If not set, the next available ID will be fetched from ProxmoxAPI.
+ type: int
+ node:
+ description:
+ - Proxmox VE node on which to operate.
+ - Only required for I(state=present).
+ - For all other states, it will be autodiscovered.
+ type: str
+ pool:
+ description:
+ - Add the new VM to the specified pool.
+ type: str
+'''
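+
+# Usage sketch (editorial illustration; values are assumptions) combining the
+# common options with the SELECTION options, using API token authentication:
+#
+#   - name: Start an existing VM
+#     community.general.proxmox_kvm:
+#       api_host: proxmox.example.com
+#       api_user: ansible@pam
+#       api_token_id: mytoken
+#       api_token_secret: "{{ proxmox_token_secret }}"
+#       node: pve01
+#       vmid: 100
+#       state: started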
diff --git a/ansible_collections/community/general/plugins/doc_fragments/purestorage.py b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
new file mode 100644
index 000000000..8db8c3b3d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/purestorage.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Simon Dodsley <simon@purestorage.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Pure Storage documentation fragment
+ DOCUMENTATION = r'''
+options:
+ - See separate platform section for more details
+requirements:
+ - See separate platform section for more details
+notes:
+ - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade
+'''
+
+ # Documentation fragment for FlashBlade
+ FB = r'''
+options:
+ fb_url:
+ description:
+ - FlashBlade management IP address or Hostname.
+ type: str
+ api_token:
+ description:
+ - FlashBlade API token for admin privileged user.
+ type: str
+notes:
+ - This module requires the C(purity_fb) Python library.
+ - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables
+ if I(fb_url) and I(api_token) arguments are not passed to the module directly.
+requirements:
+ - python >= 2.7
+ - purity_fb >= 1.1
+'''
+
+ # Documentation fragment for FlashArray
+ FA = r'''
+options:
+ fa_url:
+ description:
+ - FlashArray management IPv4 address or Hostname.
+ type: str
+ required: true
+ api_token:
+ description:
+ - FlashArray API token for admin privileged user.
+ type: str
+ required: true
+notes:
+ - This module requires the C(purestorage) Python library.
+ - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables
+ if I(fa_url) and I(api_token) arguments are not passed to the module directly.
+requirements:
+ - python >= 2.7
+ - purestorage
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/rackspace.py b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
new file mode 100644
index 000000000..9e2231602
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/rackspace.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Rackspace only documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_key:
+ description:
+ - Rackspace API key, overrides I(credentials).
+ type: str
+ aliases: [ password ]
+ credentials:
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ type: path
+ aliases: [ creds_file ]
+ env:
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ type: str
+ region:
+ description:
+ - Region to create an instance in.
+ type: str
+ username:
+ description:
+ - Rackspace username, overrides I(credentials).
+ type: str
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used: C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+ - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating).
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
+
+ # Documentation fragment including attributes to enable communication
+ # of other OpenStack clouds. Not all rax modules support this.
+ OPENSTACK = r'''
+options:
+ api_key:
+ type: str
+ description:
+ - Rackspace API key, overrides I(credentials).
+ aliases: [ password ]
+ auth_endpoint:
+ type: str
+ description:
+ - The URI of the authentication service.
+ - If not specified, it will be set to U(https://identity.api.rackspacecloud.com/v2.0/).
+ credentials:
+ type: path
+ description:
+ - File to find the Rackspace credentials in. Ignored if I(api_key) and
+ I(username) are provided.
+ aliases: [ creds_file ]
+ env:
+ type: str
+ description:
+ - Environment as configured in I(~/.pyrax.cfg),
+ see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration).
+ identity_type:
+ type: str
+ description:
+ - Authentication mechanism to use, such as rackspace or keystone.
+ default: rackspace
+ region:
+ type: str
+ description:
+ - Region to create an instance in.
+ tenant_id:
+ type: str
+ description:
+ - The tenant ID used for authentication.
+ tenant_name:
+ type: str
+ description:
+ - The tenant name used for authentication.
+ username:
+ type: str
+ description:
+ - Rackspace username, overrides I(credentials).
+ validate_certs:
+ description:
+ - Whether or not to require SSL validation of API endpoints.
+ type: bool
+ aliases: [ verify_ssl ]
+requirements:
+ - python >= 2.6
+ - pyrax
+notes:
+ - The following environment variables can be used: C(RAX_USERNAME),
+ C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
+ - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
+ appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating).
+ - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
+ - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/redis.py b/ansible_collections/community/general/plugins/doc_fragments/redis.py
new file mode 100644
index 000000000..2d4033051
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/redis.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for Redis modules
+ DOCUMENTATION = r'''
+options:
+ login_host:
+ description:
+ - Specify the target host running the database.
+ default: localhost
+ type: str
+ login_port:
+ description:
+ - Specify the port to connect to.
+ default: 6379
+ type: int
+ login_user:
+ description:
+ - Specify the user to authenticate with.
+ - Requires L(redis,https://pypi.org/project/redis) >= 3.4.0.
+ type: str
+ login_password:
+ description:
+ - Specify the password to authenticate with.
+ - Usually not used when target is localhost.
+ type: str
+ tls:
+ description:
+ - Specify whether or not to use TLS for the connection.
+ type: bool
+ default: true
+ validate_certs:
+ description:
+ - Specify whether or not to validate TLS certificates.
+ - This should only be turned off for personally controlled sites or with
+ C(localhost) as target.
+ type: bool
+ default: true
+ ca_certs:
+ description:
+ - Path to the root certificates file. If not set and I(tls) is
+ set to C(true), the certifi CA certificates will be used.
+ type: str
+requirements: [ "redis", "certifi" ]
+
+notes:
+ - Requires the C(redis) Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ Information on the library can be found at U(https://github.com/andymccurdy/redis-py).
+'''
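+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Read a key from a password-protected Redis instance
+#     community.general.redis_data_info:
+#       login_host: redis.example.com
+#       login_password: "{{ redis_password }}"
+#       key: mykey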
diff --git a/ansible_collections/community/general/plugins/doc_fragments/rundeck.py b/ansible_collections/community/general/plugins/doc_fragments/rundeck.py
new file mode 100644
index 000000000..62c8648e9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/rundeck.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard Rundeck documentation fragment
+ DOCUMENTATION = r'''
+options:
+ url:
+ type: str
+ description:
+ - Rundeck instance URL.
+ required: true
+ api_version:
+ type: int
+ description:
+ - Rundeck API version to be used.
+ - API version must be at least 14.
+ default: 39
+ api_token:
+ type: str
+ description:
+ - Rundeck User API Token.
+ required: true
+'''
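+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Run a Rundeck job
+#     community.general.rundeck_job_run:
+#       url: https://rundeck.example.com
+#       api_version: 39
+#       api_token: "{{ rundeck_api_token }}"
+#       job_id: "{{ rundeck_job_id }}"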
diff --git a/ansible_collections/community/general/plugins/doc_fragments/scaleway.py b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
new file mode 100644
index 000000000..b08d11dbb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/scaleway.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ api_token:
+ description:
+ - Scaleway OAuth token.
+ type: str
+ required: true
+ aliases: [ oauth_token ]
+ api_url:
+ description:
+ - Scaleway API URL.
+ type: str
+ default: https://api.scaleway.com
+ aliases: [ base_url ]
+ api_timeout:
+ description:
+ - HTTP timeout to Scaleway API in seconds.
+ type: int
+ default: 30
+ aliases: [ timeout ]
+ query_parameters:
+ description:
+ - List of parameters passed to the query string.
+ type: dict
+ default: {}
+ validate_certs:
+ description:
+ - Validate SSL certs of the Scaleway API.
+ type: bool
+ default: true
+notes:
+ - Also see the API documentation on U(https://developer.scaleway.com/).
+ - If C(api_token) is not set within the module, the following
+ environment variables can be used in decreasing order of precedence:
+ C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN).
+ - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL)
+ environment variable.
+'''
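+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Gather Scaleway server information
+#     community.general.scaleway_server_info:
+#       api_token: "{{ scw_api_token }}"
+#       region: par1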
diff --git a/ansible_collections/community/general/plugins/doc_fragments/scaleway_waitable_resource.py b/ansible_collections/community/general/plugins/doc_fragments/scaleway_waitable_resource.py
new file mode 100644
index 000000000..3ab5c7d6f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/scaleway_waitable_resource.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options:
+ wait:
+ description:
+ - Wait for the resource to reach its desired state before returning.
+ type: bool
+ default: true
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the resource to reach the expected state.
+ required: false
+ default: 300
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the resource.
+ required: false
+ default: 3
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/utm.py b/ansible_collections/community/general/plugins/doc_fragments/utm.py
new file mode 100644
index 000000000..73ad80503
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/utm.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ DOCUMENTATION = r'''
+options:
+ headers:
+ description:
+ - A dictionary of additional headers to be sent to POST and PUT requests.
+ - It is needed by some modules.
+ type: dict
+ required: false
+ default: {}
+ utm_host:
+ description:
+ - The REST Endpoint of the Sophos UTM.
+ type: str
+ required: true
+ utm_port:
+ description:
+ - The port of the REST interface.
+ type: int
+ default: 4444
+ utm_token:
+ description:
+ - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\
+ PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2."
+ type: str
+ required: true
+ utm_protocol:
+ description:
+ - The protocol of the REST Endpoint.
+ choices: [ http, https ]
+ type: str
+ default: https
+ validate_certs:
+ description:
+ - Whether the REST interface's SSL certificate should be verified or not.
+ type: bool
+ default: true
+ state:
+ description:
+ - The desired state of the object.
+ - C(present) will create or update an object.
+ - C(absent) will delete an object if it was present.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
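+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Create a DNS host entry on the Sophos UTM
+#     community.general.utm_dns_host:
+#       utm_host: utm.example.com
+#       utm_token: "{{ utm_token }}"
+#       name: www.example.com
+#       address: 192.0.2.10
+#       state: present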
diff --git a/ansible_collections/community/general/plugins/doc_fragments/vexata.py b/ansible_collections/community/general/plugins/doc_fragments/vexata.py
new file mode 100644
index 000000000..ff79613ee
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/vexata.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ DOCUMENTATION = r'''
+options:
+ - See respective platform section for more details
+requirements:
+ - See respective platform section for more details
+notes:
+ - Ansible modules are available for Vexata VX100 arrays.
+'''
+
+ # Documentation fragment for Vexata VX100 series
+ VX100 = r'''
+options:
+ array:
+ description:
+ - Vexata VX100 array hostname or IPv4 Address.
+ required: true
+ type: str
+ user:
+ description:
+ - Vexata API user with administrative privileges.
+ required: false
+ type: str
+ password:
+ description:
+ - Vexata API user password.
+ required: false
+ type: str
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If set to C(true), please make sure Python >= 2.7.9 is installed on the given machine.
+ required: false
+ type: bool
+ default: false
+
+requirements:
+ - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array
+ - vexatapi >= 0.0.1
+ - python >= 2.7
+ - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if
+ user and password arguments are not passed to the module directly.
+'''
diff --git a/ansible_collections/community/general/plugins/doc_fragments/xenserver.py b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
new file mode 100644
index 000000000..eaee17384
--- /dev/null
+++ b/ansible_collections/community/general/plugins/doc_fragments/xenserver.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+ # Common parameters for XenServer modules
+ DOCUMENTATION = r'''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the XenServer host or XenServer pool master.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead.
+ type: str
+ default: localhost
+ aliases: [ host, pool ]
+ username:
+ description:
+ - The username to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead.
+ type: str
+ default: root
+ aliases: [ admin, user ]
+ password:
+ description:
+ - The password to use for connecting to XenServer.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead.
+ type: str
+ aliases: [ pass, pwd ]
+ validate_certs:
+ description:
+ - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted.
+ - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead.
+ type: bool
+ default: true
+'''
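+
+# Usage sketch (editorial illustration; values are assumptions):
+#
+#   - name: Gather facts about a virtual machine
+#     community.general.xenserver_guest_info:
+#       hostname: xenserver.example.com
+#       username: root
+#       password: "{{ xenserver_password }}"
+#       name: testvm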
diff --git a/ansible_collections/community/general/plugins/filter/counter.py b/ansible_collections/community/general/plugins/filter/counter.py
new file mode 100644
index 000000000..1b79294b5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/counter.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Remy Keil <remy.keil@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: counter
+ short_description: Counts hashable elements in a sequence
+ version_added: 4.3.0
+ author: Rémy Keil (@keilr)
+ description:
+ - Counts hashable elements in a sequence.
+ options:
+ _input:
+ description: A sequence.
+ type: list
+ elements: any
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Count occurrences
+ ansible.builtin.debug:
+ msg: >-
+ {{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }}
+ # Produces: {1: 1, 'a': 3, 2: 2, 'b': 1}
+'''
+
+RETURN = '''
+ _value:
+ description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as values.
+ type: dictionary
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common._collections_compat import Sequence
+from collections import Counter
+
+
+def counter(sequence):
+ ''' Count elements in a sequence. Returns dict with count result. '''
+ if not isinstance(sequence, Sequence):
+ raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' %
+ (sequence, type(sequence)))
+
+ try:
+ result = dict(Counter(sequence))
+ except TypeError as e:
+ raise AnsibleFilterError(
+ "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e)
+ )
+ return result
+
+
+class FilterModule(object):
+ ''' Ansible counter jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'counter': counter,
+ }
+
+ return filters
diff --git a/ansible_collections/community/general/plugins/filter/crc32.py b/ansible_collections/community/general/plugins/filter/crc32.py
new file mode 100644
index 000000000..1f0aa2e9b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/crc32.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Julien Riou <julien@riou.xyz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common.collections import is_string
+
+try:
+ from zlib import crc32
+ HAS_ZLIB = True
+except ImportError:
+ HAS_ZLIB = False
+
+
+DOCUMENTATION = '''
+ name: crc32
+ short_description: Generate a CRC32 checksum
+ version_added: 5.4.0
+ description:
+ - Checksum a string using CRC32 algorithm and return its hexadecimal representation.
+ options:
+ _input:
+ description:
+ - The string to checksum.
+ type: string
+ required: true
+ author:
+ - Julien Riou
+'''
+
+EXAMPLES = '''
+ - name: Checksum a test string
+ ansible.builtin.debug:
+ msg: "{{ 'test' | community.general.crc32 }}"
+'''
+
+RETURN = '''
+ _value:
+ description: CRC32 checksum.
+ type: string
+'''
+
+
+def crc32s(value):
+ if not is_string(value):
+ raise AnsibleFilterError('Invalid value type (%s) for crc32 (%r)' %
+ (type(value), value))
+
+ if not HAS_ZLIB:
+ raise AnsibleFilterError('Failed to import zlib module')
+
+ data = to_bytes(value, errors='surrogate_or_strict')
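+ # Mask to 32 bits so the hexadecimal result is identical on Python 2 and 3
+ # (zlib.crc32 may return a signed integer on Python 2).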
+ return "{0:x}".format(crc32(data) & 0xffffffff)
+
+
+class FilterModule:
+ def filters(self):
+ return {
+ 'crc32': crc32s,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/dict.py b/ansible_collections/community/general/plugins/filter/dict.py
new file mode 100644
index 000000000..720c9def9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/dict.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: dict
+ short_description: Convert a list of tuples into a dictionary
+ version_added: 3.0.0
+ author: Felix Fontein (@felixfontein)
+ description:
+ - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function.
+ options:
+ _input:
+ description: A list of tuples (with exactly two elements).
+ type: list
+ elements: tuple
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Convert list of tuples into dictionary
+ ansible.builtin.set_fact:
+ dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}"
+ # Result is {1: 2, 'a': 'b'}
+
+- name: Create a list of dictionaries with map and the community.general.dict filter
+ ansible.builtin.debug:
+ msg: >-
+ {{ values | map('zip', ['k1', 'k2', 'k3'])
+ | map('map', 'reverse')
+ | map('community.general.dict') }}
+ vars:
+ values:
+ - - foo
+ - 23
+ - a
+ - - bar
+ - 42
+ - b
+ # Produces the following list of dictionaries:
+ # {
+ # "k1": "foo",
+ # "k2": 23,
+ # "k3": "a"
+ # },
+ # {
+ # "k1": "bar",
+ # "k2": 42,
+ # "k3": "b"
+ # }
+'''
+
+RETURN = '''
+ _value:
+ description: The dictionary having the provided key-value pairs.
+ type: dictionary
+'''
+
+
+def dict_filter(sequence):
+ '''Convert a list of tuples to a dictionary.
+
+ Example: ``[[1, 2], ['a', 'b']] | community.general.dict`` results in ``{1: 2, 'a': 'b'}``
+ '''
+ return dict(sequence)
+
+
+class FilterModule(object):
+ '''Ansible jinja2 filters'''
+
+ def filters(self):
+ return {
+ 'dict': dict_filter,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/dict_kv.py b/ansible_collections/community/general/plugins/filter/dict_kv.py
new file mode 100644
index 000000000..59595f957
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/dict_kv.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2020 Stanislav German-Evtushenko (@giner) <ginermail@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: dict_kv
+ short_description: Convert a value to a dictionary with a single key-value pair
+ version_added: 1.3.0
+ author: Stanislav German-Evtushenko (@giner)
+ description:
+ - Convert a value to a dictionary with a single key-value pair.
+ positional: key
+ options:
+ _input:
+ description: The value for the single key-value pair.
+ type: any
+ required: true
+ key:
+ description: The key for the single key-value pair.
+ type: any
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create a one-element dictionary from a value
+ ansible.builtin.debug:
+ msg: "{{ 'myvalue' | dict_kv('mykey') }}"
+ # Produces the dictionary {'mykey': 'myvalue'}
+'''
+
+RETURN = '''
+ _value:
+ description: A dictionary with a single key-value pair.
+ type: dictionary
+'''
+
+
+def dict_kv(value, key):
+ '''Return a dictionary with a single key-value pair
+
+ Example:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ myvar: myvalue
+ tasks:
+ - debug:
+ msg: "{{ myvar | dict_kv('thatsmyvar') }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": {
+ "thatsmyvar": "myvalue"
+ }
+ }
+
+ Example 2:
+
+ - hosts: localhost
+ gather_facts: false
+ vars:
+ common_config:
+ type: host
+ database: all
+ myservers:
+ - server1
+ - server2
+ tasks:
+ - debug:
+ msg: "{{ myservers | map('dict_kv', 'server') | map('combine', common_config) }}"
+
+ produces:
+
+ ok: [localhost] => {
+ "msg": [
+ {
+ "database": "all",
+ "server": "server1",
+ "type": "host"
+ },
+ {
+ "database": "all",
+ "server": "server2",
+ "type": "host"
+ }
+ ]
+ }
+ '''
+ return {key: value}
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'dict_kv': dict_kv
+ }
diff --git a/ansible_collections/community/general/plugins/filter/from_csv.py b/ansible_collections/community/general/plugins/filter/from_csv.py
new file mode 100644
index 000000000..6472b67b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/from_csv.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
+# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: from_csv
+ short_description: Converts CSV text input into list of dicts
+ version_added: 2.3.0
+ author: Andrew Pantuso (@Ajpantuso)
+ description:
+ - Converts CSV text input into list of dictionaries.
+ options:
+ _input:
+ description: A string containing a CSV document.
+ type: string
+ required: true
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+ - Whether to ignore any whitespaces immediately following the delimiter.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Parse a CSV file's contents
+ ansible.builtin.debug:
+ msg: >-
+ {{ csv_data | community.general.from_csv(dialect='unix') }}
+ vars:
+ csv_data: |
+ Column 1,Value
+ foo,23
+ bar,42
+ # Produces the following list of dictionaries:
+ # {
+ # "Column 1": "foo",
+ # "Value": "23",
+ # },
+ # {
+ # "Column 1": "bar",
+ # "Value": "42",
+ # }
+'''
+
+RETURN = '''
+ _value:
+ description: A list with one dictionary per row.
+ type: list
+ elements: dictionary
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
+ DialectNotAvailableError,
+ CustomDialectFailureError)
+
+
+def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitialspace=None, strict=None):
+
+ dialect_params = {
+ "delimiter": delimiter,
+ "skipinitialspace": skipinitialspace,
+ "strict": strict,
+ }
+
+ try:
+ dialect = initialize_dialect(dialect, **dialect_params)
+ except (CustomDialectFailureError, DialectNotAvailableError) as e:
+ raise AnsibleFilterError(to_native(e))
+
+ reader = read_csv(data, dialect, fieldnames)
+
+ data_list = []
+
+ try:
+ for row in reader:
+ data_list.append(row)
+ except CSVError as e:
+ raise AnsibleFilterError("Unable to process file: %s" % to_native(e))
+
+ return data_list
+
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'from_csv': from_csv
+ }
diff --git a/ansible_collections/community/general/plugins/filter/groupby_as_dict.py b/ansible_collections/community/general/plugins/filter/groupby_as_dict.py
new file mode 100644
index 000000000..4a8f4c6dc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/groupby_as_dict.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: groupby_as_dict
+ short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute
+ version_added: 3.1.0
+ author: Felix Fontein (@felixfontein)
+ description:
+ - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute.
+ positional: attribute
+ options:
+ _input:
+ description: A list of dictionaries
+ type: list
+ elements: dictionary
+ required: true
+ attribute:
+ description: The attribute to use as the key.
+ type: str
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Arrange a list of dictionaries as a dictionary of dictionaries
+ ansible.builtin.debug:
+ msg: "{{ sequence | community.general.groupby_as_dict('key') }}"
+ vars:
+ sequence:
+ - key: value
+ foo: bar
+ - key: other_value
+ baz: bar
+ # Produces the following nested structure:
+ #
+ # value:
+ # key: value
+ # foo: bar
+ # other_value:
+ # key: other_value
+ # baz: bar
+'''
+
+RETURN = '''
+ _value:
+ description: A dictionary containing the dictionaries from the list as values.
+ type: dictionary
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+
+
+def groupby_as_dict(sequence, attribute):
+ '''
+ Given a sequence of dictionaries and an attribute name, returns a dictionary mapping
+ the value of this attribute to the dictionary.
+
+ If multiple dictionaries in the sequence have the same value for this attribute,
+ the filter will fail.
+ '''
+ if not isinstance(sequence, Sequence):
+ raise AnsibleFilterError('Input is not a sequence')
+
+ result = dict()
+ for list_index, element in enumerate(sequence):
+ if not isinstance(element, Mapping):
+ raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index))
+ if attribute not in element:
+ raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index))
+ result_index = element[attribute]
+ if result_index in result:
+ raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index))
+ result[result_index] = element
+ return result
+
+
+class FilterModule(object):
+ ''' Ansible list filters '''
+
+ def filters(self):
+ return {
+ 'groupby_as_dict': groupby_as_dict,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/hashids.py b/ansible_collections/community/general/plugins/filter/hashids.py
new file mode 100644
index 000000000..45fba83c0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/hashids.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.errors import (
+ AnsibleError,
+ AnsibleFilterError,
+ AnsibleFilterTypeError,
+)
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.collections import is_sequence
+
+try:
+ from hashids import Hashids
+ HAS_HASHIDS = True
+except ImportError:
+ HAS_HASHIDS = False
+
+
+def initialize_hashids(**kwargs):
+ if not HAS_HASHIDS:
+ raise AnsibleError("The hashids library must be installed in order to use this plugin")
+
+ params = dict((k, v) for k, v in kwargs.items() if v)
+
+ try:
+ return Hashids(**params)
+ except TypeError as e:
+ raise AnsibleFilterError(
+ "The provided parameters %s are invalid: %s" % (
+ ', '.join(["%s=%s" % (k, v) for k, v in params.items()]),
+ to_native(e)
+ )
+ )
+
+
+def hashids_encode(nums, salt=None, alphabet=None, min_length=None):
+ """Generates a YouTube-like hash from a sequence of ints
+
+ :nums: Sequence of one or more ints to hash
+ :salt: String to use as salt when hashing
+ :alphabet: String of 16 or more unique characters to produce a hash
+ :min_length: Minimum length of hash produced
+ """
+
+ hashids = initialize_hashids(
+ salt=salt,
+ alphabet=alphabet,
+ min_length=min_length
+ )
+
+ # Handles the case where a single int is not encapsulated in a list or tuple.
+ # User convenience seems preferable to strict typing in this case
+ # Also avoids obfuscated error messages related to single invalid inputs
+ if not is_sequence(nums):
+ nums = [nums]
+
+ try:
+ hashid = hashids.encode(*nums)
+ except TypeError as e:
+ raise AnsibleFilterTypeError(
+ "Data to encode must by a tuple or list of ints: %s" % to_native(e)
+ )
+
+ return hashid
+
+
+def hashids_decode(hashid, salt=None, alphabet=None, min_length=None):
+ """Decodes a YouTube-like hash to a sequence of ints
+
+ :hashid: Hash string to decode
+ :salt: String to use as salt when hashing
+ :alphabet: String of 16 or more unique characters to produce a hash
+ :min_length: Minimum length of hash produced
+ """
+
+ hashids = initialize_hashids(
+ salt=salt,
+ alphabet=alphabet,
+ min_length=min_length
+ )
+ nums = hashids.decode(hashid)
+ return list(nums)
+
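+# Round-trip sketch (values taken from the hashids_encode/hashids_decode docs):
+#   hashids_encode([1, 2, 3]) -> 'o2fXhV'
+#   hashids_decode('o2fXhV')  -> [1, 2, 3]
+# The same salt, alphabet and min_length must be used on both sides.
+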
+
+class FilterModule(object):
+
+ def filters(self):
+ return {
+ 'hashids_encode': hashids_encode,
+ 'hashids_decode': hashids_decode,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/hashids_decode.yml b/ansible_collections/community/general/plugins/filter/hashids_decode.yml
new file mode 100644
index 000000000..3d2144f72
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/hashids_decode.yml
@@ -0,0 +1,43 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: hashids_decode
+ short_description: Decodes a sequence of numbers from a YouTube-like hash
+ version_added: 3.0.0
+ author: Andrew Pantuso (@Ajpantuso)
+ description:
+ - Decodes a sequence of numbers from a YouTube-like hash.
+ options:
+ _input:
+ description: A YouTube-like hash.
+ type: string
+ required: true
+ salt:
+ description:
+ - String to use as salt when hashing.
+ type: str
+ alphabet:
+ description:
+ - String of 16 or more unique characters to produce a hash.
+ type: list
+ elements: str
+ min_length:
+ description:
+ - Minimum length of hash produced.
+ type: integer
+
+EXAMPLES: |
+ - name: Convert hash to list of integers
+ ansible.builtin.debug:
+ msg: "{{ 'o2fXhV' | community.general.hashids_decode }}"
+ # Produces: [1, 2, 3]
+
+RETURN:
+ _value:
+ description: A list of integers.
+ type: list
+ elements: integer
diff --git a/ansible_collections/community/general/plugins/filter/hashids_encode.yml b/ansible_collections/community/general/plugins/filter/hashids_encode.yml
new file mode 100644
index 000000000..af19522d0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/hashids_encode.yml
@@ -0,0 +1,43 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: hashids_encode
+ short_description: Encodes YouTube-like hashes from a sequence of integers
+ version_added: 3.0.0
+ author: Andrew Pantuso (@Ajpantuso)
+ description:
+ - Encodes YouTube-like hashes from a sequence of integers.
+ options:
+ _input:
+ description: A list of integers.
+ type: list
+ elements: integer
+ required: true
+ salt:
+ description:
+ - String to use as salt when hashing.
+ type: str
+ alphabet:
+ description:
+ - String of 16 or more unique characters to produce a hash.
+ type: list
+ elements: str
+ min_length:
+ description:
+ - Minimum length of hash produced.
+ type: integer
+
+EXAMPLES: |
+ - name: Convert list of integers to hash
+ ansible.builtin.debug:
+ msg: "{{ [1, 2, 3] | community.general.hashids_encode }}"
+ # Produces: 'o2fXhV'
+
+RETURN:
+ _value:
+ description: A YouTube-like hash.
+ type: string
diff --git a/ansible_collections/community/general/plugins/filter/jc.py b/ansible_collections/community/general/plugins/filter/jc.py
new file mode 100644
index 000000000..3aa8d20a5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/jc.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# contributed by Kelly Brazil <kellyjonbrazil@gmail.com>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: jc
+ short_description: Convert output of many shell commands and file-types to JSON
+ version_added: 1.1.0
+ author: Kelly Brazil (@kellyjonbrazil)
+ description:
+ - Convert output of many shell commands and file-types to JSON.
+ - Uses the L(jc library,https://github.com/kellyjonbrazil/jc).
+ positional: parser
+ options:
+ _input:
+ description: The data to convert.
+ type: string
+ required: true
+ parser:
+ description:
+ - The correct parser for the input data.
+ - For example C(ifconfig).
+ - "Note: use underscores instead of dashes (if any) in the parser module name."
+ - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers.
+ type: string
+ required: true
+ quiet:
+ description: Set to C(false) to not suppress warnings.
+ type: boolean
+ default: true
+ raw:
+ description: Set to C(true) to return pre-processed JSON.
+ type: boolean
+ default: false
+ requirements:
+ - jc installed as a Python library (U(https://pypi.org/project/jc/))
+'''
+
+EXAMPLES = '''
+- name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller
+ delegate_to: localhost
+ ansible.builtin.pip:
+ name: jc
+ state: present
+
+- name: Run command
+ ansible.builtin.command: uname -a
+ register: result
+
+- name: Convert command's result to JSON
+ ansible.builtin.debug:
+ msg: "{{ result.stdout | community.general.jc('uname') }}"
+ # Possible output:
+ #
+ # "msg": {
+ # "hardware_platform": "x86_64",
+ # "kernel_name": "Linux",
+ # "kernel_release": "4.15.0-112-generic",
+ # "kernel_version": "#113-Ubuntu SMP Thu Jul 9 23:41:39 UTC 2020",
+ # "machine": "x86_64",
+ # "node_name": "kbrazil-ubuntu",
+ # "operating_system": "GNU/Linux",
+ # "processor": "x86_64"
+ # }
+'''
+
+RETURN = '''
+ _value:
+ description: The processed output.
+ type: any
+'''
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+import importlib
+
+try:
+ import jc # noqa: F401, pylint: disable=unused-import
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def jc_filter(data, parser, quiet=True, raw=False):
+ """Convert returned command output to JSON using the JC library
+
+ Arguments:
+
+ parser required (string) the correct parser for the input data (e.g. 'ifconfig')
+ see https://github.com/kellyjonbrazil/jc#parsers for latest list of parsers.
+ quiet optional (bool) True to suppress warning messages (default is True)
+ raw optional (bool) True to return pre-processed JSON (default is False)
+
+ Returns:
+
+ dictionary or list of dictionaries
+
+ Example:
+ - name: run date command
+ hosts: ubuntu
+ tasks:
+ - name: install the prereqs of the jc filter (jc Python package) on the Ansible controller
+ delegate_to: localhost
+ ansible.builtin.pip:
+ name: jc
+ state: present
+ - ansible.builtin.shell: date
+ register: result
+ - ansible.builtin.set_fact:
+ myvar: "{{ result.stdout | community.general.jc('date') }}"
+ - ansible.builtin.debug:
+ msg: "{{ myvar }}"
+
+ produces:
+
+ ok: [192.168.1.239] => {
+ "msg": {
+ "day": 9,
+ "hour": 22,
+ "minute": 6,
+ "month": "Aug",
+ "month_num": 8,
+ "second": 22,
+ "timezone": "UTC",
+ "weekday": "Sun",
+ "weekday_num": 1,
+ "year": 2020
+ }
+ }
+ """
+
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter')
+
+ try:
+ # new API (jc v1.18.0 and higher) allows use of plugin parsers
+ if hasattr(jc, 'parse'):
+ return jc.parse(parser, data, quiet=quiet, raw=raw)
+
+ # old API (jc v1.17.7 and lower)
+ else:
+ jc_parser = importlib.import_module('jc.parsers.' + parser)
+ return jc_parser.parse(data, quiet=quiet, raw=raw)
+
+ except Exception as e:
+ raise AnsibleFilterError('Error in jc filter plugin: %s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'jc': jc_filter,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/json_query.py b/ansible_collections/community/general/plugins/filter/json_query.py
new file mode 100644
index 000000000..9e8fa4ef2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/json_query.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: json_query
+ short_description: Select a single element or a data subset from a complex data structure
+ description:
+ - This filter lets you query a complex JSON structure and iterate over it using a loop structure.
+ positional: expr
+ options:
+ _input:
+ description:
+ - The JSON data to query.
+ type: any
+ required: true
+ expr:
+ description:
+ - The query expression.
+ - See U(http://jmespath.org/examples.html) for examples.
+ type: string
+ required: true
+ requirements:
+ - jmespath
+'''
+
+EXAMPLES = '''
+- name: Define data to work on in the examples below
+ ansible.builtin.set_fact:
+ domain_definition:
+ domain:
+ cluster:
+ - name: cluster1
+ - name: cluster2
+ server:
+ - name: server11
+ cluster: cluster1
+ port: '8080'
+ - name: server12
+ cluster: cluster1
+ port: '8090'
+ - name: server21
+ cluster: cluster2
+ port: '9080'
+ - name: server22
+ cluster: cluster2
+ port: '9090'
+ library:
+ - name: lib1
+ target: cluster1
+ - name: lib2
+ target: cluster2
+
+- name: Display all cluster names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.cluster[*].name') }}"
+
+- name: Display all server names
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[*].name') }}"
+
+- name: Display all ports from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+ vars:
+ server_name_cluster1_query: "domain.server[?cluster=='cluster1'].port"
+
+- name: Display all ports from cluster1 as a string
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | community.general.json_query('domain.server[?cluster==`cluster1`].port') | join(', ') }}"
+
+- name: Display all ports from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query('domain.server[?cluster==''cluster1''].port') }}"
+
+- name: Display all server ports and names from cluster1
+ ansible.builtin.debug:
+ var: item
+ loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}"
+ vars:
+ server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}"
+
+- name: Display ports of all servers whose name starts with 'server1'
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+ vars:
+ server_name_query: "domain.server[?starts_with(name,'server1')].port"
+
+- name: Display ports of all servers whose name contains 'server1'
+ ansible.builtin.debug:
+ msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}"
+ vars:
+ server_name_query: "domain.server[?contains(name,'server1')].port"
+'''
+
+RETURN = '''
+ _value:
+ description: The result of the query.
+ type: any
+'''
+
+from ansible.errors import AnsibleError, AnsibleFilterError
+
+try:
+ import jmespath
+ HAS_LIB = True
+except ImportError:
+ HAS_LIB = False
+
+
+def json_query(data, expr):
+ '''Query data using jmespath query language ( http://jmespath.org ). Example:
+    - ansible.builtin.debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
+ '''
+ if not HAS_LIB:
+ raise AnsibleError('You need to install "jmespath" prior to running '
+ 'json_query filter')
+
+ # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence
+ # See issue: https://github.com/ansible-collections/community.general/issues/320
+ jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', )
+ jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', )
+ jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', )
+ try:
+ return jmespath.search(expr, data)
+ except jmespath.exceptions.JMESPathError as e:
+ raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e)
+ except Exception as e:
+ # For older jmespath, we can get ValueError and TypeError without much info.
+ raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+
+
+class FilterModule(object):
+ ''' Query filter '''
+
+ def filters(self):
+ return {
+ 'json_query': json_query
+ }
diff --git a/ansible_collections/community/general/plugins/filter/lists_mergeby.py b/ansible_collections/community/general/plugins/filter/lists_mergeby.py
new file mode 100644
index 000000000..036dfe4d7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/lists_mergeby.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020-2022, Vladimir Botka <vbotka@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: lists_mergeby
+ short_description: Merge two or more lists of dictionaries by a given attribute
+ version_added: 2.0.0
+ author: Vladimir Botka (@vbotka)
+ description:
+ - Merge two or more lists by attribute I(index). Optional parameters 'recursive' and 'list_merge'
+ control the merging of the lists in values. The function merge_hash from ansible.utils.vars
+ is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
+ Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
+ hashes/dictionaries".
+ positional: another_list, index
+ options:
+ _input:
+ description: A list of dictionaries.
+ type: list
+ elements: dictionary
+ required: true
+ another_list:
+ description: Another list of dictionaries. This parameter can be specified multiple times.
+ type: list
+ elements: dictionary
+ index:
+ description:
+ - The dictionary key that must be present in every dictionary in every list that is used to
+ merge the lists.
+ type: string
+ required: true
+ recursive:
+ description:
+ - Should the combine recursively merge nested dictionaries (hashes).
+ - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)."
+ type: boolean
+ default: false
+ list_merge:
+ description:
+ - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists.
+ type: string
+ default: replace
+ choices:
+ - replace
+ - keep
+ - append
+ - prepend
+ - append_rp
+ - prepend_rp
+'''
+
+EXAMPLES = '''
+- name: Merge two lists
+ ansible.builtin.debug:
+ msg: >-
+ {{ list1 | community.general.lists_mergeby(
+ list2,
+ 'index',
+ recursive=True,
+ list_merge='append'
+ ) }}"
+ vars:
+ list1:
+ - index: a
+ value: 123
+ - index: b
+ value: 42
+ list2:
+ - index: a
+ foo: bar
+ - index: c
+ foo: baz
+ # Produces the following list of dictionaries:
+ # {
+ # "index": "a",
+ # "foo": "bar",
+ # "value": 123
+ # },
+ # {
+ # "index": "b",
+ # "value": 42
+ # },
+ # {
+ # "index": "c",
+ # "foo": "baz"
+ # }
+'''
+
+RETURN = '''
+ _value:
+ description: The merged list.
+ type: list
+ elements: dictionary
+'''
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.utils.vars import merge_hash
+
+from collections import defaultdict
+from operator import itemgetter
+
+
+def list_mergeby(x, y, index, recursive=False, list_merge='replace'):
+ ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used.
+ This function is used by the function lists_mergeby.
+ '''
+
+ d = defaultdict(dict)
+ for l in (x, y):
+ for elem in l:
+ if not isinstance(elem, Mapping):
+ msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s"
+ raise AnsibleFilterError(msg % (elem, type(elem)))
+ if index in elem.keys():
+ d[elem[index]].update(merge_hash(d[elem[index]], elem, recursive, list_merge))
+ return sorted(d.values(), key=itemgetter(index))
+
+
+def lists_mergeby(*terms, **kwargs):
+ ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge'
+ control the merging of the lists in values. The function merge_hash from ansible.utils.vars
+ is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see
+ Ansible User's Guide chapter "Using filters to manipulate data" section "Combining
+ hashes/dictionaries".
+
+ Example:
+ - debug:
+ msg: "{{ list1|
+ community.general.lists_mergeby(list2,
+ 'index',
+ recursive=True,
+ list_merge='append')|
+ list }}"
+ '''
+
+ recursive = kwargs.pop('recursive', False)
+ list_merge = kwargs.pop('list_merge', 'replace')
+ if kwargs:
+ raise AnsibleFilterError("'recursive' and 'list_merge' are the only valid keyword arguments.")
+ if len(terms) < 2:
+ raise AnsibleFilterError("At least one list and index are needed.")
+
+ # allow the user to do `[list1, list2, ...] | lists_mergeby('index')`
+ flat_list = []
+ for sublist in terms[:-1]:
+ if not isinstance(sublist, Sequence):
+ msg = ("All arguments before the argument index for community.general.lists_mergeby "
+ "must be lists. %s is %s")
+ raise AnsibleFilterError(msg % (sublist, type(sublist)))
+ if len(sublist) > 0:
+ if all(isinstance(l, Sequence) for l in sublist):
+ for item in sublist:
+ flat_list.append(item)
+ else:
+ flat_list.append(sublist)
+ lists = flat_list
+
+ if not lists:
+ return []
+
+ if len(lists) == 1:
+ return lists[0]
+
+ index = terms[-1]
+
+ if not isinstance(index, string_types):
+ msg = ("First argument after the lists for community.general.lists_mergeby must be string. "
+ "%s is %s")
+ raise AnsibleFilterError(msg % (index, type(index)))
+
+ high_to_low_prio_list_iterator = reversed(lists)
+ result = next(high_to_low_prio_list_iterator)
+    for lst in high_to_low_prio_list_iterator:
+        result = list_mergeby(lst, result, index, recursive, list_merge)
+
+ return result
+
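+# Worked sketch for the EXAMPLES above: for index value 'a', list2's
+# {'index': 'a', 'foo': 'bar'} is merged over list1's {'index': 'a', 'value': 123}
+# via merge_hash, yielding {'index': 'a', 'value': 123, 'foo': 'bar'}.
+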
+
+class FilterModule(object):
+ ''' Ansible list filters '''
+
+ def filters(self):
+ return {
+ 'lists_mergeby': lists_mergeby,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/random_mac.py b/ansible_collections/community/general/plugins/filter/random_mac.py
new file mode 100644
index 000000000..662c62b07
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/random_mac.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: random_mac
+ short_description: Generate a random MAC address
+ description:
+ - Generates random networking interfaces MAC addresses for a given prefix.
+ options:
+ _input:
+ description: A string prefix to use as a basis for the random MAC generated.
+ type: string
+ required: true
+ seed:
+ description:
+ - A randomization seed to initialize the process, used to get repeatable results.
+ - If no seed is provided, a system random source such as C(/dev/urandom) is used.
+ required: false
+ type: string
+'''
+
+EXAMPLES = '''
+- name: Random MAC given a prefix
+ ansible.builtin.debug:
+ msg: "{{ '52:54:00' | community.general.random_mac }}"
+ # => '52:54:00:ef:1c:03'
+
+- name: With a seed
+ ansible.builtin.debug:
+ msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}"
+'''
+
+RETURN = '''
+ _value:
+ description: The generated MAC.
+ type: string
+'''
+
+import re
+from random import Random, SystemRandom
+
+from ansible.errors import AnsibleFilterError
+from ansible.module_utils.six import string_types
+
+
+def random_mac(value, seed=None):
+    ''' takes a string prefix and returns it completed with random bytes
+    to form a complete 6-byte MAC address '''
+
+ if not isinstance(value, string_types):
+ raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' %
+ (type(value), value))
+
+ value = value.lower()
+ mac_items = value.split(':')
+
+ if len(mac_items) > 5:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated'
+ ' items max' % value)
+
+ err = ""
+ for mac in mac_items:
+ if not mac:
+ err += ",empty item"
+ continue
+        if not re.match('[a-f0-9]{2}$', mac):
+            err += ",%s not a hex byte" % mac
+ err = err.strip(',')
+
+ if err:
+ raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, err))
+
+ if seed is None:
+ r = SystemRandom()
+ else:
+ r = Random(seed)
+    # Generate a random int between 0x1000000000 and 0xFFFFFFFFFF
+ v = r.randint(68719476736, 1099511627775)
+ # Select first n chars to complement input prefix
+ remain = 2 * (6 - len(mac_items))
+ rnd = ('%x' % v)[:remain]
+ return value + re.sub(r'(..)', r':\1', rnd)
+
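+# Worked sketch: for the prefix '52:54:00' three bytes are missing, so
+# remain = 2 * (6 - 3) = 6 hex digits are taken from the random value and
+# re.sub prepends a colon to each pair, for example ':ef:1c:03'.
+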
+
+class FilterModule:
+ ''' Ansible jinja2 filters '''
+ def filters(self):
+ return {
+ 'random_mac': random_mac,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/time.py b/ansible_collections/community/general/plugins/filter/time.py
new file mode 100644
index 000000000..25970cd26
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/time.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from ansible.errors import AnsibleFilterError
+
+
+UNIT_FACTORS = {
+ 'ms': [],
+ 's': [1000],
+ 'm': [1000, 60],
+ 'h': [1000, 60, 60],
+ 'd': [1000, 60, 60, 24],
+ 'w': [1000, 60, 60, 24, 7],
+ 'mo': [1000, 60, 60, 24, 30],
+ 'y': [1000, 60, 60, 24, 365],
+}
+
+
+UNIT_TO_SHORT_FORM = {
+ 'millisecond': 'ms',
+ 'msec': 'ms',
+ 'msecond': 'ms',
+ 'sec': 's',
+ 'second': 's',
+ 'hour': 'h',
+ 'min': 'm',
+ 'minute': 'm',
+ 'day': 'd',
+ 'week': 'w',
+ 'month': 'mo',
+ 'year': 'y',
+}
+
+
+def multiply(factors):
+ result = 1
+ for factor in factors:
+ result = result * factor
+ return result
+
+
+def to_time_unit(human_time, unit='ms', **kwargs):
+ ''' Return a time unit from a human readable string '''
+
+ # No need to handle 0
+ if human_time == "0":
+ return 0
+
+ unit_to_short_form = UNIT_TO_SHORT_FORM
+    # Shallow copy so per-call 'year'/'month' overrides do not leak into UNIT_FACTORS
+    unit_factors = dict(UNIT_FACTORS)
+
+ unit = unit_to_short_form.get(unit.rstrip('s'), unit)
+ if unit not in unit_factors:
+ raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. "
+ "Available units (singular or plural): %s. "
+ "Available short units: %s"
+ % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys())))
+
+ if 'year' in kwargs:
+ unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')]
+ if 'month' in kwargs:
+ unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')]
+
+ if kwargs:
+ raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys()))
+
+ result = 0
+ for h_time_string in human_time.split():
+ res = re.match(r'(-?\d+)(\w+)', h_time_string)
+ if not res:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ h_time_int = int(res.group(1))
+ h_time_unit = res.group(2)
+
+ h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit)
+ if h_time_unit not in unit_factors:
+ raise AnsibleFilterError(
+ "to_time_unit() can not interpret following string: %s" % human_time)
+
+ time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit])
+ result += time_in_milliseconds
+ return round(result / multiply(unit_factors[unit]), 12)
+
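+# Worked sketch: '3h -5m 6s' accumulates 3*3600000 - 5*60000 + 6*1000
+# = 10506000 ms; converting to unit 's' divides by 1000, giving 10506.0.
+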
+
+def to_milliseconds(human_time, **kwargs):
+    ''' Return milliseconds from a human readable string '''
+ return to_time_unit(human_time, 'ms', **kwargs)
+
+
+def to_seconds(human_time, **kwargs):
+ ''' Return seconds from a human readable string '''
+ return to_time_unit(human_time, 's', **kwargs)
+
+
+def to_minutes(human_time, **kwargs):
+ ''' Return minutes from a human readable string '''
+ return to_time_unit(human_time, 'm', **kwargs)
+
+
+def to_hours(human_time, **kwargs):
+ ''' Return hours from a human readable string '''
+ return to_time_unit(human_time, 'h', **kwargs)
+
+
+def to_days(human_time, **kwargs):
+ ''' Return days from a human readable string '''
+ return to_time_unit(human_time, 'd', **kwargs)
+
+
+def to_weeks(human_time, **kwargs):
+ ''' Return weeks from a human readable string '''
+ return to_time_unit(human_time, 'w', **kwargs)
+
+
+def to_months(human_time, **kwargs):
+ ''' Return months from a human readable string '''
+ return to_time_unit(human_time, 'mo', **kwargs)
+
+
+def to_years(human_time, **kwargs):
+ ''' Return years from a human readable string '''
+ return to_time_unit(human_time, 'y', **kwargs)
+
+
+class FilterModule(object):
+ ''' Ansible time jinja2 filters '''
+
+ def filters(self):
+ filters = {
+ 'to_time_unit': to_time_unit,
+ 'to_milliseconds': to_milliseconds,
+ 'to_seconds': to_seconds,
+ 'to_minutes': to_minutes,
+ 'to_hours': to_hours,
+ 'to_days': to_days,
+ 'to_weeks': to_weeks,
+ 'to_months': to_months,
+ 'to_years': to_years,
+ }
+
+ return filters
diff --git a/ansible_collections/community/general/plugins/filter/to_days.yml b/ansible_collections/community/general/plugins/filter/to_days.yml
new file mode 100644
index 000000000..19bc8faf2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_days.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_days
+  short_description: Convert a duration string to days
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to days.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into days
+ ansible.builtin.debug:
+ msg: "{{ '1y 7m 5d 30h' | community.general.to_days }}"
+
+RETURN:
+ _value:
+ description: Number of days.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_hours.yml b/ansible_collections/community/general/plugins/filter/to_hours.yml
new file mode 100644
index 000000000..83826a590
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_hours.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_hours
+  short_description: Convert a duration string to hours
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to hours.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into hours
+ ansible.builtin.debug:
+ msg: "{{ '7d 30h 20m 10s 123ms' | community.general.to_hours }}"
+
+RETURN:
+ _value:
+ description: Number of hours.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_milliseconds.yml b/ansible_collections/community/general/plugins/filter/to_milliseconds.yml
new file mode 100644
index 000000000..b6bb7e4be
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_milliseconds.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_milliseconds
+  short_description: Convert a duration string to milliseconds
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to milliseconds.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into milliseconds
+ ansible.builtin.debug:
+ msg: "{{ '30h 20m 10s 123ms' | community.general.to_milliseconds }}"
+
+RETURN:
+ _value:
+ description: Number of milliseconds.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_minutes.yml b/ansible_collections/community/general/plugins/filter/to_minutes.yml
new file mode 100644
index 000000000..3b85dadc4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_minutes.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_minutes
+  short_description: Convert a duration string to minutes
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to minutes.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into minutes
+ ansible.builtin.debug:
+ msg: "{{ '30h 20m 10s 123ms' | community.general.to_minutes }}"
+
+RETURN:
+ _value:
+ description: Number of minutes.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_months.yml b/ansible_collections/community/general/plugins/filter/to_months.yml
new file mode 100644
index 000000000..f13cee918
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_months.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_months
+  short_description: Convert a duration string to months
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to months.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into months
+ ansible.builtin.debug:
+ msg: "{{ '1y 7m 5d 30h' | community.general.to_months }}"
+
+RETURN:
+ _value:
+ description: Number of months.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_seconds.yml b/ansible_collections/community/general/plugins/filter/to_seconds.yml
new file mode 100644
index 000000000..d6e6c4e46
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_seconds.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_seconds
+  short_description: Convert a duration string to seconds
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to seconds.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into seconds
+ ansible.builtin.debug:
+ msg: "{{ '30h 20m 10s 123ms' | community.general.to_seconds }}"
+
+RETURN:
+ _value:
+ description: Number of seconds.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_time_unit.yml b/ansible_collections/community/general/plugins/filter/to_time_unit.yml
new file mode 100644
index 000000000..c0149f0ac
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_time_unit.yml
@@ -0,0 +1,89 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_time_unit
+  short_description: Convert a duration string to the given time unit
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to the given time unit.
+ positional: unit
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ unit:
+ description:
+ - Time unit to convert the duration to.
+ default: ms
+ choices:
+ - millisecond
+ - milliseconds
+ - ms
+ - msec
+ - msecs
+ - msecond
+ - mseconds
+ - s
+ - sec
+ - secs
+ - second
+ - seconds
+ - h
+ - hour
+ - hours
+ - hs
+ - m
+ - min
+ - mins
+ - minute
+ - minutes
+ - d
+ - ds
+ - day
+ - days
+ - w
+ - ws
+ - week
+ - weeks
+ - mo
+ - mos
+ - month
+ - months
+ - y
+ - ys
+ - year
+ - years
+ type: string
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into seconds
+ ansible.builtin.debug:
+ msg: "{{ '1053d 17h 53m -10s 391ms' | community.general.to_time_unit('s') }}"
+
+RETURN:
+ _value:
+ description: Number of time units.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_weeks.yml b/ansible_collections/community/general/plugins/filter/to_weeks.yml
new file mode 100644
index 000000000..499c38627
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_weeks.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_weeks
+  short_description: Convert a duration string to weeks
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to weeks.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into weeks
+ ansible.builtin.debug:
+ msg: "{{ '1y 7m 5d 30h' | community.general.to_weeks }}"
+
+RETURN:
+ _value:
+ description: Number of weeks.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/to_years.yml b/ansible_collections/community/general/plugins/filter/to_years.yml
new file mode 100644
index 000000000..1a244a276
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/to_years.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+DOCUMENTATION:
+ name: to_years
+  short_description: Convert a duration string to years
+ version_added: 0.2.0
+ description:
+ - Parse a human readable time duration string and convert to years.
+ options:
+ _input:
+ description:
+ - The time string to convert.
+ - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week,
+          C(d) and C(day) for a day, C(h) and C(hour) for an hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec)
+ and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s)
+ can be added to a unit as well, so C(seconds) is the same as C(second).
+ - Valid strings are space separated combinations of an integer with an optional minus sign and a unit.
+ - Examples are C(1h), C(-5m), and C(3h -5m 6s).
+ type: string
+ required: true
+ year:
+ description:
+ - Number of days per year.
+ default: 365
+ type: float
+ month:
+ description:
+ - Number of days per month.
+ default: 30
+ type: float
+ author:
+ - René Moser (@resmo)
+
+EXAMPLES: |
+ - name: Convert a duration into years
+ ansible.builtin.debug:
+ msg: "{{ '1053d 30h' | community.general.to_years }}"
+
+RETURN:
+ _value:
+ description: Number of years.
+ type: float
diff --git a/ansible_collections/community/general/plugins/filter/unicode_normalize.py b/ansible_collections/community/general/plugins/filter/unicode_normalize.py
new file mode 100644
index 000000000..dfbf20c57
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/unicode_normalize.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: unicode_normalize
+ short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms
+ version_added: 3.7.0
+ author: Andrew Pantuso (@Ajpantuso)
+ description:
+ - Normalizes unicode strings to facilitate comparison of characters with normalized forms.
+ positional: form
+ options:
+ _input:
+ description: A unicode string.
+ type: string
+ required: true
+ form:
+ description:
+ - The normal form to use.
+ - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details.
+ type: string
+ default: NFC
+ choices:
+ - NFC
+ - NFD
+ - NFKC
+ - NFKD
+'''
+
+EXAMPLES = '''
+- name: Normalize unicode string
+ ansible.builtin.set_fact:
+ dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}"
+ # The resulting string has length 2: one letter is 'a', the other
+ # the diacritic combiner.
+'''
+
+RETURN = '''
+ _value:
+ description: The normalized unicode string of the specified normal form.
+ type: string
+'''
+
+from unicodedata import normalize
+
+from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError
+from ansible.module_utils.six import text_type
+
+
+def unicode_normalize(data, form='NFC'):
+ """Applies normalization to 'unicode' strings.
+
+ Args:
+ data: A unicode string piped into the Jinja filter
+ form: One of ('NFC', 'NFD', 'NFKC', 'NFKD').
+ See https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize for more information.
+
+ Returns:
+ A normalized unicode string of the specified 'form'.
+ """
+
+ if not isinstance(data, text_type):
+ raise AnsibleFilterTypeError("%s is not a valid input type" % type(data))
+
+ if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'):
+ raise AnsibleFilterError("%s is not a valid form" % form)
+
+ return normalize(form, data)
+
+
+class FilterModule(object):
+ def filters(self):
+ return {
+ 'unicode_normalize': unicode_normalize,
+ }
diff --git a/ansible_collections/community/general/plugins/filter/version_sort.py b/ansible_collections/community/general/plugins/filter/version_sort.py
new file mode 100644
index 000000000..09eedbf56
--- /dev/null
+++ b/ansible_collections/community/general/plugins/filter/version_sort.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2021 Eric Lavarde <elavarde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: version_sort
+ short_description: Sort a list according to version order instead of pure alphabetical one
+ version_added: 2.2.0
+ author: Eric L. (@ericzolf)
+ description:
+ - Sort a list according to version order instead of pure alphabetical one.
+ options:
+ _input:
+ description: A list of strings to sort.
+ type: list
+ elements: string
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Sort a list of version numbers
+  ansible.builtin.set_fact:
+    sorted_versions: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}"
+ # Result is ['2.1', '2.9', '2.10']
+'''
+
+RETURN = '''
+ _value:
+ description: The list of strings sorted by version.
+ type: list
+ elements: string
+'''
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+def version_sort(value, reverse=False):
+ '''Sort a list according to loose versions so that e.g. 2.9 is smaller than 2.10'''
+ return sorted(value, key=LooseVersion, reverse=reverse)
+
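+# Comparison sketch: LooseVersion parses '2.10' as components [2, 10], so it
+# sorts after '2.9' ([2, 9]), whereas plain string sorting would put '2.10' first.
+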
+
+class FilterModule(object):
+ ''' Version sort filter '''
+
+ def filters(self):
+ return {
+ 'version_sort': version_sort
+ }
diff --git a/ansible_collections/community/general/plugins/inventory/cobbler.py b/ansible_collections/community/general/plugins/inventory/cobbler.py
new file mode 100644
index 000000000..936a409ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/cobbler.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2020 Orion Poplawski <orion@nwra.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Orion Poplawski (@opoplawski)
+ name: cobbler
+ short_description: Cobbler inventory source
+ version_added: 1.0.0
+ description:
+ - Get inventory hosts from the cobbler service.
+ - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry."
+ extends_documentation_fragment:
+ - inventory_cache
+ options:
+ plugin:
+      description: The name of this plugin. It should always be set to C(community.general.cobbler) for this plugin to recognize it as its own.
+ required: true
+ choices: [ 'cobbler', 'community.general.cobbler' ]
+ url:
+ description: URL to cobbler.
+ default: 'http://cobbler/cobbler_api'
+ env:
+ - name: COBBLER_SERVER
+ user:
+ description: Cobbler authentication user.
+ required: false
+ env:
+ - name: COBBLER_USER
+ password:
+      description: Cobbler authentication password.
+ required: false
+ env:
+ - name: COBBLER_PASSWORD
+ cache_fallback:
+ description: Fallback to cached results if connection to cobbler fails
+ type: boolean
+ default: false
+ exclude_profiles:
+ description:
+ - Profiles to exclude from inventory.
+ - Ignored if I(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ include_profiles:
+ description:
+ - Profiles to include from inventory.
+ - If specified, all other profiles will be excluded.
+ - I(exclude_profiles) is ignored if I(include_profiles) is specified.
+ type: list
+ default: []
+ elements: str
+ version_added: 4.4.0
+ group_by:
+ description: Keys to group hosts by
+ type: list
+ elements: string
+ default: [ 'mgmt_classes', 'owners', 'status' ]
+ group:
+ description: Group to place all hosts into
+ default: cobbler
+ group_prefix:
+ description: Prefix to apply to cobbler groups
+ default: cobbler_
+ want_facts:
+ description: Toggle, if C(true) the plugin will retrieve host facts from the server
+ type: boolean
+ default: true
+'''
+
+EXAMPLES = '''
+# my.cobbler.yml
+plugin: community.general.cobbler
+url: http://cobbler/cobbler_api
+user: ansible-tester
+password: secure
+'''
+
+import socket
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six import iteritems
+from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name
+
+# xmlrpc
+try:
+ import xmlrpclib as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+except ImportError:
+ try:
+ import xmlrpc.client as xmlrpc_client
+ HAS_XMLRPC_CLIENT = True
+ except ImportError:
+ HAS_XMLRPC_CLIENT = False
+
+
+class InventoryModule(BaseInventoryPlugin, Cacheable):
+ ''' Host inventory parser for ansible using cobbler as source. '''
+
+ NAME = 'community.general.cobbler'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+ self.cache_key = None
+ self.connection = None
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('cobbler.yaml', 'cobbler.yml')):
+ valid = True
+ else:
+                self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" or "cobbler.yml"')
+ return valid
+
+ def _get_connection(self):
+ if not HAS_XMLRPC_CLIENT:
+ raise AnsibleError('Could not import xmlrpc client library')
+
+ if self.connection is None:
+ self.display.vvvv('Connecting to %s\n' % self.cobbler_url)
+ self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True)
+ self.token = None
+ if self.get_option('user') is not None:
+ self.token = self.connection.login(self.get_option('user'), self.get_option('password'))
+ return self.connection
+
+ def _init_cache(self):
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {}
+
+ def _reload_cache(self):
+ if self.get_option('cache_fallback'):
+ self.display.vvv('Cannot connect to server, loading cache\n')
+ self._options['cache_timeout'] = 0
+ self.load_cache_plugin()
+ self._cache.get(self.cache_key, {})
+
+ def _get_profiles(self):
+ if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_profiles(self.token)
+ else:
+ data = c.get_profiles()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['profiles'] = data
+
+ return self._cache[self.cache_key]['profiles']
+
+ def _get_systems(self):
+ if not self.use_cache or 'systems' not in self._cache.get(self.cache_key, {}):
+ c = self._get_connection()
+ try:
+ if self.token is not None:
+ data = c.get_systems(self.token)
+ else:
+ data = c.get_systems()
+ except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError):
+ self._reload_cache()
+ else:
+ self._init_cache()
+ self._cache[self.cache_key]['systems'] = data
+
+ return self._cache[self.cache_key]['systems']
+
+ def _add_safe_group_name(self, group, child=None):
+ group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", ""))))
+ if child is not None:
+ self.inventory.add_child(group_name, child)
+ return group_name
+
+ def _exclude_profile(self, profile):
+ if self.include_profiles:
+ return profile not in self.include_profiles
+ else:
+ return profile in self.exclude_profiles
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # get connection host
+ self.cobbler_url = self.get_option('url')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ self.exclude_profiles = self.get_option('exclude_profiles')
+ self.include_profiles = self.get_option('include_profiles')
+ self.group_by = self.get_option('group_by')
+
+ for profile in self._get_profiles():
+ if profile['parent']:
+ self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent']))
+ if not self._exclude_profile(profile['parent']):
+ parent_group_name = self._add_safe_group_name(profile['parent'])
+ self.display.vvvv('Added profile parent group %s\n' % parent_group_name)
+ if not self._exclude_profile(profile['name']):
+ group_name = self._add_safe_group_name(profile['name'])
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ self.inventory.add_child(parent_group_name, group_name)
+ else:
+ self.display.vvvv('Processing profile %s without parent\n' % profile['name'])
+ # Create a hierarchy of profile names
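+                # (Sketch with a hypothetical profile name 'rhel-9-web': groups
+                # 'rhel', 'rhel-9' and 'rhel-9-web' are created, each child group
+                # attached to the previous one, unless a prefix is excluded.)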
+ profile_elements = profile['name'].split('-')
+ i = 0
+ while i < len(profile_elements) - 1:
+ profile_group = '-'.join(profile_elements[0:i + 1])
+ profile_group_child = '-'.join(profile_elements[0:i + 2])
+ if self._exclude_profile(profile_group):
+ self.display.vvvv('Excluding profile %s\n' % profile_group)
+ break
+ group_name = self._add_safe_group_name(profile_group)
+ self.display.vvvv('Added profile group %s\n' % group_name)
+ child_group_name = self._add_safe_group_name(profile_group_child)
+ self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name))
+ self.inventory.add_child(group_name, child_group_name)
+ i = i + 1
+
+ # Add default group for this inventory if specified
+ self.group = to_safe_group_name(self.get_option('group'))
+ if self.group is not None and self.group != '':
+ self.inventory.add_group(self.group)
+ self.display.vvvv('Added site group %s\n' % self.group)
+
+ for host in self._get_systems():
+ # Get the FQDN for the host and add it to the right groups
+            hostname = host['hostname']  # often empty; resolved from interfaces below
+ interfaces = host['interfaces']
+
+ if self._exclude_profile(host['profile']):
+ self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile']))
+ continue
+
+ # hostname is often empty for non-static IP hosts
+ if hostname == '':
+ for (iname, ivalue) in iteritems(interfaces):
+ if ivalue['management'] or not ivalue['static']:
+ this_dns_name = ivalue.get('dns_name', None)
+ if this_dns_name is not None and this_dns_name != "":
+ hostname = this_dns_name
+ self.display.vvvv('Set hostname to %s from %s\n' % (hostname, iname))
+
+ if hostname == '':
+ self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name'])
+ continue
+
+ self.inventory.add_host(hostname)
+ self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname))
+
+ # Add host to profile group
+ group_name = self._add_safe_group_name(host['profile'], child=hostname)
+ self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name))
+
+ # Add host to groups specified by group_by fields
+ for group_by in self.group_by:
+ if host[group_by] == '<<inherit>>':
+ groups = []
+ else:
+ groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by]
+ for group in groups:
+ group_name = self._add_safe_group_name(group, child=hostname)
+ self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name))
+
+ # Add to group for this inventory
+ if self.group is not None:
+ self.inventory.add_child(self.group, hostname)
+
+ # Add host variables
+ if self.get_option('want_facts'):
+ try:
+ self.inventory.set_variable(hostname, 'cobbler', host)
+ except ValueError as e:
+ self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e)))
diff --git a/ansible_collections/community/general/plugins/inventory/gitlab_runners.py b/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
new file mode 100644
index 000000000..d68b8d4e2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/gitlab_runners.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: gitlab_runners
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for GitLab runners.
+ requirements:
+ - python >= 2.7
+ - python-gitlab > 1.8.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the GitLab API.
+ - Uses a YAML configuration file gitlab_runners.[yml|yaml].
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to 'gitlab_runners' for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices:
+ - gitlab_runners
+ - community.general.gitlab_runners
+ server_url:
+ description: The URL of the GitLab server, with protocol (for example, http or https).
+ env:
+ - name: GITLAB_SERVER_URL
+ version_added: 1.0.0
+ type: str
+ required: true
+ api_token:
+ description: GitLab token for logging in.
+ env:
+ - name: GITLAB_API_TOKEN
+ version_added: 1.0.0
+ type: str
+ aliases:
+ - private_token
+ - access_token
+ filter:
+ description: Filter runners returned by the GitLab API.
+ env:
+ - name: GITLAB_FILTER
+ version_added: 1.0.0
+ type: str
+ choices: ['active', 'paused', 'online', 'specific', 'shared']
+ verbose_output:
+ description: Toggle whether to include all available node metadata.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+# gitlab_runners.yml
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.general.gitlab_runners
+server_url: https://gitlab.com
+strict: false
+keyed_groups:
+ # add e.g. amd64 hosts to an arch_amd64 group
+ - prefix: arch
+ key: 'architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'platform'
+ # create a group per runner tag
+ # e.g. a runner tagged w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'tag_list'
+ prefix: tag
+'''
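+# A quick way to inspect the generated inventory (assuming ansible-core is
+# installed): ansible-inventory -i gitlab_runners.yml --list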
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+try:
+ import gitlab
+ HAS_GITLAB = True
+except ImportError:
+ HAS_GITLAB = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using GitLab API as source. '''
+
+ NAME = 'community.general.gitlab_runners'
+
+ def _populate(self):
+ gl = gitlab.Gitlab(self.get_option('server_url'), private_token=self.get_option('api_token'))
+ self.inventory.add_group('gitlab_runners')
+ try:
+ if self.get_option('filter'):
+ runners = gl.runners.all(scope=self.get_option('filter'))
+ else:
+ runners = gl.runners.all()
+ for runner in runners:
+ host = str(runner['id'])
+ ip_address = runner['ip_address']
+ host_attrs = vars(gl.runners.get(runner['id']))['_attrs']
+ self.inventory.add_host(host, group='gitlab_runners')
+ self.inventory.set_variable(host, 'ansible_host', ip_address)
+ if self.get_option('verbose_output'):  # option defaults to true
+ self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host_attrs, host, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host_attrs, host, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict)
+ except Exception as e:
+ raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(("gitlab_runners.yaml", "gitlab_runners.yml")))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_GITLAB:
+ raise AnsibleError('The GitLab runners dynamic inventory plugin requires python-gitlab: https://python-gitlab.readthedocs.io/en/stable/')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/icinga2.py b/ansible_collections/community/general/plugins/inventory/icinga2.py
new file mode 100644
index 000000000..70e0f5733
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/icinga2.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: icinga2
+ short_description: Icinga2 inventory source
+ version_added: 3.7.0
+ author:
+ - Cliff Hults (@BongoEADGC6) <cliff.hults@gmail.com>
+ description:
+ - Get inventory hosts from the Icinga2 API.
+ - "Uses a configuration file as an inventory source, it must end in
+ C(.icinga2.yml) or C(.icinga2.yaml)."
+ extends_documentation_fragment:
+ - constructed
+ options:
+ strict:
+ version_added: 4.4.0
+ compose:
+ version_added: 4.4.0
+ groups:
+ version_added: 4.4.0
+ keyed_groups:
+ version_added: 4.4.0
+ plugin:
+ description: Name of the plugin.
+ required: true
+ type: string
+ choices: ['community.general.icinga2']
+ url:
+ description: Root URL of Icinga2 API.
+ type: string
+ required: true
+ user:
+ description: Username to query the API.
+ type: string
+ required: true
+ password:
+ description: Password to query the API.
+ type: string
+ required: true
+ host_filter:
+ description:
+ - A valid Icinga2 API host filter. Leave blank for no filtering.
+ type: string
+ required: false
+ validate_certs:
+ description: Enables or disables SSL certificate verification.
+ type: boolean
+ default: true
+ inventory_attr:
+ description:
+ - Allows the override of the inventory name based on different attributes.
+ - This allows for changing the way limits are used.
+ - The current default, C(address), is sometimes not unique or present. We recommend using C(name) instead.
+ type: string
+ default: address
+ choices: ['name', 'display_name', 'address']
+ version_added: 4.2.0
+'''
+
+EXAMPLES = r'''
+# my.icinga2.yml
+plugin: community.general.icinga2
+url: http://localhost:5665
+user: ansible
+password: secure
+host_filter: \"linux-servers\" in host.groups
+validate_certs: false
+inventory_attr: name
+groups:
+ # simple name matching
+ webservers: inventory_hostname.startswith('web')
+
+ # using icinga2 template
+ databaseservers: "'db-template' in (icinga2_attributes.templates|list)"
+
+compose:
+ # set all icinga2 attributes to a host variable 'icinga2_attrs'
+ icinga2_attrs: icinga2_attributes
+
+ # set 'ansible_user' and 'ansible_port' from icinga2 host vars
+ ansible_user: icinga2_attributes.vars.ansible_user
+ ansible_port: icinga2_attributes.vars.ansible_port | default(22)
+'''
+
+import json
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Icinga2 as source. '''
+
+ NAME = 'community.general.icinga2'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.icinga2_url = None
+ self.icinga2_user = None
+ self.icinga2_password = None
+ self.ssl_verify = None
+ self.host_filter = None
+ self.inventory_attr = None
+
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('icinga2.yaml', 'icinga2.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "icinga2.yaml" nor "icinga2.yml"')
+ return valid
+
+ def _api_connect(self):
+ self.headers = {
+ 'User-Agent': "ansible-icinga2-inv",
+ 'Accept': "application/json",
+ }
+ api_status_url = self.icinga2_url + "/status"
+ request_args = {
+ 'headers': self.headers,
+ 'url_username': self.icinga2_user,
+ 'url_password': self.icinga2_password,
+ 'validate_certs': self.ssl_verify
+ }
+ open_url(api_status_url, **request_args)
+
+ def _post_request(self, request_url, data=None):
+ self.display.vvv("Requested URL: %s" % request_url)
+ request_args = {
+ 'headers': self.headers,
+ 'url_username': self.icinga2_user,
+ 'url_password': self.icinga2_password,
+ 'validate_certs': self.ssl_verify
+ }
+ if data is not None:
+ request_args['data'] = json.dumps(data)
+ self.display.vvv("Request Args: %s" % request_args)
+ try:
+ response = open_url(request_url, **request_args)
+ except HTTPError as e:
+ try:
+ error_body = json.loads(e.read().decode())
+ self.display.vvv("Error returned: {0}".format(error_body))
+ except Exception:
+ error_body = {"status": None}
+ if e.code == 404 and error_body.get('status') == "No objects found.":
+ raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid")
+ raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body))
+
+ response_body = response.read()
+ json_data = json.loads(response_body.decode('utf-8'))
+ self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True))
+ if 200 <= response.status <= 299:
+ return json_data
+ if response.status == 404 and json_data['status'] == "No objects found.":
+ raise AnsibleParserError(
+ "API returned no data -- Response: %s - %s"
+ % (response.status, json_data['status']))
+ if response.status == 401:
+ raise AnsibleParserError(
+ "API was unable to complete query -- Response: %s - %s"
+ % (response.status, json_data['status']))
+ if response.status == 500:
+ raise AnsibleParserError(
+ "API Response - %s - %s"
+ % (json_data['status'], json_data['errors']))
+ raise AnsibleParserError(
+ "Unexpected data returned - %s - %s"
+ % (json_data['status'], json_data['errors']))
+
+ def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None):
+ query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url)
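+ # The Icinga2 API accepts the query body via POST while the
+ # X-HTTP-Method-Override header requests GET semantics.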
+ self.headers['X-HTTP-Method-Override'] = 'GET'
+ data_dict = dict()
+ if hosts:
+ data_dict['hosts'] = hosts
+ if attrs is not None:
+ data_dict['attrs'] = attrs
+ if joins is not None:
+ data_dict['joins'] = joins
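+ # Undo the escaping shown in the EXAMPLES section (\" becomes ") so that
+ # Icinga2 receives a valid filter expression.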
+ if host_filter is not None:
+ data_dict['filter'] = host_filter.replace("\\\"", "\"")
+ self.display.vvv(host_filter)
+ host_dict = self._post_request(query_hosts_url, data_dict)
+ return host_dict['results']
+
+ def get_inventory_from_icinga(self):
+ """Query for all hosts """
+ self.display.vvv("Querying Icinga2 for inventory")
+ query_args = {
+ "attrs": ["address", "address6", "name", "display_name", "state_type", "state", "templates", "groups", "vars", "zone"],
+ }
+ if self.host_filter is not None:
+ query_args['host_filter'] = self.host_filter
+ # Icinga2 API Call
+ results_json = self._query_hosts(**query_args)
+ # Manipulate returned API data to Ansible inventory spec
+ ansible_inv = self._convert_inv(results_json)
+ return ansible_inv
+
+ def _apply_constructable(self, name, variables):
+ strict = self.get_option('strict')
+ self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
+ self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
+
+ def _populate(self):
+ groups = self._to_json(self.get_inventory_from_icinga())
+ return groups
+
+ def _to_json(self, in_dict):
+ """Convert dictionary to JSON"""
+ return json.dumps(in_dict, sort_keys=True, indent=2)
+
+ def _convert_inv(self, json_data):
+ """Convert Icinga2 API data to JSON format for Ansible"""
+ groups_dict = {"_meta": {"hostvars": {}}}
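+ # groups_dict mirrors the legacy script-style inventory layout; hosts and
+ # groups are added directly to self.inventory below, so the returned dict
+ # stays empty apart from '_meta'.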
+ for entry in json_data:
+ host_attrs = entry['attrs']
+ if self.inventory_attr == "name":
+ host_name = entry.get('name')
+ if self.inventory_attr == "address":
+ # When using the address as the inventory name, fall back to the object name if it is missing
+ if host_attrs.get('address', '') != '':
+ host_name = host_attrs.get('address')
+ else:
+ host_name = entry.get('name')
+ if self.inventory_attr == "display_name":
+ host_name = host_attrs.get('display_name')
+ if host_attrs['state'] == 0:
+ host_attrs['state'] = 'on'
+ else:
+ host_attrs['state'] = 'off'
+ host_groups = host_attrs.get('groups')
+ self.inventory.add_host(host_name)
+ for group in host_groups:
+ if group not in self.inventory.groups.keys():
+ self.inventory.add_group(group)
+ self.inventory.add_child(group, host_name)
+ # If the address attribute is populated, override ansible_host with the value
+ if host_attrs.get('address') != '':
+ self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address'))
+ self.inventory.set_variable(host_name, 'hostname', entry.get('name'))
+ self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name'))
+ self.inventory.set_variable(host_name, 'state',
+ host_attrs['state'])
+ self.inventory.set_variable(host_name, 'state_type',
+ host_attrs['state_type'])
+ # Adds all attributes to a variable 'icinga2_attributes'
+ construct_vars = dict(self.inventory.get_host(host_name).get_vars())
+ construct_vars['icinga2_attributes'] = host_attrs
+ self._apply_constructable(host_name, construct_vars)
+ return groups_dict
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # Store the options from the YAML file
+ self.icinga2_url = self.get_option('url').rstrip('/') + '/v1'
+ self.icinga2_user = self.get_option('user')
+ self.icinga2_password = self.get_option('password')
+ self.ssl_verify = self.get_option('validate_certs')
+ self.host_filter = self.get_option('host_filter')
+ self.inventory_attr = self.get_option('inventory_attr')
+ # Not currently enabled
+ # self.cache_key = self.get_cache_key(path)
+ # self.use_cache = cache and self.get_option('cache')
+
+ # Test connection to API
+ self._api_connect()
+
+ # Call our internal helper to populate the dynamic inventory
+ self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/linode.py b/ansible_collections/community/general/plugins/inventory/linode.py
new file mode 100644
index 000000000..b28cfa27b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/linode.py
@@ -0,0 +1,313 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: linode
+ author:
+ - Luke Murphy (@decentral1se)
+ short_description: Ansible dynamic inventory plugin for Linode.
+ requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+ description:
+ - Reads inventories from the Linode API v4.
+ - Uses a YAML configuration file that ends with linode.(yml|yaml).
+ - Linode labels are used by default as the hostnames.
+ - The default inventory groups are built from groups (deprecated by
+ Linode) and not tags.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ cache:
+ version_added: 4.5.0
+ cache_plugin:
+ version_added: 4.5.0
+ cache_timeout:
+ version_added: 4.5.0
+ cache_connection:
+ version_added: 4.5.0
+ cache_prefix:
+ version_added: 4.5.0
+ plugin:
+ description: Marks this as an instance of the 'linode' plugin.
+ required: true
+ choices: ['linode', 'community.general.linode']
+ ip_style:
+ description: The style used to expose IP addresses in hostvars; C(api) replaces the plain C(ipv4)/C(ipv6) lists with the detailed address data from the Linode APIv4.
+ type: string
+ default: plain
+ choices:
+ - plain
+ - api
+ version_added: 3.6.0
+ access_token:
+ description: The Linode account personal access token.
+ required: true
+ env:
+ - name: LINODE_ACCESS_TOKEN
+ regions:
+ description: Populate inventory with instances in these regions.
+ default: []
+ type: list
+ elements: string
+ tags:
+ description: Populate inventory only with instances which have at least one of the tags listed here.
+ default: []
+ type: list
+ elements: string
+ version_added: 2.0.0
+ types:
+ description: Populate inventory with instances of these types.
+ default: []
+ type: list
+ elements: string
+ strict:
+ version_added: 2.0.0
+ compose:
+ version_added: 2.0.0
+ groups:
+ version_added: 2.0.0
+ keyed_groups:
+ version_added: 2.0.0
+'''
+
+EXAMPLES = r'''
+# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
+plugin: community.general.linode
+
+# You can use Jinja to template the access token.
+plugin: community.general.linode
+access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}"
+# For older Ansible versions, you need to write this as:
+# access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}"
+
+# Example with regions, types, groups and access token
+plugin: community.general.linode
+access_token: foobar
+regions:
+ - eu-west
+types:
+ - g5-standard-2
+
+# Example with keyed_groups, groups, and compose
+plugin: community.general.linode
+access_token: foobar
+keyed_groups:
+ - key: tags
+ separator: ''
+ - key: region
+ prefix: region
+groups:
+ webservers: "'web' in (tags|list)"
+ mailservers: "'mail' in (tags|list)"
+compose:
+ # By default, Ansible tries to connect to the label of the instance.
+ # Since that might not be a valid name to connect to, you can
+ # replace it with the first IPv4 address of the linode as follows:
+ ansible_ssh_host: ipv4[0]
+ ansible_port: 2222
+
+# Example where control traffic is limited to the internal network
+plugin: community.general.linode
+access_token: foobar
+ip_style: api
+compose:
+ ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first"
+'''
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+
+try:
+ from linode_api4 import LinodeClient
+ from linode_api4.objects.linode import Instance
+ from linode_api4.errors import ApiError as LinodeApiError
+ HAS_LINODE = True
+except ImportError:
+ HAS_LINODE = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.linode'
+
+ def _build_client(self, loader):
+ """Build the Linode client."""
+
+ access_token = self.get_option('access_token')
+ if self.templar.is_template(access_token):
+ access_token = self.templar.template(variable=access_token, disable_lookups=False)
+
+ if access_token is None:
+ raise AnsibleError((
+ 'Could not retrieve Linode access token '
+ 'from plugin configuration sources'
+ ))
+
+ self.client = LinodeClient(access_token)
+
+ def _get_instances_inventory(self):
+ """Retrieve Linode instance information from cloud inventory."""
+ try:
+ self.instances = self.client.linode.instances()
+ except LinodeApiError as exception:
+ raise AnsibleError('Linode client raised: %s' % exception)
+
+ def _add_groups(self):
+ """Add Linode instance groups to the dynamic inventory."""
+ self.linode_groups = set(
+ filter(None, [
+ instance.group
+ for instance
+ in self.instances
+ ])
+ )
+
+ for linode_group in self.linode_groups:
+ self.inventory.add_group(linode_group)
+
+ def _filter_by_config(self):
+ """Filter instances by user specified configuration."""
+ regions = self.get_option('regions')
+ if regions:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.region.id in regions
+ ]
+
+ types = self.get_option('types')
+ if types:
+ self.instances = [
+ instance for instance in self.instances
+ if instance.type.id in types
+ ]
+
+ tags = self.get_option('tags')
+ if tags:
+ self.instances = [
+ instance for instance in self.instances
+ if any(tag in instance.tags for tag in tags)
+ ]
+
+ def _add_instances_to_groups(self):
+ """Add instance names to their dynamic inventory groups."""
+ for instance in self.instances:
+ self.inventory.add_host(instance.label, group=instance.group)
+
+ def _add_hostvars_for_instances(self):
+ """Add hostvars for instances in the dynamic inventory."""
+ ip_style = self.get_option('ip_style')
+ for instance in self.instances:
+ hostvars = instance._raw_json
+ for hostvar_key in hostvars:
+ if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']:
+ continue
+ self.inventory.set_variable(
+ instance.label,
+ hostvar_key,
+ hostvars[hostvar_key]
+ )
+ if ip_style == 'api':
+ ips = instance.ips.ipv4.public + instance.ips.ipv4.private
+ ips += [instance.ips.ipv6.slaac, instance.ips.ipv6.link_local]
+ ips += instance.ips.ipv6.pools
+
+ for ip_type in set(ip.type for ip in ips):
+ self.inventory.set_variable(
+ instance.label,
+ ip_type,
+ self._ip_data([ip for ip in ips if ip.type == ip_type])
+ )
+
+ def _ip_data(self, ip_list):
+ data = []
+ for ip in list(ip_list):
+ data.append(
+ {
+ 'address': ip.address,
+ 'subnet_mask': ip.subnet_mask,
+ 'gateway': ip.gateway,
+ 'public': ip.public,
+ 'prefix': ip.prefix,
+ 'rdns': ip.rdns,
+ 'type': ip.type
+ }
+ )
+ return data
+
+ def _cacheable_inventory(self):
+ return [i._raw_json for i in self.instances]
+
+ def populate(self):
+ strict = self.get_option('strict')
+
+ self._filter_by_config()
+
+ self._add_groups()
+ self._add_instances_to_groups()
+ self._add_hostvars_for_instances()
+ for instance in self.instances:
+ variables = self.inventory.get_host(instance.label).get_vars()
+ self._add_host_to_composed_groups(
+ self.get_option('groups'),
+ variables,
+ instance.label,
+ strict=strict)
+ self._add_host_to_keyed_groups(
+ self.get_option('keyed_groups'),
+ variables,
+ instance.label,
+ strict=strict)
+ self._set_composite_vars(
+ self.get_option('compose'),
+ variables,
+ instance.label,
+ strict=strict)
+
+ def verify_file(self, path):
+ """Verify the Linode configuration file."""
+ if super(InventoryModule, self).verify_file(path):
+ endings = ('linode.yaml', 'linode.yml')
+ if any((path.endswith(ending) for ending in endings)):
+ return True
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+ """Dynamically parse Linode the cloud inventory."""
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self.instances = None
+
+ if not HAS_LINODE:
+ raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
+
+ self._read_config_data(path)
+
+ cache_key = self.get_cache_key(path)
+
+ if cache:
+ cache = self.get_option('cache')
+
+ update_cache = False
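+ # Cache flow: a hit rebuilds Instance objects from the cached raw JSON; a
+ # miss (KeyError) marks the cache for refresh after the live API query below.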
+ if cache:
+ try:
+ self.instances = [Instance(None, i["id"], i) for i in self._cache[cache_key]]
+ except KeyError:
+ update_cache = True
+
+ # Check for None rather than False in order to allow
+ # for empty sets of cached instances
+ if self.instances is None:
+ self._build_client(loader)
+ self._get_instances_inventory()
+
+ if update_cache:
+ self._cache[cache_key] = self._cacheable_inventory()
+
+ self.populate()
diff --git a/ansible_collections/community/general/plugins/inventory/lxd.py b/ansible_collections/community/general/plugins/inventory/lxd.py
new file mode 100644
index 000000000..bd0a6ce00
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/lxd.py
@@ -0,0 +1,1099 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Frank Dornheim <dornheim@posteo.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: lxd
+ short_description: Returns Ansible inventory from lxd host
+ description:
+ - Get inventory from an lxd host.
+ - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'.
+ version_added: "3.0.0"
+ author: "Frank Dornheim (@conloos)"
+ requirements:
+ - ipaddress
+ - lxd >= 4.0
+ options:
+ plugin:
+ description: Token that ensures this is a source file for the 'lxd' plugin.
+ required: true
+ choices: [ 'community.general.lxd' ]
+ url:
+ description:
+ - The unix domain socket path or the https URL for the lxd server.
+ - Socket paths in the filesystem have to start with C(unix:).
+ - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket).
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ aliases: [ key_file ]
+ default: $HOME/.config/lxc/client.key
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ aliases: [ cert_file ]
+ default: $HOME/.config/lxc/client.crt
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the lxd server before
+ running this module using the following command:
+ C(lxc config set core.trust_password <some random password>).
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
+ - If I(trust_password) is set, this module sends a request for authentication before sending any other requests.
+ type: str
+ state:
+ description: Filter the instances by their current status.
+ type: str
+ default: none
+ choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ]
+ project:
+ description: Filter the instances by the given project.
+ type: str
+ default: default
+ version_added: 6.2.0
+ type_filter:
+ description:
+ - Filter the instances by type C(virtual-machine), C(container) or C(both).
+ - The first version of the inventory only supported containers.
+ type: str
+ default: container
+ choices: [ 'virtual-machine', 'container', 'both' ]
+ version_added: 4.2.0
+ prefered_instance_network_interface:
+ description:
+ - If an instance has multiple network interfaces, select the preferred one by this name pattern.
+ - The pattern is combined with the first number that can be found, for example 'eth' + 0.
+ - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0.
+ The old name still works as an alias.
+ type: str
+ default: eth
+ aliases:
+ - prefered_container_network_interface
+ prefered_instance_network_family:
+ description:
+ - If an instance has multiple network interfaces, select the preferred one by address family.
+ - Specify C(inet) for IPv4 and C(inet6) for IPv6.
+ type: str
+ default: inet
+ choices: [ 'inet', 'inet6' ]
+ groupby:
+ description:
+ - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid).
+ - See example for syntax.
+ type: dict
+'''
+
+EXAMPLES = '''
+# simple lxd.yml
+plugin: community.general.lxd
+url: unix:/var/snap/lxd/common/lxd/unix.socket
+
+# simple lxd.yml including filter
+plugin: community.general.lxd
+url: unix:/var/snap/lxd/common/lxd/unix.socket
+state: RUNNING
+
+# simple lxd.yml including virtual machines and containers
+plugin: community.general.lxd
+url: unix:/var/snap/lxd/common/lxd/unix.socket
+type_filter: both
+
+# grouping lxd.yml
+groupby:
+ locationBerlin:
+ type: location
+ attribute: Berlin
+ netRangeIPv4:
+ type: network_range
+ attribute: 10.98.143.0/24
+ netRangeIPv6:
+ type: network_range
+ attribute: fd42:bd00:7b11:2167:216:3eff::/24
+ osUbuntu:
+ type: os
+ attribute: ubuntu
+ testpattern:
+ type: pattern
+ attribute: test
+ profileDefault:
+ type: profile
+ attribute: default
+ profileX11:
+ type: profile
+ attribute: x11
+ releaseFocal:
+ type: release
+ attribute: focal
+ releaseBionic:
+ type: release
+ attribute: bionic
+ typeVM:
+ type: type
+ attribute: virtual-machine
+ typeContainer:
+ type: type
+ attribute: container
+ vlan666:
+ type: vlanid
+ attribute: 666
+ projectInternals:
+ type: project
+ attribute: internals
+'''
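+# Each groupby entry above names a group and selects one of the supported
+# 'type' keywords; parse() dispatches each entry to the matching
+# build_inventory_groups_* method below.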
+
+import json
+import re
+import time
+import os
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible.module_utils.six import raise_from
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+
+try:
+ import ipaddress
+except ImportError as exc:
+ IPADDRESS_IMPORT_ERROR = exc
+else:
+ IPADDRESS_IMPORT_ERROR = None
+
+
+class InventoryModule(BaseInventoryPlugin):
+ DEBUG = 4
+ NAME = 'community.general.lxd'
+ SNAP_SOCKET_URL = 'unix:/var/snap/lxd/common/lxd/unix.socket'
+ SOCKET_URL = 'unix:/var/lib/lxd/unix.socket'
+
+ @staticmethod
+ def load_json_data(path):
+ """Load json data
+
+ Load json data from file
+
+ Args:
+ str(path): Path to the json file
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(json_data): json data"""
+ try:
+ with open(path, 'r') as json_file:
+ return json.load(json_file)
+ except (IOError, json.decoder.JSONDecodeError) as err:
+ raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err)))
+
+ def save_json_data(self, path, file_name=None):
+ """save data as json
+
+ Save data as json file
+
+ Args:
+ list(path): Path elements
+ str(file_name): Filename of data
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ if file_name:
+ path.append(file_name)
+ else:
+ prefix = 'lxd_data-'
+ time_stamp = time.strftime('%Y%m%d-%H%M%S')
+ suffix = '.atd'
+ path.append(prefix + time_stamp + suffix)
+
+ try:
+ cwd = os.path.abspath(os.path.dirname(__file__))
+ with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file:
+ json.dump(self.data, json_file)
+ except IOError as err:
+ raise AnsibleParserError('Could not save data: {0}'.format(to_native(err)))
+
+ def verify_file(self, path):
+ """Check the config
+
+ Return true/false if the config-file is valid for this plugin
+
+ Args:
+ str(path): path to the config
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ bool(valid): is valid"""
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('lxd.yaml', 'lxd.yml')):
+ valid = True
+ else:
+ self.display.vvv('Inventory source not ending in "lxd.yaml" or "lxd.yml"')
+ return valid
+
+ @staticmethod
+ def validate_url(url):
+ """validate url
+
+ check whether the url is correctly formatted
+
+ Args:
+ url
+ Kwargs:
+ None
+ Raises:
+ AnsibleError
+ Returns:
+ bool"""
+ if not isinstance(url, str):
+ return False
+ if not url.startswith(('unix:', 'https:')):
+ raise AnsibleError('URL is malformed: {0}'.format(to_native(url)))
+ return True
+
+ def _connect_to_socket(self):
+ """connect to lxd socket
+
+ Connect to lxd socket by provided url or defaults
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ AnsibleError
+ Returns:
+ None"""
+ error_storage = {}
+ url_list = [self.get_option('url'), self.SNAP_SOCKET_URL, self.SOCKET_URL]
+ urls = (url for url in url_list if self.validate_url(url))
+ for url in urls:
+ try:
+ socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug)
+ return socket_connection
+ except LXDClientException as err:
+ error_storage[url] = err
+ raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage)))
+
+ def _get_networks(self):
+ """Get Networknames
+
+ Returns all network config names
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ list(names): names of all network_configs"""
+ # e.g. {'type': 'sync',
+ # 'status': 'Success',
+ # 'status_code': 200,
+ # 'operation': '',
+ # 'error_code': 0,
+ # 'error': '',
+ # 'metadata': ['/1.0/networks/lxdbr0']}
+ network_configs = self.socket.do('GET', '/1.0/networks')
+ return [m.split('/')[3] for m in network_configs['metadata']]
+
+ def _get_instances(self):
+ """Get instancenames
+
+ Returns all instancenames
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ list(names): names of all instances"""
+ # e.g. {
+ # "metadata": [
+ # "/1.0/instances/foo",
+ # "/1.0/instances/bar"
+ # ],
+ # "status": "Success",
+ # "status_code": 200,
+ # "type": "sync"
+ # }
+ url = '/1.0/instances'
+ if self.project:
+ url = url + '?{0}'.format(urlencode(dict(project=self.project)))
+
+ instances = self.socket.do('GET', url)
+
+ if self.project:
+ return [m.split('/')[3].split('?')[0] for m in instances['metadata']]
+
+ return [m.split('/')[3] for m in instances['metadata']]
+
+ def _get_config(self, branch, name):
+ """Get inventory of instance
+
+ Get config of instance
+
+ Args:
+ str(branch): Name of the API branch
+ str(name): Name of instance
+ Kwargs:
+ None
+ Source:
+ https://github.com/lxc/lxd/blob/master/doc/rest-api.md
+ Raises:
+ None
+ Returns:
+ dict(config): Config of the instance"""
+ config = {}
+ if isinstance(branch, (tuple, list)):
+ config[name] = {branch[1]: self.socket.do(
+ 'GET', '/1.0/{0}/{1}/{2}?{3}'.format(to_native(branch[0]), to_native(name), to_native(branch[1]), urlencode(dict(project=self.project))))}
+ else:
+ config[name] = {branch: self.socket.do(
+ 'GET', '/1.0/{0}/{1}?{2}'.format(to_native(branch), to_native(name), urlencode(dict(project=self.project))))}
+ return config
+
+ def get_instance_data(self, names):
+ """Create Inventory of the instance
+
+ Iterate through the different branches of the instances and collect Informations.
+
+ Args:
+ list(names): List of instance names
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # tuple(('instances','metadata/templates')) to get section in branch
+ # e.g. /1.0/instances/<name>/metadata/templates
+ branches = ['instances', ('instances', 'state')]
+ instance_config = {}
+ for branch in branches:
+ for name in names:
+ instance_config['instances'] = self._get_config(branch, name)
+ self.data = dict_merge(instance_config, self.data)
+
+ def get_network_data(self, names):
+ """Create Inventory of the instance
+
+ Iterate through the different branches of the instances and collect Informations.
+
+ Args:
+ list(names): List of instance names
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # tuple(('networks','state')) to get section in branch
+ # e.g. /1.0/networks/<name>/state
+ branches = [('networks', 'state')]
+ network_config = {}
+ for branch in branches:
+ for name in names:
+ try:
+ network_config['networks'] = self._get_config(branch, name)
+ except LXDClientException:
+ network_config['networks'] = {name: None}
+ self.data = dict_merge(network_config, self.data)
+
+ def extract_network_information_from_instance_config(self, instance_name):
+ """Returns the network interface configuration
+
+ Returns the network ipv4 and ipv6 config of the instance without link-local addresses
+
+ Args:
+ str(instance_name): Name of the instance
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(network_configuration): network config"""
+ instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name))
+ network_configuration = None
+ if instance_network_interfaces:
+ network_configuration = {}
+ gen_interface_names = [interface_name for interface_name in instance_network_interfaces if interface_name != 'lo']
+ for interface_name in gen_interface_names:
+ gen_address = [address for address in instance_network_interfaces[interface_name]['addresses'] if address.get('scope') != 'link']
+ network_configuration[interface_name] = []
+ for address in gen_address:
+ address_set = {}
+ address_set['family'] = address.get('family')
+ address_set['address'] = address.get('address')
+ address_set['netmask'] = address.get('netmask')
+ address_set['combined'] = address.get('address') + '/' + address.get('netmask')
+ network_configuration[interface_name].append(address_set)
+ return network_configuration
+
+ def get_prefered_instance_network_interface(self, instance_name):
+ """Helper to get the prefered interface of thr instance
+
+ Helper to get the preferred interface provided by the name pattern from 'prefered_instance_network_interface'.
+
+ Args:
+ str(instance_name): name of the instance
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ str(prefered_interface): None or interface name"""
+ instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ prefered_interface = None # init
+ if instance_network_interfaces: # instance has network interfaces
+ # interfaces whose names start with the desired pattern
+ net_generator = [interface for interface in instance_network_interfaces if interface.startswith(self.prefered_instance_network_interface)]
+ selected_interfaces = [] # init
+ for interface in net_generator:
+ selected_interfaces.append(interface)
+ if len(selected_interfaces) > 0:
+ prefered_interface = sorted(selected_interfaces)[0]
+ return prefered_interface
+
+ def get_instance_vlans(self, instance_name):
+ """Get VLAN(s) from instance
+
+ Helper to get the VLAN_ID from the instance
+
+ Args:
+ str(instance_name): name of the instance
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # get network device configuration and store {network: vlan_id}
+ network_vlans = {}
+ for network in self._get_data_entry('networks'):
+ if self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network)):
+ network_vlans[network] = self._get_data_entry('state/metadata/vlan/vid', data=self.data['networks'].get(network))
+
+ # get networkdevices of instance and return
+ # e.g.
+ # "eth0":{ "name":"eth0",
+ # "network":"lxdbr0",
+ # "type":"nic"},
+ vlan_ids = {}
+ devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name)))
+ for device in devices:
+ if 'network' in devices[device]:
+ if devices[device]['network'] in network_vlans:
+ vlan_ids[devices[device].get('network')] = network_vlans[devices[device].get('network')]
+ return vlan_ids if vlan_ids else None
+
+ def _get_data_entry(self, path, data=None, delimiter='/'):
+ """Helper to get data
+
+ Helper to get data from self.data by a path like 'path/to/target'
+ Attention: Escaping of the delimiter is not (yet) provided.
+
+ Args:
+ str(path): path to nested dict
+ Kwargs:
+ dict(data): datastore
+ str(delimiter): delimiter in Path.
+ Raises:
+ None
+ Returns:
+ *(value)"""
+ try:
+ if not data:
+ data = self.data
+ if delimiter in path:
+ path = path.split(delimiter)
+
+ if isinstance(path, list) and len(path) > 1:
+ data = data[path.pop(0)]
+ path = delimiter.join(path)
+ return self._get_data_entry(path, data, delimiter) # recursion
+ return data[path]
+ except KeyError:
+ return None
+
+ def _set_data_entry(self, instance_name, key, value, path=None):
+ """Helper to save data
+
+ Helper to save the data in self.data
+ Detect if data is already in branch and use dict_merge() to prevent that branch is overwritten.
+
+ Args:
+ str(instance_name): name of instance
+ str(key): same as dict
+ *(value): same as dict
+ Kwargs:
+ str(path): path to branch-part
+ Raises:
+ AnsibleParserError
+ Returns:
+ None"""
+ if not path:
+ path = self.data['inventory']
+ if instance_name not in path:
+ path[instance_name] = {}
+
+ try:
+ if isinstance(value, dict) and key in path[instance_name]:
+ path[instance_name] = dict_merge(value, path[instance_name][key])
+ else:
+ path[instance_name][key] = value
+ except KeyError as err:
+ raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err)))
+
+ def extract_information_from_instance_configs(self):
+ """Process configuration information
+
+ Preparation of the data
+
+ Args:
+ dict(configs): instance configurations
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # create branch "inventory"
+ if 'inventory' not in self.data:
+ self.data['inventory'] = {}
+
+ for instance_name in self.data['instances']:
+ self._set_data_entry(instance_name, 'os', self._get_data_entry(
+ 'instances/{0}/instances/metadata/config/image.os'.format(instance_name)))
+ self._set_data_entry(instance_name, 'release', self._get_data_entry(
+ 'instances/{0}/instances/metadata/config/image.release'.format(instance_name)))
+ self._set_data_entry(instance_name, 'version', self._get_data_entry(
+ 'instances/{0}/instances/metadata/config/image.version'.format(instance_name)))
+ self._set_data_entry(instance_name, 'profile', self._get_data_entry(
+ 'instances/{0}/instances/metadata/profiles'.format(instance_name)))
+ self._set_data_entry(instance_name, 'location', self._get_data_entry(
+ 'instances/{0}/instances/metadata/location'.format(instance_name)))
+ self._set_data_entry(instance_name, 'state', self._get_data_entry(
+ 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name)))
+ self._set_data_entry(instance_name, 'type', self._get_data_entry(
+ 'instances/{0}/instances/metadata/type'.format(instance_name)))
+ self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name))
+ self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name))
+ self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name))
+ self._set_data_entry(instance_name, 'project', self._get_data_entry(
+ 'instances/{0}/instances/metadata/project'.format(instance_name)))
+
+ def build_inventory_network(self, instance_name):
+ """Add the network interfaces of the instance to the inventory
+
+ Logic:
+ - if the instance has no interface -> 'ansible_connection: local'
+ - get preferred_interface & prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
+ - first Interface from: network_interfaces prefered_instance_network_family -> 'ansible_connection: ssh' & 'ansible_host: <IP>'
+
+ Args:
+ str(instance_name): name of instance
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ def interface_selection(instance_name):
+ """Select instance Interface for inventory
+
+ Logic:
+ - get preferred_interface & prefered_instance_network_family -> str(IP)
+ - first Interface from: network_interfaces prefered_instance_network_family -> str(IP)
+
+ Args:
+ str(instance_name): name of instance
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(interface_name: ip)"""
+ prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None
+ prefered_instance_network_family = self.prefered_instance_network_family
+
+ ip_address = ''
+ if prefered_interface:
+ interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface))
+ for config in interface:
+ if config['family'] == prefered_instance_network_family:
+ ip_address = config['address']
+ break
+ else:
+ interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name))
+ for interface in interfaces.values():
+ for config in interface:
+ if config['family'] == prefered_instance_network_family:
+ ip_address = config['address']
+ break
+ return ip_address
+
+ if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance has network interfaces
+ self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh')
+ self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name))
+ else:
+ self.inventory.set_variable(instance_name, 'ansible_connection', 'local')
+
+ def build_inventory_hosts(self):
+ """Build host-part dynamic inventory
+
+ Build the host-part of the dynamic inventory.
+ Add Hosts and host_vars to the inventory.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ for instance_name in self.data['inventory']:
+ instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or "STOPPED").lower()
+
+ # Only consider instances that match the "state" filter, if self.state is not None
+ if self.filter:
+ if self.filter.lower() != instance_state:
+ continue
+ # add instance
+ self.inventory.add_host(instance_name)
+ # add network information
+ self.build_inventory_network(instance_name)
+ # add os
+ v = self._get_data_entry('inventory/{0}/os'.format(instance_name))
+ if v:
+ self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower())
+ # add release
+ v = self._get_data_entry('inventory/{0}/release'.format(instance_name))
+ if v:
+ self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower())
+ # add profile
+ self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name)))
+ # add state
+ self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state)
+ # add type
+ self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name)))
+ # add location information
+ if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong type by lxd 'none' != 'None'
+ self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name)))
+ # add VLAN_ID information
+ if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)):
+ self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)))
+ # add project
+ self.inventory.set_variable(instance_name, 'ansible_lxd_project', self._get_data_entry('inventory/{0}/project'.format(instance_name)))
+
+ def build_inventory_groups_location(self, group_name):
+ """create group by attribute: location
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ for instance_name in self.inventory.hosts:
+ if 'ansible_lxd_location' in self.inventory.get_host(instance_name).get_vars():
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_pattern(self, group_name):
+ """create group by name pattern
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ regex_pattern = self.groupby[group_name].get('attribute')
+
+ for instance_name in self.inventory.hosts:
+ result = re.search(regex_pattern, instance_name)
+ if result:
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_network_range(self, group_name):
+ """check if IP is in network-class
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ try:
+ network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute')))
+ except ValueError as err:
+ raise AnsibleParserError(
+ 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err)))
+
+ for instance_name in self.inventory.hosts:
+ if self.data['inventory'][instance_name].get('network_interfaces') is not None:
+ for interface in self.data['inventory'][instance_name].get('network_interfaces'):
+ for interface_family in self.data['inventory'][instance_name].get('network_interfaces')[interface]:
+ try:
+ address = ipaddress.ip_address(to_text(interface_family['address']))
+ if address.version == network.version and address in network:
+ self.inventory.add_child(group_name, instance_name)
+ except ValueError:
+ # Ignore invalid IP addresses returned by lxd
+ pass
+
+ def build_inventory_groups_project(self, group_name):
+ """create group by attribute: project
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts
+ if 'ansible_lxd_project' in self.inventory.get_host(instance_name).get_vars()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_project'):
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_os(self, group_name):
+ """create group by attribute: os
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts
+ if 'ansible_lxd_os' in self.inventory.get_host(instance_name).get_vars()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_os'):
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_release(self, group_name):
+ """create group by attribute: release
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts
+ if 'ansible_lxd_release' in self.inventory.get_host(instance_name).get_vars()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_release'):
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_profile(self, group_name):
+ """create group by attribute: profile
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts.keys()
+ if 'ansible_lxd_profile' in self.inventory.get_host(instance_name).get_vars().keys()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_profile'):
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_vlanid(self, group_name):
+ """create group by attribute: vlanid
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts.keys()
+ if 'ansible_lxd_vlan_ids' in self.inventory.get_host(instance_name).get_vars().keys()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute') in self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_vlan_ids').values():
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups_type(self, group_name):
+ """create group by attribute: type
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ # maybe we just want to expand one group
+ if group_name not in self.inventory.groups:
+ self.inventory.add_group(group_name)
+
+ gen_instances = [
+ instance_name for instance_name in self.inventory.hosts
+ if 'ansible_lxd_type' in self.inventory.get_host(instance_name).get_vars()]
+ for instance_name in gen_instances:
+ if self.groupby[group_name].get('attribute').lower() == self.inventory.get_host(instance_name).get_vars().get('ansible_lxd_type'):
+ self.inventory.add_child(group_name, instance_name)
+
+ def build_inventory_groups(self):
+ """Build group-part dynamic inventory
+
+ Build the group-part of the dynamic inventory.
+ Add groups to the inventory.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ def group_type(group_name):
+ """create groups defined by lxd.yml or defaultvalues
+
+ create groups defined by lxd.yml or defaultvalues
+ supported:
+ * 'location'
+ * 'pattern'
+ * 'network_range'
+ * 'os'
+ * 'release'
+ * 'profile'
+ * 'vlanid'
+ * 'type'
+ * 'project'
+
+ Args:
+ str(group_name): Group name
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ # Chained elif instead of a dispatch map, for compatibility with Python 2
+ if self.groupby[group_name].get('type') == 'location':
+ self.build_inventory_groups_location(group_name)
+ elif self.groupby[group_name].get('type') == 'pattern':
+ self.build_inventory_groups_pattern(group_name)
+ elif self.groupby[group_name].get('type') == 'network_range':
+ self.build_inventory_groups_network_range(group_name)
+ elif self.groupby[group_name].get('type') == 'os':
+ self.build_inventory_groups_os(group_name)
+ elif self.groupby[group_name].get('type') == 'release':
+ self.build_inventory_groups_release(group_name)
+ elif self.groupby[group_name].get('type') == 'profile':
+ self.build_inventory_groups_profile(group_name)
+ elif self.groupby[group_name].get('type') == 'vlanid':
+ self.build_inventory_groups_vlanid(group_name)
+ elif self.groupby[group_name].get('type') == 'type':
+ self.build_inventory_groups_type(group_name)
+ elif self.groupby[group_name].get('type') == 'project':
+ self.build_inventory_groups_project(group_name)
+ else:
+ raise AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name)))
+
+ if self.groupby:
+ for group_name in self.groupby:
+ if not group_name.isalnum():
+ raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name)))
+ group_type(group_name)
+
+ def build_inventory(self):
+ """Build dynamic inventory
+
+ Build the dynamic inventory.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ self.build_inventory_hosts()
+ self.build_inventory_groups()
+
+ def cleandata(self):
+ """Clean the dynamic inventory
+
+ The first version of the inventory only supported containers.
+ This will change in the future.
+ This method cleans up the data and removes all items of the wrong type.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ iter_keys = list(self.data['instances'].keys())
+ for instance_name in iter_keys:
+ if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter:
+ del self.data['instances'][instance_name]
+
+ def _populate(self):
+ """Return the hosts and groups
+
+ Returns the processed instance configurations from the lxd import
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+
+ if len(self.data) == 0: # If no data was injected by the unit tests, open the socket
+ self.socket = self._connect_to_socket()
+ self.get_instance_data(self._get_instances())
+ self.get_network_data(self._get_networks())
+
+ # The first version of the inventory only supported containers.
+ # This will change in the future.
+ # The following function cleans up the data.
+ if self.type_filter != 'both':
+ self.cleandata()
+
+ self.extract_information_from_instance_configs()
+
+ # self.display.vvv(self.save_json_data([os.path.abspath(__file__)]))
+
+ self.build_inventory()
+
+ def parse(self, inventory, loader, path, cache):
+ """Return dynamic inventory from source
+
+ Returns the processed inventory from the lxd import
+
+ Args:
+ str(inventory): inventory object with existing data and
+ the methods to add hosts/groups/variables
+ to inventory
+ str(loader): Ansible's DataLoader
+ str(path): path to the config
+ bool(cache): use or avoid caches
+ Kwargs:
+ None
+ Raises:
+ AnsibleParserError
+ Returns:
+ None"""
+ if IPADDRESS_IMPORT_ERROR:
+ raise_from(
+ AnsibleError('ipaddress must be installed to use this plugin'),
+ IPADDRESS_IMPORT_ERROR)
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=False)
+ # Read the inventory YAML file
+ self._read_config_data(path)
+ try:
+ self.client_key = self.get_option('client_key')
+ self.client_cert = self.get_option('client_cert')
+ self.project = self.get_option('project')
+ self.debug = self.DEBUG
+ self.data = {} # store for inventory-data
+ self.groupby = self.get_option('groupby')
+ self.plugin = self.get_option('plugin')
+ self.prefered_instance_network_family = self.get_option('prefered_instance_network_family')
+ self.prefered_instance_network_interface = self.get_option('prefered_instance_network_interface')
+ self.type_filter = self.get_option('type_filter')
+ if self.get_option('state').lower() == 'none': # 'none' in the config file arrives as a string
+ self.filter = None
+ else:
+ self.filter = self.get_option('state').lower()
+ self.trust_password = self.get_option('trust_password')
+ self.url = self.get_option('url')
+ except Exception as err:
+ raise AnsibleParserError(
+ 'All correct options required: {0}'.format(to_native(err)))
+ # Call our internal helper to populate the dynamic inventory
+ self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/nmap.py b/ansible_collections/community/general/plugins/inventory/nmap.py
new file mode 100644
index 000000000..a03cf3e6f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/nmap.py
@@ -0,0 +1,295 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: nmap
+ short_description: Uses nmap to find hosts to target
+ description:
+ - Uses a YAML configuration file with a valid YAML extension.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ requirements:
+ - nmap CLI installed
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'nmap' plugin.
+ required: true
+ choices: ['nmap', 'community.general.nmap']
+ sudo:
+ description: Set to C(true) to execute a C(sudo nmap) plugin scan.
+ version_added: 4.8.0
+ default: false
+ type: boolean
+ address:
+ description: Network IP or range of IPs to scan. You can use a simple range (10.2.2.15-25) or CIDR notation.
+ required: true
+ env:
+ - name: ANSIBLE_NMAP_ADDRESS
+ version_added: 6.6.0
+ exclude:
+ description:
+ - List of addresses to exclude.
+ - For example C(10.2.2.15-25) or C(10.2.2.15,10.2.2.16).
+ type: list
+ elements: string
+ env:
+ - name: ANSIBLE_NMAP_EXCLUDE
+ version_added: 6.6.0
+ port:
+ description:
+ - Only scan specific port or port range (C(-p)).
+ - For example, you could pass C(22) for a single port, C(1-65535) for a range of ports,
+ or C(U:53,137,T:21-25,139,8080,S:9) to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all protocols.
+ type: string
+ version_added: 6.5.0
+ ports:
+ description: Enable/disable scanning ports.
+ type: boolean
+ default: true
+ ipv4:
+ description: Use IPv4 addresses.
+ type: boolean
+ default: true
+ ipv6:
+ description: Use IPv6 addresses.
+ type: boolean
+ default: true
+ udp_scan:
+ description:
+ - Scan via UDP.
+ - Depending on your system you might need I(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ icmp_timestamp:
+ description:
+ - Scan via ICMP Timestamp (C(-PP)).
+ - Depending on your system you might need I(sudo=true) for this to work.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ open:
+ description: Only scan for open (or possibly open) ports.
+ type: boolean
+ default: false
+ version_added: 6.5.0
+ dns_resolve:
+ description: Whether to always (C(true)) or never (C(false)) do DNS resolution.
+ type: boolean
+ default: false
+ version_added: 6.1.0
+ notes:
+ - At least one of C(ipv4) or C(ipv6) must be C(true); both can be C(true), but they cannot both be C(false).
+ - 'TODO: add OS fingerprinting'
+'''
+EXAMPLES = '''
+# inventory.config file in YAML format
+plugin: community.general.nmap
+strict: false
+address: 192.168.0.0/24
+
+
+# A sudo nmap scan to make full use of nmap's capabilities.
+plugin: community.general.nmap
+sudo: true
+strict: false
+address: 192.168.0.0/24
+
+# an nmap scan specifying ports and classifying results to an inventory group
+plugin: community.general.nmap
+address: 192.168.0.0/24
+exclude: 192.168.0.1, web.example.com
+port: 22, 443
+groups:
+ web_servers: "ports | selectattr('port', 'equalto', '443')"
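+
+# A hypothetical variant combining sudo, UDP scanning, and the mixed port syntax
+# documented above; all values below are placeholders.
+plugin: community.general.nmap
+address: 10.0.0.0/24
+sudo: true
+udp_scan: true
+port: U:53,T:22-25,80
+open: true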
+'''
+
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible import constants as C
+from ansible.errors import AnsibleParserError
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.nmap'
+ find_host = re.compile(r'^Nmap scan report for ([\w,.,-]+)(?: \(([\w,.,:,\[,\]]+)\))?')
+ find_port = re.compile(r'^(\d+)/(\w+)\s+(\w+)\s+(\w+)')
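+ # e.g. find_host matches 'Nmap scan report for host.example.com (192.0.2.10)'
+ # -> ('host.example.com', '192.0.2.10'); find_port matches '22/tcp open ssh'
+ # -> ('22', 'tcp', 'open', 'ssh') (hypothetical sample lines)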
+
+ def __init__(self):
+ self._nmap = None
+ super(InventoryModule, self).__init__()
+
+ def _populate(self, hosts):
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ for host in hosts:
+ hostname = host['name']
+ self.inventory.add_host(hostname)
+ for var, value in host.items():
+ self.inventory.set_variable(hostname, var, value)
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ file_name, ext = os.path.splitext(path)
+
+ if not ext or ext in C.YAML_FILENAME_EXTENSIONS:
+ valid = True
+
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ try:
+ self._nmap = get_bin_path('nmap')
+ except ValueError as e:
+ raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e)))
+
+ super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
+
+ self._read_config_data(path)
+
+ cache_key = self.get_cache_key(path)
+
+ # cache may be True or False at this point to indicate if the inventory is being refreshed
+ # get the user's cache option too to see if we should save the cache if it is changing
+ user_cache_setting = self.get_option('cache')
+
+ # read if the user has caching enabled and the cache isn't being refreshed
+ attempt_to_read_cache = user_cache_setting and cache
+ # update if the user has caching enabled and the cache is being refreshed; update this value to True if the cache has expired below
+ cache_needs_update = user_cache_setting and not cache
+
+ if attempt_to_read_cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # This occurs if the cache_key is not in the cache or if the cache_key expired, so the cache needs to be updated
+ cache_needs_update = True
+
+ if not user_cache_setting or cache_needs_update:
+ # setup command
+ cmd = [self._nmap]
+
+ if self._options['sudo']:
+ cmd.insert(0, 'sudo')
+
+ if self._options['port']:
+ cmd.append('-p')
+ cmd.append(self._options['port'])
+
+ if not self._options['ports']:
+ cmd.append('-sP')
+
+ if self._options['ipv4'] and not self._options['ipv6']:
+ cmd.append('-4')
+ elif self._options['ipv6'] and not self._options['ipv4']:
+ cmd.append('-6')
+ elif not self._options['ipv6'] and not self._options['ipv4']:
+ raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin')
+
+ if self._options['exclude']:
+ cmd.append('--exclude')
+ cmd.append(','.join(self._options['exclude']))
+
+ if self._options['dns_resolve']:
+ cmd.append('-R')
+ else:
+ cmd.append('-n')
+
+ if self._options['udp_scan']:
+ cmd.append('-sU')
+
+ if self._options['icmp_timestamp']:
+ cmd.append('-PP')
+
+ if self._options['open']:
+ cmd.append('--open')
+
+ cmd.append(self._options['address'])
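+ # with hypothetical options the final command could look like:
+ # ['sudo', '/usr/bin/nmap', '-p', '22', '-n', '--open', '192.168.0.0/24']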
+ try:
+ # execute
+ p = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr)))
+
+ # parse results
+ host = None
+ ip = None
+ ports = []
+ results = []
+
+ try:
+ t_stdout = to_text(stdout, errors='surrogate_or_strict')
+ except UnicodeError as e:
+ raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e))
+
+ for line in t_stdout.splitlines():
+ hits = self.find_host.match(line)
+ if hits:
+ if host is not None and ports:
+ results[-1]['ports'] = ports
+
+ # if DNS only returned the reverse (.in-addr.arpa) name, use the IP as the hostname
+ if hits.group(1).endswith('.in-addr.arpa'):
+ host = hits.group(2)
+ else:
+ host = hits.group(1)
+
+ # group(2) is the IP in parentheses; without reverse DNS, group(1) itself is the IP
+ if hits.group(2) is not None:
+ ip = hits.group(2)
+ else:
+ ip = hits.group(1)
+
+ if host is not None:
+ # update inventory
+ results.append(dict())
+ results[-1]['name'] = host
+ results[-1]['ip'] = ip
+ ports = []
+ continue
+
+ host_ports = self.find_port.match(line)
+ if host is not None and host_ports:
+ ports.append({'port': host_ports.group(1),
+ 'protocol': host_ports.group(2),
+ 'state': host_ports.group(3),
+ 'service': host_ports.group(4)})
+ continue
+
+ # if any leftovers
+ if host and ports:
+ results[-1]['ports'] = ports
+
+ except Exception as e:
+ raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e)))
+
+ if cache_needs_update:
+ self._cache[cache_key] = results
+
+ self._populate(results)
diff --git a/ansible_collections/community/general/plugins/inventory/online.py b/ansible_collections/community/general/plugins/inventory/online.py
new file mode 100644
index 000000000..3fccd58d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/online.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: online
+ author:
+ - Remy Leone (@remyleone)
+ short_description: Scaleway (previously Online SAS or Online.net) inventory source
+ description:
+ - Get inventory hosts from Scaleway (previously Online SAS or Online.net).
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'online' plugin.
+ required: true
+ choices: ['online', 'community.general.online']
+ oauth_token:
+ required: true
+ description: Online OAuth token.
+ env:
+ # in order of precedence
+ - name: ONLINE_TOKEN
+ - name: ONLINE_API_KEY
+ - name: ONLINE_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - hostname
+ groups:
+ description: List of groups.
+ type: list
+ elements: string
+ choices:
+ - location
+ - offer
+ - rpn
+'''
+
+EXAMPLES = r'''
+# online_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i online_inventory.yml
+
+plugin: community.general.online
+hostnames:
+ - public_ipv4
+groups:
+ - location
+ - offer
+ - rpn
+'''
+
+import json
+from sys import version as python_version
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+
+class InventoryModule(BaseInventoryPlugin):
+ NAME = 'community.general.online'
+ API_ENDPOINT = "https://api.online.net"
+
+ def extract_public_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["ip"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting public IPv4 address. Information skipped.")
+ return None
+
+ def extract_private_ipv4(self, host_infos):
+ try:
+ return host_infos["network"]["private"][0]
+ except (KeyError, TypeError, IndexError):
+ self.display.warning("An error happened while extracting private IPv4 address. Information skipped.")
+ return None
+
+ def extract_os_name(self, host_infos):
+ try:
+ return host_infos["os"]["name"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS name. Information skipped.")
+ return None
+
+ def extract_os_version(self, host_infos):
+ try:
+ return host_infos["os"]["version"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting OS version. Information skipped.")
+ return None
+
+ def extract_hostname(self, host_infos):
+ try:
+ return host_infos["hostname"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting hostname. Information skipped.")
+ return None
+
+ def extract_location(self, host_infos):
+ try:
+ return host_infos["location"]["datacenter"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting datacenter location. Information skipped.")
+ return None
+
+ def extract_offer(self, host_infos):
+ try:
+ return host_infos["offer"]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting commercial offer. Information skipped.")
+ return None
+
+ def extract_rpn(self, host_infos):
+ try:
+ return self.rpn_lookup_cache[host_infos["id"]]
+ except (KeyError, TypeError):
+ self.display.warning("An error happened while extracting RPN information. Information skipped.")
+ return None
+
+ def _fetch_information(self, url):
+ try:
+ response = open_url(url, headers=self.headers)
+ except Exception as e:
+ self.display.warning("An error happened while fetching %s: %s" % (url, to_text(e)))
+ return None
+
+ try:
+ raw_data = to_text(response.read(), errors='surrogate_or_strict')
+ except UnicodeError:
+ raise AnsibleError("Incorrect encoding of fetched payload from Online servers")
+
+ try:
+ return json.loads(raw_data)
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ @staticmethod
+ def extract_rpn_lookup_cache(rpn_list):
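+ # Maps member IDs to RPN group names; hypothetical input/output:
+ # [{'name': 'rpn-1', 'members': [{'id': 42}]}] -> {42: 'rpn-1'}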
+ lookup = {}
+ for rpn in rpn_list:
+ for member in rpn["members"]:
+ lookup[member["id"]] = rpn["name"]
+ return lookup
+
+ def _fill_host_variables(self, hostname, host_infos):
+ targeted_attributes = (
+ "offer",
+ "id",
+ "hostname",
+ "location",
+ "boot_mode",
+ "power",
+ "last_reboot",
+ "anti_ddos",
+ "hardware_watch",
+ "support"
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(hostname, attribute, host_infos[attribute])
+
+ if self.extract_public_ipv4(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos))
+ self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos))
+
+ if self.extract_private_ipv4(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos))
+
+ if self.extract_os_name(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos))
+
+ if self.extract_os_version(host_infos=host_infos):
+ self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if self.extractors[pref](host_infos):
+ return self.extractors[pref](host_infos)
+
+ return None
+
+ def do_server_inventory(self, host_infos, hostname_preferences, group_preferences):
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+ # No suitable hostname was found in the attributes, so the host will not be added to the inventory
+ if not hostname:
+ return
+
+ self.inventory.add_host(host=hostname)
+ self._fill_host_variables(hostname=hostname, host_infos=host_infos)
+
+ for g in group_preferences:
+ group = self.group_extractors[g](host_infos)
+
+ if not group:
+ # skip this group preference instead of aborting the remaining ones
+ continue
+
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ token = self.get_option("oauth_token")
+ hostname_preferences = self.get_option("hostnames")
+
+ group_preferences = self.get_option("groups")
+ if group_preferences is None:
+ group_preferences = []
+
+ self.extractors = {
+ "public_ipv4": self.extract_public_ipv4,
+ "private_ipv4": self.extract_private_ipv4,
+ "hostname": self.extract_hostname,
+ }
+
+ self.group_extractors = {
+ "location": self.extract_location,
+ "offer": self.extract_offer,
+ "rpn": self.extract_rpn
+ }
+
+ self.headers = {
+ 'Authorization': "Bearer %s" % token,
+ 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]),
+ 'Content-type': 'application/json'
+ }
+
+ servers_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/server")
+ servers_api_path = self._fetch_information(url=servers_url)
+
+ if "rpn" in group_preferences:
+ rpn_groups_url = urljoin(InventoryModule.API_ENDPOINT, "api/v1/rpn/group")
+ rpn_list = self._fetch_information(url=rpn_groups_url)
+ self.rpn_lookup_cache = self.extract_rpn_lookup_cache(rpn_list)
+
+ for server_api_path in servers_api_path:
+
+ server_url = urljoin(InventoryModule.API_ENDPOINT, server_api_path)
+ raw_server_info = self._fetch_information(url=server_url)
+
+ if raw_server_info is None:
+ continue
+
+ self.do_server_inventory(host_infos=raw_server_info,
+ hostname_preferences=hostname_preferences,
+ group_preferences=group_preferences)
diff --git a/ansible_collections/community/general/plugins/inventory/opennebula.py b/ansible_collections/community/general/plugins/inventory/opennebula.py
new file mode 100644
index 000000000..603920edc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/opennebula.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: opennebula
+ author:
+ - Kristian Feldsam (@feldsam)
+ short_description: OpenNebula inventory source
+ version_added: "3.8.0"
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from OpenNebula cloud.
+ - Uses a YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml)
+ to set parameter values.
+ - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to an OpenNebula credentials file.
+ options:
+ plugin:
+ description: Token that ensures this is a source file for the 'opennebula' plugin.
+ type: string
+ required: true
+ choices: [ community.general.opennebula ]
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ env:
+ - name: ONE_URL
+ required: true
+ type: string
+ api_username:
+ description:
+ - Name of the user used to log in to the OpenNebula RPC server. If not set,
+ the value of the C(ONE_USERNAME) environment variable is used.
+ env:
+ - name: ONE_USERNAME
+ type: string
+ api_password:
+ description:
+ - Password or token of the user used to log in to the OpenNebula RPC server.
+ - If not set, the value of the C(ONE_PASSWORD) environment variable is used.
+ env:
+ - name: ONE_PASSWORD
+ required: false
+ type: string
+ api_authfile:
+ description:
+ - If neither I(api_username) nor I(api_password) is set, the plugin will try to
+ authenticate with an ONE auth file. The default path is C(~/.one/one_auth).
+ - Set environment variable C(ONE_AUTH) to override this path.
+ env:
+ - name: ONE_AUTH
+ required: false
+ type: string
+ hostname:
+ description: Field to match the hostname. Note that C(v4_first_ip) corresponds to the first IPv4 address found on the VM.
+ type: string
+ default: v4_first_ip
+ choices:
+ - v4_first_ip
+ - v6_first_ip
+ - name
+ filter_by_label:
+ description: Only return servers filtered by this label.
+ type: string
+ group_by_labels:
+ description: Create host groups from VM labels.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = r'''
+# inventory_opennebula.yml file in YAML format
+# Example command line: ansible-inventory --list -i inventory_opennebula.yml
+
+# Pass a label filter to the API
+plugin: community.general.opennebula
+api_url: https://opennebula:2633/RPC2
+filter_by_label: Cache
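+
+# A hypothetical variant: group hosts by their VM labels and use the VM name
+# as the inventory hostname (endpoint and values are placeholders)
+plugin: community.general.opennebula
+api_url: https://opennebula:2633/RPC2
+hostname: name
+group_by_labels: true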
+'''
+
+try:
+ import pyone
+
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.module_utils.common.text.converters import to_native
+
+from collections import namedtuple
+import os
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ NAME = 'community.general.opennebula'
+
+ def verify_file(self, path):
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('opennebula.yaml', 'opennebula.yml')):
+ valid = True
+ return valid
+
+ def _get_connection_info(self):
+ url = self.get_option('api_url')
+ username = self.get_option('api_username')
+ password = self.get_option('api_password')
+ authfile = self.get_option('api_authfile')
+
+ if not username and not password:
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
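+ # one_auth is expected to hold a single 'username:password' line
+ # (hypothetical example: 'oneadmin:secret'), as the split below assumes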
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username, password = authstring.split(":")
+ except (OSError, IOError):
+ raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile))
+ except Exception:
+ raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile))
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+ def _get_vm_ipv4(self, vm):
+ nic = vm.TEMPLATE.get('NIC')
+
+ if isinstance(nic, dict):
+ nic = [nic]
+
+ for net in nic:
+ if net.get('IP'):
+ return net['IP']
+
+ return False
+
+ def _get_vm_ipv6(self, vm):
+ nic = vm.TEMPLATE.get('NIC')
+
+ if isinstance(nic, dict):
+ nic = [nic]
+
+ for net in nic:
+ if net.get('IP6_GLOBAL'):
+ return net['IP6_GLOBAL']
+
+ return False
+
+ def _get_vm_pool(self):
+ auth = self._get_connection_info()
+
+ if not (auth.username and auth.password):
+ raise AnsibleError('API Credentials missing. Check OpenNebula inventory file.')
+ else:
+ one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ # get hosts (VMs)
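+ # infoextended arguments (assumed semantics): -2 = all resources visible to
+ # the user, -1/-1 = no range limit, 3 = only VMs in the ACTIVE state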
+ try:
+ vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3)
+ except Exception as e:
+ raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e)))
+
+ return vm_pool
+
+ def _retrieve_servers(self, label_filter=None):
+ vm_pool = self._get_vm_pool()
+
+ result = []
+
+ # iterate over hosts
+ for vm in vm_pool.VM:
+ server = vm.USER_TEMPLATE
+
+ labels = []
+ if vm.USER_TEMPLATE.get('LABELS'):
+ labels = [s for s in vm.USER_TEMPLATE.get('LABELS') if s == ',' or s == '-' or s.isalnum() or s.isspace()]
+ labels = ''.join(labels)
+ labels = labels.replace(' ', '_')
+ labels = labels.replace('-', '_')
+ labels = labels.split(',')
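+ # e.g. a hypothetical LABELS string 'Web Server,DB-1' becomes ['Web_Server', 'DB_1']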
+
+ # filter by label
+ if label_filter is not None:
+ if label_filter not in labels:
+ continue
+
+ server['name'] = vm.NAME
+ server['LABELS'] = labels
+ server['v4_first_ip'] = self._get_vm_ipv4(vm)
+ server['v6_first_ip'] = self._get_vm_ipv6(vm)
+
+ result.append(server)
+
+ return result
+
+ def _populate(self):
+ hostname_preference = self.get_option('hostname')
+ group_by_labels = self.get_option('group_by_labels')
+ strict = self.get_option('strict')
+
+ # Add every host to the top-level group 'all'
+ self.inventory.add_group(group='all')
+
+ filter_by_label = self.get_option('filter_by_label')
+ servers = self._retrieve_servers(filter_by_label)
+ for server in servers:
+ hostname = server['name']
+ # check for labels
+ if group_by_labels and server['LABELS']:
+ for label in server['LABELS']:
+ self.inventory.add_group(group=label)
+ self.inventory.add_host(host=hostname, group=label)
+
+ self.inventory.add_host(host=hostname, group='all')
+
+ for attribute, value in server.items():
+ self.inventory.set_variable(hostname, attribute, value)
+
+ if hostname_preference != 'name':
+ self.inventory.set_variable(hostname, 'ansible_host', server[hostname_preference])
+
+ if server.get('SSH_PORT'):
+ self.inventory.set_variable(hostname, 'ansible_port', server['SSH_PORT'])
+
+ # handle the Constructable implementation: get composed variables if any
+ self._set_composite_vars(self.get_option('compose'), server, hostname, strict=strict)
+
+ # groups based on jinja conditionals get added to specific groups
+ self._add_host_to_composed_groups(self.get_option('groups'), server, hostname, strict=strict)
+
+ # groups based on variables associated with them in the inventory
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), server, hostname, strict=strict)
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_PYONE:
+ raise AnsibleError('OpenNebula Inventory plugin requires pyone to work!')
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/proxmox.py b/ansible_collections/community/general/plugins/inventory/proxmox.py
new file mode 100644
index 000000000..dc2e1febc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/proxmox.py
@@ -0,0 +1,644 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Guido Günther <agx@sigxcpu.org>, Daniel Lobato Garcia <dlobatog@redhat.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: proxmox
+ short_description: Proxmox inventory source
+ version_added: "1.2.0"
+ author:
+ - Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
+ requirements:
+ - requests >= 1.1
+ description:
+ - Get inventory hosts from a Proxmox PVE cluster.
+ - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)"
+ - Will retrieve the first network interface with an IP for Proxmox nodes.
+ - Can retrieve LXC/QEMU configuration as facts.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to C(community.general.proxmox) for the plugin to recognize it as its own.
+ required: true
+ choices: ['community.general.proxmox']
+ type: str
+ url:
+ description:
+ - URL to Proxmox cluster.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the I(url).
+ default: 'http://localhost:8006'
+ type: str
+ env:
+ - name: PROXMOX_URL
+ version_added: 2.0.0
+ user:
+ description:
+ - Proxmox authentication user.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the I(user).
+ required: true
+ type: str
+ env:
+ - name: PROXMOX_USER
+ version_added: 2.0.0
+ password:
+ description:
+ - Proxmox authentication password.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead.
+ - Since community.general 4.7.0 you can also use templating to specify the value of the I(password).
+ - If you do not specify a password, you must set I(token_id) and I(token_secret) instead.
+ type: str
+ env:
+ - name: PROXMOX_PASSWORD
+ version_added: 2.0.0
+ token_id:
+ description:
+ - Proxmox authentication token ID.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead.
+ - To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret),
+ you must set a password instead.
+ - Make sure to grant explicit PVE permissions to the token or disable 'privilege separation' to use the user's privileges instead.
+ version_added: 4.8.0
+ type: str
+ env:
+ - name: PROXMOX_TOKEN_ID
+ token_secret:
+ description:
+ - Proxmox authentication token secret.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead.
+ - To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret),
+ you must set a password instead.
+ version_added: 4.8.0
+ type: str
+ env:
+ - name: PROXMOX_TOKEN_SECRET
+ validate_certs:
+ description: Verify SSL certificate if using HTTPS.
+ type: boolean
+ default: true
+ group_prefix:
+ description: Prefix to apply to Proxmox groups.
+ default: proxmox_
+ type: str
+ facts_prefix:
+ description: Prefix to apply to LXC/QEMU config facts.
+ default: proxmox_
+ type: str
+ want_facts:
+ description:
+ - Gather LXC/QEMU configuration facts.
+ - When I(want_facts) is set to C(true), more detailed QEMU VM statuses are possible, besides the running and stopped states.
+ Currently, if the VM is running and suspended, the status will be C(running) and the machine will be in the C(running) group,
+ but its actual state will be paused. See I(qemu_extended_statuses) for how to retrieve the real status.
+ default: false
+ type: bool
+ qemu_extended_statuses:
+ description:
+ - Requires I(want_facts) to be set to C(true) to function. This will allow you to differentiate between C(paused) and C(prelaunch)
+ statuses of the QEMU VMs.
+ - This introduces multiple groups [prefixed with I(group_prefix)] C(prelaunch) and C(paused).
+ default: false
+ type: bool
+ version_added: 5.1.0
+ want_proxmox_nodes_ansible_host:
+ version_added: 3.0.0
+ description:
+ - Whether to set C(ansible_host) for Proxmox nodes.
+ - When set to C(true), the plugin will use the first available interface, which can be different from what you expect.
+ - The default of this option changed from C(true) to C(false) in community.general 6.0.0.
+ type: bool
+ default: false
+ filters:
+ version_added: 4.6.0
+ description: A list of Jinja templates that allow filtering hosts.
+ type: list
+ elements: str
+ default: []
+ strict:
+ version_added: 2.5.0
+ compose:
+ version_added: 2.5.0
+ groups:
+ version_added: 2.5.0
+ keyed_groups:
+ version_added: 2.5.0
+'''
+
+EXAMPLES = '''
+# Minimal example which will not gather additional facts for QEMU/LXC guests
+# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006
+# my.proxmox.yml
+plugin: community.general.proxmox
+user: ansible@pve
+password: secure
+# Note that this can easily give you wrong values as ansible_host. See further below for
+# an example where this is set to `false` and where ansible_host is set with `compose`.
+want_proxmox_nodes_ansible_host: true
+
+# Instead of logging in with a password, Proxmox supports API token authentication since release 6.2.
+plugin: community.general.proxmox
+user: ci@pve
+token_id: gitlab-1
+token_secret: fa256e9c-26ab-41ec-82da-707a2c079829
+
+# The secret can also be a vault string or passed via the environment variable PROXMOX_TOKEN_SECRET.
+token_secret: !vault |
+ $ANSIBLE_VAULT;1.1;AES256
+ 62353634333163633336343265623632626339313032653563653165313262343931643431656138
+ 6134333736323265656466646539663134306166666237630a653363623262636663333762316136
+ 34616361326263383766366663393837626437316462313332663736623066656237386531663731
+ 3037646432383064630a663165303564623338666131353366373630656661333437393937343331
+ 32643131386134396336623736393634373936356332623632306561356361323737313663633633
+ 6231313333666361656537343562333337323030623732323833
+
+# More complete example demonstrating the use of 'want_facts' and the constructed options
+# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true'
+# my.proxmox.yml
+plugin: community.general.proxmox
+url: http://pve.domain.com:8006
+user: ansible@pve
+password: secure
+validate_certs: false
+want_facts: true
+keyed_groups:
+ # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true'
+ - key: proxmox_tags_parsed
+ separator: ""
+ prefix: group
+groups:
+ webservers: "'web' in (proxmox_tags_parsed|list)"
+ mailservers: "'mail' in (proxmox_tags_parsed|list)"
+compose:
+ ansible_port: 2222
+# Note that this can easily give you wrong values as ansible_host. See further below for
+# an example where this is set to `false` and where ansible_host is set with `compose`.
+want_proxmox_nodes_ansible_host: true
+
+# Using the inventory to allow ansible to connect via the first IP address of the VM / Container
+# (Default is connection by name of QEMU/LXC guests)
+# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory.
+# my.proxmox.yml
+plugin: community.general.proxmox
+url: http://pve.domain.com:8006
+user: ansible@pve
+password: secure
+validate_certs: false
+want_facts: true
+want_proxmox_nodes_ansible_host: false
+compose:
+ ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address')
+ my_inv_var_1: "'my_var1_value'"
+ my_inv_var_2: >
+ "my_var_2_value"
+
+# Specify the url, user and password using templating
+# my.proxmox.yml
+plugin: community.general.proxmox
+url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}"
+user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}"
+password: "{{ lookup('community.general.random_string', base64=True) }}"
+# Note that this can easily give you wrong values as ansible_host. See further up for
+# an example where this is set to `false` and where ansible_host is set with `compose`.
+want_proxmox_nodes_ansible_host: true
+
+'''
+
+import itertools
+import re
+
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.utils.display import Display
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+# 3rd party imports
+try:
+ import requests
+ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
+ raise ImportError
+ HAS_REQUESTS = True
+except ImportError:
+ HAS_REQUESTS = False
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Proxmox as source. '''
+
+ NAME = 'community.general.proxmox'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.proxmox_url = None
+
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('proxmox.yaml', 'proxmox.yml')):
+ valid = True
+ else:
+ self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"')
+ return valid
+
+ def _get_session(self):
+ if not self.session:
+ self.session = requests.session()
+ self.session.verify = self.get_option('validate_certs')
+ return self.session
+
+ def _get_auth(self):
+
+ if self.proxmox_password:
+
+ credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, })
+
+ a = self._get_session()
+
+ if a.verify is False:
+ from requests.packages.urllib3 import disable_warnings
+ disable_warnings()
+
+ ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials)
+
+ json = ret.json()
+
+ self.headers = {
+ # only required for POST/PUT/DELETE methods, which we are not using currently
+ # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'],
+ 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket'])
+ }
+
+ else:
+
+ self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)}
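+ # the resulting header value looks like (hypothetical token):
+ # 'PVEAPIToken=ci@pve!gitlab-1=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'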
+
+ def _get_json(self, url, ignore_errors=None):
+
+ if not self.use_cache or url not in self._cache.get(self.cache_key, {}):
+
+ if self.cache_key not in self._cache:
+ self._cache[self.cache_key] = {'url': ''}
+
+ data = []
+ s = self._get_session()
+ while True:
+ ret = s.get(url, headers=self.headers)
+ if ignore_errors and ret.status_code in ignore_errors:
+ break
+ ret.raise_for_status()
+ json = ret.json()
+
+ # process results
+ # FIXME: This assumes 'return type' matches a specific query,
+ # it will break if we expand the queries and they don't have different types
+ if 'data' not in json:
+ # /hosts/:id does not have a 'data' key
+ data = json
+ break
+ elif isinstance(json['data'], MutableMapping):
+ # /facts are returned as dict in 'data'
+ data = json['data']
+ break
+ else:
+ # /hosts returns its list of all hosts in 'data'; the response may be paginated
+ data = data + json['data']
+ break
+
+ self._cache[self.cache_key][url] = data
+
+ return self._cache[self.cache_key][url]
+
+ def _get_nodes(self):
+ return self._get_json("%s/api2/json/nodes" % self.proxmox_url)
+
+ def _get_pools(self):
+ return self._get_json("%s/api2/json/pools" % self.proxmox_url)
+
+ def _get_lxc_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node))
+
+ def _get_qemu_per_node(self, node):
+ return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node))
+
+ def _get_members_per_pool(self, pool):
+ ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool))
+ return ret['members']
+
+ def _get_node_ip(self, node):
+ ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node))
+
+ for iface in ret:
+ # skip interfaces without an address instead of giving up on the first miss
+ if 'address' in iface:
+ return iface['address']
+ return None
+
+ def _get_agent_network_interfaces(self, node, vmid, vmtype):
+ result = []
+
+ try:
+ ifaces = self._get_json(
+ "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % (
+ self.proxmox_url, node, vmtype, vmid
+ )
+ )['result']
+
+ if "error" in ifaces:
+ if "class" in ifaces["error"]:
+ # This happens on Windows: even though the qemu agent is running, the IP address
+ # cannot be fetched because it is unsupported; the command may also be disabled.
+ errorClass = ifaces["error"]["class"]
+ if errorClass in ["Unsupported"]:
+ self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported")
+ elif errorClass in ["CommandDisabled"]:
+ self.display.v("Retrieving network interfaces from guest agents has been disabled")
+ return result
+
+ for iface in ifaces:
+ result.append({
+ 'name': iface['name'],
+ 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '',
+ 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else []
+ })
+ except requests.HTTPError:
+ pass
+
+ return result
+
+ def _get_vm_config(self, properties, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid))
+
+ properties[self._fact('node')] = node
+ properties[self._fact('vmid')] = vmid
+ properties[self._fact('vmtype')] = vmtype
+
+ plaintext_configs = [
+ 'description',
+ ]
+
+ for config in ret:
+ key = self._fact(config)
+ value = ret[config]
+ try:
+ # fixup disk images as they have no key
+ if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')):
+ value = ('disk_image=' + value)
+
+ # Additional field containing parsed tags as list
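+ # e.g. a hypothetical tags value 'web;prod, db' parses to ['web', 'prod', 'db']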
+ if config == 'tags':
+ stripped_value = value.strip()
+ if stripped_value:
+ parsed_key = key + "_parsed"
+ properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")]
+
+ # The first field in the agent string tells you whether the agent is enabled
+ # the rest of the comma separated string is extra config for the agent.
+ # In some (newer versions of proxmox) instances it can be 'enabled=1'.
+ if config == 'agent':
+ agent_enabled = 0
+ try:
+ agent_enabled = int(value.split(',')[0])
+ except ValueError:
+ if value.split(',')[0] == "enabled=1":
+ agent_enabled = 1
+ if agent_enabled:
+ agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype)
+ if agent_iface_value:
+ agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces"))
+ properties[agent_iface_key] = agent_iface_value
+
+ if config == 'lxc':
+ out_val = {}
+ for k, v in value:
+ if k.startswith('lxc.'):
+ k = k[len('lxc.'):]
+ out_val[k] = v
+ value = out_val
+
+ if config not in plaintext_configs and isinstance(value, string_types) \
+ and all("=" in v for v in value.split(",")):
+ # split off strings with commas to a dict
+ # skip over any keys that cannot be processed
+ try:
+ value = dict(key.split("=", 1) for key in value.split(","))
+ except Exception:
+ continue
+
+ properties[key] = value
+ except NameError:
+ return None
+
+ def _get_vm_status(self, properties, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid))
+ properties[self._fact('status')] = ret['status']
+ if vmtype == 'qemu':
+ properties[self._fact('qmpstatus')] = ret['qmpstatus']
+
+ def _get_vm_snapshots(self, properties, node, vmid, vmtype, name):
+ ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid))
+ snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current']
+ properties[self._fact('snapshots')] = snapshots
+
+ def to_safe(self, word):
+ '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
+ #> ProxmoxInventory.to_safe("foo-bar baz")
+ 'foo_barbaz'
+ '''
+ regex = r"[^A-Za-z0-9\_]"
+ return re.sub(regex, "_", word.replace(" ", ""))
+
+ def _fact(self, name):
+ '''Generate a fact's full name from the common prefix and a name.'''
+ return self.to_safe('%s%s' % (self.facts_prefix, name.lower()))
+
+ def _group(self, name):
+ '''Generate a group's full name from the common prefix and a name.'''
+ return self.to_safe('%s%s' % (self.group_prefix, name.lower()))
+
+ def _can_add_host(self, name, properties):
+ '''Ensure that a host satisfies all defined hosts filters. If strict mode is
+ enabled, any error during host filter compositing will lead to an AnsibleError
+ being raised, otherwise the filter will be ignored.
+ '''
+ for host_filter in self.host_filters:
+ try:
+ if not self._compose(host_filter, properties):
+ return False
+ except Exception as e: # pylint: disable=broad-except
+ message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e))
+ if self.strict:
+ raise AnsibleError(message)
+ display.warning(message)
+ return True
+
+ def _add_host(self, name, variables):
+ self.inventory.add_host(name)
+ for k, v in variables.items():
+ self.inventory.set_variable(name, k, v)
+ variables = self.inventory.get_host(name).get_vars()
+ self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict)
+ self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict)
+
+ def _handle_item(self, node, ittype, item):
+ '''Handle an item from the list of LXC containers and Qemu VM. The
+ return value will be either None if the item was skipped or the name of
+ the item if it was added to the inventory.'''
+ if item.get('template'):
+ return None
+
+ properties = dict()
+ name, vmid = item['name'], item['vmid']
+
+ # get status, config and snapshots if want_facts == True
+ want_facts = self.get_option('want_facts')
+ if want_facts:
+ self._get_vm_status(properties, node, vmid, ittype, name)
+ self._get_vm_config(properties, node, vmid, ittype, name)
+ self._get_vm_snapshots(properties, node, vmid, ittype, name)
+
+ # ensure the host satisfies filters
+ if not self._can_add_host(name, properties):
+ return None
+
+ # add the host to the inventory
+ self._add_host(name, properties)
+ node_type_group = self._group('%s_%s' % (node, ittype))
+ self.inventory.add_child(self._group('all_' + ittype), name)
+ self.inventory.add_child(node_type_group, name)
+
+ item_status = item['status']
+ if item_status == 'running':
+ if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'):
+ # get more details about the status of the qemu VM
+ item_status = properties.get(self._fact('qmpstatus'), item_status)
+ self.inventory.add_child(self._group('all_%s' % (item_status, )), name)
+
+ return name
+
+ def _populate_pool_groups(self, added_hosts):
+ '''Generate groups from Proxmox resource pools, ignoring VMs and
+ containers that were skipped.'''
+ for pool in self._get_pools():
+ poolid = pool.get('poolid')
+ if not poolid:
+ continue
+ pool_group = self._group('pool_' + poolid)
+ self.inventory.add_group(pool_group)
+
+ for member in self._get_members_per_pool(poolid):
+ name = member.get('name')
+ if name and name in added_hosts:
+ self.inventory.add_child(pool_group, name)
+
+ def _populate(self):
+
+ # create common groups
+ default_groups = ['lxc', 'qemu', 'running', 'stopped']
+
+ if self.get_option('qemu_extended_statuses'):
+ default_groups.extend(['prelaunch', 'paused'])
+
+ for group in default_groups:
+ self.inventory.add_group(self._group('all_%s' % (group)))
+
+ nodes_group = self._group('nodes')
+ self.inventory.add_group(nodes_group)
+
+ want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host")
+
+ # gather VMs on nodes
+ self._get_auth()
+ hosts = []
+ for node in self._get_nodes():
+ if not node.get('node'):
+ continue
+
+ self.inventory.add_host(node['node'])
+ if node['type'] == 'node':
+ self.inventory.add_child(nodes_group, node['node'])
+
+ if node['status'] == 'offline':
+ continue
+
+ # get node IP address
+ if want_proxmox_nodes_ansible_host:
+ ip = self._get_node_ip(node['node'])
+ self.inventory.set_variable(node['node'], 'ansible_host', ip)
+
+ # add LXC/Qemu groups for the node
+ for ittype in ('lxc', 'qemu'):
+ node_type_group = self._group('%s_%s' % (node['node'], ittype))
+ self.inventory.add_group(node_type_group)
+
+ # get LXC containers and Qemu VMs for this node
+ lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node']))
+ qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node']))
+ for ittype, item in itertools.chain(lxc_objects, qemu_objects):
+ name = self._handle_item(node['node'], ittype, item)
+ if name is not None:
+ hosts.append(name)
+
+ # gather VMs in pools
+ self._populate_pool_groups(hosts)
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_REQUESTS:
+ raise AnsibleError('This plugin requires Python Requests 1.1.0 or higher: '
+ 'https://github.com/psf/requests.')
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+
+ # read and template auth options
+ for o in ('url', 'user', 'password', 'token_id', 'token_secret'):
+ v = self.get_option(o)
+ if self.templar.is_template(v):
+ v = self.templar.template(v, disable_lookups=False)
+ setattr(self, 'proxmox_%s' % o, v)
+
+ # some more cleanup and validation
+ self.proxmox_url = self.proxmox_url.rstrip('/')
+
+ if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None):
+ raise AnsibleError('You must specify either a password or both token_id and token_secret.')
+
+ if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'):
+ raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.')
+
+ # read rest of options
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+ self.host_filters = self.get_option('filters')
+ self.group_prefix = self.get_option('group_prefix')
+ self.facts_prefix = self.get_option('facts_prefix')
+ self.strict = self.get_option('strict')
+
+ # actually populate inventory
+ self._populate()
diff --git a/ansible_collections/community/general/plugins/inventory/scaleway.py b/ansible_collections/community/general/plugins/inventory/scaleway.py
new file mode 100644
index 000000000..6aacc9f66
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/scaleway.py
@@ -0,0 +1,344 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: scaleway
+ author:
+ - Remy Leone (@remyleone)
+ short_description: Scaleway inventory source
+ description:
+ - Get inventory hosts from Scaleway.
+ requirements:
+ - PyYAML
+ options:
+ plugin:
+ description: Token that ensures this is a source file for the 'scaleway' plugin.
+ required: true
+ choices: ['scaleway', 'community.general.scaleway']
+ regions:
+ description: Filter results on a specific Scaleway region.
+ type: list
+ elements: string
+ default:
+ - ams1
+ - par1
+ - par2
+ - waw1
+ tags:
+ description: Filter results on a specific tag.
+ type: list
+ elements: string
+ scw_profile:
+ description:
+ - The config profile to use in config file.
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined.
+ type: string
+ version_added: 4.4.0
+ oauth_token:
+ description:
+ - Scaleway OAuth token.
+ - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file
+ (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)).
+ - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/).
+ env:
+ # in order of precedence
+ - name: SCW_TOKEN
+ - name: SCW_API_KEY
+ - name: SCW_OAUTH_TOKEN
+ hostnames:
+ description: List of preferences about what to use as a hostname.
+ type: list
+ elements: string
+ default:
+ - public_ipv4
+ choices:
+ - public_ipv4
+ - private_ipv4
+ - public_ipv6
+ - hostname
+ - id
+ variables:
+ description: 'Set individual variables: keys are variable names and
+ values are templates. Any value returned by the
+ L(Scaleway API, https://developer.scaleway.com/#servers-server-get)
+ can be used.'
+ type: dict
+'''
+
+EXAMPLES = r'''
+# scaleway_inventory.yml file in YAML format
+# Example command line: ansible-inventory --list -i scaleway_inventory.yml
+
+# use hostname as inventory_hostname
+# use the private IP address to connect to the host
+plugin: community.general.scaleway
+regions:
+ - ams1
+ - par1
+tags:
+ - foobar
+hostnames:
+ - hostname
+variables:
+ ansible_host: private_ip
+ state: state
+
+# use hostname as inventory_hostname and public IP address to connect to the host
+plugin: community.general.scaleway
+hostnames:
+ - hostname
+regions:
+ - par1
+variables:
+ ansible_host: public_ip.address
+
+# Using static strings as variables
+plugin: community.general.scaleway
+hostnames:
+ - hostname
+variables:
+ ansible_host: public_ip.address
+ ansible_connection: "'ssh'"
+ ansible_user: "'admin'"
+'''
+
+import os
+import json
+
+try:
+ import yaml
+except ImportError as exc:
+ YAML_IMPORT_ERROR = exc
+else:
+ YAML_IMPORT_ERROR = None
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.six import raise_from
+
+import ansible.module_utils.six.moves.urllib.parse as urllib_parse
+
+
+def _fetch_information(token, url):
+ results = []
+ paginated_url = url
+ while True:
+ try:
+ response = open_url(paginated_url,
+ headers={'X-Auth-Token': token,
+ 'Content-type': 'application/json'})
+ except Exception as e:
+ raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e)))
+ try:
+ raw_json = json.loads(to_text(response.read()))
+ except ValueError:
+ raise AnsibleError("Incorrect JSON payload")
+
+ try:
+ results.extend(raw_json["servers"])
+ except KeyError:
+ raise AnsibleError("Incorrect format from the Scaleway API response")
+
+ link = response.headers['Link']
+ if not link:
+ return results
+ relations = parse_pagination_link(link)
+ if 'next' not in relations:
+ return results
+ paginated_url = urllib_parse.urljoin(paginated_url, relations['next'])
+
+
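+# A hypothetical paginated response carries an RFC 5988 'Link' header such as
+# '<https://cp-par1.scaleway.com/servers?page=2>; rel="next"';
+# parse_pagination_link() maps each rel to its URL so the loop above follows 'next'.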
+def _build_server_url(api_endpoint):
+ return "/".join([api_endpoint, "servers"])
+
+
+def extract_public_ipv4(server_info):
+ try:
+ return server_info["public_ip"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_private_ipv4(server_info):
+ try:
+ return server_info["private_ip"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_hostname(server_info):
+ try:
+ return server_info["hostname"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_server_id(server_info):
+ try:
+ return server_info["id"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_public_ipv6(server_info):
+ try:
+ return server_info["ipv6"]["address"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_tags(server_info):
+ try:
+ return server_info["tags"]
+ except (KeyError, TypeError):
+ return None
+
+
+def extract_zone(server_info):
+ try:
+ return server_info["location"]["zone_id"]
+ except (KeyError, TypeError):
+ return None
+
+
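+# Map each supported 'hostnames' preference to the extractor that pulls the
+# corresponding value out of a raw server record.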
+extractors = {
+ "public_ipv4": extract_public_ipv4,
+ "private_ipv4": extract_private_ipv4,
+ "public_ipv6": extract_public_ipv6,
+ "hostname": extract_hostname,
+ "id": extract_server_id
+}
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ NAME = 'community.general.scaleway'
+
+ def _fill_host_variables(self, host, server_info):
+ targeted_attributes = (
+ "arch",
+ "commercial_type",
+ "id",
+ "organization",
+ "state",
+ "hostname",
+ )
+ for attribute in targeted_attributes:
+ self.inventory.set_variable(host, attribute, server_info[attribute])
+
+ self.inventory.set_variable(host, "tags", server_info["tags"])
+
+ if extract_public_ipv6(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv6", extract_public_ipv6(server_info=server_info))
+
+ if extract_public_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "public_ipv4", extract_public_ipv4(server_info=server_info))
+
+ if extract_private_ipv4(server_info=server_info):
+ self.inventory.set_variable(host, "private_ipv4", extract_private_ipv4(server_info=server_info))
+
+ def _get_zones(self, config_zones):
+ return set(SCALEWAY_LOCATION.keys()).intersection(config_zones)
+
+ def match_groups(self, server_info, tags):
+ server_zone = extract_zone(server_info=server_info)
+ server_tags = extract_tags(server_info=server_info)
+
+ # If a server does not have a zone, it means it is archived
+ if server_zone is None:
+ return set()
+
+ # If no filtering is defined, all tags are valid groups
+ if tags is None:
+ return set(server_tags).union((server_zone,))
+
+ matching_tags = set(server_tags).intersection(tags)
+
+ if not matching_tags:
+ return set()
+ return matching_tags.union((server_zone,))
+
+ def _filter_host(self, host_infos, hostname_preferences):
+
+ for pref in hostname_preferences:
+ if extractors[pref](host_infos):
+ return extractors[pref](host_infos)
+
+ return None
+
+ def do_zone_inventory(self, zone, token, tags, hostname_preferences):
+ self.inventory.add_group(zone)
+ zone_info = SCALEWAY_LOCATION[zone]
+
+ url = _build_server_url(zone_info["api_endpoint"])
+ raw_zone_hosts_infos = _fetch_information(url=url, token=token)
+
+ for host_infos in raw_zone_hosts_infos:
+
+ hostname = self._filter_host(host_infos=host_infos,
+ hostname_preferences=hostname_preferences)
+
+ # No suitable hostname was found in the attributes, so the host will not be added to the inventory
+ if not hostname:
+ continue
+
+ groups = self.match_groups(host_infos, tags)
+
+ for group in groups:
+ self.inventory.add_group(group=group)
+ self.inventory.add_host(group=group, host=hostname)
+ self._fill_host_variables(host=hostname, server_info=host_infos)
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('variables'), host_infos, hostname, strict=False)
+
+ def get_oauth_token(self):
+ oauth_token = self.get_option('oauth_token')
+
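+ # Resolve the scaleway-cli config location, in order of precedence:
+ # $SCW_CONFIG_PATH, then $XDG_CONFIG_HOME/scw/config.yaml, then ~/.config/scw/config.yaml.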
+ if 'SCW_CONFIG_PATH' in os.environ:
+ scw_config_path = os.getenv('SCW_CONFIG_PATH')
+ elif 'XDG_CONFIG_HOME' in os.environ:
+ scw_config_path = os.path.join(os.getenv('XDG_CONFIG_HOME'), 'scw', 'config.yaml')
+ else:
+ scw_config_path = os.path.join(os.path.expanduser('~'), '.config', 'scw', 'config.yaml')
+
+ if not oauth_token and os.path.exists(scw_config_path):
+ with open(scw_config_path) as fh:
+ scw_config = yaml.safe_load(fh)
+ ansible_profile = self.get_option('scw_profile')
+
+ if ansible_profile:
+ active_profile = ansible_profile
+ else:
+ active_profile = scw_config.get('active_profile', 'default')
+
+ if active_profile == 'default':
+ oauth_token = scw_config.get('secret_key')
+ else:
+ oauth_token = scw_config['profiles'][active_profile].get('secret_key')
+
+ return oauth_token
+
+ def parse(self, inventory, loader, path, cache=True):
+ if YAML_IMPORT_ERROR:
+ raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR)
+ super(InventoryModule, self).parse(inventory, loader, path)
+ self._read_config_data(path=path)
+
+ config_zones = self.get_option("regions")
+ tags = self.get_option("tags")
+ token = self.get_oauth_token()
+ if not token:
+ raise AnsibleError("'oauth_token' value is null, you must configure it either in inventory, envvars or scaleway-cli config.")
+ hostname_preference = self.get_option("hostnames")
+
+ for zone in self._get_zones(config_zones):
+ self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference)
diff --git a/ansible_collections/community/general/plugins/inventory/stackpath_compute.py b/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
new file mode 100644
index 000000000..39f880e82
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/stackpath_compute.py
@@ -0,0 +1,283 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: stackpath_compute
+ short_description: StackPath Edge Computing inventory source
+ version_added: 1.2.0
+ author:
+ - UNKNOWN (@shayrybak)
+ extends_documentation_fragment:
+ - inventory_cache
+ - constructed
+ description:
+ - Get inventory hosts from StackPath Edge Computing.
+ - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml).
+ options:
+ plugin:
+ description:
+ - A token that ensures this is a source file for the plugin.
+ required: true
+ choices: ['community.general.stackpath_compute']
+ client_id:
+ description:
+ - An OAuth client ID generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ client_secret:
+ description:
+ - An OAuth client secret generated from the API Management section of the StackPath customer portal
+ U(https://control.stackpath.net/api-management).
+ required: true
+ type: str
+ stack_slugs:
+ description:
+ - A list of Stack slugs to query instances in. If not specified, instances in all stacks on the account are returned.
+ type: list
+ elements: str
+ use_internal_ip:
+ description:
+ - Whether to use internal IP addresses. If false, external IP addresses are used; if true, internal ones.
+ - If an instance does not have an external IP, it will not be returned when this option is set to false.
+ type: bool
+'''
+
+EXAMPLES = '''
+# Example using credentials to fetch all workload instances in a stack.
+---
+plugin: community.general.stackpath_compute
+client_id: my_client_id
+client_secret: my_client_secret
+stack_slugs:
+- my_first_stack_slug
+- my_other_stack_slug
+use_internal_ip: false
+'''
+
+import traceback
+import json
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import open_url
+from ansible.plugins.inventory import (
+ BaseInventoryPlugin,
+ Constructable,
+ Cacheable
+)
+from ansible.utils.display import Display
+
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+
+ NAME = 'community.general.stackpath_compute'
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+
+ # credentials
+ self.client_id = None
+ self.client_secret = None
+ self.stack_slug = None
+ self.api_host = "https://gateway.stackpath.com"
+ self.group_keys = [
+ "stackSlug",
+ "workloadId",
+ "cityCode",
+ "countryCode",
+ "continent",
+ "target",
+ "name",
+ "workloadSlug"
+ ]
+
+ def _validate_config(self, config):
+ if config['plugin'] != 'community.general.stackpath_compute':
+ raise AnsibleError("plugin doesn't match this plugin")
+ try:
+ client_id = config['client_id']
+ if len(client_id) != 32:
+ raise AnsibleError("client_id must be 32 characters long")
+ except KeyError:
+ raise AnsibleError("config missing client_id, a required option")
+ try:
+ client_secret = config['client_secret']
+ if len(client_secret) != 64:
+ raise AnsibleError("client_secret must be 64 characters long")
+ except KeyError:
+ raise AnsibleError("config missing client_id, a required option")
+ return True
+
+ def _set_credentials(self):
+ '''
+ :param config_data: contents of the inventory config file
+ '''
+ self.client_id = self.get_option('client_id')
+ self.client_secret = self.get_option('client_secret')
+
+ def _authenticate(self):
+ payload = json.dumps(
+ {
+ "client_id": self.client_id,
+ "client_secret": self.client_secret,
+ "grant_type": "client_credentials",
+ }
+ )
+ headers = {
+ "Content-Type": "application/json",
+ }
+ resp = open_url(
+ self.api_host + '/identity/v1/oauth2/token',
+ headers=headers,
+ data=payload,
+ method="POST"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ self.auth_token = json.loads(body)["access_token"]
+
+ def _query(self):
+ results = []
+ workloads = []
+ self._authenticate()
+ for stack_slug in self.stack_slugs:
+ try:
+ workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads')
+ except Exception:
+ raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc())
+ for workload in workloads:
+ try:
+ workload_instances = self._stackpath_query_get_list(
+ self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances'
+ )
+ except Exception:
+ raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc())
+ for instance in workload_instances:
+ if instance["phase"] == "RUNNING":
+ instance["stackSlug"] = stack_slug
+ instance["workloadId"] = workload["id"]
+ instance["workloadSlug"] = workload["slug"]
+ instance["cityCode"] = instance["location"]["cityCode"]
+ instance["countryCode"] = instance["location"]["countryCode"]
+ instance["continent"] = instance["location"]["continent"]
+ instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"]
+ try:
+ if instance[self.hostname_key]:
+ results.append(instance)
+ except KeyError:
+ pass
+ return results
+
+ def _populate(self, instances):
+ for instance in instances:
+ for group_key in self.group_keys:
+ group = group_key + "_" + instance[group_key]
+ group = group.lower().replace(" ", "_").replace("-", "_")
+ self.inventory.add_group(group)
+ self.inventory.add_host(instance[self.hostname_key],
+ group=group)
+
+ def _stackpath_query_get_list(self, url):
+ self._authenticate()
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + self.auth_token,
+ }
+ next_page = True
+ result = []
+ cursor = '-1'
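+ # The StackPath API uses cursor-based pagination: '-1' requests the first
+ # page, and pageInfo.endCursor points at the next one while hasNextPage is true.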
+ while next_page:
+ resp = open_url(
+ url + '?page_request.first=10&page_request.after=%s' % cursor,
+ headers=headers,
+ method="GET"
+ )
+ status_code = resp.code
+ if status_code == 200:
+ body = resp.read()
+ body_json = json.loads(body)
+ result.extend(body_json["results"])
+ next_page = body_json["pageInfo"]["hasNextPage"]
+ if next_page:
+ cursor = body_json["pageInfo"]["endCursor"]
+ return result
+
+ def _get_stack_slugs(self, stacks):
+ self.stack_slugs = [stack["slug"] for stack in stacks]
+
+ def verify_file(self, path):
+ '''
+ :param loader: an ansible.parsing.dataloader.DataLoader object
+ :param path: the path to the inventory config file
+ :return the contents of the config file
+ '''
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')):
+ return True
+ display.debug(
+ "stackpath_compute inventory filename must end with \
+ 'stackpath_compute.yml' or 'stackpath_compute.yaml'"
+ )
+ return False
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ config = self._read_config_data(path)
+ self._validate_config(config)
+ self._set_credentials()
+
+ # get user specifications
+ self.use_internal_ip = self.get_option('use_internal_ip')
+ if self.use_internal_ip:
+ self.hostname_key = "ipAddress"
+ else:
+ self.hostname_key = "externalIpAddress"
+
+ self.stack_slugs = self.get_option('stack_slugs')
+ if not self.stack_slugs:
+ try:
+ stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks')
+ self._get_stack_slugs(stacks)
+ except Exception:
+ raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc())
+
+ cache_key = self.get_cache_key(path)
+ # false when refresh_cache or --flush-cache is used
+ if cache:
+ # get the user-specified directive
+ cache = self.get_option('cache')
+
+ # Generate inventory
+ cache_needs_update = False
+ if cache:
+ try:
+ results = self._cache[cache_key]
+ except KeyError:
+ # if cache expires or cache file doesn't exist
+ cache_needs_update = True
+
+ if not cache or cache_needs_update:
+ results = self._query()
+
+ self._populate(results)
+
+ # If the cache has expired/doesn't exist or
+ # if refresh_inventory/flush cache is used
+ # when the user is using caching, update the cached inventory
+ try:
+ if cache_needs_update or (not cache and self.get_option('cache')):
+ self._cache[cache_key] = results
+ except Exception:
+ raise AnsibleError("Failed to populate data: %s" % traceback.format_exc())
diff --git a/ansible_collections/community/general/plugins/inventory/virtualbox.py b/ansible_collections/community/general/plugins/inventory/virtualbox.py
new file mode 100644
index 000000000..c926d8b44
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/virtualbox.py
@@ -0,0 +1,287 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: virtualbox
+ short_description: virtualbox inventory source
+ description:
+ - Get inventory hosts from the local virtualbox installation.
+ - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml).
+ - The inventory_hostname is always the 'Name' of the virtualbox instance.
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ plugin:
+ description: token that ensures this is a source file for the 'virtualbox' plugin
+ required: true
+ choices: ['virtualbox', 'community.general.virtualbox']
+ running_only:
+ description: toggles showing all vms vs only those currently running
+ type: boolean
+ default: false
+ settings_password_file:
+ description: provide a file containing the settings password (equivalent to --settingspwfile)
+ network_info_path:
+ description: property path to query for network information (ansible_host)
+ default: "/VirtualBox/GuestInfo/Net/0/V4/IP"
+ query:
+ description: create vars from virtualbox properties
+ type: dictionary
+ default: {}
+'''
+
+EXAMPLES = '''
+# file must be named vbox.yaml or vbox.yml
+simple_config_file:
+ plugin: community.general.virtualbox
+ settings_password_file: /etc/virtualbox/secrets
+ query:
+ logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList
+ compose:
+ ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh')
+
+# add hosts to the group 'container' if their inventory hostname contains 'minis' (for example a minishift vm)
+plugin: community.general.virtualbox
+groups:
+ container: "'minis' in (inventory_hostname)"
+'''
+
+import os
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleParserError
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.common._collections_compat import MutableMapping
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.module_utils.common.process import get_bin_path
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using local virtualbox. '''
+
+ NAME = 'community.general.virtualbox'
+ VBOX = "VBoxManage"
+
+ def __init__(self):
+ self._vbox_path = None
+ super(InventoryModule, self).__init__()
+
+ def _query_vbox_data(self, host, property_path):
+ ret = None
+ try:
+ cmd = [self._vbox_path, b'guestproperty', b'get',
+ to_bytes(host, errors='surrogate_or_strict'),
+ to_bytes(property_path, errors='surrogate_or_strict')]
+ x = Popen(cmd, stdout=PIPE)
+ ipinfo = to_text(x.stdout.read(), errors='surrogate_or_strict')
+ if 'Value' in ipinfo:
+ a, ip = ipinfo.split(':', 1)
+ ret = ip.strip()
+ except Exception:
+ pass
+ return ret
+
+ def _set_variables(self, hostvars):
+
+ # set vars in inventory from hostvars
+ for host in hostvars:
+
+ query = self.get_option('query')
+ # create vars from vbox properties
+ if query and isinstance(query, MutableMapping):
+ for varname in query:
+ hostvars[host][varname] = self._query_vbox_data(host, query[varname])
+
+ strict = self.get_option('strict')
+
+ # create composite vars
+ self._set_composite_vars(self.get_option('compose'), hostvars[host], host, strict=strict)
+
+ # actually update inventory
+ for key in hostvars[host]:
+ self.inventory.set_variable(host, key, hostvars[host][key])
+
+ # constructed groups based on conditionals
+ self._add_host_to_composed_groups(self.get_option('groups'), hostvars[host], host, strict=strict)
+
+ # constructed keyed_groups
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict)
+
+ def _populate_from_cache(self, source_data):
+ hostvars = source_data.pop('_meta', {}).get('hostvars', {})
+ for group in source_data:
+ if group == 'all':
+ continue
+ else:
+ group = self.inventory.add_group(group)
+ hosts = source_data[group].get('hosts', [])
+ for host in hosts:
+ self._populate_host_vars([host], hostvars.get(host, {}), group)
+ self.inventory.add_child('all', group)
+ if not source_data:
+ for host in hostvars:
+ self.inventory.add_host(host)
+ self._populate_host_vars([host], hostvars.get(host, {}))
+
+ def _populate_from_source(self, source_data, using_current_cache=False):
+ if using_current_cache:
+ self._populate_from_cache(source_data)
+ return source_data
+
+ cacheable_results = {'_meta': {'hostvars': {}}}
+
+ hostvars = {}
+ prevkey = pref_k = ''
+ current_host = None
+
+ # needed to possibly set ansible_host
+ netinfo = self.get_option('network_info_path')
+
+ for line in source_data:
+ line = to_text(line)
+ if ':' not in line:
+ continue
+ try:
+ k, v = line.split(':', 1)
+ except Exception:
+ # skip lines that cannot be split
+ continue
+
+ if k.strip() == '':
+ # skip empty
+ continue
+
+ v = v.strip()
+ # found host
+ if k.startswith('Name') and ',' not in v: # some setting strings appear in Name
+ current_host = v
+ if current_host not in hostvars:
+ hostvars[current_host] = {}
+ self.inventory.add_host(current_host)
+
+ # try to get network info
+ netdata = self._query_vbox_data(current_host, netinfo)
+ if netdata:
+ self.inventory.set_variable(current_host, 'ansible_host', netdata)
+
+ # found groups
+ elif k == 'Groups':
+ for group in v.split('/'):
+ if group:
+ group = self.inventory.add_group(group)
+ self.inventory.add_child(group, current_host)
+ if group not in cacheable_results:
+ cacheable_results[group] = {'hosts': []}
+ cacheable_results[group]['hosts'].append(current_host)
+ continue
+
+ else:
+ # found vars, accumulate in hostvars for clean inventory set
+ pref_k = 'vbox_' + k.strip().replace(' ', '_')
+ leading_spaces = len(k) - len(k.lstrip(' '))
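+ # VBoxManage output nests sub-properties by indenting them one or two
+ # spaces under their parent key; anything indented deeper is skipped.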
+ if 0 < leading_spaces <= 2:
+ if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict):
+ hostvars[current_host][prevkey] = {}
+ hostvars[current_host][prevkey][pref_k] = v
+ elif leading_spaces > 2:
+ continue
+ else:
+ if v != '':
+ hostvars[current_host][pref_k] = v
+ if self._ungrouped_host(current_host, cacheable_results):
+ if 'ungrouped' not in cacheable_results:
+ cacheable_results['ungrouped'] = {'hosts': []}
+ cacheable_results['ungrouped']['hosts'].append(current_host)
+
+ prevkey = pref_k
+
+ self._set_variables(hostvars)
+ for host in hostvars:
+ h = self.inventory.get_host(host)
+ cacheable_results['_meta']['hostvars'][h.name] = h.vars
+
+ return cacheable_results
+
+ def _ungrouped_host(self, host, inventory):
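+ # A host counts as ungrouped when it appears in no group's host list
+ # anywhere in the (possibly nested) cacheable results structure.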
+ def find_host(host, inventory):
+ for k, v in inventory.items():
+ if k == '_meta':
+ continue
+ if isinstance(v, dict):
+ yield self._ungrouped_host(host, v)
+ elif isinstance(v, list):
+ yield host not in v
+ yield True
+
+ return all(find_host(host, inventory))
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('virtualbox.yaml', 'virtualbox.yml', 'vbox.yaml', 'vbox.yml')):
+ valid = True
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+
+ try:
+ self._vbox_path = get_bin_path(self.VBOX)
+ except ValueError as e:
+ raise AnsibleParserError(e)
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ cache_key = self.get_cache_key(path)
+
+ config_data = self._read_config_data(path)
+
+ # set _options from config data
+ self._consume_options(config_data)
+
+ source_data = None
+ if cache:
+ cache = self.get_option('cache')
+
+ update_cache = False
+ if cache:
+ try:
+ source_data = self._cache[cache_key]
+ except KeyError:
+ update_cache = True
+
+ if not source_data:
+ b_pwfile = to_bytes(self.get_option('settings_password_file'), errors='surrogate_or_strict', nonstring='passthru')
+ running = self.get_option('running_only')
+
+ # start getting data
+ cmd = [self._vbox_path, b'list', b'-l']
+ if running:
+ cmd.append(b'runningvms')
+ else:
+ cmd.append(b'vms')
+
+ if b_pwfile and os.path.exists(b_pwfile):
+ cmd.append(b'--settingspwfile')
+ cmd.append(b_pwfile)
+
+ try:
+ p = Popen(cmd, stdout=PIPE)
+ except Exception as e:
+ raise AnsibleParserError(to_native(e))
+
+ source_data = p.stdout.read().splitlines()
+
+ using_current_cache = cache and not update_cache
+ cacheable_results = self._populate_from_source(source_data, using_current_cache)
+
+ if update_cache:
+ self._cache[cache_key] = cacheable_results
diff --git a/ansible_collections/community/general/plugins/inventory/xen_orchestra.py b/ansible_collections/community/general/plugins/inventory/xen_orchestra.py
new file mode 100644
index 000000000..ddbdd9bb0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/inventory/xen_orchestra.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: xen_orchestra
+ short_description: Xen Orchestra inventory source
+ version_added: 4.1.0
+ author:
+ - Dom Del Nano (@ddelnano) <ddelnano@gmail.com>
+ - Samori Gorse (@shinuza) <samorigorse@gmail.com>
+ requirements:
+ - websocket-client >= 1.0.0
+ description:
+ - Get inventory hosts from a Xen Orchestra deployment.
+ - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).'
+ extends_documentation_fragment:
+ - constructed
+ - inventory_cache
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own.
+ required: true
+ choices: ['community.general.xen_orchestra']
+ type: str
+ api_host:
+ description:
+ - API host to XOA API.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead.
+ type: str
+ env:
+ - name: ANSIBLE_XO_HOST
+ user:
+ description:
+ - Xen Orchestra user.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_USER
+ password:
+ description:
+ - Xen Orchestra password.
+ - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead.
+ required: true
+ type: str
+ env:
+ - name: ANSIBLE_XO_PASSWORD
+ validate_certs:
+ description: Verify TLS certificate if using HTTPS.
+ type: boolean
+ default: true
+ use_ssl:
+ description: Use wss when connecting to the Xen Orchestra API.
+ type: boolean
+ default: true
+'''
+
+
+EXAMPLES = '''
+# file must be named xen_orchestra.yaml or xen_orchestra.yml
+plugin: community.general.xen_orchestra
+api_host: 192.168.1.255
+user: xo
+password: xo_pwd
+validate_certs: true
+use_ssl: true
+groups:
+ kube_nodes: "'kube_node' in tags"
+compose:
+ ansible_port: 2222
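+
+# The constructed fragment also supports keyed_groups; an illustrative example
+# grouping VMs by the power_state variable this plugin sets:
+keyed_groups:
+ - prefix: xo
+ key: power_state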
+
+'''
+
+import json
+import ssl
+from time import sleep
+
+from ansible.errors import AnsibleError
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+# 3rd party imports
+try:
+ HAS_WEBSOCKET = True
+ import websocket
+ from websocket import create_connection
+
+ if LooseVersion(websocket.__version__) < LooseVersion('1.0.0'):
+ raise ImportError
+except ImportError as e:
+ HAS_WEBSOCKET = False
+
+
+HALTED = 'Halted'
+PAUSED = 'Paused'
+RUNNING = 'Running'
+SUSPENDED = 'Suspended'
+POWER_STATES = [RUNNING, HALTED, SUSPENDED, PAUSED]
+HOST_GROUP = 'xo_hosts'
+POOL_GROUP = 'xo_pools'
+
+
+def clean_group_name(label):
+ return label.lower().replace(' ', '_').replace('-', '_')
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using XenOrchestra as source. '''
+
+ NAME = 'community.general.xen_orchestra'
+
+ def __init__(self):
+
+ super(InventoryModule, self).__init__()
+
+ # from config
+ self.counter = -1
+ self.session = None
+ self.cache_key = None
+ self.use_cache = None
+
+ @property
+ def pointer(self):
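+ # Monotonically increasing counter used as the id of each JSON-RPC request,
+ # so responses can be matched back to their calls.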
+ self.counter += 1
+ return self.counter
+
+ def create_connection(self, xoa_api_host):
+ validate_certs = self.get_option('validate_certs')
+ use_ssl = self.get_option('use_ssl')
+ proto = 'wss' if use_ssl else 'ws'
+
+ sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE}
+ self.conn = create_connection(
+ '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt)
+
+ CALL_TIMEOUT = 100
+ """Number of 1/10ths of a second to wait before method call times out."""
+
+ def call(self, method, params):
+ """Calls a method on the XO server with the provided parameters."""
+ id = self.pointer
+ self.conn.send(json.dumps({
+ 'id': id,
+ 'jsonrpc': '2.0',
+ 'method': method,
+ 'params': params
+ }))
+
+ waited = 0
+ while waited < self.CALL_TIMEOUT:
+ response = json.loads(self.conn.recv())
+ if 'id' in response and response['id'] == id:
+ return response
+ else:
+ sleep(0.1)
+ waited += 1
+
+ raise AnsibleError(
+ 'Method call {method} timed out after {timeout} seconds.'.format(method=method, timeout=self.CALL_TIMEOUT / 10))
+
+ def login(self, user, password):
+ result = self.call('session.signIn', {
+ 'username': user, 'password': password
+ })
+
+ if 'error' in result:
+ raise AnsibleError(
+ 'Could not connect: {0}'.format(result['error']))
+
+ def get_object(self, name):
+ answer = self.call('xo.getAllObjects', {'filter': {'type': name}})
+
+ if 'error' in answer:
+ raise AnsibleError(
+ 'Could not request: {0}'.format(answer['error']))
+
+ return answer['result']
+
+ def _get_objects(self):
+ self.create_connection(self.xoa_api_host)
+ self.login(self.xoa_user, self.xoa_password)
+
+ return {
+ 'vms': self.get_object('VM'),
+ 'pools': self.get_object('pool'),
+ 'hosts': self.get_object('host'),
+ }
+
+ def _apply_constructable(self, name, variables):
+ strict = self.get_option('strict')
+ self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=strict)
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=strict)
+ self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict)
+
+ def _add_vms(self, vms, hosts, pools):
+ for uuid, vm in vms.items():
+ group = 'with_ip'
+ ip = vm.get('mainIpAddress')
+ entry_name = uuid
+ power_state = vm['power_state'].lower()
+ pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId'])
+ host_name = self._host_group_name_for_uuid(hosts, vm['$container'])
+
+ self.inventory.add_host(entry_name)
+
+ # Grouping by power state
+ self.inventory.add_child(power_state, entry_name)
+
+ # Grouping by host
+ if host_name:
+ self.inventory.add_child(host_name, entry_name)
+
+ # Grouping by pool
+ if pool_name:
+ self.inventory.add_child(pool_name, entry_name)
+
+ # Grouping VMs with an IP together
+ if ip is None:
+ group = 'without_ip'
+ self.inventory.add_group(group)
+ self.inventory.add_child(group, entry_name)
+
+ # Adding meta
+ self.inventory.set_variable(entry_name, 'uuid', uuid)
+ self.inventory.set_variable(entry_name, 'ip', ip)
+ self.inventory.set_variable(entry_name, 'ansible_host', ip)
+ self.inventory.set_variable(entry_name, 'power_state', power_state)
+ self.inventory.set_variable(
+ entry_name, 'name_label', vm['name_label'])
+ self.inventory.set_variable(entry_name, 'type', vm['type'])
+ self.inventory.set_variable(
+ entry_name, 'cpus', vm['CPUs']['number'])
+ self.inventory.set_variable(entry_name, 'tags', vm['tags'])
+ self.inventory.set_variable(
+ entry_name, 'memory', vm['memory']['size'])
+ self.inventory.set_variable(
+ entry_name, 'has_ip', group == 'with_ip')
+ self.inventory.set_variable(
+ entry_name, 'is_managed', vm.get('managementAgentDetected', False))
+ self.inventory.set_variable(
+ entry_name, 'os_version', vm['os_version'])
+
+ self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars())
+
+ def _add_hosts(self, hosts, pools):
+ for host in hosts.values():
+ entry_name = host['uuid']
+ group_name = 'xo_host_{0}'.format(
+ clean_group_name(host['name_label']))
+ pool_name = self._pool_group_name_for_uuid(pools, host['$poolId'])
+
+ self.inventory.add_group(group_name)
+ self.inventory.add_host(entry_name)
+ self.inventory.add_child(HOST_GROUP, entry_name)
+ self.inventory.add_child(pool_name, entry_name)
+
+ self.inventory.set_variable(entry_name, 'enabled', host['enabled'])
+ self.inventory.set_variable(
+ entry_name, 'hostname', host['hostname'])
+ self.inventory.set_variable(entry_name, 'memory', host['memory'])
+ self.inventory.set_variable(entry_name, 'address', host['address'])
+ self.inventory.set_variable(entry_name, 'cpus', host['cpus'])
+ self.inventory.set_variable(entry_name, 'type', 'host')
+ self.inventory.set_variable(entry_name, 'tags', host['tags'])
+ self.inventory.set_variable(entry_name, 'version', host['version'])
+ self.inventory.set_variable(
+ entry_name, 'power_state', host['power_state'].lower())
+ self.inventory.set_variable(
+ entry_name, 'product_brand', host['productBrand'])
+
+ for pool in pools.values():
+ group_name = 'xo_pool_{0}'.format(
+ clean_group_name(pool['name_label']))
+
+ self.inventory.add_group(group_name)
+
+ def _add_pools(self, pools):
+ for pool in pools.values():
+ group_name = 'xo_pool_{0}'.format(
+ clean_group_name(pool['name_label']))
+
+ self.inventory.add_group(group_name)
+
+ # TODO: Refactor
+ def _pool_group_name_for_uuid(self, pools, pool_uuid):
+ for pool in pools:
+ if pool == pool_uuid:
+ return 'xo_pool_{0}'.format(
+ clean_group_name(pools[pool_uuid]['name_label']))
+
+ # TODO: Refactor
+ def _host_group_name_for_uuid(self, hosts, host_uuid):
+ for host in hosts:
+ if host == host_uuid:
+ return 'xo_host_{0}'.format(
+ clean_group_name(hosts[host_uuid]['name_label']
+ ))
+
+ def _populate(self, objects):
+ # Prepare general groups
+ self.inventory.add_group(HOST_GROUP)
+ self.inventory.add_group(POOL_GROUP)
+ for group in POWER_STATES:
+ self.inventory.add_group(group.lower())
+
+ self._add_pools(objects['pools'])
+ self._add_hosts(objects['hosts'], objects['pools'])
+ self._add_vms(objects['vms'], objects['hosts'], objects['pools'])
+
+ def verify_file(self, path):
+
+ valid = False
+ if super(InventoryModule, self).verify_file(path):
+ if path.endswith(('xen_orchestra.yaml', 'xen_orchestra.yml')):
+ valid = True
+ else:
+ self.display.vvv(
+ 'Skipping because the inventory source does not end with "xen_orchestra.yaml" or "xen_orchestra.yml"')
+ return valid
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_WEBSOCKET:
+ raise AnsibleError('This plugin requires websocket-client 1.0.0 or higher: '
+ 'https://github.com/websocket-client/websocket-client.')
+
+ super(InventoryModule, self).parse(inventory, loader, path)
+
+ # read config from file, this sets 'options'
+ self._read_config_data(path)
+ self.inventory = inventory
+
+ self.protocol = 'wss'
+ self.xoa_api_host = self.get_option('api_host')
+ self.xoa_user = self.get_option('user')
+ self.xoa_password = self.get_option('password')
+ self.cache_key = self.get_cache_key(path)
+ self.use_cache = cache and self.get_option('cache')
+
+ self.validate_certs = self.get_option('validate_certs')
+ if not self.get_option('use_ssl'):
+ self.protocol = 'ws'
+
+ objects = self._get_objects()
+ self._populate(objects)
diff --git a/ansible_collections/community/general/plugins/lookup/bitwarden.py b/ansible_collections/community/general/plugins/lookup/bitwarden.py
new file mode 100644
index 000000000..27de1afe6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/bitwarden.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Jonathan Lung <lungj@heresjono.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ name: bitwarden
+ author:
+ - Jonathan Lung (@lungj) <lungj@heresjono.com>
+ requirements:
+ - bw (command line utility)
+ - be logged into bitwarden
+ - bitwarden vault unlocked
+ - C(BW_SESSION) environment variable set
+ short_description: Retrieve secrets from Bitwarden
+ version_added: 5.4.0
+ description:
+ - Retrieve secrets from Bitwarden.
+ options:
+ _terms:
+ description: Key(s) to fetch values for from login info.
+ required: true
+ type: list
+ elements: str
+ search:
+ description: Field to retrieve, for example C(name) or C(id).
+ type: str
+ default: name
+ version_added: 5.7.0
+ field:
+ description: Field to fetch. Leave unset to fetch whole response.
+ type: str
+ collection_id:
+ description: Collection ID to filter results by collection. Leave unset to skip filtering.
+ type: str
+ version_added: 6.3.0
+"""
+
+EXAMPLES = """
+- name: "Get 'password' from Bitwarden record named 'a_test'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='password') }}
+
+- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
+
+- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
+
+- name: "Get full Bitwarden record named 'a_test'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test') }}
+
+- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
+ ansible.builtin.debug:
+ msg: >-
+ {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
+"""
+
+RETURN = """
+ _raw:
+ description: List of requested field or JSON object of list of matches.
+ type: list
+ elements: raw
+"""
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.parsing.ajson import AnsibleJSONDecoder
+from ansible.plugins.lookup import LookupBase
+
+
+class BitwardenException(AnsibleError):
+ pass
+
+
+class Bitwarden(object):
+
+ def __init__(self, path='bw'):
+ self._cli_path = path
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ @property
+ def unlocked(self):
+ out, err = self._run(['status'], stdin="")
+ decoded = AnsibleJSONDecoder().raw_decode(out)[0]
+ return decoded['status'] == 'unlocked'
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(to_bytes(stdin))
+ rc = p.wait()
+ if rc != expected_rc:
+ raise BitwardenException(err)
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
+
+ def _get_matches(self, search_value, search_field, collection_id):
+ """Return matching records whose search_field is equal to key.
+ """
+
+ # Prepare set of params for Bitwarden CLI
+ params = ['list', 'items', '--search', search_value]
+
+ if collection_id:
+ params.extend(['--collectionid', collection_id])
+
+ out, err = self._run(params)
+
+ # This includes things that matched in different fields.
+ initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
+
+ # Filter to only include results from the right field.
+ return [item for item in initial_matches if item[search_field] == search_value]
+
+ def get_field(self, field, search_value, search_field="name", collection_id=None):
+ """Return a list of the specified field for records whose search_field match search_value
+ and filtered by collection if collection has been provided.
+
+ If field is None, return the whole record for each match.
+ """
+ matches = self._get_matches(search_value, search_field, collection_id)
+
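+ # These standard fields live under the 'login' key of a Bitwarden item;
+ # any other requested field is looked up among the item's custom fields.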
+ if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:
+ return [match['login'][field] for match in matches]
+ elif not field:
+ return matches
+ else:
+ custom_field_matches = []
+ for match in matches:
+ for custom_field in match['fields']:
+ if custom_field['name'] == field:
+ custom_field_matches.append(custom_field['value'])
+ if matches and not custom_field_matches:
+ raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
+ return custom_field_matches
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ field = self.get_option('field')
+ search_field = self.get_option('search')
+ collection_id = self.get_option('collection_id')
+ if not _bitwarden.unlocked:
+ raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
+
+ return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
+
+
+_bitwarden = Bitwarden()
diff --git a/ansible_collections/community/general/plugins/lookup/cartesian.py b/ansible_collections/community/general/plugins/lookup/cartesian.py
new file mode 100644
index 000000000..d63f3943b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/cartesian.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Bradley Young <young.bradley@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: cartesian
+ short_description: returns the cartesian product of lists
+ description:
+ - Takes the input lists and returns a list that represents the product of the input lists.
+ - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b].
+ You can see the exact syntax in the examples section.
+ options:
+ _terms:
+ description:
+ - a set of lists
+ type: list
+ elements: list
+ required: true
+'''
+
+EXAMPLES = """
+- name: Example from the description, turning [1, 2, 3] and [a, b] into their cartesian product
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.cartesian', [1, 2, 3], ['a', 'b']) }}"
+
+- name: loops over the cartesian product of the supplied lists
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cartesian:
+ - "{{list1}}"
+ - "{{list2}}"
+ - [1,2,3,4,5,6]
+"""
+
+RETURN = """
+ _list:
+ description:
+ - list of lists composed of elements of the input lists
+ type: list
+ elements: list
+"""
+
+from itertools import product
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+ """
+ Create the cartesian product of lists
+ """
+
+ def _lookup_variables(self, terms):
+ """
+ Turn this:
+ terms == ["1,2,3", "a,b"]
+ into this:
+ terms == [[1,2,3], [a, b]]
+ """
+ results = []
+ for x in terms:
+ try:
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar)
+ except TypeError:
+ # The loader argument is deprecated in ansible-core 2.14+. Fall back to
+ # pre-2.14 behavior for older ansible-core versions.
+ intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ terms = self._lookup_variables(terms)
+
+ my_list = terms[:]
+ if len(my_list) == 0:
+ raise AnsibleError("with_cartesian requires at least one element in each list")
+
+ return [self._flatten(x) for x in product(*my_list)]
diff --git a/ansible_collections/community/general/plugins/lookup/chef_databag.py b/ansible_collections/community/general/plugins/lookup/chef_databag.py
new file mode 100644
index 000000000..b14d924ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/chef_databag.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Josh Bradley <jbradley(at)digitalocean.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: chef_databag
+ short_description: fetches data from a Chef Databag
+ description:
+ - "This is a lookup plugin to provide access to chef data bags using the pychef package.
+ It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from,
+ starting from either the given base path or the current working directory.
+ The lookup order mirrors the one from Chef: all folders in the base path are walked back looking for the following configuration
+ files, in order: .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb"
+ requirements:
+ - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))"
+ options:
+ name:
+ description:
+ - Name of the databag
+ required: true
+ item:
+ description:
+ - Item to fetch
+ required: true
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - The value from the databag.
+ type: list
+ elements: dict
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.parsing.splitter import parse_kv
+
+try:
+ import chef
+ HAS_CHEF = True
+except ImportError as missing_module:
+ HAS_CHEF = False
+
+
+class LookupModule(LookupBase):
+ """
+ Chef data bag lookup module
+ """
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupModule, self).__init__(loader, templar, **kwargs)
+
+ # setup vars for data bag name and data bag item
+ self.name = None
+ self.item = None
+
+ def parse_kv_args(self, args):
+ """
+ parse key-value style arguments
+ """
+
+ for arg in ["name", "item"]:
+ try:
+ arg_raw = args.pop(arg, None)
+ if arg_raw is None:
+ continue
+ parsed = str(arg_raw)
+ setattr(self, arg, parsed)
+ except ValueError:
+ raise AnsibleError(
+ "can't parse arg {0}={1} as string".format(arg, arg_raw)
+ )
+ if args:
+ raise AnsibleError(
+ "unrecognized arguments to with_sequence: %r" % list(args.keys())
+ )
+
+ def run(self, terms, variables=None, **kwargs):
+ # Ensure pychef has been loaded
+ if not HAS_CHEF:
+ raise AnsibleError('PyChef needed for lookup plugin, try `pip install pychef`')
+
+ for term in terms:
+ self.parse_kv_args(parse_kv(term))
+
+ api_object = chef.autoconfigure()
+
+ if not isinstance(api_object, chef.api.ChefAPI):
+ raise AnsibleError('Unable to connect to Chef Server API.')
+
+ data_bag_object = chef.DataBag(self.name)
+
+ data_bag_item = data_bag_object[self.item]
+
+ return [dict(data_bag_item)]
diff --git a/ansible_collections/community/general/plugins/lookup/collection_version.py b/ansible_collections/community/general/plugins/lookup/collection_version.py
new file mode 100644
index 000000000..4d25585b8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/collection_version.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+name: collection_version
+author: Felix Fontein (@felixfontein)
+version_added: "4.0.0"
+short_description: Retrieves the version of an installed collection
+description:
+ - This lookup allows querying the version of an installed collection, and determining whether a
+ collection is installed at all.
+ - By default it returns C(none) for non-existing collections and C(*) for collections without a
+ version number. The latter should only happen in development environments, or when installing
+ a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted
+ by providing other values with I(result_not_found) and I(result_no_version).
+options:
+ _terms:
+ description:
+ - The collections to look for.
+ - For example C(community.general).
+ type: list
+ elements: str
+ required: true
+ result_not_found:
+ description:
+ - The value to return when the collection could not be found.
+ - By default, C(none) is returned.
+ type: string
+ default: ~
+ result_no_version:
+ description:
+ - The value to return when the collection has no version number.
+ - This can happen for collections installed from git which do not have a version number
+ in C(galaxy.yml).
+ - By default, C(*) is returned.
+ type: string
+ default: '*'
+"""
+
+EXAMPLES = """
+- name: Check version of community.general
+ ansible.builtin.debug:
+ msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}"
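+
+- name: Use a fallback value when a collection is not installed (illustrative fallback string)
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.collection_version', 'community.general', result_not_found='not installed') }}"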
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - The version number of the collections listed as input.
+ - If a collection cannot be found, it will return the value provided in I(result_not_found).
+ By default, this is C(none).
+ - If a collection can be found but its version cannot be identified, it will return the value provided in
+ I(result_no_version). By default, this is C(*). This can happen for collections installed
+ from git which do not have a version number in C(galaxy.yml).
+ type: list
+ elements: str
+"""
+
+import json
+import os
+import re
+
+import yaml
+
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.compat.importlib import import_module
+from ansible.plugins.lookup import LookupBase
+
+
+FQCN_RE = re.compile(r'^[A-Za-z0-9_]+\.[A-Za-z0-9_]+$')
+
+
+def load_collection_meta_manifest(manifest_path):
+ with open(manifest_path, 'rb') as f:
+ meta = json.load(f)
+ return {
+ 'version': meta['collection_info']['version'],
+ }
+
+
+def load_collection_meta_galaxy(galaxy_path, no_version='*'):
+ with open(galaxy_path, 'rb') as f:
+ meta = yaml.safe_load(f)
+ return {
+ 'version': meta.get('version') or no_version,
+ }
+
+
+def load_collection_meta(collection_pkg, no_version='*'):
+ path = os.path.dirname(collection_pkg.__file__)
+
+ # Try to load MANIFEST.json
+ manifest_path = os.path.join(path, 'MANIFEST.json')
+ if os.path.exists(manifest_path):
+ return load_collection_meta_manifest(manifest_path)
+
+ # Try to load galaxy.y(a)ml
+ galaxy_path = os.path.join(path, 'galaxy.yml')
+ galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
+ # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
+ # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
+ # ansible-core 2.12.
+ for path in (galaxy_path, galaxy_alt_path):
+ if os.path.exists(path):
+ return load_collection_meta_galaxy(path, no_version=no_version)
+
+ return {}
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ result = []
+ self.set_options(var_options=variables, direct=kwargs)
+ not_found = self.get_option('result_not_found')
+ no_version = self.get_option('result_no_version')
+
+ for term in terms:
+ if not FQCN_RE.match(term):
+ raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term))
+
+ try:
+ collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term))
+ except ImportError:
+ # Collection not found
+ result.append(not_found)
+ continue
+
+ try:
+ data = load_collection_meta(collection_pkg, no_version=no_version)
+ except Exception as exc:
+ raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc))
+
+ result.append(data.get('version', no_version))
+
+ return result
diff --git a/ansible_collections/community/general/plugins/lookup/consul_kv.py b/ansible_collections/community/general/plugins/lookup/consul_kv.py
new file mode 100644
index 000000000..f17f1b269
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/consul_kv.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: consul_kv
+ short_description: Fetch metadata from a Consul key value store.
+ description:
+ - Lookup metadata for a playbook from the key value store in a Consul cluster.
+ Values can be easily set in the kv store with simple REST commands, for example
+ C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata).
+ requirements:
+ - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)'
+ options:
+ _raw:
+ description: List of key(s) to retrieve.
+ type: list
+ elements: string
+ recurse:
+ type: boolean
+ description: If true, will retrieve all the values that have the given key as prefix.
+ default: false
+ index:
+ description:
+ - If the key has a value with the specified index then this is returned allowing access to historical values.
+ datacenter:
+ description:
+ - Retrieve the key from a consul datacenter other than the default for the consul host.
+ token:
+ description: The acl token to allow access to restricted values.
+ host:
+ default: localhost
+ description:
+ - The target to connect to, must be a resolvable address.
+ Will be determined from C(ANSIBLE_CONSUL_URL) if that is set.
+ - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)"
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: host
+ port:
+ description:
+ - The port of the target host to connect to.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ default: 8500
+ scheme:
+ default: http
+ description:
+ - Whether to use http or https.
+ - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there.
+ validate_certs:
+ default: true
+ description: Whether to verify the ssl connection or not.
+ env:
+ - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+ ini:
+ - section: lookup_consul
+ key: validate_certs
+ client_cert:
+ description: The client cert to verify the ssl connection.
+ env:
+ - name: ANSIBLE_CONSUL_CLIENT_CERT
+ ini:
+ - section: lookup_consul
+ key: client_cert
+ url:
+ description: "The target to connect to, should look like this: C(https://my.consul.server:8500)."
+ type: str
+ version_added: 1.0.0
+ env:
+ - name: ANSIBLE_CONSUL_URL
+ ini:
+ - section: lookup_consul
+ key: url
+'''
+
+EXAMPLES = """
+ - ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to/retrieve'
+
+ - name: Parameters can be provided after the key to be more specific about what to retrieve
+ ansible.builtin.debug:
+ msg: 'key contains {{item}}'
+ with_community.general.consul_kv:
+ - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+ - name: retrieving a KV from a remote cluster on non default port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}"
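+
+ - name: use a full Consul URL instead of separate host and port (illustrative address)
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.consul_kv', 'my/key', url='https://my.consul.server:8500') }}"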
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in consul.
+ type: dict
+"""
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_text
+
+try:
+ import consul
+
+ HAS_CONSUL = True
+except ImportError as e:
+ HAS_CONSUL = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not HAS_CONSUL:
+ raise AnsibleError(
+ 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation')
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ scheme = self.get_option('scheme')
+ host = self.get_option('host')
+ port = self.get_option('port')
+ url = self.get_option('url')
+ if url is not None:
+ u = urlparse(url)
+ if u.scheme:
+ scheme = u.scheme
+ host = u.hostname
+ if u.port is not None:
+ port = u.port
+
+ validate_certs = self.get_option('validate_certs')
+ client_cert = self.get_option('client_cert')
+
+ values = []
+ try:
+ for term in terms:
+ params = self.parse_params(term)
+ consul_api = consul.Consul(host=host, port=port, scheme=scheme, verify=validate_certs, cert=client_cert)
+
+ results = consul_api.kv.get(params['key'],
+ token=params['token'],
+ index=params['index'],
+ recurse=params['recurse'],
+ dc=params['datacenter'])
+ if results[1]:
+ # responds with a single or list of result maps
+ if isinstance(results[1], list):
+ for r in results[1]:
+ values.append(to_text(r['Value']))
+ else:
+ values.append(to_text(results[1]['Value']))
+ except Exception as e:
+ raise AnsibleError(
+ "Error locating '%s' in kv store. Error was %s" % (term, e))
+
+ return values
+
+ def parse_params(self, term):
+ params = term.split(' ')
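+ # The first whitespace-separated token is the key; any remaining tokens are
+ # 'name=value' pairs overriding the plugin options below.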
+
+ paramvals = {
+ 'key': params[0],
+ 'token': self.get_option('token'),
+ 'recurse': self.get_option('recurse'),
+ 'index': self.get_option('index'),
+ 'datacenter': self.get_option('datacenter')
+ }
+
+ # parameters specified?
+ try:
+ for param in params[1:]:
+ if param and len(param) > 0:
+ name, value = param.split('=')
+ if name not in paramvals:
+ raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name)
+ paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+
+ return paramvals
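+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# A standalone rendition of the term parsing above, so the
+# 'key/to/retrieve recurse=true token=XYZ' syntax can be tried without
+# Ansible; the defaults dict stands in for the plugin options and all
+# values below are hypothetical.
+if __name__ == '__main__':
+    def parse_term(term, defaults):
+        parts = term.split(' ')
+        parsed = dict(defaults, key=parts[0])
+        for part in parts[1:]:
+            if part:
+                name, value = part.split('=')
+                if name not in parsed:
+                    raise ValueError('%s is not a valid consul lookup parameter' % name)
+                parsed[name] = value
+        return parsed
+
+    defaults = {'key': None, 'token': None, 'recurse': False, 'index': None, 'datacenter': None}
+    print(parse_term('key/to/retrieve recurse=true', defaults))
+    # {'key': 'key/to/retrieve', 'token': None, 'recurse': 'true', 'index': None, 'datacenter': None}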
diff --git a/ansible_collections/community/general/plugins/lookup/credstash.py b/ansible_collections/community/general/plugins/lookup/credstash.py
new file mode 100644
index 000000000..6a3f58595
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/credstash.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Ensighten <infra@ensighten.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: credstash
+ short_description: retrieve secrets from Credstash on AWS
+ requirements:
+ - credstash (python library)
+ description:
+ - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash"
+ options:
+ _terms:
+      description: Term or list of terms to look up in the credential store.
+ type: list
+ elements: string
+ required: true
+ table:
+ description: name of the credstash table to query
+ type: str
+ default: 'credential-store'
+ version:
+ description: Credstash version
+ type: str
+ default: ''
+ region:
+ description: AWS region
+ type: str
+ profile_name:
+ description: AWS profile to use for authentication
+ type: str
+ env:
+ - name: AWS_PROFILE
+ aws_access_key_id:
+ description: AWS access key ID
+ type: str
+ env:
+ - name: AWS_ACCESS_KEY_ID
+ aws_secret_access_key:
+      description: AWS secret access key
+ type: str
+ env:
+ - name: AWS_SECRET_ACCESS_KEY
+ aws_session_token:
+ description: AWS session token
+ type: str
+ env:
+ - name: AWS_SESSION_TOKEN
+'''
+
+EXAMPLES = """
+- name: first use credstash to store your secrets
+ ansible.builtin.shell: credstash put my-github-password secure123
+
+- name: "Test credstash lookup plugin -- get my github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-github-password') }}"
+
+- name: "Test credstash lookup plugin -- get my other password from us-west-1"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'my-other-password', region='us-west-1') }}"
+
+- name: "Test credstash lookup plugin -- get the company's github password"
+ ansible.builtin.debug:
+ msg: "Credstash lookup! {{ lookup('community.general.credstash', 'company-github-password', table='company-passwords') }}"
+
+- name: Example play using the 'context' feature
+ hosts: localhost
+ vars:
+ context:
+ app: my_app
+ environment: production
+ tasks:
+
+ - name: "Test credstash lookup plugin -- get the password with a context passed as a variable"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}"
+
+ - name: "Test credstash lookup plugin -- get the password with a context defined here"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - Value(s) stored in Credstash.
+ type: str
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+CREDSTASH_INSTALLED = False
+
+try:
+ import credstash
+ CREDSTASH_INSTALLED = True
+except ImportError:
+ CREDSTASH_INSTALLED = False
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ if not CREDSTASH_INSTALLED:
+ raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ version = self.get_option('version')
+ region = self.get_option('region')
+ table = self.get_option('table')
+ profile_name = self.get_option('profile_name')
+ aws_access_key_id = self.get_option('aws_access_key_id')
+ aws_secret_access_key = self.get_option('aws_secret_access_key')
+ aws_session_token = self.get_option('aws_session_token')
+
+ context = dict(
+ (k, v) for k, v in kwargs.items()
+ if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')
+ )
+
+ kwargs_pass = {
+ 'profile_name': profile_name,
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key,
+ 'aws_session_token': aws_session_token,
+ }
+
+ ret = []
+ for term in terms:
+ try:
+ ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass))
+ except credstash.ItemNotFound:
+ raise AnsibleError('Key {0} not found'.format(term))
+ except Exception as e:
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+
+ return ret
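+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# How run() splits lookup kwargs: the reserved option names become
+# credstash.getSecret() arguments, everything else becomes the KMS
+# encryption context. The sample kwargs are hypothetical.
+if __name__ == '__main__':
+    RESERVED = ('version', 'region', 'table', 'profile_name',
+                'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token')
+    sample_kwargs = {'region': 'us-west-1', 'app': 'my_app', 'environment': 'production'}
+    context = dict((k, v) for k, v in sample_kwargs.items() if k not in RESERVED)
+    print(context)  # {'app': 'my_app', 'environment': 'production'}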
diff --git a/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py b/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
new file mode 100644
index 000000000..c3cc427df
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/cyberarkpassword.py
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Edward Nunez <edward.nunez@cyberark.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author: Unknown (!UNKNOWN)
+ name: cyberarkpassword
+ short_description: get secrets from CyberArk AIM
+ requirements:
+ - CyberArk AIM tool installed
+ description:
+ - Get secrets from CyberArk AIM.
+  options:
+ _command:
+ description: Cyberark CLI utility.
+ env:
+ - name: AIM_CLIPASSWORDSDK_CMD
+ default: '/opt/CARKaim/sdk/clipasswordsdk'
+ appid:
+ description: Defines the unique ID of the application that is issuing the password request.
+ required: true
+ query:
+ description: Describes the filter criteria for the password retrieval.
+ required: true
+ output:
+ description:
+ - Specifies the desired output fields separated by commas.
+ - "They could be: Password, PassProps.<property>, PasswordChangeInProcess"
+ default: 'password'
+ _extra:
+      description: For extra_params values, please check the parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide".
+ notes:
+ - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe.
+'''
+
+EXAMPLES = """
+ - name: passing options to the lookup
+ ansible.builtin.debug:
+ msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}'
+ vars:
+ cyquery:
+ appid: "app_ansible"
+ query: "safe=CyberArk_Passwords;folder=root;object=AdminPass"
+ output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess"
+
+
+ - name: used in a loop
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.cyberarkpassword:
+ appid: 'app_ansible'
+ query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass'
+ output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess'
+"""
+
+RETURN = """
+_result:
+ description: A list containing one dictionary.
+ type: list
+  elements: dict
+  contains:
+    password:
+      description:
+        - The actual value stored.
+    passprops:
+      description: Properties assigned to the entry.
+      type: dict
+    passwordchangeinprocess:
+      description: Whether a password change is in process.
+"""
+
+import os
+import subprocess
+from subprocess import PIPE
+from subprocess import Popen
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+from ansible.utils.display import Display
+
+display = Display()
+
+CLIPASSWORDSDK_CMD = os.getenv('AIM_CLIPASSWORDSDK_CMD', '/opt/CARKaim/sdk/clipasswordsdk')
+
+
+class CyberarkPassword:
+
+ def __init__(self, appid=None, query=None, output=None, **kwargs):
+
+ self.appid = appid
+ self.query = query
+ self.output = output
+
+ # Support for Generic parameters to be able to specify
+ # FailRequestOnPasswordChange, Queryformat, Reason, etc.
+ self.extra_parms = []
+ for key, value in kwargs.items():
+ self.extra_parms.append('-p')
+ self.extra_parms.append("%s=%s" % (key, value))
+
+ if self.appid is None:
+ raise AnsibleError("CyberArk Error: No Application ID specified")
+ if self.query is None:
+ raise AnsibleError("CyberArk Error: No Vault query specified")
+
+ if self.output is None:
+ # If no output is specified, return at least the password
+ self.output = "password"
+ else:
+ # To avoid reference issues/confusion to values, all
+ # output 'keys' will be in lowercase.
+ self.output = self.output.lower()
+
+ self.b_delimiter = b"@#@" # Known delimiter to split output results
+
+ def get(self):
+
+ result_dict = {}
+
+ try:
+ all_parms = [
+ CLIPASSWORDSDK_CMD,
+ 'GetPassword',
+ '-p', 'AppDescs.AppID=%s' % self.appid,
+ '-p', 'Query=%s' % self.query,
+ '-o', self.output,
+ '-d', self.b_delimiter]
+ all_parms.extend(self.extra_parms)
+
+ b_credential = b""
+ b_all_params = [to_bytes(v) for v in all_parms]
+ tmp_output, tmp_error = Popen(b_all_params, stdout=PIPE, stderr=PIPE, stdin=PIPE).communicate()
+
+ if tmp_output:
+ b_credential = to_bytes(tmp_output)
+
+ if tmp_error:
+ raise AnsibleError("ERROR => %s " % (tmp_error))
+
+ if b_credential and b_credential.endswith(b'\n'):
+ b_credential = b_credential[:-1]
+
+ output_names = self.output.split(",")
+ output_values = b_credential.split(self.b_delimiter)
+
+ for i in range(len(output_names)):
+ if output_names[i].startswith("passprops."):
+ if "passprops" not in result_dict:
+ result_dict["passprops"] = {}
+ output_prop_name = output_names[i][10:]
+ result_dict["passprops"][output_prop_name] = to_native(output_values[i])
+ else:
+ result_dict[output_names[i]] = to_native(output_values[i])
+
+ except subprocess.CalledProcessError as e:
+ raise AnsibleError(e.output)
+ except OSError as e:
+ raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror))
+
+ return [result_dict]
+
+
+class LookupModule(LookupBase):
+
+ """
+ USAGE:
+
+ """
+
+ def run(self, terms, variables=None, **kwargs):
+ display.vvvv("%s" % terms)
+ if isinstance(terms, list):
+ return_values = []
+ for term in terms:
+ display.vvvv("Term: %s" % term)
+ cyberark_conn = CyberarkPassword(**term)
+ return_values.append(cyberark_conn.get())
+ return return_values
+ else:
+ cyberark_conn = CyberarkPassword(**terms)
+ result = cyberark_conn.get()
+ return result
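+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# How get() maps clipasswordsdk output back onto the requested fields:
+# stdout is split on the known delimiter and paired with the lowercased
+# output names, nesting 'passprops.*' entries. The sample bytes are
+# made up.
+if __name__ == '__main__':
+    output = 'password,passprops.username'
+    b_credential = b'secret123@#@admin'
+    result = {}
+    for name, value in zip(output.split(','), b_credential.split(b'@#@')):
+        if name.startswith('passprops.'):
+            result.setdefault('passprops', {})[name[10:]] = value.decode()
+        else:
+            result[name] = value.decode()
+    print(result)  # {'password': 'secret123', 'passprops': {'username': 'admin'}}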
diff --git a/ansible_collections/community/general/plugins/lookup/dependent.py b/ansible_collections/community/general/plugins/lookup/dependent.py
new file mode 100644
index 000000000..54714344e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/dependent.py
@@ -0,0 +1,224 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015-2021, Felix Fontein <felix@fontein.de>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+name: dependent
+short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables
+author: Felix Fontein (@felixfontein)
+version_added: 3.1.0
+description:
+ - "Takes the input lists and returns a list with elements that are lists, dictionaries,
+ or template expressions which evaluate to lists or dicts, composed of the elements of
+ the input evaluated lists and dictionaries."
+options:
+ _terms:
+ description:
+ - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary.
+ The name is the index that is used in the result object. The value is iterated over as described below.
+ - If the value is a list, it is simply iterated over.
+ - If the value is a dictionary, it is iterated over and returned as if they would be processed by the
+ R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter).
+ - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen
+ elements with C(item.<index_name>). The result must be a list or a dictionary.
+ type: list
+ elements: dict
+ required: true
+"""
+
+EXAMPLES = """
+- name: Install/remove public keys for active admin users
+ ansible.posix.authorized_key:
+ user: "{{ item.admin.key }}"
+ key: "{{ lookup('file', item.key.public_key) }}"
+ state: "{{ 'present' if item.key.active else 'absent' }}"
+ when: item.admin.value.active
+ with_community.general.dependent:
+ - admin: admin_user_data
+ - key: admin_ssh_keys[item.admin.key]
+ loop_control:
+ # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
+ label: "{{ [item.admin.key, 'active' if item.key.active else 'inactive', item.key.public_key] }}"
+ vars:
+ admin_user_data:
+ admin1:
+ name: Alice
+ active: true
+ admin2:
+ name: Bob
+ active: true
+ admin_ssh_keys:
+ admin1:
+ - private_key: keys/private_key_admin1.pem
+ public_key: keys/private_key_admin1.pub
+ active: true
+ admin2:
+ - private_key: keys/private_key_admin2.pem
+ public_key: keys/private_key_admin2.pub
+ active: true
+ - private_key: keys/private_key_admin2-old.pem
+ public_key: keys/private_key_admin2-old.pub
+ active: false
+
+- name: Update DNS records
+ community.aws.route53:
+ zone: "{{ item.zone.key }}"
+ record: "{{ item.prefix.key ~ '.' if item.prefix.key else '' }}{{ item.zone.key }}"
+ type: "{{ item.entry.key }}"
+ ttl: "{{ item.entry.value.ttl | default(3600) }}"
+ value: "{{ item.entry.value.value }}"
+ state: "{{ 'absent' if (item.entry.value.absent | default(False)) else 'present' }}"
+ overwrite: true
+ loop_control:
+ # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists
+ label: |-
+ {{ [item.zone.key, item.prefix.key, item.entry.key,
+ item.entry.value.ttl | default(3600),
+ item.entry.value.absent | default(False), item.entry.value.value] }}
+ with_community.general.dependent:
+ - zone: dns_setup
+ - prefix: item.zone.value
+ - entry: item.prefix.value
+ vars:
+ dns_setup:
+ example.com:
+ '':
+ A:
+ value:
+ - 1.2.3.4
+ AAAA:
+ value:
+ - "2a01:1:2:3::1"
+ 'test._domainkey':
+ TXT:
+ ttl: 300
+ value:
+ - '"k=rsa; t=s; p=MIGfMA..."'
+ example.org:
+ 'www':
+ A:
+ value:
+ - 1.2.3.4
+ - 5.6.7.8
+"""
+
+RETURN = """
+ _list:
+ description:
+ - A list composed of dictionaries whose keys are the variable names from the input list.
+ type: list
+ elements: dict
+ sample:
+ - key1: a
+ key2: test
+ - key1: a
+ key2: foo
+ - key1: b
+ key2: bar
+"""
+
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+from ansible.release import __version__ as ansible_version
+from ansible.template import Templar
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option.
+# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419)
+_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0')
+
+
+class LookupModule(LookupBase):
+ def __evaluate(self, expression, templar, variables):
+ """Evaluate expression with templar.
+
+ ``expression`` is the expression to evaluate.
+ ``variables`` are the variables to use.
+ """
+ templar.available_variables = variables or {}
+ expression = "{0}{1}{2}".format("{{", expression, "}}")
+ if _TEMPLAR_HAS_TEMPLATE_CACHE:
+ return templar.template(expression, cache=False)
+ return templar.template(expression)
+
+ def __process(self, result, terms, index, current, templar, variables):
+ """Fills ``result`` list with evaluated items.
+
+ ``result`` is a list where the resulting items are placed.
+ ``terms`` is the parsed list of terms
+ ``index`` is the current index to be processed in the list.
+ ``current`` is a dictionary where the first ``index`` values are filled in.
+ ``variables`` are the variables currently available.
+ """
+ # If we are done, add to result list:
+ if index == len(terms):
+ result.append(current.copy())
+ return
+
+ key, expression, values = terms[index]
+
+ if expression is not None:
+ # Evaluate expression in current context
+            item_vars = variables.copy()
+            item_vars['item'] = current.copy()
+            try:
+                values = self.__evaluate(expression, templar, variables=item_vars)
+ except Exception as e:
+ raise AnsibleLookupError(
+ 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format(
+ error=e, key=key, item=current))
+
+ if isinstance(values, Mapping):
+ for idx, val in sorted(values.items()):
+ current[key] = dict([('key', idx), ('value', val)])
+ self.__process(result, terms, index + 1, current, templar, variables)
+ elif isinstance(values, Sequence):
+ for elt in values:
+ current[key] = elt
+ self.__process(result, terms, index + 1, current, templar, variables)
+ else:
+ raise AnsibleLookupError(
+ 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format(
+ key=key, item=current, type=type(values)))
+
+ def run(self, terms, variables=None, **kwargs):
+ """Generate list."""
+ self.set_options(var_options=variables, direct=kwargs)
+
+ result = []
+ if len(terms) > 0:
+ templar = Templar(loader=self._templar._loader)
+ data = []
+ vars_so_far = set()
+ for index, term in enumerate(terms):
+ if not isinstance(term, Mapping):
+ raise AnsibleLookupError(
+ 'Parameter {index} must be a dictionary, got {type}'.format(
+ index=index, type=type(term)))
+ if len(term) != 1:
+ raise AnsibleLookupError(
+ 'Parameter {index} must be a one-element dictionary, got {count} elements'.format(
+ index=index, count=len(term)))
+ k, v = list(term.items())[0]
+ if k in vars_so_far:
+ raise AnsibleLookupError(
+ 'The variable {key!r} appears more than once'.format(key=k))
+ vars_so_far.add(k)
+ if isinstance(v, string_types):
+ data.append((k, v, None))
+ elif isinstance(v, (Sequence, Mapping)):
+ data.append((k, None, v))
+ else:
+ raise AnsibleLookupError(
+ 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format(
+ index=index, key=k, type=type(v)))
+ self.__process(result, data, 0, {}, templar, variables)
+ return result
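+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# The recursion in __process() is a depth-first product over the terms:
+# each level fixes one key before descending. The same idea without
+# templating, over plain lists only:
+if __name__ == '__main__':
+    def product(terms, index=0, current=None, result=None):
+        current = {} if current is None else current
+        result = [] if result is None else result
+        if index == len(terms):
+            result.append(current.copy())
+            return result
+        key, values = terms[index]
+        for value in values:
+            current[key] = value
+            product(terms, index + 1, current, result)
+        return result
+
+    print(product([('key1', ['a', 'b']), ('key2', ['x', 'y'])]))
+    # [{'key1': 'a', 'key2': 'x'}, {'key1': 'a', 'key2': 'y'},
+    #  {'key1': 'b', 'key2': 'x'}, {'key1': 'b', 'key2': 'y'}]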
diff --git a/ansible_collections/community/general/plugins/lookup/dig.py b/ansible_collections/community/general/plugins/lookup/dig.py
new file mode 100644
index 000000000..fa915220b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/dig.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: dig
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ short_description: query DNS using the dnspython library
+ requirements:
+ - dnspython (python library, http://www.dnspython.org/)
+ description:
+ - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name).
+      It is possible to look up any DNS record in this manner.
+    - There are a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name.
+ It is also possible to explicitly specify the DNS server(s) to use for lookups.
+    - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with an FQDN.
+    - In addition to the (default) A record, it is also possible to specify a different record type that should be queried.
+      This can be done by either passing in an additional parameter of the form qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried.
+ - If multiple values are associated with the requested record, the results will be returned as a comma-separated list.
+ In such cases you may want to pass option I(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup),
+ which will result in the record values being returned as a list over which you can iterate later on.
+ - By default, the lookup will rely on system-wide configured DNS servers for performing the query.
+ It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation.
+      This needs to be passed in as an additional parameter to the lookup.
+ options:
+ _terms:
+ description: Domain(s) to query.
+ type: list
+ elements: str
+ qtype:
+ description:
+ - Record type to query.
+ - C(DLV) has been removed in community.general 6.0.0.
+ - C(CAA) has been added in community.general 6.3.0.
+ type: str
+ default: 'A'
+ choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT]
+ flat:
+      description: If 0, each record is returned as a dictionary, otherwise as a string.
+ type: int
+ default: 1
+ retry_servfail:
+ description: Retry a nameserver if it returns SERVFAIL.
+ default: false
+ type: bool
+ version_added: 3.6.0
+ fail_on_error:
+ description:
+ - Abort execution on lookup errors.
+ - The default for this option will likely change to C(true) in the future.
+ The current default, C(false), is used for backwards compatibility, and will result in empty strings
+ or the string C(NXDOMAIN) in the result in case of errors.
+ default: false
+ type: bool
+ version_added: 5.4.0
+ real_empty:
+ description:
+        - Return an empty result without empty strings, and return an empty list instead of C(NXDOMAIN).
+ - The default for this option will likely change to C(true) in the future.
+ - This option will be forced to C(true) if multiple domains to be queried are specified.
+ default: false
+ type: bool
+ version_added: 6.0.0
+ class:
+ description:
+ - "Class."
+ type: str
+ default: 'IN'
+ notes:
+    - ALL is not a record type per se; it merely lists the fields that are available for any record result you retrieve in the form of a dictionary.
+ - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary.
+ - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly.
+ Syntax for specifying the record type is shown in the examples below.
+ - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake.
+'''
+
+EXAMPLES = """
+- name: Simple A record (IPV4 address) lookup for example.com
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.com.')}}"
+
+- name: "The TXT record for example.org."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org.', qtype='TXT') }}"
+
+- name: "The TXT record for example.org, alternative syntax."
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org./TXT') }}"
+
+- name: use in a loop
+ ansible.builtin.debug:
+ msg: "MX record for gmail.com {{ item }}"
+ with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}"
+
+- name: Lookup multiple names at once
+ ansible.builtin.debug:
+ msg: "A record found {{ item }}"
+ loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}"
+
+- name: Lookup multiple names at once (from list variable)
+ ansible.builtin.debug:
+ msg: "A record found {{ item }}"
+ loop: "{{ query('community.general.dig', *hosts) }}"
+ vars:
+ hosts:
+ - example.org.
+ - example.com.
+ - gmail.com.
+
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa./PTR') }}"
+- ansible.builtin.debug:
+ msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '5.2.0.192.in-addr.arpa.', qtype='PTR') }}"
+- ansible.builtin.debug:
+ msg: "Querying 198.51.100.23 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@198.51.100.23') }}"
+
+- ansible.builtin.debug:
+ msg: "XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
+ with_items: "{{ lookup('community.general.dig', '_xmpp-server._tcp.gmail.com./SRV', flat=0, wantlist=true) }}"
+
+- name: Retry nameservers that return SERVFAIL
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}"
+"""
+
+RETURN = """
+ _list:
+ description:
+      - List of composed strings or dictionaries with key and value.
+        If a dictionary, the fields below show the keys returned for each query type.
+ type: list
+ elements: raw
+ contains:
+ ALL:
+ description:
+ - owner, ttl, type
+ A:
+ description:
+ - address
+ AAAA:
+ description:
+ - address
+ CAA:
+ description:
+ - flags
+ - tag
+ - value
+ version_added: 6.3.0
+ CNAME:
+ description:
+ - target
+ DNAME:
+ description:
+ - target
+ DNSKEY:
+ description:
+ - flags, algorithm, protocol, key
+ DS:
+ description:
+ - algorithm, digest_type, key_tag, digest
+ HINFO:
+ description:
+ - cpu, os
+ LOC:
+ description:
+ - latitude, longitude, altitude, size, horizontal_precision, vertical_precision
+ MX:
+ description:
+ - preference, exchange
+ NAPTR:
+ description:
+ - order, preference, flags, service, regexp, replacement
+ NS:
+ description:
+ - target
+ NSEC3PARAM:
+ description:
+ - algorithm, flags, iterations, salt
+ PTR:
+ description:
+ - target
+ RP:
+ description:
+ - mbox, txt
+ SOA:
+ description:
+ - mname, rname, serial, refresh, retry, expire, minimum
+ SPF:
+ description:
+ - strings
+ SRV:
+ description:
+ - priority, weight, port, target
+ SSHFP:
+ description:
+ - algorithm, fp_type, fingerprint
+ TLSA:
+ description:
+ - usage, selector, mtype, cert
+ TXT:
+ description:
+ - strings
+"""
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.utils.display import Display
+import socket
+
+try:
+ import dns.exception
+ import dns.name
+ import dns.resolver
+ import dns.reversename
+ import dns.rdataclass
+ from dns.rdatatype import (A, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC,
+ MX, NAPTR, NS, NSEC3PARAM, PTR, RP, SOA, SPF, SRV, SSHFP, TLSA, TXT)
+ HAVE_DNS = True
+except ImportError:
+ HAVE_DNS = False
+
+
+display = Display()
+
+
+def make_rdata_dict(rdata):
+ ''' While the 'dig' lookup plugin supports anything which dnspython supports
+ out of the box, the following supported_types list describes which
+ DNS query types we can convert to a dict.
+
+ Note: adding support for RRSIG is hard work. :)
+ '''
+ supported_types = {
+ A: ['address'],
+ AAAA: ['address'],
+ CAA: ['flags', 'tag', 'value'],
+ CNAME: ['target'],
+ DNAME: ['target'],
+ DNSKEY: ['flags', 'algorithm', 'protocol', 'key'],
+ DS: ['algorithm', 'digest_type', 'key_tag', 'digest'],
+ HINFO: ['cpu', 'os'],
+ LOC: ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
+ MX: ['preference', 'exchange'],
+ NAPTR: ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
+ NS: ['target'],
+ NSEC3PARAM: ['algorithm', 'flags', 'iterations', 'salt'],
+ PTR: ['target'],
+ RP: ['mbox', 'txt'],
+ # RRSIG: ['type_covered', 'algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'key_tag', 'signer', 'signature'],
+ SOA: ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
+ SPF: ['strings'],
+ SRV: ['priority', 'weight', 'port', 'target'],
+ SSHFP: ['algorithm', 'fp_type', 'fingerprint'],
+ TLSA: ['usage', 'selector', 'mtype', 'cert'],
+ TXT: ['strings'],
+ }
+
+ rd = {}
+
+ if rdata.rdtype in supported_types:
+ fields = supported_types[rdata.rdtype]
+ for f in fields:
+            val = getattr(rdata, f)
+
+ if isinstance(val, dns.name.Name):
+ val = dns.name.Name.to_text(val)
+
+ if rdata.rdtype == DS and f == 'digest':
+ val = dns.rdata._hexify(rdata.digest).replace(' ', '')
+ if rdata.rdtype == DNSKEY and f == 'algorithm':
+ val = int(val)
+ if rdata.rdtype == DNSKEY and f == 'key':
+ val = dns.rdata._base64ify(rdata.key).replace(' ', '')
+ if rdata.rdtype == NSEC3PARAM and f == 'salt':
+ val = dns.rdata._hexify(rdata.salt).replace(' ', '')
+ if rdata.rdtype == SSHFP and f == 'fingerprint':
+ val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
+ if rdata.rdtype == TLSA and f == 'cert':
+ val = dns.rdata._hexify(rdata.cert).replace(' ', '')
+
+ rd[f] = val
+
+ return rd
+
+
+# ==============================================================
+# dig: Lookup DNS records
+#
+# --------------------------------------------------------------
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ '''
+ terms contains a string with things to `dig' for. We support the
+ following formats:
+ example.com # A record
+ example.com qtype=A # same
+ example.com/TXT # specific qtype
+ example.com qtype=txt # same
+ 192.0.2.23/PTR # reverse PTR
+ ^^ shortcut for 23.2.0.192.in-addr.arpa/PTR
+ example.net/AAAA @nameserver # query specified server
+ ^^^ can be comma-sep list of names/addresses
+
+ ... flat=0 # returns a dict; default is 1 == string
+ '''
+        if not HAVE_DNS:
+ raise AnsibleError("The dig lookup requires the python 'dnspython' library and it is not installed")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ # Create Resolver object so that we can set NS if necessary
+ myres = dns.resolver.Resolver(configure=True)
+ edns_size = 4096
+ myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
+
+ domains = []
+ qtype = self.get_option('qtype')
+ flat = self.get_option('flat')
+ fail_on_error = self.get_option('fail_on_error')
+ real_empty = self.get_option('real_empty')
+ try:
+ rdclass = dns.rdataclass.from_text(self.get_option('class'))
+ except Exception as e:
+ raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+ myres.retry_servfail = self.get_option('retry_servfail')
+
+ for t in terms:
+ if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok.
+ nsset = t[1:].split(',')
+ for ns in nsset:
+ nameservers = []
+ # Check if we have a valid IP address. If so, use that, otherwise
+ # try to resolve name to address using system's resolver. If that
+ # fails we bail out.
+ try:
+ socket.inet_aton(ns)
+ nameservers.append(ns)
+ except Exception:
+ try:
+ nsaddr = dns.resolver.query(ns)[0].address
+ nameservers.append(nsaddr)
+ except Exception as e:
+ raise AnsibleError("dns lookup NS: %s" % to_native(e))
+ myres.nameservers = nameservers
+ continue
+            if '=' in t:
+                opt, arg = t.split('=', 1)
+
+ if opt == 'qtype':
+ qtype = arg.upper()
+ elif opt == 'flat':
+ flat = int(arg)
+ elif opt == 'class':
+ try:
+ rdclass = dns.rdataclass.from_text(arg)
+ except Exception as e:
+ raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e))
+ elif opt == 'retry_servfail':
+ myres.retry_servfail = boolean(arg)
+ elif opt == 'fail_on_error':
+ fail_on_error = boolean(arg)
+ elif opt == 'real_empty':
+ real_empty = boolean(arg)
+
+ continue
+
+ if '/' in t:
+ try:
+ domain, qtype = t.split('/')
+ domains.append(domain)
+ except Exception:
+ domains.append(t)
+ else:
+ domains.append(t)
+
+ # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass)
+
+ if qtype.upper() == 'PTR':
+ reversed_domains = []
+ for domain in domains:
+ try:
+ n = dns.reversename.from_address(domain)
+ reversed_domains.append(n.to_text())
+ except dns.exception.SyntaxError:
+ pass
+ except Exception as e:
+ raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e))
+ domains = reversed_domains
+
+ if len(domains) > 1:
+ real_empty = True
+
+ ret = []
+
+ for domain in domains:
+ try:
+ answers = myres.query(domain, qtype, rdclass=rdclass)
+ for rdata in answers:
+ s = rdata.to_text()
+ if qtype.upper() == 'TXT':
+ s = s[1:-1] # Strip outside quotes on TXT rdata
+
+ if flat:
+ ret.append(s)
+ else:
+ try:
+ rd = make_rdata_dict(rdata)
+ rd['owner'] = answers.canonical_name.to_text()
+ rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
+ rd['ttl'] = answers.rrset.ttl
+ rd['class'] = dns.rdataclass.to_text(rdata.rdclass)
+
+ ret.append(rd)
+ except Exception as err:
+ if fail_on_error:
+ raise AnsibleError("Lookup failed: %s" % str(err))
+ ret.append(str(err))
+
+ except dns.resolver.NXDOMAIN as err:
+ if fail_on_error:
+ raise AnsibleError("Lookup failed: %s" % str(err))
+ if not real_empty:
+ ret.append('NXDOMAIN')
+ except dns.resolver.NoAnswer as err:
+ if fail_on_error:
+ raise AnsibleError("Lookup failed: %s" % str(err))
+ if not real_empty:
+ ret.append("")
+ except dns.resolver.Timeout as err:
+ if fail_on_error:
+ raise AnsibleError("Lookup failed: %s" % str(err))
+ if not real_empty:
+ ret.append("")
+ except dns.exception.DNSException as err:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err))
+
+ return ret
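+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# What the lookup does per domain, reduced to plain dnspython calls;
+# requires dnspython, and the domain and record type are arbitrary:
+if __name__ == '__main__':
+    import dns.rdataclass
+    import dns.resolver
+    resolver = dns.resolver.Resolver(configure=True)
+    resolver.retry_servfail = True
+    for rdata in resolver.query('example.com', 'A', rdclass=dns.rdataclass.IN):
+        print(rdata.to_text())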
diff --git a/ansible_collections/community/general/plugins/lookup/dnstxt.py b/ansible_collections/community/general/plugins/lookup/dnstxt.py
new file mode 100644
index 000000000..55067dc82
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/dnstxt.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: dnstxt
+ author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+  short_description: query the DNS TXT records of one or more domains
+ requirements:
+ - dns/dns.resolver (python library)
+ description:
+ - Uses a python library to return the DNS TXT record for a domain.
+ options:
+ _terms:
+ description: domain or list of domains to query TXT records from
+ required: true
+ type: list
+ elements: string
+ real_empty:
+ description:
+        - Return an empty result without empty strings, and return an empty list instead of C(NXDOMAIN).
+ - The default for this option will likely change to C(true) in the future.
+ default: false
+ type: bool
+ version_added: 6.0.0
+'''
+
+EXAMPLES = """
+- name: show txt entry
+ ansible.builtin.debug:
+ msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}"
+
+- name: iterate over txt entries
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt:
+ - 'test.example.com'
+ - 'other.example.com'
+ - 'last.example.com'
+
+- name: iterate over a comma-delimited DNS TXT entry
+ ansible.builtin.debug:
+ msg: "{{item}}"
+ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}"
+"""
+
+RETURN = """
+ _list:
+ description:
+ - values returned by the DNS TXT record.
+ type: list
+"""
+
+HAVE_DNS = False
+try:
+ import dns.resolver
+ from dns.exception import DNSException
+ HAVE_DNS = True
+except ImportError:
+ pass
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.lookup import LookupBase
+
+# ==============================================================
+# DNSTXT: DNS TXT records
+#
+# key=domainname
+# TODO: configurable resolver IPs
+# --------------------------------------------------------------
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+        if not HAVE_DNS:
+ raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
+
+ real_empty = self.get_option('real_empty')
+
+ ret = []
+ for term in terms:
+ domain = term.split()[0]
+ string = []
+ try:
+ answers = dns.resolver.query(domain, 'TXT')
+ for rdata in answers:
+ s = rdata.to_text()
+ string.append(s[1:-1]) # Strip outside quotes on TXT rdata
+
+ except dns.resolver.NXDOMAIN:
+ if real_empty:
+ continue
+ string = 'NXDOMAIN'
+ except dns.resolver.Timeout:
+ if real_empty:
+ continue
+ string = ''
+ except dns.resolver.NoAnswer:
+ if real_empty:
+ continue
+ string = ''
+ except DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
+
+ ret.append(''.join(string))
+
+ return ret
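+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# TXT rdata comes back quoted from dnspython's to_text(); the loop above
+# strips the outer quotes and joins all strings for one domain into a
+# single value. The rdata strings below are made up:
+if __name__ == '__main__':
+    rdatas = ['"v=spf1 include:example.net "', '"-all"']
+    print(''.join(s[1:-1] for s in rdatas))  # v=spf1 include:example.net -all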
diff --git a/ansible_collections/community/general/plugins/lookup/dsv.py b/ansible_collections/community/general/plugins/lookup/dsv.py
new file mode 100644
index 000000000..91a9d9921
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/dsv.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: dsv
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic DevOps Secrets Vault
+version_added: 1.0.0
+description:
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a
+ DSV I(tenant) using a I(client_id) and I(client_secret).
+requirements:
+ - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/
+options:
+ _terms:
+ description: The path to the secret, e.g. C(/staging/servers/web1).
+ required: true
+ tenant:
+ description: The first format parameter in the default I(url_template).
+ env:
+ - name: DSV_TENANT
+ ini:
+ - section: dsv_lookup
+ key: tenant
+ required: true
+ tld:
+ default: com
+ description: The top-level domain of the tenant; the second format
+ parameter in the default I(url_template).
+ env:
+ - name: DSV_TLD
+ ini:
+ - section: dsv_lookup
+ key: tld
+ required: false
+ client_id:
+ description: The client_id with which to request the Access Grant.
+ env:
+ - name: DSV_CLIENT_ID
+ ini:
+ - section: dsv_lookup
+ key: client_id
+ required: true
+ client_secret:
+ description: The client secret associated with the specific I(client_id).
+ env:
+ - name: DSV_CLIENT_SECRET
+ ini:
+ - section: dsv_lookup
+ key: client_secret
+ required: true
+ url_template:
+ default: https://{}.secretsvaultcloud.{}/v1
+ description: The path to prepend to the base URL to form a valid REST
+ API request.
+ env:
+ - name: DSV_URL_TEMPLATE
+ ini:
+ - section: dsv_lookup
+ key: url_template
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - One or more JSON responses to C(GET /secrets/{path}).
+ - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: "{{ lookup('community.general.dsv', '/test/secret') }}"
+ tasks:
+ - ansible.builtin.debug:
+ msg: 'the password is {{ secret["data"]["password"] }}'
+"""
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+
+sdk_is_missing = False
+
+try:
+ from thycotic.secrets.vault import (
+ SecretsVault,
+ SecretsVaultError,
+ )
+except ImportError:
+ sdk_is_missing = True
+
+from ansible.utils.display import Display
+from ansible.plugins.lookup import LookupBase
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def Client(vault_parameters):
+ try:
+ vault = SecretsVault(**vault_parameters)
+ return vault
+ except TypeError:
+ raise AnsibleError("python-dsv-sdk==0.0.1 must be installed to use this plugin")
+
+ def run(self, terms, variables, **kwargs):
+ if sdk_is_missing:
+ raise AnsibleError("python-dsv-sdk==0.0.1 must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ vault = LookupModule.Client(
+ {
+ "tenant": self.get_option("tenant"),
+ "client_id": self.get_option("client_id"),
+ "client_secret": self.get_option("client_secret"),
+ "tld": self.get_option("tld"),
+ "url_template": self.get_option("url_template"),
+ }
+ )
+ result = []
+
+ for term in terms:
+ display.debug("dsv_lookup term: %s" % term)
+ try:
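+                # Note: str.lstrip() takes a set of characters, so this strips
+                # any leading '[', '/', ':' or ']' characters from the term,
+                # not the literal prefix "[/:]".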
+ path = term.lstrip("[/:]")
+
+ if path == "":
+ raise AnsibleOptionsError("Invalid secret path: %s" % term)
+
+ display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path)
+ result.append(vault.get_secret_json(path))
+ except SecretsVaultError as error:
+ raise AnsibleError(
+ "DevOps Secrets Vault lookup failure: %s" % error.message
+ )
+ return result
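+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# Direct use of the python-dsv-sdk calls that the lookup wraps; the
+# tenant and credentials are placeholders, and the SDK defaults for
+# tld/url_template are assumed:
+if __name__ == '__main__':
+    vault = SecretsVault(tenant='mytenant', client_id='<client-id>', client_secret='<client-secret>')
+    print(vault.get_secret_json('test/secret'))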
diff --git a/ansible_collections/community/general/plugins/lookup/etcd.py b/ansible_collections/community/general/plugins/lookup/etcd.py
new file mode 100644
index 000000000..d6a12293e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/etcd.py
@@ -0,0 +1,169 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
+# (m) 2016, Mihai Moldovanu <mihaim@tfm.ro>
+# (m) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Jan-Piet Mens (@jpmens)
+ name: etcd
+ short_description: get info from an etcd server
+ description:
+ - Retrieves data from an etcd server
+ options:
+ _terms:
+ description:
+        - The list of keys to look up on the etcd server.
+ type: list
+ elements: string
+ required: true
+ url:
+ description:
+        - The URL of the etcd server.
+ default: 'http://127.0.0.1:4001'
+ env:
+ - name: ANSIBLE_ETCD_URL
+ version:
+ description:
+        - The etcd protocol version to use.
+ default: 'v1'
+ env:
+ - name: ANSIBLE_ETCD_VERSION
+ validate_certs:
+ description:
+        - Toggle checking that the SSL certificates are valid; you normally only want to turn this off with self-signed certificates.
+ default: true
+ type: boolean
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}"
+
+- name: "since Ansible 2.5 you can set server options inline"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - list of values associated with input keys
+ type: list
+ elements: string
+'''
+
+import json
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url
+
+# this can be made configurable, but should not use ansible.cfg
+#
+# Made module configurable from playbooks:
+# If etcd v2 is running on host 192.168.1.21 on port 2379,
+# we can use the following in a playbook to retrieve the /tfm/network/config key
+#
+# - ansible.builtin.debug: msg={{lookup('etcd','/tfm/network/config', url='http://192.168.1.21:2379' , version='v2')}}
+#
+# Example Output:
+#
+# TASK [debug] *******************************************************************
+# ok: [localhost] => {
+# "msg": {
+# "Backend": {
+# "Type": "vxlan"
+# },
+# "Network": "172.30.0.0/16",
+# "SubnetLen": 24
+# }
+# }
+#
+
+
+class Etcd:
+ def __init__(self, url, version, validate_certs):
+ self.url = url
+ self.version = version
+ self.baseurl = '%s/%s/keys' % (self.url, self.version)
+ self.validate_certs = validate_certs
+
+ def _parse_node(self, node):
+        # This function receives the full etcd tree. If the requested
+        # level is a directory, it recurses into each child node and
+        # builds a nested dict; once it reaches a plain value, it
+        # returns that value, unwinding the recursion.
+ path = {}
+ if node.get('dir', False):
+ for n in node.get('nodes', []):
+ path[n['key'].split('/')[-1]] = self._parse_node(n)
+
+ else:
+ path = node['value']
+
+ return path
+
+ def get(self, key):
+ url = "%s/%s?recursive=true" % (self.baseurl, key)
+ data = None
+ value = {}
+ try:
+ r = open_url(url, validate_certs=self.validate_certs)
+ data = r.read()
+ except Exception:
+ return None
+
+        # Note: folder parsing is only supported for etcd v2
+        item = json.loads(data)
+        if self.version == 'v1':
+            # etcd v1 responses carry the value directly
+            if 'value' in item:
+                value = item['value']
+        else:
+            if 'node' in item:
+                # A regular etcd v2 result carries a 'node' entry
+                value = self._parse_node(item['node'])
+
+            if 'errorCode' in item:
+                # Unknown keys are signalled with an errorCode; return 'ENOENT'
+                value = "ENOENT"
+
+ return value
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ validate_certs = self.get_option('validate_certs')
+ url = self.get_option('url')
+ version = self.get_option('version')
+
+ etcd = Etcd(url=url, version=version, validate_certs=validate_certs)
+
+ ret = []
+ for term in terms:
+ key = term.split()[0]
+ value = etcd.get(key)
+ ret.append(value)
+ return ret
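+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# The Etcd helper above can be exercised on its own; the URL points at a
+# hypothetical local etcd v2 instance:
+if __name__ == '__main__':
+    etcd = Etcd(url='http://127.0.0.1:2379', version='v2', validate_certs=True)
+    # Prints the value (or a nested folder dict), 'ENOENT' for unknown
+    # keys, or None when the server cannot be reached.
+    print(etcd.get('foo/bar'))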
diff --git a/ansible_collections/community/general/plugins/lookup/etcd3.py b/ansible_collections/community/general/plugins/lookup/etcd3.py
new file mode 100644
index 000000000..7f0a0cf90
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/etcd3.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Eric Belhomme (@eric-belhomme) <ebelhomme@fr.scc.com>
+ version_added: '0.2.0'
+ name: etcd3
+ short_description: Get key values from etcd3 server
+ description:
+ - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API.
+    - Tries to reuse M(community.general.etcd3) options for connection parameters, and adds support for some C(ETCDCTL_*) environment variables.
+ - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview.
+
+ options:
+ _terms:
+ description:
+ - The list of keys (or key prefixes) to look up on the etcd3 server.
+ type: list
+ elements: str
+ required: true
+ prefix:
+ description:
+ - Look for key or prefix key.
+ type: bool
+ default: false
+ endpoints:
+ description:
+ - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable.
+          Specify the etcd3 connection in URL form, for example C(https://hostname:2379), or in C(<host>:<port>) form.
+ - The C(host) part is overwritten by I(host) option, if defined.
+ - The C(port) part is overwritten by I(port) option, if defined.
+ env:
+ - name: ETCDCTL_ENDPOINTS
+ default: '127.0.0.1:2379'
+ type: str
+ host:
+ description:
+ - etcd3 listening client host.
+ - Takes precedence over I(endpoints).
+ type: str
+ port:
+ description:
+ - etcd3 listening client port.
+ - Takes precedence over I(endpoints).
+ type: int
+ ca_cert:
+ description:
+ - etcd3 CA authority.
+ env:
+ - name: ETCDCTL_CACERT
+ type: str
+ cert_cert:
+ description:
+ - etcd3 client certificate.
+ env:
+ - name: ETCDCTL_CERT
+ type: str
+ cert_key:
+ description:
+ - etcd3 client private key.
+ env:
+ - name: ETCDCTL_KEY
+ type: str
+ timeout:
+ description:
+ - Client timeout.
+ default: 60
+ env:
+ - name: ETCDCTL_DIAL_TIMEOUT
+ type: int
+ user:
+ description:
+ - Authenticated user name.
+ env:
+ - name: ETCDCTL_USER
+ type: str
+ password:
+ description:
+ - Authenticated user password.
+ env:
+ - name: ETCDCTL_PASSWORD
+ type: str
+
+ notes:
+    - I(host) and I(port) options take precedence over the I(endpoints) option.
+    - The recommended way to connect to the etcd3 server is using the C(ETCDCTL_ENDPOINTS)
+      environment variable and keeping I(endpoints), I(host), and I(port) unused.
+ seealso:
+ - module: community.general.etcd3
+ - ref: ansible_collections.community.general.etcd_lookup
+ description: The etcd v2 lookup.
+
+ requirements:
+ - "etcd3 >= 0.10"
+'''
+
+EXAMPLES = '''
+- name: "a value from a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}"
+
+- name: "values from multiple folders on a locally running etcd"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo', 'bar', 'baz') }}"
+
+- name: "look for a key prefix"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', '/foo/bar', prefix=True) }}"
+
+- name: "connect to etcd3 with a client certificate"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - List of keys and associated values.
+ type: list
+ elements: dict
+ contains:
+ key:
+ description: The element's key.
+ type: str
+ value:
+ description: The element's value.
+ type: str
+'''
+
+import re
+
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ import etcd3
+ HAS_ETCD = True
+except ImportError:
+ HAS_ETCD = False
+
+display = Display()
+
+etcd3_cnx_opts = (
+ 'host',
+ 'port',
+ 'ca_cert',
+ 'cert_key',
+ 'cert_cert',
+ 'timeout',
+ 'user',
+ 'password',
+    # The 'grpc_options' Etcd3Client() option is currently not supported by this lookup module (maybe in the future?)
+)
+
+
+def etcd3_client(client_params):
+ try:
+ etcd = etcd3.client(**client_params)
+ etcd.status()
+ except Exception as exp:
+ raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp)))
+ return etcd
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ if not HAS_ETCD:
+ display.error(missing_required_lib('etcd3'))
+ return None
+
+ # create the etcd3 connection parameters dict to pass to etcd3 class
+ client_params = {}
+
+        # The etcd3 client expects host and port as connection parameters, so the
+        # endpoints value must be mangled a bit to fit this scheme;
+        # a regex extracts the server and port parts.
+ match = re.compile(
+ r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
+ ).match(self.get_option('endpoints'))
+ if match:
+ if match.group('host'):
+ client_params['host'] = match.group('host')
+ if match.group('port'):
+ client_params['port'] = match.group('port')
+
+ for opt in etcd3_cnx_opts:
+ if self.get_option(opt):
+ client_params[opt] = self.get_option(opt)
+
+ cnx_log = dict(client_params)
+ if 'password' in cnx_log:
+ cnx_log['password'] = '<redacted>'
+ display.verbose("etcd3 connection parameters: %s" % cnx_log)
+
+ # connect to etcd3 server
+ etcd = etcd3_client(client_params)
+
+ ret = []
+ # we can pass many keys to lookup
+ for term in terms:
+ if self.get_option('prefix'):
+ try:
+ for val, meta in etcd.get_prefix(term):
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get_prefix: %s' % (to_native(exp)))
+ else:
+ try:
+ val, meta = etcd.get(term)
+ if val and meta:
+ ret.append({'key': to_native(meta.key), 'value': to_native(val)})
+ except Exception as exp:
+                    display.warning('Caught exception during etcd3.get: %s' % (to_native(exp)))
+ return ret
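+
+
+# --- Illustrative sketch (not part of the plugin) ---
+# How the endpoint regex above splits an ETCDCTL_ENDPOINTS-style URL into
+# the host/port parameters that etcd3.client() expects:
+if __name__ == '__main__':
+    pattern = re.compile(
+        r'^(https?://)?(?P<host>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})|([-_\d\w\.]+))(:(?P<port>\d{1,5}))?/?$'
+    )
+    match = pattern.match('https://etcd.example.com:2379')
+    print(match.group('host'), match.group('port'))  # etcd.example.com 2379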
diff --git a/ansible_collections/community/general/plugins/lookup/filetree.py b/ansible_collections/community/general/plugins/lookup/filetree.py
new file mode 100644
index 000000000..f12cc4519
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/filetree.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016 Dag Wieers <dag@wieers.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+name: filetree
+author: Dag Wieers (@dagwieers) <dag@wieers.com>
+short_description: recursively match all files in a directory tree
+description:
+- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership.
+- Supports directories, files and symlinks, including SELinux and other file properties.
+- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths.
+  This enables merging different trees in order of importance, or adding role_vars to specific paths to influence different instances of the same role.
+options:
+ _terms:
+ description: path(s) of files to read
+ required: true
+'''
+
+EXAMPLES = r"""
+- name: Create directories
+ ansible.builtin.file:
+ path: /web/{{ item.path }}
+ state: directory
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'directory'
+
+- name: Template files (explicitly skip directories in order to use the 'src' attribute)
+ ansible.builtin.template:
+ src: '{{ item.src }}'
+ # Your template files should be stored with a .j2 file extension,
+ # but should not be deployed with it. splitext|first removes it.
+ dest: /web/{{ item.path | splitext | first }}
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'file'
+
+- name: Recreate symlinks
+ ansible.builtin.file:
+ src: '{{ item.src }}'
+ dest: /web/{{ item.path }}
+ state: link
+ follow: false # avoid corrupting target files if the link already exists
+ force: true
+ mode: '{{ item.mode }}'
+ with_community.general.filetree: web/
+ when: item.state == 'link'
+
+- name: list all files under web/
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.filetree', 'web/') }}"
+"""
+
+RETURN = r"""
+ _raw:
+ description: List of dictionaries with file information.
+ type: list
+ elements: dict
+ contains:
+ src:
+ description:
+ - Full path to file.
+ - Not returned when I(item.state) is set to C(directory).
+ type: path
+ root:
+ description: Allows filtering by original location.
+ type: path
+ path:
+ description: Contains the relative path to root.
+ type: path
+ mode:
+      description: The permissions of the resulting file or directory.
+ type: str
+ state:
+      description: The state of the item; one of C(file), C(directory), or C(link).
+ type: str
+ owner:
+ description: Name of the user that owns the file/directory.
+ type: raw
+ group:
+ description: Name of the group that owns the file/directory.
+ type: raw
+ seuser:
+ description: The user part of the SELinux file context.
+ type: raw
+ serole:
+ description: The role part of the SELinux file context.
+ type: raw
+ setype:
+ description: The type part of the SELinux file context.
+ type: raw
+ selevel:
+ description: The level part of the SELinux file context.
+ type: raw
+ uid:
+ description: Owner ID of the file/directory.
+ type: int
+ gid:
+ description: Group ID of the file/directory.
+ type: int
+ size:
+ description: Size of the target.
+ type: int
+ mtime:
+ description: Time of last modification.
+ type: float
+ ctime:
+ description: Time of last metadata update or creation (depends on OS).
+ type: float
+"""
+import os
+import pwd
+import grp
+import stat
+
+HAVE_SELINUX = False
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ pass
+
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.utils.display import Display
+
+display = Display()
+
+
+# If selinux fails to find a default, return an array of None
+def selinux_context(path):
+ context = [None, None, None, None]
+ if HAVE_SELINUX and selinux.is_selinux_enabled():
+ try:
+ # note: the selinux module uses byte strings on python2 and text
+ # strings on python3
+ ret = selinux.lgetfilecon_raw(to_native(path))
+ except OSError:
+ return context
+ if ret[0] != -1:
+ # Limit split to 4 because the selevel, the last in the list,
+ # may contain ':' characters
+ context = ret[1].split(':', 3)
+ return context
+
+
+def file_props(root, path):
+ ''' Returns dictionary with file properties, or return None on failure '''
+ abspath = os.path.join(root, path)
+
+ try:
+ st = os.lstat(abspath)
+ except OSError as e:
+        display.warning('filetree: Error using lstat() on path %s (%s)' % (abspath, e))
+ return None
+
+ ret = dict(root=root, path=path)
+
+ if stat.S_ISLNK(st.st_mode):
+ ret['state'] = 'link'
+ ret['src'] = os.readlink(abspath)
+ elif stat.S_ISDIR(st.st_mode):
+ ret['state'] = 'directory'
+ elif stat.S_ISREG(st.st_mode):
+ ret['state'] = 'file'
+ ret['src'] = abspath
+ else:
+        display.warning('filetree: file type of %s is not supported' % abspath)
+ return None
+
+ ret['uid'] = st.st_uid
+ ret['gid'] = st.st_gid
+ try:
+ ret['owner'] = pwd.getpwuid(st.st_uid).pw_name
+ except KeyError:
+ ret['owner'] = st.st_uid
+ try:
+ ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name)
+ except KeyError:
+ ret['group'] = st.st_gid
+ ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode))
+ ret['size'] = st.st_size
+ ret['mtime'] = st.st_mtime
+ ret['ctime'] = st.st_ctime
+
+ if HAVE_SELINUX and selinux.is_selinux_enabled() == 1:
+ context = selinux_context(abspath)
+ ret['seuser'] = context[0]
+ ret['serole'] = context[1]
+ ret['setype'] = context[2]
+ ret['selevel'] = context[3]
+
+ return ret
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ basedir = self.get_basedir(variables)
+
+ ret = []
+ for term in terms:
+ term_file = os.path.basename(term)
+ dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
+ path = os.path.join(dwimmed_path, term_file)
+ display.debug("Walking '{0}'".format(path))
+ for root, dirs, files in os.walk(path, topdown=True):
+ for entry in dirs + files:
+ relpath = os.path.relpath(os.path.join(root, entry), path)
+
+                # Skip relpath if it was already processed (from an earlier root)
+                if relpath not in [found['path'] for found in ret]:
+ props = file_props(path, relpath)
+ if props is not None:
+ display.debug(" found '{0}'".format(os.path.join(path, relpath)))
+ ret.append(props)
+
+ return ret
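
The run() method above merges several trees with first-found semantics: the first root that provides a given relative path wins. A minimal standalone sketch of that logic, using only the standard library; the directory names are made up for illustration:

import os

def walk_first_found(roots):
    seen = set()
    results = []
    for root in roots:
        for dirpath, dirs, files in os.walk(root):
            for entry in dirs + files:
                relpath = os.path.relpath(os.path.join(dirpath, entry), root)
                if relpath not in seen:      # first root providing a path wins
                    seen.add(relpath)
                    results.append((root, relpath))
    return results

# Entries from 'site-overrides/' shadow identical paths in 'defaults/'.
print(walk_first_found(['site-overrides', 'defaults']))
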
diff --git a/ansible_collections/community/general/plugins/lookup/flattened.py b/ansible_collections/community/general/plugins/lookup/flattened.py
new file mode 100644
index 000000000..e955b6478
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/flattened.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: flattened
+ author: Serge van Ginderachter (!UNKNOWN) <serge@vanginderachter.be>
+ short_description: return single list completely flattened
+ description:
+ - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left.
+ options:
+ _terms:
+ description: lists to flatten
+ type: list
+ elements: raw
+ required: true
+ notes:
+ - Unlike the R(items lookup,ansible_collections.ansible.builtin.items_lookup) which only flattens 1 level,
+ this plugin will continue to flatten until it cannot find lists anymore.
+ - Aka highlander plugin, there can only be one (list).
+'''
+
+EXAMPLES = """
+- name: "'unnest' all elements into single list"
+ ansible.builtin.debug:
+ msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - flattened list
+ type: list
+"""
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import string_types
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+
+class LookupModule(LookupBase):
+
+ def _check_list_of_one_list(self, term):
+ # make sure term is not a list of one (list of one..) item
+ # return the final non list item if so
+
+ if isinstance(term, list) and len(term) == 1:
+ term = term[0]
+ if isinstance(term, list):
+ term = self._check_list_of_one_list(term)
+
+ return term
+
+ def _do_flatten(self, terms, variables):
+
+ ret = []
+ for term in terms:
+ term = self._check_list_of_one_list(term)
+
+            if term == 'None' or term == 'null':
+                # skip undefined items, but keep processing the remaining terms
+                continue
+
+ if isinstance(term, string_types):
+ # convert a variable to a list
+ try:
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar)
+ except TypeError:
+ # The loader argument is deprecated in ansible-core 2.14+. Fall back to
+ # pre-2.14 behavior for older ansible-core versions.
+ term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
+ # but avoid converting a plain string to a list of one string
+ if term2 != [term]:
+ term = term2
+
+ if isinstance(term, list):
+ # if it's a list, check recursively for items that are a list
+ term = self._do_flatten(term, variables)
+ ret.extend(term)
+ else:
+ ret.append(term)
+
+ return ret
+
+ def run(self, terms, variables=None, **kwargs):
+ if not isinstance(terms, list):
+ raise AnsibleError("with_flattened expects a list")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ return self._do_flatten(terms, variables)
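
A plain-Python sketch of the recursive flattening that _do_flatten() performs above, without the Ansible-specific conversion of string terms into lists:

def flatten(items):
    result = []
    for item in items:
        if isinstance(item, list):
            result.extend(flatten(item))  # recurse into nested lists
        else:
            result.append(item)
    return result

assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]
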
diff --git a/ansible_collections/community/general/plugins/lookup/hiera.py b/ansible_collections/community/general/plugins/lookup/hiera.py
new file mode 100644
index 000000000..fa4d0a199
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/hiera.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Juan Manuel Parrilla <jparrill@redhat.com>
+# Copyright (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Juan Manuel Parrilla (@jparrill)
+ name: hiera
+ short_description: get info from hiera data
+ requirements:
+ - hiera (command line utility)
+ description:
+    - Retrieves data from a Puppetmaster node using Hiera as an ENC.
+ options:
+ _terms:
+ description:
+ - The list of keys to lookup on the Puppetmaster.
+ type: list
+ elements: string
+ required: true
+ executable:
+ description:
+ - Binary file to execute Hiera.
+ default: '/usr/bin/hiera'
+ env:
+ - name: ANSIBLE_HIERA_BIN
+ config_file:
+ description:
+ - File that describes the hierarchy of Hiera.
+ default: '/etc/hiera.yaml'
+ env:
+ - name: ANSIBLE_HIERA_CFG
+# FIXME: incomplete options .. _terms? environment/fqdn?
+'''
+
+EXAMPLES = """
+# All these examples depend on a hiera.yaml file that describes the hierarchy
+
+- name: "a value from Hiera 'DB'"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo') }}"
+
+- name: "a value from a Hiera 'DB' on other environment"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo environment=production') }}"
+
+- name: "a value from a Hiera 'DB' for a concrete node"
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}"
+"""
+
+RETURN = """
+ _raw:
+ description:
+ - a value associated with input key
+ type: list
+ elements: str
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.cmd_functions import run_cmd
+from ansible.module_utils.common.text.converters import to_text
+
+
+class Hiera(object):
+ def __init__(self, hiera_cfg, hiera_bin):
+ self.hiera_cfg = hiera_cfg
+ self.hiera_bin = hiera_bin
+
+    def get(self, hiera_key):
+        # Only the first term is queried per call; run_cmd() executes the
+        # hiera binary against the configured hierarchy file.
+        rc, output, err = run_cmd("{0} -c {1} {2}".format(
+            self.hiera_bin, self.hiera_cfg, hiera_key[0]))
+
+        return to_text(output.strip())
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ hiera = Hiera(self.get_option('config_file'), self.get_option('executable'))
+ ret = [hiera.get(terms)]
+ return ret
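
A rough standalone equivalent of the shell call made through run_cmd() above, using subprocess; the binary and config paths mirror the plugin defaults and are assumptions about the local machine:

import subprocess

def hiera_get(key, hiera_bin='/usr/bin/hiera', config='/etc/hiera.yaml'):
    # check=True raises CalledProcessError if the hiera binary fails
    proc = subprocess.run([hiera_bin, '-c', config, key],
                          capture_output=True, text=True, check=True)
    return proc.stdout.strip()

# print(hiera_get('foo'))  # uncomment on a host with Hiera configured
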
diff --git a/ansible_collections/community/general/plugins/lookup/keyring.py b/ansible_collections/community/general/plugins/lookup/keyring.py
new file mode 100644
index 000000000..a4c914ed1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/keyring.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Samuel Boucher <boucher.samuel.c@gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: keyring
+ author:
+ - Samuel Boucher (!UNKNOWN) <boucher.samuel.c@gmail.com>
+ requirements:
+ - keyring (python library)
+ short_description: grab secrets from the OS keyring
+ description:
+ - Allows you to access data stored in the OS provided keyring/keychain.
+'''
+
+EXAMPLES = """
+- name: output secrets to screen (BAD IDEA)
+ ansible.builtin.debug:
+ msg: "Password: {{item}}"
+ with_community.general.keyring:
+ - 'servicename username'
+
+- name: access mysql with password from keyring
+ community.mysql.mysql_db:
+ login_password: "{{ lookup('community.general.keyring', 'mysql joe') }}"
+ login_user: joe
+"""
+
+RETURN = """
+ _raw:
+ description: Secrets stored.
+ type: list
+ elements: str
+"""
+
+HAS_KEYRING = True
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+try:
+ import keyring
+except ImportError:
+ HAS_KEYRING = False
+
+from ansible.plugins.lookup import LookupBase
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ if not HAS_KEYRING:
+ raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ display.vvvv(u"keyring: %s" % keyring.get_keyring())
+ ret = []
+ for term in terms:
+            parts = term.split()
+            servicename, username = parts[0], parts[1]
+ display.vvvv(u"username: %s, servicename: %s " % (username, servicename))
+ password = keyring.get_password(servicename, username)
+ if password is None:
+ raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username))
+ ret.append(password.rstrip())
+ return ret
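
A minimal sketch of the python-keyring calls the plugin wraps above; 'servicename' and 'username' are placeholders, and the snippet assumes the keyring package plus a matching entry in the OS keyring:

import keyring

def fetch_secret(servicename, username):
    # Returns None when no matching entry exists in the OS keyring
    password = keyring.get_password(servicename, username)
    if password is None:
        raise KeyError('%s/%s not found in the OS keyring' % (servicename, username))
    return password.rstrip()

# keyring.set_password('servicename', 'username', 's3cret')  # store a test entry
# print(fetch_secret('servicename', 'username'))
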
diff --git a/ansible_collections/community/general/plugins/lookup/lastpass.py b/ansible_collections/community/general/plugins/lookup/lastpass.py
new file mode 100644
index 000000000..8eb3090b7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/lastpass.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: lastpass
+ author:
+ - Andrew Zenk (!UNKNOWN) <azenk@umn.edu>
+ requirements:
+ - lpass (command line utility)
+ - must have already logged into LastPass
+ short_description: fetch data from LastPass
+ description:
+ - Use the lpass command line utility to fetch specific fields from LastPass.
+ options:
+ _terms:
+ description: Key from which you want to retrieve the field.
+ required: true
+ type: list
+ elements: str
+ field:
+ description: Field to return from LastPass.
+ default: 'password'
+ type: str
+'''
+
+EXAMPLES = """
+- name: get 'custom_field' from LastPass entry 'entry-name'
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}"
+"""
+
+RETURN = """
+ _raw:
+ description: secrets stored
+ type: list
+ elements: str
+"""
+
+from subprocess import Popen, PIPE
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.plugins.lookup import LookupBase
+
+
+class LPassException(AnsibleError):
+ pass
+
+
+class LPass(object):
+
+ def __init__(self, path='lpass'):
+ self._cli_path = path
+
+ @property
+ def cli_path(self):
+ return self._cli_path
+
+ @property
+ def logged_in(self):
+ out, err = self._run(self._build_args("logout"), stdin="n\n", expected_rc=1)
+ return err.startswith("Are you sure you would like to log out?")
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(to_bytes(stdin))
+ rc = p.wait()
+ if rc != expected_rc:
+ raise LPassException(err)
+ return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
+
+ def _build_args(self, command, args=None):
+ if args is None:
+ args = []
+ args = [command] + args
+ args += ["--color=never"]
+ return args
+
+ def get_field(self, key, field):
+ if field in ['username', 'password', 'url', 'notes', 'id', 'name']:
+ out, err = self._run(self._build_args("show", ["--{0}".format(field), key]))
+ else:
+ out, err = self._run(self._build_args("show", ["--field={0}".format(field), key]))
+ return out.strip()
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ field = self.get_option('field')
+
+ lp = LPass()
+
+ if not lp.logged_in:
+ raise AnsibleError("Not logged into LastPass: please run 'lpass login' first")
+
+ values = []
+ for term in terms:
+ values.append(lp.get_field(term, field))
+ return values
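
A standalone sketch of the lpass invocation that LPass.get_field() builds above, assuming the lpass CLI is installed and already logged in; 'entry-name' is a placeholder:

import subprocess

def lpass_field(key, field='password'):
    # Built-in fields use dedicated flags; anything else goes through --field=
    builtin = ('username', 'password', 'url', 'notes', 'id', 'name')
    flag = '--%s' % field if field in builtin else '--field=%s' % field
    proc = subprocess.run(['lpass', 'show', '--color=never', flag, key],
                          capture_output=True, text=True, check=True)
    return proc.stdout.strip()

# print(lpass_field('entry-name', 'custom_field'))
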
diff --git a/ansible_collections/community/general/plugins/lookup/lmdb_kv.py b/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
new file mode 100644
index 000000000..0950249dc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/lmdb_kv.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017-2018, Jan-Piet Mens <jpmens(at)gmail.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: lmdb_kv
+ author:
+ - Jan-Piet Mens (@jpmens)
+ version_added: '0.2.0'
+ short_description: fetch data from LMDB
+ description:
+ - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it.
+ requirements:
+ - lmdb (python library https://lmdb.readthedocs.io/en/release/)
+ options:
+ _terms:
+ description: List of keys to query.
+ type: list
+ elements: str
+ db:
+ description: Path to LMDB database.
+ type: str
+ default: 'ansible.mdb'
+ vars:
+ - name: lmdb_kv_db
+'''
+
+EXAMPLES = """
+- name: query LMDB for a list of country codes
+ ansible.builtin.debug:
+ msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}"
+
+- name: use list of values in a loop by key wildcard
+ ansible.builtin.debug:
+ msg: "Hello from {{ item.0 }} a.k.a. {{ item.1 }}"
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - "n*"
+
+- name: get an item by key
+ ansible.builtin.assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in LMDB
+ type: list
+ elements: raw
+"""
+
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+HAVE_LMDB = True
+try:
+ import lmdb
+except ImportError:
+ HAVE_LMDB = False
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ '''
+ terms contain any number of keys to be retrieved.
+ If terms is None, all keys from the database are returned
+ with their values, and if term ends in an asterisk, we
+ start searching there
+
+ The LMDB database defaults to 'ansible.mdb' if Ansible's
+ variable 'lmdb_kv_db' is not set:
+
+ vars:
+ - lmdb_kv_db: "jp.mdb"
+ '''
+ if HAVE_LMDB is False:
+ raise AnsibleError("Can't LOOKUP(lmdb_kv): this module requires lmdb to be installed")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ db = self.get_option('db')
+
+ try:
+ env = lmdb.open(str(db), readonly=True)
+ except Exception as e:
+ raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e)))
+
+ ret = []
+ if len(terms) == 0:
+ with env.begin() as txn:
+ cursor = txn.cursor()
+ cursor.first()
+ for key, value in cursor:
+ ret.append((to_text(key), to_native(value)))
+
+ else:
+ for term in terms:
+ with env.begin() as txn:
+                if term.endswith('*'):
+                    cursor = txn.cursor()
+                    prefix = to_text(term[:-1]).encode()  # strip asterisk
+                    cursor.set_range(prefix)  # position on the first key >= prefix
+                    # collect entries only while keys still match the prefix
+                    while cursor.key().startswith(prefix):
+                        ret.append((to_text(cursor.key()), to_native(cursor.value())))
+                        if not cursor.next():
+                            break
+ else:
+ value = txn.get(to_text(term).encode())
+ if value is not None:
+ ret.append(to_native(value))
+
+ return ret
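
A standalone sketch of the wildcard branch above as a bounded LMDB prefix scan; it requires the lmdb package, and 'jp.mdb' is the demo database from the examples:

import lmdb

def prefix_scan(db_path, prefix):
    env = lmdb.open(db_path, readonly=True)
    needle = prefix.encode()
    matches = []
    with env.begin() as txn:
        cursor = txn.cursor()
        cursor.set_range(needle)                 # jump to the first key >= prefix
        while cursor.key().startswith(needle):   # stop at the prefix boundary
            matches.append((cursor.key().decode(), cursor.value().decode()))
            if not cursor.next():                # stop at the end of the database
                break
    return matches

# print(prefix_scan('jp.mdb', 'n'))  # e.g. [('nl', 'Netherlands'), ...]
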
diff --git a/ansible_collections/community/general/plugins/lookup/manifold.py b/ansible_collections/community/general/plugins/lookup/manifold.py
new file mode 100644
index 000000000..049d453e4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/manifold.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Arigato Machine Inc.
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
+ name: manifold
+ short_description: get credentials from Manifold.co
+ description:
+ - Retrieves resources' credentials from Manifold.co
+ options:
+ _terms:
+ description:
+ - Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
+ matched resources will be returned.
+ type: list
+ elements: string
+ required: false
+ api_token:
+ description:
+ - manifold API token
+ type: string
+ required: true
+ env:
+ - name: MANIFOLD_API_TOKEN
+ project:
+ description:
+ - The project label you want to get the resource for.
+ type: string
+ required: false
+ team:
+ description:
+ - The team label you want to get the resource for.
+ type: string
+ required: false
+'''
+
+EXAMPLES = '''
+ - name: all available resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
+ - name: all available resources for a specific project in specific team
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
+ - name: two specific resources
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
+'''
+
+RETURN = '''
+ _raw:
+ description:
+ - dictionary of credentials ready to be consumed as environment variables. If multiple resources define
+ the same environment variable(s), the last one returned by the Manifold API will take precedence.
+ type: dict
+'''
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils import six
+from ansible.utils.display import Display
+from traceback import format_exception
+import json
+import sys
+
+display = Display()
+
+
+class ApiError(Exception):
+ pass
+
+
+class ManifoldApiClient(object):
+ base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
+ http_agent = 'python-manifold-ansible-1.0.0'
+
+ def __init__(self, token):
+ self._token = token
+
+ def request(self, api, endpoint, *args, **kwargs):
+ """
+ Send a request to API backend and pre-process a response.
+ :param api: API to send a request to
+ :type api: str
+ :param endpoint: API endpoint to fetch data from
+ :type endpoint: str
+ :param args: other args for open_url
+ :param kwargs: other kwargs for open_url
+ :return: server response. JSON response is automatically deserialized.
+ :rtype: dict | list | str
+ """
+
+ default_headers = {
+ 'Authorization': "Bearer {0}".format(self._token),
+ 'Accept': "*/*" # Otherwise server doesn't set content-type header
+ }
+
+ url = self.base_url.format(api=api, endpoint=endpoint)
+
+ headers = default_headers
+ arg_headers = kwargs.pop('headers', None)
+ if arg_headers:
+ headers.update(arg_headers)
+
+ try:
+ display.vvvv('manifold lookup connecting to {0}'.format(url))
+ response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
+ data = response.read()
+ if response.headers.get('content-type') == 'application/json':
+ data = json.loads(data)
+ return data
+ except ValueError:
+ raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
+ except HTTPError as e:
+ raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
+ err=str(e), url=url, response=e.read()))
+ except URLError as e:
+ raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
+ except SSLValidationError as e:
+ raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
+ except ConnectionError as e:
+ raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))
+
+ def get_resources(self, team_id=None, project_id=None, label=None):
+ """
+ Get resources list
+ :param team_id: ID of the Team to filter resources by
+ :type team_id: str
+ :param project_id: ID of the project to filter resources by
+ :type project_id: str
+ :param label: filter resources by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of resources
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'resources'
+ query_params = {}
+
+ if team_id:
+ query_params['team_id'] = team_id
+ if project_id:
+ query_params['project_id'] = project_id
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_teams(self, label=None):
+ """
+ Get teams list
+ :param label: filter teams by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of teams
+ :rtype: list
+ """
+ api = 'identity'
+ endpoint = 'teams'
+ data = self.request(api, endpoint)
+ # Label filtering is not supported by API, however this function provides uniform interface
+ if label:
+ data = list(filter(lambda x: x['body']['label'] == label, data))
+ return data
+
+ def get_projects(self, label=None):
+ """
+ Get projects list
+ :param label: filter projects by a label, returns a list with one or zero elements
+ :type label: str
+ :return: list of projects
+ :rtype: list
+ """
+ api = 'marketplace'
+ endpoint = 'projects'
+ query_params = {}
+
+ if label:
+ query_params['label'] = label
+
+ if query_params:
+ endpoint += '?' + urlencode(query_params)
+
+ return self.request(api, endpoint)
+
+ def get_credentials(self, resource_id):
+ """
+ Get resource credentials
+ :param resource_id: ID of the resource to filter credentials by
+ :type resource_id: str
+ :return:
+ """
+ api = 'marketplace'
+ endpoint = 'credentials?' + urlencode({'resource_id': resource_id})
+ return self.request(api, endpoint)
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ """
+ :param terms: a list of resources lookups to run.
+ :param variables: ansible variables active at the time of the lookup
+ :param api_token: API token
+ :param project: optional project label
+ :param team: optional team label
+ :return: a dictionary of resources credentials
+ """
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ api_token = self.get_option('api_token')
+ project = self.get_option('project')
+ team = self.get_option('team')
+
+ try:
+ labels = terms
+ client = ManifoldApiClient(api_token)
+
+ if team:
+ team_data = client.get_teams(team)
+ if len(team_data) == 0:
+ raise AnsibleError("Team '{0}' does not exist".format(team))
+ team_id = team_data[0]['id']
+ else:
+ team_id = None
+
+ if project:
+ project_data = client.get_projects(project)
+ if len(project_data) == 0:
+ raise AnsibleError("Project '{0}' does not exist".format(project))
+ project_id = project_data[0]['id']
+ else:
+ project_id = None
+
+ if len(labels) == 1: # Use server-side filtering if one resource is requested
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
+ else: # Get all resources and optionally filter labels
+ resources_data = client.get_resources(team_id=team_id, project_id=project_id)
+ if labels:
+ resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
+
+ if labels and len(resources_data) < len(labels):
+ fetched_labels = [r['body']['label'] for r in resources_data]
+ not_found_labels = [label for label in labels if label not in fetched_labels]
+ raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))
+
+ credentials = {}
+ cred_map = {}
+ for resource in resources_data:
+ resource_credentials = client.get_credentials(resource['id'])
+ if len(resource_credentials) and resource_credentials[0]['body']['values']:
+ for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
+ label = resource['body']['label']
+ if cred_key in credentials:
+ display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
+ "with label '{new_label}'".format(cred_key=cred_key,
+ old_label=cred_map[cred_key],
+ new_label=label))
+ credentials[cred_key] = cred_val
+ cred_map[cred_key] = label
+
+ ret = [credentials]
+ return ret
+ except ApiError as e:
+ raise AnsibleError('API Error: {0}'.format(str(e)))
+ except AnsibleError as e:
+ raise e
+ except Exception:
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
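
A plain-Python sketch of the credential merge performed in run() above, where later resources returned by the API overwrite keys from earlier ones; the resource payloads are invented for illustration:

def merge_credentials(resources):
    credentials = {}
    for resource in resources:            # API order decides precedence
        for key, value in resource['values'].items():
            credentials[key] = value      # later resources overwrite earlier ones
    return credentials

resources = [
    {'label': 'resource-1', 'values': {'DB_URL': 'postgres://a', 'TOKEN': 'x'}},
    {'label': 'resource-2', 'values': {'DB_URL': 'postgres://b'}},
]
assert merge_credentials(resources) == {'DB_URL': 'postgres://b', 'TOKEN': 'x'}
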
diff --git a/ansible_collections/community/general/plugins/lookup/merge_variables.py b/ansible_collections/community/general/plugins/lookup/merge_variables.py
new file mode 100644
index 000000000..cd5fa5b7d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/merge_variables.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+ author:
+ - Roy Lenferink (@rlenferink)
+ - Mark Ettema (@m-a-r-k-e)
+ name: merge_variables
+  short_description: merge variables matching a prefix, suffix, or regular expression
+  description:
+    - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or
+      regular expressions.
+ version_added: 6.5.0
+ options:
+ _terms:
+ description:
+ - Depending on the value of I(pattern_type), this is a list of prefixes, suffixes, or regular expressions
+ that will be used to match all variables that should be merged.
+ required: true
+ type: list
+ elements: str
+ pattern_type:
+ description:
+ - Change the way of searching for the specified pattern.
+ type: str
+ default: 'regex'
+ choices:
+ - prefix
+ - suffix
+ - regex
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE
+ ini:
+ - section: merge_variables_lookup
+ key: pattern_type
+ initial_value:
+ description:
+ - An initial value to start with.
+ type: raw
+ override:
+ description:
+ - Return an error, print a warning or ignore it when a key will be overwritten.
+ - The default behavior C(error) makes the plugin fail when a key would be overwritten.
+ - When C(warn) and C(ignore) are used, note that it is important to know that the variables
+ are sorted by name before being merged. Keys for later variables in this order will overwrite
+ keys of the same name for variables earlier in this order. To avoid potential confusion,
+ better use I(override=error) whenever possible.
+ type: str
+ default: 'error'
+ choices:
+ - error
+ - warn
+ - ignore
+ env:
+ - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE
+ ini:
+ - section: merge_variables_lookup
+ key: override
+"""
+
+EXAMPLES = """
+# Some example variables, they can be defined anywhere as long as they are in scope
+test_init_list:
+ - "list init item 1"
+ - "list init item 2"
+
+testa__test_list:
+ - "test a item 1"
+
+testb__test_list:
+ - "test b item 1"
+
+testa__test_dict:
+ ports:
+ - 1
+
+testb__test_dict:
+ ports:
+ - 3
+
+
+# Merge variables that end with '__test_dict' and store the result in a variable 'example_a'
+example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}"
+
+# The variable example_a now contains:
+# ports:
+# - 1
+# - 3
+
+
+# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the
+# result in a variable 'example_b'
+example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}"
+
+# The variable example_b now contains:
+# - "list init item 1"
+# - "list init item 2"
+# - "test a item 1"
+# - "test b item 1"
+"""
+
+RETURN = """
+ _raw:
+ description: In case the search matches list items, a list will be returned. In case the search matches dicts, a
+ dict will be returned.
+ type: raw
+ elements: raw
+"""
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+display = Display()
+
+
+def _verify_and_get_type(variable):
+ if isinstance(variable, list):
+ return "list"
+ elif isinstance(variable, dict):
+ return "dict"
+ else:
+ raise AnsibleError("Not supported type detected, variable must be a list or a dict")
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(direct=kwargs)
+ initial_value = self.get_option("initial_value", None)
+ self._override = self.get_option('override', 'error')
+ self._pattern_type = self.get_option('pattern_type', 'regex')
+
+ ret = []
+ for term in terms:
+ if not isinstance(term, str):
+ raise AnsibleError("Non-string type '{0}' passed, only 'str' types are allowed!".format(type(term)))
+
+ ret.append(self._merge_vars(term, initial_value, variables))
+
+ return ret
+
+ def _var_matches(self, key, search_pattern):
+ if self._pattern_type == "prefix":
+ return key.startswith(search_pattern)
+ elif self._pattern_type == "suffix":
+ return key.endswith(search_pattern)
+ elif self._pattern_type == "regex":
+ matcher = re.compile(search_pattern)
+ return matcher.search(key)
+
+ return False
+
+ def _merge_vars(self, search_pattern, initial_value, variables):
+ display.vvv("Merge variables with {0}: {1}".format(self._pattern_type, search_pattern))
+ var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)])
+ display.vvv("The following variables will be merged: {0}".format(var_merge_names))
+
+ prev_var_type = None
+ result = None
+
+ if initial_value is not None:
+ prev_var_type = _verify_and_get_type(initial_value)
+ result = initial_value
+
+ for var_name in var_merge_names:
+ var_value = self._templar.template(variables[var_name]) # Render jinja2 templates
+ var_type = _verify_and_get_type(var_value)
+
+ if prev_var_type is None:
+ prev_var_type = var_type
+ elif prev_var_type != var_type:
+ raise AnsibleError("Unable to merge, not all variables are of the same type")
+
+ if result is None:
+ result = var_value
+ continue
+
+ if var_type == "dict":
+ result = self._merge_dict(var_value, result, [var_name])
+ else: # var_type == "list"
+ result += var_value
+
+ return result
+
+ def _merge_dict(self, src, dest, path):
+ for key, value in src.items():
+ if isinstance(value, dict):
+ node = dest.setdefault(key, {})
+ self._merge_dict(value, node, path + [key])
+ elif isinstance(value, list) and key in dest:
+ dest[key] += value
+ else:
+ if (key in dest) and dest[key] != value:
+ msg = "The key '{0}' with value '{1}' will be overwritten with value '{2}' from '{3}.{0}'".format(
+ key, dest[key], value, ".".join(path))
+
+ if self._override == "error":
+ raise AnsibleError(msg)
+ if self._override == "warn":
+ display.warning(msg)
+
+ dest[key] = value
+
+ return dest
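
A standalone sketch of the _merge_dict() strategy above: nested dictionaries merge recursively, lists concatenate, and scalar conflicts take the incoming value (the plugin additionally errors or warns depending on I(override)):

def merge_dict(src, dest):
    for key, value in src.items():
        if isinstance(value, dict):
            merge_dict(value, dest.setdefault(key, {}))   # recurse into nested dicts
        elif isinstance(value, list) and key in dest:
            dest[key] = dest[key] + value                 # concatenate lists
        else:
            dest[key] = value                             # incoming scalar wins
    return dest

a = {'ports': [1], 'opts': {'tls': True}}
b = {'ports': [3], 'opts': {'debug': False}}
assert merge_dict(b, a) == {'ports': [1, 3], 'opts': {'tls': True, 'debug': False}}
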
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword.py b/ansible_collections/community/general/plugins/lookup/onepassword.py
new file mode 100644
index 000000000..0e78e4b1a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/onepassword.py
@@ -0,0 +1,625 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: onepassword
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch field values from 1Password
+ description:
+ - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password.
+ options:
+ _terms:
+ description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve.
+ required: true
+ field:
+ description: field to return from each matching item (case-insensitive).
+ default: 'password'
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ domain:
+ description: Domain of 1Password.
+ version_added: 3.2.0
+ default: '1password.com'
+ type: str
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+ performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the
+ C(master_password) is required. You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+    needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+    than the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 2.7.2
+'''
+
+EXAMPLES = """
+# These examples only work when already signed in to 1Password
+- name: Retrieve password for KITT when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'KITT')
+
+- name: Retrieve password for Wintermute when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'Tessier-Ashpool', section='Wintermute')
+
+- name: Retrieve username for HAL when already signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword', 'HAL 9000', field='username', vault='Discovery')
+
+- name: Retrieve password for HAL when not signed in to 1Password
+ ansible.builtin.debug:
+    var: lookup('community.general.onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password)
+
+- name: Retrieve password for HAL when never signed in to 1Password
+ ansible.builtin.debug:
+    var: lookup('community.general.onepassword',
+                'HAL 9000',
+                subdomain='Discovery',
+                master_password=vault_master_password,
+                username='tweety@acme.com',
+                secret_key=vault_secret_key)
+"""
+
+RETURN = """
+ _raw:
+ description: field data requested
+ type: list
+ elements: str
+"""
+
+import abc
+import os
+import json
+import subprocess
+
+from ansible.plugins.lookup import LookupBase
+from ansible.errors import AnsibleLookupError
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+from ansible.module_utils.six import with_metaclass
+
+from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig
+
+
+class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)):
+ bin = "op"
+
+ def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
+ self.subdomain = subdomain
+ self.domain = domain
+ self.username = username
+ self.master_password = master_password
+ self.secret_key = secret_key
+
+ self._path = None
+ self._version = None
+
+ def _check_required_params(self, required_params):
+ non_empty_attrs = dict((param, getattr(self, param, None)) for param in required_params if getattr(self, param, None))
+ missing = set(required_params).difference(non_empty_attrs)
+ if missing:
+ prefix = "Unable to sign in to 1Password. Missing required parameter"
+ plural = ""
+ suffix = ": {params}.".format(params=", ".join(missing))
+ if len(missing) > 1:
+ plural = "s"
+
+ msg = "{prefix}{plural}{suffix}".format(prefix=prefix, plural=plural, suffix=suffix)
+ raise AnsibleLookupError(msg)
+
+ @abc.abstractmethod
+ def _parse_field(self, data_json, field_name, section_title):
+ """Main method for parsing data returned from the op command line tool"""
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False, environment_update=None):
+ command = [self.path] + args
+ call_kwargs = {
+ "stdout": subprocess.PIPE,
+ "stderr": subprocess.PIPE,
+ "stdin": subprocess.PIPE,
+ }
+
+ if environment_update:
+ env = os.environ.copy()
+ env.update(environment_update)
+ call_kwargs["env"] = env
+
+ p = subprocess.Popen(command, **call_kwargs)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleLookupError(to_text(err))
+
+ return rc, out, err
+
+ @abc.abstractmethod
+ def assert_logged_in(self):
+ """Check whether a login session exists"""
+
+ @abc.abstractmethod
+ def full_signin(self):
+ """Performa full login"""
+
+ @abc.abstractmethod
+ def get_raw(self, item_id, vault=None, token=None):
+ """Gets the specified item from the vault"""
+
+ @abc.abstractmethod
+ def signin(self):
+ """Sign in using the master password"""
+
+ @property
+ def path(self):
+ if self._path is None:
+ self._path = get_bin_path(self.bin)
+
+ return self._path
+
+ @property
+ def version(self):
+ if self._version is None:
+ self._version = self.get_current_version()
+
+ return self._version
+
+ @classmethod
+ def get_current_version(cls):
+ """Standalone method to get the op CLI version. Useful when determining which class to load
+ based on the current version."""
+ try:
+ bin_path = get_bin_path(cls.bin)
+ except ValueError:
+ raise AnsibleLookupError("Unable to locate '%s' command line tool" % cls.bin)
+
+ try:
+ b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE)
+ except subprocess.CalledProcessError as cpe:
+ raise AnsibleLookupError("Unable to get the op version: %s" % cpe)
+
+ return to_text(b_out).strip()
+
+
+class OnePassCLIv1(OnePassCLIBase):
+ supports_version = "1"
+
+ def _parse_field(self, data_json, field_name, section_title):
+ """
+ Retrieves the desired field from the `op` response payload
+
+ When the item is a `password` type, the password is a key within the `details` key:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "templateUuid": "005",
+ "details": {
+ "notesPlain": "",
+ "password": "foobar",
+ "passwordHistory": [],
+ "sections": [
+ {
+ "name": "linked items",
+ "title": "Related Items"
+ }
+ ]
+ },
+ [...]
+ }
+
+ However, when the item is a `login` type, the password is within a fields array:
+
+ $ op get item 'test item' | jq
+ {
+ [...]
+ "details": {
+ "fields": [
+ {
+ "designation": "username",
+ "name": "username",
+ "type": "T",
+ "value": "foo"
+ },
+ {
+ "designation": "password",
+ "name": "password",
+ "type": "P",
+ "value": "bar"
+ }
+ ],
+ [...]
+ },
+ [...]
+ """
+ data = json.loads(data_json)
+ if section_title is None:
+ # https://github.com/ansible-collections/community.general/pull/1610:
+ # check the details dictionary for `field_name` and return it immediately if it exists
+ # when the entry is a "password" instead of a "login" item, the password field is a key
+ # in the `details` dictionary:
+ if field_name in data["details"]:
+ return data["details"][field_name]
+
+ # when the field is not found above, iterate through the fields list in the object details
+ for field_data in data["details"].get("fields", []):
+ if field_data.get("name", "").lower() == field_name.lower():
+ return field_data.get("value", "")
+
+ for section_data in data["details"].get("sections", []):
+ if section_title is not None and section_title.lower() != section_data["title"].lower():
+ continue
+
+ for field_data in section_data.get("fields", []):
+ if field_data.get("t", "").lower() == field_name.lower():
+ return field_data.get("v", "")
+
+ return ""
+
+ def assert_logged_in(self):
+ args = ["get", "account"]
+ if self.subdomain:
+ account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ args.extend(["--account", account])
+
+ rc, out, err = self._run(args, ignore_errors=True)
+
+ return not bool(rc)
+
+ def full_signin(self):
+ required_params = [
+ "subdomain",
+ "username",
+ "secret_key",
+ "master_password",
+ ]
+ self._check_required_params(required_params)
+
+ args = [
+ "signin",
+ "{0}.{1}".format(self.subdomain, self.domain),
+ to_bytes(self.username),
+ to_bytes(self.secret_key),
+ "--raw",
+ ]
+
+ return self._run(args, command_input=to_bytes(self.master_password))
+
+ def get_raw(self, item_id, vault=None, token=None):
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ["--vault={0}".format(vault)]
+
+ if token is not None:
+ args += [to_bytes("--session=") + token]
+
+ return self._run(args)
+
+ def signin(self):
+ self._check_required_params(['master_password'])
+
+ args = ["signin", "--raw"]
+ if self.subdomain:
+ args.append(self.subdomain)
+
+ return self._run(args, command_input=to_bytes(self.master_password))
+
+
+class OnePassCLIv2(OnePassCLIBase):
+ """
+ CLIv2 Syntax Reference: https://developer.1password.com/docs/cli/upgrade#step-2-update-your-scripts
+ """
+ supports_version = "2"
+
+ def _parse_field(self, data_json, field_name, section_title=None):
+ """
+ Schema reference: https://developer.1password.com/docs/cli/item-template-json
+
+ Example Data:
+
+ # Password item
+ {
+ "id": "ywvdbojsguzgrgnokmcxtydgdv",
+ "title": "Authy Backup",
+ "version": 1,
+ "vault": {
+ "id": "bcqxysvcnejjrwzoqrwzcqjqxc",
+ "name": "Personal"
+ },
+ "category": "PASSWORD",
+ "last_edited_by": "7FUPZ8ZNE02KSHMAIMKHIVUE17",
+ "created_at": "2015-01-18T13:13:38Z",
+ "updated_at": "2016-02-20T16:23:54Z",
+ "additional_information": "Jan 18, 2015, 08:13:38",
+ "fields": [
+ {
+ "id": "password",
+ "type": "CONCEALED",
+ "purpose": "PASSWORD",
+ "label": "password",
+ "value": "OctoberPoppyNuttyDraperySabbath",
+ "reference": "op://Personal/Authy Backup/password",
+ "password_details": {
+ "strength": "FANTASTIC"
+ }
+ },
+ {
+ "id": "notesPlain",
+ "type": "STRING",
+ "purpose": "NOTES",
+ "label": "notesPlain",
+ "value": "Backup password to restore Authy",
+ "reference": "op://Personal/Authy Backup/notesPlain"
+ }
+ ]
+ }
+
+ # Login item
+ {
+ "id": "awk4s2u44fhnrgppszcsvc663i",
+ "title": "Dummy Login",
+ "version": 2,
+ "vault": {
+ "id": "stpebbaccrq72xulgouxsk4p7y",
+ "name": "Personal"
+ },
+ "category": "LOGIN",
+ "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU",
+ "created_at": "2018-04-25T21:55:19Z",
+ "updated_at": "2018-04-25T21:56:06Z",
+ "additional_information": "agent.smith",
+ "urls": [
+ {
+ "primary": true,
+ "href": "https://acme.com"
+ }
+ ],
+ "sections": [
+ {
+ "id": "linked items",
+ "label": "Related Items"
+ }
+ ],
+ "fields": [
+ {
+ "id": "username",
+ "type": "STRING",
+ "purpose": "USERNAME",
+ "label": "username",
+ "value": "agent.smith",
+ "reference": "op://Personal/Dummy Login/username"
+ },
+ {
+ "id": "password",
+ "type": "CONCEALED",
+ "purpose": "PASSWORD",
+ "label": "password",
+ "value": "Q7vFwTJcqwxKmTU]Dzx7NW*wrNPXmj",
+ "entropy": 159.6083697084228,
+ "reference": "op://Personal/Dummy Login/password",
+ "password_details": {
+ "entropy": 159,
+ "generated": true,
+ "strength": "FANTASTIC"
+ }
+ },
+ {
+ "id": "notesPlain",
+ "type": "STRING",
+ "purpose": "NOTES",
+ "label": "notesPlain",
+ "reference": "op://Personal/Dummy Login/notesPlain"
+ }
+ ]
+ }
+ """
+ data = json.loads(data_json)
+ for field in data.get("fields", []):
+ if section_title is None:
+ # If the field name exists in the section, return that value
+ if field.get(field_name):
+ return field.get(field_name)
+
+ # If the field name doesn't exist in the section, match on the value of "label"
+ # then "id" and return "value"
+ if field.get("label") == field_name:
+ return field["value"]
+
+ if field.get("id") == field_name:
+ return field["value"]
+
+            # Look at the section data and get an identifier. The value of 'id' is either a unique ID
+ # or a human-readable string. If a 'label' field exists, prefer that since
+ # it is the value visible in the 1Password UI when both 'id' and 'label' exist.
+ section = field.get("section", {})
+ current_section_title = section.get("label", section.get("id"))
+ if section_title == current_section_title:
+ # In the correct section. Check "label" then "id" for the desired field_name
+ if field.get("label") == field_name:
+ return field["value"]
+
+ if field.get("id") == field_name:
+ return field["value"]
+
+ return ""
+
+ def assert_logged_in(self):
+ args = ["account", "list"]
+ if self.subdomain:
+ account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ args.extend(["--account", account])
+
+ rc, out, err = self._run(args)
+
+ if out:
+ # Running 'op account get' if there are no accounts configured on the system drops into
+ # an interactive prompt. Only run 'op account get' after first listing accounts to see
+ # if there are any previously configured accounts.
+ args = ["account", "get"]
+ if self.subdomain:
+ account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain)
+ args.extend(["--account", account])
+
+ rc, out, err = self._run(args, ignore_errors=True)
+
+ return not bool(rc)
+
+ return False
+
+ def full_signin(self):
+ required_params = [
+ "subdomain",
+ "username",
+ "secret_key",
+ "master_password",
+ ]
+ self._check_required_params(required_params)
+
+ args = [
+ "account", "add", "--raw",
+ "--address", "{0}.{1}".format(self.subdomain, self.domain),
+ "--email", to_bytes(self.username),
+ "--signin",
+ ]
+
+ environment_update = {"OP_SECRET_KEY": self.secret_key}
+ return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update)
+
+ def get_raw(self, item_id, vault=None, token=None):
+ args = ["item", "get", item_id, "--format", "json"]
+ if vault is not None:
+ args += ["--vault={0}".format(vault)]
+ if token is not None:
+ args += [to_bytes("--session=") + token]
+
+ return self._run(args)
+
+ def signin(self):
+ self._check_required_params(['master_password'])
+
+ args = ["signin", "--raw"]
+ if self.subdomain:
+ args.extend(["--account", self.subdomain])
+
+ return self._run(args, command_input=to_bytes(self.master_password))
+
+
+class OnePass(object):
+ def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None):
+ self.subdomain = subdomain
+ self.domain = domain
+ self.username = username
+ self.secret_key = secret_key
+ self.master_password = master_password
+
+ self.logged_in = False
+ self.token = None
+
+ self._config = OnePasswordConfig()
+ self._cli = self._get_cli_class()
+
+ def _get_cli_class(self):
+ version = OnePassCLIBase.get_current_version()
+ for cls in OnePassCLIBase.__subclasses__():
+ if cls.supports_version == version.split(".")[0]:
+ try:
+ return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password)
+ except TypeError as e:
+ raise AnsibleLookupError(e)
+
+ raise AnsibleLookupError("op version %s is unsupported" % version)
+
+ def set_token(self):
+ if self._config.config_file_path and os.path.isfile(self._config.config_file_path):
+ # If the config file exists, assume an initial sign in has taken place and try basic sign in
+ try:
+ rc, out, err = self._cli.signin()
+ except AnsibleLookupError as exc:
+ test_strings = (
+ "missing required parameters",
+ "unauthorized",
+ )
+ if any(string in exc.message.lower() for string in test_strings):
+ # A required parameter is missing, or a bad master password was supplied
+ # so don't bother attempting a full signin
+ raise
+
+ rc, out, err = self._cli.full_signin()
+
+ self.token = out.strip()
+
+ else:
+ # Attempt a full signin since there appears to be no existing signin
+ rc, out, err = self._cli.full_signin()
+ self.token = out.strip()
+
+ def assert_logged_in(self):
+ logged_in = self._cli.assert_logged_in()
+ if logged_in:
+ self.logged_in = logged_in
+ else:
+ self.set_token()
+
+ def get_raw(self, item_id, vault=None):
+ rc, out, err = self._cli.get_raw(item_id, vault, self.token)
+ return out
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ if output:
+ return self._cli._parse_field(output, field, section)
+
+ return ""
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ field = self.get_option("field")
+ section = self.get_option("section")
+ vault = self.get_option("vault")
+ subdomain = self.get_option("subdomain")
+ domain = self.get_option("domain")
+ username = self.get_option("username")
+ secret_key = self.get_option("secret_key")
+ master_password = self.get_option("master_password")
+
+ op = OnePass(subdomain, domain, username, secret_key, master_password)
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ values.append(op.get_field(term, field, section, vault))
+
+ return values
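
A minimal sketch of the version-dispatch pattern used by OnePass._get_cli_class() above, which selects the subclass whose supports_version matches the CLI's major version; the class names here are illustrative:

class CLIBase:
    pass

class CLIv1(CLIBase):
    supports_version = '1'

class CLIv2(CLIBase):
    supports_version = '2'

def select_cli(version_string):
    major = version_string.split('.')[0]           # '2.7.2' -> '2'
    for cls in CLIBase.__subclasses__():           # inspect registered subclasses
        if cls.supports_version == major:
            return cls
    raise RuntimeError('op version %s is unsupported' % version_string)

assert select_cli('2.7.2') is CLIv2
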
diff --git a/ansible_collections/community/general/plugins/lookup/onepassword_raw.py b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
new file mode 100644
index 000000000..9b87a3f61
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/onepassword_raw.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn>
+# Copyright (c) 2016, Andrew Zenk <azenk@umn.edu> (lastpass.py used as starting point)
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: onepassword_raw
+ author:
+ - Scott Buchanan (@scottsb)
+ - Andrew Zenk (@azenk)
+ - Sam Doran (@samdoran)
+ requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+ short_description: fetch an entire item from 1Password
+ description:
+      - C(onepassword_raw) wraps the C(op) command line utility to fetch an entire item from 1Password.
+ options:
+ _terms:
+ description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve.
+ required: true
+ master_password:
+ description: The password used to unlock the specified vault.
+ aliases: ['vault_password']
+ section:
+ description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section.
+ subdomain:
+ description: The 1Password subdomain to authenticate against.
+ domain:
+ description: Domain of 1Password.
+ version_added: 6.0.0
+ default: '1password.com'
+ type: str
+ username:
+ description: The username used to sign in.
+ secret_key:
+ description: The secret key used when performing an initial sign in.
+ vault:
+ description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults.
+ notes:
+ - This lookup will use an existing 1Password session if one exists. If not, and you have already
+      performed an initial sign in (meaning C(~/.op/config) exists), then only the C(master_password) is required.
+ You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op).
+ - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password).
+ - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials
+      needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength
+      than the 1Password master password.
+ - This lookup stores potentially sensitive data from 1Password as Ansible facts.
+ Facts are subject to caching if enabled, which means this data could be stored in clear text
+ on disk or in a database.
+ - Tested with C(op) version 2.7.0
+'''
+
+EXAMPLES = """
+- name: Retrieve all data about Wintermute
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute')
+
+- name: Retrieve all data about Wintermute when not signed in to 1Password
+ ansible.builtin.debug:
+ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl')
+"""
+
+RETURN = """
+ _raw:
+ description: field data requested
+ type: list
+ elements: dict
+"""
+
+import json
+
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+
+ vault = self.get_option("vault")
+ subdomain = self.get_option("subdomain")
+ domain = self.get_option("domain", "1password.com")
+ username = self.get_option("username")
+ secret_key = self.get_option("secret_key")
+ master_password = self.get_option("master_password")
+
+ op = OnePass(subdomain, domain, username, secret_key, master_password)
+ op.assert_logged_in()
+
+ values = []
+ for term in terms:
+ data = json.loads(op.get_raw(term, vault))
+ values.append(data)
+
+ return values
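
A short sketch of what the raw lookup hands back: op prints one JSON document per item, and json.loads() turns it into the dict returned to the playbook. The payload below is a trimmed, invented example of the v2 item schema:

import json

raw = '{"id": "abc123", "title": "Wintermute", "fields": [{"id": "password", "value": "s3cret"}]}'
item = json.loads(raw)           # the lookup returns one such dict per term
assert item['title'] == 'Wintermute'
assert item['fields'][0]['value'] == 's3cret'
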
diff --git a/ansible_collections/community/general/plugins/lookup/passwordstore.py b/ansible_collections/community/general/plugins/lookup/passwordstore.py
new file mode 100644
index 000000000..7e37a3785
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/passwordstore.py
@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Patrick Deelman <patrick@patrickdeelman.nl>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+ name: passwordstore
+ author:
+ - Patrick Deelman (!UNKNOWN) <patrick@patrickdeelman.nl>
+ short_description: manage passwords with passwordstore.org's pass utility
+ description:
+ - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility.
+ It also retrieves YAML-style keys stored as multiple lines in the password file.
+ - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to
+ C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead.
+ options:
+ _terms:
+ description: query key.
+ required: true
+ directory:
+ description:
+ - The directory of the password store.
+ - If I(backend=pass), the default C(~/.password-store) is used.
+ - If I(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml),
+ falling back to C(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config.
+ type: path
+ vars:
+ - name: passwordstore
+ env:
+ - name: PASSWORD_STORE_DIR
+ create:
+ description: Create the password if it does not already exist. Takes precedence over C(missing).
+ type: bool
+ default: false
+ overwrite:
+ description: Overwrite the password if it already exists.
+ type: bool
+ default: false
+ umask:
+ description:
+ - Sets the umask for the created C(.gpg) files. The first octal digit must not be greater than 3, otherwise the password file would not be user readable.
+ - Note that pass' default value is C('077').
+ env:
+ - name: PASSWORD_STORE_UMASK
+ version_added: 1.3.0
+ returnall:
+ description: Return all the content of the password, not only the first line.
+ type: bool
+ default: false
+ subkey:
+ description: Return a specific subkey of the password. When set to C(password), always returns the first line.
+ type: str
+ default: password
+ userpass:
+ description: Specify a password to save, instead of a generated one.
+ type: str
+ length:
+ description: The length of the generated password.
+ type: int
+ default: 16
+ backup:
+ description: Used with C(overwrite=true). Backup the previous password in a subkey.
+ type: bool
+ default: false
+ nosymbols:
+ description: Use alphanumeric characters only (no symbols).
+ type: bool
+ default: false
+ missing:
+ description:
+ - What to do if the password file is missing.
+ - If I(create=true), the value for this option is ignored and assumed to be C(create).
+ - If set to C(error), the lookup will error out if the passname does not exist.
+ - If set to C(create), the passname will be created with the provided I(length) if it does not exist.
+ - If set to C(empty) or C(warn), C(none) will be returned if the passname does not exist.
+ When using C(lookup) and not C(query), this will be translated to an empty string.
+ version_added: 3.1.0
+ type: str
+ default: error
+ choices:
+ - error
+ - warn
+ - empty
+ - create
+ lock:
+ description:
+ - How to synchronize operations.
+ - The default of C(write) only synchronizes write operations.
+ - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel.
+ - C(none) does not do any synchronization.
+ ini:
+ - section: passwordstore_lookup
+ key: lock
+ type: str
+ default: write
+ choices:
+ - readwrite
+ - write
+ - none
+ version_added: 4.5.0
+ locktimeout:
+ description:
+ - Lock timeout applied when I(lock) is not C(none).
+ - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m).
+ - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details.
+ ini:
+ - section: passwordstore_lookup
+ key: locktimeout
+ type: str
+ default: 15m
+ version_added: 4.5.0
+ backend:
+ description:
+ - Specify which backend to use.
+ - Defaults to C(pass), passwordstore.org's original pass utility.
+ - C(gopass) support is incomplete.
+ ini:
+ - section: passwordstore_lookup
+ key: backend
+ vars:
+ - name: passwordstore_backend
+ type: str
+ default: pass
+ choices:
+ - pass
+ - gopass
+ version_added: 5.2.0
+ notes:
+ - The lookup supports passing all options as lookup parameters since community.general 6.0.0.
+'''
+EXAMPLES = """
+ansible.cfg: |
+ [passwordstore_lookup]
+ lock=readwrite
+ locktimeout=45s
+
+tasks.yml: |
+ ---
+
+ # Debug is used for examples, BAD IDEA to show passwords on screen
+ - name: Basic lookup. Fails if example/test does not exist
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test')}}"
+
+ - name: Basic lookup. Warns if example/test does not exist and returns empty string
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test', missing='warn')}}"
+
+ - name: Create pass with a random 16 character password. If the password exists, just return it
+ ansible.builtin.debug:
+ var: mypassword
+ vars:
+ mypassword: "{{ lookup('community.general.passwordstore', 'example/test', create=true)}}"
+
+ - name: Create pass with a random 16 character password. If the password exists, just return it
+ ansible.builtin.debug:
+ var: mypassword
+ vars:
+ mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}"
+
+ - name: Print 'abc' if example/test does not exist, otherwise return the password
+ ansible.builtin.debug:
+ var: mypassword
+ vars:
+ mypassword: >-
+ {{ lookup('community.general.passwordstore', 'example/test', missing='empty')
+ | default('abc', true) }}
+
+ - name: Different size password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, length=42)}}"
+
+ - name: >-
+ Create password and overwrite the password if it exists.
+ As a bonus, this module includes the old password inside the pass file
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, overwrite=true)}}"
+
+ - name: Create an alphanumeric password
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, nosymbols=true) }}"
+
+ - name: Return the value for user in the KV pair user, username
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.passwordstore', 'example/test', subkey='user')}}"
+
+ - name: Return the entire password file content
+ ansible.builtin.set_fact:
+ passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}"
+"""
+
+RETURN = """
+_raw:
+ description:
+ - a password
+ type: list
+ elements: str
+"""
+
+from contextlib import contextmanager
+import os
+import re
+import subprocess
+import time
+import yaml
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.utils.display import Display
+from ansible.utils.encrypt import random_password
+from ansible.plugins.lookup import LookupBase
+from ansible import constants as C
+
+from ansible_collections.community.general.plugins.module_utils._filelock import FileLock
+
+display = Display()
+
+
+# backported check_output with support for an 'input' argument on Python 2.7
+# http://stackoverflow.com/questions/10103551/passing-data-to-subprocess-check-output
+# note: contains special logic for calling 'pass', so not a drop-in replacement for check_output
+def check_output2(*popenargs, **kwargs):
+ if 'stdout' in kwargs:
+ raise ValueError('stdout argument not allowed, it will be overridden.')
+ if 'stderr' in kwargs:
+ raise ValueError('stderr argument not allowed, it will be overridden.')
+ if 'input' in kwargs:
+ if 'stdin' in kwargs:
+ raise ValueError('stdin and input arguments may not both be used.')
+ b_inputdata = to_bytes(kwargs['input'], errors='surrogate_or_strict')
+ del kwargs['input']
+ kwargs['stdin'] = subprocess.PIPE
+ else:
+ b_inputdata = None
+ process = subprocess.Popen(*popenargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
+ try:
+ b_out, b_err = process.communicate(b_inputdata)
+ except Exception:
+ process.kill()
+ process.wait()
+ raise
+ retcode = process.poll()
+ if retcode == 0 and (b'encryption failed: Unusable public key' in b_out or
+ b'encryption failed: Unusable public key' in b_err):
+ retcode = 78 # os.EX_CONFIG
+ if retcode != 0:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise subprocess.CalledProcessError(
+ retcode,
+ cmd,
+ to_native(b_out + b_err, errors='surrogate_or_strict')
+ )
+ return b_out
+
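+# Minimal usage sketch (values are hypothetical): check_output2 behaves like
+# subprocess.check_output but also accepts an 'input' keyword for stdin:
+#
+#     out = check_output2(['pass', 'insert', '-f', '-m', 'example/test'],
+#                         input='s3cret\n', env=os.environ.copy())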
+
+class LookupModule(LookupBase):
+ def __init__(self, loader=None, templar=None, **kwargs):
+
+ super(LookupModule, self).__init__(loader, templar, **kwargs)
+ self.realpass = None
+
+ def is_real_pass(self):
+ if self.realpass is None:
+ try:
+ passoutput = to_text(
+ check_output2([self.pass_cmd, "--version"], env=self.env),
+ errors='surrogate_or_strict'
+ )
+ self.realpass = 'pass: the standard unix password manager' in passoutput
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+
+ return self.realpass
+
+ def parse_params(self, term):
+ # I went with the "traditional" param followed with space separated KV pairs.
+ # Waiting for final implementation of lookup parameter parsing.
+ # See: https://github.com/ansible/ansible/issues/12255
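+ # For example, term = 'example/test create=true length=42' sets passname to 'example/test'.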
+ params = term.split()
+ if len(params) > 0:
+ # the first param is the pass-name
+ self.passname = params[0]
+ # next parse the optional parameters in keyvalue pairs
+ try:
+ for param in params[1:]:
+ name, value = param.split('=', 1)
+ if name not in self.paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ self.paramvals[name] = value
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ # check and convert values
+ try:
+ for key in ['create', 'returnall', 'overwrite', 'backup', 'nosymbols']:
+ if not isinstance(self.paramvals[key], bool):
+ self.paramvals[key] = boolean(self.paramvals[key])
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
+ if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']:
+ raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing']))
+ if not isinstance(self.paramvals['length'], int):
+ if self.paramvals['length'].isdigit():
+ self.paramvals['length'] = int(self.paramvals['length'])
+ else:
+ raise AnsibleError("{0} is not a correct value for length".format(self.paramvals['length']))
+
+ if self.paramvals['create']:
+ self.paramvals['missing'] = 'create'
+
+ # Collect pass environment variables from the plugin's parameters.
+ self.env = os.environ.copy()
+ self.env['LANGUAGE'] = 'C' # make sure to get errors in English as required by check_output2
+
+ if self.backend == 'gopass':
+ self.env['GOPASS_NO_REMINDER'] = "YES"
+ elif os.path.isdir(self.paramvals['directory']):
+ # Set PASSWORD_STORE_DIR
+ self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory']
+ elif self.is_real_pass():
+ raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory']))
+
+ # Set PASSWORD_STORE_UMASK if umask is set
+ if self.paramvals.get('umask') is not None:
+ if len(self.paramvals['umask']) != 3:
+ raise AnsibleError('Passwordstore umask must have a length of 3.')
+ elif int(self.paramvals['umask'][0]) > 3:
+ raise AnsibleError('Passwordstore umask not allowed (password not user readable).')
+ else:
+ self.env['PASSWORD_STORE_UMASK'] = self.paramvals['umask']
+
+ def check_pass(self):
+ try:
+ self.passoutput = to_text(
+ check_output2([self.pass_cmd, 'show'] +
+ [self.passname], env=self.env),
+ errors='surrogate_or_strict'
+ ).splitlines()
+ self.password = self.passoutput[0]
+ self.passdict = {}
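+ # Try to parse the remaining lines as YAML; fall back to plain 'key: value' splitting per line.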
+ try:
+ values = yaml.safe_load('\n'.join(self.passoutput[1:]))
+ for key, item in values.items():
+ self.passdict[key] = item
+ except (yaml.YAMLError, AttributeError):
+ for line in self.passoutput[1:]:
+ if ':' in line:
+ name, value = line.split(':', 1)
+ self.passdict[name.strip()] = value.strip()
+ if (self.backend == 'gopass' or
+ os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg"))
+ or not self.is_real_pass()):
+ # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise)
+ return True
+ except (subprocess.CalledProcessError) as e:
+ # 'not in password store' is the expected error if a password wasn't found
+ if 'not in the password store' not in e.output:
+ raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+
+ if self.paramvals['missing'] == 'error':
+ raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname))
+ elif self.paramvals['missing'] == 'warn':
+ display.warning('passwordstore: passname {0} not found'.format(self.passname))
+
+ return False
+
+ def get_newpass(self):
+ if self.paramvals['nosymbols']:
+ chars = C.DEFAULT_PASSWORD_CHARS[:62]
+ else:
+ chars = C.DEFAULT_PASSWORD_CHARS
+
+ if self.paramvals['userpass']:
+ newpass = self.paramvals['userpass']
+ else:
+ newpass = random_password(length=self.paramvals['length'], chars=chars)
+ return newpass
+
+ def update_password(self):
+ # generate new password, insert old lines from current result and return new password
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n'
+ if self.passoutput[1:]:
+ msg += '\n'.join(self.passoutput[1:]) + '\n'
+ if self.paramvals['backup']:
+ msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime)
+ try:
+ check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ return newpass
+
+ def generate_password(self):
+ # generate new file and insert lookup_pass: Generated by Ansible on {date}
+ # use random_password() to generate the password and insert values with pass -m
+ newpass = self.get_newpass()
+ datetime = time.strftime("%d/%m/%Y %H:%M:%S")
+ msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime)
+ try:
+ check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env)
+ except (subprocess.CalledProcessError) as e:
+ raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output))
+ return newpass
+
+ def get_passresult(self):
+ if self.paramvals['returnall']:
+ return os.linesep.join(self.passoutput)
+ if self.paramvals['subkey'] == 'password':
+ return self.password
+ else:
+ if self.paramvals['subkey'] in self.passdict:
+ return self.passdict[self.paramvals['subkey']]
+ else:
+ return None
+
+ @contextmanager
+ def opt_lock(self, type):
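+ # Lock only when the configured lock mode matches the requested type, so the nested calls in run() acquire at most one lock.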
+ if self.get_option('lock') == type:
+ tmpdir = os.environ.get('TMPDIR', '/tmp')
+ lockfile = os.path.join(tmpdir, '.passwordstore.lock')
+ with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout):
+ self.locked = type
+ yield
+ self.locked = None
+ else:
+ yield
+
+ def setup(self, variables):
+ self.backend = self.get_option('backend')
+ self.pass_cmd = self.backend # pass and gopass are commands as well
+ self.locked = None
+ timeout = self.get_option('locktimeout')
+ if not re.match('^[0-9]+[smh]$', timeout):
+ raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout))
+ unit_to_seconds = {"s": 1, "m": 60, "h": 3600}
+ self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]]
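+ # For example, '45s' becomes 45 and '15m' becomes 900 seconds.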
+
+ directory = self.get_option('directory')
+ if directory is None:
+ if self.backend == 'gopass':
+ try:
+ with open(os.path.expanduser('~/.config/gopass/config.yml')) as f:
+ directory = yaml.safe_load(f)['path']
+ except (FileNotFoundError, KeyError, yaml.YAMLError):
+ directory = os.path.expanduser('~/.local/share/gopass/stores/root')
+ else:
+ directory = os.path.expanduser('~/.password-store')
+
+ self.paramvals = {
+ 'subkey': self.get_option('subkey'),
+ 'directory': directory,
+ 'create': self.get_option('create'),
+ 'returnall': self.get_option('returnall'),
+ 'overwrite': self.get_option('overwrite'),
+ 'nosymbols': self.get_option('nosymbols'),
+ 'userpass': self.get_option('userpass') or '',
+ 'length': self.get_option('length'),
+ 'backup': self.get_option('backup'),
+ 'missing': self.get_option('missing'),
+ 'umask': self.get_option('umask'),
+ }
+
+ def run(self, terms, variables, **kwargs):
+ self.set_options(var_options=variables, direct=kwargs)
+ self.setup(variables)
+ result = []
+
+ for term in terms:
+ self.parse_params(term) # parse the input into paramvals
+ with self.opt_lock('readwrite'):
+ if self.check_pass(): # password exists
+ if self.paramvals['overwrite'] and self.paramvals['subkey'] == 'password':
+ with self.opt_lock('write'):
+ result.append(self.update_password())
+ else:
+ result.append(self.get_passresult())
+ else: # password does not exist
+ if self.paramvals['missing'] == 'create':
+ with self.opt_lock('write'):
+ if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock
+ result.append(self.get_passresult())
+ else:
+ result.append(self.generate_password())
+ else:
+ result.append(None)
+
+ return result
diff --git a/ansible_collections/community/general/plugins/lookup/random_pet.py b/ansible_collections/community/general/plugins/lookup/random_pet.py
new file mode 100644
index 000000000..71a62cbca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/random_pet.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+ name: random_pet
+ author:
+ - Abhijeet Kasurde (@Akasurde)
+ short_description: Generates random pet names
+ version_added: '3.1.0'
+ requirements:
+ - petname U(https://github.com/dustinkirkland/python-petname)
+ description:
+ - Generates random pet names that can be used as unique identifiers for the resources.
+ options:
+ words:
+ description:
+ - The number of words in the pet name.
+ default: 2
+ type: int
+ length:
+ description:
+ - The maximal length of every component of the pet name.
+ - Values below 3 will be set to 3 by petname.
+ default: 6
+ type: int
+ prefix:
+ description: A string to use as a prefix for the name.
+ type: str
+ separator:
+ description: The character to separate words in the pet name.
+ default: "-"
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Generate pet name
+ ansible.builtin.debug:
+ var: lookup('community.general.random_pet')
+ # Example result: 'loving-raptor'
+
+- name: Generate pet name with 3 words
+ ansible.builtin.debug:
+ var: lookup('community.general.random_pet', words=3)
+ # Example result: 'fully-fresh-macaw'
+
+- name: Generate pet name with separator
+ ansible.builtin.debug:
+ var: lookup('community.general.random_pet', separator="_")
+ # Example result: 'causal_snipe'
+
+- name: Generate pet name with length
+ ansible.builtin.debug:
+ var: lookup('community.general.random_pet', length=7)
+ # Example result: 'natural-peacock'
+'''
+
+RETURN = r'''
+ _raw:
+ description: A one-element list containing a random pet name
+ type: list
+ elements: str
+'''
+
+try:
+ import petname
+
+ HAS_PETNAME = True
+except ImportError:
+ HAS_PETNAME = False
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not HAS_PETNAME:
+ raise AnsibleError('Python petname library is required. '
+ 'Please install using "pip install petname"')
+
+ self.set_options(var_options=variables, direct=kwargs)
+ words = self.get_option('words')
+ length = self.get_option('length')
+ prefix = self.get_option('prefix')
+ separator = self.get_option('separator')
+
+ values = petname.Generate(words=words, separator=separator, letters=length)
+ if prefix:
+ values = "%s%s%s" % (prefix, separator, values)
+
+ return [values]
diff --git a/ansible_collections/community/general/plugins/lookup/random_string.py b/ansible_collections/community/general/plugins/lookup/random_string.py
new file mode 100644
index 000000000..199aa1396
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/random_string.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ name: random_string
+ author:
+ - Abhijeet Kasurde (@Akasurde)
+ short_description: Generates random string
+ version_added: '3.2.0'
+ description:
+ - Generates random string based upon the given constraints.
+ options:
+ length:
+ description: The length of the string.
+ default: 8
+ type: int
+ upper:
+ description:
+ - Include uppercase letters in the string.
+ default: true
+ type: bool
+ lower:
+ description:
+ - Include lowercase letters in the string.
+ default: true
+ type: bool
+ numbers:
+ description:
+ - Include numbers in the string.
+ default: true
+ type: bool
+ special:
+ description:
+ - Include special characters in the string.
+ - Special characters are taken from Python standard library C(string).
+ See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
+ for which characters will be used.
+ - The choice of special characters can be changed by setting I(override_special).
+ default: true
+ type: bool
+ min_numeric:
+ description:
+ - Minimum number of numeric characters in the string.
+ - If set, overrides I(numbers=false).
+ default: 0
+ type: int
+ min_upper:
+ description:
+ - Minimum number of uppercase letters in the string.
+ - If set, overrides I(upper=false).
+ default: 0
+ type: int
+ min_lower:
+ description:
+ - Minimum number of lowercase letters in the string.
+ - If set, overrides I(lower=false).
+ default: 0
+ type: int
+ min_special:
+ description:
+ - Minimum number of special characters in the string.
+ default: 0
+ type: int
+ override_special:
+ description:
+ - Override the list of special characters to use in the string.
+ - If set, I(min_special) should be set to a non-default value.
+ type: str
+ override_all:
+ description:
+ - Override all values of I(numbers), I(upper), I(lower), and I(special) with
+ the given list of characters.
+ type: str
+ base64:
+ description:
+ - Returns the base64-encoded string.
+ type: bool
+ default: false
+"""
+
+EXAMPLES = r"""
+- name: Generate random string
+ ansible.builtin.debug:
+ var: lookup('community.general.random_string')
+ # Example result: ['DeadBeeF']
+
+- name: Generate random string with length 12
+ ansible.builtin.debug:
+ var: lookup('community.general.random_string', length=12)
+ # Example result: ['Uan0hUiX5kVG']
+
+- name: Generate base64 encoded random string
+ ansible.builtin.debug:
+ var: lookup('community.general.random_string', base64=True)
+ # Example result: ['NHZ6eWN5Qk0=']
+
+- name: Generate a random string with at least 1 lower, 1 upper, 1 number and 1 special char
+ ansible.builtin.debug:
+ var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1)
+ # Example result: ['&Qw2|E[-']
+
+- name: Generate a random string with all lower case characters
+ ansible.builtin.debug:
+ var: query('community.general.random_string', upper=false, numbers=false, special=false)
+ # Example result: ['exolxzyz']
+
+- name: Generate random hexadecimal string
+ ansible.builtin.debug:
+ var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false)
+ vars:
+ hex_chars: '0123456789ABCDEF'
+ # Example result: ['D2A40737']
+
+- name: Generate random hexadecimal string with override_all
+ ansible.builtin.debug:
+ var: query('community.general.random_string', override_all=hex_chars)
+ vars:
+ hex_chars: '0123456789ABCDEF'
+ # Example result: ['D2A40737']
+"""
+
+RETURN = r"""
+ _raw:
+ description: A one-element list containing a random string
+ type: list
+ elements: str
+"""
+
+import base64
+import random
+import string
+
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+ @staticmethod
+ def get_random(random_generator, chars, length):
+ if not chars:
+ raise AnsibleLookupError(
+ "Available characters cannot be None, please change constraints"
+ )
+ return "".join(random_generator.choice(chars) for dummy in range(length))
+
+ @staticmethod
+ def b64encode(string_value, encoding="utf-8"):
+ return to_text(
+ base64.b64encode(
+ to_bytes(string_value, encoding=encoding, errors="surrogate_or_strict")
+ )
+ )
+
+ def run(self, terms, variables=None, **kwargs):
+ number_chars = string.digits
+ lower_chars = string.ascii_lowercase
+ upper_chars = string.ascii_uppercase
+ special_chars = string.punctuation
+ random_generator = random.SystemRandom()
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ length = self.get_option("length")
+ base64_flag = self.get_option("base64")
+ override_all = self.get_option("override_all")
+ values = ""
+ available_chars_set = ""
+
+ if override_all:
+ # Override all the values
+ available_chars_set = override_all
+ else:
+ upper = self.get_option("upper")
+ lower = self.get_option("lower")
+ numbers = self.get_option("numbers")
+ special = self.get_option("special")
+ override_special = self.get_option("override_special")
+
+ if override_special:
+ special_chars = override_special
+
+ if upper:
+ available_chars_set += upper_chars
+ if lower:
+ available_chars_set += lower_chars
+ if numbers:
+ available_chars_set += number_chars
+ if special:
+ available_chars_set += special_chars
+
+ mapping = {
+ "min_numeric": number_chars,
+ "min_lower": lower_chars,
+ "min_upper": upper_chars,
+ "min_special": special_chars,
+ }
+
+ for m in mapping:
+ if self.get_option(m):
+ values += self.get_random(random_generator, mapping[m], self.get_option(m))
+
+ remaining_pass_len = length - len(values)
+ values += self.get_random(random_generator, available_chars_set, remaining_pass_len)
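+ # For example, length=8 with min_numeric=2 draws two digits first, then fills the remaining six from the combined set.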
+
+ # Shuffle so the characters required by the min_* options are not grouped
+ # at the front; reuse the SystemRandom generator from the generation step
+ shuffled_values = list(values)
+ random_generator.shuffle(shuffled_values)
+
+ if base64_flag:
+ return [self.b64encode("".join(shuffled_values))]
+
+ return ["".join(shuffled_values)]
diff --git a/ansible_collections/community/general/plugins/lookup/random_words.py b/ansible_collections/community/general/plugins/lookup/random_words.py
new file mode 100644
index 000000000..a4aa1b317
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/random_words.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""The community.general.random_words Ansible lookup plugin."""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+ name: random_words
+ author:
+ - Thomas Sjögren (@konstruktoid)
+ short_description: Return a number of random words
+ version_added: "4.0.0"
+ requirements:
+ - xkcdpass U(https://github.com/redacted/XKCD-password-generator)
+ description:
+ - Returns a number of random words. The output can, for example, be used for
+ passwords.
+ - See U(https://xkcd.com/936/) for background.
+ options:
+ numwords:
+ description:
+ - The number of words.
+ default: 6
+ type: int
+ min_length:
+ description:
+ - Minimum length of the words used to build the password.
+ default: 5
+ type: int
+ max_length:
+ description:
+ - Maximum length of the words used to build the password.
+ default: 9
+ type: int
+ delimiter:
+ description:
+ - The delimiter character between words.
+ default: " "
+ type: str
+ case:
+ description:
+ - The method for setting the case of each word in the passphrase.
+ choices: ["alternating", "upper", "lower", "random", "capitalize"]
+ default: "lower"
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Generate password with default settings
+ ansible.builtin.debug:
+ var: lookup('community.general.random_words')
+ # Example result: 'traitor gigabyte cesarean unless aspect clear'
+
+- name: Generate password with six five-character words
+ ansible.builtin.debug:
+ var: lookup('community.general.random_words', min_length=5, max_length=5)
+ # Example result: 'brink banjo getup staff trump comfy'
+
+- name: Generate password with three capitalized words and the '-' delimiter
+ ansible.builtin.debug:
+ var: lookup('community.general.random_words', numwords=3, delimiter='-', case='capitalize')
+ # Example result: 'Overlabor-Faucet-Coastline'
+
+- name: Generate password with three words without any delimiter
+ ansible.builtin.debug:
+ var: lookup('community.general.random_words', numwords=3, delimiter='')
+ # Example result: 'deskworkmonopolystriking'
+ # https://www.ncsc.gov.uk/blog-post/the-logic-behind-three-random-words
+"""
+
+RETURN = r"""
+ _raw:
+ description: A single-element list containing random words.
+ type: list
+ elements: str
+"""
+
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.lookup import LookupBase
+
+try:
+ from xkcdpass import xkcd_password as xp
+
+ HAS_XKCDPASS = True
+except ImportError:
+ HAS_XKCDPASS = False
+
+
+class LookupModule(LookupBase):
+ """The random_words Ansible lookup class."""
+
+ def run(self, terms, variables=None, **kwargs):
+
+ if not HAS_XKCDPASS:
+ raise AnsibleLookupError(
+ "Python xkcdpass library is required. "
+ 'Please install using "pip install xkcdpass"'
+ )
+
+ self.set_options(var_options=variables, direct=kwargs)
+ method = self.get_option("case")
+ delimiter = self.get_option("delimiter")
+ max_length = self.get_option("max_length")
+ min_length = self.get_option("min_length")
+ numwords = self.get_option("numwords")
+
+ words = xp.locate_wordfile()
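+ # locate_wordfile() returns the path of the default word list bundled with xkcdpass.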
+ wordlist = xp.generate_wordlist(
+ max_length=max_length, min_length=min_length, wordfile=words
+ )
+
+ values = xp.generate_xkcdpassword(
+ wordlist, case=method, delimiter=delimiter, numwords=numwords
+ )
+
+ return [values]
diff --git a/ansible_collections/community/general/plugins/lookup/redis.py b/ansible_collections/community/general/plugins/lookup/redis.py
new file mode 100644
index 000000000..43b046a79
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/redis.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: redis
+ author:
+ - Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
+ - Ansible Core Team
+ short_description: fetch data from Redis
+ description:
+ - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it.
+ requirements:
+ - redis (Python library, see U(https://github.com/andymccurdy/redis-py/))
+ options:
+ _terms:
+ description: list of keys to query
+ host:
+ description: location of Redis host
+ default: '127.0.0.1'
+ env:
+ - name: ANSIBLE_REDIS_HOST
+ ini:
+ - section: lookup_redis
+ key: host
+ port:
+ description: port on which Redis is listening
+ default: 6379
+ type: int
+ env:
+ - name: ANSIBLE_REDIS_PORT
+ ini:
+ - section: lookup_redis
+ key: port
+ socket:
+ description: path to the Unix socket on which to query Redis. When set, this option overrides the I(host) and I(port) options.
+ type: path
+ env:
+ - name: ANSIBLE_REDIS_SOCKET
+ ini:
+ - section: lookup_redis
+ key: socket
+'''
+
+EXAMPLES = """
+- name: query redis for somekey (default or configured settings used)
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'somekey') }}"
+
+- name: query redis for list of keys and non-default host and port
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', item, host='myredis.internal.com', port=2121) }}"
+ loop: '{{ list_of_redis_keys }}'
+
+- name: use list directly
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', 'key3') }}"
+
+- name: use list directly with a socket
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}"
+
+"""
+
+RETURN = """
+_raw:
+ description: value(s) stored in Redis
+ type: list
+ elements: str
+"""
+
+HAVE_REDIS = False
+try:
+ import redis
+ HAVE_REDIS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ if not HAVE_REDIS:
+ raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
+
+ # get options
+ self.set_options(direct=kwargs)
+
+ # setup connection
+ host = self.get_option('host')
+ port = self.get_option('port')
+ socket = self.get_option('socket')
+ if socket is None:
+ conn = redis.Redis(host=host, port=port)
+ else:
+ conn = redis.Redis(unix_socket_path=socket)
+
+ ret = []
+ for term in terms:
+ try:
+ res = conn.get(term)
+ if res is None:
+ res = ""
+ ret.append(to_text(res))
+ except Exception as e:
+ # connection failed or other redis error
+ raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e))
+ return ret
diff --git a/ansible_collections/community/general/plugins/lookup/revbitspss.py b/ansible_collections/community/general/plugins/lookup/revbitspss.py
new file mode 100644
index 000000000..552970804
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/revbitspss.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, RevBits <info@revbits.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: revbitspss
+author: RevBits (@RevBits) <info@revbits.com>
+short_description: Get secrets from RevBits PAM server
+version_added: 4.1.0
+description:
+ - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM
+ Server using API key authentication with the REST API.
+requirements:
+ - revbits_ansible - U(https://pypi.org/project/revbits_ansible/)
+options:
+ _terms:
+ description:
+ - This will be an array of keys for secrets which you want to fetch from RevBits PAM.
+ required: true
+ type: list
+ elements: str
+ base_url:
+ description:
+ - This will be the base URL of the server, for example C(https://server-url-here).
+ required: true
+ type: str
+ api_key:
+ description:
+ - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module.
+ required: true
+ type: str
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - The JSON responses which you can access with defined keys.
+ - If you fetch secrets named, for example, C(UUID) and C(PASSWORD), it gives you a dict of all those secrets.
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.revbitspss',
+ 'UUIDPAM', 'DB_PASS',
+ base_url='https://server-url-here',
+ api_key='API_KEY_GOES_HERE'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }}
+"""
+
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+from ansible.errors import AnsibleError
+from ansible.module_utils.six import raise_from
+
+try:
+ from pam.revbits_ansible.server import SecretServer
+except ImportError as imp_exc:
+ ANOTHER_LIBRARY_IMPORT_ERROR = imp_exc
+else:
+ ANOTHER_LIBRARY_IMPORT_ERROR = None
+
+
+display = Display()
+
+
+class LookupModule(LookupBase):
+
+ @staticmethod
+ def Client(server_parameters):
+ return SecretServer(**server_parameters)
+
+ def run(self, terms, variables, **kwargs):
+ if ANOTHER_LIBRARY_IMPORT_ERROR:
+ raise_from(
+ AnsibleError('revbits_ansible must be installed to use this plugin'),
+ ANOTHER_LIBRARY_IMPORT_ERROR
+ )
+ self.set_options(var_options=variables, direct=kwargs)
+ secret_server = LookupModule.Client(
+ {
+ "base_url": self.get_option('base_url'),
+ "api_key": self.get_option('api_key'),
+ }
+ )
+ result = []
+ for term in terms:
+ try:
+ display.vvv(u"Secret Server lookup of Secret with ID %s" % term)
+ result.append({term: secret_server.get_pam_secret(term)})
+ except Exception as error:
+ raise AnsibleError("Secret Server lookup failure: %s" % error.message)
+ return result
diff --git a/ansible_collections/community/general/plugins/lookup/shelvefile.py b/ansible_collections/community/general/plugins/lookup/shelvefile.py
new file mode 100644
index 000000000..35f1097c8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/shelvefile.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# Copyright (c) 2012-17 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: shelvefile
+ author: Alejandro Guirao (!UNKNOWN) <lekumberri@gmail.com>
+ short_description: read keys from Python shelve file
+ description:
+ - Read keys from Python shelve file.
+ options:
+ _terms:
+ description: Sets of key-value pairs of parameters.
+ key:
+ description: Key to query.
+ required: true
+ file:
+ description: Path to shelve file.
+ required: true
+'''
+
+EXAMPLES = """
+- name: Retrieve a string value corresponding to a key inside a Python shelve file
+ ansible.builtin.debug:
+ msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}"
+"""
+
+RETURN = """
+_list:
+ description: Value(s) of key(s) in shelve file(s).
+ type: list
+ elements: str
+"""
+
+import shelve
+
+from ansible.errors import AnsibleError, AnsibleAssertionError
+from ansible.plugins.lookup import LookupBase
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+class LookupModule(LookupBase):
+
+ def read_shelve(self, shelve_filename, key):
+ """
+ Read the value of "key" from a shelve file
+ """
+ d = shelve.open(to_bytes(shelve_filename))
+ res = d.get(key, None)
+ d.close()
+ return res
+
+ def run(self, terms, variables=None, **kwargs):
+ if not isinstance(terms, list):
+ terms = [terms]
+
+ ret = []
+
+ for term in terms:
+ paramvals = {"file": None, "key": None}
+ params = term.split()
+
+ try:
+ for param in params:
+ name, value = param.split('=', 1)
+ if name not in paramvals:
+ raise AnsibleAssertionError('%s not in paramvals' % name)
+ paramvals[name] = value
+
+ except (ValueError, AssertionError) as e:
+ # In case "file" or "key" are not present
+ raise AnsibleError(e)
+
+ key = paramvals['key']
+
+ # Search also in the role/files directory and in the playbook directory
+ shelvefile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
+
+ if shelvefile:
+ res = self.read_shelve(shelvefile, key)
+ if res is None:
+ raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile))
+ # Convert the value read to string
+ ret.append(to_text(res))
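+ # Stop after the first term that yields a value; remaining terms are not processed.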
+ break
+ else:
+ raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file'])
+
+ return ret
diff --git a/ansible_collections/community/general/plugins/lookup/tss.py b/ansible_collections/community/general/plugins/lookup/tss.py
new file mode 100644
index 000000000..935b5f4b4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/lookup/tss.py
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+name: tss
+author: Adam Migus (@amigus) <adam@migus.org>
+short_description: Get secrets from Thycotic Secret Server
+version_added: 1.0.0
+description:
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret
+ Server using token authentication with I(username) and I(password) on
+ the REST API at I(base_url).
+ - When using self-signed certificates the environment variable
+ C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates
+ (in C(.pem) format).
+ - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt').
+requirements:
+ - python-tss-sdk - U(https://pypi.org/project/python-tss-sdk/)
+options:
+ _terms:
+ description: The integer ID of the secret.
+ required: true
+ type: int
+ base_url:
+ description: The base URL of the server, e.g. C(https://localhost/SecretServer).
+ env:
+ - name: TSS_BASE_URL
+ ini:
+ - section: tss_lookup
+ key: base_url
+ required: true
+ username:
+ description: The username with which to request the OAuth2 Access Grant.
+ env:
+ - name: TSS_USERNAME
+ ini:
+ - section: tss_lookup
+ key: username
+ password:
+ description:
+ - The password associated with the supplied username.
+ - Required when I(token) is not provided.
+ env:
+ - name: TSS_PASSWORD
+ ini:
+ - section: tss_lookup
+ key: password
+ domain:
+ default: ""
+ description:
+ - The domain with which to request the OAuth2 Access Grant.
+ - Optional when I(token) is not provided.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ env:
+ - name: TSS_DOMAIN
+ ini:
+ - section: tss_lookup
+ key: domain
+ required: false
+ version_added: 3.6.0
+ token:
+ description:
+ - Existing token for Thycotic authorizer.
+ - If provided, I(username) and I(password) are not needed.
+ - Requires C(python-tss-sdk) version 1.0.0 or greater.
+ env:
+ - name: TSS_TOKEN
+ ini:
+ - section: tss_lookup
+ key: token
+ version_added: 3.7.0
+ api_path_uri:
+ default: /api/v1
+ description: The path to append to the base URL to form a valid REST
+ API request.
+ env:
+ - name: TSS_API_PATH_URI
+ required: false
+ token_path_uri:
+ default: /oauth2/token
+ description: The path to append to the base URL to form a valid OAuth2
+ Access Grant request.
+ env:
+ - name: TSS_TOKEN_PATH_URI
+ required: false
+"""
+
+RETURN = r"""
+_list:
+ description:
+ - The JSON responses to C(GET /secrets/{id}).
+ - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get).
+ type: list
+ elements: dict
+"""
+
+EXAMPLES = r"""
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
+
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password',
+ domain='domain'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
+
+- hosts: localhost
+ vars:
+ secret_password: >-
+ {{
+ ((lookup(
+ 'community.general.tss',
+ 102,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token',
+ ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password']
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: the password is {{ secret_password }}
+"""
+
+import abc
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils import six
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.display import Display
+
+try:
+ from thycotic.secrets.server import SecretServer, SecretServerError
+
+ HAS_TSS_SDK = True
+except ImportError:
+ try:
+ from delinea.secrets.server import SecretServer, SecretServerError
+
+ HAS_TSS_SDK = True
+ except ImportError:
+ SecretServer = None
+ SecretServerError = None
+ HAS_TSS_SDK = False
+
+try:
+ from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
+
+ HAS_TSS_AUTHORIZER = True
+except ImportError:
+ try:
+ from delinea.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer
+
+ HAS_TSS_AUTHORIZER = True
+ except ImportError:
+ PasswordGrantAuthorizer = None
+ DomainPasswordGrantAuthorizer = None
+ AccessTokenAuthorizer = None
+ HAS_TSS_AUTHORIZER = False
+
+
+display = Display()
+
+
+@six.add_metaclass(abc.ABCMeta)
+class TSSClient(object):
+ def __init__(self):
+ self._client = None
+
+ @staticmethod
+ def from_params(**server_parameters):
+ if HAS_TSS_AUTHORIZER:
+ return TSSClientV1(**server_parameters)
+ else:
+ return TSSClientV0(**server_parameters)
+
+ def get_secret(self, term):
+ display.debug("tss_lookup term: %s" % term)
+
+ secret_id = self._term_to_secret_id(term)
+ display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id)
+
+ return self._client.get_secret_json(secret_id)
+
+ @staticmethod
+ def _term_to_secret_id(term):
+ try:
+ return int(term)
+ except ValueError:
+ raise AnsibleOptionsError("Secret ID must be an integer")
+
+
+class TSSClientV0(TSSClient):
+ def __init__(self, **server_parameters):
+ super(TSSClientV0, self).__init__()
+
+ if server_parameters.get("domain"):
+ raise AnsibleError("The 'domain' option requires 'python-tss-sdk' version 1.0.0 or greater")
+
+ self._client = SecretServer(
+ server_parameters["base_url"],
+ server_parameters["username"],
+ server_parameters["password"],
+ server_parameters["api_path_uri"],
+ server_parameters["token_path_uri"],
+ )
+
+
+class TSSClientV1(TSSClient):
+ def __init__(self, **server_parameters):
+ super(TSSClientV1, self).__init__()
+
+ authorizer = self._get_authorizer(**server_parameters)
+ self._client = SecretServer(
+ server_parameters["base_url"], authorizer, server_parameters["api_path_uri"]
+ )
+
+ @staticmethod
+ def _get_authorizer(**server_parameters):
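+ # Authorizer precedence: an explicit token wins, then domain plus password, then plain username/password.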
+ if server_parameters.get("token"):
+ return AccessTokenAuthorizer(
+ server_parameters["token"],
+ )
+
+ if server_parameters.get("domain"):
+ return DomainPasswordGrantAuthorizer(
+ server_parameters["base_url"],
+ server_parameters["username"],
+ server_parameters["domain"],
+ server_parameters["password"],
+ server_parameters["token_path_uri"],
+ )
+
+ return PasswordGrantAuthorizer(
+ server_parameters["base_url"],
+ server_parameters["username"],
+ server_parameters["password"],
+ server_parameters["token_path_uri"],
+ )
+
+
+class LookupModule(LookupBase):
+ def run(self, terms, variables, **kwargs):
+ if not HAS_TSS_SDK:
+ raise AnsibleError("python-tss-sdk must be installed to use this plugin")
+
+ self.set_options(var_options=variables, direct=kwargs)
+
+ tss = TSSClient.from_params(
+ base_url=self.get_option("base_url"),
+ username=self.get_option("username"),
+ password=self.get_option("password"),
+ domain=self.get_option("domain"),
+ token=self.get_option("token"),
+ api_path_uri=self.get_option("api_path_uri"),
+ token_path_uri=self.get_option("token_path_uri"),
+ )
+
+ try:
+ return [tss.get_secret(term) for term in terms]
+ except SecretServerError as error:
+ raise AnsibleError("Secret Server lookup failure: %s" % error.message)
diff --git a/ansible_collections/community/general/plugins/module_utils/_filelock.py b/ansible_collections/community/general/plugins/module_utils/_filelock.py
new file mode 100644
index 000000000..a35d0b91c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/_filelock.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+# NOTE:
+# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import stat
+import time
+import fcntl
+import sys
+
+from contextlib import contextmanager
+
+
+class LockTimeout(Exception):
+ pass
+
+
+class FileLock:
+ '''
+ Currently FileLock is implemented via fcntl.flock on a lock file, however this
+ behaviour may change in the future. Avoid mixing lock types fcntl.flock,
+ fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause
+ unwanted and/or unexpected behaviour
+ '''
+ def __init__(self):
+ self.lockfd = None
+
+ @contextmanager
+ def lock_file(self, path, tmpdir, lock_timeout=None):
+ '''
+ Context for lock acquisition
+ '''
+ try:
+ self.set_lock(path, tmpdir, lock_timeout)
+ yield
+ finally:
+ self.unlock()
+
+ def set_lock(self, path, tmpdir, lock_timeout=None):
+ '''
+ Create a lock file based on path with flock to prevent other processes
+ using given path.
+ Please note that currently file locking only works when it's executed by
+ the same user, i.e. single-user scenarios
+
+ :kw path: Path (file) to lock
+ :kw tmpdir: Path where to place the temporary .lock file
+ :kw lock_timeout:
+ Wait n seconds for lock acquisition, fail if timeout is reached.
+ 0 = Do not wait, fail if the lock cannot be acquired immediately.
+ Default is None, wait indefinitely until lock is released.
+ :returns: True
+ '''
+ lock_path = os.path.join(tmpdir, 'ansible-{0}.lock'.format(os.path.basename(path)))
+ l_wait = 0.1
+ r_exception = IOError
+ if sys.version_info[0] == 3:
+ r_exception = BlockingIOError
+
+ self.lockfd = open(lock_path, 'w')
+
+ if lock_timeout is not None and lock_timeout <= 0:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+
+ if lock_timeout:
+ e_secs = 0
+ while e_secs < lock_timeout:
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+ return True
+ except r_exception:
+ time.sleep(l_wait)
+ e_secs += l_wait
+ continue
+
+ self.lockfd.close()
+ raise LockTimeout('{0} sec'.format(lock_timeout))
+
+ fcntl.flock(self.lockfd, fcntl.LOCK_EX)
+ os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD)
+
+ return True
+
+ def unlock(self):
+ '''
+ Make sure lock file is available for everyone and Unlock the file descriptor
+ locked by set_lock
+
+ :returns: True
+ '''
+ if not self.lockfd:
+ return True
+
+ try:
+ fcntl.flock(self.lockfd, fcntl.LOCK_UN)
+ self.lockfd.close()
+ except ValueError: # file wasn't opened, let context manager fail gracefully
+ pass
+
+ return True
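+
+# Minimal usage sketch (paths are hypothetical): hold the lock file around a
+# critical section; it is released when the context manager exits:
+#
+#     lock = FileLock()
+#     with lock.lock_file('/var/lib/app/state', '/tmp', lock_timeout=10):
+#         pass  # exclusive access to /var/lib/app/state here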
diff --git a/ansible_collections/community/general/plugins/module_utils/_mount.py b/ansible_collections/community/general/plugins/module_utils/_mount.py
new file mode 100644
index 000000000..63de457d7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/_mount.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is based on
+# Lib/posixpath.py of cpython
+#
+# Copyright (c) 2001-2022 Python Software Foundation. All rights reserved.
+# It is licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# (See LICENSES/PSF-2.0.txt in this collection)
+# SPDX-License-Identifier: PSF-2.0
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import os
+import stat
+
+
+def ismount(path):
+ """Test whether a path is a mount point
+ This is a copy of the upstream version of ismount(). Originally this was copied here as a workaround
+ until Python issue 2466 was fixed. Now it is here so this will work on older versions of Python
+ that may not have the upstream fix.
+ https://github.com/ansible/ansible-modules-core/issues/2186
+ http://bugs.python.org/issue2466
+ """
+ try:
+ s1 = os.lstat(path)
+ except (OSError, ValueError):
+ # It doesn't exist -- so not a mount point. :-)
+ return False
+ else:
+ # A symlink can never be a mount point
+ if stat.S_ISLNK(s1.st_mode):
+ return False
+
+ if isinstance(path, bytes):
+ parent = os.path.join(path, b'..')
+ else:
+ parent = os.path.join(path, '..')
+ parent = os.path.realpath(parent)
+ try:
+ s2 = os.lstat(parent)
+ except (OSError, ValueError):
+ return False
+
+ dev1 = s1.st_dev
+ dev2 = s2.st_dev
+ if dev1 != dev2:
+ return True # path/.. on a different device as path
+ ino1 = s1.st_ino
+ ino2 = s2.st_ino
+ if ino1 == ino2:
+ return True # path/.. is the same i-node as path
+ return False
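+
+# Usage sketch (results depend on the system):
+#
+#     ismount('/')           # True on a typical system
+#     ismount('/home')       # True only if /home is a separate mount
+#     ismount('/etc/hosts')  # False: a regular file is never a mount point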
diff --git a/ansible_collections/community/general/plugins/module_utils/_stormssh.py b/ansible_collections/community/general/plugins/module_utils/_stormssh.py
new file mode 100644
index 000000000..ec364b83d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/_stormssh.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is based on
+# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py
+# Copyright (C) <2013> <Emre Yilmaz>
+# SPDX-License-Identifier: MIT
+
+from __future__ import (absolute_import, division, print_function)
+import os
+import re
+import traceback
+from operator import itemgetter
+
+__metaclass__ = type
+
+try:
+ from paramiko.config import SSHConfig
+except ImportError:
+ SSHConfig = object
+ HAS_PARAMIKO = False
+ PARAMIKO_IMPORT_ERROR = traceback.format_exc()
+else:
+ HAS_PARAMIKO = True
+ PARAMIKO_IMPORT_ERROR = None
+
+
+class StormConfig(SSHConfig):
+ def parse(self, file_obj):
+ """
+ Read an OpenSSH config from the given file object.
+ @param file_obj: a file-like object to read the config file from
+ @type file_obj: file
+ """
+ order = 1
+ host = {"host": ['*'], "config": {}, }
+ for line in file_obj:
+ line = line.rstrip('\n').lstrip()
+ if line == '':
+ self._config.append({
+ 'type': 'empty_line',
+ 'value': line,
+ 'host': '',
+ 'order': order,
+ })
+ order += 1
+ continue
+
+ if line.startswith('#'):
+ self._config.append({
+ 'type': 'comment',
+ 'value': line,
+ 'host': '',
+ 'order': order,
+ })
+ order += 1
+ continue
+
+ if '=' in line:
+ # Ensure ProxyCommand gets properly split
+ if line.lower().strip().startswith('proxycommand'):
+ proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
+ match = proxy_re.match(line)
+ key, value = match.group(1).lower(), match.group(2)
+ else:
+ key, value = line.split('=', 1)
+ key = key.strip().lower()
+ else:
+ # find first whitespace, and split there
+ i = 0
+ while (i < len(line)) and not line[i].isspace():
+ i += 1
+ if i == len(line):
+ raise Exception('Unparsable line: %r' % line)
+ key = line[:i].lower()
+ value = line[i:].lstrip()
+ if key == 'host':
+ self._config.append(host)
+ value = value.split()
+ host = {
+ key: value,
+ 'config': {},
+ 'type': 'entry',
+ 'order': order
+ }
+ order += 1
+ elif key in ['identityfile', 'localforward', 'remoteforward']:
+ if key in host['config']:
+ host['config'][key].append(value)
+ else:
+ host['config'][key] = [value]
+ elif key not in host['config']:
+ host['config'].update({key: value})
+ self._config.append(host)
+
+
+class ConfigParser(object):
+ """
+ Config parser for ~/.ssh/config files.
+ """
+
+ def __init__(self, ssh_config_file=None):
+ if not ssh_config_file:
+ ssh_config_file = self.get_default_ssh_config_file()
+
+ self.defaults = {}
+
+ self.ssh_config_file = ssh_config_file
+
+ if not os.path.exists(self.ssh_config_file):
+ if not os.path.exists(os.path.dirname(self.ssh_config_file)):
+ os.makedirs(os.path.dirname(self.ssh_config_file))
+ open(self.ssh_config_file, 'w+').close()
+ os.chmod(self.ssh_config_file, 0o600)
+
+ self.config_data = []
+
+ def get_default_ssh_config_file(self):
+ return os.path.expanduser("~/.ssh/config")
+
+ def load(self):
+ config = StormConfig()
+
+ with open(self.ssh_config_file) as fd:
+ config.parse(fd)
+
+ for entry in config.__dict__.get("_config"):
+ if entry.get("host") == ["*"]:
+ self.defaults.update(entry.get("config"))
+
+ if entry.get("type") in ["comment", "empty_line"]:
+ self.config_data.append(entry)
+ continue
+
+ host_item = {
+ 'host': entry["host"][0],
+ 'options': entry.get("config"),
+ 'type': 'entry',
+ 'order': entry.get("order", 0),
+ }
+
+ if len(entry["host"]) > 1:
+ host_item.update({
+ 'host': " ".join(entry["host"]),
+ })
+ # minor bug in paramiko.SSHConfig that duplicates
+ # "Host *" entries.
+ if entry.get("config") and len(entry.get("config")) > 0:
+ self.config_data.append(host_item)
+
+ return self.config_data
+
+ def add_host(self, host, options):
+ self.config_data.append({
+ 'host': host,
+ 'options': options,
+ 'order': self.get_last_index(),
+ })
+
+ return self
+
+ def update_host(self, host, options, use_regex=False):
+ for index, host_entry in enumerate(self.config_data):
+ if host_entry.get("host") == host or \
+ (use_regex and re.match(host, host_entry.get("host"))):
+
+ if 'deleted_fields' in options:
+ deleted_fields = options.pop("deleted_fields")
+ for deleted_field in deleted_fields:
+ del self.config_data[index]["options"][deleted_field]
+
+ self.config_data[index]["options"].update(options)
+
+ return self
+
+ def search_host(self, search_string):
+ results = []
+ for host_entry in self.config_data:
+ if host_entry.get("type") != 'entry':
+ continue
+ if host_entry.get("host") == "*":
+ continue
+
+ searchable_information = host_entry.get("host")
+ for key, value in host_entry.get("options").items():
+ if isinstance(value, list):
+ value = " ".join(value)
+ if isinstance(value, int):
+ value = str(value)
+
+ searchable_information += " " + value
+
+ if search_string in searchable_information:
+ results.append(host_entry)
+
+ return results
+
+    def delete_host(self, host):
+        found = 0
+        # iterate over a copy: deleting from the list while enumerating it
+        # would skip the entry that follows each deleted item
+        for host_entry in list(self.config_data):
+            if host_entry.get("host") == host:
+                self.config_data.remove(host_entry)
+                found += 1
+
+        if found == 0:
+            raise ValueError('No host found')
+        return self
+
+ def delete_all_hosts(self):
+ self.config_data = []
+ self.write_to_ssh_config()
+
+ return self
+
+ def dump(self):
+ if len(self.config_data) < 1:
+ return
+
+ file_content = ""
+ self.config_data = sorted(self.config_data, key=itemgetter("order"))
+
+ for host_item in self.config_data:
+ if host_item.get("type") in ['comment', 'empty_line']:
+ file_content += host_item.get("value") + "\n"
+ continue
+ host_item_content = "Host {0}\n".format(host_item.get("host"))
+ for key, value in host_item.get("options").items():
+ if isinstance(value, list):
+ sub_content = ""
+ for value_ in value:
+ sub_content += " {0} {1}\n".format(
+ key, value_
+ )
+ host_item_content += sub_content
+ else:
+ host_item_content += " {0} {1}\n".format(
+ key, value
+ )
+ file_content += host_item_content
+
+ return file_content
+
+ def write_to_ssh_config(self):
+ with open(self.ssh_config_file, 'w+') as f:
+ data = self.dump()
+ if data:
+ f.write(data)
+ return self
+
+ def get_last_index(self):
+ last_index = 0
+ indexes = []
+ for item in self.config_data:
+ if item.get("order"):
+ indexes.append(item.get("order"))
+ if len(indexes) > 0:
+ last_index = max(indexes)
+
+ return last_index
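
A hedged round-trip sketch for the ConfigParser above, assuming paramiko is installed; a scratch path keeps the real ~/.ssh/config untouched, and the host values are illustrative:

    import os
    import tempfile

    from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser

    path = os.path.join(tempfile.mkdtemp(), 'config')   # created with mode 0600
    parser = ConfigParser(ssh_config_file=path)
    parser.load()
    parser.add_host('web01', {'hostname': 'web01.example.com', 'user': 'deploy'})
    parser.write_to_ssh_config()
    # The file now contains:
    #   Host web01
    #     hostname web01.example.com
    #     user deploy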
diff --git a/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py b/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
new file mode 100644
index 000000000..8210793c7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/alicloud_ecs.py
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import traceback
+from ansible.module_utils.basic import env_fallback
+
+try:
+ import footmark
+ import footmark.ecs
+ import footmark.slb
+ import footmark.vpc
+ import footmark.rds
+ import footmark.ess
+ import footmark.sts
+ import footmark.dns
+ import footmark.ram
+ import footmark.market
+
+ FOOTMARK_IMP_ERR = None
+ HAS_FOOTMARK = True
+except ImportError:
+ FOOTMARK_IMP_ERR = traceback.format_exc()
+ HAS_FOOTMARK = False
+
+
+class AnsibleACSError(Exception):
+ pass
+
+
+def acs_common_argument_spec():
+ return dict(
+ alicloud_access_key=dict(aliases=['access_key_id', 'access_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_ACCESS_KEY', 'ALICLOUD_ACCESS_KEY_ID'])),
+ alicloud_secret_key=dict(aliases=['secret_access_key', 'secret_key'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECRET_KEY', 'ALICLOUD_SECRET_ACCESS_KEY'])),
+ alicloud_security_token=dict(aliases=['security_token'], no_log=True,
+ fallback=(env_fallback, ['ALICLOUD_SECURITY_TOKEN'])),
+ ecs_role_name=dict(aliases=['role_name'], fallback=(env_fallback, ['ALICLOUD_ECS_ROLE_NAME']))
+ )
+
+
+def ecs_argument_spec():
+ spec = acs_common_argument_spec()
+ spec.update(
+ dict(
+ alicloud_region=dict(required=True, aliases=['region', 'region_id'],
+ fallback=(env_fallback, ['ALICLOUD_REGION', 'ALICLOUD_REGION_ID'])),
+ alicloud_assume_role_arn=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_ARN']),
+ aliases=['assume_role_arn']),
+ alicloud_assume_role_session_name=dict(fallback=(env_fallback, ['ALICLOUD_ASSUME_ROLE_SESSION_NAME']),
+ aliases=['assume_role_session_name']),
+ alicloud_assume_role_session_expiration=dict(type='int',
+ fallback=(env_fallback,
+ ['ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION']),
+ aliases=['assume_role_session_expiration']),
+ alicloud_assume_role=dict(type='dict', aliases=['assume_role']),
+ profile=dict(fallback=(env_fallback, ['ALICLOUD_PROFILE'])),
+ shared_credentials_file=dict(fallback=(env_fallback, ['ALICLOUD_SHARED_CREDENTIALS_FILE']))
+ )
+ )
+ return spec
+
+
+def get_acs_connection_info(params):
+
+ ecs_params = dict(acs_access_key_id=params.get('alicloud_access_key'),
+ acs_secret_access_key=params.get('alicloud_secret_key'),
+ security_token=params.get('alicloud_security_token'),
+ ecs_role_name=params.get('ecs_role_name'),
+ user_agent='Ansible-Provider-Alicloud')
+ return ecs_params
+
+
+def connect_to_acs(acs_module, region, **params):
+ conn = acs_module.connect_to_region(region, **params)
+ if not conn:
+ if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]:
+ raise AnsibleACSError(
+ "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__))
+ else:
+ raise AnsibleACSError(
+ "Unknown problem connecting to region %s for acs module %s." % (region, acs_module.__name__))
+ return conn
+
+
+def get_assume_role(params):
+ """ Return new params """
+ sts_params = get_acs_connection_info(params)
+ assume_role = {}
+ if params.get('assume_role'):
+ assume_role['alicloud_assume_role_arn'] = params['assume_role'].get('role_arn')
+ assume_role['alicloud_assume_role_session_name'] = params['assume_role'].get('session_name')
+ assume_role['alicloud_assume_role_session_expiration'] = params['assume_role'].get('session_expiration')
+ assume_role['alicloud_assume_role_policy'] = params['assume_role'].get('policy')
+
+ assume_role_params = {
+ 'role_arn': params.get('alicloud_assume_role_arn') if params.get('alicloud_assume_role_arn') else assume_role.get('alicloud_assume_role_arn'),
+ 'role_session_name': params.get('alicloud_assume_role_session_name') if params.get('alicloud_assume_role_session_name')
+ else assume_role.get('alicloud_assume_role_session_name'),
+ 'duration_seconds': params.get('alicloud_assume_role_session_expiration') if params.get('alicloud_assume_role_session_expiration')
+ else assume_role.get('alicloud_assume_role_session_expiration', 3600),
+ 'policy': assume_role.get('alicloud_assume_role_policy', {})
+ }
+
+    try:
+        sts = connect_to_acs(footmark.sts, params.get('alicloud_region'), **sts_params).assume_role(**assume_role_params).read()
+        sts_params['acs_access_key_id'], sts_params['acs_secret_access_key'], sts_params['security_token'] \
+            = sts['access_key_id'], sts['access_key_secret'], sts['security_token']
+    except AnsibleACSError:
+        # 'params' is a plain dict here, not an AnsibleModule, so it has no
+        # fail_json(); let the caller report the failure instead
+        raise
+    return sts_params
+
+
+def get_profile(params):
+ if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']:
+        path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.path.expanduser('~/.aliyun/config.json')
+ auth = {}
+ with open(path, 'r') as f:
+ for pro in json.load(f)['profiles']:
+ if params['profile'] == pro['name']:
+ auth = pro
+ if auth:
+ if auth['mode'] == 'AK' and auth.get('access_key_id') and auth.get('access_key_secret'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'StsToken' and auth.get('access_key_id') and auth.get('access_key_secret') and auth.get('sts_token'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'EcsRamRole':
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_acs_connection_info(params)
+ elif auth['mode'] == 'RamRoleArn' and auth.get('ram_role_arn'):
+ params['alicloud_access_key'] = auth.get('access_key_id')
+ params['alicloud_secret_key'] = auth.get('access_key_secret')
+ params['security_token'] = auth.get('sts_token')
+ params['ecs_role_name'] = auth.get('ram_role_name')
+ params['alicloud_assume_role_arn'] = auth.get('ram_role_arn')
+ params['alicloud_assume_role_session_name'] = auth.get('ram_session_name')
+ params['alicloud_assume_role_session_expiration'] = auth.get('expired_seconds')
+ params['alicloud_region'] = auth.get('region_id')
+ params = get_assume_role(params)
+ elif params.get('alicloud_assume_role_arn') or params.get('assume_role'):
+ params = get_assume_role(params)
+ else:
+ params = get_acs_connection_info(params)
+ return params
+
+
+def ecs_connect(module):
+    """ Return an ECS connection"""
+    ecs_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region; 'alicloud_region' is
+    # a required module parameter, so the branch below always runs.
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            ecs = connect_to_acs(footmark.ecs, region, **ecs_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return ecs
+
+
+def slb_connect(module):
+    """ Return an SLB connection"""
+    slb_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            slb = connect_to_acs(footmark.slb, region, **slb_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return slb
+
+
+def dns_connect(module):
+    """ Return a DNS connection"""
+    dns_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            dns = connect_to_acs(footmark.dns, region, **dns_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return dns
+
+
+def vpc_connect(module):
+    """ Return a VPC connection"""
+    vpc_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            vpc = connect_to_acs(footmark.vpc, region, **vpc_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return vpc
+
+
+def rds_connect(module):
+    """ Return an RDS connection"""
+    rds_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            rds = connect_to_acs(footmark.rds, region, **rds_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return rds
+
+
+def ess_connect(module):
+    """ Return an ESS connection"""
+    ess_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            ess = connect_to_acs(footmark.ess, region, **ess_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return ess
+
+
+def sts_connect(module):
+    """ Return an STS connection"""
+    sts_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            sts = connect_to_acs(footmark.sts, region, **sts_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return sts
+
+
+def ram_connect(module):
+    """ Return a RAM connection"""
+    ram_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            ram = connect_to_acs(footmark.ram, region, **ram_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return ram
+
+
+def market_connect(module):
+    """ Return a Market connection"""
+    market_params = get_profile(module.params)
+    # Connect to the endpoint of the specified region (required parameter).
+    region = module.params.get('alicloud_region')
+    if region:
+        try:
+            market = connect_to_acs(footmark.market, region, **market_params)
+        except AnsibleACSError as e:
+            module.fail_json(msg=str(e))
+    return market
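
A hedged sketch of how a cloud module would combine these helpers; the `instance_ids` parameter is hypothetical, and the footmark SDK plus `ALICLOUD_*` environment credentials are assumed:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
        ecs_argument_spec, ecs_connect)


    def main():
        argument_spec = ecs_argument_spec()
        argument_spec.update(instance_ids=dict(type='list', elements='str'))
        module = AnsibleModule(argument_spec=argument_spec)
        ecs = ecs_connect(module)   # footmark ECS connection for alicloud_region
        module.exit_json(changed=False, region=module.params['alicloud_region'])


    if __name__ == '__main__':
        main()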
diff --git a/ansible_collections/community/general/plugins/module_utils/btrfs.py b/ansible_collections/community/general/plugins/module_utils/btrfs.py
new file mode 100644
index 000000000..d9f979584
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/btrfs.py
@@ -0,0 +1,464 @@
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.text.converters import to_bytes
+import re
+import os
+
+
+def normalize_subvolume_path(path):
+ """
+ Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes.
+ In addition, if the path is prefixed with a leading <FS_TREE>, this value is removed.
+ """
+ fstree_stripped = re.sub(r'^<FS_TREE>', '', path)
+ result = re.sub(r'/+$', '', re.sub(r'/+', '/', '/' + fstree_stripped))
+ return result if len(result) > 0 else '/'
+
+
+class BtrfsModuleException(Exception):
+ pass
+
+
+class BtrfsCommands(object):
+
+ """
+ Provides access to a subset of the Btrfs command line
+ """
+
+ def __init__(self, module):
+ self.__module = module
+ self.__btrfs = self.__module.get_bin_path("btrfs", required=True)
+
+ def filesystem_show(self):
+ command = "%s filesystem show -d" % (self.__btrfs)
+ result = self.__module.run_command(command, check_rc=True)
+ stdout = [x.strip() for x in result[1].splitlines()]
+ filesystems = []
+ current = None
+ for line in stdout:
+ if line.startswith('Label'):
+ current = self.__parse_filesystem(line)
+ filesystems.append(current)
+ elif line.startswith('devid'):
+ current['devices'].append(self.__parse_filesystem_device(line))
+ return filesystems
+
+ def __parse_filesystem(self, line):
+ label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line))
+ id = re.sub(r'^.*uuid:\s*', '', line)
+
+ filesystem = {}
+ filesystem['label'] = label.strip("'") if label != 'none' else None
+ filesystem['uuid'] = id
+ filesystem['devices'] = []
+ filesystem['mountpoints'] = []
+ filesystem['subvolumes'] = []
+ filesystem['default_subvolid'] = None
+ return filesystem
+
+ def __parse_filesystem_device(self, line):
+ return re.sub(r'^.*path\s', '', line)
+
+ def subvolumes_list(self, filesystem_path):
+ command = "%s subvolume list -tap %s" % (self.__btrfs, filesystem_path)
+ result = self.__module.run_command(command, check_rc=True)
+ stdout = [x.split('\t') for x in result[1].splitlines()]
+ subvolumes = [{'id': 5, 'parent': None, 'path': '/'}]
+ if len(stdout) > 2:
+ subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]])
+ return subvolumes
+
+ def __parse_subvolume_list_record(self, item):
+ return {
+ 'id': int(item[0]),
+ 'parent': int(item[2]),
+ 'path': normalize_subvolume_path(item[5]),
+ }
+
+ def subvolume_get_default(self, filesystem_path):
+ command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)]
+ result = self.__module.run_command(command, check_rc=True)
+ # ID [n] ...
+ return int(result[1].strip().split()[1])
+
+ def subvolume_set_default(self, filesystem_path, subvolume_id):
+ command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)]
+ result = self.__module.run_command(command, check_rc=True)
+
+ def subvolume_create(self, subvolume_path):
+ command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)]
+ result = self.__module.run_command(command, check_rc=True)
+
+ def subvolume_snapshot(self, snapshot_source, snapshot_destination):
+ command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)]
+ result = self.__module.run_command(command, check_rc=True)
+
+ def subvolume_delete(self, subvolume_path):
+ command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)]
+ result = self.__module.run_command(command, check_rc=True)
+
+
+class BtrfsInfoProvider(object):
+
+ """
+ Utility providing details of the currently available btrfs filesystems
+ """
+
+ def __init__(self, module):
+ self.__module = module
+ self.__btrfs_api = BtrfsCommands(module)
+ self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True)
+
+ def get_filesystems(self):
+ filesystems = self.__btrfs_api.filesystem_show()
+ mountpoints = self.__find_mountpoints()
+ for filesystem in filesystems:
+ device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices'])
+ filesystem['mountpoints'] = device_mountpoints
+
+ if len(device_mountpoints) > 0:
+
+ # any path within the filesystem can be used to query metadata
+ mountpoint = device_mountpoints[0]['mountpoint']
+ filesystem['subvolumes'] = self.get_subvolumes(mountpoint)
+ filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint)
+
+ return filesystems
+
+ def get_mountpoints(self, filesystem_devices):
+ mountpoints = self.__find_mountpoints()
+ return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices)
+
+ def get_subvolumes(self, filesystem_path):
+ return self.__btrfs_api.subvolumes_list(filesystem_path)
+
+ def get_default_subvolume_id(self, filesystem_path):
+ return self.__btrfs_api.subvolume_get_default(filesystem_path)
+
+ def __filter_mountpoints_for_devices(self, mountpoints, devices):
+ return [m for m in mountpoints if (m['device'] in devices)]
+
+ def __find_mountpoints(self):
+ command = "%s -t btrfs -nvP" % self.__findmnt_path
+ result = self.__module.run_command(command)
+ mountpoints = []
+ if result[0] == 0:
+ lines = result[1].splitlines()
+ for line in lines:
+ mountpoint = self.__parse_mountpoint_pairs(line)
+ mountpoints.append(mountpoint)
+ return mountpoints
+
+ def __parse_mountpoint_pairs(self, line):
+ pattern = re.compile(r'^TARGET="(?P<target>.*)"\s+SOURCE="(?P<source>.*)"\s+FSTYPE="(?P<fstype>.*)"\s+OPTIONS="(?P<options>.*)"\s*$')
+ match = pattern.search(line)
+ if match is not None:
+ groups = match.groupdict()
+
+ return {
+ 'mountpoint': groups['target'],
+ 'device': groups['source'],
+ 'subvolid': self.__extract_mount_subvolid(groups['options']),
+ }
+ else:
+ raise BtrfsModuleException("Failed to parse findmnt result for line: '%s'" % line)
+
+ def __extract_mount_subvolid(self, mount_options):
+ for option in mount_options.split(','):
+ if option.startswith('subvolid='):
+ return int(option[len('subvolid='):])
+ raise BtrfsModuleException("Failed to find subvolid for mountpoint in options '%s'" % mount_options)
+
+
+class BtrfsSubvolume(object):
+
+ """
+ Wrapper class providing convenience methods for inspection of a btrfs subvolume
+ """
+
+ def __init__(self, filesystem, subvolume_id):
+ self.__filesystem = filesystem
+ self.__subvolume_id = subvolume_id
+
+ def get_filesystem(self):
+ return self.__filesystem
+
+ def is_mounted(self):
+ mountpoints = self.get_mountpoints()
+ return mountpoints is not None and len(mountpoints) > 0
+
+ def is_filesystem_root(self):
+ return 5 == self.__subvolume_id
+
+ def is_filesystem_default(self):
+ return self.__filesystem.default_subvolid == self.__subvolume_id
+
+ def get_mounted_path(self):
+ mountpoints = self.get_mountpoints()
+ if mountpoints is not None and len(mountpoints) > 0:
+ return mountpoints[0]
+ elif self.parent is not None:
+ parent = self.__filesystem.get_subvolume_by_id(self.parent)
+ parent_path = parent.get_mounted_path()
+ if parent_path is not None:
+ return parent_path + os.path.sep + self.name
+ else:
+ return None
+
+ def get_mountpoints(self):
+ return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id)
+
+ def get_child_relative_path(self, absolute_child_path):
+ """
+ Get the relative path from this subvolume to the named child subvolume.
+ The provided parameter is expected to be normalized as by normalize_subvolume_path.
+ """
+ path = self.path
+ if absolute_child_path.startswith(path):
+ relative = absolute_child_path[len(path):]
+ return re.sub(r'^/*', '', relative)
+ else:
+ raise BtrfsModuleException("Path '%s' doesn't start with '%s'" % (absolute_child_path, path))
+
+ def get_parent_subvolume(self):
+ parent_id = self.parent
+ return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None
+
+ def get_child_subvolumes(self):
+ return self.__filesystem.get_subvolume_children(self.__subvolume_id)
+
+ @property
+ def __info(self):
+ return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id)
+
+ @property
+ def id(self):
+ return self.__subvolume_id
+
+ @property
+ def name(self):
+ return self.path.split('/').pop()
+
+ @property
+ def path(self):
+ return self.__info['path']
+
+ @property
+ def parent(self):
+ return self.__info['parent']
+
+
+class BtrfsFilesystem(object):
+
+ """
+ Wrapper class providing convenience methods for inspection of a btrfs filesystem
+ """
+
+ def __init__(self, info, provider, module):
+ self.__provider = provider
+
+ # constant for module execution
+ self.__uuid = info['uuid']
+ self.__label = info['label']
+ self.__devices = info['devices']
+
+ # refreshable
+ self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None
+ self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else [])
+ self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else [])
+
+ @property
+ def uuid(self):
+ return self.__uuid
+
+ @property
+ def label(self):
+ return self.__label
+
+ @property
+ def default_subvolid(self):
+ return self.__default_subvolid
+
+ @property
+ def devices(self):
+ return list(self.__devices)
+
+ def refresh(self):
+ self.refresh_mountpoints()
+ self.refresh_subvolumes()
+ self.refresh_default_subvolume()
+
+ def refresh_mountpoints(self):
+ mountpoints = self.__provider.get_mountpoints(list(self.__devices))
+ self.__update_mountpoints(mountpoints)
+
+ def __update_mountpoints(self, mountpoints):
+ self.__mountpoints = dict()
+ for i in mountpoints:
+ subvolid = i['subvolid']
+ mountpoint = i['mountpoint']
+ if subvolid not in self.__mountpoints:
+ self.__mountpoints[subvolid] = []
+ self.__mountpoints[subvolid].append(mountpoint)
+
+ def refresh_subvolumes(self):
+ filesystem_path = self.get_any_mountpoint()
+ if filesystem_path is not None:
+ subvolumes = self.__provider.get_subvolumes(filesystem_path)
+ self.__update_subvolumes(subvolumes)
+
+ def __update_subvolumes(self, subvolumes):
+ # TODO strategy for retaining information on deleted subvolumes?
+ self.__subvolumes = dict()
+ for subvolume in subvolumes:
+ self.__subvolumes[subvolume['id']] = subvolume
+
+ def refresh_default_subvolume(self):
+ filesystem_path = self.get_any_mountpoint()
+ if filesystem_path is not None:
+ self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path)
+
+ def contains_device(self, device):
+ return device in self.__devices
+
+ def contains_subvolume(self, subvolume):
+ return self.get_subvolume_by_name(subvolume) is not None
+
+ def get_subvolume_by_id(self, subvolume_id):
+ return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None
+
+ def get_subvolume_info_for_id(self, subvolume_id):
+ return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None
+
+ def get_subvolume_by_name(self, subvolume):
+ for subvolume_info in self.__subvolumes.values():
+ if subvolume_info['path'] == subvolume:
+ return BtrfsSubvolume(self, subvolume_info['id'])
+ return None
+
+ def get_any_mountpoint(self):
+ for subvol_mountpoints in self.__mountpoints.values():
+ if len(subvol_mountpoints) > 0:
+ return subvol_mountpoints[0]
+ # maybe error?
+ return None
+
+ def get_any_mounted_subvolume(self):
+ for subvolid, subvol_mountpoints in self.__mountpoints.items():
+ if len(subvol_mountpoints) > 0:
+ return self.get_subvolume_by_id(subvolid)
+ return None
+
+ def get_mountpoints_by_subvolume_id(self, subvolume_id):
+ return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else []
+
+ def get_nearest_subvolume(self, subvolume):
+ """Return the identified subvolume if existing, else the closest matching parent"""
+ subvolumes_by_path = self.__get_subvolumes_by_path()
+ while len(subvolume) > 1:
+ if subvolume in subvolumes_by_path:
+ return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id'])
+ else:
+ subvolume = re.sub(r'/[^/]+$', '', subvolume)
+
+ return BtrfsSubvolume(self, 5)
+
+ def get_mountpath_as_child(self, subvolume_name):
+ """Find a path to the target subvolume through a mounted ancestor"""
+ nearest = self.get_nearest_subvolume(subvolume_name)
+ if nearest.path == subvolume_name:
+ nearest = nearest.get_parent_subvolume()
+ if nearest is None or nearest.get_mounted_path() is None:
+ raise BtrfsModuleException("Failed to find a path '%s' through a mounted parent subvolume" % subvolume_name)
+ else:
+ return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name)
+
+ def get_subvolume_children(self, subvolume_id):
+ return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id]
+
+ def __get_subvolumes_by_path(self):
+ result = {}
+ for s in self.__subvolumes.values():
+ path = s['path']
+ result[path] = s
+ return result
+
+ def is_mounted(self):
+ return self.__mountpoints is not None and len(self.__mountpoints) > 0
+
+ def get_summary(self):
+ subvolumes = []
+ sources = self.__subvolumes.values() if self.__subvolumes is not None else []
+ for subvolume in sources:
+ id = subvolume['id']
+ subvolumes.append({
+ 'id': id,
+ 'path': subvolume['path'],
+ 'parent': subvolume['parent'],
+ 'mountpoints': self.get_mountpoints_by_subvolume_id(id),
+ })
+
+ return {
+ 'default_subvolume': self.__default_subvolid,
+ 'devices': self.__devices,
+ 'label': self.__label,
+ 'uuid': self.__uuid,
+ 'subvolumes': subvolumes,
+ }
+
+
+class BtrfsFilesystemsProvider(object):
+
+ """
+ Provides methods to query available btrfs filesystems
+ """
+
+ def __init__(self, module):
+ self.__module = module
+ self.__provider = BtrfsInfoProvider(module)
+ self.__filesystems = None
+
+ def get_matching_filesystem(self, criteria):
+ if criteria['device'] is not None:
+ criteria['device'] = os.path.realpath(criteria['device'])
+
+ self.__check_init()
+ matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)]
+ if len(matching) == 1:
+ return matching[0]
+ else:
+ raise BtrfsModuleException("Found %d filesystems matching criteria uuid=%s label=%s device=%s" % (
+ len(matching),
+ criteria['uuid'],
+ criteria['label'],
+ criteria['device']
+ ))
+
+ def __filesystem_matches_criteria(self, filesystem, criteria):
+ return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and
+ (criteria['label'] is None or filesystem.label == criteria['label']) and
+ (criteria['device'] is None or filesystem.contains_device(criteria['device'])))
+
+ def get_filesystem_for_device(self, device):
+ real_device = os.path.realpath(device)
+ self.__check_init()
+ for fs in self.__filesystems.values():
+ if fs.contains_device(real_device):
+ return fs
+ return None
+
+ def get_filesystems(self):
+ self.__check_init()
+ return list(self.__filesystems.values())
+
+ def __check_init(self):
+ if self.__filesystems is None:
+ self.__filesystems = dict()
+ for f in self.__provider.get_filesystems():
+ uuid = f['uuid']
+ self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module)
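
A hedged sketch of the provider classes above as a module might use them; `/dev/sda1` and `/@home` are illustrative, and the btrfs and findmnt binaries are assumed to be available to the module host:

    from ansible_collections.community.general.plugins.module_utils.btrfs import (
        BtrfsFilesystemsProvider, normalize_subvolume_path)

    # normalize_subvolume_path('<FS_TREE>/@home//') returns '/@home'

    def run(module):
        provider = BtrfsFilesystemsProvider(module)
        criteria = {'uuid': None, 'label': None, 'device': '/dev/sda1'}
        filesystem = provider.get_matching_filesystem(criteria)
        subvolume = filesystem.get_subvolume_by_name('/@home')
        module.exit_json(changed=False, found=subvolume is not None,
                         summary=filesystem.get_summary())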
diff --git a/ansible_collections/community/general/plugins/module_utils/cloud.py b/ansible_collections/community/general/plugins/module_utils/cloud.py
new file mode 100644
index 000000000..092fe16ad
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/cloud.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Allen Sanabria, <asanabria@linuxdynasty.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+This module adds shared support for generic cloud modules
+
+In order to use this module, include it as part of a custom
+module as shown below.
+
+from ansible.module_utils.cloud import CloudRetry
+
+The 'cloud' module provides the following common classes:
+
+ * CloudRetry
+ - The base class to be used by other cloud providers, in order to
+ provide a backoff/retry decorator based on status codes.
+
+ - Example using the AWSRetry class which inherits from CloudRetry.
+
+    @AWSRetry.exponential_backoff(retries=10, delay=3)
+    def get_ec2_security_group_ids_from_names():
+        ...
+
+    @AWSRetry.jittered_backoff()
+    def get_ec2_security_group_ids_from_names():
+        ...
+
+"""
+import random
+from functools import wraps
+import syslog
+import time
+
+
+def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
+ """ Customizable exponential backoff strategy.
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Initial (base) delay.
+ backoff (float): base of the exponent to use for exponential
+ backoff.
+ max_delay (int): Optional. If provided each delay generated is capped
+ at this amount. Defaults to 60 seconds.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for an exponential backoff strategy.
+ Usage:
+ >>> backoff = _exponential_backoff()
+ >>> backoff
+    <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ sleep = delay * backoff ** retry
+ yield sleep if max_delay is None else min(sleep, max_delay)
+ return backoff_gen
+
+
+def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
+ """ Implements the "Full Jitter" backoff strategy described here
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+ Args:
+ retries (int): Maximum number of times to retry a request.
+ delay (float): Approximate number of seconds to sleep for the first
+ retry.
+ max_delay (int): The maximum number of seconds to sleep for any retry.
+ _random (random.Random or None): Makes this generator testable by
+            allowing developers to explicitly pass in a seeded Random.
+ Returns:
+ Callable that returns a generator. This generator yields durations in
+ seconds to be used as delays for a full jitter backoff strategy.
+ Usage:
+ >>> backoff = _full_jitter_backoff(retries=5)
+ >>> backoff
+    <function backoff_gen at 0x7f0d939facf8>
+ >>> list(backoff())
+ [3, 6, 5, 23, 38]
+ >>> list(backoff())
+ [2, 1, 6, 6, 31]
+ """
+ def backoff_gen():
+ for retry in range(0, retries):
+ yield _random.randint(0, min(max_delay, delay * 2 ** retry))
+ return backoff_gen
+
+
+class CloudRetry(object):
+ """ CloudRetry can be used by any cloud provider, in order to implement a
+ backoff algorithm/retry effect based on Status Code from Exceptions.
+ """
+    # This is the base class of the exception.
+    # AWS example: botocore.exceptions.ClientError
+ base_class = None
+
+ @staticmethod
+ def status_code_from_exception(error):
+ """ Return the status code from the exception object
+ Args:
+ error (object): The exception itself.
+ """
+ pass
+
+ @staticmethod
+ def found(response_code, catch_extra_error_codes=None):
+ """ Return True if the Response Code to retry on was found.
+ Args:
+ response_code (str): This is the Response Code that is being matched against.
+ """
+ pass
+
+ @classmethod
+ def _backoff(cls, backoff_strategy, catch_extra_error_codes=None):
+ """ Retry calling the Cloud decorated function using the provided
+ backoff strategy.
+ Args:
+ backoff_strategy (callable): Callable that returns a generator. The
+ generator should yield sleep times for each retry of the decorated
+ function.
+ """
+ def deco(f):
+ @wraps(f)
+ def retry_func(*args, **kwargs):
+ for delay in backoff_strategy():
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type
+ response_code = cls.status_code_from_exception(e)
+ if cls.found(response_code, catch_extra_error_codes):
+ msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
+ syslog.syslog(syslog.LOG_INFO, msg)
+ time.sleep(delay)
+                            else:
+                                # The status code is not one we retry on, so re-raise
+                                raise e
+                        else:
+                            # Re-raise exceptions that are not instances of base_class
+                            # (for AWS, anything that is not a ClientError)
+                            raise e
+ return f(*args, **kwargs)
+
+ return retry_func # true decorator
+
+ return deco
+
+ @classmethod
+ def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+                default=2
+ max_delay (int or None): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_exponential_backoff(
+ retries=retries, delay=delay, backoff=backoff, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def jittered_backoff(cls, retries=10, delay=3, max_delay=60, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using a jittered backoff
+ strategy. More on this strategy here:
+
+ https://www.awsarchitectureblog.com/2015/03/backoff.html
+
+ Kwargs:
+ retries (int): Number of times to retry a failed request before giving up
+ default=10
+ delay (int): Initial delay between retries in seconds
+ default=3
+ max_delay (int): maximum amount of time to wait between retries.
+ default=60
+ """
+ return cls._backoff(_full_jitter_backoff(
+ retries=retries, delay=delay, max_delay=max_delay), catch_extra_error_codes)
+
+ @classmethod
+ def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None):
+ """
+ Retry calling the Cloud decorated function using an exponential backoff.
+
+ Compatibility for the original implementation of CloudRetry.backoff that
+ did not provide configurable backoff strategies. Developers should use
+ CloudRetry.exponential_backoff instead.
+
+ Kwargs:
+ tries (int): Number of times to try (not retry) before giving up
+ default=10
+ delay (int or float): Initial delay between retries in seconds
+ default=3
+ backoff (int or float): backoff multiplier e.g. value of 2 will
+ double the delay each retry
+ default=1.1
+ """
+ return cls.exponential_backoff(
+ retries=tries - 1, delay=delay, backoff=backoff, max_delay=None, catch_extra_error_codes=catch_extra_error_codes)
diff --git a/ansible_collections/community/general/plugins/module_utils/cmd_runner.py b/ansible_collections/community/general/plugins/module_utils/cmd_runner.py
new file mode 100644
index 000000000..21d61a6a5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/cmd_runner.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import wraps
+
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six import iteritems
+
+
+def _ensure_list(value):
+ return list(value) if is_sequence(value) else [value]
+
+
+def _process_as_is(rc, out, err):
+ return rc, out, err
+
+
+class CmdRunnerException(Exception):
+ pass
+
+
+class MissingArgumentFormat(CmdRunnerException):
+ def __init__(self, arg, args_order, args_formats):
+ self.args_order = args_order
+ self.arg = arg
+ self.args_formats = args_formats
+
+ def __repr__(self):
+ return "MissingArgumentFormat({0!r}, {1!r}, {2!r})".format(
+ self.arg,
+ self.args_order,
+ self.args_formats,
+ )
+
+ def __str__(self):
+ return "Cannot find format for parameter {0} {1} in: {2}".format(
+ self.arg,
+ self.args_order,
+ self.args_formats,
+ )
+
+
+class MissingArgumentValue(CmdRunnerException):
+ def __init__(self, args_order, arg):
+ self.args_order = args_order
+ self.arg = arg
+
+ def __repr__(self):
+ return "MissingArgumentValue({0!r}, {1!r})".format(
+ self.args_order,
+ self.arg,
+ )
+
+ def __str__(self):
+ return "Cannot find value for parameter {0} in {1}".format(
+ self.arg,
+ self.args_order,
+ )
+
+
+class FormatError(CmdRunnerException):
+ def __init__(self, name, value, args_formats, exc):
+ self.name = name
+ self.value = value
+ self.args_formats = args_formats
+ self.exc = exc
+ super(FormatError, self).__init__()
+
+ def __repr__(self):
+ return "FormatError({0!r}, {1!r}, {2!r}, {3!r})".format(
+ self.name,
+ self.value,
+ self.args_formats,
+ self.exc,
+ )
+
+ def __str__(self):
+ return "Failed to format parameter {0} with value {1}: {2}".format(
+ self.name,
+ self.value,
+ self.exc,
+ )
+
+
+class _ArgFormat(object):
+ def __init__(self, func, ignore_none=None, ignore_missing_value=False):
+ self.func = func
+ self.ignore_none = ignore_none
+ self.ignore_missing_value = ignore_missing_value
+
+ def __call__(self, value, ctx_ignore_none):
+ ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none
+ if value is None and ignore_none:
+ return []
+ f = self.func
+ return [str(x) for x in f(value)]
+
+
+class _Format(object):
+ @staticmethod
+ def as_bool(args_true, args_false=None, ignore_none=None):
+ if args_false is not None:
+ if ignore_none is None:
+ ignore_none = False
+ else:
+ args_false = []
+ return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
+
+ @staticmethod
+ def as_bool_not(args):
+ return _ArgFormat(lambda value: [] if value else _ensure_list(args), ignore_none=False)
+
+ @staticmethod
+ def as_optval(arg, ignore_none=None):
+ return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none)
+
+ @staticmethod
+ def as_opt_val(arg, ignore_none=None):
+ return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none)
+
+ @staticmethod
+ def as_opt_eq_val(arg, ignore_none=None):
+ return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none)
+
+ @staticmethod
+ def as_list(ignore_none=None):
+ return _ArgFormat(_ensure_list, ignore_none=ignore_none)
+
+ @staticmethod
+ def as_fixed(args):
+ return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
+
+ @staticmethod
+ def as_func(func, ignore_none=None):
+ return _ArgFormat(func, ignore_none=ignore_none)
+
+ @staticmethod
+ def as_map(_map, default=None, ignore_none=None):
+ if default is None:
+ default = []
+ return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none)
+
+ @staticmethod
+ def as_default_type(_type, arg="", ignore_none=None):
+ fmt = _Format
+ if _type == "dict":
+ return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none)
+ if _type == "list":
+ return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none)
+ if _type == "bool":
+ return fmt.as_bool("--{0}".format(arg))
+
+ return fmt.as_opt_val("--{0}".format(arg), ignore_none=ignore_none)
+
+ @staticmethod
+ def unpack_args(func):
+ @wraps(func)
+ def wrapper(v):
+ return func(*v)
+ return wrapper
+
+ @staticmethod
+ def unpack_kwargs(func):
+ @wraps(func)
+ def wrapper(v):
+ return func(**v)
+ return wrapper
+
+
+class CmdRunner(object):
+ """
+ Wrapper for ``AnsibleModule.run_command()``.
+
+ It aims to provide a reusable runner with consistent argument formatting
+ and sensible defaults.
+ """
+
+ @staticmethod
+ def _prepare_args_order(order):
+ return tuple(order) if is_sequence(order) else tuple(order.split())
+
+ def __init__(self, module, command, arg_formats=None, default_args_order=(),
+ check_rc=False, force_lang="C", path_prefix=None, environ_update=None):
+ self.module = module
+ self.command = _ensure_list(command)
+ self.default_args_order = self._prepare_args_order(default_args_order)
+ if arg_formats is None:
+ arg_formats = {}
+ self.arg_formats = dict(arg_formats)
+ self.check_rc = check_rc
+ self.force_lang = force_lang
+ self.path_prefix = path_prefix
+ if environ_update is None:
+ environ_update = {}
+ self.environ_update = environ_update
+
+ self.command[0] = module.get_bin_path(self.command[0], opt_dirs=path_prefix, required=True)
+
+ for mod_param_name, spec in iteritems(module.argument_spec):
+ if mod_param_name not in self.arg_formats:
+ self.arg_formats[mod_param_name] = _Format.as_default_type(spec['type'], mod_param_name)
+
+ def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs):
+ if output_process is None:
+ output_process = _process_as_is
+ if args_order is None:
+ args_order = self.default_args_order
+ args_order = self._prepare_args_order(args_order)
+ for p in args_order:
+ if p not in self.arg_formats:
+ raise MissingArgumentFormat(p, args_order, tuple(self.arg_formats.keys()))
+ return _CmdRunnerContext(runner=self,
+ args_order=args_order,
+ output_process=output_process,
+ ignore_value_none=ignore_value_none,
+ check_mode_skip=check_mode_skip,
+ check_mode_return=check_mode_return, **kwargs)
+
+ def has_arg_format(self, arg):
+ return arg in self.arg_formats
+
+    # It is undecided whether to keep this alias; if it is ever deprecated, that will happen in a distant future release.
+ context = __call__
+
+
+class _CmdRunnerContext(object):
+ def __init__(self, runner, args_order, output_process, ignore_value_none, check_mode_skip, check_mode_return, **kwargs):
+ self.runner = runner
+ self.args_order = tuple(args_order)
+ self.output_process = output_process
+ self.ignore_value_none = ignore_value_none
+ self.check_mode_skip = check_mode_skip
+ self.check_mode_return = check_mode_return
+ self.run_command_args = dict(kwargs)
+
+        # copy the runner's dict so per-context updates do not leak back
+        # into the shared runner state
+        self.environ_update = dict(runner.environ_update)
+        self.environ_update.update(self.run_command_args.get('environ_update', {}))
+ if runner.force_lang:
+ self.environ_update.update({
+ 'LANGUAGE': runner.force_lang,
+ 'LC_ALL': runner.force_lang,
+ })
+ self.run_command_args['environ_update'] = self.environ_update
+
+ if 'check_rc' not in self.run_command_args:
+ self.run_command_args['check_rc'] = runner.check_rc
+ self.check_rc = self.run_command_args['check_rc']
+
+ self.cmd = None
+ self.results_rc = None
+ self.results_out = None
+ self.results_err = None
+ self.results_processed = None
+
+ def run(self, **kwargs):
+ runner = self.runner
+ module = self.runner.module
+ self.cmd = list(runner.command)
+ self.context_run_args = dict(kwargs)
+
+ named_args = dict(module.params)
+ named_args.update(kwargs)
+ for arg_name in self.args_order:
+ value = None
+ try:
+ if arg_name in named_args:
+ value = named_args[arg_name]
+ elif not runner.arg_formats[arg_name].ignore_missing_value:
+ raise MissingArgumentValue(self.args_order, arg_name)
+ self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none))
+ except MissingArgumentValue:
+ raise
+ except Exception as e:
+ raise FormatError(arg_name, value, runner.arg_formats[arg_name], e)
+
+ if self.check_mode_skip and module.check_mode:
+ return self.check_mode_return
+ results = module.run_command(self.cmd, **self.run_command_args)
+ self.results_rc, self.results_out, self.results_err = results
+ self.results_processed = self.output_process(*results)
+ return self.results_processed
+
+ @property
+ def run_info(self):
+ return dict(
+ ignore_value_none=self.ignore_value_none,
+ check_rc=self.check_rc,
+ environ_update=self.environ_update,
+ args_order=self.args_order,
+ cmd=self.cmd,
+ run_command_args=self.run_command_args,
+ context_run_args=self.context_run_args,
+ results_rc=self.results_rc,
+ results_out=self.results_out,
+ results_err=self.results_err,
+ results_processed=self.results_processed,
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
+
+
+cmd_runner_fmt = _Format()
+
+#
+# The fmt form is deprecated and will be removed in community.general 7.0.0
+# Please use:
+# cmd_runner_fmt
+# Or, to retain the same effect, use:
+# from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt as fmt
+fmt = cmd_runner_fmt
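
A hedged usage sketch wrapping the `echo` binary; the `message` and `upper` parameter names and the `--upper` flag are illustrative, and every module parameter is assumed to either appear in `arg_formats` or declare an explicit `type` in `argument_spec`:

    from ansible_collections.community.general.plugins.module_utils.cmd_runner import (
        CmdRunner, cmd_runner_fmt)


    def run_module(module):
        runner = CmdRunner(
            module,
            command='echo',
            arg_formats=dict(
                message=cmd_runner_fmt.as_list(),
                upper=cmd_runner_fmt.as_bool('--upper'),   # illustrative flag
            ),
        )
        # Builds and runs: <path-to-echo> --upper hello  (order follows args_order)
        with runner('upper message') as ctx:
            rc, out, err = ctx.run(upper=True, message='hello')
        return out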
diff --git a/ansible_collections/community/general/plugins/module_utils/csv.py b/ansible_collections/community/general/plugins/module_utils/csv.py
new file mode 100644
index 000000000..200548a46
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/csv.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andrew Pantuso (@ajpantuso) <ajpantuso@gmail.com>
+# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import csv
+from io import BytesIO, StringIO
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import PY3
+
+
+class CustomDialectFailureError(Exception):
+ pass
+
+
+class DialectNotAvailableError(Exception):
+ pass
+
+
+CSVError = csv.Error
+
+
+def initialize_dialect(dialect, **kwargs):
+ # Add Unix dialect from Python 3
+ class unix_dialect(csv.Dialect):
+ """Describe the usual properties of Unix-generated CSV files."""
+ delimiter = ','
+ quotechar = '"'
+ doublequote = True
+ skipinitialspace = False
+ lineterminator = '\n'
+ quoting = csv.QUOTE_ALL
+
+ csv.register_dialect("unix", unix_dialect)
+
+ if dialect not in csv.list_dialects():
+        raise DialectNotAvailableError("Dialect '%s' is not supported by your version of Python." % dialect)
+
+ # Create a dictionary from only set options
+ dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None)
+ if dialect_params:
+ try:
+ csv.register_dialect('custom', dialect, **dialect_params)
+ except TypeError as e:
+ raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e))
+ dialect = 'custom'
+
+ return dialect
+
+
+def read_csv(data, dialect, fieldnames=None):
+ BOM = to_native(u'\ufeff')
+ data = to_native(data, errors='surrogate_or_strict')
+ if data.startswith(BOM):
+ data = data[len(BOM):]
+
+ if PY3:
+ fake_fh = StringIO(data)
+ else:
+ fake_fh = BytesIO(data)
+
+ reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect)
+
+ return reader
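
A hedged sketch of the two helpers above; the CSV text is illustrative:

    from ansible_collections.community.general.plugins.module_utils.csv import (
        initialize_dialect, read_csv)

    data = 'name,role\nalice,admin\nbob,dev\n'
    # only options that are not None are applied to the custom dialect
    dialect = initialize_dialect('unix', delimiter=None, skipinitialspace=True)
    for row in read_csv(data, dialect):
        print(row['name'], row['role'])
    # alice admin
    # bob dev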
diff --git a/ansible_collections/community/general/plugins/module_utils/database.py b/ansible_collections/community/general/plugins/module_utils/database.py
new file mode 100644
index 000000000..db1dc9c23
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/database.py
@@ -0,0 +1,191 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+
+# Input patterns for is_input_dangerous function:
+#
+# 1. '"' in string and '--' in string or
+# "'" in string and '--' in string
+PATTERN_1 = re.compile(r'(\'|\").*--')
+
+# 2. UNION / INTERSECT / EXCEPT followed by SELECT
+PATTERN_2 = re.compile(r'(UNION|INTERSECT|EXCEPT).*SELECT', re.IGNORECASE)
+
+# 3. ';' and any KEY_WORDS
+PATTERN_3 = re.compile(r';.*(SELECT|UPDATE|INSERT|DELETE|DROP|TRUNCATE|ALTER)', re.IGNORECASE)
+
+
+class SQLParseError(Exception):
+ pass
+
+
+class UnclosedQuoteError(SQLParseError):
+ pass
+
+
+# maps a type of identifier to the maximum number of dot levels that are
+# allowed to specify that identifier. For example, a database column can be
+# specified by up to 4 levels: database.schema.table.column
+_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
+ database=1,
+ schema=2,
+ table=3,
+ column=4,
+ role=1,
+ tablespace=1,
+ sequence=3,
+ publication=1,
+)
+_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
+
+
+def _find_end_quote(identifier, quote_char):
+ accumulate = 0
+ while True:
+ try:
+ quote = identifier.index(quote_char)
+ except ValueError:
+ raise UnclosedQuoteError
+ accumulate = accumulate + quote
+ try:
+ next_char = identifier[quote + 1]
+ except IndexError:
+ return accumulate
+ if next_char == quote_char:
+ try:
+ identifier = identifier[quote + 2:]
+ accumulate = accumulate + 2
+ except IndexError:
+ raise UnclosedQuoteError
+ else:
+ return accumulate
+
+
+def _identifier_parse(identifier, quote_char):
+ if not identifier:
+ raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
+
+ already_quoted = False
+ if identifier.startswith(quote_char):
+ already_quoted = True
+ try:
+ end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
+ except UnclosedQuoteError:
+ already_quoted = False
+ else:
+ if end_quote < len(identifier) - 1:
+ if identifier[end_quote + 1] == '.':
+ dot = end_quote + 1
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ further_identifiers.insert(0, first_identifier)
+ else:
+ raise SQLParseError('User escaped identifiers must escape extra quotes')
+ else:
+ further_identifiers = [identifier]
+
+ if not already_quoted:
+ try:
+ dot = identifier.index('.')
+ except ValueError:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ if dot == 0 or dot >= len(identifier) - 1:
+ identifier = identifier.replace(quote_char, quote_char * 2)
+ identifier = ''.join((quote_char, identifier, quote_char))
+ further_identifiers = [identifier]
+ else:
+ first_identifier = identifier[:dot]
+ next_identifier = identifier[dot + 1:]
+ further_identifiers = _identifier_parse(next_identifier, quote_char)
+ first_identifier = first_identifier.replace(quote_char, quote_char * 2)
+ first_identifier = ''.join((quote_char, first_identifier, quote_char))
+ further_identifiers.insert(0, first_identifier)
+
+ return further_identifiers
+
+
+def pg_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='"')
+ if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+ return '.'.join(identifier_fragments)
+
+
+def mysql_quote_identifier(identifier, id_type):
+ identifier_fragments = _identifier_parse(identifier, quote_char='`')
+ if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
+ raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
+
+ special_cased_fragments = []
+ for fragment in identifier_fragments:
+ if fragment == '`*`':
+ special_cased_fragments.append('*')
+ else:
+ special_cased_fragments.append(fragment)
+
+ return '.'.join(special_cased_fragments)
+
+
+def is_input_dangerous(string):
+ """Check if the passed string is potentially dangerous.
+ Can be used to prevent SQL injections.
+
+    Note: use this function only when you cannot use
+    psycopg2's parametrized cursor.execute() method
+    (typically with DDL queries).
+ """
+ if not string:
+ return False
+
+ for pattern in (PATTERN_1, PATTERN_2, PATTERN_3):
+ if re.search(pattern, string):
+ return True
+
+ return False
+
+
+def check_input(module, *args):
+ """Wrapper for is_input_dangerous function."""
+ needs_to_check = args
+
+ dangerous_elements = []
+
+ for elem in needs_to_check:
+ if isinstance(elem, str):
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ elif isinstance(elem, list):
+ for e in elem:
+ if is_input_dangerous(e):
+ dangerous_elements.append(e)
+
+ elif elem is None or isinstance(elem, bool):
+ pass
+
+ else:
+ elem = str(elem)
+ if is_input_dangerous(elem):
+ dangerous_elements.append(elem)
+
+ if dangerous_elements:
+ module.fail_json(msg="Passed input '%s' is "
+ "potentially dangerous" % ', '.join(dangerous_elements))
diff --git a/ansible_collections/community/general/plugins/module_utils/deps.py b/ansible_collections/community/general/plugins/module_utils/deps.py
new file mode 100644
index 000000000..a2413d195
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/deps.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+# (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2022, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import traceback
+from contextlib import contextmanager
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+
+_deps = dict()
+
+
+class _Dependency(object):
+ _states = ["pending", "failure", "success"]
+
+ def __init__(self, name, reason=None, url=None, msg=None):
+ self.name = name
+ self.reason = reason
+ self.url = url
+ self.msg = msg
+
+ self.state = 0
+ self.trace = None
+ self.exc = None
+
+ def succeed(self):
+ self.state = 2
+
+ def fail(self, exc, trace):
+ self.state = 1
+ self.exc = exc
+ self.trace = trace
+
+ @property
+ def message(self):
+ if self.msg:
+ return to_native(self.msg)
+ else:
+ return missing_required_lib(self.name, reason=self.reason, url=self.url)
+
+ @property
+ def failed(self):
+ return self.state == 1
+
+ def validate(self, module):
+ if self.failed:
+ module.fail_json(msg=self.message, exception=self.trace)
+
+ def __str__(self):
+ return "<dependency: {0} [{1}]>".format(self.name, self._states[self.state])
+
+
+@contextmanager
+def declare(name, *args, **kwargs):
+ dep = _Dependency(name, *args, **kwargs)
+ try:
+ yield dep
+ except Exception as e:
+ dep.fail(e, traceback.format_exc())
+ else:
+ dep.succeed()
+ finally:
+ _deps[name] = dep
+
+
+def _select_names(spec):
+ dep_names = sorted(_deps)
+
+ if spec:
+ if spec.startswith("-"):
+ spec_split = spec[1:].split(":")
+ for d in spec_split:
+ dep_names.remove(d)
+ else:
+ spec_split = spec.split(":")
+ dep_names = []
+ for d in spec_split:
+ _deps[d] # ensure it exists
+ dep_names.append(d)
+
+ return dep_names
+
+
+def validate(module, spec=None):
+ for dep in _select_names(spec):
+ _deps[dep].validate(module)
+
+
+def failed(spec=None):
+ return any(_deps[d].failed for d in _select_names(spec))
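+
+
+# A minimal usage sketch (assuming a module that needs the 'requests'
+# library): imports are declared inside the context manager, and validate()
+# is called once an AnsibleModule exists, failing the module with the
+# standard missing-library message if the import raised.
+#
+#     from ansible_collections.community.general.plugins.module_utils import deps
+#
+#     with deps.declare("requests"):
+#         import requests
+#
+#     module = AnsibleModule(argument_spec={})
+#     deps.validate(module)              # or deps.validate(module, "-requests")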
diff --git a/ansible_collections/community/general/plugins/module_utils/dimensiondata.py b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
new file mode 100644
index 000000000..0300f6c1e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/dimensiondata.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Mark Maglana <mmaglana@gmail.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# Common functionality to be used by various module components
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import traceback
+
+# (TODO: remove AnsibleModule from next line!)
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import
+from ansible.module_utils.six.moves import configparser
+from os.path import expanduser
+from uuid import UUID
+
+LIBCLOUD_IMP_ERR = None
+try:
+ from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus # noqa: F401, pylint: disable=unused-import
+ from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import
+ from libcloud.compute.providers import get_driver
+ from libcloud.compute.types import Provider
+
+ import libcloud.security
+
+ HAS_LIBCLOUD = True
+except ImportError:
+ LIBCLOUD_IMP_ERR = traceback.format_exc()
+ HAS_LIBCLOUD = False
+
+# MCP 2.x version pattern for location (datacenter) names.
+#
+# Note that this is not a totally reliable way of determining MCP version.
+# Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties.
+# At some point we may therefore want to either enhance libcloud or enable overriding mcp_version
+# by specifying it in the module parameters.
+MCP_2_LOCATION_NAME_PATTERN = re.compile(r".*MCP\s?2.*")
+
+
+class DimensionDataModule(object):
+ """
+ The base class containing common functionality used by Dimension Data modules for Ansible.
+ """
+
+ def __init__(self, module):
+ """
+ Create a new DimensionDataModule.
+
+ Will fail if Apache libcloud is not present.
+
+ :param module: The underlying Ansible module.
+ :type module: AnsibleModule
+ """
+
+ self.module = module
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg=missing_required_lib('libcloud'), exception=LIBCLOUD_IMP_ERR)
+
+ # Credentials are common to all Dimension Data modules.
+ credentials = self.get_credentials()
+ self.user_id = credentials['user_id']
+ self.key = credentials['key']
+
+ # Region and location are common to all Dimension Data modules.
+ region = self.module.params['region']
+ self.region = 'dd-{0}'.format(region)
+ self.location = self.module.params['location']
+
+ libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs']
+
+ self.driver = get_driver(Provider.DIMENSIONDATA)(
+ self.user_id,
+ self.key,
+ region=self.region
+ )
+
+ # Determine the MCP API version (this depends on the target datacenter).
+ self.mcp_version = self.get_mcp_version(self.location)
+
+ # Optional "wait-for-completion" arguments
+ if 'wait' in self.module.params:
+ self.wait = self.module.params['wait']
+ self.wait_time = self.module.params['wait_time']
+ self.wait_poll_interval = self.module.params['wait_poll_interval']
+ else:
+ self.wait = False
+ self.wait_time = 0
+ self.wait_poll_interval = 0
+
+ def get_credentials(self):
+ """
+ Get user_id and key from module configuration, environment, or dotfile.
+ Order of priority is module, environment, dotfile.
+
+ To set in environment:
+
+ export MCP_USER='myusername'
+ export MCP_PASSWORD='mypassword'
+
+ To set in dot file place a file at ~/.dimensiondata with
+ the following contents:
+
+ [dimensiondatacloud]
+ MCP_USER: myusername
+ MCP_PASSWORD: mypassword
+ """
+
+ if not HAS_LIBCLOUD:
+ self.module.fail_json(msg='libcloud is required for this module.')
+
+ user_id = None
+ key = None
+
+ # First, try the module configuration
+ if 'mcp_user' in self.module.params:
+ if 'mcp_password' not in self.module.params:
+ self.module.fail_json(
+ msg='"mcp_user" parameter was specified, but not "mcp_password" (either both must be specified, or neither).'
+ )
+
+ user_id = self.module.params['mcp_user']
+ key = self.module.params['mcp_password']
+
+ # Fall back to environment
+ if not user_id or not key:
+ user_id = os.environ.get('MCP_USER', None)
+ key = os.environ.get('MCP_PASSWORD', None)
+
+ # Finally, try dotfile (~/.dimensiondata)
+ if not user_id or not key:
+ home = expanduser('~')
+ config = configparser.RawConfigParser()
+ config.read("%s/.dimensiondata" % home)
+
+ try:
+ user_id = config.get("dimensiondatacloud", "MCP_USER")
+ key = config.get("dimensiondatacloud", "MCP_PASSWORD")
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ pass
+
+ # One or more credentials not found. Function can't recover from this
+ # so it has to raise an error instead of failing silently.
+ if not user_id:
+ raise MissingCredentialsError("Dimension Data user id not found")
+ elif not key:
+ raise MissingCredentialsError("Dimension Data key not found")
+
+ # Both found, return data
+ return dict(user_id=user_id, key=key)
+
+ def get_mcp_version(self, location):
+ """
+ Get the MCP version for the specified location.
+ """
+
+ location = self.driver.ex_get_location_by_id(location)
+ if MCP_2_LOCATION_NAME_PATTERN.match(location.name):
+ return '2.0'
+
+ return '1.0'
+
+ def get_network_domain(self, locator, location):
+ """
+ Retrieve a network domain by its name or Id.
+ """
+
+ if is_uuid(locator):
+ network_domain = self.driver.ex_get_network_domain(locator)
+ else:
+ matching_network_domains = [
+ network_domain for network_domain in self.driver.ex_list_network_domains(location=location)
+ if network_domain.name == locator
+ ]
+
+ if matching_network_domains:
+ network_domain = matching_network_domains[0]
+ else:
+ network_domain = None
+
+ if network_domain:
+ return network_domain
+
+ raise UnknownNetworkError("Network '%s' could not be found" % locator)
+
+ def get_vlan(self, locator, location, network_domain):
+ """
+ Get a VLAN object by its name or id
+ """
+ if is_uuid(locator):
+ vlan = self.driver.ex_get_vlan(locator)
+ else:
+ matching_vlans = [
+ vlan for vlan in self.driver.ex_list_vlans(location, network_domain)
+ if vlan.name == locator
+ ]
+
+ if matching_vlans:
+ vlan = matching_vlans[0]
+ else:
+ vlan = None
+
+ if vlan:
+ return vlan
+
+ raise UnknownVLANError("VLAN '%s' could not be found" % locator)
+
+ @staticmethod
+ def argument_spec(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = dict(
+ region=dict(type='str', default='na'),
+ mcp_user=dict(type='str', required=False),
+ mcp_password=dict(type='str', required=False, no_log=True),
+ location=dict(type='str', required=True),
+ validate_certs=dict(type='bool', required=False, default=True)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def argument_spec_with_wait(**additional_argument_spec):
+ """
+ Build an argument specification for a Dimension Data module that includes "wait for completion" arguments.
+ :param additional_argument_spec: An optional dictionary representing the specification for additional module arguments (if any).
+ :return: A dict containing the argument specification.
+ """
+
+ spec = DimensionDataModule.argument_spec(
+ wait=dict(type='bool', required=False, default=False),
+ wait_time=dict(type='int', required=False, default=600),
+ wait_poll_interval=dict(type='int', required=False, default=2)
+ )
+
+ if additional_argument_spec:
+ spec.update(additional_argument_spec)
+
+ return spec
+
+ @staticmethod
+ def required_together(*additional_required_together):
+ """
+ Get the basic argument specification for Dimension Data modules, indicating which arguments must be specified together.
+ :param additional_required_together: An optional list representing the specification for additional module arguments that must be specified together.
+ :return: An array containing the argument specifications.
+ """
+
+ required_together = [
+ ['mcp_user', 'mcp_password']
+ ]
+
+ if additional_required_together:
+ required_together.extend(additional_required_together)
+
+ return required_together
+
+
+class LibcloudNotFound(Exception):
+ """
+ Exception raised when Apache libcloud cannot be found.
+ """
+
+ pass
+
+
+class MissingCredentialsError(Exception):
+ """
+ Exception raised when credentials for Dimension Data CloudControl cannot be found.
+ """
+
+ pass
+
+
+class UnknownNetworkError(Exception):
+ """
+ Exception raised when a network or network domain cannot be found.
+ """
+
+ pass
+
+
+class UnknownVLANError(Exception):
+ """
+ Exception raised when a VLAN cannot be found.
+ """
+
+ pass
+
+
+def get_dd_regions():
+ """
+ Get the list of available regions whose vendor is Dimension Data.
+ """
+
+ # Get endpoints
+ all_regions = API_ENDPOINTS.keys()
+
+ # Only Dimension Data endpoints (no prefix)
+ regions = [region[3:] for region in all_regions if region.startswith('dd-')]
+
+ return regions
+
+
+def is_uuid(u, version=4):
+ """
+ Test if valid v4 UUID
+ """
+ try:
+ uuid_obj = UUID(u, version=version)
+
+ return str(uuid_obj) == u
+ except ValueError:
+ return False
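+
+
+# Sketch of how a module would build on this class ('name' is a hypothetical
+# extra argument):
+#
+#     module = AnsibleModule(
+#         argument_spec=DimensionDataModule.argument_spec_with_wait(
+#             name=dict(type='str', required=True),
+#         ),
+#         required_together=DimensionDataModule.required_together(),
+#     )
+#     dd_module = DimensionDataModule(module)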
diff --git a/ansible_collections/community/general/plugins/module_utils/gandi_livedns_api.py b/ansible_collections/community/general/plugins/module_utils/gandi_livedns_api.py
new file mode 100644
index 000000000..53245d44d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/gandi_livedns_api.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+class GandiLiveDNSAPI(object):
+
+ api_endpoint = 'https://api.gandi.net/v5/livedns'
+ changed = False
+
+ error_strings = {
+ 400: 'Bad request',
+ 401: 'Permission denied',
+ 404: 'Resource not found',
+ }
+
+ attribute_map = {
+ 'record': 'rrset_name',
+ 'type': 'rrset_type',
+ 'ttl': 'rrset_ttl',
+ 'values': 'rrset_values'
+ }
+
+ def __init__(self, module):
+ self.module = module
+ self.api_key = module.params['api_key']
+
+ def _build_error_message(self, module, info):
+ s = ''
+ body = info.get('body')
+ if body:
+ errors = module.from_json(body).get('errors')
+ if errors:
+ error = errors[0]
+ name = error.get('name')
+ if name:
+ s += '{0}: '.format(name)
+ description = error.get('description')
+ if description:
+ s += description
+ return s
+
+ def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True):
+ headers = {'Authorization': 'Apikey {0}'.format(self.api_key),
+ 'Content-Type': 'application/json'}
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method)
+
+ error_msg = ''
+ if info['status'] >= 400 and (info['status'] != 404 or error_on_404):
+ err_s = self.error_strings.get(info['status'], '')
+
+ error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info))
+
+ result = None
+ try:
+ content = resp.read()
+ except AttributeError:
+ content = None
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ if error_msg:
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def build_result(self, result, domain):
+ if result is None:
+ return None
+
+ res = {}
+ for k in self.attribute_map:
+ v = result.get(self.attribute_map[k], None)
+ if v is not None:
+ if k == 'record' and v == '@':
+ v = ''
+ res[k] = v
+
+ res['domain'] = domain
+
+ return res
+
+ def build_results(self, results, domain):
+ if results is None:
+ return []
+ return [self.build_result(r, domain) for r in results]
+
+ def get_records(self, record, type, domain):
+ url = '/domains/%s/records' % (domain)
+ if record:
+ url += '/%s' % (record)
+ if type:
+ url += '/%s' % (type)
+
+ records, status = self._gandi_api_call(url, error_on_404=False)
+
+ if status == 404:
+ return []
+
+ if not isinstance(records, list):
+ records = [records]
+
+ # filter by type if record is not set
+ if not record and type:
+ records = [r
+ for r in records
+ if r['rrset_type'] == type]
+
+ return records
+
+ def create_record(self, record, type, values, ttl, domain):
+ url = '/domains/%s/records' % (domain)
+ new_record = {
+ 'rrset_name': record,
+ 'rrset_type': type,
+ 'rrset_values': values,
+ 'rrset_ttl': ttl,
+ }
+ record, status = self._gandi_api_call(url, method='POST', payload=new_record)
+
+ if status in (200, 201,):
+ return new_record
+
+ return None
+
+ def update_record(self, record, type, values, ttl, domain):
+ url = '/domains/%s/records/%s/%s' % (domain, record, type)
+ new_record = {
+ 'rrset_values': values,
+ 'rrset_ttl': ttl,
+ }
+ record = self._gandi_api_call(url, method='PUT', payload=new_record)[0]
+ return record
+
+ def delete_record(self, record, type, domain):
+ url = '/domains/%s/records/%s/%s' % (domain, record, type)
+
+ self._gandi_api_call(url, method='DELETE')
+
+ def delete_dns_record(self, record, type, values, domain):
+ if record == '':
+ record = '@'
+
+ records = self.get_records(record, type, domain)
+
+ if records:
+ cur_record = records[0]
+
+ self.changed = True
+
+ if values is not None and set(cur_record['rrset_values']) != set(values):
+ new_values = set(cur_record['rrset_values']) - set(values)
+ if new_values:
+ # Removing one or more values from a record, we update the record with the remaining values
+ self.update_record(record, type, list(new_values), cur_record['rrset_ttl'], domain)
+ records = self.get_records(record, type, domain)
+ return records[0], self.changed
+
+ if not self.module.check_mode:
+ self.delete_record(record, type, domain)
+ else:
+ cur_record = None
+
+ return None, self.changed
+
+ def ensure_dns_record(self, record, type, ttl, values, domain):
+ if record == '':
+ record = '@'
+
+ records = self.get_records(record, type, domain)
+
+ if records:
+ cur_record = records[0]
+
+ do_update = False
+ if ttl is not None and cur_record['rrset_ttl'] != ttl:
+ do_update = True
+ if values is not None and set(cur_record['rrset_values']) != set(values):
+ do_update = True
+
+ if do_update:
+ if self.module.check_mode:
+ result = dict(
+ rrset_type=type,
+ rrset_name=record,
+ rrset_values=values,
+ rrset_ttl=ttl
+ )
+ else:
+ self.update_record(record, type, values, ttl, domain)
+
+ records = self.get_records(record, type, domain)
+ result = records[0]
+ self.changed = True
+ return result, self.changed
+ else:
+ return cur_record, self.changed
+
+ if self.module.check_mode:
+ new_record = dict(
+ rrset_type=type,
+ rrset_name=record,
+ rrset_values=values,
+ rrset_ttl=ttl
+ )
+ result = new_record
+ else:
+ result = self.create_record(record, type, values, ttl, domain)
+
+ self.changed = True
+ return result, self.changed
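+
+
+# Illustrative call sequence (assumes an AnsibleModule whose params include
+# 'api_key'; the record data is made up):
+#
+#     api = GandiLiveDNSAPI(module)
+#     record, changed = api.ensure_dns_record('www', 'A', 300,
+#                                             ['192.0.2.1'], 'example.com')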
diff --git a/ansible_collections/community/general/plugins/module_utils/gconftool2.py b/ansible_collections/community/general/plugins/module_utils/gconftool2.py
new file mode 100644
index 000000000..e90c3fb2c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/gconftool2.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+_state_map = {
+ "present": "--set",
+ "absent": "--unset",
+ "get": "--get",
+}
+
+
+def gconftool2_runner(module, **kwargs):
+ return CmdRunner(
+ module,
+ command='gconftool-2',
+ arg_formats=dict(
+ state=cmd_runner_fmt.as_map(_state_map),
+ key=cmd_runner_fmt.as_list(),
+ value_type=cmd_runner_fmt.as_opt_val("--type"),
+ value=cmd_runner_fmt.as_list(),
+ direct=cmd_runner_fmt.as_bool("--direct"),
+ config_source=cmd_runner_fmt.as_opt_val("--config-source"),
+ ),
+ **kwargs
+ )
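+
+
+# Rough usage sketch (assuming the CmdRunner context-manager API from
+# cmd_runner.py; the key path is made up): keyword arguments passed to run()
+# are rendered through the arg_formats declared above.
+#
+#     runner = gconftool2_runner(module, check_rc=True)
+#     with runner("state key") as ctx:
+#         rc, out, err = ctx.run(state="get", key="/apps/example/some_key")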
diff --git a/ansible_collections/community/general/plugins/module_utils/gitlab.py b/ansible_collections/community/general/plugins/module_utils/gitlab.py
new file mode 100644
index 000000000..7cb59e4c2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/gitlab.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+try:
+ from urlparse import urljoin
+except ImportError:
+ from urllib.parse import urljoin # Python 3+
+
+import traceback
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab
+ import requests
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ gitlab = None
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+
+def auth_argument_spec(spec=None):
+ arg_spec = (dict(
+ api_token=dict(type='str', no_log=True),
+ api_oauth_token=dict(type='str', no_log=True),
+ api_job_token=dict(type='str', no_log=True),
+ ))
+ if spec:
+ arg_spec.update(spec)
+ return arg_spec
+
+
+def find_project(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.projects.get(identifier)
+ except Exception as e:
+ current_user = gitlab_instance.user
+ try:
+ project = gitlab_instance.projects.get(current_user.username + '/' + identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def find_group(gitlab_instance, identifier):
+ try:
+ project = gitlab_instance.groups.get(identifier)
+ except Exception as e:
+ return None
+
+ return project
+
+
+def ensure_gitlab_package(module):
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(
+ msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'),
+ exception=GITLAB_IMP_ERR
+ )
+
+
+def gitlab_authentication(module):
+ gitlab_url = module.params['api_url']
+ validate_certs = module.params['validate_certs']
+ gitlab_user = module.params['api_username']
+ gitlab_password = module.params['api_password']
+ gitlab_token = module.params['api_token']
+ gitlab_oauth_token = module.params['api_oauth_token']
+ gitlab_job_token = module.params['api_job_token']
+
+ ensure_gitlab_package(module)
+
+ try:
+ # The python-gitlab library removed support for username/password authentication in 1.13.0
+ # Changelog: https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
+ # This condition allows us to still support older versions of the python-gitlab library
+ if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
+ private_token=gitlab_token, api_version=4)
+ else:
+ # We can create an oauth_token using a username and password
+ # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
+ if gitlab_user:
+ data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password}
+ resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs)
+ resp_data = resp.json()
+ gitlab_oauth_token = resp_data["access_token"]
+
+ gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token,
+ oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4)
+
+ gitlab_instance.auth()
+ except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
+ module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e))
+ except (gitlab.exceptions.GitlabHttpError) as e:
+ module.fail_json(msg="Failed to connect to GitLab server: %s. \
+ GitLab removed the Session API, and private tokens were removed from user API endpoints, in version 10.2." % to_native(e))
+
+ return gitlab_instance
+
+
+def filter_returned_variables(gitlab_variables):
+ # pop properties we don't know
+ existing_variables = [dict(x.attributes) for x in gitlab_variables]
+ KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope']
+ for item in existing_variables:
+ for key in list(item.keys()):
+ if key not in KNOWN:
+ item.pop(key)
+ return existing_variables
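+
+
+# Typical call sequence in a module (sketch; assumes the standard api_url and
+# authentication parameters are present in module.params):
+#
+#     gitlab_instance = gitlab_authentication(module)
+#     project = find_project(gitlab_instance, module.params['project'])
+#     if project is None:
+#         module.fail_json(msg="project not found")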
diff --git a/ansible_collections/community/general/plugins/module_utils/heroku.py b/ansible_collections/community/general/plugins/module_utils/heroku.py
new file mode 100644
index 000000000..f5ed3e2b8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/heroku.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+
+HAS_HEROKU = False
+HEROKU_IMP_ERR = None
+try:
+ import heroku3
+ HAS_HEROKU = True
+except ImportError:
+ HEROKU_IMP_ERR = traceback.format_exc()
+
+
+class HerokuHelper():
+ def __init__(self, module):
+ self.module = module
+ self.check_lib()
+ self.api_key = module.params["api_key"]
+
+ def check_lib(self):
+ if not HAS_HEROKU:
+ self.module.fail_json(msg=missing_required_lib('heroku3'), exception=HEROKU_IMP_ERR)
+
+ @staticmethod
+ def heroku_argument_spec():
+ return dict(
+ api_key=dict(fallback=(env_fallback, ['HEROKU_API_KEY', 'TF_VAR_HEROKU_API_KEY']), type='str', no_log=True))
+
+ def get_heroku_client(self):
+ client = heroku3.from_key(self.api_key)
+
+ if not client.is_authenticated:
+ self.module.fail_json(msg='Heroku authentication failure, please check your API Key')
+
+ return client
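+
+
+# Usage sketch:
+#
+#     module = AnsibleModule(argument_spec=HerokuHelper.heroku_argument_spec())
+#     client = HerokuHelper(module).get_heroku_client()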
diff --git a/ansible_collections/community/general/plugins/module_utils/hwc_utils.py b/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
new file mode 100644
index 000000000..a21cc8e48
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/hwc_utils.py
@@ -0,0 +1,442 @@
+# -*- coding: utf-8 -*-
+# Copyright (c), Google Inc, 2017
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import time
+import traceback
+
+THIRD_LIBRARIES_IMP_ERR = None
+try:
+ from keystoneauth1.adapter import Adapter
+ from keystoneauth1.identity import v3
+ from keystoneauth1 import session
+ HAS_THIRD_LIBRARIES = True
+except ImportError:
+ THIRD_LIBRARIES_IMP_ERR = traceback.format_exc()
+ HAS_THIRD_LIBRARIES = False
+
+from ansible.module_utils.basic import (AnsibleModule, env_fallback,
+ missing_required_lib)
+from ansible.module_utils.common.text.converters import to_text
+
+
+class HwcModuleException(Exception):
+ def __init__(self, message):
+ super(HwcModuleException, self).__init__()
+
+ self._message = message
+
+ def __str__(self):
+ return "[HwcClientException] message=%s" % self._message
+
+
+class HwcClientException(Exception):
+ def __init__(self, code, message):
+ super(HwcClientException, self).__init__()
+
+ self._code = code
+ self._message = message
+
+ def __str__(self):
+ msg = " code=%s," % str(self._code) if self._code != 0 else ""
+ return "[HwcClientException]%s message=%s" % (
+ msg, self._message)
+
+
+class HwcClientException404(HwcClientException):
+ def __init__(self, message):
+ super(HwcClientException404, self).__init__(404, message)
+
+ def __str__(self):
+ return "[HwcClientException404] message=%s" % self._message
+
+
+def session_method_wrapper(f):
+ def _wrap(self, url, *args, **kwargs):
+ try:
+ url = self.endpoint + url
+ r = f(self, url, *args, **kwargs)
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Sending request failed, error=%s" % ex)
+
+ result = None
+ if r.content:
+ try:
+ result = r.json()
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Parsing response to json failed, error: %s" % ex)
+
+ code = r.status_code
+ if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]:
+ msg = ""
+ for i in ['message', 'error.message']:
+ try:
+ msg = navigate_value(result, i)
+ break
+ except Exception:
+ pass
+ else:
+ msg = str(result)
+
+ if code == 404:
+ raise HwcClientException404(msg)
+
+ raise HwcClientException(code, msg)
+
+ return result
+
+ return _wrap
+
+
+class _ServiceClient(object):
+ def __init__(self, client, endpoint, product):
+ self._client = client
+ self._endpoint = endpoint
+ self._default_header = {
+ 'User-Agent': "Huawei-Ansible-MM-%s" % product,
+ 'Accept': 'application/json',
+ }
+
+ @property
+ def endpoint(self):
+ return self._endpoint
+
+ @endpoint.setter
+ def endpoint(self, e):
+ self._endpoint = e
+
+ @session_method_wrapper
+ def get(self, url, body=None, header=None, timeout=None):
+ return self._client.get(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def post(self, url, body=None, header=None, timeout=None):
+ return self._client.post(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def delete(self, url, body=None, header=None, timeout=None):
+ return self._client.delete(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ @session_method_wrapper
+ def put(self, url, body=None, header=None, timeout=None):
+ return self._client.put(url, json=body, timeout=timeout,
+ headers=self._header(header))
+
+ def _header(self, header):
+ if header and isinstance(header, dict):
+ for k, v in self._default_header.items():
+ if k not in header:
+ header[k] = v
+ else:
+ header = self._default_header
+
+ return header
+
+
+class Config(object):
+ def __init__(self, module, product):
+ self._project_client = None
+ self._domain_client = None
+ self._module = module
+ self._product = product
+ self._endpoints = {}
+
+ self._validate()
+ self._gen_provider_client()
+
+ @property
+ def module(self):
+ return self._module
+
+ def client(self, region, service_type, service_level):
+ c = self._project_client
+ if service_level == "domain":
+ c = self._domain_client
+
+ e = self._get_service_endpoint(c, service_type, region)
+
+ return _ServiceClient(c, e, self._product)
+
+ def _gen_provider_client(self):
+ m = self._module
+ p = {
+ "auth_url": m.params['identity_endpoint'],
+ "password": m.params['password'],
+ "username": m.params['user'],
+ "project_name": m.params['project'],
+ "user_domain_name": m.params['domain'],
+ "reauthenticate": True
+ }
+
+ self._project_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ p.pop("project_name")
+ self._domain_client = Adapter(
+ session.Session(auth=v3.Password(**p)),
+ raise_exc=False)
+
+ def _get_service_endpoint(self, client, service_type, region):
+ k = "%s.%s" % (service_type, region if region else "")
+
+ if k in self._endpoints:
+ return self._endpoints.get(k)
+
+ url = None
+ try:
+ url = client.get_endpoint(service_type=service_type,
+ region_name=region, interface="public")
+ except Exception as ex:
+ raise HwcClientException(
+ 0, "Getting endpoint failed, error=%s" % ex)
+
+ if url == "":
+ raise HwcClientException(
+ 0, "Can not find the enpoint for %s" % service_type)
+
+ if url[-1] != "/":
+ url += "/"
+
+ self._endpoints[k] = url
+ return url
+
+ def _validate(self):
+ if not HAS_THIRD_LIBRARIES:
+ self.module.fail_json(
+ msg=missing_required_lib('keystoneauth1'),
+ exception=THIRD_LIBRARIES_IMP_ERR)
+
+
+class HwcModule(AnsibleModule):
+ def __init__(self, *args, **kwargs):
+ arg_spec = kwargs.setdefault('argument_spec', {})
+
+ arg_spec.update(
+ dict(
+ identity_endpoint=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_IDENTITY_ENDPOINT']),
+ ),
+ user=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_USER']),
+ ),
+ password=dict(
+ required=True, type='str', no_log=True,
+ fallback=(env_fallback, ['ANSIBLE_HWC_PASSWORD']),
+ ),
+ domain=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_DOMAIN']),
+ ),
+ project=dict(
+ required=True, type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_PROJECT']),
+ ),
+ region=dict(
+ type='str',
+ fallback=(env_fallback, ['ANSIBLE_HWC_REGION']),
+ ),
+ id=dict(type='str')
+ )
+ )
+
+ super(HwcModule, self).__init__(*args, **kwargs)
+
+
+class _DictComparison(object):
+ ''' This class takes in two dictionaries `a` and `b`.
+ These are dictionaries of arbitrary depth, but made up of standard
+ Python types only.
+ This differ will compare all values in `a` to those in `b`.
+ If a value in `a` is None, the comparison always returns True,
+ indicating that this value does not need to be compared.
+ Note: On all lists, order does matter.
+ '''
+
+ def __init__(self, request):
+ self.request = request
+
+ def __eq__(self, other):
+ return self._compare_dicts(self.request, other.request)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _compare_dicts(self, dict1, dict2):
+ if dict1 is None:
+ return True
+
+ if set(dict1.keys()) != set(dict2.keys()):
+ return False
+
+ for k in dict1:
+ if not self._compare_value(dict1.get(k), dict2.get(k)):
+ return False
+
+ return True
+
+ def _compare_lists(self, list1, list2):
+ """Takes in two lists and compares them."""
+ if list1 is None:
+ return True
+
+ if len(list1) != len(list2):
+ return False
+
+ for i in range(len(list1)):
+ if not self._compare_value(list1[i], list2[i]):
+ return False
+
+ return True
+
+ def _compare_value(self, value1, value2):
+ """
+ return: True if value1 is the same as value2, otherwise False.
+ """
+ if value1 is None:
+ return True
+
+ if not (value1 and value2):
+ return (not value1) and (not value2)
+
+ # Can assume non-None types at this point.
+ if isinstance(value1, list) and isinstance(value2, list):
+ return self._compare_lists(value1, value2)
+
+ elif isinstance(value1, dict) and isinstance(value2, dict):
+ return self._compare_dicts(value1, value2)
+
+ # Always use to_text values to avoid unicode issues.
+ return (to_text(value1, errors='surrogate_or_strict') == to_text(
+ value2, errors='surrogate_or_strict'))
+
+
+def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3):
+ is_last_time = False
+ not_found_times = 0
+ wait = 0
+
+ time.sleep(delay)
+
+ end = time.time() + timeout
+ while not is_last_time:
+ if time.time() > end:
+ is_last_time = True
+
+ obj, status = refresh()
+
+ if obj is None:
+ not_found_times += 1
+
+ if not_found_times > 10:
+ raise HwcModuleException(
+ "not found the object for %d times" % not_found_times)
+ else:
+ not_found_times = 0
+
+ if status in target:
+ return obj
+
+ if pending and status not in pending:
+ raise HwcModuleException(
+ "unexpect status(%s) occurred" % status)
+
+ if not is_last_time:
+ wait *= 2
+ if wait < min_interval:
+ wait = min_interval
+ elif wait > 10:
+ wait = 10
+
+ time.sleep(wait)
+
+ raise HwcModuleException("asycn wait timeout after %d seconds" % timeout)
+
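+# Polling sketch (fetch_server is a hypothetical helper): wait until the
+# object reaches a target status, tolerating listed pending states.
+#
+#     def refresh():
+#         obj = fetch_server()
+#         return obj, obj.get("status")
+#
+#     server = wait_to_finish(["ACTIVE"], ["BUILD"], refresh, timeout=600)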
+
+def navigate_value(data, index, array_index=None):
+ if array_index and (not isinstance(array_index, dict)):
+ raise HwcModuleException("array_index must be dict")
+
+ d = data
+ for n in range(len(index)):
+ if d is None:
+ return None
+
+ if not isinstance(d, dict):
+ raise HwcModuleException(
+ "can't navigate value from a non-dict object")
+
+ i = index[n]
+ if i not in d:
+ raise HwcModuleException(
+ "navigate value failed: key(%s) is not exist in dict" % i)
+ d = d[i]
+
+ if not array_index:
+ continue
+
+ k = ".".join(index[: (n + 1)])
+ if k not in array_index:
+ continue
+
+ if d is None:
+ return None
+
+ if not isinstance(d, list):
+ raise HwcModuleException(
+ "can't navigate value from a non-list object")
+
+ j = array_index.get(k)
+ if j >= len(d):
+ raise HwcModuleException(
+ "navigate value failed: the index is out of list")
+ d = d[j]
+
+ return d
+
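+# Example (sketch): the path is a sequence of keys; array_index maps a dotted
+# key prefix to a list index.
+#
+#     data = {"server": {"nics": [{"ip": "10.0.0.5"}]}}
+#     navigate_value(data, ["server", "nics", "ip"], {"server.nics": 0})
+#     # -> "10.0.0.5"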
+
+def build_path(module, path, kv=None):
+ if kv is None:
+ kv = dict()
+
+ v = {}
+ for p in re.findall(r"{[^/]*}", path):
+ n = p[1:][:-1]
+
+ if n in kv:
+ v[n] = str(kv[n])
+
+ else:
+ if n in module.params:
+ v[n] = str(module.params.get(n))
+ else:
+ v[n] = ""
+
+ return path.format(**v)
+
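+# Example (sketch): placeholders are filled from kv first, then from
+# module.params, and are left empty otherwise.
+#
+#     build_path(module, "servers/{server_id}/nics", {"server_id": "abc"})
+#     # -> "servers/abc/nics"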
+
+def get_region(module):
+ if module.params['region']:
+ return module.params['region']
+
+ return module.params['project'].split("_")[0]
+
+
+def is_empty_value(v):
+ return (not v)
+
+
+def are_different_dicts(dict1, dict2):
+ return _DictComparison(dict1) != _DictComparison(dict2)
diff --git a/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py b/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
new file mode 100644
index 000000000..abbb57f52
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ibm_sa_utils.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from functools import wraps
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import missing_required_lib
+
+PYXCLI_INSTALLED = True
+PYXCLI_IMP_ERR = None
+try:
+ from pyxcli import client, errors
+except ImportError:
+ PYXCLI_IMP_ERR = traceback.format_exc()
+ PYXCLI_INSTALLED = False
+
+AVAILABLE_PYXCLI_FIELDS = ['pool', 'size', 'snapshot_size',
+ 'domain', 'perf_class', 'vol',
+ 'iscsi_chap_name', 'iscsi_chap_secret',
+ 'cluster', 'host', 'lun', 'override',
+ 'fcaddress', 'iscsi_name', 'max_dms',
+ 'max_cgs', 'ldap_id', 'max_mirrors',
+ 'max_pools', 'max_volumes', 'hard_capacity',
+ 'soft_capacity']
+
+
+def xcli_wrapper(func):
+ """ Catch xcli errors and return a proper message"""
+ @wraps(func)
+ def wrapper(module, *args, **kwargs):
+ try:
+ return func(module, *args, **kwargs)
+ except errors.CommandExecutionError as e:
+ module.fail_json(msg=to_native(e))
+ return wrapper
+
+
+@xcli_wrapper
+def connect_ssl(module):
+ endpoints = module.params['endpoints']
+ username = module.params['username']
+ password = module.params['password']
+ if not (username and password and endpoints):
+ module.fail_json(
+ msg="Username, password or endpoints arguments "
+ "are missing from the module arguments")
+
+ try:
+ return client.XCLIClient.connect_multiendpoint_ssl(username,
+ password,
+ endpoints)
+ except errors.CommandFailedConnectionError as e:
+ module.fail_json(
+ msg="Connection with Spectrum Accelerate system has "
+ "failed: {[0]}.".format(to_native(e)))
+
+
+def spectrum_accelerate_spec():
+ """ Return arguments spec for AnsibleModule """
+ return dict(
+ endpoints=dict(required=True),
+ username=dict(required=True),
+ password=dict(no_log=True, required=True),
+ )
+
+
+@xcli_wrapper
+def execute_pyxcli_command(module, xcli_command, xcli_client):
+ pyxcli_args = build_pyxcli_command(module.params)
+ getattr(xcli_client.cmd, xcli_command)(**(pyxcli_args))
+ return True
+
+
+def build_pyxcli_command(fields):
+ """ Builds the args for pyxcli using the exact args from ansible"""
+ pyxcli_args = {}
+ for field in fields:
+ if not fields[field]:
+ continue
+ if field in AVAILABLE_PYXCLI_FIELDS and fields[field] != '':
+ pyxcli_args[field] = fields[field]
+ return pyxcli_args
+
+
+def is_pyxcli_installed(module):
+ if not PYXCLI_INSTALLED:
+ module.fail_json(msg=missing_required_lib('pyxcli'),
+ exception=PYXCLI_IMP_ERR)
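+
+
+# Typical call sequence in an ibm_sa_* module (sketch; 'vol_create' stands in
+# for any pyxcli command name):
+#
+#     is_pyxcli_installed(module)
+#     xcli_client = connect_ssl(module)
+#     execute_pyxcli_command(module, 'vol_create', xcli_client)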
diff --git a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
new file mode 100644
index 000000000..7e421f3bb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak.py
@@ -0,0 +1,2390 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# BSD 2-Clause license (see LICENSES/BSD-2-Clause.txt)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+import traceback
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+URL_REALM_INFO = "{url}/realms/{realm}"
+URL_REALMS = "{url}/admin/realms"
+URL_REALM = "{url}/admin/realms/{realm}"
+
+URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token"
+URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}"
+URL_CLIENTS = "{url}/admin/realms/{realm}/clients"
+
+URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles"
+URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}"
+URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites"
+
+URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles"
+URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}"
+URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm"
+URL_REALM_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/available"
+URL_REALM_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm/composite"
+URL_REALM_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/roles/{name}/composites"
+
+URL_ROLES_BY_ID = "{url}/admin/realms/{realm}/roles-by-id/{id}"
+URL_ROLES_BY_ID_COMPOSITES_CLIENTS = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites/clients/{cid}"
+URL_ROLES_BY_ID_COMPOSITES = "{url}/admin/realms/{realm}/roles-by-id/{id}/composites"
+
+URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}"
+URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates"
+URL_GROUPS = "{url}/admin/realms/{realm}/groups"
+URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}"
+URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children"
+
+URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes"
+URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}"
+URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models"
+URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}"
+
+URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes"
+URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}"
+URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes"
+URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}"
+
+URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes"
+URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}"
+URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes"
+URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}"
+
+URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}"
+URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available"
+URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite"
+
+URL_USERS = "{url}/admin/realms/{realm}/users"
+URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user"
+URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}"
+URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available"
+URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite"
+
+URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret"
+
+URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows"
+URL_AUTHENTICATION_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{id}"
+URL_AUTHENTICATION_FLOW_COPY = "{url}/admin/realms/{realm}/authentication/flows/{copyfrom}/copy"
+URL_AUTHENTICATION_FLOW_EXECUTIONS = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions"
+URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/execution"
+URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW = "{url}/admin/realms/{realm}/authentication/flows/{flowalias}/executions/flow"
+URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication/executions/{id}/config"
+URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority"
+URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority"
+URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}"
+
+URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances"
+URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}"
+URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers"
+URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}"
+
+URL_COMPONENTS = "{url}/admin/realms/{realm}/components"
+URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}"
+
+URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}"
+URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope"
+
+
+def keycloak_argument_spec():
+ """
+ Returns argument_spec of options common to keycloak_*-modules
+
+ :return: argument_spec dict
+ """
+ return dict(
+ auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
+ auth_client_id=dict(type='str', default='admin-cli'),
+ auth_realm=dict(type='str'),
+ auth_client_secret=dict(type='str', default=None, no_log=True),
+ auth_username=dict(type='str', aliases=['username']),
+ auth_password=dict(type='str', aliases=['password'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ connection_timeout=dict(type='int', default=10),
+ token=dict(type='str', no_log=True),
+ http_agent=dict(type='str', default='Ansible'),
+ )
+
+
+def camel(words):
+ return words.split('_')[0] + ''.join(x.capitalize() or '_' for x in words.split('_')[1:])
+
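+# Example of the mapping (sketch): camel('auth_client_id') -> 'authClientId'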
+
+class KeycloakError(Exception):
+ pass
+
+
+def get_token(module_params):
+ """ Obtains connection header with token for the authentication,
+ token already given or obtained from credentials
+ :param module_params: parameters of the module
+ :return: connection header
+ """
+ token = module_params.get('token')
+ base_url = module_params.get('auth_keycloak_url')
+ http_agent = module_params.get('http_agent')
+
+ if not base_url.lower().startswith(('http', 'https')):
+ raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url)
+
+ if token is None:
+ base_url = module_params.get('auth_keycloak_url')
+ validate_certs = module_params.get('validate_certs')
+ auth_realm = module_params.get('auth_realm')
+ client_id = module_params.get('auth_client_id')
+ auth_username = module_params.get('auth_username')
+ auth_password = module_params.get('auth_password')
+ client_secret = module_params.get('auth_client_secret')
+ connection_timeout = module_params.get('connection_timeout')
+ auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm)
+ temp_payload = {
+ 'grant_type': 'password',
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'username': auth_username,
+ 'password': auth_password,
+ }
+ # Remove empty items, for instance missing client_secret
+ payload = dict(
+ (k, v) for k, v in temp_payload.items() if v is not None)
+ try:
+ r = json.loads(to_native(open_url(auth_url, method='POST',
+ validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout,
+ data=urlencode(payload)).read()))
+ except ValueError as e:
+ raise KeycloakError(
+ 'API returned invalid JSON when trying to obtain access token from %s: %s'
+ % (auth_url, str(e)))
+ except Exception as e:
+ raise KeycloakError('Could not obtain access token from %s: %s'
+ % (auth_url, str(e)))
+
+ try:
+ token = r['access_token']
+ except KeyError:
+ raise KeycloakError(
+ 'Could not obtain access token from %s' % auth_url)
+ return {
+ 'Authorization': 'Bearer ' + token,
+ 'Content-Type': 'application/json'
+ }
+
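+# Sketch of how modules combine this helper with KeycloakAPI (defined below):
+#
+#     connection_header = get_token(module.params)
+#     kc = KeycloakAPI(module, connection_header)
+#     realm = kc.get_realm_by_id(realm='master')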
+
+def is_struct_included(struct1, struct2, exclude=None):
+ """
+ This function checks whether the first parameter structure is included in the second.
+ It takes every element of struct1 and validates that it is present in the struct2 structure.
+ The two structures do not need to be equal for this function to return True.
+ Each element is compared recursively.
+ :param struct1:
+ type:
+ dict for the initial call, can be dict, list, bool, int or str for recursive calls
+ description:
+ reference structure
+ :param struct2:
+ type:
+ dict for the initial call, can be dict, list, bool, int or str for recursive calls
+ description:
+ structure to compare with the first parameter.
+ :param exclude:
+ type:
+ list
+ description:
+ Keys to exclude from the comparison.
+ default: None
+ :return:
+ type:
+ bool
+ description:
+ Return True if all elements of struct1 are present in struct2, False otherwise.
+ """
+ if isinstance(struct1, list) and isinstance(struct2, list):
+ if not struct1 and not struct2:
+ return True
+ for item1 in struct1:
+ if isinstance(item1, (list, dict)):
+ for item2 in struct2:
+ if is_struct_included(item1, item2, exclude):
+ break
+ else:
+ return False
+ else:
+ if item1 not in struct2:
+ return False
+ return True
+ elif isinstance(struct1, dict) and isinstance(struct2, dict):
+ if not struct1 and not struct2:
+ return True
+ try:
+ for key in struct1:
+ if not (exclude and key in exclude):
+ if not is_struct_included(struct1[key], struct2[key], exclude):
+ return False
+ except KeyError:
+ return False
+ return True
+ elif isinstance(struct1, bool) and isinstance(struct2, bool):
+ return struct1 == struct2
+ else:
+ return to_text(struct1, 'utf-8') == to_text(struct2, 'utf-8')
+
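+# Examples (sketch):
+#
+#     is_struct_included({"a": 1}, {"a": 1, "b": 2})   # -> True
+#     is_struct_included({"a": 1, "b": 2}, {"a": 1})   # -> False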
+
+class KeycloakAPI(object):
+ """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which
+ is obtained through OpenID Connect
+ """
+ def __init__(self, module, connection_header):
+ self.module = module
+ self.baseurl = self.module.params.get('auth_keycloak_url')
+ self.validate_certs = self.module.params.get('validate_certs')
+ self.connection_timeout = self.module.params.get('connection_timeout')
+ self.restheaders = connection_header
+ self.http_agent = self.module.params.get('http_agent')
+
+ def get_realm_info_by_id(self, realm='master'):
+ """ Obtain realm public info by id
+
+ :param realm: realm id
+ :return: dict of realm representation, or None if no matching realm exists
+ """
+ realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(realm_info_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+
+ def get_realm_by_id(self, realm='master'):
+ """ Obtain realm representation by id
+
+ :param realm: realm id
+ :return: dict of realm representation, or None if no matching realm exists
+ """
+ realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(realm_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+
+ def update_realm(self, realmrep, realm="master"):
+ """ Update an existing realm
+ :param realmrep: corresponding (partial/full) realm representation with updates
+ :param realm: realm to be updated in Keycloak
+ :return: HTTPResponse object on success
+ """
+ realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(realm_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(realmrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+
+ def create_realm(self, realmrep):
+ """ Create a realm in keycloak
+ :param realmrep: Realm representation of realm to be created.
+ :return: HTTPResponse object on success
+ """
+ realm_url = URL_REALMS.format(url=self.baseurl)
+
+ try:
+ return open_url(realm_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(realmrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)),
+ exception=traceback.format_exc())
+
+ def delete_realm(self, realm="master"):
+ """ Delete a realm from Keycloak
+
+ :param realm: realm to be deleted
+ :return: HTTPResponse object on success
+ """
+ realm_url = URL_REALM.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(realm_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)),
+ exception=traceback.format_exc())
+
+ def get_clients(self, realm='master', filter=None):
+ """ Obtains client representations for clients in a realm
+
+ :param realm: realm to be queried
+ :param filter: if defined, only the client with clientId specified in the filter is returned
+ :return: list of dicts of client representations
+ """
+ clientlist_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+ if filter is not None:
+ clientlist_url += '?clientId=%s' % filter
+
+ try:
+ return json.loads(to_native(open_url(clientlist_url, http_agent=self.http_agent, method='GET', headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_by_clientid(self, client_id, realm='master'):
+ """ Get client representation by clientId
+ :param client_id: The clientId to be queried
+ :param realm: realm from which to obtain the client representation
+ :return: dict with a client representation or None if none matching exist
+ """
+ r = self.get_clients(realm=realm, filter=client_id)
+ if len(r) > 0:
+ return r[0]
+ else:
+ return None
+
+ def get_client_by_id(self, id, realm='master'):
+ """ Obtain client representation by id
+
+ :param id: id (not clientId) of client to be queried
+ :param realm: client from this realm
+ :return: dict of client representation or None if none matching exist
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return json.loads(to_native(open_url(client_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_id(self, client_id, realm='master'):
+ """ Obtain id of client by client_id
+
+ :param client_id: client_id of client to be queried
+ :param realm: realm in which the client resides
+ :return: id of client (usually a UUID), or None if no client matches
+ """
+ result = self.get_client_by_clientid(client_id, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
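+ # Hedged usage sketch (not part of the original module): the id-based helpers
+ # above expect the server-side UUID, so a typical flow resolves a clientId
+ # first. 'kc' stands for an instance of this class; 'my-app' and 'myrealm'
+ # are assumed example names.
+ #
+ #   uuid = kc.get_client_id('my-app', realm='myrealm')   # clientId -> UUID
+ #   rep = kc.get_client_by_id(uuid, realm='myrealm')     # full representation
+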
+ def update_client(self, id, clientrep, realm="master"):
+ """ Update an existing client
+ :param id: id (not clientId) of client to be updated in Keycloak
+ :param clientrep: corresponding (partial/full) client representation with updates
+ :param realm: realm the client is in
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client(self, clientrep, realm="master"):
+ """ Create a client in keycloak
+ :param clientrep: Client representation of client to be created. Must at least contain field clientId.
+ :param realm: realm for client to be created.
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(client_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clientrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client %s in realm %s: %s'
+ % (clientrep['clientId'], realm, str(e)))
+
+ def delete_client(self, id, realm="master"):
+ """ Delete a client from Keycloak
+
+ :param id: id (not clientId) of client to be deleted
+ :param realm: realm of client to be deleted
+ :return: HTTPResponse object on success
+ """
+ client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(client_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_roles_by_id(self, cid, realm="master"):
+ """ Fetch the roles of the a client on the Keycloak server.
+
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The rollemappings of specified group and client of the realm (default "master").
+ """
+ client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s"
+ % (cid, realm, str(e)))
+
+ def get_client_role_id_by_name(self, cid, name, realm="master"):
+ """ Get the role ID of a client.
+
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param name: Name of the role.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The ID of the role, None if not found.
+ """
+ rolemappings = self.get_client_roles_by_id(cid, realm=realm)
+ for role in rolemappings:
+ if name == role['name']:
+ return role['id']
+ return None
+
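+ # Hedged usage sketch (assumed names): role names are only unique per client,
+ # so resolving a client role ID takes the owning client's UUID first.
+ #
+ #   cid = kc.get_client_id('my-app', realm='myrealm')
+ #   rid = kc.get_client_role_id_by_name(cid, 'my-role', realm='myrealm')
+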
+ def get_client_group_rolemapping_by_id(self, gid, cid, rid, realm='master'):
+ """ Obtain client representation by id
+
+ :param gid: ID of the group from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param rid: ID of the role.
+ :param realm: client from this realm
+ :return: dict of rolemapping representation or None if none matching exist
+ """
+ rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+ try:
+ rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ for role in rolemappings:
+ if rid == role['id']:
+ return role
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
+ return None
+
+ def get_client_group_available_rolemappings(self, gid, cid, realm="master"):
+ """ Fetch the available role of a client in a specified goup on the Keycloak server.
+
+ :param gid: ID of the group from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The rollemappings of specified group and client of the realm (default "master").
+ """
+ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+ try:
+ return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
+
+ def get_client_group_composite_rolemappings(self, gid, cid, realm="master"):
+ """ Fetch the composite role of a client in a specified group on the Keycloak server.
+
+ :param gid: ID of the group from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The rollemappings of specified group and client of the realm (default "master").
+ """
+ composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+ try:
+ return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
+
+ def get_role_by_id(self, rid, realm="master"):
+ """ Fetch a role by its id on the Keycloak server.
+
+ :param rid: ID of the role.
+ :param realm: Realm from which to obtain the role.
+ :return: The role.
+ """
+ client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid)
+ try:
+ return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch role for id %s in realm %s: %s"
+ % (rid, realm, str(e)))
+
+ def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"):
+ """ Fetch a role by its id on the Keycloak server.
+
+ :param rid: ID of the composite role.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The role.
+ """
+ client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid)
+ try:
+ return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch role for id %s and cid %s in realm %s: %s"
+ % (rid, cid, realm, str(e)))
+
+ def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"):
+ """ Assign roles to composite role
+
+ :param rid: ID of the composite role.
+ :param roles_rep: Representation of the roles to assign.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid)
+ try:
+ open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not assign roles to composite role %s and realm %s: %s"
+ % (rid, realm, str(e)))
+
+ def add_group_rolemapping(self, gid, cid, role_rep, realm="master"):
+ """ Fetch the composite role of a client in a specified goup on the Keycloak server.
+
+ :param gid: ID of the group from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param role_rep: Representation of the role to assign.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+ try:
+ open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
+
+ def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"):
+ """ Delete the rolemapping of a client in a specified group on the Keycloak server.
+
+ :param gid: ID of the group from which to remove the rolemappings.
+ :param cid: ID of the client from which to remove the rolemappings.
+ :param role_rep: Representation of the role to remove.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid)
+ try:
+ open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s"
+ % (cid, gid, realm, str(e)))
+
+ def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'):
+ """ Obtain client representation by id
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param rid: ID of the role.
+ :param realm: client from this realm
+ :return: dict of rolemapping representation or None if none matching exist
+ """
+ rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+ try:
+ rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ for role in rolemappings:
+ if rid == role['id']:
+ return role
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s"
+ % (cid, uid, realm, str(e)))
+ return None
+
+ def get_client_user_available_rolemappings(self, uid, cid, realm="master"):
+ """ Fetch the available role of a client for a specified user on the Keycloak server.
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The effective rollemappings of specified client and user of the realm (default "master").
+ """
+ available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+ try:
+ return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s"
+ % (cid, uid, realm, str(e)))
+
+ def get_client_user_composite_rolemappings(self, uid, cid, realm="master"):
+ """ Fetch the composite role of a client for a specified user on the Keycloak server.
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param cid: ID of the client from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The rollemappings of specified group and client of the realm (default "master").
+ """
+ composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+ try:
+ return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+ % (uid, realm, str(e)))
+
+ def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'):
+ """ Obtain role representation by id
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param rid: ID of the role.
+ :param realm: client from this realm
+ :return: dict of rolemapping representation or None if none matching exist
+ """
+ rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
+ try:
+ rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ for role in rolemappings:
+ if rid == role['id']:
+ return role
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch rolemappings for user %s, realm %s: %s"
+ % (uid, realm, str(e)))
+ return None
+
+ def get_realm_user_available_rolemappings(self, uid, realm="master"):
+ """ Fetch the available role of a realm for a specified user on the Keycloak server.
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The rollemappings of specified group and client of the realm (default "master").
+ """
+ available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid)
+ try:
+ return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s"
+ % (uid, realm, str(e)))
+
+ def get_realm_user_composite_rolemappings(self, uid, realm="master"):
+ """ Fetch the composite role of a realm for a specified user on the Keycloak server.
+
+ :param uid: ID of the user from which to obtain the rolemappings.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: The effective rollemappings of specified client and user of the realm (default "master").
+ """
+ composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid)
+ try:
+ return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch effective rolemappings for user %s, realm %s: %s"
+ % (uid, realm, str(e)))
+
+ def get_user_by_username(self, username, realm="master"):
+ """ Fetch a keycloak user within a realm based on its username.
+
+ If the user does not exist, None is returned.
+ :param username: Username of the user to fetch.
+ :param realm: Realm in which the user resides; default 'master'
+ """
+ users_url = URL_USERS.format(url=self.baseurl, realm=realm)
+ users_url += '?username=%s&exact=true' % username
+ try:
+ userrep = None
+ users = json.loads(to_native(open_url(users_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ for user in users:
+ if user['username'] == username:
+ userrep = user
+ break
+ return userrep
+
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s'
+ % (realm, username, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain the user for realm %s and username %s: %s'
+ % (realm, username, str(e)))
+
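+ # Hedged usage sketch (assumed names): the endpoint returns a list even with
+ # '?exact=true', which is why get_user_by_username() above still compares each
+ # candidate's username before returning.
+ #
+ #   user = kc.get_user_by_username('jdoe', realm='myrealm')
+ #   uid = user['id'] if user else None
+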
+ def get_service_account_user_by_client_id(self, client_id, realm="master"):
+ """ Fetch a keycloak service account user within a realm based on its client_id.
+
+ If the user does not exist, None is returned.
+ :param client_id: clientId of the service account user to fetch.
+ :param realm: Realm in which the user resides; default 'master'
+ """
+ cid = self.get_client_id(client_id, realm=realm)
+
+ service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return json.loads(to_native(open_url(service_account_user_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s'
+ % (realm, client_id, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain the service-account-user for realm %s and client_id %s: %s'
+ % (realm, client_id, str(e)))
+
+ def add_user_rolemapping(self, uid, cid, role_rep, realm="master"):
+ """ Assign a realm or client role to a specified user on the Keycloak server.
+
+ :param uid: ID of the user roles are assigned to.
+ :param cid: ID of the client from which to obtain the rolemappings. If empty, roles are from the realm
+ :param role_rep: Representation of the role to assign.
+ :param realm: Realm from which to obtain the rolemappings.
+ :return: None.
+ """
+ if cid is None:
+ user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
+ try:
+ open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not map roles to userId %s for realm %s and roles %s: %s"
+ % (uid, realm, json.dumps(role_rep), str(e)))
+ else:
+ user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+ try:
+ open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s"
+ % (uid, cid, realm, json.dumps(role_rep), str(e)))
+
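+ # Hedged usage sketch (assumed names): passing cid=None selects the realm-level
+ # rolemapping endpoint, while a client UUID selects the client-level endpoint.
+ # Both expect a list of role representations.
+ #
+ #   kc.add_user_rolemapping(uid, None, [realm_role_rep], realm='myrealm')  # realm role
+ #   kc.add_user_rolemapping(uid, cid, [client_role_rep], realm='myrealm')  # client role
+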
+ def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"):
+ """ Delete the rolemapping of a client in a specified user on the Keycloak server.
+
+ :param uid: ID of the user from which to remove the rolemappings.
+ :param cid: ID of the client from which to remove the rolemappings.
+ :param role_rep: Representation of the role to remove from rolemappings.
+ :param realm: Realm from which to remove the rolemappings.
+ :return: None.
+ """
+ if cid is None:
+ user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid)
+ try:
+ open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not remove roles %s from userId %s, realm %s: %s"
+ % (json.dumps(role_rep), uid, realm, str(e)))
+ else:
+ user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid)
+ try:
+ open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep),
+ validate_certs=self.validate_certs, timeout=self.connection_timeout)
+ except Exception as e:
+ self.module.fail_json(msg="Could not remove roles %s for client %s from userId %s, realm %s: %s"
+ % (json.dumps(role_rep), cid, uid, realm, str(e)))
+
+ def get_client_templates(self, realm='master'):
+ """ Obtains client template representations for client templates in a realm
+
+ :param realm: realm to be queried
+ :return: list of dicts of client template representations
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_client_template_by_id(self, id, realm='master'):
+ """ Obtain client template representation by id
+
+ :param id: id (not name) of client template to be queried
+ :param realm: realm in which the client template resides
+ :return: dict of client template representation or None if none matching exist
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm)
+
+ try:
+ return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_client_template_by_name(self, name, realm='master'):
+ """ Obtain client template representation by name
+
+ :param name: name of client template to be queried
+ :param realm: realm in which the client template resides
+ :return: dict of client template representation or None if none matching exist
+ """
+ result = self.get_client_templates(realm)
+ if isinstance(result, list):
+ result = [x for x in result if x['name'] == name]
+ if len(result) > 0:
+ return result[0]
+ return None
+
+ def get_client_template_id(self, name, realm='master'):
+ """ Obtain client template id by name
+
+ :param name: name of client template to be queried
+ :param realm: realm in which the client template resides
+ :return: client template id (usually a UUID)
+ """
+ result = self.get_client_template_by_name(name, realm)
+ if isinstance(result, dict) and 'id' in result:
+ return result['id']
+ else:
+ return None
+
+ def update_client_template(self, id, clienttrep, realm="master"):
+ """ Update an existing client template
+ :param id: id (not name) of client template to be updated in Keycloak
+ :param clienttrep: corresponding (partial/full) client template representation with updates
+ :param realm: realm the client template is in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def create_client_template(self, clienttrep, realm="master"):
+ """ Create a client in keycloak
+ :param clienttrep: Client template representation of client template to be created. Must at least contain field name
+ :param realm: realm for client template to be created in
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm)
+
+ try:
+ return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clienttrep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create client template %s in realm %s: %s'
+ % (clienttrep['name'], realm, str(e)))
+
+ def delete_client_template(self, id, realm="master"):
+ """ Delete a client template from Keycloak
+
+ :param id: id (not name) of client template to be deleted
+ :param realm: realm of client template to be deleted
+ :return: HTTPResponse object on success
+ """
+ url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete client template %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_clientscopes(self, realm="master"):
+ """ Fetch the name and ID of all clientscopes on the Keycloak server.
+
+ To fetch the full data of the clientscope, make a subsequent call to
+ get_clientscope_by_clientscopeid, passing in the ID of the clientscope you wish to return.
+
+ :param realm: Realm in which the clientscope resides; default 'master'.
+ :return: The clientscopes of this realm (default "master")
+ """
+ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s"
+ % (realm, str(e)))
+
+ def get_clientscope_by_clientscopeid(self, cid, realm="master"):
+ """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
+
+ If the clientscope does not exist, None is returned.
+
+ cid is a UUID provided by the Keycloak API
+ :param cid: UUID of the clientscope to be returned
+ :param realm: Realm in which the clientscope resides; default 'master'.
+ """
+ clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return json.loads(to_native(open_url(clientscope_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
+ % (cid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s"
+ % (cid, realm, str(e)))
+
+ def get_clientscope_by_name(self, name, realm="master"):
+ """ Fetch a keycloak clientscope within a realm based on its name.
+
+ The Keycloak API does not allow filtering of the clientscopes resource by name.
+ As a result, this method first retrieves the entire list of clientscopes - name and ID -
+ then performs a second query to fetch the clientscope.
+
+ If the clientscope does not exist, None is returned.
+ :param name: Name of the clientscope to fetch.
+ :param realm: Realm in which the clientscope resides; default 'master'
+ """
+ try:
+ all_clientscopes = self.get_clientscopes(realm=realm)
+
+ for clientscope in all_clientscopes:
+ if clientscope['name'] == name:
+ return self.get_clientscope_by_clientscopeid(clientscope['id'], realm=realm)
+
+ return None
+
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s"
+ % (name, realm, str(e)))
+
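+ # Hedged usage sketch (assumed scope name): name lookup is a two-step
+ # operation - list all clientscopes, then refetch the match by its ID.
+ #
+ #   scope = kc.get_clientscope_by_name('offline_access', realm='myrealm')
+ #   scope_id = scope['id'] if scope else None
+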
+ def create_clientscope(self, clientscoperep, realm="master"):
+ """ Create a Keycloak clientscope.
+
+ :param clientscoperep: a ClientScopeRepresentation of the clientscope to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(clientscopes_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s"
+ % (clientscoperep['name'], realm, str(e)))
+
+ def update_clientscope(self, clientscoperep, realm="master"):
+ """ Update an existing clientscope.
+
+ :param clientscoperep: A ClientScopeRepresentation of the updated clientscope.
+ :return: HTTPResponse object on success
+ """
+ clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id'])
+
+ try:
+ return open_url(clientscope_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(clientscoperep), validate_certs=self.validate_certs)
+
+ except Exception as e:
+ self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s'
+ % (clientscoperep['name'], realm, str(e)))
+
+ def delete_clientscope(self, name=None, cid=None, realm="master"):
+ """ Delete a clientscope. One of name or cid must be provided.
+
+ Providing the clientscope ID is preferred as it avoids a second lookup to
+ convert a clientscope name to an ID.
+
+ :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
+ :param cid: The ID of the clientscope (preferred to name).
+ :param realm: The realm in which this clientscope resides, default "master".
+ """
+
+ if cid is None and name is None:
+ # prefer an exception since this is almost certainly a programming error in the module itself.
+ raise Exception("Unable to delete group - one of group ID or name must be provided.")
+
+ # only lookup the name if cid isn't provided.
+ # in the case that both are provided, prefer the ID, since it's one
+ # less lookup.
+ if cid is None and name is not None:
+ for clientscope in self.get_clientscopes(realm=realm):
+ if clientscope['name'] == name:
+ cid = clientscope['id']
+ break
+
+ # if the clientscope doesn't exist - no problem, nothing to delete.
+ if cid is None:
+ return None
+
+ # should have a good cid by here.
+ clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl)
+ try:
+ return open_url(clientscope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+
+ except Exception as e:
+ self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e)))
+
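+ # Hedged usage sketch (assumed names): deleting by cid skips the list lookup
+ # that deleting by name needs, so prefer the ID when it is already known.
+ #
+ #   kc.delete_clientscope(cid=scope_id, realm='myrealm')     # preferred
+ #   kc.delete_clientscope(name='my-scope', realm='myrealm')  # extra lookup
+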
+ def get_clientscope_protocolmappers(self, cid, realm="master"):
+ """ Fetch the name and ID of all clientscopes on the Keycloak server.
+
+ To fetch the full data of the group, make a subsequent call to
+ get_clientscope_by_clientscopeid, passing in the ID of the group you wish to return.
+
+ :param cid: id of clientscope (not name).
+ :param realm: Realm in which the clientscope resides; default 'master'.
+ :return The protocolmappers of this realm (default "master")
+ """
+ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(protocolmappers_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s"
+ % (realm, str(e)))
+
+ def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"):
+ """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID.
+
+ If the clientscope does not exist, None is returned.
+
+ gid is a UUID provided by the Keycloak API
+
+ :param cid: UUID of the protocolmapper to be returned
+ :param cid: UUID of the clientscope to be returned
+ :param realm: Realm in which the clientscope resides; default 'master'.
+ """
+ protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid)
+ try:
+ return json.loads(to_native(open_url(protocolmapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
+ % (pid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
+ % (pid, realm, str(e)))
+
+ def get_clientscope_protocolmapper_by_name(self, cid, name, realm="master"):
+ """ Fetch a keycloak clientscope within a realm based on its name.
+
+ The Keycloak API does not allow filtering of the clientscopes resource by name.
+ As a result, this method first retrieves the entire list of clientscopes - name and ID -
+ then performs a second query to fetch the group.
+
+ If the clientscope does not exist, None is returned.
+ :param cid: Id of the clientscope (not name).
+ :param name: Name of the protocolmapper to fetch.
+ :param realm: Realm in which the clientscope resides; default 'master'
+ """
+ try:
+ all_protocolmappers = self.get_clientscope_protocolmappers(cid, realm=realm)
+
+ for protocolmapper in all_protocolmappers:
+ if protocolmapper['name'] == name:
+ return self.get_clientscope_protocolmapper_by_protocolmapperid(protocolmapper['id'], cid, realm=realm)
+
+ return None
+
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s"
+ % (name, realm, str(e)))
+
+ def create_clientscope_protocolmapper(self, cid, mapper_rep, realm="master"):
+ """ Create a Keycloak clientscope protocolmapper.
+
+ :param cid: Id of the clientscope.
+ :param mapper_rep: a ProtocolMapperRepresentation of the protocolmapper to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm)
+ try:
+ return open_url(protocolmappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s"
+ % (mapper_rep['name'], realm, str(e)))
+
+ def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"):
+ """ Update an existing clientscope.
+
+ :param cid: Id of the clientscope.
+ :param mapper_rep: A ProtocolMapperRepresentation of the updated protocolmapper.
+ :return: HTTPResponse object on success
+ """
+ protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id'])
+
+ try:
+ return open_url(protocolmapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(mapper_rep), validate_certs=self.validate_certs)
+
+ except Exception as e:
+ self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s'
+ % (mapper_rep, realm, str(e)))
+
+ def get_default_clientscopes(self, realm, client_id=None):
+ """Fetch the name and ID of all clientscopes on the Keycloak server.
+
+ To fetch the full data of the client scope, make a subsequent call to
+ get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ :return The default clientscopes of this realm or client
+ """
+ url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES
+ return self._get_clientscopes_of_type(realm, url, 'default', client_id)
+
+ def get_optional_clientscopes(self, realm, client_id=None):
+ """Fetch the name and ID of all clientscopes on the Keycloak server.
+
+ To fetch the full data of the client scope, make a subsequent call to
+ get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ :return The optinal clientscopes of this realm or client
+ """
+ url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES
+ return self._get_clientscopes_of_type(realm, url, 'optional', client_id)
+
+ def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None):
+ """Fetch the name and ID of all clientscopes on the Keycloak server.
+
+ To fetch the full data of the client scope, make a subsequent call to
+ get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return.
+
+ :param realm: Realm in which the clientscope resides.
+ :param url_template the template for the right type
+ :param scope_type this can be either optinal or default
+ :param client_id: The client in which the clientscope resides.
+ :return The clientscopes of the specified type of this realm
+ """
+ if client_id is None:
+ clientscopes_url = url_template.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e)))
+ else:
+ cid = self.get_client_id(client_id=client_id, realm=realm)
+ clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid)
+ try:
+ return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout, validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url))
+
+ def _decide_url_type_clientscope(self, client_id=None, scope_type="default"):
+ """Decides which url to use.
+ :param scope_type this can be either optinal or default
+ :param client_id: The client in which the clientscope resides.
+ """
+ if client_id is None:
+ if scope_type == "default":
+ return URL_DEFAULT_CLIENTSCOPE
+ if scope_type == "optional":
+ return URL_OPTIONAL_CLIENTSCOPE
+ else:
+ if scope_type == "default":
+ return URL_CLIENT_DEFAULT_CLIENTSCOPE
+ if scope_type == "optional":
+ return URL_CLIENT_OPTIONAL_CLIENTSCOPE
+
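+ # For reference, the four cases above map as follows (the URL constants are
+ # defined elsewhere in this module):
+ #
+ #   client_id=None, 'default'  -> URL_DEFAULT_CLIENTSCOPE         (realm level)
+ #   client_id=None, 'optional' -> URL_OPTIONAL_CLIENTSCOPE        (realm level)
+ #   client_id set,  'default'  -> URL_CLIENT_DEFAULT_CLIENTSCOPE  (client level)
+ #   client_id set,  'optional' -> URL_CLIENT_OPTIONAL_CLIENTSCOPE (client level)
+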
+ def add_default_clientscope(self, id, realm="master", client_id=None):
+ """Add a client scope as default either on realm or client level.
+
+ :param id: Client scope Id.
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ """
+ self._action_type_clientscope(id, client_id, "default", realm, 'add')
+
+ def add_optional_clientscope(self, id, realm="master", client_id=None):
+ """Add a client scope as optional either on realm or client level.
+
+ :param id: Client scope Id.
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ """
+ self._action_type_clientscope(id, client_id, "optional", realm, 'add')
+
+ def delete_default_clientscope(self, id, realm="master", client_id=None):
+ """Remove a client scope as default either on realm or client level.
+
+ :param id: Client scope Id.
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ """
+ self._action_type_clientscope(id, client_id, "default", realm, 'delete')
+
+ def delete_optional_clientscope(self, id, realm="master", client_id=None):
+ """Remove a client scope as optional either on realm or client level.
+
+ :param id: Client scope Id.
+ :param realm: Realm in which the clientscope resides.
+ :param client_id: The client in which the clientscope resides.
+ """
+ self._action_type_clientscope(id, client_id, "optional", realm, 'delete')
+
+ def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'):
+ """ Delete or add a clientscope of type.
+ :param name: The name of the clientscope. A lookup will be performed to retrieve the clientscope ID.
+ :param client_id: The ID of the clientscope (preferred to name).
+ :param scope_type 'default' or 'optional'
+ :param realm: The realm in which this group resides, default "master".
+ """
+ cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
+ # cid stays None for realm-level changes; otherwise it was resolved from client_id.
+ clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
+ try:
+ method = 'PUT' if action == "add" else 'DELETE'
+ return open_url(clientscope_type_url, method=method, http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+
+ except Exception as e:
+ place = 'realm' if client_id is None else 'client ' + client_id
+ self.module.fail_json(msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
+
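+ # Hedged usage sketch (assumed IDs): the add_*/delete_* wrappers above all
+ # funnel into _action_type_clientscope(), which maps 'add' to PUT and any
+ # other action to DELETE.
+ #
+ #   kc.add_default_clientscope(scope_id, realm='myrealm')  # realm level
+ #   kc.delete_optional_clientscope(scope_id, realm='myrealm', client_id='my-app')
+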
+ def create_clientsecret(self, id, realm="master"):
+ """ Generate a new client secret by id
+
+ :param id: id (not clientId) of client to be queried
+ :param realm: realm the client is in
+ :return: dict of credential representation
+ """
+ clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return json.loads(to_native(open_url(clientsecret_url, method='POST', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_clientsecret(self, id, realm="master"):
+ """ Obtain client secret by id
+
+ :param id: id (not clientId) of client to be queried
+ :param realm: realm the client is in
+ :return: dict of credential representation
+ """
+ clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return json.loads(to_native(open_url(clientsecret_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
+ % (id, realm, str(e)))
+
+ def get_groups(self, realm="master"):
+ """ Fetch the name and ID of all groups on the Keycloak server.
+
+ To fetch the full data of the group, make a subsequent call to
+ get_group_by_groupid, passing in the ID of the group you wish to return.
+
+ :param realm: Return the groups of this realm (default "master").
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s"
+ % (realm, str(e)))
+
+ def get_group_by_groupid(self, gid, realm="master"):
+ """ Fetch a keycloak group from the provided realm using the group's unique ID.
+
+ If the group does not exist, None is returned.
+
+ gid is a UUID provided by the Keycloak API
+ :param gid: UUID of the group to be returned
+ :param realm: Realm in which the group resides; default 'master'.
+ """
+ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid)
+ try:
+ return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (gid, realm, str(e)))
+
+ def get_group_by_name(self, name, realm="master", parents=None):
+ """ Fetch a keycloak group within a realm based on its name.
+
+ The Keycloak API does not allow filtering of the Groups resource by name.
+ As a result, this method first retrieves the entire list of groups - name and ID -
+ then performs a second query to fetch the group.
+
+ If the group does not exist, None is returned.
+ :param name: Name of the group to fetch.
+ :param realm: Realm in which the group resides; default 'master'
+ :param parents: Optional list of parents when group to look for is a subgroup
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ if parents:
+ parent = self.get_subgroup_direct_parent(parents, realm)
+
+ if not parent:
+ return None
+
+ all_groups = parent['subGroups']
+ else:
+ all_groups = self.get_groups(realm=realm)
+
+ for group in all_groups:
+ if group['name'] == name:
+ return self.get_group_by_groupid(group['id'], realm=realm)
+
+ return None
+
+ except Exception as e:
+ self.module.fail_json(msg="Could not fetch group %s in realm %s: %s"
+ % (name, realm, str(e)))
+
+ def _get_normed_group_parent(self, parent):
+ """ Converts parent dict information into a more easy to use form.
+
+ :param parent: parent describing dict
+ """
+ if parent['id']:
+ return (parent['id'], True)
+
+ return (parent['name'], False)
+
+ def get_subgroup_by_chain(self, name_chain, realm="master"):
+ """ Access a subgroup API object by walking down a given name/id chain.
+
+ Groups can be given either by name or by ID; the first element must
+ either be a top-level group or be given as an ID, and all parents must exist.
+
+ If the group cannot be found, None is returned.
+ :param name_chain: Top-down ordered list of subgroup parents (IDs or names), ending with the group's own name
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ cp = name_chain[0]
+
+ # for 1st parent in chain we must query the server
+ cp, is_id = self._get_normed_group_parent(cp)
+
+ if is_id:
+ tmp = self.get_group_by_groupid(cp, realm=realm)
+ else:
+ # given as name, assume top-level group
+ tmp = self.get_group_by_name(cp, realm=realm)
+
+ if not tmp:
+ return None
+
+ for p in name_chain[1:]:
+ for sg in tmp['subGroups']:
+ pv, is_id = self._get_normed_group_parent(p)
+
+ if is_id:
+ cmpkey = "id"
+ else:
+ cmpkey = "name"
+
+ if pv == sg[cmpkey]:
+ tmp = sg
+ break
+
+ if not tmp:
+ return None
+
+ return tmp
+
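+ # Hedged usage sketch (assumed group names): each chain element is a dict with
+ # 'id' and 'name' keys; a truthy 'id' takes precedence over 'name'.
+ #
+ #   chain = [{'id': None, 'name': 'top'},
+ #            {'id': None, 'name': 'child'},
+ #            {'id': None, 'name': 'grandchild'}]
+ #   group = kc.get_subgroup_by_chain(chain, realm='myrealm')
+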
+ def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None):
+ """ Get keycloak direct parent group API object for a given chain of parents.
+
+ To successfully work with the API for subgroups we do not actually
+ need to "walk the whole tree" for nested groups; we only need to
+ know the ID of the direct predecessor of the current subgroup. This
+ method guarantees us that information while doing as little work
+ as possible.
+
+ Note that the given parent list can and might be incomplete at the
+ upper levels as long as it starts with an ID instead of a name
+
+ If the group does not exist, None is returned.
+ :param parents: Top-down ordered list of subgroup parents
+ :param realm: Realm in which the group resides; default 'master'
+ """
+ if children_to_resolve is None:
+ # start recursion by reversing parents (in optimal cases
+ # we don't need to walk the whole tree upwards)
+ parents = list(reversed(parents))
+ children_to_resolve = []
+
+ if not parents:
+ # walked the complete parents list to the top, all names, no IDs;
+ # try to resolve it assuming the list is complete and the 1st
+ # element is a top-level group
+ return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm)
+
+ cp = parents[0]
+ unused, is_id = self._get_normed_group_parent(cp)
+
+ if is_id:
+ # current parent is given as ID, we can stop walking
+ # upwards searching for an entry point
+ return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm)
+ else:
+ # current parent is given as name, it must be resolved
+ # later, try next parent (recurse)
+ children_to_resolve.append(cp)
+ return self.get_subgroup_direct_parent(
+ parents[1:],
+ realm=realm, children_to_resolve=children_to_resolve
+ )
+
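+ # Hedged usage sketch (assumed IDs/names): as soon as a parent is given by ID
+ # the upward walk stops, so an incomplete upper chain is fine if it starts
+ # with an ID.
+ #
+ #   parents = [{'id': 'aaaa-bbbb', 'name': None}, {'id': None, 'name': 'child'}]
+ #   direct_parent = kc.get_subgroup_direct_parent(parents, realm='myrealm')
+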
+ def create_group(self, grouprep, realm="master"):
+ """ Create a Keycloak group.
+
+ :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(groups_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create group %s in realm %s: %s"
+ % (grouprep['name'], realm, str(e)))
+
+ def create_subgroup(self, parents, grouprep, realm="master"):
+ """ Create a Keycloak subgroup.
+
+ :param parents: list of one or more parent groups
+ :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ parent_id = "---UNDETERMINED---"
+ try:
+ parent_id = self.get_subgroup_direct_parent(parents, realm)
+
+ if not parent_id:
+ raise Exception(
+ "Could not determine subgroup parent ID for given"
+ " parent chain {0}. Assure that all parents exist"
+ " already and the list is complete and properly"
+ " ordered, starts with an ID or starts at the"
+ " top level".format(parents)
+ )
+
+ parent_id = parent_id["id"]
+ url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id)
+ return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Could not create subgroup %s for parent group %s in realm %s: %s"
+ % (grouprep['name'], parent_id, realm, str(e)))
+
+ def update_group(self, grouprep, realm="master"):
+ """ Update an existing group.
+
+ :param grouprep: A GroupRepresentation of the updated group.
+ :return HTTPResponse object on success
+ """
+ group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id'])
+
+ try:
+ return open_url(group_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(grouprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update group %s in realm %s: %s'
+ % (grouprep['name'], realm, str(e)))
+
+ def delete_group(self, name=None, groupid=None, realm="master"):
+ """ Delete a group. One of name or groupid must be provided.
+
+ Providing the group ID is preferred as it avoids a second lookup to
+ convert a group name to an ID.
+
+ :param name: The name of the group. A lookup will be performed to retrieve the group ID.
+ :param groupid: The ID of the group (preferred to name).
+ :param realm: The realm in which this group resides, default "master".
+ """
+
+ if groupid is None and name is None:
+ # prefer an exception since this is almost certainly a programming error in the module itself.
+ raise Exception("Unable to delete group - one of group ID or name must be provided.")
+
+ # only lookup the name if groupid isn't provided.
+ # in the case that both are provided, prefer the ID, since it's one
+ # less lookup.
+ if groupid is None and name is not None:
+ for group in self.get_groups(realm=realm):
+ if group['name'] == name:
+ groupid = group['id']
+ break
+
+ # if the group doesn't exist - no problem, nothing to delete.
+ if groupid is None:
+ return None
+
+ # should have a good groupid by here.
+ group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl)
+ try:
+ return open_url(group_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e)))
+
+ def get_realm_roles(self, realm='master'):
+ """ Obtains role representations for roles in a realm
+
+ :param realm: realm to be queried
+ :return: list of dicts of role representations
+ """
+ rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_realm_role(self, name, realm='master'):
+ """ Fetch a keycloak role from the provided realm using the role's name.
+
+ If the role does not exist, None is returned.
+ :param name: Name of the role to fetch.
+ :param realm: Realm in which the role resides; default 'master'.
+ """
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
+ try:
+ return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
+ % (name, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not fetch role %s in realm %s: %s'
+ % (name, realm, str(e)))
+
+ def create_realm_role(self, rolerep, realm='master'):
+ """ Create a Keycloak realm role.
+
+ :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name.
+ :return: HTTPResponse object on success
+ """
+ roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def update_realm_role(self, rolerep, realm='master'):
+ """ Update an existing realm role.
+
+ :param rolerep: A RoleRepresentation of the updated role.
+ :return: HTTPResponse object on success
+ """
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
+ try:
+ return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update role %s in realm %s: %s'
+ % (rolerep['name'], realm, str(e)))
+
+ def delete_realm_role(self, name, realm='master'):
+ """ Delete a realm role.
+
+ :param name: The name of the role.
+ :param realm: The realm in which this role resides, default "master".
+ """
+ role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name))
+ try:
+ return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete role %s in realm %s: %s'
+ % (name, realm, str(e)))
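+
+    # Example (a minimal sketch, assuming `kc` is an authenticated KeycloakAPI
+    # instance and the payload follows Keycloak's RoleRepresentation):
+    #
+    #   rolerep = {'name': 'my-role', 'description': 'example role'}
+    #   if kc.get_realm_role('my-role', realm='demo') is None:
+    #       kc.create_realm_role(rolerep, realm='demo')
+    #   else:
+    #       kc.update_realm_role(rolerep, realm='demo')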
+
+ def get_client_roles(self, clientid, realm='master'):
+ """ Obtains role representations for client roles in a specific client
+
+ :param clientid: Client id to be queried
+ :param realm: Realm to be queried
+ :return: List of dicts of role representations
+ """
+ cid = self.get_client_id(clientid, realm=realm)
+ if cid is None:
+ self.module.fail_json(msg='Could not find client %s in realm %s'
+ % (clientid, realm))
+ rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s'
+ % (clientid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s'
+ % (clientid, realm, str(e)))
+
+ def get_client_role(self, name, clientid, realm='master'):
+ """ Fetch a keycloak client role from the provided realm using the role's name.
+
+ :param name: Name of the role to fetch.
+ :param clientid: Client id for the client role
+ :param realm: Realm in which the role resides
+ :return: Dict of role representation
+ If the role does not exist, None is returned.
+ """
+ cid = self.get_client_id(clientid, realm=realm)
+ if cid is None:
+ self.module.fail_json(msg='Could not find client %s in realm %s'
+ % (clientid, realm))
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
+ try:
+ return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s'
+ % (name, clientid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s'
+ % (name, clientid, realm, str(e)))
+
+ def create_client_role(self, rolerep, clientid, realm='master'):
+ """ Create a Keycloak client role.
+
+ :param rolerep: a RoleRepresentation of the role to be created. Must contain at minimum the field name.
+ :param clientid: Client id for the client role
+ :param realm: Realm in which the role resides
+ :return: HTTPResponse object on success
+ """
+ cid = self.get_client_id(clientid, realm=realm)
+ if cid is None:
+ self.module.fail_json(msg='Could not find client %s in realm %s'
+ % (clientid, realm))
+ roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s'
+ % (rolerep['name'], clientid, realm, str(e)))
+
+ def update_client_role(self, rolerep, clientid, realm="master"):
+ """ Update an existing client role.
+
+ :param rolerep: A RoleRepresentation of the updated role.
+ :param clientid: Client id for the client role
+ :param realm: Realm in which the role resides
+        :return: HTTPResponse object on success
+ """
+ cid = self.get_client_id(clientid, realm=realm)
+ if cid is None:
+ self.module.fail_json(msg='Could not find client %s in realm %s'
+ % (clientid, realm))
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name']))
+ try:
+ return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(rolerep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s'
+ % (rolerep['name'], clientid, realm, str(e)))
+
+ def delete_client_role(self, name, clientid, realm="master"):
+ """ Delete a role. One of name or roleid must be provided.
+
+ :param name: The name of the role.
+ :param clientid: Client id for the client role
+ :param realm: Realm in which the role resides
+ """
+ cid = self.get_client_id(clientid, realm=realm)
+ if cid is None:
+ self.module.fail_json(msg='Could not find client %s in realm %s'
+ % (clientid, realm))
+ role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name))
+ try:
+ return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s'
+ % (name, clientid, realm, str(e)))
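+
+    # Example (sketch): the client-role helpers take the client's clientId and
+    # resolve its UUID internally via get_client_id.
+    #
+    #   if kc.get_client_role('viewer', 'my-client', realm='demo') is None:
+    #       kc.create_client_role({'name': 'viewer'}, 'my-client', realm='demo')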
+
+ def get_authentication_flow_by_alias(self, alias, realm='master'):
+ """
+        Get an authentication flow by its alias.
+ :param alias: Alias of the authentication flow to get.
+ :param realm: Realm.
+ :return: Authentication flow representation.
+ """
+ try:
+ authentication_flow = {}
+            # Check if the authentication flow exists on the Keycloak server
+ authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout, validate_certs=self.validate_certs))
+ for authentication in authentications:
+ if authentication["alias"] == alias:
+ authentication_flow = authentication
+ break
+ return authentication_flow
+ except Exception as e:
+ self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e)))
+
+ def delete_authentication_flow_by_id(self, id, realm='master'):
+ """
+ Delete an authentication flow from Keycloak
+ :param id: id of authentication flow to be deleted
+ :param realm: realm of client to be deleted
+ :return: HTTPResponse object on success
+ """
+ flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id)
+
+ try:
+ return open_url(flow_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s'
+ % (id, realm, str(e)))
+
+ def copy_auth_flow(self, config, realm='master'):
+ """
+ Create a new authentication flow from a copy of another.
+ :param config: Representation of the authentication flow to create.
+ :param realm: Realm.
+ :return: Representation of the new authentication flow.
+ """
+ try:
+ new_name = dict(
+ newName=config["alias"]
+ )
+ open_url(
+ URL_AUTHENTICATION_FLOW_COPY.format(
+ url=self.baseurl,
+ realm=realm,
+ copyfrom=quote(config["copyFrom"])),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(new_name),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ flow_list = json.load(
+ open_url(
+ URL_AUTHENTICATION_FLOWS.format(url=self.baseurl,
+ realm=realm),
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ for flow in flow_list:
+ if flow["alias"] == config["alias"]:
+ return flow
+ return None
+ except Exception as e:
+ self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
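+
+    # Example (sketch): `config` needs an `alias` for the new flow and a
+    # `copyFrom` naming the existing flow to duplicate.
+    #
+    #   flow = kc.copy_auth_flow({'alias': 'my-browser', 'copyFrom': 'browser'}, realm='demo')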
+
+ def create_empty_auth_flow(self, config, realm='master'):
+ """
+ Create a new empty authentication flow.
+ :param config: Representation of the authentication flow to create.
+ :param realm: Realm.
+ :return: Representation of the new authentication flow.
+ """
+ try:
+ new_flow = dict(
+ alias=config["alias"],
+ providerId=config["providerId"],
+ description=config["description"],
+ topLevel=True
+ )
+ open_url(
+ URL_AUTHENTICATION_FLOWS.format(
+ url=self.baseurl,
+ realm=realm),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(new_flow),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ flow_list = json.load(
+ open_url(
+ URL_AUTHENTICATION_FLOWS.format(
+ url=self.baseurl,
+ realm=realm),
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ for flow in flow_list:
+ if flow["alias"] == config["alias"]:
+ return flow
+ return None
+ except Exception as e:
+ self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
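+
+    # Example (sketch): `config` supplies the alias, providerId and description
+    # of the new top-level flow.
+    #
+    #   flow = kc.create_empty_auth_flow(
+    #       {'alias': 'my-flow', 'providerId': 'basic-flow', 'description': ''}, realm='demo')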
+
+ def update_authentication_executions(self, flowAlias, updatedExec, realm='master'):
+ """ Update authentication executions
+
+ :param flowAlias: name of the parent flow
+ :param updatedExec: JSON containing updated execution
+ :return: HTTPResponse object on success
+ """
+ try:
+ open_url(
+ URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
+ url=self.baseurl,
+ realm=realm,
+ flowalias=quote(flowAlias)),
+ method='PUT',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(updatedExec),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except HTTPError as e:
+ self.module.fail_json(msg="Unable to update execution '%s': %s: %s %s" %
+ (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec)))
+ except Exception as e:
+ self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e)))
+
+ def add_authenticationConfig_to_execution(self, executionId, authenticationConfig, realm='master'):
+ """ Add autenticatorConfig to the execution
+
+ :param executionId: id of execution
+ :param authenticationConfig: config to add to the execution
+ :return: HTTPResponse object on success
+ """
+ try:
+ open_url(
+ URL_AUTHENTICATION_EXECUTION_CONFIG.format(
+ url=self.baseurl,
+ realm=realm,
+ id=executionId),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(authenticationConfig),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e)))
+
+ def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'):
+ """ Create new sublow on the flow
+
+ :param subflowName: name of the subflow to create
+ :param flowAlias: name of the parent flow
+ :return: HTTPResponse object on success
+ """
+ try:
+ newSubFlow = {}
+ newSubFlow["alias"] = subflowName
+ newSubFlow["provider"] = "registration-page-form"
+ newSubFlow["type"] = flowType
+ open_url(
+ URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format(
+ url=self.baseurl,
+ realm=realm,
+ flowalias=quote(flowAlias)),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(newSubFlow),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e)))
+
+ def create_execution(self, execution, flowAlias, realm='master'):
+ """ Create new execution on the flow
+
+ :param execution: name of execution to create
+ :param flowAlias: name of the parent flow
+ :return: HTTPResponse object on success
+ """
+ try:
+ newExec = {}
+ newExec["provider"] = execution["providerId"]
+ newExec["requirement"] = execution["requirement"]
+ open_url(
+ URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format(
+ url=self.baseurl,
+ realm=realm,
+ flowalias=quote(flowAlias)),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ data=json.dumps(newExec),
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except HTTPError as e:
+ self.module.fail_json(msg="Unable to create new execution '%s' %s: %s: %s %s" %
+ (flowAlias, execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec)))
+ except Exception as e:
+ self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e)))
+
+ def change_execution_priority(self, executionId, diff, realm='master'):
+ """ Raise or lower execution priority of diff time
+
+ :param executionId: id of execution to lower priority
+ :param realm: realm the client is in
+ :param diff: Integer number, raise of diff time if positive lower of diff time if negative
+ :return: HTTPResponse object on success
+ """
+ try:
+ if diff > 0:
+ for i in range(diff):
+ open_url(
+ URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format(
+ url=self.baseurl,
+ realm=realm,
+ id=executionId),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ elif diff < 0:
+ for i in range(-diff):
+ open_url(
+ URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format(
+ url=self.baseurl,
+ realm=realm,
+ id=executionId),
+ method='POST',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e)))
+
+ def get_executions_representation(self, config, realm='master'):
+ """
+ Get a representation of the executions for an authentication flow.
+ :param config: Representation of the authentication flow
+ :param realm: Realm
+ :return: Representation of the executions
+ """
+ try:
+ # Get executions created
+ executions = json.load(
+ open_url(
+ URL_AUTHENTICATION_FLOW_EXECUTIONS.format(
+ url=self.baseurl,
+ realm=realm,
+ flowalias=quote(config["alias"])),
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ for execution in executions:
+ if "authenticationConfig" in execution:
+ execConfigId = execution["authenticationConfig"]
+ execConfig = json.load(
+ open_url(
+ URL_AUTHENTICATION_CONFIG.format(
+ url=self.baseurl,
+ realm=realm,
+ id=execConfigId),
+ method='GET',
+ http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs))
+ execution["authenticationConfig"] = execConfig
+ return executions
+ except Exception as e:
+ self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
+
+ def get_identity_providers(self, realm='master'):
+ """ Fetch representations for identity providers in a realm
+ :param realm: realm to be queried
+ :return: list of representations for identity providers
+ """
+ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
+ try:
+ return json.loads(to_native(open_url(idps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_identity_provider(self, alias, realm='master'):
+ """ Fetch identity provider representation from a realm using the idp's alias.
+ If the identity provider does not exist, None is returned.
+ :param alias: Alias of the identity provider to fetch.
+ :param realm: Realm in which the identity provider resides; default 'master'.
+ """
+ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
+ try:
+ return json.loads(to_native(open_url(idp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))
+
+ def create_identity_provider(self, idprep, realm='master'):
+ """ Create an identity provider.
+ :param idprep: Identity provider representation of the idp to be created.
+ :param realm: Realm in which this identity provider resides, default "master".
+ :return: HTTPResponse object on success
+ """
+ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm)
+ try:
+ return open_url(idps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(idprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s'
+ % (idprep['alias'], realm, str(e)))
+
+ def update_identity_provider(self, idprep, realm='master'):
+ """ Update an existing identity provider.
+ :param idprep: Identity provider representation of the idp to be updated.
+ :param realm: Realm in which this identity provider resides, default "master".
+        :return: HTTPResponse object on success
+ """
+ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias'])
+ try:
+ return open_url(idp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(idprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s'
+ % (idprep['alias'], realm, str(e)))
+
+ def delete_identity_provider(self, alias, realm='master'):
+ """ Delete an identity provider.
+ :param alias: Alias of the identity provider.
+ :param realm: Realm in which this identity provider resides, default "master".
+ """
+ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias)
+ try:
+ return open_url(idp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s'
+ % (alias, realm, str(e)))
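+
+    # Example (a minimal sketch; the payload follows Keycloak's
+    # IdentityProviderRepresentation):
+    #
+    #   idprep = {'alias': 'my-oidc', 'providerId': 'oidc', 'config': {}}
+    #   if kc.get_identity_provider('my-oidc', realm='demo') is None:
+    #       kc.create_identity_provider(idprep, realm='demo')
+    #   else:
+    #       kc.update_identity_provider(idprep, realm='demo')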
+
+ def get_identity_provider_mappers(self, alias, realm='master'):
+ """ Fetch representations for identity provider mappers
+ :param alias: Alias of the identity provider.
+ :param realm: realm to be queried
+ :return: list of representations for identity provider mappers
+ """
+ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
+ try:
+ return json.loads(to_native(open_url(mappers_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s'
+ % (alias, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s'
+ % (alias, realm, str(e)))
+
+ def get_identity_provider_mapper(self, mid, alias, realm='master'):
+ """ Fetch identity provider representation from a realm using the idp's alias.
+ If the identity provider does not exist, None is returned.
+ :param mid: Unique ID of the mapper to fetch.
+ :param alias: Alias of the identity provider.
+ :param realm: Realm in which the identity provider resides; default 'master'.
+ """
+ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
+ try:
+ return json.loads(to_native(open_url(mapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
+ % (mid, alias, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s'
+ % (mid, alias, realm, str(e)))
+
+ def create_identity_provider_mapper(self, mapper, alias, realm='master'):
+ """ Create an identity provider mapper.
+ :param mapper: IdentityProviderMapperRepresentation of the mapper to be created.
+ :param alias: Alias of the identity provider.
+ :param realm: Realm in which this identity provider resides, default "master".
+ :return: HTTPResponse object on success
+ """
+ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias)
+ try:
+ return open_url(mappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(mapper), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s'
+ % (mapper['name'], alias, realm, str(e)))
+
+ def update_identity_provider_mapper(self, mapper, alias, realm='master'):
+ """ Update an existing identity provider.
+ :param mapper: IdentityProviderMapperRepresentation of the mapper to be updated.
+ :param alias: Alias of the identity provider.
+ :param realm: Realm in which this identity provider resides, default "master".
+        :return: HTTPResponse object on success
+ """
+ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id'])
+ try:
+ return open_url(mapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(mapper), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s'
+ % (mapper['id'], alias, realm, str(e)))
+
+ def delete_identity_provider_mapper(self, mid, alias, realm='master'):
+ """ Delete an identity provider.
+ :param mid: Unique ID of the mapper to delete.
+ :param alias: Alias of the identity provider.
+ :param realm: Realm in which this identity provider resides, default "master".
+ """
+ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid)
+ try:
+ return open_url(mapper_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s'
+ % (mid, alias, realm, str(e)))
+
+ def get_components(self, filter=None, realm='master'):
+ """ Fetch representations for components in a realm
+ :param realm: realm to be queried
+ :param filter: search filter
+ :return: list of representations for components
+ """
+ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
+ if filter is not None:
+ comps_url += '?%s' % filter
+
+ try:
+ return json.loads(to_native(open_url(comps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except ValueError as e:
+ self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s'
+ % (realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not obtain list of components for realm %s: %s'
+ % (realm, str(e)))
+
+ def get_component(self, cid, realm='master'):
+ """ Fetch component representation from a realm using its cid.
+ If the component does not exist, None is returned.
+ :param cid: Unique ID of the component to fetch.
+ :param realm: Realm in which the component resides; default 'master'.
+ """
+ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except HTTPError as e:
+ if e.code == 404:
+ return None
+ else:
+ self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
+ % (cid, realm, str(e)))
+ except Exception as e:
+ self.module.fail_json(msg='Could not fetch component %s in realm %s: %s'
+ % (cid, realm, str(e)))
+
+ def create_component(self, comprep, realm='master'):
+ """ Create an component.
+ :param comprep: Component representation of the component to be created.
+ :param realm: Realm in which this component resides, default "master".
+ :return: Component representation of the created component
+ """
+ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm)
+ try:
+ resp = open_url(comps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(comprep), validate_certs=self.validate_certs)
+ comp_url = resp.getheader('Location')
+ if comp_url is None:
+ self.module.fail_json(msg='Could not create component in realm %s: %s'
+ % (realm, 'unexpected response'))
+ return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception as e:
+ self.module.fail_json(msg='Could not create component in realm %s: %s'
+ % (realm, str(e)))
+
+ def update_component(self, comprep, realm='master'):
+ """ Update an existing component.
+ :param comprep: Component representation of the component to be updated.
+ :param realm: Realm in which this component resides, default "master".
+        :return: HTTPResponse object on success
+ """
+ cid = comprep.get('id')
+ if cid is None:
+ self.module.fail_json(msg='Cannot update component without id')
+ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return open_url(comp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(comprep), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not update component %s in realm %s: %s'
+ % (cid, realm, str(e)))
+
+ def delete_component(self, cid, realm='master'):
+ """ Delete an component.
+ :param cid: Unique ID of the component.
+ :param realm: Realm in which this component resides, default "master".
+ """
+ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid)
+ try:
+ return open_url(comp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete component %s in realm %s: %s'
+ % (cid, realm, str(e)))
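+
+    # Example (sketch): `filter` is appended verbatim as a query string, so the
+    # component endpoint's query parameters such as `type` or `name` can be used.
+    #
+    #   comps = kc.get_components(filter='type=org.keycloak.storage.UserStorageProvider', realm='demo')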
+
+    def get_authz_authorization_scope_by_name(self, name, client_id, realm):
+        """ Fetch an authorization scope by name; returns False if the lookup fails. """
+ url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
+ search_url = "%s/search?name=%s" % (url, quote(name))
+
+ try:
+ return json.loads(to_native(open_url(search_url, method='GET', http_agent=self.http_agent, headers=self.restheaders,
+ timeout=self.connection_timeout,
+ validate_certs=self.validate_certs).read()))
+ except Exception:
+ return False
+
+ def create_authz_authorization_scope(self, payload, client_id, realm):
+ """Create an authorization scope for a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(payload), validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def update_authz_authorization_scope(self, payload, id, client_id, realm):
+ """Update an authorization scope for a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ data=json.dumps(payload), validate_certs=self.validate_certs)
+ except Exception as e:
+            self.module.fail_json(msg='Could not update authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e)))
+
+ def remove_authz_authorization_scope(self, id, client_id, realm):
+ """Remove an authorization scope from a Keycloak client"""
+ url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm)
+
+ try:
+ return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
+ validate_certs=self.validate_certs)
+ except Exception as e:
+ self.module.fail_json(msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e)))
diff --git a/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
new file mode 100644
index 000000000..85caa8e16
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, John Cant <a.johncant@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
+ keycloak_argument_spec
+
+
+def keycloak_clientsecret_module():
+ """
+ Returns an AnsibleModule definition for modules that interact with a client
+ secret.
+
+    :return: AnsibleModule
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'client_id'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ mutually_exclusive=[
+ ['token', 'auth_realm'],
+ ['token', 'auth_username'],
+ ['token', 'auth_password']
+ ])
+
+ return module
+
+
+def keycloak_clientsecret_module_resolve_params(module, kc):
+ """
+ Given an AnsibleModule definition for keycloak_clientsecret_*, and a
+ KeycloakAPI client, resolve the params needed to interact with the Keycloak
+ client secret, looking up the client by clientId if necessary via an API
+ call.
+
+ :return: tuple of id, realm
+ """
+
+ realm = module.params.get('realm')
+ id = module.params.get('id')
+ client_id = module.params.get('client_id')
+
+ # only lookup the client_id if id isn't provided.
+ # in the case that both are provided, prefer the ID, since it's one
+ # less lookup.
+ if id is None:
+ # Due to the required_one_of spec, client_id is guaranteed to not be None
+ client = kc.get_client_by_clientid(client_id, realm=realm)
+
+ if client is None:
+ module.fail_json(
+            msg='Client {client_id} does not exist'.format(client_id=client_id)
+ )
+
+ id = client['id']
+
+ return id, realm
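+
+
+# Example usage in a keycloak_clientsecret_* module (a sketch; building the
+# authenticated KeycloakAPI instance `kc` is omitted here):
+#
+#   module = keycloak_clientsecret_module()
+#   id, realm = keycloak_clientsecret_module_resolve_params(module, kc)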
diff --git a/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
new file mode 100644
index 000000000..9cb6e527a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ilo_redfish_utils.py
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+import time
+
+
+class iLORedfishUtils(RedfishUtils):
+
+ def get_ilo_sessions(self):
+ result = {}
+        # listing all sessions has historically been slower than other operations
+ session_list = []
+ sessions_results = []
+        # Get these entries, but do not fail if they are not found
+ properties = ['Description', 'Id', 'Name', 'UserName']
+
+        # Use a hardcoded sessions path rather than self.sessions_uri.
+ response = self.get_request(
+ self.root_uri + self.service_root + "SessionService/Sessions/")
+ if not response['ret']:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+        current_session = None
+        if 'Oem' in data:
+            if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]:
+                current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]
+
+ for sessions in data[u'Members']:
+ # session_list[] are URIs
+ session_list.append(sessions[u'@odata.id'])
+ # for each session, get details
+ for uri in session_list:
+ session = {}
+ if uri != current_session:
+ response = self.get_request(self.root_uri + uri)
+ if not response['ret']:
+ return response
+ data = response['data']
+                for prop in properties:
+                    if prop in data:
+                        session[prop] = data[prop]
+ sessions_results.append(session)
+ result["msg"] = sessions_results
+ result["ret"] = True
+ return result
+
+ def set_ntp_server(self, mgr_attributes):
+ result = {}
+ setkey = mgr_attributes['mgr_attr_name']
+
+ nic_info = self.get_manager_ethernet_uri()
+ ethuri = nic_info["nic_addr"]
+
+ response = self.get_request(self.root_uri + ethuri)
+ if not response['ret']:
+ return response
+ result['ret'] = True
+ data = response['data']
+ payload = {"DHCPv4": {
+ "UseNTPServers": ""
+ }}
+
+ if data["DHCPv4"]["UseNTPServers"]:
+ payload["DHCPv4"]["UseNTPServers"] = False
+ res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
+ if not res_dhv4['ret']:
+ return res_dhv4
+
+ payload = {"DHCPv6": {
+ "UseNTPServers": ""
+ }}
+
+ if data["DHCPv6"]["UseNTPServers"]:
+ payload["DHCPv6"]["UseNTPServers"] = False
+ res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
+ if not res_dhv6['ret']:
+ return res_dhv6
+
+ datetime_uri = self.manager_uri + "DateTime"
+
+ listofips = mgr_attributes['mgr_attr_value'].split(" ")
+ if len(listofips) > 2:
+ return {'ret': False, 'changed': False, 'msg': "More than 2 NTP Servers mentioned"}
+
+ ntp_list = []
+ for ips in listofips:
+ ntp_list.append(ips)
+
+ while len(ntp_list) < 2:
+ ntp_list.append("0.0.0.0")
+
+ payload = {setkey: ntp_list}
+
+ response1 = self.patch_request(self.root_uri + datetime_uri, payload)
+ if not response1['ret']:
+ return response1
+
+ return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']}
+
+ def set_time_zone(self, attr):
+ key = attr['mgr_attr_name']
+
+ uri = self.manager_uri + "DateTime/"
+ response = self.get_request(self.root_uri + uri)
+ if not response['ret']:
+ return response
+
+ data = response["data"]
+
+ if key not in data:
+ return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key}
+
+ timezones = data["TimeZoneList"]
+ index = ""
+ for tz in timezones:
+ if attr['mgr_attr_value'] in tz["Name"]:
+ index = tz["Index"]
+ break
+
+ payload = {key: {"Index": index}}
+ response = self.patch_request(self.root_uri + uri, payload)
+ if not response['ret']:
+ return response
+
+ return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
+
+ def set_dns_server(self, attr):
+ key = attr['mgr_attr_name']
+ nic_info = self.get_manager_ethernet_uri()
+ uri = nic_info["nic_addr"]
+
+ listofips = attr['mgr_attr_value'].split(" ")
+ if len(listofips) > 3:
+ return {'ret': False, 'changed': False, 'msg': "More than 3 DNS Servers mentioned"}
+
+ dns_list = []
+ for ips in listofips:
+ dns_list.append(ips)
+
+ while len(dns_list) < 3:
+ dns_list.append("0.0.0.0")
+
+ payload = {
+ "Oem": {
+ "Hpe": {
+ "IPv4": {
+ key: dns_list
+ }
+ }
+ }
+ }
+
+ response = self.patch_request(self.root_uri + uri, payload)
+ if not response['ret']:
+ return response
+
+ return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
+
+ def set_domain_name(self, attr):
+ key = attr['mgr_attr_name']
+
+ nic_info = self.get_manager_ethernet_uri()
+ ethuri = nic_info["nic_addr"]
+
+ response = self.get_request(self.root_uri + ethuri)
+ if not response['ret']:
+ return response
+
+ data = response['data']
+
+ payload = {"DHCPv4": {
+ "UseDomainName": ""
+ }}
+
+ if data["DHCPv4"]["UseDomainName"]:
+ payload["DHCPv4"]["UseDomainName"] = False
+ res_dhv4 = self.patch_request(self.root_uri + ethuri, payload)
+ if not res_dhv4['ret']:
+ return res_dhv4
+
+ payload = {"DHCPv6": {
+ "UseDomainName": ""
+ }}
+
+ if data["DHCPv6"]["UseDomainName"]:
+ payload["DHCPv6"]["UseDomainName"] = False
+ res_dhv6 = self.patch_request(self.root_uri + ethuri, payload)
+ if not res_dhv6['ret']:
+ return res_dhv6
+
+ domain_name = attr['mgr_attr_value']
+
+ payload = {"Oem": {
+ "Hpe": {
+ key: domain_name
+ }
+ }}
+
+ response = self.patch_request(self.root_uri + ethuri, payload)
+ if not response['ret']:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']}
+
+ def set_wins_registration(self, mgrattr):
+        key = mgrattr['mgr_attr_name']
+
+ nic_info = self.get_manager_ethernet_uri()
+ ethuri = nic_info["nic_addr"]
+
+ payload = {
+ "Oem": {
+ "Hpe": {
+ "IPv4": {
+                        key: False
+ }
+ }
+ }
+ }
+
+ response = self.patch_request(self.root_uri + ethuri, payload)
+ if not response['ret']:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']}
+
+ def get_server_poststate(self):
+ # Get server details
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if not response["ret"]:
+ return response
+ server_data = response["data"]
+
+ if "Hpe" in server_data["Oem"]:
+ return {
+ "ret": True,
+ "server_poststate": server_data["Oem"]["Hpe"]["PostState"]
+ }
+ else:
+ return {
+ "ret": True,
+ "server_poststate": server_data["Oem"]["Hp"]["PostState"]
+ }
+
+ def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800):
+ # This method checks if OOB controller reboot is completed
+ time.sleep(10)
+
+ # Check server poststate
+ state = self.get_server_poststate()
+ if not state["ret"]:
+ return state
+
+ count = int(max_polling_time / polling_interval)
+ times = 0
+
+ # When server is powered OFF
+ pcount = 0
+ while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5:
+ time.sleep(10)
+ state = self.get_server_poststate()
+ if not state["ret"]:
+ return state
+
+ if state["server_poststate"] not in ["PowerOff", "Off"]:
+ break
+ pcount = pcount + 1
+ if state["server_poststate"] in ["PowerOff", "Off"]:
+ return {
+ "ret": False,
+ "changed": False,
+ "msg": "Server is powered OFF"
+ }
+
+ # When server is not rebooting
+ if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
+ return {
+ "ret": True,
+ "changed": False,
+ "msg": "Server is not rebooting"
+ }
+
+ while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times:
+ state = self.get_server_poststate()
+ if not state["ret"]:
+ return state
+
+ if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]:
+ return {
+ "ret": True,
+ "changed": True,
+ "msg": "Server reboot is completed"
+ }
+ time.sleep(polling_interval)
+ times = times + 1
+
+ return {
+ "ret": False,
+ "changed": False,
+ "msg": "Server Reboot has failed, server state: {state} ".format(state=state)
+ }
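+
+
+# Example (sketch): waiting for an iLO reboot with tighter polling; callers are
+# expected to turn a falsy 'ret' into a module failure.
+#
+#   result = ilo_utils.wait_for_ilo_reboot_completion(polling_interval=30, max_polling_time=600)
+#   if not result['ret']:
+#       module.fail_json(**result)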
diff --git a/ansible_collections/community/general/plugins/module_utils/influxdb.py b/ansible_collections/community/general/plugins/module_utils/influxdb.py
new file mode 100644
index 000000000..580cabe7d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/influxdb.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests.exceptions # noqa: F401, pylint: disable=unused-import
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+INFLUXDB_IMP_ERR = None
+try:
+ from influxdb import InfluxDBClient
+ from influxdb import __version__ as influxdb_version
+ from influxdb import exceptions # noqa: F401, pylint: disable=unused-import
+ HAS_INFLUXDB = True
+except ImportError:
+ INFLUXDB_IMP_ERR = traceback.format_exc()
+ HAS_INFLUXDB = False
+
+
+class InfluxDb():
+ def __init__(self, module):
+ self.module = module
+ self.params = self.module.params
+ self.check_lib()
+ self.hostname = self.params['hostname']
+ self.port = self.params['port']
+ self.path = self.params['path']
+ self.username = self.params['username']
+ self.password = self.params['password']
+ self.database_name = self.params.get('database_name')
+
+ def check_lib(self):
+ if not HAS_REQUESTS:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+
+ if not HAS_INFLUXDB:
+ self.module.fail_json(msg=missing_required_lib('influxdb'), exception=INFLUXDB_IMP_ERR)
+
+ @staticmethod
+ def influxdb_argument_spec():
+ return dict(
+ hostname=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8086),
+ path=dict(type='str', default=''),
+ username=dict(type='str', default='root', aliases=['login_username']),
+ password=dict(type='str', default='root', no_log=True, aliases=['login_password']),
+ ssl=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=True),
+ timeout=dict(type='int'),
+ retries=dict(type='int', default=3),
+ proxies=dict(type='dict', default={}),
+ use_udp=dict(type='bool', default=False),
+ udp_port=dict(type='int', default=4444),
+ )
+
+ def connect_to_influxdb(self):
+ args = dict(
+ host=self.hostname,
+ port=self.port,
+ username=self.username,
+ password=self.password,
+ database=self.database_name,
+ ssl=self.params['ssl'],
+ verify_ssl=self.params['validate_certs'],
+ timeout=self.params['timeout'],
+ use_udp=self.params['use_udp'],
+ udp_port=self.params['udp_port'],
+ proxies=self.params['proxies'],
+ )
+ influxdb_api_version = LooseVersion(influxdb_version)
+ if influxdb_api_version >= LooseVersion('4.1.0'):
+ # retries option is added in version 4.1.0
+ args.update(retries=self.params['retries'])
+
+ if influxdb_api_version >= LooseVersion('5.1.0'):
+ # path argument is added in version 5.1.0
+ args.update(path=self.path)
+
+ return InfluxDBClient(**args)
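+
+
+# Example usage in a module (a sketch; AnsibleModule comes from
+# ansible.module_utils.basic):
+#
+#   argument_spec = InfluxDb.influxdb_argument_spec()
+#   argument_spec.update(database_name=dict(type='str', required=True))
+#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+#   client = InfluxDb(module).connect_to_influxdb()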
diff --git a/ansible_collections/community/general/plugins/module_utils/ipa.py b/ansible_collections/community/general/plugins/module_utils/ipa.py
new file mode 100644
index 000000000..eda9b4132
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ipa.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import socket
+import uuid
+
+import re
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url, HAS_GSSAPI
+from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound
+
+
+def _env_then_dns_fallback(*args, **kwargs):
+ ''' Load value from environment or DNS in that order'''
+ try:
+ result = env_fallback(*args, **kwargs)
+ if result == '':
+ raise AnsibleFallbackNotFound
+ return result
+ except AnsibleFallbackNotFound:
+ # If no host was given, we try to guess it from IPA.
+ # The ipa-ca entry is a standard entry that IPA will have set for
+ # the CA.
+ try:
+ return socket.gethostbyaddr(socket.gethostbyname('ipa-ca'))[0]
+ except Exception:
+ raise AnsibleFallbackNotFound
+
+
+class IPAClient(object):
+ def __init__(self, module, host, port, protocol):
+ self.host = host
+ self.port = port
+ self.protocol = protocol
+ self.module = module
+ self.headers = None
+ self.timeout = module.params.get('ipa_timeout')
+ self.use_gssapi = False
+
+ def get_base_url(self):
+ return '%s://%s/ipa' % (self.protocol, self.host)
+
+ def get_json_url(self):
+ return '%s/session/json' % self.get_base_url()
+
+ def login(self, username, password):
+ if 'KRB5CCNAME' in os.environ and HAS_GSSAPI:
+ self.use_gssapi = True
+ elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI:
+ ccache = "MEMORY:" + str(uuid.uuid4())
+ os.environ['KRB5CCNAME'] = ccache
+ self.use_gssapi = True
+ else:
+ if not password:
+ if 'KRB5CCNAME' in os.environ or 'KRB5_CLIENT_KTNAME' in os.environ:
+ self.module.warn("In order to use GSSAPI, you need to install 'urllib_gssapi'")
+ self._fail('login', 'Password is required if not using '
+ 'GSSAPI. To use GSSAPI, please set the '
+ 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) '
+ ' environment variables.')
+ url = '%s/session/login_password' % self.get_base_url()
+ data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
+ headers = {'referer': self.get_base_url(),
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Accept': 'text/plain'}
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers, timeout=self.timeout)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail('login', info['msg'])
+
+ self.headers = {'Cookie': info.get('set-cookie')}
+ except Exception as e:
+ self._fail('login', to_native(e))
+ if not self.headers:
+ self.headers = dict()
+ self.headers.update({
+ 'referer': self.get_base_url(),
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json'})
+
+ def _fail(self, msg, e):
+ if 'message' in e:
+ err_string = e.get('message')
+ else:
+ err_string = e
+ self.module.fail_json(msg='%s: %s' % (msg, err_string))
+
+ def get_ipa_version(self):
+ response = self.ping()['summary']
+ ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*')
+ version_match = ipa_ver_regex.match(response)
+ ipa_version = None
+ if version_match:
+ ipa_version = version_match.groups()[0]
+ return ipa_version
+
+ def ping(self):
+ return self._post_json(method='ping', name=None)
+
+ def _post_json(self, method, name, item=None):
+ if item is None:
+ item = {}
+ url = '%s/session/json' % self.get_base_url()
+ data = dict(method=method)
+
+ # TODO: We should probably handle this a little better.
+ if method in ('ping', 'config_show', 'otpconfig_show'):
+ data['params'] = [[], {}]
+ elif method in ('config_mod', 'otpconfig_mod'):
+ data['params'] = [[], item]
+ else:
+ data['params'] = [[name], item]
+
+ try:
+ resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)),
+ headers=self.headers, timeout=self.timeout, use_gssapi=self.use_gssapi)
+ status_code = info['status']
+ if status_code not in [200, 201, 204]:
+ self._fail(method, info['msg'])
+ except Exception as e:
+ self._fail('post %s' % method, to_native(e))
+
+ if PY3:
+ charset = resp.headers.get_content_charset('latin-1')
+ else:
+ response_charset = resp.headers.getparam('charset')
+ if response_charset:
+ charset = response_charset
+ else:
+ charset = 'latin-1'
+ resp = json.loads(to_text(resp.read(), encoding=charset))
+ err = resp.get('error')
+ if err is not None:
+ self._fail('response %s' % method, err)
+
+ if 'result' in resp:
+ result = resp.get('result')
+ if 'result' in result:
+ result = result.get('result')
+ if isinstance(result, list):
+ if len(result) > 0:
+ return result[0]
+ else:
+ return {}
+ return result
+ return None
+
+ def get_diff(self, ipa_data, module_data):
+ result = []
+ for key in module_data.keys():
+ mod_value = module_data.get(key, None)
+ if isinstance(mod_value, list):
+ default = []
+ else:
+ default = None
+ ipa_value = ipa_data.get(key, default)
+ if isinstance(ipa_value, list) and not isinstance(mod_value, list):
+ mod_value = [mod_value]
+ if isinstance(ipa_value, list) and isinstance(mod_value, list):
+ mod_value = sorted(mod_value)
+ ipa_value = sorted(ipa_value)
+ if mod_value != ipa_value:
+ result.append(key)
+ return result
+
+ def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None, append=None):
+ changed = False
+ diff = list(set(ipa_list) - set(module_list))
+ if append is not True and len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ remove_method(name=name, item={item: diff})
+ else:
+ remove_method(name=name, item=diff)
+
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not self.module.check_mode:
+ if item:
+ add_method(name=name, item={item: diff})
+ else:
+ add_method(name=name, item=diff)
+
+ return changed
+
+
+def ipa_argument_spec():
+ return dict(
+ ipa_prot=dict(type='str', default='https', choices=['http', 'https'], fallback=(env_fallback, ['IPA_PROT'])),
+ ipa_host=dict(type='str', default='ipa.example.com', fallback=(_env_then_dns_fallback, ['IPA_HOST'])),
+ ipa_port=dict(type='int', default=443, fallback=(env_fallback, ['IPA_PORT'])),
+ ipa_user=dict(type='str', default='admin', fallback=(env_fallback, ['IPA_USER'])),
+ ipa_pass=dict(type='str', no_log=True, fallback=(env_fallback, ['IPA_PASS'])),
+ ipa_timeout=dict(type='int', default=10, fallback=(env_fallback, ['IPA_TIMEOUT'])),
+ validate_certs=dict(type='bool', default=True),
+ )
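+
+
+# Example (a sketch; `HostIPAClient` is a hypothetical IPAClient subclass and
+# AnsibleModule comes from ansible.module_utils.basic):
+#
+#   argument_spec = ipa_argument_spec()
+#   module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+#   client = HostIPAClient(module,
+#                          host=module.params['ipa_host'],
+#                          port=module.params['ipa_port'],
+#                          protocol=module.params['ipa_prot'])
+#   client.login(module.params['ipa_user'], module.params['ipa_pass'])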
diff --git a/ansible_collections/community/general/plugins/module_utils/jenkins.py b/ansible_collections/community/general/plugins/module_utils/jenkins.py
new file mode 100644
index 000000000..c742b364b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/jenkins.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import os
+import time
+
+
+def download_updates_file(updates_expiration):
+ updates_filename = 'jenkins-plugin-cache.json'
+ updates_dir = os.path.expanduser('~/.ansible/tmp')
+ updates_file = os.path.join(updates_dir, updates_filename)
+ download_updates = True
+
+ # Make sure the destination directory exists
+ if not os.path.isdir(updates_dir):
+ os.makedirs(updates_dir, 0o700)
+
+ # Check if we need to download new updates file
+ if os.path.isfile(updates_file):
+ # Get timestamp when the file was changed last time
+ ts_file = os.stat(updates_file).st_mtime
+ ts_now = time.time()
+
+ if ts_now - ts_file < updates_expiration:
+ download_updates = False
+
+ return updates_file, download_updates
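+
+
+# Example (sketch): cache the Jenkins update feed for a day, re-downloading only
+# when the cached copy is older than that.
+#
+#   updates_file, download = download_updates_file(updates_expiration=24 * 60 * 60)
+#   if download:
+#       pass  # fetch the update feed and write it to updates_file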
diff --git a/ansible_collections/community/general/plugins/module_utils/known_hosts.py b/ansible_collections/community/general/plugins/module_utils/known_hosts.py
new file mode 100644
index 000000000..25dd3e174
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/known_hosts.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+import hmac
+import re
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+try:
+ from hashlib import sha1
+except ImportError:
+ import sha as sha1
+
+HASHED_KEY_MAGIC = "|1|"
+
+
+def is_ssh_url(url):
+
+ """ check if url is ssh """
+
+ if "@" in url and "://" not in url:
+ return True
+ for scheme in "ssh://", "git+ssh://", "ssh+git://":
+ if url.startswith(scheme):
+ return True
+ return False
+
+
+def get_fqdn_and_port(repo_url):
+
+ """ chop the hostname and port out of a url """
+
+ fqdn = None
+ port = None
+ ipv6_re = re.compile(r'(\[[^]]*\])(?::([0-9]+))?')
+ if "@" in repo_url and "://" not in repo_url:
+        # most likely a user@host:path or user@host/path type URL
+ repo_url = repo_url.split("@", 1)[1]
+ match = ipv6_re.match(repo_url)
+ # For this type of URL, colon specifies the path, not the port
+ if match:
+ fqdn, path = match.groups()
+ elif ":" in repo_url:
+ fqdn = repo_url.split(":")[0]
+ elif "/" in repo_url:
+ fqdn = repo_url.split("/")[0]
+ elif "://" in repo_url:
+ # this should be something we can parse with urlparse
+ parts = urlparse(repo_url)
+ # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
+ # ensure we actually have a parts[1] before continuing.
+ if parts[1] != '':
+ fqdn = parts[1]
+ if "@" in fqdn:
+ fqdn = fqdn.split("@", 1)[1]
+ match = ipv6_re.match(fqdn)
+ if match:
+ fqdn, port = match.groups()
+ elif ":" in fqdn:
+ fqdn, port = fqdn.split(":")[0:2]
+ return fqdn, port
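+
+# Example (sketch) of the URL shapes this handles:
+#
+#   get_fqdn_and_port('git@github.com:user/repo.git')        # ('github.com', None)
+#   get_fqdn_and_port('ssh://git@[2001:db8::1]:2222/repo')   # ('[2001:db8::1]', '2222')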
+
+
+def check_hostkey(module, fqdn):
+ return not not_in_host_file(module, fqdn)
+
+
+# this is a variant of code found in connection_plugins/paramiko.py and we should modify
+# the paramiko code to import and use this.
+
+def not_in_host_file(self, host):
+
+ if 'USER' in os.environ:
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_host_file = "~/.ssh/known_hosts"
+ user_host_file = os.path.expanduser(user_host_file)
+
+ host_file_list = [
+ user_host_file,
+ "/etc/ssh/ssh_known_hosts",
+ "/etc/ssh/ssh_known_hosts2",
+ "/etc/openssh/ssh_known_hosts",
+ ]
+
+ hfiles_not_found = 0
+ for hf in host_file_list:
+ if not os.path.exists(hf):
+ hfiles_not_found += 1
+ continue
+
+ try:
+ host_fh = open(hf)
+ except IOError:
+ hfiles_not_found += 1
+ continue
+ else:
+ data = host_fh.read()
+ host_fh.close()
+
+ for line in data.split("\n"):
+ if line is None or " " not in line:
+ continue
+ tokens = line.split()
+ if tokens[0].find(HASHED_KEY_MAGIC) == 0:
+ # this is a hashed known host entry
+ try:
+ (kn_salt, kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|", 2)
+                    # compare hmac-sha1(salt, host), base64-decoded, against the stored hash
+                    hash = hmac.new(base64.b64decode(kn_salt), host.encode('utf-8'), digestmod=sha1)
+                    if hash.digest() == base64.b64decode(kn_host):
+ return False
+ except Exception:
+ # invalid hashed host key, skip it
+ continue
+ else:
+ # standard host file entry
+ if host in tokens[0]:
+ return False
+
+ return True
+
+
+def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False):
+
+ """ use ssh-keyscan to add the hostkey """
+
+ keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
+
+ if 'USER' in os.environ:
+ user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_ssh_dir = "~/.ssh/"
+ user_host_file = "~/.ssh/known_hosts"
+ user_ssh_dir = os.path.expanduser(user_ssh_dir)
+
+ if not os.path.exists(user_ssh_dir):
+ if create_dir:
+ try:
+ os.makedirs(user_ssh_dir, int('700', 8))
+ except Exception:
+ module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
+ else:
+ module.fail_json(msg="%s does not exist" % user_ssh_dir)
+ elif not os.path.isdir(user_ssh_dir):
+ module.fail_json(msg="%s is not a directory" % user_ssh_dir)
+
+ if port:
+ this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn)
+ else:
+ this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
+
+ rc, out, err = module.run_command(this_cmd)
+ # ssh-keyscan gives a 0 exit code and prints nothing on timeout
+ if rc != 0 or not out:
+ msg = 'failed to retrieve hostkey'
+ if not out:
+ msg += '. "%s" returned no matches.' % this_cmd
+ else:
+ msg += ' using command "%s". [stdout]: %s' % (this_cmd, out)
+
+ if err:
+ msg += ' [stderr]: %s' % err
+
+ module.fail_json(msg=msg)
+
+ module.append_to_file(user_host_file, out)
+
+ return rc, out, err
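For illustration, a minimal sketch of how a module could consume these helpers; the function name and repo URL handling below are hypothetical, not part of the file above:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.known_hosts import (
        is_ssh_url, get_fqdn_and_port, check_hostkey, add_host_key,
    )

    def ensure_hostkey(module, repo_url):
        # only SSH-style URLs carry a host key to verify
        if not is_ssh_url(repo_url):
            return
        fqdn, port = get_fqdn_and_port(repo_url)
        if fqdn and not check_hostkey(module, fqdn):
            # scan the key and append it to the user's known_hosts
            add_host_key(module, fqdn, port=port, create_dir=True)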
diff --git a/ansible_collections/community/general/plugins/module_utils/ldap.py b/ansible_collections/community/general/plugins/module_utils/ldap.py
new file mode 100644
index 000000000..655371321
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ldap.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# Copyright (c) 2017-2018 Keller Fuchs (@KellerFuchs) <kellerfuchs@hashbang.sh>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+import traceback
+from ansible.module_utils.common.text.converters import to_native
+
+try:
+ import ldap
+ import ldap.dn
+ import ldap.filter
+ import ldap.sasl
+
+ HAS_LDAP = True
+
+ SASCL_CLASS = {
+ 'gssapi': ldap.sasl.gssapi,
+ 'external': ldap.sasl.external,
+ }
+except ImportError:
+ HAS_LDAP = False
+
+
+def gen_specs(**specs):
+ specs.update({
+ 'bind_dn': dict(),
+ 'bind_pw': dict(default='', no_log=True),
+ 'ca_path': dict(type='path'),
+ 'dn': dict(required=True),
+ 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']),
+ 'server_uri': dict(default='ldapi:///'),
+ 'start_tls': dict(default=False, type='bool'),
+ 'validate_certs': dict(default=True, type='bool'),
+ 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'),
+ 'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'),
+ })
+
+ return specs
+
+
+class LdapGeneric(object):
+ def __init__(self, module):
+ # Shortcuts
+ self.module = module
+ self.bind_dn = self.module.params['bind_dn']
+ self.bind_pw = self.module.params['bind_pw']
+ self.ca_path = self.module.params['ca_path']
+ self.referrals_chasing = self.module.params['referrals_chasing']
+ self.server_uri = self.module.params['server_uri']
+ self.start_tls = self.module.params['start_tls']
+ self.verify_cert = self.module.params['validate_certs']
+ self.sasl_class = self.module.params['sasl_class']
+ self.xorder_discovery = self.module.params['xorder_discovery']
+
+ # Establish connection
+ self.connection = self._connect_to_ldap()
+
+ if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()):
+ # Try to find the X_ORDERed version of the DN
+ self.dn = self._find_dn()
+ else:
+ self.dn = self.module.params['dn']
+
+ def fail(self, msg, exn):
+ self.module.fail_json(
+ msg=msg,
+ details=to_native(exn),
+ exception=traceback.format_exc()
+ )
+
+ def _find_dn(self):
+ dn = self.module.params['dn']
+
+ explode_dn = ldap.dn.explode_dn(dn)
+
+ if len(explode_dn) > 1:
+ try:
+ escaped_value = ldap.filter.escape_filter_chars(explode_dn[0])
+ filterstr = "(%s)" % escaped_value
+ dns = self.connection.search_s(','.join(explode_dn[1:]),
+ ldap.SCOPE_ONELEVEL, filterstr)
+ if len(dns) == 1:
+ dn, dummy = dns[0]
+ except Exception:
+ pass
+
+ return dn
+
+ def _connect_to_ldap(self):
+ if not self.verify_cert:
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+
+ if self.ca_path:
+ ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, self.ca_path)
+
+ connection = ldap.initialize(self.server_uri)
+
+ if self.referrals_chasing == 'disabled':
+ # Switch off chasing of referrals (https://github.com/ansible-collections/community.general/issues/1067)
+ connection.set_option(ldap.OPT_REFERRALS, 0)
+
+ if self.start_tls:
+ try:
+ connection.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ if self.bind_dn is not None:
+ connection.simple_bind_s(self.bind_dn, self.bind_pw)
+ else:
+ klass = SASCL_CLASS.get(self.sasl_class, ldap.sasl.external)
+ connection.sasl_interactive_bind_s('', klass())
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+
+ return connection
+
+ def _xorder_dn(self):
+ # match X_ORDERed DNs
+ regex = r"\w+=\{\d+\}.+"
+ return re.match(regex, self.module.params['dn']) is not None
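As a sketch, a module built on this helper extends gen_specs() with its own options and lets LdapGeneric handle binding and DN resolution; the 'values' option here is a hypothetical module-specific addition:

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs

    module = AnsibleModule(
        argument_spec=gen_specs(values=dict(type='list')),  # hypothetical extra option
        supports_check_mode=True,
    )
    ldap_helper = LdapGeneric(module)  # connects, binds, resolves the (possibly X_ORDERed) DN
    results = ldap_helper.connection.search_s(ldap_helper.dn, 0)  # 0 == ldap.SCOPE_BASE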
diff --git a/ansible_collections/community/general/plugins/module_utils/linode.py b/ansible_collections/community/general/plugins/module_utils/linode.py
new file mode 100644
index 000000000..cedd3e0d5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/linode.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Luke Murphy @decentral1se
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def get_user_agent(module):
+ """Retrieve a user-agent to send with LinodeClient requests."""
+ try:
+ from ansible.module_utils.ansible_release import __version__ as ansible_version
+ except ImportError:
+ ansible_version = 'unknown'
+ return 'Ansible-%s/%s' % (module, ansible_version)
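A sketch of the intended call site, assuming the linode_api4 client library; the module name string and token are placeholders:

    from linode_api4 import LinodeClient

    user_agent = get_user_agent('my_linode_module')  # e.g. 'Ansible-my_linode_module/2.14.0'
    client = LinodeClient('LINODE_API_TOKEN', user_agent=user_agent)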
diff --git a/ansible_collections/community/general/plugins/module_utils/lxd.py b/ansible_collections/community/general/plugins/module_utils/lxd.py
new file mode 100644
index 000000000..7f5362532
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/lxd.py
@@ -0,0 +1,134 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import socket
+import ssl
+import json
+
+from ansible.module_utils.urls import generic_urlparse
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils.common.text.converters import to_text
+
+# httplib/http.client connection using unix domain socket
+HTTPConnection = http_client.HTTPConnection
+HTTPSConnection = http_client.HTTPSConnection
+
+
+class UnixHTTPConnection(HTTPConnection):
+ def __init__(self, path):
+ HTTPConnection.__init__(self, 'localhost')
+ self.path = path
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.path)
+ self.sock = sock
+
+
+class LXDClientException(Exception):
+ def __init__(self, msg, **kwargs):
+ self.msg = msg
+ self.kwargs = kwargs
+
+
+class LXDClient(object):
+ def __init__(self, url, key_file=None, cert_file=None, debug=False):
+ """LXD Client.
+
+ :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
+ :type url: ``str``
+ :param key_file: The path of the client certificate key file.
+ :type key_file: ``str``
+ :param cert_file: The path of the client certificate file.
+ :type cert_file: ``str``
+ :param debug: The debug flag. The request and response are stored in logs when debug is true.
+ :type debug: ``bool``
+ """
+ self.url = url
+ self.debug = debug
+ self.logs = []
+ if url.startswith('https:'):
+ self.cert_file = cert_file
+ self.key_file = key_file
+ parts = generic_urlparse(urlparse(self.url))
+ ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
+ ctx.load_cert_chain(cert_file, keyfile=key_file)
+ self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
+ elif url.startswith('unix:'):
+ unix_socket_path = url[len('unix:'):]
+ self.connection = UnixHTTPConnection(unix_socket_path)
+ else:
+ raise LXDClientException('URL scheme must be unix: or https:')
+
+ def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None):
+ resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
+ if resp_json['type'] == 'async':
+ url = '{0}/wait'.format(resp_json['operation'])
+ resp_json = self._send_request('GET', url)
+ if wait_for_container:
+ while resp_json['metadata']['status'] == 'Running':
+ resp_json = self._send_request('GET', url)
+ if resp_json['metadata']['status'] != 'Success':
+ self._raise_err_from_json(resp_json)
+ return resp_json
+
+ def authenticate(self, trust_password):
+ body_json = {'type': 'client', 'password': trust_password}
+ return self._send_request('POST', '/1.0/certificates', body_json=body_json)
+
+ def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
+ try:
+ body = json.dumps(body_json)
+ self.connection.request(method, url, body=body)
+ resp = self.connection.getresponse()
+ resp_data = resp.read()
+ resp_data = to_text(resp_data, errors='surrogate_or_strict')
+ resp_json = json.loads(resp_data)
+ self.logs.append({
+ 'type': 'sent request',
+ 'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
+ 'response': {'json': resp_json}
+ })
+ resp_type = resp_json.get('type', None)
+ if resp_type == 'error':
+ if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
+ return resp_json
+ if resp_json['error'] == "Certificate already in trust store":
+ return resp_json
+ self._raise_err_from_json(resp_json)
+ return resp_json
+ except socket.error as e:
+ raise LXDClientException('cannot connect to the LXD server', err=e)
+
+ def _raise_err_from_json(self, resp_json):
+ err_params = {}
+ if self.debug:
+ err_params['logs'] = self.logs
+ raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)
+
+ @staticmethod
+ def _get_err_from_resp_json(resp_json):
+ err = None
+ metadata = resp_json.get('metadata', None)
+ if metadata is not None:
+ err = metadata.get('err', None)
+ if err is None:
+ err = resp_json.get('error', None)
+ return err
+
+
+def default_key_file():
+ return os.path.expanduser('~/.config/lxc/client.key')
+
+
+def default_cert_file():
+ return os.path.expanduser('~/.config/lxc/client.crt')
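For illustration, both transports in use; the socket path, endpoint, and trust password are examples:

    client = LXDClient('unix:/var/lib/lxd/unix.socket', debug=True)
    info = client.do('GET', '/1.0')  # async operations are waited on automatically

    tls_client = LXDClient('https://127.0.0.1:8443',
                           key_file=default_key_file(), cert_file=default_cert_file())
    tls_client.authenticate(trust_password='secret')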
diff --git a/ansible_collections/community/general/plugins/module_utils/manageiq.py b/ansible_collections/community/general/plugins/module_utils/manageiq.py
new file mode 100644
index 000000000..cbce05b8e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/manageiq.py
@@ -0,0 +1,470 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+#
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+
+CLIENT_IMP_ERR = None
+try:
+ from manageiq_client.api import ManageIQClient
+ HAS_CLIENT = True
+except ImportError:
+ CLIENT_IMP_ERR = traceback.format_exc()
+ HAS_CLIENT = False
+
+
+def manageiq_argument_spec():
+ options = dict(
+ url=dict(default=os.environ.get('MIQ_URL', None)),
+ username=dict(default=os.environ.get('MIQ_USERNAME', None)),
+ password=dict(default=os.environ.get('MIQ_PASSWORD', None), no_log=True),
+ token=dict(default=os.environ.get('MIQ_TOKEN', None), no_log=True),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ ca_cert=dict(required=False, default=None, aliases=['ca_bundle_path']),
+ )
+
+ return dict(
+ manageiq_connection=dict(type='dict',
+ apply_defaults=True,
+ options=options),
+ )
+
+
+def check_client(module):
+ if not HAS_CLIENT:
+ module.fail_json(msg=missing_required_lib('manageiq-client'), exception=CLIENT_IMP_ERR)
+
+
+def validate_connection_params(module):
+ params = module.params['manageiq_connection']
+ error_str = "missing required argument: manageiq_connection[{}]"
+ url = params['url']
+ token = params['token']
+ username = params['username']
+ password = params['password']
+
+ if (url and username and password) or (url and token):
+ return params
+ for arg in ['url', 'username', 'password']:
+ if params[arg] in (None, ''):
+ module.fail_json(msg=error_str.format(arg))
+
+
+def manageiq_entities():
+ return {
+ 'provider': 'providers', 'host': 'hosts', 'vm': 'vms',
+ 'category': 'categories', 'cluster': 'clusters', 'data store': 'data_stores',
+ 'group': 'groups', 'resource pool': 'resource_pools', 'service': 'services',
+ 'service template': 'service_templates', 'template': 'templates',
+ 'tenant': 'tenants', 'user': 'users', 'blueprint': 'blueprints'
+ }
+
+
+class ManageIQ(object):
+ """
+ class encapsulating ManageIQ API client.
+ """
+
+ def __init__(self, module):
+ # handle import errors
+ check_client(module)
+
+ params = validate_connection_params(module)
+
+ url = params['url']
+ username = params['username']
+ password = params['password']
+ token = params['token']
+ verify_ssl = params['validate_certs']
+ ca_bundle_path = params['ca_cert']
+
+ self._module = module
+ self._api_url = url + '/api'
+ self._auth = dict(user=username, password=password, token=token)
+ try:
+ self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path)
+ except Exception as e:
+ self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e)))
+
+ @property
+ def module(self):
+ """ Ansible module module
+
+ Returns:
+ the ansible module
+ """
+ return self._module
+
+ @property
+ def api_url(self):
+ """ Base ManageIQ API
+
+ Returns:
+ the base ManageIQ API
+ """
+ return self._api_url
+
+ @property
+ def client(self):
+ """ ManageIQ client
+
+ Returns:
+ the ManageIQ client
+ """
+ return self._client
+
+ def find_collection_resource_by(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, None otherwise.
+ """
+ try:
+ entity = self.client.collections.__getattribute__(collection_name).get(**params)
+ except ValueError:
+ return None
+ except Exception as e:
+ self.module.fail_json(msg="failed to find resource {error}".format(error=e))
+ return vars(entity)
+
+ def find_collection_resource_or_fail(self, collection_name, **params):
+ """ Searches the collection resource by the collection name and the param passed.
+
+ Returns:
+ the resource as an object if it exists in manageiq, Fail otherwise.
+ """
+ resource = self.find_collection_resource_by(collection_name, **params)
+ if resource:
+ return resource
+ else:
+ msg = "{collection_name} where {params} does not exist in manageiq".format(
+ collection_name=collection_name, params=str(params))
+ self.module.fail_json(msg=msg)
+
+ def policies(self, resource_id, resource_type, resource_name):
+ manageiq = ManageIQ(self.module)
+
+ # query resource id, fail if resource does not exist
+ if resource_id is None:
+ resource_id = manageiq.find_collection_resource_or_fail(resource_type, name=resource_name)['id']
+
+ return ManageIQPolicies(manageiq, resource_type, resource_id)
+
+ def query_resource_id(self, resource_type, resource_name):
+ """ Query the resource name in ManageIQ.
+
+ Returns:
+ the resource ID if it exists in ManageIQ, Fail otherwise.
+ """
+ resource = self.find_collection_resource_by(resource_type, name=resource_name)
+ if resource:
+ return resource["id"]
+ else:
+ msg = "{resource_name} {resource_type} does not exist in manageiq".format(
+ resource_name=resource_name, resource_type=resource_type)
+ self.module.fail_json(msg=msg)
+
+
+class ManageIQPolicies(object):
+ """
+ Object to execute policies management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def query_profile_href(self, profile):
+ """ Add or Update the policy_profile href field
+
+ Example:
+ {name: STR, ...} => {name: STR, href: STR}
+ """
+ resource = self.manageiq.find_collection_resource_or_fail(
+ "policy_profiles", **profile)
+ return dict(name=profile['name'], href=resource['href'])
+
+ def query_resource_profiles(self):
+ """ Returns a set of the profile objects objects assigned to the resource
+ """
+ url = '{resource_url}/policy_profiles?expand=resources'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api profile object to look like:
+ # {profile_name: STR, profile_description: STR, policies: ARR<POLICIES>}
+ profiles = [self.clean_profile_object(profile) for profile in resources]
+
+ return profiles
+
+ def query_profile_policies(self, profile_id):
+ """ Returns a set of the policy objects assigned to the resource
+ """
+ url = '{api_url}/policy_profiles/{profile_id}?expand=policies'
+ try:
+ response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id))
+ except Exception as e:
+ msg = "Failed to query {resource_type} policies: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('policies', [])
+
+ # clean the returned rest api policy object to look like:
+ # {name: STR, description: STR, active: BOOL}
+ policies = [self.clean_policy_object(policy) for policy in resources]
+
+ return policies
+
+ def clean_policy_object(self, policy):
+ """ Clean a policy object to have human readable form of:
+ {
+ name: STR,
+ description: STR,
+ active: BOOL
+ }
+ """
+ name = policy.get('name')
+ description = policy.get('description')
+ active = policy.get('active')
+
+ return dict(
+ name=name,
+ description=description,
+ active=active)
+
+ def clean_profile_object(self, profile):
+ """ Clean a profile object to have human readable form of:
+ {
+ profile_name: STR,
+ profile_description: STR,
+ policies: ARR<POLICIES>
+ }
+ """
+ profile_id = profile['id']
+ name = profile.get('name')
+ description = profile.get('description')
+ policies = self.query_profile_policies(profile_id)
+
+ return dict(
+ profile_name=name,
+ profile_description=description,
+ policies=policies)
+
+ def profiles_to_update(self, profiles, action):
+ """ Create a list of policies we need to update in ManageIQ.
+
+ Returns:
+ Whether or not a change took place and a message describing the
+ operation executed.
+ """
+ profiles_to_post = []
+ assigned_profiles = self.query_resource_profiles()
+
+ # make a list of assigned full profile names strings
+ # e.g. ['openscap profile', ...]
+ assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles])
+
+ for profile in profiles:
+ assigned = profile.get('name') in assigned_profiles_set
+
+ if (action == 'unassign' and assigned) or (action == 'assign' and not assigned):
+ # add/update the policy profile href field
+ # {name: STR, ...} => {name: STR, href: STR}
+ profile = self.query_profile_href(profile)
+ profiles_to_post.append(profile)
+
+ return profiles_to_post
+
+ def assign_or_unassign_profiles(self, profiles, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of profiles needed to be changed
+ profiles_to_post = self.profiles_to_update(profiles, action)
+ if not profiles_to_post:
+ return dict(
+ changed=False,
+ msg="Profiles {profiles} already {action}ed, nothing to do".format(
+ action=action,
+ profiles=profiles))
+
+ # try to assign or unassign profiles to resource
+ url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=profiles_to_post)
+ except Exception as e:
+ msg = "Failed to {action} profile: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed profiles
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed profiles: {profiles}".format(
+ action=action,
+ profiles=profiles))
+
+
+class ManageIQTags(object):
+ """
+ Object to execute tags management operations of manageiq resources.
+ """
+
+ def __init__(self, manageiq, resource_type, resource_id):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ self.resource_type = resource_type
+ self.resource_id = resource_id
+ self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format(
+ api_url=self.api_url,
+ resource_type=resource_type,
+ resource_id=resource_id)
+
+ def full_tag_name(self, tag):
+ """ Returns the full tag name in manageiq
+ """
+ return '/managed/{tag_category}/{tag_name}'.format(
+ tag_category=tag['category'],
+ tag_name=tag['name'])
+
+ def clean_tag_object(self, tag):
+ """ Clean a tag object to have human readable form of:
+ {
+ full_name: STR,
+ name: STR,
+ display_name: STR,
+ category: STR
+ }
+ """
+ full_name = tag.get('name')
+ categorization = tag.get('categorization', {})
+
+ return dict(
+ full_name=full_name,
+ name=categorization.get('name'),
+ display_name=categorization.get('display_name'),
+ category=categorization.get('category', {}).get('name'))
+
+ def query_resource_tags(self):
+ """ Returns a set of the tag objects assigned to the resource
+ """
+ url = '{resource_url}/tags?expand=resources&attributes=categorization'
+ try:
+ response = self.client.get(url.format(resource_url=self.resource_url))
+ except Exception as e:
+ msg = "Failed to query {resource_type} tags: {error}".format(
+ resource_type=self.resource_type,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ resources = response.get('resources', [])
+
+ # clean the returned rest api tag object to look like:
+ # {full_name: STR, name: STR, display_name: STR, category: STR}
+ tags = [self.clean_tag_object(tag) for tag in resources]
+
+ return tags
+
+ def tags_to_update(self, tags, action):
+ """ Create a list of tags we need to update in ManageIQ.
+
+ Returns:
+ Whether or not a change took place and a message describing the
+ operation executed.
+ """
+ tags_to_post = []
+ assigned_tags = self.query_resource_tags()
+
+ # make a list of assigned full tag names strings
+ # e.g. ['/managed/environment/prod', ...]
+ assigned_tags_set = set([tag['full_name'] for tag in assigned_tags])
+
+ for tag in tags:
+ assigned = self.full_tag_name(tag) in assigned_tags_set
+
+ if assigned and action == 'unassign':
+ tags_to_post.append(tag)
+ elif (not assigned) and action == 'assign':
+ tags_to_post.append(tag)
+
+ return tags_to_post
+
+ def assign_or_unassign_tags(self, tags, action):
+ """ Perform assign/unassign action
+ """
+ # get a list of tags needed to be changed
+ tags_to_post = self.tags_to_update(tags, action)
+ if not tags_to_post:
+ return dict(
+ changed=False,
+ msg="Tags already {action}ed, nothing to do".format(action=action))
+
+ # try to assign or unassign tags to resource
+ url = '{resource_url}/tags'.format(resource_url=self.resource_url)
+ try:
+ response = self.client.post(url, action=action, resources=tags)
+ except Exception as e:
+ msg = "Failed to {action} tag: {error}".format(
+ action=action,
+ error=e)
+ self.module.fail_json(msg=msg)
+
+ # check all entities in result to be successful
+ for result in response['results']:
+ if not result['success']:
+ msg = "Failed to {action}: {message}".format(
+ action=action,
+ message=result['message'])
+ self.module.fail_json(msg=msg)
+
+ # successfully changed all needed tags
+ return dict(
+ changed=True,
+ msg="Successfully {action}ed tags".format(action=action))
diff --git a/ansible_collections/community/general/plugins/module_utils/memset.py b/ansible_collections/community/general/plugins/module_utils/memset.py
new file mode 100644
index 000000000..374b40ff4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/memset.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import open_url, urllib_error
+from ansible.module_utils.basic import json
+
+
+class Response(object):
+ '''
+ Create a response object to mimic that of requests.
+ '''
+
+ def __init__(self):
+ self.content = None
+ self.status_code = None
+ self.stderr = None
+
+ def json(self):
+ return json.loads(self.content)
+
+
+def memset_api_call(api_key, api_method, payload=None):
+ '''
+ Generic function which returns results back to calling function.
+
+ Requires an API key and an API method to assemble the API URL.
+ Returns response text to be analysed.
+ '''
+ # instantiate a response object
+ response = Response()
+
+ # if we've already started preloading the payload then copy it
+    # and use that, otherwise we need to instantiate it.
+ if payload is None:
+ payload = dict()
+ else:
+ payload = payload.copy()
+
+ # set some sane defaults
+ has_failed = False
+ msg = None
+
+ data = urlencode(payload)
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ api_uri_base = 'https://api.memset.com/v1/json/'
+ api_uri = '{0}{1}/' . format(api_uri_base, api_method)
+
+ try:
+ resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key)
+ response.content = resp.read().decode('utf-8')
+ response.status_code = resp.getcode()
+ except urllib_error.HTTPError as e:
+ try:
+ errorcode = e.code
+ except AttributeError:
+ errorcode = None
+
+ has_failed = True
+ response.content = e.read().decode('utf8')
+ response.status_code = errorcode
+
+ if response.status_code is not None:
+ msg = "Memset API returned a {0} response ({1}, {2})." . format(response.status_code, response.json()['error_type'], response.json()['error'])
+ else:
+ msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error'])
+ except urllib_error.URLError as e:
+ has_failed = True
+ msg = "An URLError occured ({0})." . format(type(e))
+ response.stderr = "{0}" . format(e)
+
+ if msg is None:
+ msg = response.json()
+
+ return has_failed, msg, response
+
+
+def check_zone_domain(data, domain):
+ '''
+ Returns true if domain already exists, and false if not.
+ '''
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone_domain in data.json():
+ if zone_domain['domain'] == domain:
+ exists = True
+
+ return exists
+
+
+def check_zone(data, name):
+ '''
+ Returns true if zone already exists, and false if not.
+ '''
+ counter = 0
+ exists = False
+
+ if data.status_code in [201, 200]:
+ for zone in data.json():
+ if zone['nickname'] == name:
+ counter += 1
+ if counter == 1:
+ exists = True
+
+ return exists, counter
+
+
+def get_zone_id(zone_name, current_zones):
+ '''
+ Returns the zone's id if it exists and is unique
+ '''
+ zone_exists = False
+ zone_id, msg = None, None
+ zone_list = []
+
+ for zone in current_zones:
+ if zone['nickname'] == zone_name:
+ zone_list.append(zone['id'])
+
+ counter = len(zone_list)
+
+ if counter == 0:
+ msg = 'No matching zone found'
+ elif counter == 1:
+ zone_id = zone_list[0]
+ zone_exists = True
+ elif counter > 1:
+ zone_id = None
+ msg = 'Zone ID could not be returned as duplicate zone names were detected'
+
+ return zone_exists, msg, counter, zone_id
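For illustration, listing zones and testing for one by nickname; the API key and zone name are placeholders:

    has_failed, msg, response = memset_api_call(
        api_key='0000aaaa0000aaaa0000aaaa0000aaaa', api_method='dns.zone_list')
    if not has_failed:
        exists, counter = check_zone(data=response, name='example-zone')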
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/base.py b/ansible_collections/community/general/plugins/module_utils/mh/base.py
new file mode 100644
index 000000000..b10762eab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/base.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE
+from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
+
+
+class ModuleHelperBase(object):
+ module = None
+ ModuleHelperException = _MHE
+ _delegated_to_module = (
+ 'check_mode', 'get_bin_path', 'warn', 'deprecate',
+ )
+
+ def __init__(self, module=None):
+ self._changed = False
+
+ if module:
+ self.module = module
+
+ if not isinstance(self.module, AnsibleModule):
+ self.module = AnsibleModule(**self.module)
+
+ @property
+ def diff_mode(self):
+ return self.module._diff
+
+ @property
+ def verbosity(self):
+ return self.module._verbosity
+
+ def do_raise(self, *args, **kwargs):
+ raise _MHE(*args, **kwargs)
+
+ def __getattr__(self, attr):
+ if attr in self._delegated_to_module:
+ return getattr(self.module, attr)
+ raise AttributeError("ModuleHelperBase has no attribute '%s'" % (attr, ))
+
+ def __init_module__(self):
+ pass
+
+ def __run__(self):
+ raise NotImplementedError()
+
+ def __quit_module__(self):
+ pass
+
+ def __changed__(self):
+ raise NotImplementedError()
+
+ @property
+ def changed(self):
+ try:
+ return self.__changed__()
+ except NotImplementedError:
+ return self._changed
+
+ @changed.setter
+ def changed(self, value):
+ self._changed = value
+
+ def has_changed(self):
+ raise NotImplementedError()
+
+ @property
+ def output(self):
+ raise NotImplementedError()
+
+ @module_fails_on_exception
+ def run(self):
+ self.__init_module__()
+ self.__run__()
+ self.__quit_module__()
+ output = self.output
+ if 'failed' not in output:
+ output['failed'] = False
+ self.module.exit_json(changed=self.has_changed(), **output)
+
+ @classmethod
+ def execute(cls, module=None):
+ cls(module).run()
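A minimal sketch of the lifecycle: a subclass supplies the AnsibleModule spec (or instance) plus the hooks, and execute() drives run(); the option and logic below are hypothetical:

    class TouchHelper(ModuleHelperBase):
        module = dict(argument_spec=dict(path=dict(type='path', required=True)))

        def __run__(self):
            self.changed = True  # pretend we changed something

        def has_changed(self):
            return self._changed

        @property
        def output(self):
            return dict(path=self.module.params['path'])

    TouchHelper.execute()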
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/deco.py b/ansible_collections/community/general/plugins/module_utils/mh/deco.py
new file mode 100644
index 000000000..5138b212c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/deco.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+from functools import wraps
+
+from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException
+
+
+def cause_changes(on_success=None, on_failure=None):
+
+ def deco(func):
+ if on_success is None and on_failure is None:
+ return func
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ self = args[0]
+ func(*args, **kwargs)
+ if on_success is not None:
+ self.changed = on_success
+ except Exception:
+ if on_failure is not None:
+ self.changed = on_failure
+ raise
+
+ return wrapper
+
+ return deco
+
+
+def module_fails_on_exception(func):
+ conflict_list = ('msg', 'exception', 'output', 'vars', 'changed')
+
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ def fix_var_conflicts(output):
+ result = dict([
+ (k if k not in conflict_list else "_" + k, v)
+ for k, v in output.items()
+ ])
+ return result
+
+ try:
+ func(self, *args, **kwargs)
+ except SystemExit:
+ raise
+ except ModuleHelperException as e:
+ if e.update_output:
+ self.update_output(e.update_output)
+ # patchy solution to resolve conflict with output variables
+ output = fix_var_conflicts(self.output)
+ self.module.fail_json(msg=e.msg, exception=traceback.format_exc(),
+ output=self.output, vars=self.vars.output(), **output)
+ except Exception as e:
+ # patchy solution to resolve conflict with output variables
+ output = fix_var_conflicts(self.output)
+ msg = "Module failed with exception: {0}".format(str(e).strip())
+ self.module.fail_json(msg=msg, exception=traceback.format_exc(),
+ output=self.output, vars=self.vars.output(), **output)
+ return wrapper
+
+
+def check_mode_skip(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ if not self.module.check_mode:
+ return func(self, *args, **kwargs)
+ return wrapper
+
+
+def check_mode_skip_returns(callable=None, value=None):
+
+ def deco(func):
+ if callable is not None:
+ @wraps(func)
+ def wrapper_callable(self, *args, **kwargs):
+ if self.module.check_mode:
+ return callable(self, *args, **kwargs)
+ return func(self, *args, **kwargs)
+ return wrapper_callable
+
+ if value is not None:
+ @wraps(func)
+ def wrapper_value(self, *args, **kwargs):
+ if self.module.check_mode:
+ return value
+ return func(self, *args, **kwargs)
+ return wrapper_value
+
+ if callable is None and value is None:
+ return check_mode_skip
+
+ return deco
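A sketch of the decorators applied to a helper method (ModuleHelper comes from mh.module_helper; the method and command are hypothetical). Stacking check_mode_skip outermost means check mode skips both the action and the changed flag:

    class MyHelper(ModuleHelper):
        module = dict(argument_spec=dict(), supports_check_mode=True)

        @check_mode_skip
        @cause_changes(on_success=True)
        def remove_file(self):
            self.module.run_command(['rm', '-f', '/tmp/example'], check_rc=True)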
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py b/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py
new file mode 100644
index 000000000..68af5ba67
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/exceptions.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ModuleHelperException(Exception):
+ def __init__(self, msg, update_output=None, *args, **kwargs):
+ self.msg = to_native(msg or "Module failed with exception: {0}".format(self))
+ if update_output is None:
+ update_output = {}
+ self.update_output = update_output
+ super(ModuleHelperException, self).__init__(*args)
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py
new file mode 100644
index 000000000..a7d379394
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/cmd.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from functools import partial
+
+
+class ArgFormat(object):
+ """
+    Formats a module argument for use as a command line parameter. Used in CmdMixin.
+ """
+ BOOLEAN = 0
+ PRINTF = 1
+ FORMAT = 2
+ BOOLEAN_NOT = 3
+
+ @staticmethod
+ def stars_deco(num):
+ if num == 1:
+ def deco(f):
+ return lambda v: f(*v)
+ return deco
+ elif num == 2:
+ def deco(f):
+ return lambda v: f(**v)
+ return deco
+
+ return lambda f: f
+
+ def __init__(self, name, fmt=None, style=FORMAT, stars=0):
+ """
+ THIS CLASS IS BEING DEPRECATED.
+ It was never meant to be used outside the scope of CmdMixin, and CmdMixin is being deprecated.
+ See the deprecation notice in ``CmdMixin.__init__()`` below.
+
+ Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for
+ the CLI command execution.
+ :param name: Name of the argument to be formatted
+        :param fmt: Either a str to be formatted (printf-style or not) or a callable that performs the formatting
+        :param style: Whether fmt (as a str) should use printf-style formatting.
+                      Ignored if fmt is None or not a str (i.e. a callable).
+        :param stars: An int of value 0, 1 or 2, indicating whether to format the value as: value, *value or **value
+ """
+ def printf_fmt(_fmt, v):
+ try:
+ return [_fmt % v]
+ except TypeError as e:
+ if e.args[0] != 'not all arguments converted during string formatting':
+ raise
+ return [_fmt]
+
+ _fmts = {
+ ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []),
+ ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]),
+ ArgFormat.PRINTF: printf_fmt,
+ ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)],
+ }
+
+ self.name = name
+ self.stars = stars
+ self.style = style
+
+ if fmt is None:
+ fmt = "{0}"
+ style = ArgFormat.FORMAT
+
+ if isinstance(fmt, str):
+ func = _fmts[style]
+ self.arg_format = partial(func, fmt)
+ elif isinstance(fmt, list) or isinstance(fmt, tuple):
+ self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt]
+ elif hasattr(fmt, '__call__'):
+ self.arg_format = fmt
+ else:
+ raise TypeError('Parameter fmt must be either: a string, a list/tuple of '
+ 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt))
+
+ if stars:
+ self.arg_format = (self.stars_deco(stars))(self.arg_format)
+
+ def to_text(self, value):
+ if value is None and self.style != ArgFormat.BOOLEAN_NOT:
+ return []
+ func = self.arg_format
+ return [str(p) for p in func(value)]
+
+
+class CmdMixin(object):
+ """
+ THIS CLASS IS BEING DEPRECATED.
+ See the deprecation notice in ``CmdMixin.__init__()`` below.
+
+ Mixin for mapping module options to running a CLI command with its arguments.
+ """
+ command = None
+ command_args_formats = {}
+ run_command_fixed_options = {}
+ check_rc = False
+ force_lang = "C"
+
+ @property
+ def module_formats(self):
+ result = {}
+ for param in self.module.params.keys():
+ result[param] = ArgFormat(param)
+ return result
+
+ @property
+ def custom_formats(self):
+ result = {}
+ for param, fmt_spec in self.command_args_formats.items():
+ result[param] = ArgFormat(param, **fmt_spec)
+ return result
+
+ def __init__(self, *args, **kwargs):
+ super(CmdMixin, self).__init__(*args, **kwargs)
+ self.module.deprecate(
+ 'The CmdMixin used in classes CmdModuleHelper and CmdStateModuleHelper is being deprecated. '
+ 'Modules should use community.general.plugins.module_utils.cmd_runner.CmdRunner instead.',
+ version='8.0.0',
+ collection_name='community.general',
+ )
+
+ def _calculate_args(self, extra_params=None, params=None):
+ def add_arg_formatted_param(_cmd_args, arg_format, _value):
+ args = list(arg_format.to_text(_value))
+ return _cmd_args + args
+
+ def find_format(_param):
+ return self.custom_formats.get(_param, self.module_formats.get(_param))
+
+ extra_params = extra_params or dict()
+ cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command)
+ try:
+ cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True)
+ except ValueError:
+ pass
+ param_list = params if params else self.vars.keys()
+
+ for param in param_list:
+ if isinstance(param, dict):
+ if len(param) != 1:
+ self.do_raise("run_command parameter as a dict must contain only one key: {0}".format(param))
+ _param = list(param.keys())[0]
+ fmt = find_format(_param)
+ value = param[_param]
+ elif isinstance(param, str):
+ if param in self.vars.keys():
+ fmt = find_format(param)
+ value = self.vars[param]
+ elif param in extra_params:
+ fmt = find_format(param)
+ value = extra_params[param]
+ else:
+ self.do_raise('Cannot determine value for parameter: {0}'.format(param))
+ else:
+ self.do_raise("run_command parameter must be either a str or a dict: {0}".format(param))
+ cmd_args = add_arg_formatted_param(cmd_args, fmt, value)
+
+ return cmd_args
+
+ def process_command_output(self, rc, out, err):
+ return rc, out, err
+
+ def run_command(self,
+ extra_params=None,
+ params=None,
+ process_output=None,
+ publish_rc=True,
+ publish_out=True,
+ publish_err=True,
+ publish_cmd=True,
+ *args, **kwargs):
+ cmd_args = self._calculate_args(extra_params, params)
+ options = dict(self.run_command_fixed_options)
+ options['check_rc'] = options.get('check_rc', self.check_rc)
+ options.update(kwargs)
+ env_update = dict(options.get('environ_update', {}))
+ if self.force_lang:
+ env_update.update({
+ 'LANGUAGE': self.force_lang,
+ 'LC_ALL': self.force_lang,
+ })
+ self.update_output(force_lang=self.force_lang)
+ options['environ_update'] = env_update
+ rc, out, err = self.module.run_command(cmd_args, *args, **options)
+ if publish_rc:
+ self.update_output(rc=rc)
+ if publish_out:
+ self.update_output(stdout=out)
+ if publish_err:
+ self.update_output(stderr=err)
+ if publish_cmd:
+ self.update_output(cmd_args=cmd_args)
+ if process_output is None:
+ _process = self.process_command_output
+ else:
+ _process = process_output
+
+ return _process(rc, out, err)
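Though deprecated, the ArgFormat styles are easiest to see in isolation; a short sketch with illustrative option names:

    fmt_flag = ArgFormat('force', fmt='--force', style=ArgFormat.BOOLEAN)
    fmt_name = ArgFormat('name', fmt='--name=%s', style=ArgFormat.PRINTF)
    fmt_flag.to_text(True)    # -> ['--force']
    fmt_flag.to_text(False)   # -> []
    fmt_name.to_text('web1')  # -> ['--name=web1']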
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deprecate_attrs.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deprecate_attrs.py
new file mode 100644
index 000000000..c3bfb06c6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deprecate_attrs.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class DeprecateAttrsMixin(object):
+
+ def _deprecate_setup(self, attr, target, module):
+ if target is None:
+ target = self
+ if not hasattr(target, attr):
+ raise ValueError("Target {0} has no attribute {1}".format(target, attr))
+ if module is None:
+ if isinstance(target, AnsibleModule):
+ module = target
+ elif hasattr(target, "module") and isinstance(target.module, AnsibleModule):
+ module = target.module
+ else:
+ raise ValueError("Failed to automatically discover the AnsibleModule instance. Pass 'module' parameter explicitly.")
+
+ # setup internal state dicts
+ value_attr = "__deprecated_attr_value"
+ trigger_attr = "__deprecated_attr_trigger"
+ if not hasattr(target, value_attr):
+ setattr(target, value_attr, {})
+ if not hasattr(target, trigger_attr):
+ setattr(target, trigger_attr, {})
+ value_dict = getattr(target, value_attr)
+ trigger_dict = getattr(target, trigger_attr)
+ return target, module, value_dict, trigger_dict
+
+ def _deprecate_attr(self, attr, msg, version=None, date=None, collection_name=None, target=None, value=None, module=None):
+ target, module, value_dict, trigger_dict = self._deprecate_setup(attr, target, module)
+
+ value_dict[attr] = getattr(target, attr, value)
+ trigger_dict[attr] = False
+
+ def _trigger():
+ if not trigger_dict[attr]:
+ module.deprecate(msg, version=version, date=date, collection_name=collection_name)
+ trigger_dict[attr] = True
+
+ def _getter(_self):
+ _trigger()
+ return value_dict[attr]
+
+ def _setter(_self, new_value):
+ _trigger()
+ value_dict[attr] = new_value
+
+ # override attribute
+ prop = property(_getter)
+ setattr(target, attr, prop)
+ setattr(target, "_{0}_setter".format(attr), prop.setter(_setter))
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
new file mode 100644
index 000000000..bab8c090b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/deps.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase
+from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception
+
+
+class DependencyCtxMgr(object):
+ def __init__(self, name, msg=None):
+ self.name = name
+ self.msg = msg
+ self.has_it = False
+ self.exc_type = None
+ self.exc_val = None
+ self.exc_tb = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.has_it = exc_type is None
+ self.exc_type = exc_type
+ self.exc_val = exc_val
+ self.exc_tb = exc_tb
+ return not self.has_it
+
+ @property
+ def text(self):
+ return self.msg or str(self.exc_val)
+
+
+class DependencyMixin(ModuleHelperBase):
+ _dependencies = []
+
+ @classmethod
+ def dependency(cls, name, msg):
+ cls._dependencies.append(DependencyCtxMgr(name, msg))
+ return cls._dependencies[-1]
+
+ def fail_on_missing_deps(self):
+ for d in self._dependencies:
+ if not d.has_it:
+ self.module.fail_json(changed=False,
+ exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),
+ msg=d.text,
+ **self.output)
+
+ @module_fails_on_exception
+ def run(self):
+ self.fail_on_missing_deps()
+ super(DependencyMixin, self).run()
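The context manager records an import failure instead of raising it; a sketch, with psutil as an example dependency:

    psutil_dep = DependencyCtxMgr('psutil', msg='Missing required library: psutil')
    with psutil_dep:
        import psutil  # noqa: F401  (an ImportError is swallowed and recorded)

    if not psutil_dep.has_it:
        # in a DependencyMixin subclass, this is what fail_on_missing_deps() reports
        print(psutil_dep.text)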
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/state.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/state.py
new file mode 100644
index 000000000..4e2937989
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/state.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class StateMixin(object):
+ state_param = 'state'
+ default_state = None
+
+ def _state(self):
+ state = self.module.params.get(self.state_param)
+ return self.default_state if state is None else state
+
+ def _method(self, state):
+ return "{0}_{1}".format(self.state_param, state)
+
+ def __run__(self):
+ state = self._state()
+ self.vars.state = state
+
+ # resolve aliases
+ if state not in self.module.params:
+ aliased = [name for name, param in self.module.argument_spec.items() if state in param.get('aliases', [])]
+ if aliased:
+ state = aliased[0]
+ self.vars.effective_state = state
+
+ method = self._method(state)
+ if not hasattr(self, method):
+ return self.__state_fallback__()
+ func = getattr(self, method)
+ return func()
+
+ def __state_fallback__(self):
+ raise ValueError("Cannot find method: {0}".format(self._method(self._state())))
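A sketch of the dispatch: a state value of 'started' routes to state_started(); the service logic is hypothetical:

    class MyService(StateModuleHelper):  # StateModuleHelper = StateMixin + ModuleHelper
        module = dict(argument_spec=dict(
            state=dict(type='str', default='started', choices=['started', 'stopped'])))

        def state_started(self):
            self.changed = True  # start the service here

        def state_stopped(self):
            self.changed = True  # stop the service here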
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py b/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py
new file mode 100644
index 000000000..6dfb29bab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/mixins/vars.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import copy
+
+
+class VarMeta(object):
+ NOTHING = object()
+
+ def __init__(self, diff=False, output=True, change=None, fact=False):
+ self.init = False
+ self.initial_value = None
+ self.value = None
+
+ self.diff = diff
+ self.change = diff if change is None else change
+ self.output = output
+ self.fact = fact
+
+ def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING):
+ if diff is not None:
+ self.diff = diff
+ if output is not None:
+ self.output = output
+ if change is not None:
+ self.change = change
+ if fact is not None:
+ self.fact = fact
+ if initial_value is not self.NOTHING:
+ self.initial_value = copy.deepcopy(initial_value)
+
+ def set_value(self, value):
+ if not self.init:
+ self.initial_value = copy.deepcopy(value)
+ self.init = True
+ self.value = value
+ return self
+
+ @property
+ def has_changed(self):
+ return self.change and (self.initial_value != self.value)
+
+ @property
+ def diff_result(self):
+ return None if not (self.diff and self.has_changed) else {
+ 'before': self.initial_value,
+ 'after': self.value,
+ }
+
+ def __str__(self):
+ return "<VarMeta: value={0}, initial={1}, diff={2}, output={3}, change={4}>".format(
+ self.value, self.initial_value, self.diff, self.output, self.change
+ )
+
+
+class VarDict(object):
+ def __init__(self):
+ self._data = dict()
+ self._meta = dict()
+
+ def __getitem__(self, item):
+ return self._data[item]
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def __getattr__(self, item):
+ try:
+ return self._data[item]
+ except KeyError:
+ return getattr(self._data, item)
+
+ def __setattr__(self, key, value):
+ if key in ('_data', '_meta'):
+ super(VarDict, self).__setattr__(key, value)
+ else:
+ self.set(key, value)
+
+ def meta(self, name):
+ return self._meta[name]
+
+ def set_meta(self, name, **kwargs):
+ self.meta(name).set(**kwargs)
+
+ def set(self, name, value, **kwargs):
+ if name in ('_data', '_meta'):
+ raise ValueError("Names _data and _meta are reserved for use by ModuleHelper")
+ self._data[name] = value
+ if name in self._meta:
+ meta = self.meta(name)
+ else:
+ meta = VarMeta(**kwargs)
+ meta.set_value(value)
+ self._meta[name] = meta
+
+ def output(self):
+ return dict((k, v) for k, v in self._data.items() if self.meta(k).output)
+
+ def diff(self):
+ diff_results = [(k, self.meta(k).diff_result) for k in self._data]
+ diff_results = [dr for dr in diff_results if dr[1] is not None]
+ if diff_results:
+ before = dict((dr[0], dr[1]['before']) for dr in diff_results)
+ after = dict((dr[0], dr[1]['after']) for dr in diff_results)
+ return {'before': before, 'after': after}
+ return None
+
+ def facts(self):
+ facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact)
+ return facts_result if facts_result else None
+
+ def change_vars(self):
+ return [v for v in self._data if self.meta(v).change]
+
+ def has_changed(self, v):
+ return self._meta[v].has_changed
+
+
+class VarsMixin(object):
+
+ def __init__(self, module=None):
+ self.vars = VarDict()
+ super(VarsMixin, self).__init__(module)
+
+ def update_vars(self, meta=None, **kwargs):
+ if meta is None:
+ meta = {}
+ for k, v in kwargs.items():
+ self.vars.set(k, v, **meta)
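A sketch of the change tracking; the variable name and values are illustrative:

    vd = VarDict()
    vd.set('path', '/tmp/old', diff=True)  # diff=True implies change tracking
    vd.path = '/tmp/new'                   # attribute assignment updates the same entry
    vd.has_changed('path')                 # -> True
    vd.diff()  # -> {'before': {'path': '/tmp/old'}, 'after': {'path': '/tmp/new'}}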
diff --git a/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
new file mode 100644
index 000000000..c5973262d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/mh/module_helper.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+# (TODO: remove AnsibleModule!) pylint: disable-next=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule # noqa: F401
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin
+
+
+class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase):
+ facts_name = None
+ output_params = ()
+ diff_params = ()
+ change_params = ()
+ facts_params = ()
+
+ def __init__(self, module=None):
+ super(ModuleHelper, self).__init__(module)
+ for name, value in self.module.params.items():
+ self.vars.set(
+ name, value,
+ diff=name in self.diff_params,
+ output=name in self.output_params,
+ change=None if not self.change_params else name in self.change_params,
+ fact=name in self.facts_params,
+ )
+
+ def update_output(self, **kwargs):
+ self.update_vars(meta={"output": True}, **kwargs)
+
+ def update_facts(self, **kwargs):
+ self.update_vars(meta={"fact": True}, **kwargs)
+
+ def _vars_changed(self):
+ return any(self.vars.has_changed(v) for v in self.vars.change_vars())
+
+ def has_changed(self):
+ return self.changed or self._vars_changed()
+
+ @property
+ def output(self):
+ result = dict(self.vars.output())
+ if self.facts_name:
+ facts = self.vars.facts()
+ if facts is not None:
+ result['ansible_facts'] = {self.facts_name: facts}
+ if self.diff_mode:
+ diff = result.get('diff', {})
+ vars_diff = self.vars.diff() or {}
+ result['diff'] = dict_merge(dict(diff), vars_diff)
+
+ return result
+
+
+class StateModuleHelper(StateMixin, ModuleHelper):
+ pass
+
+
+class CmdModuleHelper(CmdMixin, ModuleHelper):
+ """
+ THIS CLASS IS BEING DEPRECATED.
+ See the deprecation notice in ``CmdMixin.__init__()``.
+ """
+ pass
+
+
+class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper):
+ """
+ THIS CLASS IS BEING DEPRECATED.
+ See the deprecation notice in ``CmdMixin.__init__()``.
+ """
+ pass
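+
+
+# Illustrative sketch (hypothetical module, not part of this file): a module
+# subclasses ModuleHelper, declares which parameters feed the output/diff/
+# change/fact tracking above, and implements __run__() per the ModuleHelperBase
+# contract in mh/base.py:
+#
+#   class MyModule(ModuleHelper):
+#       output_params = ('name',)
+#       module = dict(
+#           argument_spec=dict(name=dict(type='str', required=True)),
+#           supports_check_mode=True,
+#       )
+#
+#       def __run__(self):
+#           self.vars.set('greeting', 'hello %s' % self.vars.name, output=True)
+#
+#   MyModule.execute()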
diff --git a/ansible_collections/community/general/plugins/module_utils/module_helper.py b/ansible_collections/community/general/plugins/module_utils/module_helper.py
new file mode 100644
index 000000000..8a51de665
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/module_helper.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020, Ansible Project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( # noqa: F401, pylint: disable=unused-import
+ ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule
+)
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401, pylint: disable=unused-import
+# pylint: disable-next=unused-import
+from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception # noqa: F401
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict # noqa: F401, pylint: disable=unused-import
diff --git a/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py b/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py
new file mode 100644
index 000000000..cd2abc568
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/net_tools/pritunl/api.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Pritunl API that offers CRUD operations on Pritunl Organizations and Users
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import base64
+import hashlib
+import hmac
+import json
+import time
+import uuid
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.urls import open_url
+
+
+class PritunlException(Exception):
+ pass
+
+
+def pritunl_argument_spec():
+ return dict(
+ pritunl_url=dict(required=True, type="str"),
+ pritunl_api_token=dict(required=True, type="str", no_log=False),
+ pritunl_api_secret=dict(required=True, type="str", no_log=True),
+ validate_certs=dict(required=False, type="bool", default=True),
+ )
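+
+# Illustrative usage (hypothetical module): a caller would typically merge this
+# shared spec with its own options, then pass the connection settings on to the
+# helper functions below:
+#
+#   argument_spec = pritunl_argument_spec()
+#   argument_spec.update(organization=dict(type="str"))  # hypothetical option
+#   module = AnsibleModule(argument_spec=argument_spec)
+#   orgs = list_pritunl_organizations(**get_pritunl_settings(module))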
+
+
+def get_pritunl_settings(module):
+ """
+    Helper function to build the required Pritunl request parameters from module arguments.
+ """
+ return {
+ "api_token": module.params.get("pritunl_api_token"),
+ "api_secret": module.params.get("pritunl_api_secret"),
+ "base_url": module.params.get("pritunl_url"),
+ "validate_certs": module.params.get("validate_certs"),
+ }
+
+
+def _get_pritunl_organizations(api_token, api_secret, base_url, validate_certs=True):
+ return pritunl_auth_request(
+ base_url=base_url,
+ api_token=api_token,
+ api_secret=api_secret,
+ method="GET",
+ path="/organization",
+ validate_certs=validate_certs,
+ )
+
+
+def _delete_pritunl_organization(
+ api_token, api_secret, base_url, organization_id, validate_certs=True
+):
+ return pritunl_auth_request(
+ base_url=base_url,
+ api_token=api_token,
+ api_secret=api_secret,
+ method="DELETE",
+ path="/organization/%s" % (organization_id),
+ validate_certs=validate_certs,
+ )
+
+
+def _post_pritunl_organization(
+ api_token, api_secret, base_url, organization_data, validate_certs=True
+):
+ return pritunl_auth_request(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ method="POST",
+ path="/organization/%s",
+ headers={"Content-Type": "application/json"},
+ data=json.dumps(organization_data),
+ validate_certs=validate_certs,
+ )
+
+
+def _get_pritunl_users(
+ api_token, api_secret, base_url, organization_id, validate_certs=True
+):
+ return pritunl_auth_request(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ method="GET",
+ path="/user/%s" % organization_id,
+ validate_certs=validate_certs,
+ )
+
+
+def _delete_pritunl_user(
+ api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
+):
+ return pritunl_auth_request(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ method="DELETE",
+ path="/user/%s/%s" % (organization_id, user_id),
+ validate_certs=validate_certs,
+ )
+
+
+def _post_pritunl_user(
+ api_token, api_secret, base_url, organization_id, user_data, validate_certs=True
+):
+ return pritunl_auth_request(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ method="POST",
+ path="/user/%s" % organization_id,
+ headers={"Content-Type": "application/json"},
+ data=json.dumps(user_data),
+ validate_certs=validate_certs,
+ )
+
+
+def _put_pritunl_user(
+ api_token,
+ api_secret,
+ base_url,
+ organization_id,
+ user_id,
+ user_data,
+ validate_certs=True,
+):
+ return pritunl_auth_request(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ method="PUT",
+ path="/user/%s/%s" % (organization_id, user_id),
+ headers={"Content-Type": "application/json"},
+ data=json.dumps(user_data),
+ validate_certs=validate_certs,
+ )
+
+
+def list_pritunl_organizations(
+ api_token, api_secret, base_url, validate_certs=True, filters=None
+):
+ orgs = []
+
+ response = _get_pritunl_organizations(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException("Could not retrieve organizations from Pritunl")
+ else:
+ for org in json.loads(response.read()):
+ # No filtering
+ if filters is None:
+ orgs.append(org)
+ else:
+                if all(
+                    org[filter_key] == filter_val
+                    for filter_key, filter_val in iteritems(filters)
+                ):
+                    orgs.append(org)
+
+ return orgs
+
+
+def list_pritunl_users(
+ api_token, api_secret, base_url, organization_id, validate_certs=True, filters=None
+):
+ users = []
+
+ response = _get_pritunl_users(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ validate_certs=validate_certs,
+ organization_id=organization_id,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException("Could not retrieve users from Pritunl")
+ else:
+ for user in json.loads(response.read()):
+ # No filtering
+ if filters is None:
+ users.append(user)
+
+ else:
+                if all(
+                    user[filter_key] == filter_val
+                    for filter_key, filter_val in iteritems(filters)
+                ):
+                    users.append(user)
+
+ return users
+
+
+def post_pritunl_organization(
+ api_token,
+ api_secret,
+ base_url,
+ organization_name,
+ validate_certs=True,
+):
+ response = _post_pritunl_organization(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ organization_data={"name": organization_name},
+        validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException(
+ "Could not add organization %s to Pritunl" % (organization_name)
+ )
+    # The organization POST request returns the created organization object
+ return json.loads(response.read())
+
+
+def post_pritunl_user(
+ api_token,
+ api_secret,
+ base_url,
+ organization_id,
+ user_data,
+ user_id=None,
+ validate_certs=True,
+):
+    # If user_id is provided, issue a PUT to update; otherwise issue a POST to create
+ if user_id is None:
+ response = _post_pritunl_user(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ organization_id=organization_id,
+ user_data=user_data,
+            validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException(
+ "Could not remove user %s from organization %s from Pritunl"
+ % (user_id, organization_id)
+ )
+ # user POST request returns an array of a single item,
+ # so return this item instead of the list
+ return json.loads(response.read())[0]
+ else:
+ response = _put_pritunl_user(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ organization_id=organization_id,
+ user_data=user_data,
+ user_id=user_id,
+            validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException(
+ "Could not update user %s from organization %s from Pritunl"
+ % (user_id, organization_id)
+ )
+ # The user PUT request returns the updated user object
+ return json.loads(response.read())
+
+
+def delete_pritunl_organization(
+ api_token, api_secret, base_url, organization_id, validate_certs=True
+):
+ response = _delete_pritunl_organization(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ organization_id=organization_id,
+        validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException(
+ "Could not remove organization %s from Pritunl" % (organization_id)
+ )
+
+ return json.loads(response.read())
+
+
+def delete_pritunl_user(
+ api_token, api_secret, base_url, organization_id, user_id, validate_certs=True
+):
+ response = _delete_pritunl_user(
+ api_token=api_token,
+ api_secret=api_secret,
+ base_url=base_url,
+ organization_id=organization_id,
+ user_id=user_id,
+        validate_certs=validate_certs,
+ )
+
+ if response.getcode() != 200:
+ raise PritunlException(
+ "Could not remove user %s from organization %s from Pritunl"
+ % (user_id, organization_id)
+ )
+
+ return json.loads(response.read())
+
+
+def pritunl_auth_request(
+ api_token,
+ api_secret,
+ base_url,
+ method,
+ path,
+ validate_certs=True,
+ headers=None,
+ data=None,
+):
+ """
+ Send an API call to a Pritunl server.
+    Adapted from https://pritunl.com/api to work with Ansible's open_url.
+ """
+ auth_timestamp = str(int(time.time()))
+ auth_nonce = uuid.uuid4().hex
+
+ auth_string = "&".join(
+ [api_token, auth_timestamp, auth_nonce, method.upper(), path]
+ )
+
+ auth_signature = base64.b64encode(
+ hmac.new(
+ api_secret.encode("utf-8"), auth_string.encode("utf-8"), hashlib.sha256
+ ).digest()
+ )
+
+ auth_headers = {
+ "Auth-Token": api_token,
+ "Auth-Timestamp": auth_timestamp,
+ "Auth-Nonce": auth_nonce,
+ "Auth-Signature": auth_signature,
+ }
+
+ if headers:
+ auth_headers.update(headers)
+
+ try:
+ uri = "%s%s" % (base_url, path)
+
+ return open_url(
+ uri,
+ method=method.upper(),
+ headers=auth_headers,
+ data=data,
+ validate_certs=validate_certs,
+ )
+ except Exception as e:
+ raise PritunlException(e)
diff --git a/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py b/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py
new file mode 100644
index 000000000..acc2ceae4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ocapi_utils.py
@@ -0,0 +1,502 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import os
+import uuid
+
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+
+GET_HEADERS = {'accept': 'application/json'}
+PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
+POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json'}
+DELETE_HEADERS = {'accept': 'application/json'}
+
+HEALTH_OK = 5
+
+
+class OcapiUtils(object):
+
+ def __init__(self, creds, base_uri, proxy_slot_number, timeout, module):
+ self.root_uri = base_uri
+ self.proxy_slot_number = proxy_slot_number
+ self.creds = creds
+ self.timeout = timeout
+ self.module = module
+
+ def _auth_params(self):
+ """
+ Return tuple of required authentication params based on the username and password.
+
+        :return: tuple of (username, password, force_basic_auth)
+ """
+ username = self.creds['user']
+ password = self.creds['pswd']
+ force_basic_auth = True
+ return username, password, force_basic_auth
+
+ def get_request(self, uri):
+ req_headers = dict(GET_HEADERS)
+ username, password, basic_auth = self._auth_params()
+ try:
+ resp = open_url(uri, method="GET", headers=req_headers,
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ data = json.loads(to_native(resp.read()))
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ return {'ret': False,
+ 'msg': "HTTP Error %s on GET request to '%s'"
+ % (e.code, uri),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers}
+
+ def delete_request(self, uri, etag=None):
+ req_headers = dict(DELETE_HEADERS)
+ if etag is not None:
+ req_headers['If-Match'] = etag
+ username, password, basic_auth = self._auth_params()
+ try:
+ resp = open_url(uri, method="DELETE", headers=req_headers,
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ if resp.status != 204:
+ data = json.loads(to_native(resp.read()))
+ else:
+ data = ""
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ return {'ret': False,
+ 'msg': "HTTP Error %s on DELETE request to '%s'"
+ % (e.code, uri),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers}
+
+ def put_request(self, uri, payload, etag=None):
+ req_headers = dict(PUT_HEADERS)
+ if etag is not None:
+ req_headers['If-Match'] = etag
+ username, password, basic_auth = self._auth_params()
+ try:
+ resp = open_url(uri, data=json.dumps(payload),
+ headers=req_headers, method="PUT",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ return {'ret': False,
+ 'msg': "HTTP Error %s on PUT request to '%s'"
+ % (e.code, uri),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'headers': headers, 'resp': resp}
+
+ def post_request(self, uri, payload, content_type="application/json", timeout=None):
+ req_headers = dict(POST_HEADERS)
+ if content_type != "application/json":
+ req_headers["content-type"] = content_type
+ username, password, basic_auth = self._auth_params()
+ if content_type == "application/json":
+ request_data = json.dumps(payload)
+ else:
+ request_data = payload
+ try:
+ resp = open_url(uri, data=request_data,
+ headers=req_headers, method="POST",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout if timeout is None else timeout)
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ return {'ret': False,
+ 'msg': "HTTP Error %s on POST request to '%s'"
+ % (e.code, uri),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'headers': headers, 'resp': resp}
+
+ def get_uri_with_slot_number_query_param(self, uri):
+ """Return the URI with proxy slot number added as a query param, if there is one.
+
+        If a proxy slot number is provided, the device must be addressed through it,
+        so this method appends it to the given URI as a "slotnumber" query parameter.
+        If there is no proxy slot number, the URI is returned unchanged.
+ """
+ if self.proxy_slot_number is not None:
+ parsed_url = urlparse(uri)
+ return parsed_url._replace(query="slotnumber=" + str(self.proxy_slot_number)).geturl()
+ else:
+ return uri
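+
+    # For example (illustrative), with proxy_slot_number = 2 a URI such as
+    # "https://host/Storage" becomes "https://host/Storage?slotnumber=2".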
+
+ def manage_system_power(self, command):
+ """Process a command to manage the system power.
+
+ :param str command: The Ansible command being processed.
+ """
+ if command == "PowerGracefulRestart":
+ resource_uri = self.root_uri
+ resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
+
+ # Get the resource so that we have the Etag
+ response = self.get_request(resource_uri)
+            if response['ret'] is False:
+                return response
+            if 'etag' not in response['headers']:
+                return {'ret': False, 'msg': 'Etag not found in response.'}
+            etag = response['headers']['etag']
+
+ # Issue the PUT to do the reboot (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ payload = {'Reboot': True}
+ response = self.put_request(resource_uri, payload, etag)
+ if response['ret'] is False:
+ return response
+ elif command.startswith("PowerMode"):
+ return self.manage_power_mode(command)
+ else:
+ return {'ret': False, 'msg': 'Invalid command: ' + command}
+
+ return {'ret': True}
+
+ def manage_chassis_indicator_led(self, command):
+ """Process a command to manage the chassis indicator LED.
+
+ :param string command: The Ansible command being processed.
+ """
+ return self.manage_indicator_led(command, self.root_uri)
+
+ def manage_indicator_led(self, command, resource_uri=None):
+ """Process a command to manage an indicator LED.
+
+ :param string command: The Ansible command being processed.
+ :param string resource_uri: URI of the resource whose indicator LED is being managed.
+ """
+ key = "IndicatorLED"
+ if resource_uri is None:
+ resource_uri = self.root_uri
+ resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
+
+ payloads = {
+ 'IndicatorLedOn': {
+ 'ID': 2
+ },
+ 'IndicatorLedOff': {
+ 'ID': 4
+ }
+ }
+
+ response = self.get_request(resource_uri)
+        if response['ret'] is False:
+            return response
+        if 'etag' not in response['headers']:
+            return {'ret': False, 'msg': 'Etag not found in response.'}
+        etag = response['headers']['etag']
+ data = response['data']
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+ if 'ID' not in data[key]:
+ return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'}
+
+ if command in payloads.keys():
+ # See if the LED is already set as requested.
+ current_led_status = data[key]['ID']
+ if current_led_status == payloads[command]['ID']:
+ return {'ret': True, 'changed': False}
+
+ # Set the LED (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ payload = {'IndicatorLED': payloads[command]}
+ response = self.put_request(resource_uri, payload, etag)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command'}
+
+ return {'ret': True}
+
+ def manage_power_mode(self, command):
+ key = "PowerState"
+ resource_uri = self.get_uri_with_slot_number_query_param(self.root_uri)
+
+ payloads = {
+ "PowerModeNormal": 2,
+ "PowerModeLow": 4
+ }
+
+ response = self.get_request(resource_uri)
+        if response['ret'] is False:
+            return response
+        if 'etag' not in response['headers']:
+            return {'ret': False, 'msg': 'Etag not found in response.'}
+        etag = response['headers']['etag']
+ data = response['data']
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+ if 'ID' not in data[key]:
+ return {'ret': False, 'msg': 'PowerState for resource has no ID.'}
+
+ if command in payloads.keys():
+ # See if the PowerState is already set as requested.
+ current_power_state = data[key]['ID']
+ if current_power_state == payloads[command]:
+ return {'ret': True, 'changed': False}
+
+ # Set the Power State (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ payload = {'PowerState': {"ID": payloads[command]}}
+ response = self.put_request(resource_uri, payload, etag)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command: ' + command}
+
+ return {'ret': True}
+
+ def prepare_multipart_firmware_upload(self, filename):
+ """Prepare a multipart/form-data body for OCAPI firmware upload.
+
+ :arg filename: The name of the file to upload.
+ :returns: tuple of (content_type, body) where ``content_type`` is
+ the ``multipart/form-data`` ``Content-Type`` header including
+ ``boundary`` and ``body`` is the prepared bytestring body
+
+ Prepares the body to include "FirmwareFile" field with the contents of the file.
+ Because some OCAPI targets do not support Base-64 encoding for multipart/form-data,
+ this method sends the file as binary.
+ """
+ boundary = str(uuid.uuid4()) # Generate a random boundary
+ body = "--" + boundary + '\r\n'
+ body += 'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"\r\n' % to_native(os.path.basename(filename))
+ body += 'Content-Type: application/octet-stream\r\n\r\n'
+ body_bytes = bytearray(body, 'utf-8')
+ with open(filename, 'rb') as f:
+ body_bytes += f.read()
+ body_bytes += bytearray("\r\n--%s--" % boundary, 'utf-8')
+ return ("multipart/form-data; boundary=%s" % boundary,
+ body_bytes)
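+
+    # The prepared body looks roughly like this (illustrative):
+    #   --<boundary>
+    #   Content-Disposition: form-data; name="FirmwareFile"; filename="fw.bin"
+    #   Content-Type: application/octet-stream
+    #
+    #   <raw file bytes>
+    #   --<boundary>--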
+
+ def upload_firmware_image(self, update_image_path):
+ """Perform Firmware Upload to the OCAPI storage device.
+
+ :param str update_image_path: The path/filename of the firmware image, on the local filesystem.
+ """
+ if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)):
+ return {'ret': False, 'msg': 'File does not exist.'}
+ url = self.root_uri + "OperatingSystem"
+ url = self.get_uri_with_slot_number_query_param(url)
+ content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path)
+
+ # Post the firmware (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ result = self.post_request(url, b_form_data, content_type=content_type, timeout=300)
+ if result['ret'] is False:
+ return result
+ return {'ret': True}
+
+ def update_firmware_image(self):
+ """Perform a Firmware Update on the OCAPI storage device."""
+ resource_uri = self.root_uri
+ resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
+ # We have to do a GET to obtain the Etag. It's required on the PUT.
+ response = self.get_request(resource_uri)
+ if response['ret'] is False:
+ return response
+ if 'etag' not in response['headers']:
+ return {'ret': False, 'msg': 'Etag not found in response.'}
+ etag = response['headers']['etag']
+
+ # Issue the PUT (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ payload = {'FirmwareUpdate': True}
+ response = self.put_request(resource_uri, payload, etag)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'jobUri': response["headers"]["location"]}
+
+ def activate_firmware_image(self):
+ """Perform a Firmware Activate on the OCAPI storage device."""
+ resource_uri = self.root_uri
+ resource_uri = self.get_uri_with_slot_number_query_param(resource_uri)
+ # We have to do a GET to obtain the Etag. It's required on the PUT.
+ response = self.get_request(resource_uri)
+        if response['ret'] is False:
+            return response
+        if 'etag' not in response['headers']:
+            return {'ret': False, 'msg': 'Etag not found in response.'}
+        etag = response['headers']['etag']
+
+ # Issue the PUT (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ payload = {'FirmwareActivate': True}
+ response = self.put_request(resource_uri, payload, etag)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'jobUri': response["headers"]["location"]}
+
+ def get_job_status(self, job_uri):
+ """Get the status of a job.
+
+ :param str job_uri: The URI of the job's status monitor.
+ """
+ job_uri = self.get_uri_with_slot_number_query_param(job_uri)
+ response = self.get_request(job_uri)
+ if response['ret'] is False:
+ if response.get('status') == 404:
+ # Job not found -- assume 0%
+ return {
+ "ret": True,
+ "percentComplete": 0,
+ "operationStatus": "Not Available",
+ "operationStatusId": 1,
+ "operationHealth": None,
+ "operationHealthId": None,
+ "details": "Job does not exist.",
+ "jobExists": False
+ }
+ else:
+ return response
+ details = response["data"]["Status"].get("Details")
+        if isinstance(details, str):
+ details = [details]
+ health_list = response["data"]["Status"]["Health"]
+ return_value = {
+ "ret": True,
+ "percentComplete": response["data"]["PercentComplete"],
+ "operationStatus": response["data"]["Status"]["State"]["Name"],
+ "operationStatusId": response["data"]["Status"]["State"]["ID"],
+ "operationHealth": health_list[0]["Name"] if len(health_list) > 0 else None,
+ "operationHealthId": health_list[0]["ID"] if len(health_list) > 0 else None,
+ "details": details,
+ "jobExists": True
+ }
+ return return_value
+
+ def delete_job(self, job_uri):
+ """Delete the OCAPI job referenced by the specified job_uri."""
+ job_uri = self.get_uri_with_slot_number_query_param(job_uri)
+ # We have to do a GET to obtain the Etag. It's required on the DELETE.
+ response = self.get_request(job_uri)
+
+ if response['ret'] is True:
+ if 'etag' not in response['headers']:
+ return {'ret': False, 'msg': 'Etag not found in response.'}
+ else:
+ etag = response['headers']['etag']
+
+ if response['data']['PercentComplete'] != 100:
+ return {
+ 'ret': False,
+ 'changed': False,
+ 'msg': 'Cannot delete job because it is in progress.'
+ }
+
+ if response['ret'] is False:
+ if response['status'] == 404:
+ return {
+ 'ret': True,
+ 'changed': False,
+ 'msg': 'Job already deleted.'
+ }
+ return response
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+
+ # Do the DELETE (unless we are in check mode)
+ response = self.delete_request(job_uri, etag)
+ if response['ret'] is False:
+ if response['status'] == 404:
+ return {
+ 'ret': True,
+ 'changed': False
+ }
+ elif response['status'] == 409:
+ return {
+ 'ret': False,
+ 'changed': False,
+ 'msg': 'Cannot delete job because it is in progress.'
+ }
+ return response
+ return {
+ 'ret': True,
+ 'changed': True
+ }
diff --git a/ansible_collections/community/general/plugins/module_utils/oneandone.py b/ansible_collections/community/general/plugins/module_utils/oneandone.py
new file mode 100644
index 000000000..bbad2eaa0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/oneandone.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+
+
+class OneAndOneResources:
+ firewall_policy = 'firewall_policy'
+ load_balancer = 'load_balancer'
+ monitoring_policy = 'monitoring_policy'
+ private_network = 'private_network'
+ public_ip = 'public_ip'
+ role = 'role'
+ server = 'server'
+ user = 'user'
+ vpn = 'vpn'
+
+
+def get_resource(oneandone_conn, resource_type, resource_id):
+ switcher = {
+ 'firewall_policy': oneandone_conn.get_firewall,
+ 'load_balancer': oneandone_conn.get_load_balancer,
+ 'monitoring_policy': oneandone_conn.get_monitoring_policy,
+ 'private_network': oneandone_conn.get_private_network,
+ 'public_ip': oneandone_conn.get_public_ip,
+ 'role': oneandone_conn.get_role,
+ 'server': oneandone_conn.get_server,
+ 'user': oneandone_conn.get_user,
+ 'vpn': oneandone_conn.get_vpn,
+ }
+
+ return switcher.get(resource_type, None)(resource_id)
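+
+# For example (illustrative): get_resource(conn, OneAndOneResources.server, server_id)
+# dispatches to conn.get_server(server_id).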
+
+
+def get_datacenter(oneandone_conn, datacenter, full_object=False):
+ """
+ Validates the datacenter exists by ID or country code.
+ Returns the datacenter ID.
+ """
+ for _datacenter in oneandone_conn.list_datacenters():
+ if datacenter in (_datacenter['id'], _datacenter['country_code']):
+ if full_object:
+ return _datacenter
+ return _datacenter['id']
+
+
+def get_fixed_instance_size(oneandone_conn, fixed_instance_size, full_object=False):
+ """
+ Validates the fixed instance size exists by ID or name.
+ Return the instance size ID.
+ """
+ for _fixed_instance_size in oneandone_conn.fixed_server_flavors():
+ if fixed_instance_size in (_fixed_instance_size['id'],
+ _fixed_instance_size['name']):
+ if full_object:
+ return _fixed_instance_size
+ return _fixed_instance_size['id']
+
+
+def get_appliance(oneandone_conn, appliance, full_object=False):
+ """
+ Validates the appliance exists by ID or name.
+ Return the appliance ID.
+ """
+ for _appliance in oneandone_conn.list_appliances(q='IMAGE'):
+ if appliance in (_appliance['id'], _appliance['name']):
+ if full_object:
+ return _appliance
+ return _appliance['id']
+
+
+def get_private_network(oneandone_conn, private_network, full_object=False):
+ """
+ Validates the private network exists by ID or name.
+ Return the private network ID.
+ """
+ for _private_network in oneandone_conn.list_private_networks():
+ if private_network in (_private_network['name'],
+ _private_network['id']):
+ if full_object:
+ return _private_network
+ return _private_network['id']
+
+
+def get_monitoring_policy(oneandone_conn, monitoring_policy, full_object=False):
+ """
+ Validates the monitoring policy exists by ID or name.
+ Return the monitoring policy ID.
+ """
+ for _monitoring_policy in oneandone_conn.list_monitoring_policies():
+ if monitoring_policy in (_monitoring_policy['name'],
+ _monitoring_policy['id']):
+ if full_object:
+ return _monitoring_policy
+ return _monitoring_policy['id']
+
+
+def get_firewall_policy(oneandone_conn, firewall_policy, full_object=False):
+ """
+ Validates the firewall policy exists by ID or name.
+ Return the firewall policy ID.
+ """
+ for _firewall_policy in oneandone_conn.list_firewall_policies():
+ if firewall_policy in (_firewall_policy['name'],
+ _firewall_policy['id']):
+ if full_object:
+ return _firewall_policy
+ return _firewall_policy['id']
+
+
+def get_load_balancer(oneandone_conn, load_balancer, full_object=False):
+ """
+ Validates the load balancer exists by ID or name.
+ Return the load balancer ID.
+ """
+ for _load_balancer in oneandone_conn.list_load_balancers():
+ if load_balancer in (_load_balancer['name'],
+ _load_balancer['id']):
+ if full_object:
+ return _load_balancer
+ return _load_balancer['id']
+
+
+def get_server(oneandone_conn, instance, full_object=False):
+ """
+    Validates that the server exists, by ID or name.
+ Returns the server if one was found.
+ """
+ for server in oneandone_conn.list_servers(per_page=1000):
+ if instance in (server['id'], server['name']):
+ if full_object:
+ return server
+ return server['id']
+
+
+def get_user(oneandone_conn, user, full_object=False):
+ """
+ Validates that the user exists by ID or a name.
+ Returns the user if one was found.
+ """
+ for _user in oneandone_conn.list_users(per_page=1000):
+ if user in (_user['id'], _user['name']):
+ if full_object:
+ return _user
+ return _user['id']
+
+
+def get_role(oneandone_conn, role, full_object=False):
+ """
+    Validates that the role exists, by ID or name.
+ Returns the role if one was found, else None.
+ """
+ for _role in oneandone_conn.list_roles(per_page=1000):
+ if role in (_role['id'], _role['name']):
+ if full_object:
+ return _role
+ return _role['id']
+
+
+def get_vpn(oneandone_conn, vpn, full_object=False):
+ """
+ Validates that the vpn exists by ID or a name.
+ Returns the vpn if one was found.
+ """
+ for _vpn in oneandone_conn.list_vpns(per_page=1000):
+ if vpn in (_vpn['id'], _vpn['name']):
+ if full_object:
+ return _vpn
+ return _vpn['id']
+
+
+def get_public_ip(oneandone_conn, public_ip, full_object=False):
+ """
+ Validates that the public ip exists by ID or a name.
+ Returns the public ip if one was found.
+ """
+ for _public_ip in oneandone_conn.list_public_ips(per_page=1000):
+ if public_ip in (_public_ip['id'], _public_ip['ip']):
+ if full_object:
+ return _public_ip
+ return _public_ip['id']
+
+
+def wait_for_resource_creation_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource create operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the resource info
+ resource = get_resource(oneandone_conn, resource_type, resource_id)
+
+ if resource_type == OneAndOneResources.server:
+ resource_state = resource['status']['state']
+ else:
+ resource_state = resource['state']
+
+ if ((resource_type == OneAndOneResources.server and resource_state.lower() == 'powered_on') or
+ (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')):
+ return
+ elif resource_state.lower() == 'failed':
+ raise Exception('%s creation failed for %s' % (resource_type, resource_id))
+ elif resource_state.lower() in ('active',
+ 'enabled',
+ 'deploying',
+ 'configuring'):
+ continue
+ else:
+ raise Exception(
+ 'Unknown %s state %s' % (resource_type, resource_state))
+
+ raise Exception(
+ 'Timed out waiting for %s completion for %s' % (resource_type, resource_id))
+
+
+def wait_for_resource_deletion_completion(oneandone_conn,
+ resource_type,
+ resource_id,
+ wait_timeout,
+ wait_interval):
+ """
+ Waits for the resource delete operation to complete based on the timeout period.
+ """
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+
+ # Refresh the operation info
+ logs = oneandone_conn.list_logs(q='DELETE',
+ period='LAST_HOUR',
+ sort='-start_date')
+
+ if resource_type == OneAndOneResources.server:
+ _type = 'VM'
+ elif resource_type == OneAndOneResources.private_network:
+ _type = 'PRIVATENETWORK'
+ else:
+ raise Exception(
+ 'Unsupported wait_for delete operation for %s resource' % resource_type)
+
+ for log in logs:
+ if (log['resource']['id'] == resource_id and
+ log['action'] == 'DELETE' and
+ log['type'] == _type and
+ log['status']['state'] == 'OK'):
+ return
+ raise Exception(
+ 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id))
diff --git a/ansible_collections/community/general/plugins/module_utils/onepassword.py b/ansible_collections/community/general/plugins/module_utils/onepassword.py
new file mode 100644
index 000000000..3023165b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/onepassword.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+
+class OnePasswordConfig(object):
+ _config_file_paths = (
+ "~/.op/config",
+ "~/.config/op/config",
+ "~/.config/.op/config",
+ )
+
+ def __init__(self):
+ self._config_file_path = ""
+
+ @property
+ def config_file_path(self):
+ if self._config_file_path:
+ return self._config_file_path
+
+ for path in self._config_file_paths:
+ realpath = os.path.expanduser(path)
+ if os.path.exists(realpath):
+ self._config_file_path = realpath
+ return self._config_file_path
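+
+# Note (illustrative): config_file_path returns the first existing candidate
+# path and caches it for later calls; when no candidate exists it implicitly
+# returns None.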
diff --git a/ansible_collections/community/general/plugins/module_utils/oneview.py b/ansible_collections/community/general/plugins/module_utils/oneview.py
new file mode 100644
index 000000000..4315a462d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/oneview.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import json
+# (TODO: remove next line!)
+import os # noqa: F401, pylint: disable=unused-import
+import traceback
+
+HPE_ONEVIEW_IMP_ERR = None
+try:
+ from hpOneView.oneview_client import OneViewClient
+ HAS_HPE_ONEVIEW = True
+except ImportError:
+ HPE_ONEVIEW_IMP_ERR = traceback.format_exc()
+ HAS_HPE_ONEVIEW = False
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+def transform_list_to_dict(list_):
+ """
+ Transforms a list into a dictionary, putting values as keys.
+
+ :arg list list_: List of values
+ :return: dict: dictionary built
+ """
+
+ ret = {}
+
+ if not list_:
+ return ret
+
+ for value in list_:
+ if isinstance(value, Mapping):
+ ret.update(value)
+ else:
+ ret[to_native(value, errors='surrogate_or_strict')] = True
+
+ return ret
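+
+# For example (illustrative):
+#   transform_list_to_dict(['a', {'b': True}, 'c']) -> {'a': True, 'b': True, 'c': True}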
+
+
+def merge_list_by_key(original_list, updated_list, key, ignore_when_null=None):
+ """
+ Merge two lists by the key. It basically:
+
+ 1. Adds the items that are present on updated_list and are absent on original_list.
+
+ 2. Removes items that are absent on updated_list and are present on original_list.
+
+ 3. For all items that are in both lists, overwrites the values from the original item by the updated item.
+
+ :arg list original_list: original list.
+ :arg list updated_list: list with changes.
+ :arg str key: unique identifier.
+ :arg list ignore_when_null: list with the keys from the updated items that should be ignored in the merge,
+ if its values are null.
+ :return: list: Lists merged.
+ """
+ ignore_when_null = [] if ignore_when_null is None else ignore_when_null
+
+ if not original_list:
+ return updated_list
+
+ items_map = collections.OrderedDict([(i[key], i.copy()) for i in original_list])
+
+ merged_items = collections.OrderedDict()
+
+ for item in updated_list:
+ item_key = item[key]
+ if item_key in items_map:
+ for ignored_key in ignore_when_null:
+ if ignored_key in item and item[ignored_key] is None:
+ item.pop(ignored_key)
+ merged_items[item_key] = items_map[item_key]
+ merged_items[item_key].update(item)
+ else:
+ merged_items[item_key] = item
+
+ return list(merged_items.values())
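+
+# For example (illustrative), merging by 'id':
+#   original = [{'id': 1, 'a': 'x'}, {'id': 2, 'a': 'y'}]
+#   updated  = [{'id': 2, 'a': 'z'}, {'id': 3, 'a': 'w'}]
+#   merge_list_by_key(original, updated, key='id')
+#   -> [{'id': 2, 'a': 'z'}, {'id': 3, 'a': 'w'}]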
+
+
+def _str_sorted(obj):
+ if isinstance(obj, Mapping):
+ return json.dumps(obj, sort_keys=True)
+ else:
+ return str(obj)
+
+
+def _standardize_value(value):
+ """
+ Convert value to string to enhance the comparison.
+
+ :arg value: Any object type.
+
+ :return: str: Converted value.
+ """
+ if isinstance(value, float) and value.is_integer():
+        # Workaround to avoid erroneous comparisons between int and float:
+        # converting integer-valued floats to int makes 1.0 and 1 compare equal
+ value = int(value)
+
+ return str(value)
+
+
+class OneViewModuleException(Exception):
+ """
+ OneView base Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ oneview_response (dict): OneView rest response.
+ """
+
+ def __init__(self, data):
+ self.msg = None
+ self.oneview_response = None
+
+ if isinstance(data, six.string_types):
+ self.msg = data
+ else:
+ self.oneview_response = data
+
+ if data and isinstance(data, dict):
+ self.msg = data.get('message')
+
+ if self.oneview_response:
+ Exception.__init__(self, self.msg, self.oneview_response)
+ else:
+ Exception.__init__(self, self.msg)
+
+
+class OneViewModuleTaskError(OneViewModuleException):
+ """
+ OneView Task Error Exception.
+
+ Attributes:
+ msg (str): Exception message.
+ error_code (str): A code which uniquely identifies the specific error.
+ """
+
+ def __init__(self, msg, error_code=None):
+ super(OneViewModuleTaskError, self).__init__(msg)
+ self.error_code = error_code
+
+
+class OneViewModuleValueError(OneViewModuleException):
+ """
+ OneView Value Error.
+ The exception is raised when the data contains an inappropriate value.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+class OneViewModuleResourceNotFound(OneViewModuleException):
+ """
+ OneView Resource Not Found Exception.
+ The exception is raised when an associated resource was not found.
+
+ Attributes:
+ msg (str): Exception message.
+ """
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OneViewModuleBase(object):
+ MSG_CREATED = 'Resource created successfully.'
+ MSG_UPDATED = 'Resource updated successfully.'
+ MSG_DELETED = 'Resource deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Resource is already present.'
+ MSG_ALREADY_ABSENT = 'Resource is already absent.'
+ MSG_DIFF_AT_KEY = 'Difference found at key \'{0}\'. '
+
+ ONEVIEW_COMMON_ARGS = dict(
+ config=dict(type='path'),
+ hostname=dict(type='str'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ api_version=dict(type='int'),
+ image_streamer_hostname=dict(type='str')
+ )
+
+ ONEVIEW_VALIDATE_ETAG_ARGS = dict(validate_etag=dict(type='bool', default=True))
+
+ resource_client = None
+
+ def __init__(self, additional_arg_spec=None, validate_etag_support=False, supports_check_mode=False):
+ """
+ OneViewModuleBase constructor.
+
+ :arg dict additional_arg_spec: Additional argument spec definition.
+        :arg bool validate_etag_support: Enables support for eTag validation.
+        :arg bool supports_check_mode: Whether the module supports Ansible's check mode.
+ """
+ argument_spec = self._build_argument_spec(additional_arg_spec, validate_etag_support)
+
+ self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=supports_check_mode)
+
+ self._check_hpe_oneview_sdk()
+ self._create_oneview_client()
+
+ self.state = self.module.params.get('state')
+ self.data = self.module.params.get('data')
+
+ # Preload params for get_all - used by facts
+ self.facts_params = self.module.params.get('params') or {}
+
+ # Preload options as dict - used by facts
+ self.options = transform_list_to_dict(self.module.params.get('options'))
+
+ self.validate_etag_support = validate_etag_support
+
+ def _build_argument_spec(self, additional_arg_spec, validate_etag_support):
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(self.ONEVIEW_COMMON_ARGS)
+
+ if validate_etag_support:
+ merged_arg_spec.update(self.ONEVIEW_VALIDATE_ETAG_ARGS)
+
+ if additional_arg_spec:
+ merged_arg_spec.update(additional_arg_spec)
+
+ return merged_arg_spec
+
+ def _check_hpe_oneview_sdk(self):
+ if not HAS_HPE_ONEVIEW:
+ self.module.fail_json(msg=missing_required_lib('hpOneView'), exception=HPE_ONEVIEW_IMP_ERR)
+
+ def _create_oneview_client(self):
+ if self.module.params.get('hostname'):
+ config = dict(ip=self.module.params['hostname'],
+ credentials=dict(userName=self.module.params['username'], password=self.module.params['password']),
+ api_version=self.module.params['api_version'],
+ image_streamer_ip=self.module.params['image_streamer_hostname'])
+ self.oneview_client = OneViewClient(config)
+ elif not self.module.params['config']:
+ self.oneview_client = OneViewClient.from_environment_variables()
+ else:
+ self.oneview_client = OneViewClient.from_json_file(self.module.params['config'])
+
+ @abc.abstractmethod
+ def execute_module(self):
+ """
+ Abstract method, must be implemented by the inheritor.
+
+        This method is called from the run method. It should contain the module logic.
+
+ :return: dict: It must return a dictionary with the attributes for the module result,
+ such as ansible_facts, msg and changed.
+ """
+ pass
+
+ def run(self):
+ """
+        Common run implementation for OneView modules.
+
+        It calls the inheritor's 'execute_module' function and sends the result back to Ansible.
+
+ It handles any OneViewModuleException in order to signal a failure to Ansible, with a descriptive error message.
+
+ """
+ try:
+ if self.validate_etag_support:
+ if not self.module.params.get('validate_etag'):
+ self.oneview_client.connection.disable_etag_validation()
+
+ result = self.execute_module()
+
+ if "changed" not in result:
+ result['changed'] = False
+
+ self.module.exit_json(**result)
+
+ except OneViewModuleException as exception:
+ error_msg = '; '.join(to_native(e) for e in exception.args)
+ self.module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+ def resource_absent(self, resource, method='delete'):
+ """
+ Generic implementation of the absent state for the OneView resources.
+
+ It checks if the resource needs to be removed.
+
+ :arg dict resource: Resource to delete.
+ :arg str method: Function of the OneView client that will be called for resource deletion.
+ Usually delete or remove.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if resource:
+ getattr(self.resource_client, method)(resource)
+
+ return {"changed": True, "msg": self.MSG_DELETED}
+ else:
+ return {"changed": False, "msg": self.MSG_ALREADY_ABSENT}
+
+ def get_by_name(self, name):
+ """
+ Generic get by name implementation.
+
+ :arg str name: Resource name to search for.
+
+ :return: The resource found or None.
+ """
+ result = self.resource_client.get_by('name', name)
+ return result[0] if result else None
+
+ def resource_present(self, resource, fact_name, create_method='create'):
+ """
+ Generic implementation of the present state for the OneView resources.
+
+ It checks if the resource needs to be created or updated.
+
+ :arg dict resource: Resource to create or update.
+        :arg str fact_name: Name of the fact returned to Ansible.
+ :arg str create_method: Function of the OneView client that will be called for resource creation.
+ Usually create or add.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+
+ changed = False
+ if "newName" in self.data:
+ self.data["name"] = self.data.pop("newName")
+
+ if not resource:
+ resource = getattr(self.resource_client, create_method)(self.data)
+ msg = self.MSG_CREATED
+ changed = True
+
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ if self.compare(resource, merged_data):
+ msg = self.MSG_ALREADY_PRESENT
+ else:
+ resource = self.resource_client.update(merged_data)
+ changed = True
+ msg = self.MSG_UPDATED
+
+ return dict(
+ msg=msg,
+ changed=changed,
+ ansible_facts={fact_name: resource}
+ )
+
+ def resource_scopes_set(self, state, fact_name, scope_uris):
+ """
+ Generic implementation of the scopes update PATCH for the OneView resources.
+ It checks if the resource needs to be updated with the current scopes.
+ This method is meant to be run after ensuring the present state.
+ :arg dict state: Dict containing the data from the last state results in the resource.
+ It needs to have the 'msg', 'changed', and 'ansible_facts' entries.
+        :arg str fact_name: Name of the fact returned to Ansible.
+ :arg list scope_uris: List with all the scope URIs to be added to the resource.
+ :return: A dictionary with the expected arguments for the AnsibleModule.exit_json
+ """
+ if scope_uris is None:
+ scope_uris = []
+ resource = state['ansible_facts'][fact_name]
+ operation_data = dict(operation='replace', path='/scopeUris', value=scope_uris)
+
+ if resource['scopeUris'] is None or set(resource['scopeUris']) != set(scope_uris):
+ state['ansible_facts'][fact_name] = self.resource_client.patch(resource['uri'], **operation_data)
+ state['changed'] = True
+ state['msg'] = self.MSG_UPDATED
+
+ return state
+
+ def compare(self, first_resource, second_resource):
+ """
+        Recursively compares dictionary contents for equivalence, ignoring types and element order.
+        Particularities of the comparison:
+            - A nonexistent key is equivalent to a key set to None
+            - These values are considered equal: None, empty, False
+            - Lists of the same size are compared value by value after sorting
+            - Each element is converted to str before the comparison
+ :arg dict first_resource: first dictionary
+ :arg dict second_resource: second dictionary
+ :return: bool: True when equal, False when different.
+ """
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The first resource is True / Not Null and the second resource is False / Null
+ if resource1 and not resource2:
+ self.module.log("resource1 and not resource2. " + debug_resources)
+ return False
+
+ # Checks all keys in first dict against the second dict
+ for key in resource1:
+ if key not in resource2:
+ if resource1[key] is not None:
+                    # A nonexistent key is equivalent to a key set to None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ # If both values are null, empty or False it will be considered equal.
+ elif not resource1[key] and not resource2[key]:
+ continue
+ elif isinstance(resource1[key], Mapping):
+ # recursive call
+ if not self.compare(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif isinstance(resource1[key], list):
+ # change comparison function to compare_list
+ if not self.compare_list(resource1[key], resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+ elif _standardize_value(resource1[key]) != _standardize_value(resource2[key]):
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ # Checks all keys in the second dict, looking for missing elements
+ for key in resource2.keys():
+ if key not in resource1:
+ if resource2[key] is not None:
+                    # A nonexistent key is equivalent to a key set to None
+ self.module.log(self.MSG_DIFF_AT_KEY.format(key) + debug_resources)
+ return False
+
+ return True
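+
+    # Illustrative behaviour of compare() (hypothetical data):
+    #   compare({'a': 1, 'b': None}, {'a': '1'}) -> True   (types ignored; a missing key equals None)
+    #   compare({'a': [1, 2]}, {'a': [2, 1]})    -> True   (list order ignored)
+    #   compare({'a': 1}, {'a': 2})              -> False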
+
+ def compare_list(self, first_resource, second_resource):
+ """
+        Recursively compares list contents for equivalence, ignoring types and element order.
+        Lists of the same size are compared value by value after sorting;
+        each element is converted to str before the comparison.
+ :arg list first_resource: first list
+ :arg list second_resource: second list
+ :return: True when equal; False when different.
+ """
+
+ resource1 = first_resource
+ resource2 = second_resource
+
+ debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2)
+
+ # The second list is null / empty / False
+ if not resource2:
+ self.module.log("resource 2 is null. " + debug_resources)
+ return False
+
+ if len(resource1) != len(resource2):
+ self.module.log("resources have different length. " + debug_resources)
+ return False
+
+ resource1 = sorted(resource1, key=_str_sorted)
+ resource2 = sorted(resource2, key=_str_sorted)
+
+ for i, val in enumerate(resource1):
+ if isinstance(val, Mapping):
+ # change comparison function to compare dictionaries
+ if not self.compare(val, resource2[i]):
+ self.module.log("resources are different. " + debug_resources)
+ return False
+ elif isinstance(val, list):
+ # recursive call
+ if not self.compare_list(val, resource2[i]):
+ self.module.log("lists are different. " + debug_resources)
+ return False
+ elif _standardize_value(val) != _standardize_value(resource2[i]):
+ self.module.log("values are different. " + debug_resources)
+ return False
+
+ # no differences found
+ return True
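+
+
+# Illustrative sketch (hypothetical module) of the OneViewModuleBase contract:
+#
+#   class FcNetworkModule(OneViewModuleBase):
+#       def __init__(self):
+#           super(FcNetworkModule, self).__init__(
+#               additional_arg_spec=dict(state=dict(type='str'),
+#                                        data=dict(type='dict')))
+#           self.resource_client = self.oneview_client.fc_networks
+#
+#       def execute_module(self):
+#           resource = self.get_by_name(self.data['name'])
+#           return self.resource_present(resource, fact_name='fc_network')
+#
+#   FcNetworkModule().run()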
diff --git a/ansible_collections/community/general/plugins/module_utils/online.py b/ansible_collections/community/general/plugins/module_utils/online.py
new file mode 100644
index 000000000..a2f6e77a0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/online.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url
+
+
+def online_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['ONLINE_TOKEN', 'ONLINE_API_KEY', 'ONLINE_OAUTH_TOKEN', 'ONLINE_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['ONLINE_API_URL']), default='https://api.online.net', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
+
+class OnlineException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Online(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'Authorization': "Bearer %s" % self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+ if not results.ok:
+ raise OnlineException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.module.params.get('api_url'), path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+        # Exceptions in fetch_url may result in a status of -1; this ensures a proper error is reported to the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+ def patch(self, path, data=None, headers=None):
+ return self.send("PATCH", path, data, headers)
+
+ def update(self, path, data=None, headers=None):
+ return self.send("UPDATE", path, data, headers)
diff --git a/ansible_collections/community/general/plugins/module_utils/opennebula.py b/ansible_collections/community/general/plugins/module_utils/opennebula.py
new file mode 100644
index 000000000..94732e4f7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/opennebula.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 www.privaz.io Valletech AB
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import time
+import ssl
+from os import environ
+from ansible.module_utils.six import string_types
+from ansible.module_utils.basic import AnsibleModule
+
+
+HAS_PYONE = True
+
+try:
+ from pyone import OneException
+ from pyone.server import OneServer
+except ImportError:
+ OneException = Exception
+ HAS_PYONE = False
+
+
+# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
+# It allows for easily handling lists like "NIC" or "DISK" in the JSON-like template representation.
+# There are either lists of dictionaries (length > 1) or just dictionaries.
+def flatten(to_flatten, extract=False):
+ """Flattens nested lists (with optional value extraction)."""
+ def recurse(to_flatten):
+ return sum(map(recurse, to_flatten), []) if isinstance(to_flatten, list) else [to_flatten]
+ value = recurse(to_flatten)
+ if extract and len(value) == 1:
+ return value[0]
+ return value
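+
+# Illustrative sketch (not from the original source): expected behaviour of
+# flatten() on a nested "NIC"-style list, assuming plain dict entries:
+#   flatten([{'NIC': 1}, [{'NIC': 2}, {'NIC': 3}]])  -> [{'NIC': 1}, {'NIC': 2}, {'NIC': 3}]
+#   flatten([{'DISK': 1}], extract=True)             -> {'DISK': 1}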
+
+
+# A helper function to mitigate https://github.com/OpenNebula/one/issues/6064.
+# It renders JSON-like template representation into OpenNebula's template syntax (string).
+def render(to_render):
+ """Converts dictionary to OpenNebula template."""
+ def recurse(to_render):
+ for key, value in sorted(to_render.items()):
+ if value is None:
+ continue
+ if isinstance(value, dict):
+ yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value)))
+ continue
+ if isinstance(value, list):
+ for item in value:
+ yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item)))
+ continue
+ if isinstance(value, str):
+ yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"'))
+ continue
+ yield '{0:}="{1:}"'.format(key, value)
+ return '\n'.join(recurse(to_render))
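+
+# Illustrative sketch (not from the original source): render() turns the
+# JSON-like representation into OpenNebula template syntax; a hypothetical
+# input like
+#   render({'CPU': 1, 'NIC': [{'NETWORK': 'net1'}]})
+# would yield:
+#   CPU="1"
+#   NIC=[NETWORK="net1"]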
+
+
+class OpenNebulaModule:
+ """
+ Base class for all OpenNebula Ansible Modules.
+ This is basically a wrapper of the common arguments, the pyone client and
+ some utility methods.
+ """
+
+ common_args = dict(
+ api_url=dict(type='str', aliases=['api_endpoint'], default=environ.get("ONE_URL")),
+ api_username=dict(type='str', default=environ.get("ONE_USERNAME")),
+ api_password=dict(type='str', no_log=True, aliases=['api_token'], default=environ.get("ONE_PASSWORD")),
+ validate_certs=dict(default=True, type='bool'),
+ wait_timeout=dict(type='int', default=300),
+ )
+
+ def __init__(self, argument_spec, supports_check_mode=False, mutually_exclusive=None, required_one_of=None, required_if=None):
+
+ module_args = OpenNebulaModule.common_args.copy()
+ module_args.update(argument_spec)
+
+ self.module = AnsibleModule(argument_spec=module_args,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ required_if=required_if)
+ self.result = dict(changed=False,
+ original_message='',
+ message='')
+ self.one = self.create_one_client()
+
+ self.resolved_parameters = self.resolve_parameters()
+
+ def create_one_client(self):
+ """
+ Creates an XML-RPC client to OpenNebula.
+
+ Returns: the new xmlrpc client.
+
+ """
+
+ # A context is required to skip SSL validation; old Python versions won't validate anyway.
+ if hasattr(ssl, '_create_unverified_context'):
+ no_ssl_validation_context = ssl._create_unverified_context()
+ else:
+ no_ssl_validation_context = None
+
+ # Check if the module can run
+ if not HAS_PYONE:
+ self.fail("pyone is required for this module")
+
+ if self.module.params.get("api_url"):
+ url = self.module.params.get("api_url")
+ else:
+ self.fail("Either api_url or the environment variable ONE_URL must be provided")
+
+ if self.module.params.get("api_username"):
+ username = self.module.params.get("api_username")
+ else:
+ self.fail("Either api_username or the environment variable ONE_USERNAME must be provided")
+
+ if self.module.params.get("api_password"):
+ password = self.module.params.get("api_password")
+ else:
+ self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided")
+
+ session = "%s:%s" % (username, password)
+
+ if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ:
+ return OneServer(url, session=session, context=no_ssl_validation_context)
+ else:
+ return OneServer(url, session)
+
+ def close_one_client(self):
+ """
+ Close the pyone session.
+ """
+ self.one.server_close()
+
+ def fail(self, msg):
+ """
+ Utility failure method, will ensure pyone is properly closed before failing.
+ Args:
+ msg: human readable failure reason.
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.fail_json(msg=msg)
+
+ def exit(self):
+ """
+ Utility exit method, will ensure pyone is properly closed before exiting.
+
+ """
+ if hasattr(self, 'one'):
+ self.close_one_client()
+ self.module.exit_json(**self.result)
+
+ def resolve_parameters(self):
+ """
+ This method resolves parameters provided by a secondary ID to the primary ID.
+ For example, if cluster_name is present, cluster_id will be introduced by performing
+ the required resolution.
+
+ Returns: a copy of the parameters that includes the resolved parameters.
+
+ """
+
+ resolved_params = dict(self.module.params)
+
+ if 'cluster_name' in self.module.params:
+ clusters = self.one.clusterpool.info()
+ for cluster in clusters.CLUSTER:
+ if cluster.NAME == self.module.params.get('cluster_name'):
+ resolved_params['cluster_id'] = cluster.ID
+
+ return resolved_params
+
+ def is_parameter(self, name):
+ """
+ Utility method to check if a parameter was provided or is resolved
+ Args:
+ name: the parameter to check
+ """
+ if name in self.resolved_parameters:
+ return self.get_parameter(name) is not None
+ else:
+ return False
+
+ def get_parameter(self, name):
+ """
+ Utility method for accessing parameters that includes resolved ID
+ parameters from provided Name parameters.
+ """
+ return self.resolved_parameters.get(name)
+
+ def get_host_by_name(self, name):
+ '''
+ Returns a host given its name.
+ Args:
+ name: the name of the host
+
+ Returns: the host object or None if the host is absent.
+
+ '''
+ hosts = self.one.hostpool.info()
+ for h in hosts.HOST:
+ if h.NAME == name:
+ return h
+ return None
+
+ def get_cluster_by_name(self, name):
+ """
+ Returns a cluster given its name.
+ Args:
+ name: the name of the cluster
+
+ Returns: the cluster object or None if the cluster is absent.
+ """
+
+ clusters = self.one.clusterpool.info()
+ for c in clusters.CLUSTER:
+ if c.NAME == name:
+ return c
+ return None
+
+ def get_template_by_name(self, name):
+ '''
+ Returns a template given its name.
+ Args:
+ name: the name of the template
+
+ Returns: the template object or None if the template is absent.
+
+ '''
+ templates = self.one.templatepool.info()
+ for t in templates.TEMPLATE:
+ if t.NAME == name:
+ return t
+ return None
+
+ def cast_template(self, template):
+ """
+ OpenNebula handles all template elements as strings.
+ At some point a cast is performed on the types provided by the user.
+ This function mimics that transformation so that required template updates are detected properly.
+ Additionally, an array will be converted to a comma-separated list,
+ which works for labels and hopefully for something more.
+
+ Args:
+ template: the template to transform
+
+ Returns: the transformed template with data casts applied.
+ """
+
+ # TODO: check formally available data types in templates
+ # TODO: some arrays might be converted to space separated
+
+ for key in template:
+ value = template[key]
+ if isinstance(value, dict):
+ self.cast_template(template[key])
+ elif isinstance(value, list):
+ template[key] = ', '.join(value)
+ elif not isinstance(value, string_types):
+ template[key] = str(value)
+
+ def requires_template_update(self, current, desired):
+ """
+ This function helps decide whether a template update is required.
+ If a desired key is missing from the current dictionary, an update is required.
+ If the intersection of both dictionaries is not deeply equal, an update is required.
+ Args:
+ current: current template as a dictionary
+ desired: desired template as a dictionary
+
+ Returns: True if a template update is required
+ """
+
+ if not desired:
+ return False
+
+ self.cast_template(desired)
+ intersection = dict()
+ for dkey in desired.keys():
+ if dkey in current.keys():
+ intersection[dkey] = current[dkey]
+ else:
+ return True
+ return not (desired == intersection)
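+
+ # Illustrative sketch (not from the original source): with a current
+ # template {'CPU': '1', 'MEMORY': '512'} and a desired template {'CPU': 2},
+ # cast_template turns the desired value into '2', the intersection
+ # {'CPU': '1'} differs from the desired template, and True is returned.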
+
+ def wait_for_state(self, element_name, state, state_name, target_states,
+ invalid_states=None, transition_states=None,
+ wait_timeout=None):
+ """
+ Args:
+ element_name: the name of the object we are waiting for: HOST, VM, etc.
+ state: lambda that returns the current state, will be queried until target state is reached
+ state_name: lambda that returns the readable form of a given state
+ target_states: states expected to be reached
+ invalid_states: if any of these states is reached, fail
+ transition_states: when used, these are the valid states during the transition.
+ wait_timeout: timeout period in seconds. Defaults to the module's wait_timeout parameter.
+ """
+
+ if not wait_timeout:
+ wait_timeout = self.module.params.get("wait_timeout")
+
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ current_state = state()
+
+ if invalid_states and current_state in invalid_states:
+ self.fail('invalid %s state %s' % (element_name, state_name(current_state)))
+
+ if transition_states:
+ if current_state not in transition_states:
+ self.fail('invalid %s transition state %s' % (element_name, state_name(current_state)))
+
+ if current_state in target_states:
+ return True
+
+ time.sleep(self.one.server_retry_interval())
+
+ self.fail(msg="Wait timeout has expired!")
+
+ def run_module(self):
+ """
+ Trigger the start of the execution of the module.
+ """
+ try:
+ self.run(self.one, self.module, self.result)
+ except OneException as e:
+ self.fail(msg="OpenNebula Exception: %s" % e)
+
+ def run(self, one, module, result):
+ """
+ to be implemented by subclass with the actual module actions.
+ Args:
+ one: the OpenNebula XMLRPC client
+ module: the Ansible Module object
+ result: the Ansible result
+ """
+ raise NotImplementedError("Method requires implementation")
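+
+# Illustrative subclass sketch (not part of the original source); the module
+# class and its 'name' option are hypothetical:
+#
+#   class HostModule(OpenNebulaModule):
+#       def __init__(self):
+#           OpenNebulaModule.__init__(self, argument_spec=dict(name=dict(type='str', required=True)))
+#
+#       def run(self, one, module, result):
+#           host = self.get_host_by_name(self.get_parameter('name'))
+#           result['changed'] = host is None
+#
+#   HostModule().run_module()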
diff --git a/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py b/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
new file mode 100644
index 000000000..3d9c20f2a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/oracle/oci_utils.py
@@ -0,0 +1,1965 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import logging.config
+import os
+import tempfile
+# (TODO: remove next line!)
+from datetime import datetime # noqa: F401, pylint: disable=unused-import
+from operator import eq
+
+import time
+
+try:
+ import yaml # noqa: F401, pylint: disable=unused-import
+
+ import oci
+ from oci.constants import HEADER_NEXT_PAGE
+
+ from oci.exceptions import (
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ConfigFileNotFound,
+ ServiceError,
+ MaximumWaitTimeExceeded,
+ )
+ from oci.identity.identity_client import IdentityClient
+ from oci.object_storage.models import CreateBucketDetails
+ from oci.object_storage.models import UpdateBucketDetails
+ from oci.retry import RetryStrategyBuilder
+ from oci.util import to_dict, Sentinel
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.six import iteritems
+
+__version__ = "1.6.0-dev"
+
+MAX_WAIT_TIMEOUT_IN_SECONDS = 1200
+
+# If a resource is in one of these states it would be considered inactive
+DEAD_STATES = [
+ "TERMINATING",
+ "TERMINATED",
+ "FAULTY",
+ "FAILED",
+ "DELETING",
+ "DELETED",
+ "UNKNOWN_ENUM_VALUE",
+ "DETACHING",
+ "DETACHED",
+]
+
+# If a resource is in one of these states it would be considered available
+DEFAULT_READY_STATES = [
+ "AVAILABLE",
+ "ACTIVE",
+ "RUNNING",
+ "PROVISIONED",
+ "ATTACHED",
+ "ASSIGNED",
+ "SUCCEEDED",
+ "PENDING_PROVIDER",
+]
+
+# If a resource is in one of these states, it would be considered deleted
+DEFAULT_TERMINATED_STATES = ["TERMINATED", "DETACHED", "DELETED"]
+
+
+def get_common_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Return the common set of module arguments for all OCI cloud modules.
+ :param supports_create: Variable to decide whether to add options related to idempotency of create operation.
+ :param supports_wait: Variable to decide whether to add options related to waiting for completion.
+ :return: A dict with applicable module options.
+ """
+ # Note: This method is used by most OCI ansible resource modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ common_args = dict(
+ config_file_location=dict(type="str"),
+ config_profile_name=dict(type="str", default="DEFAULT"),
+ api_user=dict(type="str"),
+ api_user_fingerprint=dict(type="str", no_log=True),
+ api_user_key_file=dict(type="path"),
+ api_user_key_pass_phrase=dict(type="str", no_log=True),
+ auth_type=dict(
+ type="str",
+ required=False,
+ choices=["api_key", "instance_principal"],
+ default="api_key",
+ ),
+ tenancy=dict(type="str"),
+ region=dict(type="str"),
+ )
+
+ if supports_create:
+ common_args.update(
+ key_by=dict(type="list", elements="str", no_log=False),
+ force_create=dict(type="bool", default=False),
+ )
+
+ if supports_wait:
+ common_args.update(
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(
+ type="int", default=MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ wait_until=dict(type="str"),
+ )
+
+ return common_args
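+
+# Illustrative sketch (not part of the original source): a resource module
+# would typically extend this spec with its own options, e.g. a hypothetical
+# 'display_name' option:
+#
+#   module_args = get_common_arg_spec(supports_create=True, supports_wait=True)
+#   module_args.update(display_name=dict(type="str"))
+#   module = AnsibleModule(argument_spec=module_args)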
+
+
+def get_facts_module_arg_spec(filter_by_name=False):
+ # Note: This method is used by most OCI ansible fact modules during initialization. When making changes to this
+ # method, ensure that no `oci` python sdk dependencies are introduced in this method. This ensures that the modules
+ # can check for absence of OCI Python SDK and fail with an appropriate message. Introducing an OCI dependency in
+ # this method would break that error handling logic.
+ facts_module_arg_spec = get_common_arg_spec()
+ if filter_by_name:
+ facts_module_arg_spec.update(name=dict(type="str"))
+ else:
+ facts_module_arg_spec.update(display_name=dict(type="str"))
+ return facts_module_arg_spec
+
+
+def get_oci_config(module, service_client_class=None):
+ """Return the OCI configuration to use for all OCI API calls. The effective OCI configuration is derived by merging
+ any overrides specified for configuration attributes through Ansible module options or environment variables. The
+ order of precedence for deriving the effective configuration dict is:
+ 1. If a config file is provided, use that to set up the initial config dict.
+ 2. If a config profile is specified, use that config profile to set up the config dict.
+ 3. For each authentication attribute, check if an override is provided either through
+ a. Ansible Module option
+ b. Environment variable
+ and override the value in the config dict in that order."""
+ config = {}
+
+ config_file = module.params.get("config_file_location")
+ _debug("Config file through module options - {0} ".format(config_file))
+ if not config_file:
+ if "OCI_CONFIG_FILE" in os.environ:
+ config_file = os.environ["OCI_CONFIG_FILE"]
+ _debug(
+ "Config file through OCI_CONFIG_FILE environment variable - {0}".format(
+ config_file
+ )
+ )
+ else:
+ config_file = "~/.oci/config"
+ _debug("Config file (fallback) - {0} ".format(config_file))
+
+ config_profile = module.params.get("config_profile_name")
+ if not config_profile:
+ if "OCI_CONFIG_PROFILE" in os.environ:
+ config_profile = os.environ["OCI_CONFIG_PROFILE"]
+ else:
+ config_profile = "DEFAULT"
+ try:
+ config = oci.config.from_file(
+ file_location=config_file, profile_name=config_profile
+ )
+ except (
+ ConfigFileNotFound,
+ InvalidConfig,
+ InvalidPrivateKey,
+ MissingPrivateKeyPassphrase,
+ ) as ex:
+ if not _is_instance_principal_auth(module):
+ # When auth_type is not instance_principal, config file is required
+ module.fail_json(msg=str(ex))
+ else:
+ _debug(
+ "Ignore {0} as the auth_type is set to instance_principal".format(
+ str(ex)
+ )
+ )
+ # if instance_principal auth is used, an empty 'config' map is used below.
+
+ config["additional_user_agent"] = "Oracle-Ansible/{0}".format(__version__)
+ # Merge any overrides through other IAM options
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user",
+ env_var_name="OCI_USER_ID",
+ config_attr_name="user",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_fingerprint",
+ env_var_name="OCI_USER_FINGERPRINT",
+ config_attr_name="fingerprint",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_file",
+ env_var_name="OCI_USER_KEY_FILE",
+ config_attr_name="key_file",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="api_user_key_pass_phrase",
+ env_var_name="OCI_USER_KEY_PASS_PHRASE",
+ config_attr_name="pass_phrase",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="tenancy",
+ env_var_name="OCI_TENANCY",
+ config_attr_name="tenancy",
+ )
+ _merge_auth_option(
+ config,
+ module,
+ module_option_name="region",
+ env_var_name="OCI_REGION",
+ config_attr_name="region",
+ )
+
+ # Redirect calls to home region for IAM service.
+ do_not_redirect = module.params.get(
+ "do_not_redirect_to_home_region", False
+ ) or os.environ.get("OCI_IDENTITY_DO_NOT_REDIRECT_TO_HOME_REGION")
+ if service_client_class == IdentityClient and not do_not_redirect:
+ _debug("Region passed for module invocation - {0} ".format(config["region"]))
+ identity_client = IdentityClient(config)
+ region_subscriptions = identity_client.list_region_subscriptions(
+ config["tenancy"]
+ ).data
+ # Replace the region in the config with the home region.
+ [config["region"]] = [
+ rs.region_name for rs in region_subscriptions if rs.is_home_region is True
+ ]
+ _debug(
+ "Setting region in the config to home region - {0} ".format(
+ config["region"]
+ )
+ )
+
+ return config
+
+
+def create_service_client(module, service_client_class):
+ """
+ Creates a service client using the common module options provided by the user.
+ :param module: An AnsibleModule that represents user provided options for a Task
+ :param service_client_class: A class that represents a client to an OCI Service
+ :return: A fully configured client
+ """
+ config = get_oci_config(module, service_client_class)
+ kwargs = {}
+
+ if _is_instance_principal_auth(module):
+ try:
+ signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
+ except Exception as ex:
+ message = (
+ "Failed retrieving certificates from localhost. Instance principal based authentication is only"
+ "possible from within OCI compute instances. Exception: {0}".format(
+ str(ex)
+ )
+ )
+ module.fail_json(msg=message)
+
+ kwargs["signer"] = signer
+
+ # XXX: Validate configuration -- this may be redundant, as all Client constructors perform a validation
+ try:
+ oci.config.validate_config(config, **kwargs)
+ except oci.exceptions.InvalidConfig as ic:
+ module.fail_json(
+ msg="Invalid OCI configuration. Exception: {0}".format(str(ic))
+ )
+
+ # Create service client class with the signer
+ client = service_client_class(config, **kwargs)
+
+ return client
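+
+# Illustrative usage sketch (not part of the original source), assuming the
+# OCI SDK's VirtualNetworkClient is the desired service client:
+#
+#   from oci.core import VirtualNetworkClient
+#   virtual_network_client = create_service_client(module, VirtualNetworkClient)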
+
+
+def _is_instance_principal_auth(module):
+ # check if auth type is overridden via module params
+ instance_principal_auth = (
+ "auth_type" in module.params
+ and module.params["auth_type"] == "instance_principal"
+ )
+ if not instance_principal_auth:
+ instance_principal_auth = (
+ "OCI_ANSIBLE_AUTH_TYPE" in os.environ
+ and os.environ["OCI_ANSIBLE_AUTH_TYPE"] == "instance_principal"
+ )
+ return instance_principal_auth
+
+
+def _merge_auth_option(
+ config, module, module_option_name, env_var_name, config_attr_name
+):
+ """Merge the values for an authentication attribute from ansible module options and
+ environment variables with the values specified in a configuration file"""
+ _debug("Merging {0}".format(module_option_name))
+
+ auth_attribute = module.params.get(module_option_name)
+ _debug(
+ "\t Ansible module option {0} = {1}".format(module_option_name, auth_attribute)
+ )
+ if not auth_attribute:
+ if env_var_name in os.environ:
+ auth_attribute = os.environ[env_var_name]
+ _debug(
+ "\t Environment variable {0} = {1}".format(env_var_name, auth_attribute)
+ )
+
+ # An authentication attribute has been provided through an env-variable or an ansible
+ # option and must override the corresponding attribute's value specified in the
+ # config file [profile].
+ if auth_attribute:
+ _debug(
+ "Updating config attribute {0} -> {1} ".format(
+ config_attr_name, auth_attribute
+ )
+ )
+ config.update({config_attr_name: auth_attribute})
+
+
+def bucket_details_factory(bucket_details_type, module):
+ bucket_details = None
+ if bucket_details_type == "create":
+ bucket_details = CreateBucketDetails()
+ elif bucket_details_type == "update":
+ bucket_details = UpdateBucketDetails()
+
+ bucket_details.compartment_id = module.params["compartment_id"]
+ bucket_details.name = module.params["name"]
+ bucket_details.public_access_type = module.params["public_access_type"]
+ bucket_details.metadata = module.params["metadata"]
+
+ return bucket_details
+
+
+def filter_resources(all_resources, filter_params):
+ if not filter_params:
+ return all_resources
+ filtered_resources = []
+ filtered_resources.extend(
+ [
+ resource
+ for resource in all_resources
+ for key, value in filter_params.items()
+ if getattr(resource, key) == value
+ ]
+ )
+ return filtered_resources
+
+
+def list_all_resources(target_fn, **kwargs):
+ """
+ Return all resources after paging through all results returned by target_fn. If a `display_name` or `name` is
+ provided as a kwarg, then only resources matching the specified name are returned.
+ :param target_fn: The target OCI SDK paged function to call
+ :param kwargs: All arguments that the OCI SDK paged function expects
+ :return: List of all objects returned by target_fn
+ :raises ServiceError: When the Service returned an Error response
+ :raises MaximumWaitTimeExceeded: When maximum wait time is exceeded while invoking target_fn
+ """
+ filter_params = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ except ValueError as ex:
+ if "unknown kwargs" in str(ex):
+ if "display_name" in kwargs:
+ if kwargs["display_name"]:
+ filter_params = {"display_name": kwargs["display_name"]}
+ del kwargs["display_name"]
+ elif "name" in kwargs:
+ if kwargs["name"]:
+ filter_params = {"name": kwargs["name"]}
+ del kwargs["name"]
+ response = call_with_backoff(target_fn, **kwargs)
+
+ existing_resources = response.data
+ while response.has_next_page:
+ kwargs.update(page=response.headers.get(HEADER_NEXT_PAGE))
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resources += response.data
+
+ # If the underlying SDK Service list* method doesn't support filtering by name or display_name, filter the resources
+ # and return the matching list of resources
+ return filter_resources(existing_resources, filter_params)
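+
+# Illustrative usage sketch (not part of the original source); the client and
+# OCIDs are assumed to exist:
+#
+#   subnets = list_all_resources(virtual_network_client.list_subnets,
+#                                compartment_id=compartment_ocid,
+#                                vcn_id=vcn_ocid,
+#                                display_name="my-subnet")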
+
+
+def _debug(s):
+ get_logger("oci_utils").debug(s)
+
+
+def get_logger(module_name):
+ oci_logging = setup_logging()
+ return oci_logging.getLogger(module_name)
+
+
+def setup_logging(
+ default_level="INFO",
+):
+ """Setup logging configuration"""
+ env_log_path = "LOG_PATH"
+ env_log_level = "LOG_LEVEL"
+
+ default_log_path = tempfile.gettempdir()
+ log_path = os.getenv(env_log_path, default_log_path)
+ log_level_str = os.getenv(env_log_level, default_level)
+ log_level = logging.getLevelName(log_level_str)
+ log_file_path = os.path.join(log_path, "oci_ansible_module.log")
+ logging.basicConfig(filename=log_file_path, filemode="a", level=log_level)
+ return logging
+
+
+def check_and_update_attributes(
+ target_instance, attr_name, input_value, existing_value, changed
+):
+ """
+ This function checks the difference between two resource attributes of literal types and sets the attribute
+ value in the target instance holding the attribute.
+ :param target_instance: The instance which contains the attribute whose value is to be compared
+ :param attr_name: Name of the attribute whose value is to be compared
+ :param input_value: The value of the attribute provided by user
+ :param existing_value: The value of the attribute in the existing resource
+ :param changed: Flag to indicate whether there is any difference between the values
+ :return: Returns a boolean value indicating whether there is any difference between the values
+ """
+ if input_value is not None and not eq(input_value, existing_value):
+ changed = True
+ target_instance.__setattr__(attr_name, input_value)
+ else:
+ target_instance.__setattr__(attr_name, existing_value)
+ return changed
+
+
+def check_and_update_resource(
+ resource_type,
+ get_fn,
+ kwargs_get,
+ update_fn,
+ primitive_params_update,
+ kwargs_non_primitive_update,
+ module,
+ update_attributes,
+ client=None,
+ sub_attributes_of_update_model=None,
+ wait_applicable=True,
+ states=None,
+):
+
+ """
+ This function handles update operation on a resource. It checks whether update is required and accordingly returns
+ the resource and the changed status.
+ :param wait_applicable: Indicates if the resource supports wait
+ :param client: The resource Client class to use to perform the wait checks. This param must be specified if
+ wait_applicable is True
+ :param resource_type: The type of the resource. e.g. "private_ip"
+ :param get_fn: Function used to get the resource. e.g. virtual_network_client.get_private_ip
+ :param kwargs_get: Dictionary containing the arguments to be used to call get function.
+ e.g. {"private_ip_id": module.params["private_ip_id"]}
+ :param update_fn: Function used to update the resource. e.g virtual_network_client.update_private_ip
+ :param primitive_params_update: List of primitive parameters used for update function. e.g. ['private_ip_id']
+ :param kwargs_non_primitive_update: Dictionary containing the non-primitive arguments to be used to call get
+ function with key as the non-primitive argument type & value as the name of the non-primitive argument to be passed
+ to the update function. e.g. {UpdatePrivateIpDetails: "update_private_ip_details"}
+ :param module: Instance of AnsibleModule
+ :param update_attributes: Attributes in update model.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param sub_attributes_of_update_model: Dictionary of non-primitive sub-attributes of update model. for example,
+ {'services': [ServiceIdRequestDetails()]} as in UpdateServiceGatewayDetails.
+ :return: Returns a dictionary containing the "changed" status and the resource.
+ """
+ try:
+ result = dict(changed=False)
+ attributes_to_update, resource = get_attr_to_update(
+ get_fn, kwargs_get, module, update_attributes
+ )
+
+ if attributes_to_update:
+ kwargs_update = get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model,
+ )
+ resource = call_with_backoff(update_fn, **kwargs_update).data
+ if wait_applicable:
+ if client is None:
+ module.fail_json(
+ msg="wait_applicable is True, but client is not specified."
+ )
+ resource = wait_for_resource_lifecycle_state(
+ client, module, True, kwargs_get, get_fn, None, resource, states
+ )
+ result["changed"] = True
+ result[resource_type] = to_dict(resource)
+ return result
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def get_kwargs_update(
+ attributes_to_update,
+ kwargs_non_primitive_update,
+ module,
+ primitive_params_update,
+ sub_attributes_of_update_model=None,
+):
+ kwargs_update = dict()
+ for param in primitive_params_update:
+ kwargs_update[param] = module.params[param]
+ for param in kwargs_non_primitive_update:
+ update_object = param()
+ for key in update_object.attribute_map:
+ if key in attributes_to_update:
+ if (
+ sub_attributes_of_update_model
+ and key in sub_attributes_of_update_model
+ ):
+ setattr(update_object, key, sub_attributes_of_update_model[key])
+ else:
+ setattr(update_object, key, module.params[key])
+ kwargs_update[kwargs_non_primitive_update[param]] = update_object
+ return kwargs_update
+
+
+def is_dictionary_subset(sub, super_dict):
+ """
+ This function checks if the `sub` dictionary is a subset of the `super_dict` dictionary.
+ :param sub: subset dictionary, for example user_provided_attr_value.
+ :param super_dict: super dictionary, for example resources_attr_value.
+ :return: True if sub is contained in super_dict.
+ """
+ for key in sub:
+ if sub[key] != super_dict[key]:
+ return False
+ return True
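+
+# Illustrative sketch (not from the original source): every key of `sub` must
+# match in `super_dict`; extra keys in `super_dict` are ignored:
+#   is_dictionary_subset({'a': 1}, {'a': 1, 'b': 2})  -> True
+#   is_dictionary_subset({'a': 2}, {'a': 1, 'b': 2})  -> False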
+
+
+def are_lists_equal(s, t):
+ if s is None and t is None:
+ return True
+
+ if s is None or t is None or len(s) != len(t):
+ return False
+
+ if len(s) == 0:
+ return True
+
+ s = to_dict(s)
+ t = to_dict(t)
+
+ if type(s[0]) == dict:
+ # Handle list of dicts. Dictionary returned by the API may have additional keys. For example, a get call on
+ # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key
+ # `service_name` which is not provided in the list of `services` by a user while making an update call; only
+ # `service_id` is provided by the user in the update call.
+ sorted_s = sort_list_of_dictionary(s)
+ sorted_t = sort_list_of_dictionary(t)
+ for index, d in enumerate(sorted_s):
+ if not is_dictionary_subset(d, sorted_t[index]):
+ return False
+ return True
+ else:
+ # Handle lists of primitive types.
+ try:
+ for elem in s:
+ t.remove(elem)
+ except ValueError:
+ return False
+ return not t
+
+
+def get_attr_to_update(get_fn, kwargs_get, module, update_attributes):
+ try:
+ resource = call_with_backoff(get_fn, **kwargs_get).data
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ attributes_to_update = []
+
+ for attr in update_attributes:
+ resources_attr_value = getattr(resource, attr, None)
+ user_provided_attr_value = module.params.get(attr, None)
+
+ unequal_list_attr = (
+ type(resources_attr_value) == list or type(user_provided_attr_value) == list
+ ) and not are_lists_equal(user_provided_attr_value, resources_attr_value)
+ unequal_attr = type(resources_attr_value) != list and to_dict(
+ resources_attr_value
+ ) != to_dict(user_provided_attr_value)
+ if unequal_list_attr or unequal_attr:
+ # only update if the user has explicitly provided a value for this attribute
+ # otherwise, no update is necessary because the user hasn't expressed a particular
+ # value for that attribute
+ if module.params.get(attr, None):
+ attributes_to_update.append(attr)
+
+ return attributes_to_update, resource
+
+
+def get_taggable_arg_spec(supports_create=False, supports_wait=False):
+ """
+ Returns an arg_spec that is valid for taggable OCI resources.
+ :return: A dict that represents an ansible arg spec that builds over the common_arg_spec and adds free-form and
+ defined tags.
+ """
+ tag_arg_spec = get_common_arg_spec(supports_create, supports_wait)
+ tag_arg_spec.update(
+ dict(freeform_tags=dict(type="dict"), defined_tags=dict(type="dict"))
+ )
+ return tag_arg_spec
+
+
+def add_tags_to_model_from_module(model, module):
+ """
+ Adds free-form and defined tags from an ansible module to a resource model
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: The updated model class with the tags specified by the user.
+ """
+ freeform_tags = module.params.get("freeform_tags", None)
+ defined_tags = module.params.get("defined_tags", None)
+ return add_tags_to_model_class(model, freeform_tags, defined_tags)
+
+
+def add_tags_to_model_class(model, freeform_tags, defined_tags):
+ """
+ Add free-form and defined tags to a resource model.
+ :param model: A resource model instance that supports 'freeform_tags' and 'defined_tags' as attributes
+ :param freeform_tags: A dict representing the freeform_tags to be applied to the model
+ :param defined_tags: A dict representing the defined_tags to be applied to the model
+ :return: The updated model class with the tags specified by the user
+ """
+ try:
+ if freeform_tags is not None:
+ _debug("Model {0} set freeform tags to {1}".format(model, freeform_tags))
+ model.__setattr__("freeform_tags", freeform_tags)
+
+ if defined_tags is not None:
+ _debug("Model {0} set defined tags to {1}".format(model, defined_tags))
+ model.__setattr__("defined_tags", defined_tags)
+ except AttributeError as ae:
+ _debug("Model {0} doesn't support tags. Error {1}".format(model, ae))
+
+ return model
+
+
+def check_and_create_resource(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ list_fn,
+ kwargs_list,
+ module,
+ model,
+ existing_resources=None,
+ exclude_attributes=None,
+ dead_states=None,
+ default_attribute_values=None,
+ supports_sort_by_time_created=True,
+):
+ """
+ This function checks whether there is a resource with the same attributes as specified in the module options. If not,
+ it creates and returns the resource.
+ :param resource_type: Type of the resource to be created.
+ :param create_fn: Function used in the module to handle create operation. The function should return a dict with
+ keys as resource & changed.
+ :param kwargs_create: Dictionary of parameters for create operation.
+ :param list_fn: List function in sdk to list all the resources of type resource_type.
+ :param kwargs_list: Dictionary of parameters for list operation.
+ :param module: Instance of AnsibleModule
+ :param model: Model used to create a resource.
+ :param exclude_attributes: The attributes which should not be used to distinguish the resource. e.g. display_name,
+ dns_label.
+ :param dead_states: List of states which can't transition to any of the usable states of the resource. This defaults
+ to ["TERMINATING", "TERMINATED", "FAULTY", "FAILED", "DELETING", "DELETED", "UNKNOWN_ENUM_VALUE"]
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ if module.params.get("force_create", None):
+ _debug("Force creating {0}".format(resource_type))
+ result = call_with_backoff(create_fn, **kwargs_create)
+ return result
+
+ # Get the existing resources list sorted by creation time in descending order. Return the latest matching resource
+ # in case of multiple resource matches.
+ if exclude_attributes is None:
+ exclude_attributes = {}
+ if default_attribute_values is None:
+ default_attribute_values = {}
+ try:
+ if existing_resources is None:
+ if supports_sort_by_time_created:
+ kwargs_list["sort_by"] = "TIMECREATED"
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ except ValueError:
+ # list_fn doesn't support sort_by, so remove the sort_by key in kwargs_list and retry
+ kwargs_list.pop("sort_by", None)
+ try:
+ existing_resources = list_all_resources(list_fn, **kwargs_list)
+ # Handle errors like 404 due to bad arguments to the list_all_resources call.
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+ result = dict()
+
+ attributes_to_consider = _get_attributes_to_consider(
+ exclude_attributes, model, module
+ )
+ if "defined_tags" not in default_attribute_values:
+ default_attribute_values["defined_tags"] = {}
+ resource_matched = None
+ _debug(
+ "Trying to find a match within {0} existing resources".format(
+ len(existing_resources)
+ )
+ )
+
+ for resource in existing_resources:
+ if _is_resource_active(resource, dead_states):
+ _debug(
+ "Comparing user specified values {0} against an existing resource's "
+ "values {1}".format(module.params, to_dict(resource))
+ )
+ if does_existing_resource_match_user_inputs(
+ to_dict(resource),
+ module,
+ attributes_to_consider,
+ exclude_attributes,
+ default_attribute_values,
+ ):
+ resource_matched = to_dict(resource)
+ break
+
+ if resource_matched:
+ _debug("Resource with same attributes found: {0}.".format(resource_matched))
+ result[resource_type] = resource_matched
+ result["changed"] = False
+ else:
+ _debug("No matching resource found. Attempting to create a new resource.")
+ result = call_with_backoff(create_fn, **kwargs_create)
+
+ return result
+
+
+def _get_attributes_to_consider(exclude_attributes, model, module):
+ """
+ Determine the attributes to detect if an existing resource already matches the requested resource state
+ :param exclude_attributes: Attributes to not consider for matching
+ :param model: The model class used to create the Resource
+ :param module: An instance of AnsibleModule that contains user's desires around a resource's state
+ :return: A list of attributes that needs to be matched
+ """
+
+ # If a user explicitly requests us to match only against a set of attributes (using 'key_by'), use that as the list
+ # of attributes to consider for matching.
+ if "key_by" in module.params and module.params["key_by"] is not None:
+ attributes_to_consider = module.params["key_by"]
+ else:
+ # Consider all attributes except freeform_tags as freeform tags do not distinguish a resource.
+ attributes_to_consider = list(model.attribute_map)
+ if "freeform_tags" in attributes_to_consider:
+ attributes_to_consider.remove("freeform_tags")
+ # Temporarily removing node_count as the existing resource does not reflect it
+ if "node_count" in attributes_to_consider:
+ attributes_to_consider.remove("node_count")
+ _debug("attributes to consider: {0}".format(attributes_to_consider))
+ return attributes_to_consider
+
+
+def _is_resource_active(resource, dead_states):
+ if dead_states is None:
+ dead_states = DEAD_STATES
+
+ if "lifecycle_state" not in resource.attribute_map:
+ return True
+ return resource.lifecycle_state not in dead_states
+
+
+def is_attr_assigned_default(default_attribute_values, attr, assigned_value):
+ if not default_attribute_values:
+ return False
+
+ if attr in default_attribute_values:
+ default_val_for_attr = default_attribute_values.get(attr, None)
+ if isinstance(default_val_for_attr, dict):
+ # When default value for a resource's attribute is empty dictionary, check if the corresponding value of the
+ # existing resource's attribute is also empty.
+ if not default_val_for_attr:
+ return not assigned_value
+ # only compare keys that are in default_attribute_values[attr]
+ # this is to ensure forward compatibility when the API returns new keys that are not known during
+ # the time when the module author provided default values for the attribute
+ keys = {}
+ for k, v in iteritems(assigned_value):
+ if k in default_val_for_attr:
+ keys[k] = v
+
+ return default_val_for_attr == keys
+ # non-dict, normal comparison
+ return default_val_for_attr == assigned_value
+ else:
+ # module author has not provided a default value for attr
+ return True
+
+
+def create_resource(resource_type, create_fn, kwargs_create, module):
+ """
+ Create an OCI resource
+ :param resource_type: Type of the resource to be created. e.g.: "vcn"
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn
+ :param module: Instance of AnsibleModule
+ """
+ result = dict(changed=False)
+ try:
+ resource = to_dict(call_with_backoff(create_fn, **kwargs_create).data)
+ _debug("Created {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+ result[resource_type] = resource
+ return result
+ except (ServiceError, TypeError) as ex:
+ module.fail_json(msg=str(ex))
+
+
+def does_existing_resource_match_user_inputs(
+ existing_resource,
+ module,
+ attributes_to_compare,
+ exclude_attributes,
+ default_attribute_values=None,
+):
+ """
+ Check if 'attributes_to_compare' in an existing_resource match the desired state provided by a user in 'module'.
+ :param existing_resource: A dictionary representing an existing resource's values.
+ :param module: The AnsibleModule representing the options provided by the user.
+ :param attributes_to_compare: A list of attributes of a resource that are used to compare if an existing resource
+ matches the desire state of the resource expressed by the user in 'module'.
+ :param exclude_attributes: The attributes, that a module author provides, which should not be used to match the
+ resource. This dictionary typically includes: (a) attributes which are initialized with dynamic default values
+ like 'display_name', 'security_list_ids' for subnets and (b) attributes that don't have any defaults like
+ 'dns_label' in VCNs. The attributes are part of keys and 'True' is the value for all existing keys.
+ :param default_attribute_values: A dictionary containing default values for attributes.
+ :return: True if the values for the list of attributes is the same in the existing_resource and module instances.
+ """
+ if not default_attribute_values:
+ default_attribute_values = {}
+ for attr in attributes_to_compare:
+ attribute_with_default_metadata = None
+ if attr in existing_resource:
+ resources_value_for_attr = existing_resource[attr]
+ # Check if the user has explicitly provided the value for attr.
+ user_provided_value_for_attr = _get_user_provided_value(module, attr)
+ if user_provided_value_for_attr is not None:
+ res = [True]
+ check_if_user_value_matches_resources_attr(
+ attr,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ if not res[0]:
+ _debug(
+ "Mismatch on attribute '{0}'. User provided value is {1} & existing resource's value"
+ "is {2}.".format(
+ attr, user_provided_value_for_attr, resources_value_for_attr
+ )
+ )
+ return False
+ else:
+ # If the user has not explicitly provided the value for attr and attr is in exclude_list, we can
+ # consider this as a 'pass'. For example, if an attribute 'display_name' is not specified by the user and
+ # that attribute is in the 'exclude_list' according to the module author (not the user), then exclude
+ # it from the comparison.
+ if (
+ exclude_attributes.get(attr) is None
+ and resources_value_for_attr is not None
+ ):
+ if module.argument_spec.get(attr):
+ attribute_with_default_metadata = module.argument_spec.get(attr)
+ default_attribute_value = attribute_with_default_metadata.get(
+ "default", None
+ )
+ if default_attribute_value is not None:
+ if existing_resource[attr] != default_attribute_value:
+ return False
+ # Check if attr has a value that is not default. For example, a custom `security_list_id`
+ # is assigned to the subnet's attribute `security_list_ids`. If the attribute is assigned a
+ # value that is not the default, then it must be considered a mismatch and false returned.
+ elif not is_attr_assigned_default(
+ default_attribute_values, attr, existing_resource[attr]
+ ):
+ return False
+
+ else:
+ _debug(
+ "Attribute {0} is in the create model of resource {1}"
+ "but doesn't exist in the get model of the resource".format(
+ attr, existing_resource.__class__
+ )
+ )
+ return True
+
+
+def tuplize(d):
+ """
+ This function takes a dictionary and converts it to a list of tuples recursively.
+ :param d: A dictionary.
+ :return: List of tuples.
+ """
+ list_of_tuples = []
+ key_list = sorted(list(d.keys()))
+ for key in key_list:
+ if type(d[key]) == list:
+ # Convert a value which is itself a list of dict to a list of tuples.
+ if d[key] and type(d[key][0]) == dict:
+ sub_tuples = []
+ for sub_dict in d[key]:
+ sub_tuples.append(tuplize(sub_dict))
+ # To handle comparing two None values while creating a tuple for a {key: value}, make the first element
+ # in the tuple a boolean `True` if the value is None, so that attributes with a None value are placed last
+ # in the sorted list.
+ list_of_tuples.append((sub_tuples is None, key, sub_tuples))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ elif type(d[key]) == dict:
+ tupled_value = tuplize(d[key])
+ list_of_tuples.append((tupled_value is None, key, tupled_value))
+ else:
+ list_of_tuples.append((d[key] is None, key, d[key]))
+ return list_of_tuples
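+
+# Illustrative sketch (not from the original source): keys are sorted and each
+# value is prefixed with an "is None" flag so None values sort last:
+#   tuplize({'b': 1, 'a': None})  -> [(True, 'a', None), (False, 'b', 1)]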
+
+
+def get_key_for_comparing_dict(d):
+ tuple_form_of_d = tuplize(d)
+ return tuple_form_of_d
+
+
+def sort_dictionary(d):
+ """
+ This function sorts values of a dictionary recursively.
+ :param d: A dictionary.
+ :return: Dictionary with sorted elements.
+ """
+ sorted_d = {}
+ for key in d:
+ if type(d[key]) == list:
+ if d[key] and type(d[key][0]) == dict:
+ sorted_value = sort_list_of_dictionary(d[key])
+ sorted_d[key] = sorted_value
+ else:
+ sorted_d[key] = sorted(d[key])
+ elif type(d[key]) == dict:
+ sorted_d[key] = sort_dictionary(d[key])
+ else:
+ sorted_d[key] = d[key]
+ return sorted_d
+
+
+def sort_list_of_dictionary(list_of_dict):
+ """
+ This function sorts a list of dictionaries. It first sorts each value of the dictionary and then sorts the list of
+ individually sorted dictionaries. For sorting, each dictionary's tuple equivalent is used.
+ :param list_of_dict: List of dictionaries.
+ :return: A sorted list of dictionaries.
+ """
+ list_with_sorted_dict = []
+ for d in list_of_dict:
+ sorted_d = sort_dictionary(d)
+ list_with_sorted_dict.append(sorted_d)
+ return sorted(list_with_sorted_dict, key=get_key_for_comparing_dict)
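+
+# Illustrative sketch (not from the original source):
+#   sort_list_of_dictionary([{'service_id': 'B'}, {'service_id': 'A'}])
+#   -> [{'service_id': 'A'}, {'service_id': 'B'}]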
+
+
+def check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr,
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+):
+ if isinstance(default_attribute_values.get(attribute_name), dict):
+ default_attribute_values = default_attribute_values.get(attribute_name)
+
+ if isinstance(exclude_attributes.get(attribute_name), dict):
+ exclude_attributes = exclude_attributes.get(attribute_name)
+
+ if isinstance(resources_value_for_attr, list) or isinstance(
+ user_provided_value_for_attr, list
+ ):
+ # Perform a deep equivalence check for a List attribute
+ if exclude_attributes.get(attribute_name):
+ return
+ if (
+ user_provided_value_for_attr is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ user_provided_value_for_attr = default_attribute_values.get(attribute_name)
+
+ if resources_value_for_attr is None and user_provided_value_for_attr is None:
+ return
+
+ if (
+ resources_value_for_attr is None
+ or user_provided_value_for_attr is None
+ ):
+ res[0] = False
+ return
+
+ if (
+ resources_value_for_attr is not None
+ and user_provided_value_for_attr is not None
+ and len(resources_value_for_attr) != len(user_provided_value_for_attr)
+ ):
+ res[0] = False
+ return
+
+ if (
+ user_provided_value_for_attr
+ and type(user_provided_value_for_attr[0]) == dict
+ ):
+ # Process a list of dict
+ sorted_user_provided_value_for_attr = sort_list_of_dictionary(
+ user_provided_value_for_attr
+ )
+ sorted_resources_value_for_attr = sort_list_of_dictionary(
+ resources_value_for_attr
+ )
+
+ else:
+ sorted_user_provided_value_for_attr = sorted(user_provided_value_for_attr)
+ sorted_resources_value_for_attr = sorted(resources_value_for_attr)
+
+ # Walk through the sorted list values of the resource's value for this attribute, and compare against user
+ # provided values.
+ for index, resources_value_for_attr_part in enumerate(
+ sorted_resources_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ attribute_name,
+ resources_value_for_attr_part,
+ sorted_user_provided_value_for_attr[index],
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+
+ elif isinstance(resources_value_for_attr, dict):
+ # Perform a deep equivalence check for dict typed attributes
+
+ if not resources_value_for_attr and user_provided_value_for_attr:
+ res[0] = False
+ for key in resources_value_for_attr:
+ if (
+ user_provided_value_for_attr is not None
+ and user_provided_value_for_attr
+ ):
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr.get(key),
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ if exclude_attributes.get(key) is None:
+ if default_attribute_values.get(key) is not None:
+ user_provided_value_for_attr = default_attribute_values.get(key)
+ check_if_user_value_matches_resources_attr(
+ key,
+ resources_value_for_attr.get(key),
+ user_provided_value_for_attr,
+ exclude_attributes,
+ default_attribute_values,
+ res,
+ )
+ else:
+ res[0] = is_attr_assigned_default(
+ default_attribute_values,
+ attribute_name,
+ resources_value_for_attr.get(key),
+ )
+
+ elif resources_value_for_attr != user_provided_value_for_attr:
+ if (
+ exclude_attributes.get(attribute_name) is None
+ and default_attribute_values.get(attribute_name) is not None
+ ):
+ # As the user has not specified a value for an optional attribute, if the existing resource's
+ # current state has a DEFAULT value for that attribute, we must not consider this incongruence
+ # an issue and continue with other checks. If the existing resource's value for the attribute
+ # is not the default value, then the existing resource is not a match.
+ if not is_attr_assigned_default(
+ default_attribute_values, attribute_name, resources_value_for_attr
+ ):
+ res[0] = False
+ elif user_provided_value_for_attr is not None:
+ res[0] = False
+
+
+def are_dicts_equal(
+ option_name,
+ existing_resource_dict,
+ user_provided_dict,
+ exclude_list,
+ default_attribute_values,
+):
+ if not user_provided_dict:
+ # User has not provided a value for the map option. In this case, the user hasn't expressed an intent around
+ # this optional attribute. Check if existing_resource_dict matches default.
+ # For example, source_details attribute in volume is optional and does not have any defaults.
+ return is_attr_assigned_default(
+ default_attribute_values, option_name, existing_resource_dict
+ )
+
+ # If the existing resource has an empty dict, while the user has provided entries, dicts are not equal
+ if not existing_resource_dict and user_provided_dict:
+ return False
+
+ # check if all keys of an existing resource's dict attribute matches user-provided dict's entries
+ for sub_attr in existing_resource_dict:
+ # If user has provided value for sub-attribute, then compare it with corresponding key in existing resource.
+ if sub_attr in user_provided_dict:
+ if existing_resource_dict[sub_attr] != user_provided_dict[sub_attr]:
+ _debug(
+ "Failed to match: Existing resource's attr {0} sub-attr {1} value is {2}, while user "
+ "provided value is {3}".format(
+ option_name,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ user_provided_dict.get(sub_attr, None),
+ )
+ )
+ return False
+
+ # If sub_attr not provided by user, check if the sub-attribute value of existing resource matches default value.
+ else:
+ if not should_dict_attr_be_excluded(option_name, sub_attr, exclude_list):
+ default_value_for_dict_attr = default_attribute_values.get(
+ option_name, None
+ )
+ if default_value_for_dict_attr:
+ # if a default value for the sub-attr was provided by the module author, fail if the existing
+ # resource's value for the sub-attr is not the default
+ if not is_attr_assigned_default(
+ default_value_for_dict_attr,
+ sub_attr,
+ existing_resource_dict[sub_attr],
+ ):
+ return False
+ else:
+ # No default value specified by module author for sub_attr
+ _debug(
+ "Consider as match: Existing resource's attr {0} sub-attr {1} value is {2}, while user did"
+ "not provide a value for it. The module author also has not provided a default value for it"
+ "or marked it for exclusion. So ignoring this attribute during matching and continuing with"
+ "other checks".format(
+ option_name, sub_attr, existing_resource_dict[sub_attr]
+ )
+ )
+
+ return True
+
+
+def should_dict_attr_be_excluded(map_option_name, option_key, exclude_list):
+ """An entry for the Exclude list for excluding a map's key is specified as a dict with the map option name as the
+ key, and the value as a list of keys to be excluded within that map. For example, if the keys "k1" and "k2" of a map
+ option named "m1" needs to be excluded, the exclude list must have an entry {'m1': ['k1','k2']} """
+ for exclude_item in exclude_list:
+ if isinstance(exclude_item, dict):
+ if map_option_name in exclude_item:
+ if option_key in exclude_item[map_option_name]:
+ return True
+ return False
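+
+# Illustrative sketch (not from the original source), using the exclude-list
+# entry format described in the docstring above:
+#   should_dict_attr_be_excluded('m1', 'k1', [{'m1': ['k1', 'k2']}])  -> True
+#   should_dict_attr_be_excluded('m1', 'k3', [{'m1': ['k1', 'k2']}])  -> False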
+
+
+def create_and_wait(
+ resource_type,
+ client,
+ create_fn,
+ kwargs_create,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to create a resource and wait for the resource to get into the state as specified in the module
+ options.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param create_fn: Function in the SDK to create the resource. e.g. virtual_network_client.create_vcn
+ :param kwargs_create: Dictionary containing arguments to be used to call the create function create_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param kwargs_get: Dictionary containing arguments to be used to call a multi-argument `get` function
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ create_fn,
+ kwargs_create,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def update_and_wait(
+ resource_type,
+ client,
+ update_fn,
+ kwargs_update,
+ get_fn,
+ get_param,
+ module,
+ states=None,
+ wait_applicable=True,
+ kwargs_get=None,
+):
+ """
+ A utility function to update a resource and wait for the resource to get into the state as specified in the module
+ options. It mirrors create_and_wait: apart from the function and its arguments, everything else is similar.
+ :param wait_applicable: Specifies if wait for update is applicable for this resource
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param update_fn: Function in the SDK to update the resource. e.g. virtual_network_client.update_vcn
+ :param kwargs_update: Dictionary containing arguments to be used to call the update function update_fn.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param module: Instance of AnsibleModule.
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param states: List of lifecycle states to watch for while waiting after update_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ try:
+ return create_or_update_resource_and_wait(
+ resource_type,
+ update_fn,
+ kwargs_update,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ kwargs_get=kwargs_get,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
+
+
+def create_or_update_resource_and_wait(
+ resource_type,
+ function,
+ kwargs_function,
+ module,
+ wait_applicable,
+ get_fn,
+ get_param,
+ states,
+ client,
+ update_target_resource_id_in_get_param=False,
+ kwargs_get=None,
+):
+ """
+ A utility function to create or update a resource and wait for the resource to get into the state as specified in
+ the module options.
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param function: Function in the SDK to create or update the resource.
+ :param kwargs_function: Dictionary containing arguments to be used to call the create or update function
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ result = create_resource(resource_type, function, kwargs_function, module)
+ resource = result[resource_type]
+ result[resource_type] = wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type,
+ )
+ return result
+
+
+def wait_for_resource_lifecycle_state(
+ client,
+ module,
+ wait_applicable,
+ kwargs_get,
+ get_fn,
+ get_param,
+ resource,
+ states,
+ resource_type=None,
+):
+ """
+ A utility function to wait for the resource to get into the state as specified in
+ the module options.
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient
+ :param module: Instance of AnsibleModule.
+ :param wait_applicable: Specifies if wait for create is applicable for this resource
+ :param kwargs_get: Dictionary containing arguments to be used to call the get function which requires multiple arguments.
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param get_param: Name of the argument in the SDK get function. e.g. "vcn_id"
+ :param resource_type: Type of the resource to be created. e.g. "vcn"
+ :param states: List of lifecycle states to watch for while waiting after create_fn is called.
+ e.g. [module.params['wait_until'], "FAULTY"]
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+ if wait_applicable and module.params.get("wait", None):
+ if resource_type == "compartment":
+            # An immediate attempt to retrieve a compartment right after it is created fails with a
+            # 404 ('Authorization failed or requested resource not found') because it takes a few
+            # seconds for the permissions on a new compartment to be ready. Wait a few seconds
+            # before attempting a get call on the compartment.
+ _debug(
+ "Pausing execution for permission on the newly created compartment to be ready."
+ )
+ time.sleep(15)
+ if kwargs_get:
+ _debug(
+ "Waiting for resource to reach READY state. get_args: {0}".format(
+ kwargs_get
+ )
+ )
+ response_get = call_with_backoff(get_fn, **kwargs_get)
+ else:
+ _debug(
+ "Waiting for resource with id {0} to reach READY state.".format(
+ resource["id"]
+ )
+ )
+ response_get = call_with_backoff(get_fn, **{get_param: resource["id"]})
+ if states is None:
+ states = module.params.get("wait_until") or DEFAULT_READY_STATES
+ resource = to_dict(
+ oci.wait_until(
+ client,
+ response_get,
+ evaluate_response=lambda r: r.data.lifecycle_state in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ ).data
+ )
+ return resource
+
+
+def wait_on_work_request(client, response, module):
+ try:
+ if module.params.get("wait", None):
+ _debug(
+ "Waiting for work request with id {0} to reach SUCCEEDED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "SUCCEEDED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ else:
+ _debug(
+ "Waiting for work request with id {0} to reach ACCEPTED state.".format(
+ response.data.id
+ )
+ )
+ wait_response = oci.wait_until(
+ client,
+ response,
+ evaluate_response=lambda r: r.data.status == "ACCEPTED",
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ )
+ except MaximumWaitTimeExceeded as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ _debug(str(ex))
+ module.fail_json(msg=str(ex))
+ return wait_response.data
+
+
+def delete_and_wait(
+ resource_type,
+ client,
+ get_fn,
+ kwargs_get,
+ delete_fn,
+ kwargs_delete,
+ module,
+ states=None,
+ wait_applicable=True,
+ process_work_request=False,
+):
+ """A utility function to delete a resource and wait for the resource to get into the state as specified in the
+ module options.
+ :param wait_applicable: Specifies if wait for delete is applicable for this resource
+ :param resource_type: Type of the resource to be deleted. e.g. "vcn"
+ :param client: OCI service client instance to call the service periodically to retrieve data.
+ e.g. VirtualNetworkClient()
+ :param get_fn: Function in the SDK to get the resource. e.g. virtual_network_client.get_vcn
+ :param kwargs_get: Dictionary of arguments for get function get_fn. e.g. {"vcn_id": module.params["id"]}
+ :param delete_fn: Function in the SDK to delete the resource. e.g. virtual_network_client.delete_vcn
+ :param kwargs_delete: Dictionary of arguments for delete function delete_fn. e.g. {"vcn_id": module.params["id"]}
+ :param module: Instance of AnsibleModule.
+ :param states: List of lifecycle states to watch for while waiting after delete_fn is called. If nothing is passed,
+ defaults to ["TERMINATED", "DETACHED", "DELETED"].
+    :param process_work_request: Whether the API call generates a work request that needs to be handled.
+ :return: A dictionary containing the resource & the "changed" status. e.g. {"vcn":{x:y}, "changed":True}
+ """
+
+ states_set = set(["DETACHING", "DETACHED", "DELETING", "DELETED", "TERMINATING", "TERMINATED"])
+ result = dict(changed=False)
+ result[resource_type] = dict()
+ try:
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ if resource:
+ if "lifecycle_state" not in resource or resource["lifecycle_state"] not in states_set:
+ response = call_with_backoff(delete_fn, **kwargs_delete)
+ if process_work_request:
+ wr_id = response.headers.get("opc-work-request-id")
+ get_wr_response = call_with_backoff(
+ client.get_work_request, work_request_id=wr_id
+ )
+ result["work_request"] = to_dict(
+ wait_on_work_request(client, get_wr_response, module)
+ )
+ # Set changed to True as work request has been created to delete the resource.
+ result["changed"] = True
+ resource = to_dict(call_with_backoff(get_fn, **kwargs_get).data)
+ else:
+ _debug("Deleted {0}, {1}".format(resource_type, resource))
+ result["changed"] = True
+
+ if wait_applicable and module.params.get("wait", None):
+ if states is None:
+ states = (
+ module.params.get("wait_until")
+ or DEFAULT_TERMINATED_STATES
+ )
+ try:
+ wait_response = oci.wait_until(
+ client,
+ get_fn(**kwargs_get),
+ evaluate_response=lambda r: r.data.lifecycle_state
+ in states,
+ max_wait_seconds=module.params.get(
+ "wait_timeout", MAX_WAIT_TIMEOUT_IN_SECONDS
+ ),
+ succeed_on_not_found=True,
+ )
+ except MaximumWaitTimeExceeded as ex:
+ module.fail_json(msg=str(ex))
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+ else:
+                                # The resource was not found while waiting for it to reach a
+                                # terminated state; treat it as already deleted.
+ _debug(
+ "API returned Status:404(Not Found) while waiting for resource to get into"
+ " terminated state."
+ )
+ resource["lifecycle_state"] = "DELETED"
+ result[resource_type] = resource
+ return result
+ # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found.
+ if type(wait_response) is not Sentinel:
+ resource = to_dict(wait_response.data)
+ else:
+ resource["lifecycle_state"] = "DELETED"
+
+ result[resource_type] = resource
+ else:
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ except ServiceError as ex:
+ # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone
+ # resource is not available, instead of the expected 404. So working around this for now.
+ if type(client) == oci.dns.DnsClient:
+ if ex.status == 400 and ex.code == "InvalidParameter":
+ _debug(
+ "Resource {0} with {1} already deleted. So returning changed=False".format(
+ resource_type, kwargs_get
+ )
+ )
+ elif ex.status != 404:
+ module.fail_json(msg=ex.message)
+ result[resource_type] = dict()
+ return result
+
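+# A hypothetical call sketch (the SDK names mirror the docstring examples above; the
+# argument values are illustrative):
+#   result = delete_and_wait(
+#       resource_type="vcn",
+#       client=virtual_network_client,
+#       get_fn=virtual_network_client.get_vcn,
+#       kwargs_get={"vcn_id": module.params["vcn_id"]},
+#       delete_fn=virtual_network_client.delete_vcn,
+#       kwargs_delete={"vcn_id": module.params["vcn_id"]},
+#       module=module,
+#   )
+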
+
+def are_attrs_equal(current_resource, module, attributes):
+ """
+ Check if the specified attributes are equal in the specified 'model' and 'module'. This is used to check if an OCI
+ Model instance already has the values specified by an Ansible user while invoking an OCI Ansible module and if a
+ resource needs to be updated.
+ :param current_resource: A resource model instance
+ :param module: The AnsibleModule representing the options provided by the user
+ :param attributes: A list of attributes that would need to be compared in the model and the module instances.
+    :return: True if the values for the listed attributes are the same in the model and module instances
+ """
+ for attr in attributes:
+ curr_value = getattr(current_resource, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if user_provided_value is not None:
+ if curr_value != user_provided_value:
+ _debug(
+ "are_attrs_equal - current resource's attribute "
+ + attr
+ + " value is "
+ + str(curr_value)
+ + " and this doesn't match user provided value of "
+ + str(user_provided_value)
+ )
+ return False
+ return True
+
+
+def _get_user_provided_value(module, attribute_name):
+ """
+    Returns the user-provided value for "attribute_name", taking the module's aliases into account.
+ """
+ user_provided_value = module.params.get(attribute_name, None)
+ if user_provided_value is None:
+ # If the attribute_name is set as an alias for some option X and user has provided value in the playbook using
+ # option X, then user provided value for attribute_name is equal to value for X.
+ # Get option name for attribute_name from module.aliases.
+ # module.aliases is a dictionary with key as alias name and its value as option name.
+ option_alias_for_attribute = module.aliases.get(attribute_name, None)
+ if option_alias_for_attribute is not None:
+ user_provided_value = module.params.get(option_alias_for_attribute, None)
+ return user_provided_value
+
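+# For example (hypothetical option names): if the module declares the option 'display_name'
+# with the alias 'name' and the playbook sets 'name', then module.aliases maps
+# 'name' -> 'display_name', so _get_user_provided_value(module, 'name') returns the value
+# set for 'display_name'.
+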
+
+def update_model_with_user_options(curr_model, update_model, module):
+ """
+ Update the 'update_model' with user provided values in 'module' for the specified 'attributes' if they are different
+ from the values in the 'curr_model'.
+ :param curr_model: A resource model instance representing the state of the current resource
+ :param update_model: An instance of the update resource model for the current resource's type
+ :param module: An AnsibleModule representing the options provided by the user
+ :return: An updated 'update_model' instance filled with values that would need to be updated in the current resource
+ state to satisfy the user's requested state.
+ """
+ attributes = update_model.attribute_map.keys()
+ for attr in attributes:
+ curr_value_for_attr = getattr(curr_model, attr, None)
+ user_provided_value = _get_user_provided_value(module, attribute_name=attr)
+
+ if curr_value_for_attr != user_provided_value:
+ if user_provided_value is not None:
+ # Only update if a user has specified a value for an option
+ _debug(
+ "User requested {0} for attribute {1}, whereas the current value is {2}. So adding it "
+ "to the update model".format(
+ user_provided_value, attr, curr_value_for_attr
+ )
+ )
+ setattr(update_model, attr, user_provided_value)
+ else:
+ # Always set current values of the resource in the update model if there is no request for change in
+ # values
+ setattr(update_model, attr, curr_value_for_attr)
+ return update_model
+
+
+def _get_retry_strategy():
+ retry_strategy_builder = RetryStrategyBuilder(
+ max_attempts_check=True,
+ max_attempts=10,
+ retry_max_wait_between_calls_seconds=30,
+ retry_base_sleep_time_seconds=3,
+ backoff_type=oci.retry.BACKOFF_FULL_JITTER_EQUAL_ON_THROTTLE_VALUE,
+ )
+ retry_strategy_builder.add_service_error_check(
+ service_error_retry_config={
+ 429: [],
+ 400: ["QuotaExceeded", "LimitExceeded"],
+ 409: ["Conflict"],
+ },
+ service_error_retry_on_any_5xx=True,
+ )
+ return retry_strategy_builder.get_retry_strategy()
+
+
+def call_with_backoff(fn, **kwargs):
+ if "retry_strategy" not in kwargs:
+ kwargs["retry_strategy"] = _get_retry_strategy()
+ try:
+ return fn(**kwargs)
+ except TypeError as te:
+ if "unexpected keyword argument" in str(te):
+ # to handle older SDKs that did not support retry_strategy
+ del kwargs["retry_strategy"]
+ return fn(**kwargs)
+ else:
+ # A validation error raised by the SDK, throw it back
+ raise
+
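+# A minimal usage sketch (get_vcn and vcn_id are illustrative OCI SDK names; a custom
+# retry_strategy could also be passed through kwargs):
+#   response = call_with_backoff(virtual_network_client.get_vcn, vcn_id=vcn_id)
+#   vcn = to_dict(response.data)
+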
+
+def generic_hash(obj):
+    """
+    Compute a hash of all the fields in the object
+    :param obj: Object whose hash needs to be computed
+    :return: a hash value for the object
+    """
+    total = 0  # named 'total' to avoid shadowing the built-in sum()
+    for field in obj.attribute_map.keys():
+        field_value = getattr(obj, field)
+        if isinstance(field_value, list):
+            for value in field_value:
+                total = total + hash(value)
+        elif isinstance(field_value, dict):
+            for k, v in field_value.items():
+                total = total + hash(hash(k) + hash(":") + hash(v))
+        else:
+            total = total + hash(field_value)
+    return total
+
+
+def generic_eq(s, other):
+ if other is None:
+ return False
+ return s.__dict__ == other.__dict__
+
+
+def generate_subclass(parent_class):
+ """Make a class hash-able by generating a subclass with a __hash__ method that returns the sum of all fields within
+ the parent class"""
+ dict_of_method_in_subclass = {
+ "__init__": parent_class.__init__,
+ "__hash__": generic_hash,
+ "__eq__": generic_eq,
+ }
+ subclass_name = "GeneratedSub" + parent_class.__name__
+ generated_sub_class = type(
+ subclass_name, (parent_class,), dict_of_method_in_subclass
+ )
+ return generated_sub_class
+
+
+def create_hashed_instance(class_type):
+ hashed_class = generate_subclass(class_type)
+ return hashed_class()
+
+
+def get_hashed_object_list(class_type, object_with_values, attributes_class_type=None):
+ if object_with_values is None:
+ return None
+ hashed_class_instances = []
+ for object_with_value in object_with_values:
+ hashed_class_instances.append(
+ get_hashed_object(class_type, object_with_value, attributes_class_type)
+ )
+ return hashed_class_instances
+
+
+def get_hashed_object(
+ class_type, object_with_value, attributes_class_type=None, supported_attributes=None
+):
+ """
+ Convert any class instance into hashable so that the
+ instances are eligible for various comparison
+ operation available under set() object.
+ :param class_type: Any class type whose instances needs to be hashable
+ :param object_with_value: Instance of the class type with values which
+ would be set in the resulting isinstance
+ :param attributes_class_type: A list of class types of attributes, if attribute is a custom class instance
+ :param supported_attributes: A list of attributes which should be considered while populating the instance
+ with the values in the object. This helps in avoiding new attributes of the class_type which are still not
+ supported by the current implementation.
+ :return: A hashable instance with same state of the provided object_with_value
+ """
+ if object_with_value is None:
+ return None
+
+ HashedClass = generate_subclass(class_type)
+ hashed_class_instance = HashedClass()
+
+ if supported_attributes:
+ class_attributes = list(
+ set(hashed_class_instance.attribute_map) & set(supported_attributes)
+ )
+ else:
+ class_attributes = hashed_class_instance.attribute_map
+
+ for attribute in class_attributes:
+ attribute_value = getattr(object_with_value, attribute)
+ if attributes_class_type:
+ for attribute_class_type in attributes_class_type:
+ if isinstance(attribute_value, attribute_class_type):
+ attribute_value = get_hashed_object(
+ attribute_class_type, attribute_value
+ )
+ hashed_class_instance.__setattr__(attribute, attribute_value)
+
+ return hashed_class_instance
+
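+# A hypothetical sketch (RouteRule stands in for any OCI model class): hashed instances
+# can be compared with set operations, e.g. to detect configuration drift:
+#   existing = get_hashed_object_list(RouteRule, current_rules)
+#   requested = get_hashed_object_list(RouteRule, desired_rules)
+#   changed = set(existing) != set(requested)
+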
+
+def update_class_type_attr_difference(
+ update_class_details, existing_instance, attr_name, attr_class, input_attr_value
+):
+ """
+    Checks the difference and updates an attribute which is represented by a class
+    instance. Not applicable if the attribute type is a primitive value.
+    For example, if a class A has an attribute x, this method only works when
+    A.x is itself a class instance, e.g. A.x = X().
+    :param update_class_details The instance which should be updated if there is a change in
+    the attribute value
+    :param existing_instance The instance whose attribute value is compared with the input
+    attribute value
+    :param attr_name Name of the attribute whose value should be compared
+    :param attr_class Class type of the attribute
+    :param input_attr_value The value of the input attribute which should replace the current
+    value in case of a mismatch
+    :return: A boolean value indicating whether the attribute value has been replaced
+ """
+ changed = False
+ # Here existing attribute values is an instance
+ existing_attr_value = get_hashed_object(
+ attr_class, getattr(existing_instance, attr_name)
+ )
+ if input_attr_value is None:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+ else:
+ changed = not input_attr_value.__eq__(existing_attr_value)
+ if changed:
+ update_class_details.__setattr__(attr_name, input_attr_value)
+ else:
+ update_class_details.__setattr__(attr_name, existing_attr_value)
+
+ return changed
+
+
+def get_existing_resource(target_fn, module, **kwargs):
+ """
+ Returns the requested resource if it exists based on the input arguments.
+ :param target_fn The function which should be used to find the requested resource
+ :param module Instance of AnsibleModule attribute value
+ :param kwargs A map of arguments consisting of values based on which requested resource should be searched
+ :return: Instance of requested resource
+ """
+ existing_resource = None
+ try:
+ response = call_with_backoff(target_fn, **kwargs)
+ existing_resource = response.data
+ except ServiceError as ex:
+ if ex.status != 404:
+ module.fail_json(msg=ex.message)
+
+ return existing_resource
+
+
+def get_attached_instance_info(
+ module, lookup_attached_instance, list_attachments_fn, list_attachments_args
+):
+ config = get_oci_config(module)
+ identity_client = create_service_client(module, IdentityClient)
+
+ volume_attachments = []
+
+ if lookup_attached_instance:
+ # Get all the compartments in the tenancy
+ compartments = to_dict(
+ identity_client.list_compartments(
+ config.get("tenancy"), compartment_id_in_subtree=True
+ ).data
+ )
+ # For each compartment, get the volume attachments for the compartment_id with the other args in
+ # list_attachments_args.
+ for compartment in compartments:
+ list_attachments_args["compartment_id"] = compartment["id"]
+ try:
+ volume_attachments += list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ # Pass ServiceError due to authorization issue in accessing volume attachments of a compartment
+ except ServiceError as ex:
+ if ex.status == 404:
+ pass
+
+ else:
+ volume_attachments = list_all_resources(
+ list_attachments_fn, **list_attachments_args
+ )
+
+ volume_attachments = to_dict(volume_attachments)
+ # volume_attachments has attachments in DETACHING or DETACHED state. Return the volume attachment in ATTACHING or
+ # ATTACHED state
+
+ return next(
+ (
+ volume_attachment
+ for volume_attachment in volume_attachments
+ if volume_attachment["lifecycle_state"] in ["ATTACHING", "ATTACHED"]
+ ),
+ None,
+ )
+
+
+def check_mode(fn):
+ def wrapper(*args, **kwargs):
+ if os.environ.get("OCI_ANSIBLE_EXPERIMENTAL", None):
+ return fn(*args, **kwargs)
+ return None
+
+ return wrapper
+
+
+def check_and_return_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if input_component_list:
+ existing_components, changed = get_component_list_difference(
+ input_component_list,
+ existing_components,
+ purge_components,
+ delete_components,
+ )
+ else:
+ existing_components = []
+ changed = True
+ return existing_components, changed
+
+
+def get_component_list_difference(
+ input_component_list, existing_components, purge_components, delete_components=False
+):
+ if delete_components:
+ if existing_components is None:
+ return None, False
+ component_differences = set(existing_components).intersection(
+ set(input_component_list)
+ )
+ if component_differences:
+ return list(set(existing_components) - component_differences), True
+ else:
+ return None, False
+ if existing_components is None:
+ return input_component_list, True
+ if purge_components:
+ components_differences = set(input_component_list).symmetric_difference(
+ set(existing_components)
+ )
+
+ if components_differences:
+ return input_component_list, True
+
+ components_differences = set(input_component_list).difference(
+ set(existing_components)
+ )
+ if components_differences:
+ return list(components_differences) + existing_components, True
+ return None, False
+
+
+def write_to_file(path, content):
+ with open(to_bytes(path), "wb") as dest_file:
+ dest_file.write(content)
+
+
+def get_target_resource_from_list(
+ module, list_resource_fn, target_resource_id=None, **kwargs
+):
+ """
+    Returns a resource filtered by identifier from a list of resources. This method should be
+    used as an alternative to a 'get resource' method when the resource API does not provide
+    one. It returns only a partial wrapper of the response object, which must not be used as
+    input to the 'wait_until' utility.
+ :param module The AnsibleModule representing the options provided by the user
+ :param list_resource_fn The function which lists all the resources
+ :param target_resource_id The identifier of the resource which should be filtered from the list
+ :param kwargs A map of arguments consisting of values based on which requested resource should be searched
+ :return: A custom wrapper which partially wraps a response object where the data field contains the target
+ resource, if found.
+ """
+
+ class ResponseWrapper:
+ def __init__(self, data):
+ self.data = data
+
+ try:
+ resources = list_all_resources(list_resource_fn, **kwargs)
+ if resources is not None:
+ for resource in resources:
+ if resource.id == target_resource_id:
+                    # Return an object that mimics an OCI response, as oci_utils methods assume a
+                    # Response-like object
+ return ResponseWrapper(data=resource)
+ return ResponseWrapper(data=None)
+ except ServiceError as ex:
+ module.fail_json(msg=ex.message)
diff --git a/ansible_collections/community/general/plugins/module_utils/pipx.py b/ansible_collections/community/general/plugins/module_utils/pipx.py
new file mode 100644
index 000000000..2f19f352d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/pipx.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+
+
+_state_map = dict(
+ install='install',
+ present='install',
+ uninstall='uninstall',
+ absent='uninstall',
+ uninstall_all='uninstall-all',
+ inject='inject',
+ upgrade='upgrade',
+ upgrade_all='upgrade-all',
+ reinstall='reinstall',
+ reinstall_all='reinstall-all',
+)
+
+
+def pipx_runner(module, command, **kwargs):
+ runner = CmdRunner(
+ module,
+ command=command,
+ arg_formats=dict(
+ state=fmt.as_map(_state_map),
+ name=fmt.as_list(),
+ name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])),
+ install_apps=fmt.as_bool("--include-apps"),
+ install_deps=fmt.as_bool("--include-deps"),
+ inject_packages=fmt.as_list(),
+ force=fmt.as_bool("--force"),
+ include_injected=fmt.as_bool("--include-injected"),
+ index_url=fmt.as_opt_val('--index-url'),
+ python=fmt.as_opt_val('--python'),
+ system_site_packages=fmt.as_bool("--system-site-packages"),
+ _list=fmt.as_fixed(['list', '--include-injected', '--json']),
+ editable=fmt.as_bool("--editable"),
+ pip_args=fmt.as_opt_val('--pip-args'),
+ ),
+ environ_update={'USE_EMOJI': '0'},
+ check_rc=True,
+ **kwargs
+ )
+ return runner
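+
+
+# A minimal usage sketch (mirroring the runner("...").run() pattern used with CmdRunner
+# elsewhere in this collection; the command value and package name are illustrative):
+#   runner = pipx_runner(module, command=['pipx'])
+#   rc, out, err = runner('state name').run(state='install', name='tox')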
diff --git a/ansible_collections/community/general/plugins/module_utils/proxmox.py b/ansible_collections/community/general/plugins/module_utils/proxmox.py
new file mode 100644
index 000000000..58287cec1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/proxmox.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# (TODO: remove next line!)
+import atexit # noqa: F401, pylint: disable=unused-import
+# (TODO: remove next line!)
+import time # noqa: F401, pylint: disable=unused-import
+# (TODO: remove next line!)
+import re # noqa: F401, pylint: disable=unused-import
+import traceback
+
+PROXMOXER_IMP_ERR = None
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+ PROXMOXER_IMP_ERR = traceback.format_exc()
+
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+# (TODO: remove next line!)
+from ansible.module_utils.common.text.converters import to_native # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+def proxmox_auth_argument_spec():
+ return dict(
+ api_host=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_HOST'])
+ ),
+ api_user=dict(type='str',
+ required=True,
+ fallback=(env_fallback, ['PROXMOX_USER'])
+ ),
+ api_password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['PROXMOX_PASSWORD'])
+ ),
+ api_token_id=dict(type='str',
+ no_log=False
+ ),
+ api_token_secret=dict(type='str',
+ no_log=True
+ ),
+ validate_certs=dict(type='bool',
+ default=False
+ ),
+ )
+
+
+def proxmox_to_ansible_bool(value):
+ '''Convert Proxmox representation of a boolean to be ansible-friendly'''
+    return value == 1
+
+
+def ansible_to_proxmox_bool(value):
+ '''Convert Ansible representation of a boolean to be proxmox-friendly'''
+ if value is None:
+ return None
+
+ if not isinstance(value, bool):
+ raise ValueError("%s must be of type bool not %s" % (value, type(value)))
+
+ return 1 if value else 0
+
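+# For example: proxmox_to_ansible_bool(1) returns True, ansible_to_proxmox_bool(True)
+# returns 1, and ansible_to_proxmox_bool(None) returns None (option left unset).
+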
+
+class ProxmoxAnsible(object):
+ """Base class for Proxmox modules"""
+ def __init__(self, module):
+ if not HAS_PROXMOXER:
+ module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR)
+
+ self.module = module
+ self.proxmox_api = self._connect()
+ # Test token validity
+ try:
+ self.proxmox_api.version.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e, exception=traceback.format_exc())
+
+ def _connect(self):
+ api_host = self.module.params['api_host']
+ api_user = self.module.params['api_user']
+ api_password = self.module.params['api_password']
+ api_token_id = self.module.params['api_token_id']
+ api_token_secret = self.module.params['api_token_secret']
+ validate_certs = self.module.params['validate_certs']
+
+ auth_args = {'user': api_user}
+ if api_password:
+ auth_args['password'] = api_password
+ else:
+ auth_args['token_name'] = api_token_id
+ auth_args['token_value'] = api_token_secret
+
+ try:
+ return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args)
+ except Exception as e:
+ self.module.fail_json(msg='%s' % e, exception=traceback.format_exc())
+
+ def version(self):
+ apireturn = self.proxmox_api.version.get()
+ return LooseVersion(apireturn['version'])
+
+ def get_node(self, node):
+ nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node]
+ return nodes[0] if nodes else None
+
+ def get_nextvmid(self):
+ vmid = self.proxmox_api.cluster.nextid.get()
+ return vmid
+
+ def get_vmid(self, name, ignore_missing=False, choose_first_if_multiple=False):
+ vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name]
+
+ if not vms:
+ if ignore_missing:
+ return None
+
+ self.module.fail_json(msg='No VM with name %s found' % name)
+ elif len(vms) > 1:
+ self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name)
+
+ return vms[0]
+
+ def get_vm(self, vmid, ignore_missing=False):
+ vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)]
+
+ if vms:
+ return vms[0]
+ else:
+ if ignore_missing:
+ return None
+
+ self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid)
+
+ def api_task_ok(self, node, taskid):
+ status = self.proxmox_api.nodes(node).tasks(taskid).status.get()
+ return status['status'] == 'stopped' and status['exitstatus'] == 'OK'
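+
+
+# A minimal usage sketch (assuming an AnsibleModule whose argument_spec includes
+# proxmox_auth_argument_spec(); the VM name is illustrative):
+#   module = AnsibleModule(argument_spec=proxmox_auth_argument_spec())
+#   proxmox = ProxmoxAnsible(module)
+#   vmid = proxmox.get_vmid('my-vm', ignore_missing=True)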
diff --git a/ansible_collections/community/general/plugins/module_utils/puppet.py b/ansible_collections/community/general/plugins/module_utils/puppet.py
new file mode 100644
index 000000000..8d553a2d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/puppet.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import os
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+
+_PUPPET_PATH_PREFIX = ["/opt/puppetlabs/bin"]
+
+
+def get_facter_dir():
+ if os.getuid() == 0:
+ return '/etc/facter/facts.d'
+ else:
+ return os.path.expanduser('~/.facter/facts.d')
+
+
+def _puppet_cmd(module):
+ return module.get_bin_path("puppet", False, _PUPPET_PATH_PREFIX)
+
+
+# If the `timeout` CLI command feature is removed,
+# Then we could add this as a fixed param to `puppet_runner`
+def ensure_agent_enabled(module):
+ runner = CmdRunner(
+ module,
+ command="puppet",
+ path_prefix=_PUPPET_PATH_PREFIX,
+ arg_formats=dict(
+ _agent_disabled=cmd_runner_fmt.as_fixed(['config', 'print', 'agent_disabled_lockfile']),
+ ),
+ check_rc=False,
+ )
+
+ rc, stdout, stderr = runner("_agent_disabled").run()
+ if os.path.exists(stdout.strip()):
+ module.fail_json(
+ msg="Puppet agent is administratively disabled.",
+ disabled=True)
+ elif rc != 0:
+ module.fail_json(
+ msg="Puppet agent state could not be determined.")
+
+
+def puppet_runner(module):
+
+ # Keeping backward compatibility, allow for running with the `timeout` CLI command.
+ # If this can be replaced with ansible `timeout` parameter in playbook,
+ # then this function could be removed.
+ def _prepare_base_cmd():
+ _tout_cmd = module.get_bin_path("timeout", False)
+ if _tout_cmd:
+ cmd = ["timeout", "-s", "9", module.params["timeout"], _puppet_cmd(module)]
+ else:
+ cmd = ["puppet"]
+ return cmd
+
+ def noop_func(v):
+ return ["--noop"] if module.check_mode or v else ["--no-noop"]
+
+ _logdest_map = {
+ "syslog": ["--logdest", "syslog"],
+ "all": ["--logdest", "syslog", "--logdest", "console"],
+ }
+
+ @cmd_runner_fmt.unpack_args
+ def execute_func(execute, manifest):
+ if execute:
+ return ["--execute", execute]
+ else:
+ return [manifest]
+
+ runner = CmdRunner(
+ module,
+ command=_prepare_base_cmd(),
+ path_prefix=_PUPPET_PATH_PREFIX,
+ arg_formats=dict(
+ _agent_fixed=cmd_runner_fmt.as_fixed([
+ "agent", "--onetime", "--no-daemonize", "--no-usecacheonfailure",
+ "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0",
+ ]),
+ _apply_fixed=cmd_runner_fmt.as_fixed(["apply", "--detailed-exitcodes"]),
+ puppetmaster=cmd_runner_fmt.as_opt_val("--server"),
+ show_diff=cmd_runner_fmt.as_bool("--show-diff"),
+ confdir=cmd_runner_fmt.as_opt_val("--confdir"),
+ environment=cmd_runner_fmt.as_opt_val("--environment"),
+ tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]),
+ skip_tags=cmd_runner_fmt.as_func(lambda v: ["--skip_tags", ",".join(v)]),
+ certname=cmd_runner_fmt.as_opt_eq_val("--certname"),
+ noop=cmd_runner_fmt.as_func(noop_func),
+ use_srv_records=cmd_runner_fmt.as_map({
+ True: "--usr_srv_records",
+ False: "--no-usr_srv_records",
+ }),
+ logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]),
+ modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"),
+ _execute=cmd_runner_fmt.as_func(execute_func),
+ summarize=cmd_runner_fmt.as_bool("--summarize"),
+ debug=cmd_runner_fmt.as_bool("--debug"),
+ verbose=cmd_runner_fmt.as_bool("--verbose"),
+ ),
+ check_rc=False,
+ )
+ return runner
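+
+
+# A minimal usage sketch (mirroring the runner("...").run() pattern used in
+# ensure_agent_enabled above; the chosen format-string options are illustrative):
+#   runner = puppet_runner(module)
+#   rc, stdout, stderr = runner("_agent_fixed noop environment").run()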
diff --git a/ansible_collections/community/general/plugins/module_utils/pure.py b/ansible_collections/community/general/plugins/module_utils/pure.py
new file mode 100644
index 000000000..8210e28f4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/pure.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+HAS_PURESTORAGE = True
+try:
+ from purestorage import purestorage
+except ImportError:
+ HAS_PURESTORAGE = False
+
+HAS_PURITY_FB = True
+try:
+ from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ HAS_PURITY_FB = False
+
+# (TODO: remove next line!)
+from functools import wraps # noqa: F401, pylint: disable=unused-import
+from os import environ
+# (TODO: remove next line!)
+from os import path # noqa: F401, pylint: disable=unused-import
+import platform
+
+VERSION = 1.2
+USER_AGENT_BASE = 'Ansible'
+API_AGENT_VERSION = 1.5
+
+
+def get_system(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ array_name = module.params['fa_url']
+ api = module.params['api_token']
+
+ if array_name and api:
+ system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent)
+ elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'):
+ system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent)
+ else:
+ module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
+ try:
+ system.get()
+ except Exception:
+ module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
+ return system
+
+
+def get_blade(module):
+ """Return System Object or Fail"""
+ user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
+ 'base': USER_AGENT_BASE,
+ 'class': __name__,
+ 'version': VERSION,
+ 'platform': platform.platform()
+ }
+ blade_name = module.params['fb_url']
+ api = module.params['api_token']
+
+ if blade_name and api:
+ blade = PurityFb(blade_name)
+ blade.disable_verify_ssl()
+ try:
+ blade.login(api)
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'):
+ blade = PurityFb(environ.get('PUREFB_URL'))
+ blade.disable_verify_ssl()
+ try:
+ blade.login(environ.get('PUREFB_API'))
+ versions = blade.api_version.list_versions().versions
+ if API_AGENT_VERSION in versions:
+ blade._api_client.user_agent = user_agent
+ except rest.ApiException as e:
+ module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials")
+ else:
+ module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments")
+ return blade
+
+
+def purefa_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fa_url=dict(),
+ api_token=dict(no_log=True),
+ )
+
+
+def purefb_argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+
+ return dict(
+ fb_url=dict(),
+ api_token=dict(no_log=True),
+ )
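+
+
+# A minimal usage sketch (assumption: the calling module extends the base spec with
+# its own options before constructing AnsibleModule):
+#   argument_spec = purefa_argument_spec()
+#   argument_spec.update(dict(name=dict(type='str', required=True)))
+#   module = AnsibleModule(argument_spec=argument_spec)
+#   array = get_system(module)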
diff --git a/ansible_collections/community/general/plugins/module_utils/rax.py b/ansible_collections/community/general/plugins/module_utils/rax.py
new file mode 100644
index 000000000..6331c0d1b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/rax.py
@@ -0,0 +1,334 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their own
+# license to the complete work.
+#
+# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+from uuid import UUID
+
+from ansible.module_utils.six import text_type, binary_type
+
+FINAL_STATUSES = ('ACTIVE', 'ERROR')
+VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
+ 'error', 'error_deleting')
+
+CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
+ 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
+CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
+ 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
+ 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
+
+NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None))
+PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
+SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
+
+
+def rax_slugify(value):
+ """Prepend a key with rax_ and normalize the key name"""
+ return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
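+# For example: rax_slugify('accessIPv4') returns 'rax_accessipv4', and
+# rax_slugify('OS-EXT-STS:task_state') returns 'rax_os-ext-sts_task_state'.
+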
+
+def rax_clb_node_to_dict(obj):
+ """Function to convert a CLB Node object to a dict"""
+ if not obj:
+ return {}
+ node = obj.to_dict()
+ node['id'] = obj.id
+ node['weight'] = obj.weight
+ return node
+
+
+def rax_to_dict(obj, obj_type='standard'):
+ """Generic function to convert a pyrax object to a dict
+
+ obj_type values:
+ standard
+ clb
+ server
+
+ """
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if obj_type == 'clb' and key == 'nodes':
+ instance[key] = []
+ for node in value:
+ instance[key].append(rax_clb_node_to_dict(node))
+ elif (isinstance(value, list) and len(value) > 0 and
+ not isinstance(value[0], NON_CALLABLES)):
+ instance[key] = []
+ for item in value:
+ instance[key].append(rax_to_dict(item))
+ elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
+ if obj_type == 'server':
+ if key == 'image':
+ if not value:
+ instance['rax_boot_source'] = 'volume'
+ else:
+ instance['rax_boot_source'] = 'local'
+ key = rax_slugify(key)
+ instance[key] = value
+
+ if obj_type == 'server':
+ for attr in ['id', 'accessIPv4', 'name', 'status']:
+ instance[attr] = instance.get(rax_slugify(attr))
+
+ return instance
+
+
+def rax_find_bootable_volume(module, rax_module, server, exit=True):
+ """Find a servers bootable volume"""
+ cs = rax_module.cloudservers
+ cbs = rax_module.cloud_blockstorage
+ server_id = rax_module.utils.get_id(server)
+ volumes = cs.volumes.get_server_volumes(server_id)
+ bootable_volumes = []
+ for volume in volumes:
+ vol = cbs.get(volume)
+ if module.boolean(vol.bootable):
+ bootable_volumes.append(vol)
+ if not bootable_volumes:
+ if exit:
+ module.fail_json(msg='No bootable volumes could be found for '
+ 'server %s' % server_id)
+ else:
+ return False
+ elif len(bootable_volumes) > 1:
+ if exit:
+ module.fail_json(msg='Multiple bootable volumes found for server '
+ '%s' % server_id)
+ else:
+ return False
+
+ return bootable_volumes[0]
+
+
+def rax_find_image(module, rax_module, image, exit=True):
+ """Find a server image by ID or Name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(image)
+ except ValueError:
+ try:
+ image = cs.images.find(human_id=image)
+ except (cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch):
+ try:
+ image = cs.images.find(name=image)
+ except (cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ if exit:
+ module.fail_json(msg='No matching image found (%s)' %
+ image)
+ else:
+ return False
+
+ return rax_module.utils.get_id(image)
+
+
+def rax_find_volume(module, rax_module, name):
+ """Find a Block storage volume by ID or name"""
+ cbs = rax_module.cloud_blockstorage
+ try:
+ UUID(name)
+ volume = cbs.get(name)
+ except ValueError:
+ try:
+ volume = cbs.find(name=name)
+ except rax_module.exc.NotFound:
+ volume = None
+ except Exception as e:
+ module.fail_json(msg='%s' % e)
+ return volume
+
+
+def rax_find_network(module, rax_module, network):
+ """Find a cloud network by ID or name"""
+ cnw = rax_module.cloud_networks
+ try:
+ UUID(network)
+ except ValueError:
+ if network.lower() == 'public':
+ return cnw.get_server_networks(PUBLIC_NET_ID)
+ elif network.lower() == 'private':
+ return cnw.get_server_networks(SERVICE_NET_ID)
+ else:
+ try:
+ network_obj = cnw.find_network_by_label(network)
+ except (rax_module.exceptions.NetworkNotFound,
+ rax_module.exceptions.NetworkLabelNotUnique):
+ module.fail_json(msg='No matching network found (%s)' %
+ network)
+ else:
+ return cnw.get_server_networks(network_obj)
+ else:
+ return cnw.get_server_networks(network)
+
+
+def rax_find_server(module, rax_module, server):
+ """Find a Cloud Server by ID or name"""
+ cs = rax_module.cloudservers
+ try:
+ UUID(server)
+ server = cs.servers.get(server)
+ except ValueError:
+ servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
+ if not servers:
+ module.fail_json(msg='No Server was matched by name, '
+ 'try using the Server ID instead')
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers matched by name, '
+ 'try using the Server ID instead')
+
+ # We made it this far, grab the first and hopefully only server
+ # in the list
+ server = servers[0]
+ return server
+
+
+def rax_find_loadbalancer(module, rax_module, loadbalancer):
+ """Find a Cloud Load Balancer by ID or name"""
+ clb = rax_module.cloud_loadbalancers
+ try:
+ found = clb.get(loadbalancer)
+ except Exception:
+ found = []
+ for lb in clb.list():
+ if loadbalancer == lb.name:
+ found.append(lb)
+
+ if not found:
+ module.fail_json(msg='No loadbalancer was matched')
+
+ if len(found) > 1:
+ module.fail_json(msg='Multiple loadbalancers matched')
+
+ # We made it this far, grab the first and hopefully only item
+ # in the list
+ found = found[0]
+
+ return found
+
+
+def rax_argument_spec():
+ """Return standard base dictionary used for the argument_spec
+ argument in AnsibleModule
+
+ """
+ return dict(
+ api_key=dict(type='str', aliases=['password'], no_log=True),
+ auth_endpoint=dict(type='str'),
+ credentials=dict(type='path', aliases=['creds_file']),
+ env=dict(type='str'),
+ identity_type=dict(type='str', default='rackspace'),
+ region=dict(type='str'),
+ tenant_id=dict(type='str'),
+ tenant_name=dict(type='str'),
+ username=dict(type='str'),
+ validate_certs=dict(type='bool', aliases=['verify_ssl']),
+ )
+
+
+def rax_required_together():
+ """Return the default list used for the required_together argument to
+ AnsibleModule"""
+ return [['api_key', 'username']]
+
+
+def setup_rax_module(module, rax_module, region_required=True):
+ """Set up pyrax in a standard way for all modules"""
+ rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version,
+ rax_module.USER_AGENT)
+
+ api_key = module.params.get('api_key')
+ auth_endpoint = module.params.get('auth_endpoint')
+ credentials = module.params.get('credentials')
+ env = module.params.get('env')
+ identity_type = module.params.get('identity_type')
+ region = module.params.get('region')
+ tenant_id = module.params.get('tenant_id')
+ tenant_name = module.params.get('tenant_name')
+ username = module.params.get('username')
+ verify_ssl = module.params.get('validate_certs')
+
+ if env is not None:
+ rax_module.set_environment(env)
+
+ rax_module.set_setting('identity_type', identity_type)
+ if verify_ssl is not None:
+ rax_module.set_setting('verify_ssl', verify_ssl)
+ if auth_endpoint is not None:
+ rax_module.set_setting('auth_endpoint', auth_endpoint)
+ if tenant_id is not None:
+ rax_module.set_setting('tenant_id', tenant_id)
+ if tenant_name is not None:
+ rax_module.set_setting('tenant_name', tenant_name)
+
+ try:
+ username = username or os.environ.get('RAX_USERNAME')
+ if not username:
+ username = rax_module.get_setting('keyring_username')
+ if username:
+ api_key = 'USE_KEYRING'
+ if not api_key:
+ api_key = os.environ.get('RAX_API_KEY')
+ credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
+ os.environ.get('RAX_CREDS_FILE'))
+ region = (region or os.environ.get('RAX_REGION') or
+ rax_module.get_setting('region'))
+    except KeyError as e:
+        # KeyError has no 'message' attribute on Python 3; use str() instead.
+        module.fail_json(msg='Unable to load %s' % str(e))
+
+ try:
+ if api_key and username:
+ if api_key == 'USE_KEYRING':
+ rax_module.keyring_auth(username, region=region)
+ else:
+ rax_module.set_credentials(username, api_key=api_key,
+ region=region)
+ elif credentials:
+ credentials = os.path.expanduser(credentials)
+ rax_module.set_credential_file(credentials, region=region)
+ else:
+ raise Exception('No credentials supplied!')
+    except Exception as e:
+        # Exception.message does not exist on Python 3; fall back to str()/repr().
+        msg = str(e) or repr(e)
+        module.fail_json(msg=msg)
+
+ if region_required and region not in rax_module.regions:
+ module.fail_json(msg='%s is not a valid region, must be one of: %s' %
+ (region, ','.join(rax_module.regions)))
+
+ return rax_module
+
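+# A hypothetical usage sketch (pyrax is the SDK this helper configures):
+#   import pyrax
+#   module = AnsibleModule(argument_spec=rax_argument_spec(),
+#                          required_together=rax_required_together())
+#   pyrax = setup_rax_module(module, pyrax)
+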
+
+def rax_scaling_group_personality_file(module, files):
+ if not files:
+ return []
+
+ results = []
+ for rpath, lpath in files.items():
+ lpath = os.path.expanduser(lpath)
+ try:
+ with open(lpath, 'r') as f:
+ results.append({
+ 'path': rpath,
+ 'contents': f.read(),
+ })
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s: %s' % (lpath, str(e)))
+ return results
diff --git a/ansible_collections/community/general/plugins/module_utils/redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
new file mode 100644
index 000000000..9b6470302
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/redfish_utils.py
@@ -0,0 +1,3251 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six.moves import http_client
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+PATCH_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+PUT_HEADERS = {'content-type': 'application/json', 'accept': 'application/json',
+ 'OData-Version': '4.0'}
+DELETE_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'}
+
+FAIL_MSG = 'Issuing a data modification command without specifying the '\
+ 'ID of the target %(resource)s resource when there is more '\
+ 'than one %(resource)s is no longer allowed. Use the `resource_id` '\
+ 'option to specify the target %(resource)s ID.'
+
+
+class RedfishUtils(object):
+
+ def __init__(self, creds, root_uri, timeout, module, resource_id=None,
+ data_modification=False, strip_etag_quotes=False):
+ self.root_uri = root_uri
+ self.creds = creds
+ self.timeout = timeout
+ self.module = module
+ self.service_root = '/redfish/v1/'
+ self.session_service_uri = '/redfish/v1/SessionService'
+ self.sessions_uri = '/redfish/v1/SessionService/Sessions'
+ self.resource_id = resource_id
+ self.data_modification = data_modification
+ self.strip_etag_quotes = strip_etag_quotes
+ self._vendor = None
+ self._init_session()
+
+ def _auth_params(self, headers):
+ """
+ Return tuple of required authentication params based on the presence
+ of a token in the self.creds dict. If using a token, set the
+ X-Auth-Token header in the `headers` param.
+
+ :param headers: dict containing headers to send in request
+ :return: tuple of username, password and force_basic_auth
+ """
+ if self.creds.get('token'):
+ username = None
+ password = None
+ force_basic_auth = False
+ headers['X-Auth-Token'] = self.creds['token']
+ else:
+ username = self.creds['user']
+ password = self.creds['pswd']
+ force_basic_auth = True
+ return username, password, force_basic_auth
+
+ def _check_request_payload(self, req_pyld, cur_pyld, uri):
+ """
+ Checks the request payload with the values currently held by the
+ service. Will check if changes are needed and if properties are
+ supported by the service.
+
+ :param req_pyld: dict containing the properties to apply
+ :param cur_pyld: dict containing the properties currently set
+ :param uri: string containing the URI being modified
+ :return: dict containing response information
+ """
+
+ change_required = False
+ for prop in req_pyld:
+ # Check if the property is supported by the service
+ if prop not in cur_pyld:
+ return {'ret': False,
+ 'changed': False,
+ 'msg': '%s does not support the property %s' % (uri, prop),
+ 'changes_required': False}
+
+ # Perform additional checks based on the type of property
+ if isinstance(req_pyld[prop], dict) and isinstance(cur_pyld[prop], dict):
+ # If the property is a dictionary, check the nested properties
+ sub_resp = self._check_request_payload(req_pyld[prop], cur_pyld[prop], uri)
+ if not sub_resp['ret']:
+ # Unsupported property or other error condition; no change
+ return sub_resp
+ if sub_resp['changes_required']:
+ # Subordinate dictionary requires changes
+ change_required = True
+
+ else:
+ # For other properties, just compare the values
+
+ # Note: This is also a fallthrough for cases where the request
+ # payload and current settings do not match in their data type.
+ # There are cases where this can be expected, such as when a
+ # property is always 'null' in responses, so we want to attempt
+ # the PATCH request.
+
+                # Note: This is also a fallthrough for properties that are
+                # arrays of objects. Some services erroneously omit properties
+                # within arrays of objects when not configured, yet expect the
+                # client to provide them anyway.
+
+ if req_pyld[prop] != cur_pyld[prop]:
+ change_required = True
+
+ resp = {'ret': True, 'changes_required': change_required}
+ if not change_required:
+ # No changes required; all properties set
+ resp['changed'] = False
+ resp['msg'] = 'Properties in %s are already set' % uri
+ return resp
+
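+    # For example (illustrative payloads): checking {'BootMode': 'Uefi'} against
+    # current settings {'BootMode': 'Bios'} yields {'ret': True, 'changes_required': True};
+    # if the current value were already 'Uefi', the result would instead include
+    # 'changes_required': False, 'changed': False and an "already set" message.
+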
+ # The following functions are to send GET/POST/PATCH/DELETE requests
+ def get_request(self, uri):
+ req_headers = dict(GET_HEADERS)
+ username, password, basic_auth = self._auth_params(req_headers)
+ try:
+ # Service root is an unauthenticated resource; remove credentials
+ # in case the caller will be using sessions later.
+ if uri == (self.root_uri + self.service_root):
+ basic_auth = False
+ resp = open_url(uri, method="GET", headers=req_headers,
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ data = json.loads(to_native(resp.read()))
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers, 'resp': resp}
+
+ def post_request(self, uri, pyld):
+ req_headers = dict(POST_HEADERS)
+ username, password, basic_auth = self._auth_params(req_headers)
+ try:
+ # When performing a POST to the session collection, credentials are
+ # provided in the request body. Do not provide the basic auth
+ # header since this can cause conflicts with some services
+ if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri):
+ basic_auth = False
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=req_headers, method="POST",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ try:
+ data = json.loads(to_native(resp.read()))
+ except Exception as e:
+ # No response data; this is okay in many cases
+ data = None
+ headers = dict((k.lower(), v) for (k, v) in resp.info().items())
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'data': data, 'headers': headers, 'resp': resp}
+
+ def patch_request(self, uri, pyld, check_pyld=False):
+ req_headers = dict(PATCH_HEADERS)
+ r = self.get_request(uri)
+ if r['ret']:
+ # Get etag from etag header or @odata.etag property
+ etag = r['headers'].get('etag')
+ if not etag:
+ etag = r['data'].get('@odata.etag')
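+ # Sending If-Match makes the PATCH conditional; per standard
+ # HTTP semantics, the service rejects the write with 412
+ # (Precondition Failed) if the resource changed since it was read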
+ if etag:
+ if self.strip_etag_quotes:
+ etag = etag.strip('"')
+ req_headers['If-Match'] = etag
+
+ if check_pyld:
+ # Check the payload with the current settings to see if changes
+ # are needed or if there are unsupported properties
+ if r['ret']:
+ check_resp = self._check_request_payload(pyld, r['data'], uri)
+ if not check_resp.pop('changes_required'):
+ check_resp['changed'] = False
+ return check_resp
+ else:
+ r['changed'] = False
+ return r
+
+ username, password, basic_auth = self._auth_params(req_headers)
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=req_headers, method="PATCH",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False, 'changed': False,
+ 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'changed': False,
+ 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False, 'changed': False,
+ 'msg': "Failed PATCH request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'changed': True, 'resp': resp, 'msg': 'Modified %s' % uri}
+
+ def put_request(self, uri, pyld):
+ req_headers = dict(PUT_HEADERS)
+ r = self.get_request(uri)
+ if r['ret']:
+ # Get etag from etag header or @odata.etag property
+ etag = r['headers'].get('etag')
+ if not etag:
+ etag = r['data'].get('@odata.etag')
+ if etag:
+ if self.strip_etag_quotes:
+ etag = etag.strip('"')
+ req_headers['If-Match'] = etag
+ username, password, basic_auth = self._auth_params(req_headers)
+ try:
+ resp = open_url(uri, data=json.dumps(pyld),
+ headers=req_headers, method="PUT",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on PUT request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ def delete_request(self, uri, pyld=None):
+ req_headers = dict(DELETE_HEADERS)
+ username, password, basic_auth = self._auth_params(req_headers)
+ try:
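+ # A request body on DELETE is unusual; it is sent only when the
+ # caller explicitly provides a payload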
+ data = json.dumps(pyld) if pyld else None
+ resp = open_url(uri, data=data,
+ headers=req_headers, method="DELETE",
+ url_username=username, url_password=password,
+ force_basic_auth=basic_auth, validate_certs=False,
+ follow_redirects='all',
+ use_proxy=True, timeout=self.timeout)
+ except HTTPError as e:
+ msg = self._get_extended_message(e)
+ return {'ret': False,
+ 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'"
+ % (e.code, uri, msg),
+ 'status': e.code}
+ except URLError as e:
+ return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'"
+ % (uri, e.reason)}
+ # Almost all errors should be caught above, but just in case
+ except Exception as e:
+ return {'ret': False,
+ 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))}
+ return {'ret': True, 'resp': resp}
+
+ @staticmethod
+ def _get_extended_message(error):
+ """
+ Get Redfish ExtendedInfo message from response payload if present
+ :param error: an HTTPError exception
+ :type error: HTTPError
+ :return: the ExtendedInfo message if present, else standard HTTP error
+ """
+ msg = http_client.responses.get(error.code, '')
+ if error.code >= 400:
+ try:
+ body = error.read().decode('utf-8')
+ data = json.loads(body)
+ ext_info = data['error']['@Message.ExtendedInfo']
+ # If the ExtendedInfo contains a user-friendly message, return it;
+ # otherwise fall back to the entire contents of ExtendedInfo
+ try:
+ msg = ext_info[0]['Message']
+ except Exception:
+ msg = str(data['error']['@Message.ExtendedInfo'])
+ except Exception:
+ pass
+ return msg
+
+ def _init_session(self):
+ pass
+
+ def _get_vendor(self):
+ # If we got the vendor info once, don't get it again
+ if self._vendor is not None:
+ return {'ret': True, 'Vendor': self._vendor}
+
+ # Find the vendor info from the service root
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return {'ret': False, 'Vendor': ''}
+ data = response['data']
+
+ if 'Vendor' in data:
+ # Extract the vendor string from the Vendor property
+ self._vendor = data["Vendor"]
+ return {'ret': True, 'Vendor': data["Vendor"]}
+ elif 'Oem' in data and len(data['Oem']) > 0:
+ # Determine the vendor from the OEM object if needed
+ vendor = list(data['Oem'].keys())[0]
+ if vendor in ('Hpe', 'Hp'):
+ # HPE uses PascalCase for its OEM object key; systems from
+ # before the HP/HPE company split report 'Hp'. Normalize
+ # both to 'HPE'.
+ vendor = 'HPE'
+ self._vendor = vendor
+ return {'ret': True, 'Vendor': vendor}
+ else:
+ # Could not determine; use an empty string
+ self._vendor = ''
+ return {'ret': True, 'Vendor': ''}
+
+ def _find_accountservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'AccountService' not in data:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+ else:
+ account_service = data["AccountService"]["@odata.id"]
+ response = self.get_request(self.root_uri + account_service)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ accounts = data['Accounts']['@odata.id']
+ if accounts.endswith('/'):
+ accounts = accounts[:-1]
+ self.accounts_uri = accounts
+ return {'ret': True}
+
+ def _find_sessionservice_resource(self):
+ # Get the service root
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Check for the session service and session collection. Well-known
+ # defaults are provided in the constructor, but services that predate
+ # Redfish 1.6.0 might contain different values.
+ self.session_service_uri = data.get('SessionService', {}).get('@odata.id')
+ self.sessions_uri = data.get('Links', {}).get('Sessions', {}).get('@odata.id')
+
+ # If one isn't found, return an error
+ if self.session_service_uri is None:
+ return {'ret': False, 'msg': "SessionService resource not found"}
+ if self.sessions_uri is None:
+ return {'ret': False, 'msg': "SessionCollection resource not found"}
+ return {'ret': True}
+
+ def _get_resource_uri_by_id(self, uris, id_prop):
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ if id_prop == data.get('Id'):
+ return uri
+ return None
+
+ def _find_systems_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Systems' not in data:
+ return {'ret': False, 'msg': "Systems resource not found"}
+ response = self.get_request(self.root_uri + data['Systems']['@odata.id'])
+ if response['ret'] is False:
+ return response
+ self.systems_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.systems_uris:
+ return {
+ 'ret': False,
+ 'msg': "ComputerSystem's Members array is either empty or missing"}
+ self.systems_uri = self.systems_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.systems_uri = self._get_resource_uri_by_id(self.systems_uris,
+ self.resource_id)
+ if not self.systems_uri:
+ return {
+ 'ret': False,
+ 'msg': "System resource %s not found" % self.resource_id}
+ elif len(self.systems_uris) > 1:
+ self.module.fail_json(msg=FAIL_MSG % {'resource': 'System'})
+ return {'ret': True}
+
+ def _find_updateservice_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'UpdateService' not in data:
+ return {'ret': False, 'msg': "UpdateService resource not found"}
+ else:
+ update = data["UpdateService"]["@odata.id"]
+ self.update_uri = update
+ response = self.get_request(self.root_uri + update)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ self.firmware_uri = self.software_uri = None
+ if 'FirmwareInventory' in data:
+ self.firmware_uri = data['FirmwareInventory'][u'@odata.id']
+ if 'SoftwareInventory' in data:
+ self.software_uri = data['SoftwareInventory'][u'@odata.id']
+ return {'ret': True}
+
+ def _find_chassis_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Chassis' not in data:
+ return {'ret': False, 'msg': "Chassis resource not found"}
+ chassis = data["Chassis"]["@odata.id"]
+ response = self.get_request(self.root_uri + chassis)
+ if response['ret'] is False:
+ return response
+ self.chassis_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.chassis_uris:
+ return {'ret': False,
+ 'msg': "Chassis Members array is either empty or missing"}
+ self.chassis_uri = self.chassis_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.chassis_uri = self._get_resource_uri_by_id(self.chassis_uris,
+ self.resource_id)
+ if not self.chassis_uri:
+ return {
+ 'ret': False,
+ 'msg': "Chassis resource %s not found" % self.resource_id}
+ elif len(self.chassis_uris) > 1:
+ self.module.fail_json(msg=FAIL_MSG % {'resource': 'Chassis'})
+ return {'ret': True}
+
+ def _find_managers_resource(self):
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Managers' not in data:
+ return {'ret': False, 'msg': "Manager resource not found"}
+ manager = data["Managers"]["@odata.id"]
+ response = self.get_request(self.root_uri + manager)
+ if response['ret'] is False:
+ return response
+ self.manager_uris = [
+ i['@odata.id'] for i in response['data'].get('Members', [])]
+ if not self.manager_uris:
+ return {'ret': False,
+ 'msg': "Managers Members array is either empty or missing"}
+ self.manager_uri = self.manager_uris[0]
+ if self.data_modification:
+ if self.resource_id:
+ self.manager_uri = self._get_resource_uri_by_id(self.manager_uris,
+ self.resource_id)
+ if not self.manager_uri:
+ return {
+ 'ret': False,
+ 'msg': "Manager resource %s not found" % self.resource_id}
+ elif len(self.manager_uris) > 1:
+ self.module.fail_json(msg=FAIL_MSG % {'resource': 'Manager'})
+ return {'ret': True}
+
+ def _get_all_action_info_values(self, action):
+ """Retrieve all parameter values for an Action from ActionInfo.
+ Fall back to AllowableValue annotations if no ActionInfo found.
+ Return the result in an ActionInfo-like dictionary, keyed
+ by the name of the parameter. """
+ ai = {}
+ if '@Redfish.ActionInfo' in action:
+ ai_uri = action['@Redfish.ActionInfo']
+ response = self.get_request(self.root_uri + ai_uri)
+ if response['ret'] is True:
+ data = response['data']
+ if 'Parameters' in data:
+ params = data['Parameters']
+ ai = dict((p['Name'], p)
+ for p in params if 'Name' in p)
+ if not ai:
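+ # k[:-24] strips the 24-character '@Redfish.AllowableValues'
+ # suffix from the annotation key, leaving the parameter name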
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in action.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ return ai
+
+ def _get_allowable_values(self, action, name, default_values=None):
+ if default_values is None:
+ default_values = []
+ ai = self._get_all_action_info_values(action)
+ allowable_values = ai.get(name, {}).get('AllowableValues')
+ # fallback to default values
+ if allowable_values is None:
+ allowable_values = default_values
+ return allowable_values
+
+ def get_logs(self):
+ log_svcs_uri_list = []
+ list_of_logs = []
+ properties = ['Severity', 'Created', 'EntryType', 'OemRecordFormat',
+ 'Message', 'MessageId', 'MessageArgs']
+
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ for log_svcs_entry in data.get('Members', []):
+ response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ if 'Entries' in _data:
+ log_svcs_uri_list.append(_data['Entries'][u'@odata.id'])
+
+ # For each entry in LogServices, get log name and all log entries
+ for log_svcs_uri in log_svcs_uri_list:
+ logs = {}
+ list_of_log_entries = []
+ response = self.get_request(self.root_uri + log_svcs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ logs['Description'] = data.get('Description',
+ 'Collection of log entries')
+ # Get all log entries for each type of log found
+ for logEntry in data.get('Members', []):
+ entry = {}
+ for prop in properties:
+ if prop in logEntry:
+ entry[prop] = logEntry.get(prop)
+ if entry:
+ list_of_log_entries.append(entry)
+ log_name = log_svcs_uri.split('/')[-1]
+ logs[log_name] = list_of_log_entries
+ list_of_logs.append(logs)
+
+ # Returned structure: a list of log dicts, each mapping a log name
+ # to its list of entry dicts (plus a 'Description')
+ return {'ret': True, 'entries': list_of_logs}
+
+ def clear_logs(self):
+ # Find LogService
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'LogServices' not in data:
+ return {'ret': False, 'msg': "LogServices resource not found"}
+
+ # Find all entries in LogServices
+ logs_uri = data["LogServices"]["@odata.id"]
+ response = self.get_request(self.root_uri + logs_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for log_svcs_entry in data.get('Members', []):
+ response = self.get_request(self.root_uri + log_svcs_entry["@odata.id"])
+ if response['ret'] is False:
+ return response
+ _data = response['data']
+ # Check that the ClearLog action is available before invoking it;
+ # otherwise the service returns an unhelpful error
+ if "#LogService.ClearLog" in _data.get("Actions", {}):
+ response = self.post_request(self.root_uri + _data["Actions"]["#LogService.ClearLog"]["target"], {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True}
+
+ def aggregate(self, func, uri_list, uri_name):
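+ # Run 'func' against every URI in the list and merge the results;
+ # each entry is a ({uri_name: uri}, entries) tuple so callers can
+ # tell which resource produced which data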
+ ret = True
+ entries = []
+ for uri in uri_list:
+ inventory = func(uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({uri_name: uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
+
+ def aggregate_chassis(self, func):
+ return self.aggregate(func, self.chassis_uris, 'chassis_uri')
+
+ def aggregate_managers(self, func):
+ return self.aggregate(func, self.manager_uris, 'manager_uri')
+
+ def aggregate_systems(self, func):
+ return self.aggregate(func, self.systems_uris, 'system_uri')
+
+ def get_storage_controller_inventory(self, systems_uri):
+ result = {}
+ controller_list = []
+ controller_results = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers',
+ 'Location', 'Manufacturer', 'Model', 'Name', 'Id',
+ 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status']
+ key = "StorageControllers"
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'Storage' not in data:
+ return {'ret': False, 'msg': "Storage resource not found"}
+
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data['Storage']["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Loop through Members and their StorageControllers
+ # and gather properties from each StorageController
+ if data[u'Members']:
+ for storage_member in data[u'Members']:
+ storage_member_uri = storage_member[u'@odata.id']
+ response = self.get_request(self.root_uri + storage_member_uri)
+ data = response['data']
+
+ if key in data:
+ controller_list = data[key]
+ for controller in controller_list:
+ controller_result = {}
+ for property in properties:
+ if property in controller:
+ controller_result[property] = controller[property]
+ controller_results.append(controller_result)
+ result['entries'] = controller_results
+ return result
+ else:
+ return {'ret': False, 'msg': "Storage Members array is either empty or missing"}
+
+ def get_multi_storage_controller_inventory(self):
+ return self.aggregate_systems(self.get_storage_controller_inventory)
+
+ def get_disk_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes',
+ 'EncryptionAbility', 'EncryptionStatus',
+ 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers',
+ 'Manufacturer', 'MediaType', 'Model', 'Name',
+ 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision',
+ 'RotationSpeedRPM', 'SerialNumber', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource \
+ not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data[u'Members']:
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ if 'Drives' in data:
+ for device in data[u'Drives']:
+ disk_uri = self.root_uri + device[u'@odata.id']
+ response = self.get_request(disk_uri)
+ data = response['data']
+
+ drive_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ drive_result[property] = data[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ if 'SimpleStorage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data["SimpleStorage"]["@odata.id"]
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Name' in data:
+ controller_name = data['Name']
+ else:
+ sc_id = data.get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ drive_results = []
+ for device in data[u'Devices']:
+ drive_result = {}
+ for property in properties:
+ if property in device:
+ drive_result[property] = device[property]
+ drive_results.append(drive_result)
+ drives = {'Controller': controller_name,
+ 'Drives': drive_results}
+ result["entries"].append(drives)
+
+ return result
+
+ def get_multi_disk_inventory(self):
+ return self.aggregate_systems(self.get_disk_inventory)
+
+ def get_volume_inventory(self, systems_uri):
+ result = {'entries': []}
+ controller_list = []
+ volume_list = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['Id', 'Name', 'RAIDType', 'VolumeType', 'BlockSizeBytes',
+ 'Capacity', 'CapacityBytes', 'CapacitySources',
+ 'Encrypted', 'EncryptionTypes', 'Identifiers',
+ 'Operations', 'OptimumIOSizeBytes', 'AccessCapabilities',
+ 'AllocatedPools', 'Status']
+
+ # Find Storage service
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'SimpleStorage' not in data and 'Storage' not in data:
+ return {'ret': False, 'msg': "SimpleStorage and Storage resource \
+ not found"}
+
+ if 'Storage' in data:
+ # Get a list of all storage controllers and build respective URIs
+ storage_uri = data[u'Storage'][u'@odata.id']
+ response = self.get_request(self.root_uri + storage_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if data.get('Members'):
+ for controller in data[u'Members']:
+ controller_list.append(controller[u'@odata.id'])
+ for c in controller_list:
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ controller_name = 'Controller 1'
+ if 'StorageControllers' in data:
+ sc = data['StorageControllers']
+ if sc:
+ if 'Name' in sc[0]:
+ controller_name = sc[0]['Name']
+ else:
+ sc_id = sc[0].get('Id', '1')
+ controller_name = 'Controller %s' % sc_id
+ volume_results = []
+ if 'Volumes' in data:
+ # Get a list of all volumes and build respective URIs
+ volumes_uri = data[u'Volumes'][u'@odata.id']
+ response = self.get_request(self.root_uri + volumes_uri)
+ data = response['data']
+
+ if data.get('Members'):
+ for volume in data[u'Members']:
+ volume_list.append(volume[u'@odata.id'])
+ for v in volume_list:
+ uri = self.root_uri + v
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ volume_result = {}
+ for property in properties:
+ if property in data:
+ if data[property] is not None:
+ volume_result[property] = data[property]
+
+ # Get related Drives Id
+ drive_id_list = []
+ if 'Links' in data:
+ if 'Drives' in data[u'Links']:
+ for link in data[u'Links'][u'Drives']:
+ drive_id_link = link[u'@odata.id']
+ drive_id = drive_id_link.split("/")[-1]
+ drive_id_list.append({'Id': drive_id})
+ volume_result['Linked_drives'] = drive_id_list
+ volume_results.append(volume_result)
+ volumes = {'Controller': controller_name,
+ 'Volumes': volume_results}
+ result["entries"].append(volumes)
+ else:
+ return {'ret': False, 'msg': "Storage Members array is either empty or missing"}
+
+ return result
+
+ def get_multi_volume_inventory(self):
+ return self.aggregate_systems(self.get_volume_inventory)
+
+ def manage_system_indicator_led(self, command):
+ return self.manage_indicator_led(command, self.systems_uri)
+
+ def manage_chassis_indicator_led(self, command):
+ return self.manage_indicator_led(command, self.chassis_uri)
+
+ def manage_indicator_led(self, command, resource_uri=None):
+ # If no resource is specified; default to the Chassis resource
+ if resource_uri is None:
+ resource_uri = self.chassis_uri
+
+ # Perform a PATCH on the IndicatorLED property based on the requested command
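+ # 'Lit', 'Off' and 'Blinking' are standard values of the Redfish
+ # IndicatorLED enum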
+ payloads = {'IndicatorLedOn': 'Lit', 'IndicatorLedOff': 'Off', 'IndicatorLedBlink': 'Blinking'}
+ if command not in payloads:
+ return {'ret': False, 'msg': 'Invalid command (%s)' % command}
+ payload = {'IndicatorLED': payloads[command]}
+ resp = self.patch_request(self.root_uri + resource_uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Set IndicatorLED to %s' % payloads[command]
+ return resp
+
+ def _map_reset_type(self, reset_type, allowable_values):
+ equiv_types = {
+ 'On': 'ForceOn',
+ 'ForceOn': 'On',
+ 'ForceOff': 'GracefulShutdown',
+ 'GracefulShutdown': 'ForceOff',
+ 'GracefulRestart': 'ForceRestart',
+ 'ForceRestart': 'GracefulRestart'
+ }
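+ # Example: if a service only allows ['On', 'ForceOff', 'ForceRestart'],
+ # a requested 'GracefulRestart' is mapped to 'ForceRestart'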
+
+ if reset_type in allowable_values:
+ return reset_type
+ if reset_type not in equiv_types:
+ return reset_type
+ mapped_type = equiv_types[reset_type]
+ if mapped_type in allowable_values:
+ return mapped_type
+ return reset_type
+
+ def manage_system_power(self, command):
+ return self.manage_power(command, self.systems_uri,
+ '#ComputerSystem.Reset')
+
+ def manage_manager_power(self, command):
+ return self.manage_power(command, self.manager_uri,
+ '#Manager.Reset')
+
+ def manage_power(self, command, resource_uri, action_name):
+ key = "Actions"
+ reset_type_values = ['On', 'ForceOff', 'GracefulShutdown',
+ 'GracefulRestart', 'ForceRestart', 'Nmi',
+ 'ForceOn', 'PushPowerButton', 'PowerCycle']
+
+ # command should be PowerOn, PowerForceOff, etc.
+ if not command.startswith('Power'):
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+ reset_type = command[5:]
+
+ # map Reboot to a ResetType that does a reboot
+ if reset_type == 'Reboot':
+ reset_type = 'GracefulRestart'
+
+ if reset_type not in reset_type_values:
+ return {'ret': False, 'msg': 'Invalid Command (%s)' % command}
+
+ # read the resource and get the current power state
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ power_state = data.get('PowerState')
+
+ # if power is already in target state, nothing to do
+ if power_state == "On" and reset_type in ['On', 'ForceOn']:
+ return {'ret': True, 'changed': False}
+ if power_state == "Off" and reset_type in ['GracefulShutdown', 'ForceOff']:
+ return {'ret': True, 'changed': False}
+
+ # get the reset Action and target URI
+ if key not in data or action_name not in data[key]:
+ return {'ret': False, 'msg': 'Action %s not found' % action_name}
+ reset_action = data[key][action_name]
+ if 'target' not in reset_action:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action_name}
+ action_uri = reset_action['target']
+
+ # get AllowableValues
+ ai = self._get_all_action_info_values(reset_action)
+ allowable_values = ai.get('ResetType', {}).get('AllowableValues', [])
+
+ # map ResetType to an allowable value if needed
+ if reset_type not in allowable_values:
+ reset_type = self._map_reset_type(reset_type, allowable_values)
+
+ # define payload
+ payload = {'ResetType': reset_type}
+
+ # POST to Action URI
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
+
+ def _find_account_uri(self, username=None, acct_id=None):
+ if not any((username, acct_id)):
+ return {'ret': False, 'msg':
+ 'Must provide either account_id or account_username'}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
+ if username:
+ if username == data.get('UserName'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+ if acct_id:
+ if acct_id == data.get('Id'):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No account with the given account_id or account_username found'}
+
+ def _find_empty_account_slot(self):
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+ if uris:
+ # first slot may be reserved, so move to end of list
+ uris += [uris.pop(0)]
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ headers = response['headers']
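+ # An unused slot is conventionally reported with an empty UserName
+ # and Enabled set to false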
+ if data.get('UserName') == "" and not data.get('Enabled', True):
+ return {'ret': True, 'data': data,
+ 'headers': headers, 'uri': uri}
+
+ return {'ret': False, 'no_match': True, 'msg':
+ 'No empty account slot found'}
+
+ def list_users(self):
+ result = {}
+ # Listing all users can be slow since each account resource must be retrieved individually
+ user_list = []
+ users_results = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled']
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for users in data.get('Members', []):
+ user_list.append(users[u'@odata.id']) # user_list[] are URIs
+
+ # for each user, get details
+ for uri in user_list:
+ user = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ user[property] = data[property]
+
+ users_results.append(user)
+ result["entries"] = users_results
+ return result
+
+ def add_user_via_patch(self, user):
+ if user.get('account_id'):
+ # If Id slot specified, use it
+ response = self._find_account_uri(acct_id=user.get('account_id'))
+ else:
+ # Otherwise find first empty slot
+ response = self._find_empty_account_slot()
+
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def add_user(self, user):
+ if not user.get('account_username'):
+ return {'ret': False, 'msg':
+ 'Must provide account_username for AddUser command'}
+
+ response = self._find_account_uri(username=user.get('account_username'))
+ if response['ret']:
+ # account_username already exists, nothing to do
+ return {'ret': True, 'changed': False}
+
+ response = self.get_request(self.root_uri + self.accounts_uri)
+ if not response['ret']:
+ return response
+ headers = response['headers']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'POST' not in methods:
+ # if Allow header present and POST not listed, add via PATCH
+ return self.add_user_via_patch(user)
+
+ payload = {}
+ if user.get('account_username'):
+ payload['UserName'] = user.get('account_username')
+ if user.get('account_password'):
+ payload['Password'] = user.get('account_password')
+ if user.get('account_roleid'):
+ payload['RoleId'] = user.get('account_roleid')
+ if user.get('account_id'):
+ payload['Id'] = user.get('account_id')
+
+ response = self.post_request(self.root_uri + self.accounts_uri, payload)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if POST returned a 405, try to add via PATCH
+ return self.add_user_via_patch(user)
+ else:
+ return response
+ return {'ret': True}
+
+ def enable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+
+ payload = {'Enabled': True}
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def delete_user_via_patch(self, user, uri=None, data=None):
+ if not uri:
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+ uri = response['uri']
+ data = response['data']
+
+ payload = {'UserName': ''}
+ if data.get('Enabled', False):
+ payload['Enabled'] = False
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def delete_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ if response.get('no_match'):
+ # account does not exist, nothing to do
+ return {'ret': True, 'changed': False}
+ else:
+ # some error encountered
+ return response
+
+ uri = response['uri']
+ headers = response['headers']
+ data = response['data']
+
+ if 'allow' in headers:
+ methods = [m.strip() for m in headers.get('allow').split(',')]
+ if 'DELETE' not in methods:
+ # if Allow header present and DELETE not listed, del via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+
+ response = self.delete_request(self.root_uri + uri)
+ if not response['ret']:
+ if response.get('status') == 405:
+ # if DELETE returned a 405, try to delete via PATCH
+ return self.delete_user_via_patch(user, uri=uri, data=data)
+ else:
+ return response
+ return {'ret': True}
+
+ def disable_user(self, user):
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+
+ uri = response['uri']
+ payload = {'Enabled': False}
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def update_user_role(self, user):
+ if not user.get('account_roleid'):
+ return {'ret': False, 'msg':
+ 'Must provide account_roleid for UpdateUserRole command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+
+ uri = response['uri']
+ payload = {'RoleId': user['account_roleid']}
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def update_user_password(self, user):
+ if not user.get('account_password'):
+ return {'ret': False, 'msg':
+ 'Must provide account_password for UpdateUserPassword command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+
+ uri = response['uri']
+ payload = {'Password': user['account_password']}
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def update_user_name(self, user):
+ if not user.get('account_updatename'):
+ return {'ret': False, 'msg':
+ 'Must provide account_updatename for UpdateUserName command'}
+
+ response = self._find_account_uri(username=user.get('account_username'),
+ acct_id=user.get('account_id'))
+ if not response['ret']:
+ return response
+
+ uri = response['uri']
+ payload = {'UserName': user['account_updatename']}
+ return self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+
+ def update_accountservice_properties(self, user):
+ account_properties = user.get('account_properties')
+ if account_properties is None:
+ return {'ret': False, 'msg':
+ 'Must provide account_properties for UpdateAccountServiceProperties command'}
+
+ # Find the AccountService resource
+ response = self.get_request(self.root_uri + self.service_root)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ accountservice_uri = data.get("AccountService", {}).get("@odata.id")
+ if accountservice_uri is None:
+ return {'ret': False, 'msg': "AccountService resource not found"}
+
+ # Perform a PATCH on the AccountService resource with the requested properties
+ resp = self.patch_request(self.root_uri + accountservice_uri, account_properties, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified account service'
+ return resp
+
+ def get_sessions(self):
+ result = {}
+ # Listing all sessions can be slow since each session resource must be retrieved individually
+ session_list = []
+ sessions_results = []
+ # Get these entries, but do not fail if any are not found
+ properties = ['Description', 'Id', 'Name', 'UserName']
+
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for sessions in data[u'Members']:
+ session_list.append(sessions[u'@odata.id']) # session_list[] are URIs
+
+ # for each session, get details
+ for uri in session_list:
+ session = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ session[property] = data[property]
+
+ sessions_results.append(session)
+ result["entries"] = sessions_results
+ return result
+
+ def clear_sessions(self):
+ response = self.get_request(self.root_uri + self.sessions_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # if no active sessions, return as success
+ if data['Members@odata.count'] == 0:
+ return {'ret': True, 'changed': False, 'msg': "There are no active sessions"}
+
+ # loop to delete every active session
+ for session in data[u'Members']:
+ response = self.delete_request(self.root_uri + session[u'@odata.id'])
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True, 'msg': "Cleared all sessions successfully"}
+
+ def create_session(self):
+ if not self.creds.get('user') or not self.creds.get('pswd'):
+ return {'ret': False, 'msg':
+ 'Must provide the username and password parameters for '
+ 'the CreateSession command'}
+
+ payload = {
+ 'UserName': self.creds['user'],
+ 'Password': self.creds['pswd']
+ }
+ response = self.post_request(self.root_uri + self.sessions_uri, payload)
+ if response['ret'] is False:
+ return response
+
+ headers = response['headers']
+ if 'x-auth-token' not in headers:
+ return {'ret': False, 'msg':
+ 'The service did not return the X-Auth-Token header in '
+ 'the response from the Sessions collection POST'}
+
+ if 'location' not in headers:
+ self.module.warn(
+ 'The service did not return the Location header for the '
+ 'session URL in the response from the Sessions collection '
+ 'POST')
+ session_uri = None
+ else:
+ session_uri = urlparse(headers.get('location')).path
+
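+ # Return the token (for X-Auth-Token based requests) and the session
+ # URI (so the session can be deleted later), e.g. (illustrative):
+ # {'token': 'abc123', 'uri': '/redfish/v1/SessionService/Sessions/1'}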
+ session = dict()
+ session['token'] = headers.get('x-auth-token')
+ session['uri'] = session_uri
+ return {'ret': True, 'changed': True, 'session': session,
+ 'msg': 'Session created successfully'}
+
+ def delete_session(self, session_uri):
+ if not session_uri:
+ return {'ret': False, 'msg':
+ 'Must provide the session_uri parameter for the '
+ 'DeleteSession command'}
+
+ response = self.delete_request(self.root_uri + session_uri)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True,
+ 'msg': 'Session deleted successfully'}
+
+ def get_firmware_update_capabilities(self):
+ result = {}
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+
+ result['entries'] = {}
+
+ data = response['data']
+
+ if "Actions" in data:
+ actions = data['Actions']
+ if len(actions) > 0:
+ for key in actions.keys():
+ action = actions.get(key)
+ if 'title' in action:
+ title = action['title']
+ else:
+ title = key
+ result['entries'][title] = action.get('TransferProtocol@Redfish.AllowableValues',
+ ["Key TransferProtocol@Redfish.AllowableValues not found"])
+ else:
+ return {'ret': "False", 'msg': "Actions list is empty."}
+ else:
+ return {'ret': "False", 'msg': "Key Actions not found."}
+ return result
+
+ def _software_inventory(self, uri):
+ result = {}
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ result['entries'] = []
+ for member in data[u'Members']:
+ uri = self.root_uri + member[u'@odata.id']
+ # Get details for each software or firmware member
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ software = {}
+ # Get these standard properties if present
+ for key in ['Name', 'Id', 'Status', 'Version', 'Updateable',
+ 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer',
+ 'ReleaseDate']:
+ if key in data:
+ software[key] = data.get(key)
+ result['entries'].append(software)
+ return result
+
+ def get_firmware_inventory(self):
+ if self.firmware_uri is None:
+ return {'ret': False, 'msg': 'No FirmwareInventory resource found'}
+ else:
+ return self._software_inventory(self.firmware_uri)
+
+ def get_software_inventory(self):
+ if self.software_uri is None:
+ return {'ret': False, 'msg': 'No SoftwareInventory resource found'}
+ else:
+ return self._software_inventory(self.software_uri)
+
+ def _operation_results(self, response, data, handle=None):
+ """
+ Builds the results for an operation from task, job, or action response.
+
+ :param response: HTTP response object
+ :param data: HTTP response data
+ :param handle: The task or job handle that was last used
+ :return: dict containing operation results
+ """
+
+ operation_results = {'status': None, 'messages': [], 'handle': None, 'ret': True,
+ 'resets_requested': []}
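+ # 'handle' holds the task or job monitor URI while the operation is
+ # in flight; 'resets_requested' lists components that need a reset
+ # before the operation can make further progress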
+
+ if response.status == 204:
+ # No content; successful, but nothing to return
+ # Use the Redfish "Completed" enum from TaskState for the operation status
+ operation_results['status'] = 'Completed'
+ else:
+ # Parse the response body for details
+
+ # Determine the next handle, if any
+ operation_results['handle'] = handle
+ if response.status == 202:
+ # Task generated; get the task monitor URI
+ operation_results['handle'] = response.getheader('Location', handle)
+
+ # Pull out the status and messages based on the body format
+ if data is not None:
+ response_type = data.get('@odata.type', '')
+ if response_type.startswith('#Task.') or response_type.startswith('#Job.'):
+ # Task and Job have similar enough structures to treat the same
+ operation_results['status'] = data.get('TaskState', data.get('JobState'))
+ operation_results['messages'] = data.get('Messages', [])
+ else:
+ # Error response body, which is a bit of a misnomer since it's used in successful action responses
+ operation_results['status'] = 'Completed'
+ if response.status >= 400:
+ operation_results['status'] = 'Exception'
+ operation_results['messages'] = data.get('error', {}).get('@Message.ExtendedInfo', [])
+ else:
+ # No response body (or malformed); build based on status code
+ operation_results['status'] = 'Completed'
+ if response.status == 202:
+ operation_results['status'] = 'New'
+ elif response.status >= 400:
+ operation_results['status'] = 'Exception'
+
+ # Clear out the handle if the operation is complete
+ if operation_results['status'] in ['Completed', 'Cancelled', 'Exception', 'Killed']:
+ operation_results['handle'] = None
+
+ # Scan the messages to see if next steps are needed
+ for message in operation_results['messages']:
+ message_id = message['MessageId']
+
+ if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'):
+ # Operation rerouted to a job; update the status and handle
+ operation_results['status'] = 'New'
+ operation_results['handle'] = message['MessageArgs'][0]
+ operation_results['resets_requested'] = []
+ # No need to process other messages in this case
+ break
+
+ if message_id.startswith('Base.1.') and message_id.endswith('.ResetRequired'):
+ # A reset to some device is needed to continue the update
+ reset = {'uri': message['MessageArgs'][0], 'type': message['MessageArgs'][1]}
+ operation_results['resets_requested'].append(reset)
+
+ return operation_results
+
+ def simple_update(self, update_opts):
+ image_uri = update_opts.get('update_image_uri')
+ protocol = update_opts.get('update_protocol')
+ targets = update_opts.get('update_targets')
+ creds = update_opts.get('update_creds')
+ apply_time = update_opts.get('update_apply_time')
+
+ if not image_uri:
+ return {'ret': False, 'msg':
+ 'Must specify update_image_uri for the SimpleUpdate command'}
+
+ response = self.get_request(self.root_uri + self.update_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Actions' not in data:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ if '#UpdateService.SimpleUpdate' not in data['Actions']:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ action = data['Actions']['#UpdateService.SimpleUpdate']
+ if 'target' not in action:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ update_uri = action['target']
+ if protocol:
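+ # Note: 'NSF' appears to be a long-standing misspelling of 'NFS';
+ # both are kept so services reporting either value still match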
+ default_values = ['CIFS', 'FTP', 'SFTP', 'HTTP', 'HTTPS', 'NSF',
+ 'SCP', 'TFTP', 'OEM', 'NFS']
+ allowable_values = self._get_allowable_values(action,
+ 'TransferProtocol',
+ default_values)
+ if protocol not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified update_protocol (%s) not supported '
+ 'by service. Supported protocols: %s' %
+ (protocol, allowable_values)}
+ if targets:
+ allowable_values = self._get_allowable_values(action, 'Targets')
+ if allowable_values:
+ for target in targets:
+ if target not in allowable_values:
+ return {'ret': False,
+ 'msg': 'Specified target (%s) not supported '
+ 'by service. Supported targets: %s' %
+ (target, allowable_values)}
+
+ payload = {
+ 'ImageURI': image_uri
+ }
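+ # A fully-populated payload might look like (illustrative values):
+ # {'ImageURI': 'http://server/firmware.bin', 'TransferProtocol': 'HTTP',
+ # 'Targets': ['/redfish/v1/UpdateService/FirmwareInventory/BMC'],
+ # 'Username': 'user', 'Password': 'pass',
+ # '@Redfish.OperationApplyTime': 'Immediate'}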
+ if protocol:
+ payload["TransferProtocol"] = protocol
+ if targets:
+ payload["Targets"] = targets
+ if creds:
+ if creds.get('username'):
+ payload["Username"] = creds.get('username')
+ if creds.get('password'):
+ payload["Password"] = creds.get('password')
+ if apply_time:
+ payload["@Redfish.OperationApplyTime"] = apply_time
+ response = self.post_request(self.root_uri + update_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "SimpleUpdate requested",
+ 'update_status': self._operation_results(response['resp'], response['data'])}
+
+ def get_update_status(self, update_handle):
+ """
+ Gets the status of an update operation.
+
+ :param update_handle: The task or job handle tracking the update
+ :return: dict containing the response of the update status
+ """
+
+ if not update_handle:
+ return {'ret': False, 'msg': 'Must provide a handle tracking the update.'}
+
+ # Get the task or job tracking the update
+ response = self.get_request(self.root_uri + update_handle)
+ if response['ret'] is False:
+ return response
+
+ # Inspect the response to build the update status
+ return self._operation_results(response['resp'], response['data'], update_handle)
+
+ def perform_requested_update_operations(self, update_handle):
+ """
+ Performs requested operations to allow the update to continue.
+
+ :param update_handle: The task or job handle tracking the update
+ :return: dict containing the result of the operations
+ """
+
+ # Get the current update status
+ update_status = self.get_update_status(update_handle)
+ if update_status['ret'] is False:
+ return update_status
+
+ changed = False
+
+ # Perform any requested updates
+ for reset in update_status['resets_requested']:
+ resp = self.post_request(self.root_uri + reset['uri'], {'ResetType': reset['type']})
+ if resp['ret'] is False:
+ # Override the 'changed' indicator since other resets may have
+ # been successful
+ resp['changed'] = changed
+ return resp
+ changed = True
+
+ msg = 'No operations required for the update'
+ if changed:
+ # This message may need fine-tuning if the scope of the
+ # requested operations grows over time
+ msg = 'One or more components reset to continue the update'
+ return {'ret': True, 'changed': changed, 'msg': msg}
+
+ def get_bios_attributes(self, systems_uri):
+ result = {}
+ bios_attributes = {}
+ key = "Bios"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ for attribute_name, attribute_value in data[u'Attributes'].items():
+ bios_attributes[attribute_name] = attribute_value
+ result["entries"] = bios_attributes
+ return result
+
+ def get_multi_bios_attributes(self):
+ return self.aggregate_systems(self.get_bios_attributes)
+
+ def _get_boot_options_dict(self, boot):
+ # Get these entries from BootOption, if present
+ properties = ['DisplayName', 'BootOptionReference']
+
+ # Retrieve BootOptions if present
+ if 'BootOptions' in boot and '@odata.id' in boot['BootOptions']:
+ boot_options_uri = boot['BootOptions']["@odata.id"]
+ # Get BootOptions resource
+ response = self.get_request(self.root_uri + boot_options_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+
+ # Retrieve Members array
+ if 'Members' not in data:
+ return {}
+ members = data['Members']
+ else:
+ members = []
+
+ # Build dict of BootOptions keyed by BootOptionReference
+ boot_options_dict = {}
+ for member in members:
+ if '@odata.id' not in member:
+ return {}
+ boot_option_uri = member['@odata.id']
+ response = self.get_request(self.root_uri + boot_option_uri)
+ if response['ret'] is False:
+ return {}
+ data = response['data']
+ if 'BootOptionReference' not in data:
+ return {}
+ boot_option_ref = data['BootOptionReference']
+
+ # fetch the props to display for this boot device
+ boot_props = {}
+ for prop in properties:
+ if prop in data:
+ boot_props[prop] = data[prop]
+
+ boot_options_dict[boot_option_ref] = boot_props
+
+ return boot_options_dict
+
+ def get_boot_order(self, systems_uri):
+ result = {}
+
+ # Retrieve System resource
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # Build boot device list
+ boot_device_list = []
+ for ref in boot_order:
+ boot_device_list.append(
+ boot_options_dict.get(ref, {'BootOptionReference': ref}))
+
+ result["entries"] = boot_device_list
+ return result
+
+ def get_multi_boot_order(self):
+ return self.aggregate_systems(self.get_boot_order)
+
+ def get_boot_override(self, systems_uri):
+ result = {}
+
+ properties = ["BootSourceOverrideEnabled", "BootSourceOverrideTarget",
+ "BootSourceOverrideMode", "UefiTargetBootSourceOverride", "BootSourceOverrideTarget@Redfish.AllowableValues"]
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if 'Boot' not in data:
+ return {'ret': False, 'msg': "Key Boot not found"}
+
+ boot = data['Boot']
+
+ boot_overrides = {}
+ if "BootSourceOverrideEnabled" in boot:
+ if boot["BootSourceOverrideEnabled"] is not False:
+ for property in properties:
+ if property in boot:
+ if boot[property] is not None:
+ boot_overrides[property] = boot[property]
+ else:
+ return {'ret': False, 'msg': "No boot override is enabled."}
+
+ result['entries'] = boot_overrides
+ return result
+
+ def get_multi_boot_override(self):
+ return self.aggregate_systems(self.get_boot_override)
+
+ def set_bios_default_settings(self):
+ # Find the Bios resource from the requested ComputerSystem resource
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ bios_uri = data.get('Bios', {}).get('@odata.id')
+ if bios_uri is None:
+ return {'ret': False, 'msg': 'Bios resource not found'}
+
+ # Find the URI of the ResetBios action
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ reset_bios_uri = data.get('Actions', {}).get('#Bios.ResetBios', {}).get('target')
+ if reset_bios_uri is None:
+ return {'ret': False, 'msg': 'ResetBios action not found'}
+
+ # Perform the ResetBios action
+ response = self.post_request(self.root_uri + reset_bios_uri, {})
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "BIOS set to default settings"}
+
+ def set_boot_override(self, boot_opts):
+ # Extract the requested boot override options
+ bootdevice = boot_opts.get('bootdevice')
+ uefi_target = boot_opts.get('uefi_target')
+ boot_next = boot_opts.get('boot_next')
+ override_enabled = boot_opts.get('override_enabled')
+ boot_override_mode = boot_opts.get('boot_override_mode')
+ if not bootdevice and override_enabled != 'Disabled':
+ return {'ret': False,
+ 'msg': "bootdevice option required for temporary boot override"}
+
+ # Get the current boot override options from the Boot property
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ boot = data.get('Boot')
+ if boot is None:
+ return {'ret': False, 'msg': "Boot property not found"}
+ cur_override_mode = boot.get('BootSourceOverrideMode')
+
+ # Check if the requested target is supported by the system
+ if override_enabled != 'Disabled':
+ annotation = 'BootSourceOverrideTarget@Redfish.AllowableValues'
+ if annotation in boot:
+ allowable_values = boot[annotation]
+ if isinstance(allowable_values, list) and bootdevice not in allowable_values:
+ return {'ret': False,
+ 'msg': "Boot device %s not in list of allowable values (%s)" %
+ (bootdevice, allowable_values)}
+
+ # Build the request payload based on the desired boot override options
+ if override_enabled == 'Disabled':
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': 'None'
+ }
+ }
+ elif bootdevice == 'UefiTarget':
+ if not uefi_target:
+ return {'ret': False,
+ 'msg': "uefi_target option required to SetOneTimeBoot for UefiTarget"}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'UefiTargetBootSourceOverride': uefi_target
+ }
+ }
+ # If needed, also specify UEFI mode
+ if cur_override_mode == 'Legacy':
+ payload['Boot']['BootSourceOverrideMode'] = 'UEFI'
+ elif bootdevice == 'UefiBootNext':
+ if not boot_next:
+ return {'ret': False,
+ 'msg': "boot_next option required to SetOneTimeBoot for UefiBootNext"}
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice,
+ 'BootNext': boot_next
+ }
+ }
+ # If needed, also specify UEFI mode
+ if cur_override_mode == 'Legacy':
+ payload['Boot']['BootSourceOverrideMode'] = 'UEFI'
+ else:
+ payload = {
+ 'Boot': {
+ 'BootSourceOverrideEnabled': override_enabled,
+ 'BootSourceOverrideTarget': bootdevice
+ }
+ }
+ if boot_override_mode:
+ payload['Boot']['BootSourceOverrideMode'] = boot_override_mode
+
+ # Apply the requested boot override request
+ resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True)
+ if resp['ret'] is False:
+ # WORKAROUND
+ # Older Dell systems do not allow BootSourceOverrideEnabled to be
+ # specified with UefiTarget as the target device
+ vendor = self._get_vendor()['Vendor']
+ if vendor == 'Dell':
+ if bootdevice == 'UefiTarget' and override_enabled != 'Disabled':
+ payload['Boot'].pop('BootSourceOverrideEnabled', None)
+ resp = self.patch_request(self.root_uri + self.systems_uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Updated the boot override settings'
+ return resp
+
+ def set_bios_attributes(self, attributes):
+ # Find the Bios resource from the requested ComputerSystem resource
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ bios_uri = data.get('Bios', {}).get('@odata.id')
+ if bios_uri is None:
+ return {'ret': False, 'msg': 'Bios resource not found'}
+
+ # Get the current BIOS settings
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Make a copy of the attributes dict
+ attrs_to_patch = dict(attributes)
+ # Dict to hold attributes that the service does not support
+ attrs_bad = {}
+
+ # Check the attributes
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ # Remove and proceed to next attribute if this isn't valid
+ attrs_bad.update({attr_name: attr_value})
+ del attrs_to_patch[attr_name]
+ continue
+
+ # If already set to requested value, remove it from PATCH payload
+ if data[u'Attributes'][attr_name] == attributes[attr_name]:
+ del attrs_to_patch[attr_name]
+
+ warning = ""
+ if attrs_bad:
+ warning = "Unsupported attributes %s" % (attrs_bad)
+
+ # Return success w/ changed=False if no attrs need to be changed
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "BIOS attributes already set",
+ 'warning': warning}
+
+ # Get the SettingsObject URI to apply the attributes
+ set_bios_attr_uri = data.get("@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id")
+ if set_bios_attr_uri is None:
+ return {'ret': False, 'msg': "Settings resource for BIOS attributes not found."}
+
+ # Construct payload and issue PATCH command
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + set_bios_attr_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "Modified BIOS attributes %s" % (attrs_to_patch),
+ 'warning': warning}
+
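+ # Example (illustrative) input for set_bios_attributes(); attribute names
+ # vary by vendor, so these keys are placeholders:
+ #
+ #   set_bios_attributes({'BootMode': 'Uefi', 'ProcTurboMode': 'Disabled'})
+ #
+ # Attributes already at the requested value are dropped from the PATCH
+ # payload, and unknown attributes are reported via 'warning'.
+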
+ def set_boot_order(self, boot_list):
+ if not boot_list:
+ return {'ret': False,
+ 'msg': "boot_order list required for SetBootOrder command"}
+
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Confirm needed Boot properties are present
+ if 'Boot' not in data or 'BootOrder' not in data['Boot']:
+ return {'ret': False, 'msg': "Key BootOrder not found"}
+
+ boot = data['Boot']
+ boot_order = boot['BootOrder']
+ boot_options_dict = self._get_boot_options_dict(boot)
+
+ # Verify the requested boot options are valid
+ if boot_options_dict:
+ boot_option_references = boot_options_dict.keys()
+ for ref in boot_list:
+ if ref not in boot_option_references:
+ return {'ret': False,
+ 'msg': "BootOptionReference %s not found in BootOptions" % ref}
+
+ # Apply the boot order
+ payload = {
+ 'Boot': {
+ 'BootOrder': boot_list
+ }
+ }
+ resp = self.patch_request(self.root_uri + systems_uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified the boot order'
+ return resp
+
+ def set_default_boot_order(self):
+ systems_uri = self.systems_uri
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # get the #ComputerSystem.SetDefaultBootOrder Action and target URI
+ action = '#ComputerSystem.SetDefaultBootOrder'
+ if 'Actions' not in data or action not in data['Actions']:
+ return {'ret': False, 'msg': 'Action %s not found' % action}
+ if 'target' not in data['Actions'][action]:
+ return {'ret': False,
+ 'msg': 'target URI missing from Action %s' % action}
+ action_uri = data['Actions'][action]['target']
+
+ # POST to Action URI
+ payload = {}
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "BootOrder set to default"}
+
+ def get_chassis_inventory(self):
+ result = {}
+ chassis_results = []
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Id', 'ChassisType', 'PartNumber', 'AssetTag',
+ 'Manufacturer', 'IndicatorLED', 'SerialNumber', 'Model']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ chassis_result = {}
+ for property in properties:
+ if property in data:
+ chassis_result[property] = data[property]
+ chassis_results.append(chassis_result)
+
+ result["entries"] = chassis_results
+ return result
+
+ def get_fan_inventory(self):
+ result = {}
+ fan_results = []
+ key = "Thermal"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'FanName', 'Reading', 'ReadingUnits', 'Status']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ # Found a "Thermal" entry, which contains the fan information
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Checking if fans are present
+ if u'Fans' in data:
+ for device in data[u'Fans']:
+ fan = {}
+ for property in properties:
+ if property in device:
+ fan[property] = device[property]
+ fan_results.append(fan)
+ else:
+ return {'ret': False, 'msg': "No Fans present"}
+ result["entries"] = fan_results
+ return result
+
+ def get_chassis_power(self):
+ result = {}
+ key = "Power"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PowerAllocatedWatts',
+ 'PowerAvailableWatts', 'PowerCapacityWatts',
+ 'PowerConsumedWatts', 'PowerMetrics',
+ 'PowerRequestedWatts', 'RelatedItem', 'Status']
+
+ chassis_power_results = []
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ chassis_power_result = {}
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ response = self.get_request(self.root_uri + data[key]['@odata.id'])
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'PowerControl' in data:
+ if len(data['PowerControl']) > 0:
+ data = data['PowerControl'][0]
+ for property in properties:
+ if property in data:
+ chassis_power_result[property] = data[property]
+ chassis_power_results.append(chassis_power_result)
+
+ if len(chassis_power_results) > 0:
+ result['entries'] = chassis_power_results
+ return result
+ else:
+ return {'ret': False, 'msg': 'Power information not found.'}
+
+ def get_chassis_thermals(self):
+ result = {}
+ sensors = []
+ key = "Thermal"
+
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'PhysicalContext', 'UpperThresholdCritical',
+ 'UpperThresholdFatal', 'UpperThresholdNonCritical',
+ 'LowerThresholdCritical', 'LowerThresholdFatal',
+ 'LowerThresholdNonCritical', 'MaxReadingRangeTemp',
+ 'MinReadingRangeTemp', 'ReadingCelsius', 'RelatedItem',
+ 'SensorNumber', 'Status']
+
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key in data:
+ thermal_uri = data[key]["@odata.id"]
+ response = self.get_request(self.root_uri + thermal_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if "Temperatures" in data:
+ for sensor in data[u'Temperatures']:
+ sensor_result = {}
+ for property in properties:
+ if property in sensor:
+ if sensor[property] is not None:
+ sensor_result[property] = sensor[property]
+ sensors.append(sensor_result)
+
+ if not sensors:
+ return {'ret': False, 'msg': 'Key Temperatures was not found.'}
+
+ result['entries'] = sensors
+ return result
+
+ def get_cpu_inventory(self, systems_uri):
+ result = {}
+ cpu_list = []
+ cpu_results = []
+ key = "Processors"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz',
+ 'TotalCores', 'TotalThreads', 'Status']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ processors_uri = data[key]["@odata.id"]
+
+ # Get a list of all CPUs and build respective URIs
+ response = self.get_request(self.root_uri + processors_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for cpu in data[u'Members']:
+ cpu_list.append(cpu[u'@odata.id'])
+
+ for c in cpu_list:
+ cpu = {}
+ uri = self.root_uri + c
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ cpu[property] = data[property]
+
+ cpu_results.append(cpu)
+ result["entries"] = cpu_results
+ return result
+
+ def get_multi_cpu_inventory(self):
+ return self.aggregate_systems(self.get_cpu_inventory)
+
+ def get_memory_inventory(self, systems_uri):
+ result = {}
+ memory_list = []
+ memory_results = []
+ key = "Memory"
+ # Get these entries, but do not fail if not found
+ properties = ['Id', 'SerialNumber', 'MemoryDeviceType', 'PartNumber',
+ 'MemoryLocation', 'RankCount', 'CapacityMiB', 'OperatingMemoryModes', 'Status', 'Manufacturer', 'Name']
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ memory_uri = data[key]["@odata.id"]
+
+ # Get a list of all DIMMs and build respective URIs
+ response = self.get_request(self.root_uri + memory_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for dimm in data[u'Members']:
+ memory_list.append(dimm[u'@odata.id'])
+
+ for m in memory_list:
+ dimm = {}
+ uri = self.root_uri + m
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if "Status" in data:
+ if "State" in data["Status"]:
+ if data["Status"]["State"] == "Absent":
+ continue
+ else:
+ continue
+
+ for property in properties:
+ if property in data:
+ dimm[property] = data[property]
+
+ memory_results.append(dimm)
+ result["entries"] = memory_results
+ return result
+
+ def get_multi_memory_inventory(self):
+ return self.aggregate_systems(self.get_memory_inventory)
+
+ def get_nic(self, resource_uri):
+ result = {}
+ properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses',
+ 'NameServers', 'MACAddress', 'PermanentMACAddress',
+ 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status']
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ nic = {}
+ for property in properties:
+ if property in data:
+ nic[property] = data[property]
+ result['entries'] = nic
+ return result
+
+ def get_nic_inventory(self, resource_uri):
+ result = {}
+ nic_list = []
+ nic_results = []
+ key = "EthernetInterfaces"
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ ethernetinterfaces_uri = data[key]["@odata.id"]
+
+ # Get a list of all network controllers and build respective URIs
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for nic in data[u'Members']:
+ nic_list.append(nic[u'@odata.id'])
+
+ for n in nic_list:
+ nic = self.get_nic(n)
+ if nic['ret']:
+ nic_results.append(nic['entries'])
+ result["entries"] = nic_results
+ return result
+
+ def get_multi_nic_inventory(self, resource_type):
+ ret = True
+ entries = []
+
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uris = self.systems_uris
+ elif resource_type == 'Manager':
+ resource_uris = self.manager_uris
+
+ for resource_uri in resource_uris:
+ inventory = self.get_nic_inventory(resource_uri)
+ ret = inventory.pop('ret') and ret
+ if 'entries' in inventory:
+ entries.append(({'resource_uri': resource_uri},
+ inventory['entries']))
+ return dict(ret=ret, entries=entries)
+
+ def get_virtualmedia(self, resource_uri):
+ result = {}
+ virtualmedia_list = []
+ virtualmedia_results = []
+ key = "VirtualMedia"
+ # Get these entries, but do not fail if not found
+ properties = ['Description', 'ConnectedVia', 'Id', 'MediaTypes',
+ 'Image', 'ImageName', 'Name', 'WriteProtected',
+ 'TransferMethod', 'TransferProtocolType']
+
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ virtualmedia_uri = data[key]["@odata.id"]
+
+ # Get a list of all virtual media and build respective URIs
+ response = self.get_request(self.root_uri + virtualmedia_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for virtualmedia in data[u'Members']:
+ virtualmedia_list.append(virtualmedia[u'@odata.id'])
+
+ for n in virtualmedia_list:
+ virtualmedia = {}
+ uri = self.root_uri + n
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ virtualmedia[property] = data[property]
+
+ virtualmedia_results.append(virtualmedia)
+ result["entries"] = virtualmedia_results
+ return result
+
+ def get_multi_virtualmedia(self, resource_type='Manager'):
+ ret = True
+ entries = []
+
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uris = self.systems_uris
+ elif resource_type == 'Manager':
+ resource_uris = self.manager_uris
+
+ for resource_uri in resource_uris:
+ virtualmedia = self.get_virtualmedia(resource_uri)
+ ret = virtualmedia.pop('ret') and ret
+ if 'entries' in virtualmedia:
+ entries.append(({'resource_uri': resource_uri},
+ virtualmedia['entries']))
+ return dict(ret=ret, entries=entries)
+
+ @staticmethod
+ def _find_empty_virt_media_slot(resources, media_types,
+ media_match_strict=True, vendor=''):
+ for uri, data in resources.items():
+ # check MediaTypes
+ if 'MediaTypes' in data and media_types:
+ if not set(media_types).intersection(set(data['MediaTypes'])):
+ continue
+ else:
+ if media_match_strict:
+ continue
+ # Based on current Lenovo server capabilities, filter out slots RDOC1/2 and Remote1/2/3/4, which do not support Insert/Eject.
+ if vendor == 'Lenovo' and ('RDOC' in uri or 'Remote' in uri):
+ continue
+ # if ejected, 'Inserted' should be False and 'ImageName' cleared
+ if (not data.get('Inserted', False) and
+ not data.get('ImageName')):
+ return uri, data
+ return None, None
+
+ @staticmethod
+ def _virt_media_image_inserted(resources, image_url):
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ if data.get('Inserted', False) and data.get('ImageName'):
+ return True
+ return False
+
+ @staticmethod
+ def _find_virt_media_to_eject(resources, image_url):
+ matched_uri, matched_data = None, None
+ for uri, data in resources.items():
+ if data.get('Image'):
+ if urlparse(image_url) == urlparse(data.get('Image')):
+ matched_uri, matched_data = uri, data
+ if data.get('Inserted', True) and data.get('ImageName', 'x'):
+ return uri, data, True
+ return matched_uri, matched_data, False
+
+ def _read_virt_media_resources(self, uri_list):
+ resources = {}
+ headers = {}
+ for uri in uri_list:
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ continue
+ resources[uri] = response['data']
+ headers[uri] = response['headers']
+ return resources, headers
+
+ @staticmethod
+ def _insert_virt_media_payload(options, param_map, data, ai):
+ payload = {
+ 'Image': options.get('image_url')
+ }
+ for param, option in param_map.items():
+ if options.get(option) is not None and param in data:
+ allowable = ai.get(param, {}).get('AllowableValues', [])
+ if allowable and options.get(option) not in allowable:
+ return {'ret': False,
+ 'msg': "Value '%s' specified for option '%s' not "
+ "in list of AllowableValues %s" % (
+ options.get(option), option,
+ allowable)}
+ payload[param] = options.get(option)
+ return payload
+
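+ # Sketch of how _insert_virt_media_payload() maps module options onto
+ # Redfish properties; the values below are illustrative:
+ #
+ #   options   = {'image_url': 'http://example.com/boot.iso',
+ #                'write_protected': True}
+ #   param_map = {'WriteProtected': 'write_protected'}
+ #   payload  -> {'Image': 'http://example.com/boot.iso',
+ #                'WriteProtected': True}
+ #
+ # An option is copied only when the matching property exists in the resource
+ # data and, if AllowableValues are advertised, the value is permitted.
+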
+ def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False):
+ # get AllowableValues
+ ai = dict((k[:-24],
+ {'AllowableValues': v}) for k, v in data.items()
+ if k.endswith('@Redfish.AllowableValues'))
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ if 'Inserted' not in payload and not image_only:
+ # Add Inserted to the payload if needed
+ payload['Inserted'] = True
+
+ # PATCH the resource
+ resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+ if resp['ret'] is False:
+ # WORKAROUND
+ # Older HPE systems with iLO 4 and Supermicro do not support
+ # specifying Inserted or WriteProtected
+ vendor = self._get_vendor()['Vendor']
+ if vendor == 'HPE' or vendor == 'Supermicro':
+ payload.pop('Inserted', None)
+ payload.pop('WriteProtected', None)
+ resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'VirtualMedia inserted'
+ return resp
+
+ def virtual_media_insert(self, options, resource_type='Manager'):
+ param_map = {
+ 'Inserted': 'inserted',
+ 'WriteProtected': 'write_protected',
+ 'UserName': 'username',
+ 'Password': 'password',
+ 'TransferProtocolType': 'transfer_protocol_type',
+ 'TransferMethod': 'transfer_method'
+ }
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaInsert"}
+ media_types = options.get('media_types')
+
+ # locate and read the VirtualMedia resources
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uri = self.systems_uri
+ elif resource_type == 'Manager':
+ resource_uri = self.manager_uri
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # see if image already inserted; if so, nothing to do
+ if self._virt_media_image_inserted(resources, image_url):
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia '%s' already inserted" % image_url}
+
+ # find an empty slot to insert the media
+ # try first with strict media_type matching
+ vendor = self._get_vendor()['Vendor']
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=True, vendor=vendor)
+ if not uri:
+ # if not found, try without strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=False, vendor=vendor)
+ if not uri:
+ return {'ret': False,
+ 'msg': "Unable to find an available VirtualMedia resource "
+ "%s" % ('supporting ' + str(media_types)
+ if media_types else '')}
+
+ # confirm InsertMedia action found
+ if ('Actions' not in data or
+ '#VirtualMedia.InsertMedia' not in data['Actions']):
+ # try to insert via PATCH if no InsertMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.InsertMedia'}
+ return self.virtual_media_insert_via_patch(options, param_map,
+ uri, data)
+
+ # get the action property
+ action = data['Actions']['#VirtualMedia.InsertMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI missing from Action "
+ "#VirtualMedia.InsertMedia"}
+ action_uri = action['target']
+ # get ActionInfo or AllowableValues
+ ai = self._get_all_action_info_values(action)
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False and ('Inserted' in payload or 'WriteProtected' in payload):
+ # WORKAROUND
+ # Older HPE systems with iLO 4 and Supermicro do not support
+ # specifying Inserted or WriteProtected
+ vendor = self._get_vendor()['Vendor']
+ if vendor == 'HPE' or vendor == 'Supermicro':
+ payload.pop('Inserted', None)
+ payload.pop('WriteProtected', None)
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def virtual_media_eject_via_patch(self, uri, image_only=False):
+ # construct payload
+ payload = {
+ 'Inserted': False,
+ 'Image': None
+ }
+
+ # When ejecting the image only, omit Inserted (it is not writable there)
+ if image_only:
+ del payload['Inserted']
+
+ # PATCH resource
+ resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+ if resp['ret'] is False and 'Inserted' in payload:
+ # WORKAROUND
+ # Older HPE systems with iLO 4 and Supermicro do not support
+ # specifying Inserted
+ vendor = self._get_vendor()['Vendor']
+ if vendor == 'HPE' or vendor == 'Supermicro':
+ payload.pop('Inserted', None)
+ resp = self.patch_request(self.root_uri + uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'VirtualMedia ejected'
+ return resp
+
+ def virtual_media_eject(self, options, resource_type='Manager'):
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaEject"}
+
+ # locate and read the VirtualMedia resources
+ # Given resource_type, use the proper URI
+ if resource_type == 'Systems':
+ resource_uri = self.systems_uri
+ elif resource_type == 'Manager':
+ resource_uri = self.manager_uri
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # find the VirtualMedia resource to eject
+ uri, data, eject = self._find_virt_media_to_eject(resources, image_url)
+ if uri and eject:
+ if ('Actions' not in data or
+ '#VirtualMedia.EjectMedia' not in data['Actions']):
+ # try to eject via PATCH if no EjectMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.EjectMedia'}
+ return self.virtual_media_eject_via_patch(uri)
+ else:
+ # POST to the EjectMedia Action
+ action = data['Actions']['#VirtualMedia.EjectMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI property missing from Action "
+ "#VirtualMedia.EjectMedia"}
+ action_uri = action['target']
+ # empty payload for Eject action
+ payload = {}
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri,
+ payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+ elif uri and not eject:
+ # already ejected: return success but changed=False
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia image '%s' already ejected" %
+ image_url}
+ else:
+ # return failure (no resources matching image_url found)
+ return {'ret': False, 'changed': False,
+ 'msg': "No VirtualMedia resource found with image '%s' "
+ "inserted" % image_url}
+
+ def get_psu_inventory(self):
+ result = {}
+ psu_list = []
+ psu_results = []
+ key = "PowerSupplies"
+ # Get these entries, but do not fail if not found
+ properties = ['Name', 'Model', 'SerialNumber', 'PartNumber', 'Manufacturer',
+ 'FirmwareVersion', 'PowerCapacityWatts', 'PowerSupplyType',
+ 'Status']
+
+ # Get a list of all Chassis and build URIs, then get all PowerSupplies
+ # from each Power entry in the Chassis
+ chassis_uri_list = self.chassis_uris
+ for chassis_uri in chassis_uri_list:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+ data = response['data']
+
+ if 'Power' in data:
+ power_uri = data[u'Power'][u'@odata.id']
+ else:
+ continue
+
+ response = self.get_request(self.root_uri + power_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ psu_list = data[key]
+ for psu in psu_list:
+ psu_not_present = False
+ psu_data = {}
+ for property in properties:
+ if property in psu:
+ if psu[property] is not None:
+ if property == 'Status':
+ if 'State' in psu[property]:
+ if psu[property]['State'] == 'Absent':
+ psu_not_present = True
+ psu_data[property] = psu[property]
+ if psu_not_present:
+ continue
+ psu_results.append(psu_data)
+
+ result["entries"] = psu_results
+ if not result["entries"]:
+ return {'ret': False, 'msg': "No PowerSupply objects found"}
+ return result
+
+ def get_multi_psu_inventory(self):
+ return self.aggregate_systems(self.get_psu_inventory)
+
+ def get_system_inventory(self, systems_uri):
+ result = {}
+ inventory = {}
+ # Get these entries, but do not fail if not found
+ properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer',
+ 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag',
+ 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary',
+ 'ProcessorSummary', 'TrustedModules', 'Name', 'Id']
+
+ response = self.get_request(self.root_uri + systems_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ inventory[property] = data[property]
+
+ result["entries"] = inventory
+ return result
+
+ def get_multi_system_inventory(self):
+ return self.aggregate_systems(self.get_system_inventory)
+
+ def get_network_protocols(self):
+ result = {}
+ service_result = {}
+ # Find NetworkProtocol
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'NetworkProtocol' not in data:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+ networkprotocol_uri = data["NetworkProtocol"]["@odata.id"]
+
+ response = self.get_request(self.root_uri + networkprotocol_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ for protocol_service in protocol_services:
+ if protocol_service in data.keys():
+ service_result[protocol_service] = data[protocol_service]
+
+ result['ret'] = True
+ result["entries"] = service_result
+ return result
+
+ def set_network_protocols(self, manager_services):
+ # Check input data validity
+ protocol_services = ['SNMP', 'VirtualMedia', 'Telnet', 'SSDP', 'IPMI', 'SSH',
+ 'KVMIP', 'NTP', 'HTTP', 'HTTPS', 'DHCP', 'DHCPv6', 'RDP',
+ 'RFB']
+ protocol_state_onlist = ['true', 'True', True, 'on', 1]
+ protocol_state_offlist = ['false', 'False', False, 'off', 0]
+ payload = {}
+ for service_name in manager_services.keys():
+ if service_name not in protocol_services:
+ return {'ret': False, 'msg': "Service name %s is invalid" % service_name}
+ payload[service_name] = {}
+ for service_property in manager_services[service_name].keys():
+ value = manager_services[service_name][service_property]
+ if service_property in ['ProtocolEnabled', 'protocolenabled']:
+ if value in protocol_state_onlist:
+ payload[service_name]['ProtocolEnabled'] = True
+ elif value in protocol_state_offlist:
+ payload[service_name]['ProtocolEnabled'] = False
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ elif service_property in ['port', 'Port']:
+ if isinstance(value, int):
+ payload[service_name]['Port'] = value
+ elif isinstance(value, str) and value.isdigit():
+ payload[service_name]['Port'] = int(value)
+ else:
+ return {'ret': False, 'msg': "Value of property %s is invalid" % service_property}
+ else:
+ payload[service_name][service_property] = value
+
+ # Find the ManagerNetworkProtocol resource
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ networkprotocol_uri = data.get("NetworkProtocol", {}).get("@odata.id")
+ if networkprotocol_uri is None:
+ return {'ret': False, 'msg': "NetworkProtocol resource not found"}
+
+ # Modify the ManagerNetworkProtocol resource
+ resp = self.patch_request(self.root_uri + networkprotocol_uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified manager network protocol settings'
+ return resp
+
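+ # Example (illustrative) input for set_network_protocols(); truthy and falsy
+ # state values are normalized to booleans, and port strings to integers:
+ #
+ #   set_network_protocols({'SSH': {'ProtocolEnabled': 'true'},
+ #                          'IPMI': {'Port': '623'}})
+ #   # -> PATCH payload {'SSH': {'ProtocolEnabled': True},
+ #   #                   'IPMI': {'Port': 623}}
+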
+ @staticmethod
+ def to_singular(resource_name):
+ if resource_name.endswith('ies'):
+ resource_name = resource_name[:-3] + 'y'
+ elif resource_name.endswith('s'):
+ resource_name = resource_name[:-1]
+ return resource_name
+
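+ # to_singular() examples: 'PowerSupplies' -> 'PowerSupply', 'Fans' -> 'Fan';
+ # note that 'Chassis' -> 'Chassi', a quirk of the simple 's'-stripping rule.
+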
+ def get_health_resource(self, subsystem, uri, health, expanded):
+ status = 'Status'
+
+ if expanded:
+ d = expanded
+ else:
+ r = self.get_request(self.root_uri + uri)
+ if r.get('ret'):
+ d = r.get('data')
+ else:
+ return
+
+ if 'Members' in d: # collections case
+ for m in d.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ p = r.get('data')
+ if p:
+ e = {self.to_singular(subsystem.lower()) + '_uri': u,
+ status: p.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+ else: # non-collections case
+ e = {self.to_singular(subsystem.lower()) + '_uri': uri,
+ status: d.get(status,
+ "Status not available")}
+ health[subsystem].append(e)
+
+ def get_health_subsystem(self, subsystem, data, health):
+ if subsystem in data:
+ sub = data.get(subsystem)
+ if isinstance(sub, list):
+ for r in sub:
+ if '@odata.id' in r:
+ uri = r.get('@odata.id')
+ expanded = None
+ if '#' in uri and len(r) > 1:
+ expanded = r
+ self.get_health_resource(subsystem, uri, health, expanded)
+ elif isinstance(sub, dict):
+ if '@odata.id' in sub:
+ uri = sub.get('@odata.id')
+ self.get_health_resource(subsystem, uri, health, None)
+ elif 'Members' in data:
+ for m in data.get('Members'):
+ u = m.get('@odata.id')
+ r = self.get_request(self.root_uri + u)
+ if r.get('ret'):
+ d = r.get('data')
+ self.get_health_subsystem(subsystem, d, health)
+
+ def get_health_report(self, category, uri, subsystems):
+ result = {}
+ health = {}
+ status = 'Status'
+
+ # Get health status of top level resource
+ response = self.get_request(self.root_uri + uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ health[category] = {status: data.get(status, "Status not available")}
+
+ # Get health status of subsystems
+ for sub in subsystems:
+ d = None
+ if sub.startswith('Links.'): # ex: Links.PCIeDevices
+ sub = sub[len('Links.'):]
+ d = data.get('Links', {})
+ elif '.' in sub: # ex: Thermal.Fans
+ p, sub = sub.split('.')
+ u = data.get(p, {}).get('@odata.id')
+ if u:
+ r = self.get_request(self.root_uri + u)
+ if r['ret']:
+ d = r['data']
+ if not d:
+ continue
+ else: # ex: Memory
+ d = data
+ health[sub] = []
+ self.get_health_subsystem(sub, d, health)
+ if not health[sub]:
+ del health[sub]
+
+ result["entries"] = health
+ return result
+
+ def get_system_health_report(self, systems_uri):
+ subsystems = ['Processors', 'Memory', 'SimpleStorage', 'Storage',
+ 'EthernetInterfaces', 'NetworkInterfaces.NetworkPorts',
+ 'NetworkInterfaces.NetworkDeviceFunctions']
+ return self.get_health_report('System', systems_uri, subsystems)
+
+ def get_multi_system_health_report(self):
+ return self.aggregate_systems(self.get_system_health_report)
+
+ def get_chassis_health_report(self, chassis_uri):
+ subsystems = ['Power.PowerSupplies', 'Thermal.Fans',
+ 'Links.PCIeDevices']
+ return self.get_health_report('Chassis', chassis_uri, subsystems)
+
+ def get_multi_chassis_health_report(self):
+ return self.aggregate_chassis(self.get_chassis_health_report)
+
+ def get_manager_health_report(self, manager_uri):
+ subsystems = []
+ return self.get_health_report('Manager', manager_uri, subsystems)
+
+ def get_multi_manager_health_report(self):
+ return self.aggregate_managers(self.get_manager_health_report)
+
+ def set_manager_nic(self, nic_addr, nic_config):
+ # Get the manager ethernet interface uri
+ nic_info = self.get_manager_ethernet_uri(nic_addr)
+
+ if nic_info.get('nic_addr') is None:
+ return nic_info
+ else:
+ target_ethernet_uri = nic_info['nic_addr']
+ target_ethernet_current_setting = nic_info['ethernet_setting']
+
+ # Convert input to payload and check validity
+ # Note: Some properties in the EthernetInterface resource are arrays of
+ # objects. The call into this module expects a flattened view, meaning
+ # the user specifies exactly one object for an array property. For
+ # example, if a user provides IPv4StaticAddresses in the request to this
+ # module, it will turn that into an array of one member. This pattern
+ # should be avoided for future commands in this module, but needs to be
+ # preserved here for backwards compatibility.
+ payload = {}
+ for property in nic_config.keys():
+ value = nic_config[property]
+ if property in target_ethernet_current_setting and isinstance(value, dict) and isinstance(target_ethernet_current_setting[property], list):
+ payload[property] = list()
+ payload[property].append(value)
+ else:
+ payload[property] = value
+
+ # Modify the EthernetInterface resource
+ resp = self.patch_request(self.root_uri + target_ethernet_uri, payload, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified manager NIC'
+ return resp
+
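+ # Example (illustrative) nic_config for set_manager_nic(), showing the
+ # flattened view described above; when the resource exposes the property as
+ # an array, the single object is wrapped into a one-element list:
+ #
+ #   set_manager_nic('null', {
+ #       'DHCPv4': {'DHCPEnabled': False},
+ #       'IPv4StaticAddresses': {'Address': '192.0.2.10',
+ #                               'SubnetMask': '255.255.255.0',
+ #                               'Gateway': '192.0.2.1'}})
+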
+ # A helper function to get the EthernetInterface URI
+ def get_manager_ethernet_uri(self, nic_addr='null'):
+ # Get EthernetInterface collection
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if not response['ret']:
+ return response
+ data = response['data']
+ if 'EthernetInterfaces' not in data:
+ return {'ret': False, 'msg': "EthernetInterfaces resource not found"}
+ ethernetinterfaces_uri = data["EthernetInterfaces"]["@odata.id"]
+ response = self.get_request(self.root_uri + ethernetinterfaces_uri)
+ if not response['ret']:
+ return response
+ data = response['data']
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if
+ a.get('@odata.id')]
+
+ # Find target EthernetInterface
+ target_ethernet_uri = None
+ target_ethernet_current_setting = None
+ if nic_addr == 'null':
+ # When nic_addr is not specified, fall back to the BMC address from root_uri
+ nic_addr = (self.root_uri).split('/')[-1]
+ nic_addr = nic_addr.split(':')[0] # strip the port if present
+ for uri in uris:
+ response = self.get_request(self.root_uri + uri)
+ if not response['ret']:
+ return response
+ data = response['data']
+ data_string = json.dumps(data)
+ if nic_addr.lower() in data_string.lower():
+ target_ethernet_uri = uri
+ target_ethernet_current_setting = data
+ break
+
+ nic_info = {}
+ nic_info['nic_addr'] = target_ethernet_uri
+ nic_info['ethernet_setting'] = target_ethernet_current_setting
+
+ if target_ethernet_uri is None:
+ return {'ret': False, 'msg': "No matching EthernetInterface resource found"}
+ else:
+ return nic_info
+
+ def set_hostinterface_attributes(self, hostinterface_config, hostinterface_id=None):
+ if hostinterface_config is None:
+ return {'ret': False, 'msg':
+ 'Must provide hostinterface_config for SetHostInterface command'}
+
+ # Find the HostInterfaceCollection resource
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id")
+ if hostinterfaces_uri is None:
+ return {'ret': False, 'msg': "HostInterface resource not found"}
+ response = self.get_request(self.root_uri + hostinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ uris = [a.get('@odata.id') for a in data.get('Members', []) if a.get('@odata.id')]
+
+ # Capture list of URIs that match a specified HostInterface resource Id
+ if hostinterface_id:
+ matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.split('/')[-1]]
+ if hostinterface_id and matching_hostinterface_uris:
+ hostinterface_uri = matching_hostinterface_uris.pop()
+ elif hostinterface_id and not matching_hostinterface_uris:
+ return {'ret': False, 'msg': "HostInterface ID %s not present." % hostinterface_id}
+ elif len(uris) == 1:
+ hostinterface_uri = uris.pop()
+ else:
+ return {'ret': False, 'msg': "HostInterface ID not defined and multiple interfaces detected."}
+
+ # Modify the HostInterface resource
+ resp = self.patch_request(self.root_uri + hostinterface_uri, hostinterface_config, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified host interface'
+ return resp
+
+ def get_hostinterfaces(self):
+ result = {}
+ hostinterface_results = []
+ properties = ['Id', 'Name', 'Description', 'HostInterfaceType', 'Status',
+ 'InterfaceEnabled', 'ExternallyAccessible', 'AuthenticationModes',
+ 'AuthNoneRoleId', 'CredentialBootstrapping']
+ manager_uri_list = self.manager_uris
+ for manager_uri in manager_uri_list:
+ response = self.get_request(self.root_uri + manager_uri)
+ if response['ret'] is False:
+ return response
+
+ result['ret'] = True
+ data = response['data']
+ hostinterfaces_uri = data.get("HostInterfaces", {}).get("@odata.id")
+ if hostinterfaces_uri is None:
+ continue
+
+ response = self.get_request(self.root_uri + hostinterfaces_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ if 'Members' in data:
+ for hostinterface in data['Members']:
+ hostinterface_uri = hostinterface['@odata.id']
+ hostinterface_response = self.get_request(self.root_uri + hostinterface_uri)
+ # dictionary for capturing individual HostInterface properties
+ hostinterface_data_temp = {}
+ if hostinterface_response['ret'] is False:
+ return hostinterface_response
+ hostinterface_data = hostinterface_response['data']
+ for property in properties:
+ if property in hostinterface_data:
+ if hostinterface_data[property] is not None:
+ hostinterface_data_temp[property] = hostinterface_data[property]
+ # Check for the presence of a ManagerEthernetInterface
+ # object, a link to a _single_ EthernetInterface that the
+ # BMC uses to communicate with the host.
+ if 'ManagerEthernetInterface' in hostinterface_data:
+ interface_uri = hostinterface_data['ManagerEthernetInterface']['@odata.id']
+ interface_response = self.get_nic(interface_uri)
+ if interface_response['ret'] is False:
+ return interface_response
+ hostinterface_data_temp['ManagerEthernetInterface'] = interface_response['entries']
+
+ # Check for the presence of a HostEthernetInterfaces
+ # object, a link to a _collection_ of EthernetInterfaces
+ # that the host uses to communicate with the BMC.
+ if 'HostEthernetInterfaces' in hostinterface_data:
+ interfaces_uri = hostinterface_data['HostEthernetInterfaces']['@odata.id']
+ interfaces_response = self.get_request(self.root_uri + interfaces_uri)
+ if interfaces_response['ret'] is False:
+ return interfaces_response
+ interfaces_data = interfaces_response['data']
+ if 'Members' in interfaces_data:
+ for interface in interfaces_data['Members']:
+ interface_uri = interface['@odata.id']
+ interface_response = self.get_nic(interface_uri)
+ if interface_response['ret'] is False:
+ return interface_response
+ # Check if this is the first
+ # HostEthernetInterfaces item and create empty
+ # list if so.
+ if 'HostEthernetInterfaces' not in hostinterface_data_temp:
+ hostinterface_data_temp['HostEthernetInterfaces'] = []
+
+ hostinterface_data_temp['HostEthernetInterfaces'].append(interface_response['entries'])
+
+ hostinterface_results.append(hostinterface_data_temp)
+ else:
+ continue
+ result["entries"] = hostinterface_results
+ if not result["entries"]:
+ return {'ret': False, 'msg': "No HostInterface objects found"}
+ return result
+
+ def get_manager_inventory(self, manager_uri):
+ result = {}
+ inventory = {}
+ # Get these entries, but do not fail if not found
+ properties = ['FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model',
+ 'PartNumber', 'PowerState', 'SerialNumber', 'Status', 'UUID']
+
+ response = self.get_request(self.root_uri + manager_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ for property in properties:
+ if property in data:
+ inventory[property] = data[property]
+
+ result["entries"] = inventory
+ return result
+
+ def get_multi_manager_inventory(self):
+ return self.aggregate_managers(self.get_manager_inventory)
+
+ def set_session_service(self, sessions_config):
+ if sessions_config is None:
+ return {'ret': False, 'msg':
+ 'Must provide sessions_config for SetSessionService command'}
+
+ resp = self.patch_request(self.root_uri + self.session_service_uri, sessions_config, check_pyld=True)
+ if resp['ret'] and resp['changed']:
+ resp['msg'] = 'Modified session service'
+ return resp
+
+ def verify_bios_attributes(self, bios_attributes):
+ # This method verifies BIOS attributes against the provided input
+ server_bios = self.get_multi_bios_attributes()
+ if server_bios["ret"] is False:
+ return server_bios
+
+ bios_dict = {}
+ wrong_param = {}
+
+ # Verify bios_attributes with BIOS settings available in the server
+ for key, value in bios_attributes.items():
+ if key in server_bios["entries"][0][1]:
+ if server_bios["entries"][0][1][key] != value:
+ bios_dict.update({key: value})
+ else:
+ wrong_param.update({key: value})
+
+ if wrong_param:
+ return {
+ "ret": False,
+ "msg": "Wrong parameters are provided: %s" % wrong_param
+ }
+
+ if bios_dict:
+ return {
+ "ret": False,
+ "msg": "BIOS parameters are not matching: %s" % bios_dict
+ }
+
+ return {
+ "ret": True,
+ "changed": False,
+ "msg": "BIOS verification completed"
+ }
+
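+ # Example (illustrative): verify_bios_attributes({'BootMode': 'Uefi'})
+ # returns ret=True/changed=False when the live settings match, ret=False
+ # listing mismatched values, or ret=False listing unknown parameter names.
+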
+ def enable_secure_boot(self):
+ # This function enables Secure Boot on an OOB controller
+
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response["ret"] is False:
+ return response
+
+ server_details = response["data"]
+ secure_boot_url = server_details["SecureBoot"]["@odata.id"]
+
+ response = self.get_request(self.root_uri + secure_boot_url)
+ if response["ret"] is False:
+ return response
+
+ body = {}
+ body["SecureBootEnable"] = True
+
+ return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True)
+
+ def get_hpe_thermal_config(self):
+ result = {}
+ key = "Thermal"
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ oem = data.get('Oem', {})
+ hpe = oem.get('Hpe', {})
+ thermal_config = hpe.get('ThermalConfiguration')
+ result["current_thermal_config"] = thermal_config
+ return result
+
+ def get_hpe_fan_percent_min(self):
+ result = {}
+ key = "Thermal"
+ # Go through list
+ for chassis_uri in self.chassis_uris:
+ response = self.get_request(self.root_uri + chassis_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ oem = data.get('Oem', {})
+ hpe = oem.get('Hpe', {})
+ fan_percent_min_config = hpe.get('FanPercentMinimum')
+ result["fan_percent_min"] = fan_percent_min_config
+ return result
diff --git a/ansible_collections/community/general/plugins/module_utils/redhat.py b/ansible_collections/community/general/plugins/module_utils/redhat.py
new file mode 100644
index 000000000..f82cffaa0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/redhat.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), James Laska
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import re
+import shutil
+import tempfile
+import types
+
+from ansible.module_utils.six.moves import configparser
+
+
+class RegistrationBase(object):
+ def __init__(self, module, username=None, password=None):
+ self.module = module
+ self.username = username
+ self.password = password
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ redhat_repo = '/etc/yum.repos.d/redhat.repo'
+ if os.path.isfile(redhat_repo):
+ os.unlink(redhat_repo)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
+ if os.path.isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', 1)
+ else:
+ cfg.set('main', 'enabled', 0)
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
+ self.config = self._read_config()
+ self.module = module
+
+ def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
+ '''
+ Load RHSM configuration from /etc/rhsm/rhsm.conf.
+ Returns:
+ * ConfigParser object
+ '''
+
+ # Read RHSM defaults ...
+ cp = configparser.ConfigParser()
+ cp.read(rhsm_conf)
+
+ # Add support for specifying a default value w/o having to standup some configuration
+ # Yeah, I know this should be subclassed ... but, oh well
+ def get_option_default(self, key, default=''):
+ sect, opt = key.split('.', 1)
+ if self.has_section(sect) and self.has_option(sect, opt):
+ return self.get(sect, opt)
+ else:
+ return default
+
+ cp.get_option = types.MethodType(get_option_default, cp) # two-argument form works on Python 2 and 3
+
+ return cp
+
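+ # Example (illustrative) use of the get_option helper injected above in
+ # _read_config(); the key and default are placeholders:
+ #
+ #   cfg = self._read_config()
+ #   hostname = cfg.get_option('server.hostname', default='localhost')
+ #
+ # The default is returned when the section or option is missing.
+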
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHN
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
+ for k, v in kwargs.items():
+ if re.search(r'^(system|rhsm)_', k):
+ args.append('--%s=%s' % (k.replace('_', '.'), v))
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHN.
+ '''
+ args = ['subscription-manager', 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def register(self, username, password, autosubscribe, activationkey):
+ '''
+ Register the current system to the provided RHN server
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'register']
+
+ # Generate command arguments
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ else:
+ if autosubscribe:
+ args.append('--autosubscribe')
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+
+ # Run the registration command
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unsubscribe(self):
+ '''
+ Unsubscribe a system from all subscribed channels
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unsubscribe', '--all']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = ['subscription-manager', 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ for pool in available_pools.filter(regexp):
+ pool.subscribe()
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def subscribe(self):
+ args = "subscription-manager subscribe --pool %s" % self.PoolId
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+ def __init__(self, module):
+ self.module = module
+ self.products = self._load_product_list()
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self):
+ """
+ Load the list of all pools available to the system into a data structure
+ """
+ args = "subscription-manager list --available"
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # Normalize: remove inner spaces so keys can be used as attribute names
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter(self, regexp='^$'):
+ '''
+ Return a list of RhsmPools whose name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
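+
+# Example (illustrative) use of RhsmPools.filter(); the pool-name pattern is
+# a placeholder:
+#
+#   pools = RhsmPools(module)
+#   for pool in pools.filter(r'^Red Hat Enterprise Linux'):
+#       pool.subscribe()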
diff --git a/ansible_collections/community/general/plugins/module_utils/redis.py b/ansible_collections/community/general/plugins/module_utils/redis.py
new file mode 100644
index 000000000..c4d87aca5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/redis.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.basic import missing_required_lib
+
+REDIS_IMP_ERR = None
+try:
+ from redis import Redis
+ from redis import __version__ as redis_version
+ HAS_REDIS_PACKAGE = True
+ REDIS_IMP_ERR = None
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+try:
+ import certifi
+ HAS_CERTIFI_PACKAGE = True
+ CERTIFI_IMPORT_ERROR = None
+except ImportError:
+ CERTIFI_IMPORT_ERROR = traceback.format_exc()
+ HAS_CERTIFI_PACKAGE = False
+
+
+def fail_imports(module, needs_certifi=True):
+ errors = []
+ tracebacks = [] # avoid shadowing the imported traceback module
+ if not HAS_REDIS_PACKAGE:
+ errors.append(missing_required_lib('redis'))
+ tracebacks.append(REDIS_IMP_ERR)
+ if not HAS_CERTIFI_PACKAGE and needs_certifi:
+ errors.append(missing_required_lib('certifi'))
+ tracebacks.append(CERTIFI_IMPORT_ERROR)
+ if errors:
+ module.fail_json(msg='\n'.join(errors), traceback='\n'.join(tracebacks))
+
+
+def redis_auth_argument_spec(tls_default=True):
+ return dict(
+ login_host=dict(type='str',
+ default='localhost',),
+ login_user=dict(type='str'),
+ login_password=dict(type='str',
+ no_log=True
+ ),
+ login_port=dict(type='int', default=6379),
+ tls=dict(type='bool',
+ default=tls_default),
+ validate_certs=dict(type='bool',
+ default=True
+ ),
+ ca_certs=dict(type='str')
+ )
+
+
+def redis_auth_params(module):
+ login_host = module.params['login_host']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_port = module.params['login_port']
+ tls = module.params['tls']
+ validate_certs = 'required' if module.params['validate_certs'] else None
+ ca_certs = module.params['ca_certs']
+ if tls and ca_certs is None:
+ ca_certs = str(certifi.where())
+ if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None:
+ module.fail_json(
+ msg='The option `login_user` is only supported with redis >= 3.4.0.')
+ params = {'host': login_host,
+ 'port': login_port,
+ 'password': login_password,
+ 'ssl_ca_certs': ca_certs,
+ 'ssl_cert_reqs': validate_certs,
+ 'ssl': tls}
+ if login_user is not None:
+ params['username'] = login_user
+ return params
+
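+# Sketch (illustrative) of the parameters redis_auth_params() produces for a
+# TLS-enabled invocation with default options; the CA path is a placeholder:
+#
+#   {'host': 'localhost', 'port': 6379, 'password': None,
+#    'ssl_ca_certs': '/path/to/certifi/cacert.pem',
+#    'ssl_cert_reqs': 'required', 'ssl': True}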
+
+class RedisAnsible(object):
+ '''Base class for Redis module'''
+
+ def __init__(self, module):
+ self.module = module
+ self.connection = self._connect()
+
+ def _connect(self):
+ try:
+ return Redis(**redis_auth_params(self.module))
+ except Exception as e:
+ self.module.fail_json(msg='{0}'.format(str(e)))
+ return None
diff --git a/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py b/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
new file mode 100644
index 000000000..0fe8c3207
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/remote_management/lxca/common.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by
+# Ansible still belong to the author of the module, and may assign their
+# own license to the complete work.
+#
+# Copyright (C) 2017 Lenovo, Inc.
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+#
+# Contains LXCA common class
+# Lenovo xClarity Administrator (LXCA)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+try:
+ from pylxca import connect, disconnect
+ HAS_PYLXCA = True
+except ImportError:
+ HAS_PYLXCA = False
+
+
+PYLXCA_REQUIRED = "Lenovo xClarity Administrator Python Client (Python package 'pylxca') is required for this module."
+
+
+def has_pylxca(module):
+ """
+ Check that pylxca is installed; fail the module if it is not.
+ :param module: the AnsibleModule instance
+ """
+ if not HAS_PYLXCA:
+ module.fail_json(msg=PYLXCA_REQUIRED)
+
+
+LXCA_COMMON_ARGS = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ auth_url=dict(required=True),
+)
+
+
+class connection_object:
+ def __init__(self, module):
+ self.module = module
+
+ def __enter__(self):
+ return setup_conn(self.module)
+
+ def __exit__(self, type, value, traceback):
+ close_conn()
+
+
+def setup_conn(module):
+ """
+ This function creates a connection to LXCA.
+ :param module:
+ :return: lxca connection
+ """
+ lxca_con = None
+ try:
+ lxca_con = connect(module.params['auth_url'],
+ module.params['login_user'],
+ module.params['login_password'],
+ "True")
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+ return lxca_con
+
+
+def close_conn():
+ """
+ This function closes the connection to LXCA.
+ :return: None
+ """
+ disconnect()
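+
+
+# Illustrative sketch (assumed wiring; LXCA_COMMON_ARGS supplies the required
+# auth_url/login_user/login_password options):
+#
+#     from ansible.module_utils.basic import AnsibleModule
+#
+#     module = AnsibleModule(argument_spec=dict(LXCA_COMMON_ARGS))
+#     has_pylxca(module)
+#     with connection_object(module) as lxca_con:
+#         pass  # call pylxca commands with lxca_con here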
diff --git a/ansible_collections/community/general/plugins/module_utils/rundeck.py b/ansible_collections/community/general/plugins/module_utils/rundeck.py
new file mode 100644
index 000000000..6fb56fbae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/rundeck.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+def api_argument_spec():
+ '''
+ Creates an argument spec that can be used with any module
+ that requests content via the Rundeck API
+ '''
+ api_argument_spec = url_argument_spec()
+ api_argument_spec.update(dict(
+ url=dict(required=True, type="str"),
+ api_version=dict(type="int", default=39),
+ api_token=dict(required=True, type="str", no_log=True)
+ ))
+
+ return api_argument_spec
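+
+# Illustrative sketch (hypothetical ``job_id`` option; Rundeck modules extend
+# this spec with their own options and call ``api_request`` defined below):
+#
+#     from ansible.module_utils.basic import AnsibleModule
+#
+#     argument_spec = api_argument_spec()
+#     argument_spec.update(job_id=dict(required=True, type="str"))
+#     module = AnsibleModule(argument_spec=argument_spec)
+#     response, info = api_request(module, "job/%s/executions" % module.params["job_id"])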
+
+
+def api_request(module, endpoint, data=None, method="GET"):
+ """Manages Rundeck API requests via HTTP(S)
+
+ :arg module: The AnsibleModule (used to get url, api_version, api_token, etc).
+ :arg endpoint: The API endpoint to be used.
+ :kwarg data: The data to be sent (in case of POST/PUT).
+ :kwarg method: "POST", "PUT", etc.
+
+ :returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
+ The **info** contains the 'status' and other meta data. When a HttpError (status >= 400)
+ occurred then ``info['body']`` contains the error response data::
+
+ Example::
+
+ data={...}
+ resp, info = fetch_url(module,
+ "http://rundeck.example.org",
+ data=module.jsonify(data),
+ method="POST")
+ status_code = info["status"]
+ body = resp.read()
+ if status_code >= 400 :
+ body = info['body']
+ """
+
+ response, info = fetch_url(
+ module=module,
+ url="%s/api/%s/%s" % (
+ module.params["url"],
+ module.params["api_version"],
+ endpoint
+ ),
+ data=json.dumps(data),
+ method=method,
+ headers={
+ "Content-Type": "application/json",
+ "Accept": "application/json",
+ "X-Rundeck-Auth-Token": module.params["api_token"]
+ }
+ )
+
+ if info["status"] == 403:
+ module.fail_json(msg="Token authorization failed",
+ execution_info=json.loads(info["body"]))
+ if info["status"] == 409:
+ module.fail_json(msg="Job executions limit reached",
+ execution_info=json.loads(info["body"]))
+ elif info["status"] >= 500:
+ module.fail_json(msg="Rundeck API error",
+ execution_info=json.loads(info["body"]))
+
+ try:
+ content = response.read()
+
+ if not content:
+ return None, info
+ else:
+ json_response = json.loads(content)
+ return json_response, info
+ except AttributeError as error:
+ module.fail_json(
+ msg="Rundeck API request error",
+ exception=to_native(error),
+ execution_info=info
+ )
+ except ValueError as error:
+ module.fail_json(
+ msg="No valid JSON response",
+ exception=to_native(error),
+ execution_info=content
+ )
diff --git a/ansible_collections/community/general/plugins/module_utils/saslprep.py b/ansible_collections/community/general/plugins/module_utils/saslprep.py
new file mode 100644
index 000000000..29bb49b70
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/saslprep.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+
+# Copyright (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from stringprep import (
+ in_table_a1,
+ in_table_b1,
+ in_table_c3,
+ in_table_c4,
+ in_table_c5,
+ in_table_c6,
+ in_table_c7,
+ in_table_c8,
+ in_table_c9,
+ in_table_c12,
+ in_table_c21_c22,
+ in_table_d1,
+ in_table_d2,
+)
+from unicodedata import normalize
+
+from ansible.module_utils.six import text_type
+
+
+def is_unicode_str(string):
+ return isinstance(string, text_type)
+
+
+def mapping_profile(string):
+ """RFC4013 Mapping profile implementation."""
+ # Regarding RFC4013,
+ # This profile specifies:
+ # - non-ASCII space characters [StringPrep, C.1.2] that can be
+ # mapped to SPACE (U+0020), and
+ # - the "commonly mapped to nothing" characters [StringPrep, B.1]
+ # that can be mapped to nothing.
+
+ tmp = []
+ for c in string:
+ # If not the "commonly mapped to nothing"
+ if not in_table_b1(c):
+ if in_table_c12(c):
+ # map non-ASCII space characters
+ # (that can be mapped) to Unicode space
+ tmp.append(u' ')
+ else:
+ tmp.append(c)
+
+ return u"".join(tmp)
+
+
+def is_ral_string(string):
+ """RFC3454 Check bidirectional category of the string"""
+ # Regarding RFC3454,
+ # Table D.1 lists the characters that belong
+ # to Unicode bidirectional categories "R" and "AL".
+ # If a string contains any RandALCat character, a RandALCat
+ # character MUST be the first character of the string, and a
+ # RandALCat character MUST be the last character of the string.
+ if in_table_d1(string[0]):
+ if not in_table_d1(string[-1]):
+ raise ValueError('RFC3454: incorrect bidirectional RandALCat string.')
+ return True
+ return False
+
+
+def prohibited_output_profile(string):
+ """RFC4013 Prohibited output profile implementation."""
+ # Implements:
+ # RFC4013, 2.3. Prohibited Output.
+ # This profile specifies the following characters as prohibited input:
+ # - Non-ASCII space characters [StringPrep, C.1.2]
+ # - ASCII control characters [StringPrep, C.2.1]
+ # - Non-ASCII control characters [StringPrep, C.2.2]
+ # - Private Use characters [StringPrep, C.3]
+ # - Non-character code points [StringPrep, C.4]
+ # - Surrogate code points [StringPrep, C.5]
+ # - Inappropriate for plain text characters [StringPrep, C.6]
+ # - Inappropriate for canonical representation characters [StringPrep, C.7]
+ # - Change display properties or deprecated characters [StringPrep, C.8]
+ # - Tagging characters [StringPrep, C.9]
+ # RFC4013, 2.4. Bidirectional Characters.
+ # RFC4013, 2.5. Unassigned Code Points.
+
+ # Determine how to handle bidirectional characters (RFC3454):
+ if is_ral_string(string):
+ # If a string contains any RandALCat characters,
+ # The string MUST NOT contain any LCat character:
+ is_prohibited_bidi_ch = in_table_d2
+ bidi_table = 'D.2'
+ else:
+ # Forbid RandALCat characters in LCat string:
+ is_prohibited_bidi_ch = in_table_d1
+ bidi_table = 'D.1'
+
+ RFC = 'RFC4013'
+ for c in string:
+ # RFC4013 2.3. Prohibited Output:
+ if in_table_c12(c):
+ raise ValueError('%s: prohibited non-ASCII space characters '
+ 'that cannot be replaced (C.1.2).' % RFC)
+ if in_table_c21_c22(c):
+ raise ValueError('%s: prohibited control characters (C.2.1, C.2.2).' % RFC)
+ if in_table_c3(c):
+ raise ValueError('%s: prohibited Private Use characters (C.3).' % RFC)
+ if in_table_c4(c):
+ raise ValueError('%s: prohibited non-character code points (C.4).' % RFC)
+ if in_table_c5(c):
+ raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC)
+ if in_table_c6(c):
+ raise ValueError('%s: prohibited inappropriate for plain text '
+ 'characters (C.6).' % RFC)
+ if in_table_c7(c):
+ raise ValueError('%s: prohibited inappropriate for canonical '
+ 'representation characters (C.7).' % RFC)
+ if in_table_c8(c):
+ raise ValueError('%s: prohibited change display properties / '
+ 'deprecated characters (C.8).' % RFC)
+ if in_table_c9(c):
+ raise ValueError('%s: prohibited tagging characters (C.9).' % RFC)
+
+ # RFC4013, 2.4. Bidirectional Characters:
+ if is_prohibited_bidi_ch(c):
+ raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table))
+
+ # RFC4013, 2.5. Unassigned Code Points:
+ if in_table_a1(c):
+ raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC)
+
+
+def saslprep(string):
+ """RFC4013 implementation.
+ Implements "SASLprep" profile (RFC4013) of the "stringprep" algorithm (RFC3454)
+ to prepare Unicode strings representing user names and passwords for comparison.
+ Regarding the RFC4013, the "SASLprep" profile is intended to be used by
+ Simple Authentication and Security Layer (SASL) mechanisms
+ (such as PLAIN, CRAM-MD5, and DIGEST-MD5), as well as other protocols
+ exchanging simple user names and/or passwords.
+
+ Args:
+ string (unicode string): Unicode string to validate and prepare.
+
+ Returns:
+ Prepared unicode string.
+ """
+ # RFC4013: "The algorithm assumes all strings are
+ # comprised of characters from the Unicode [Unicode] character set."
+ # Validate the string is a Unicode string
+ # (text_type is the string type if PY3 and unicode otherwise):
+ if not is_unicode_str(string):
+ raise TypeError('input must be of type %s, not %s' % (text_type, type(string)))
+
+ # RFC4013: 2.1. Mapping.
+ string = mapping_profile(string)
+
+ # RFC4013: 2.2. Normalization.
+ # "This profile specifies using Unicode normalization form KC."
+ string = normalize('NFKC', string)
+ if not string:
+ return u''
+
+ # RFC4013: 2.3. Prohibited Output.
+ # RFC4013: 2.4. Bidirectional Characters.
+ # RFC4013: 2.5. Unassigned Code Points.
+ prohibited_output_profile(string)
+
+ return string
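+
+
+# Illustrative examples (U+2168 ROMAN NUMERAL NINE is normalized to 'IX' by NFKC):
+#
+#     saslprep(u'\u2168 secret')  # -> u'IX secret'
+#     saslprep(b'bytes input')    # raises TypeError (input must be unicode)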
diff --git a/ansible_collections/community/general/plugins/module_utils/scaleway.py b/ansible_collections/community/general/plugins/module_utils/scaleway.py
new file mode 100644
index 000000000..43f209480
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/scaleway.py
@@ -0,0 +1,397 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import re
+import sys
+import datetime
+import time
+import traceback
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+SCALEWAY_SECRET_IMP_ERR = None
+try:
+ from passlib.hash import argon2
+ HAS_SCALEWAY_SECRET_PACKAGE = True
+except Exception:
+ argon2 = None
+ SCALEWAY_SECRET_IMP_ERR = traceback.format_exc()
+ HAS_SCALEWAY_SECRET_PACKAGE = False
+
+
+def scaleway_argument_spec():
+ return dict(
+ api_token=dict(required=True, fallback=(env_fallback, ['SCW_TOKEN', 'SCW_API_KEY', 'SCW_OAUTH_TOKEN', 'SCW_API_TOKEN']),
+ no_log=True, aliases=['oauth_token']),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://api.scaleway.com', aliases=['base_url']),
+ api_timeout=dict(type='int', default=30, aliases=['timeout']),
+ query_parameters=dict(type='dict', default={}),
+ validate_certs=dict(default=True, type='bool'),
+ )
+
+
+def scaleway_waitable_resource_argument_spec():
+ return dict(
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ )
+
+
+def payload_from_object(scw_object):
+ return dict(
+ (k, v)
+ for k, v in scw_object.items()
+ if k != 'id' and v is not None
+ )
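+
+# Illustrative example (drops the 'id' key and None-valued entries):
+#
+#     payload_from_object({'id': '42', 'name': 'web-1', 'tags': None})
+#     # -> {'name': 'web-1'}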
+
+
+class ScalewayException(Exception):
+
+ def __init__(self, message):
+ self.message = message
+
+
+# Specify a complete Link header, for validation purposes
+R_LINK_HEADER = r'''<[^>]+>;\srel="(first|previous|next|last)"
+ (,<[^>]+>;\srel="(first|previous|next|last)")*'''
+# Specify a single relation, for iteration and string extraction purposes
+R_RELATION = r'</?(?P<target_IRI>[^>]+)>; rel="(?P<relation>first|previous|next|last)"'
+
+
+def parse_pagination_link(header):
+ if not re.match(R_LINK_HEADER, header, re.VERBOSE):
+ raise ScalewayException('Scaleway API answered with an invalid Link pagination header')
+ else:
+ relations = header.split(',')
+ parsed_relations = {}
+ rc_relation = re.compile(R_RELATION)
+ for relation in relations:
+ match = rc_relation.match(relation)
+ if not match:
+ raise ScalewayException('Scaleway API answered with an invalid relation in the Link pagination header')
+ data = match.groupdict()
+ parsed_relations[data['relation']] = data['target_IRI']
+ return parsed_relations
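+
+# Illustrative example (header shape follows the Scaleway API; the URLs here
+# are made up):
+#
+#     parse_pagination_link('<https://api.scaleway.com/foo?page=2>; rel="next",'
+#                           '<https://api.scaleway.com/foo?page=5>; rel="last"')
+#     # -> {'next': 'https://api.scaleway.com/foo?page=2',
+#     #     'last': 'https://api.scaleway.com/foo?page=5'}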
+
+
+def filter_sensitive_attributes(container, attributes):
+ '''
+ WARNING: This function is effectively private, **do not use it**!
+ It will be removed or renamed once changing its name no longer triggers a pylint bug.
+ '''
+ for attr in attributes:
+ container[attr] = "SENSITIVE_VALUE"
+
+ return container
+
+
+class SecretVariables(object):
+ @staticmethod
+ def ensure_scaleway_secret_package(module):
+ if not HAS_SCALEWAY_SECRET_PACKAGE:
+ module.fail_json(
+ msg=missing_required_lib("passlib[argon2]", url='https://passlib.readthedocs.io/en/stable/'),
+ exception=SCALEWAY_SECRET_IMP_ERR
+ )
+
+ @staticmethod
+ def dict_to_list(source_dict):
+ return [
+ dict(key=var[0], value=var[1])
+ for var in source_dict.items()
+ ]
+
+ @staticmethod
+ def list_to_dict(source_list, hashed=False):
+ key_value = 'hashed_value' if hashed else 'value'
+ return dict(
+ (var['key'], var[key_value])
+ for var in source_list
+ )
+
+ @classmethod
+ def decode(cls, secrets_list, values_list):
+ secrets_dict = cls.list_to_dict(secrets_list, hashed=True)
+ values_dict = cls.list_to_dict(values_list, hashed=False)
+ for key in values_dict:
+ if key in secrets_dict and argon2.verify(values_dict[key], secrets_dict[key]):
+ secrets_dict[key] = values_dict[key]
+
+ return cls.dict_to_list(secrets_dict)
+
+
+def resource_attributes_should_be_changed(target, wished, verifiable_mutable_attributes, mutable_attributes):
+ diff = dict()
+ for attr in verifiable_mutable_attributes:
+ if wished[attr] is not None and target[attr] != wished[attr]:
+ diff[attr] = wished[attr]
+
+ if diff:
+ return dict((attr, wished[attr]) for attr in mutable_attributes)
+ else:
+ return diff
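+
+# Illustrative example (only non-None wished values are compared; when any
+# difference is found, all mutable attributes are returned):
+#
+#     resource_attributes_should_be_changed(
+#         target={'name': 'db', 'size': 10},
+#         wished={'name': 'db-new', 'size': None},
+#         verifiable_mutable_attributes=['name', 'size'],
+#         mutable_attributes=['name', 'size'])
+#     # -> {'name': 'db-new', 'size': None}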
+
+
+class Response(object):
+
+ def __init__(self, resp, info):
+ self.body = None
+ if resp:
+ self.body = resp.read()
+ self.info = info
+
+ @property
+ def json(self):
+ if not self.body:
+ if "body" in self.info:
+ return json.loads(self.info["body"])
+ return None
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+ @property
+ def status_code(self):
+ return self.info["status"]
+
+ @property
+ def ok(self):
+ return self.status_code in (200, 201, 202, 204)
+
+
+class Scaleway(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.headers = {
+ 'X-Auth-Token': self.module.params.get('api_token'),
+ 'User-Agent': self.get_user_agent_string(module),
+ 'Content-Type': 'application/json',
+ }
+ self.name = None
+
+ def get_resources(self):
+ results = self.get('/%s' % self.name)
+
+ if not results.ok:
+ raise ScalewayException('Error fetching {0} ({1}) [{2}: {3}]'.format(
+ self.name, '%s/%s' % (self.module.params.get('api_url'), self.name),
+ results.status_code, results.json['message']
+ ))
+
+ return results.json.get(self.name)
+
+ def _url_builder(self, path, params):
+ d = self.module.params.get('query_parameters')
+ if params is not None:
+ d.update(params)
+ query_string = urlencode(d, doseq=True)
+
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s?%s' % (self.module.params.get('api_url'), path, query_string)
+
+ def send(self, method, path, data=None, headers=None, params=None):
+ url = self._url_builder(path=path, params=params)
+ self.warn(url)
+
+ if headers is not None:
+ self.headers.update(headers)
+
+ if self.headers['Content-Type'] == "application/json":
+ data = self.module.jsonify(data)
+
+ resp, info = fetch_url(
+ self.module, url, data=data, headers=self.headers, method=method,
+ timeout=self.module.params.get('api_timeout')
+ )
+
+ # Exceptions in fetch_url may result in a status of -1; this ensures a proper error reaches the user in all cases
+ if info['status'] == -1:
+ self.module.fail_json(msg=info['msg'])
+
+ return Response(resp, info)
+
+ @staticmethod
+ def get_user_agent_string(module):
+ return "ansible %s Python %s" % (module.ansible_version, sys.version.split(' ', 1)[0])
+
+ def get(self, path, data=None, headers=None, params=None):
+ return self.send(method='GET', path=path, data=data, headers=headers, params=params)
+
+ def put(self, path, data=None, headers=None, params=None):
+ return self.send(method='PUT', path=path, data=data, headers=headers, params=params)
+
+ def post(self, path, data=None, headers=None, params=None):
+ return self.send(method='POST', path=path, data=data, headers=headers, params=params)
+
+ def delete(self, path, data=None, headers=None, params=None):
+ return self.send(method='DELETE', path=path, data=data, headers=headers, params=params)
+
+ def patch(self, path, data=None, headers=None, params=None):
+ return self.send(method="PATCH", path=path, data=data, headers=headers, params=params)
+
+ def update(self, path, data=None, headers=None, params=None):
+ return self.send(method="UPDATE", path=path, data=data, headers=headers, params=params)
+
+ def warn(self, x):
+ self.module.warn(str(x))
+
+ def fetch_state(self, resource):
+ self.module.debug("fetch_state of resource: %s" % resource["id"])
+ response = self.get(path=self.api_path + "/%s" % resource["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ self.module.fail_json(msg=msg)
+
+ try:
+ self.module.debug("Resource %s in state: %s" % (resource["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ self.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+ def fetch_paginated_resources(self, resource_key, **pagination_kwargs):
+ response = self.get(
+ path=self.api_path,
+ params=pagination_kwargs)
+
+ if not response.ok:
+ self.module.fail_json(msg='Error getting {0} [{1}: {2}]'.format(
+ resource_key,
+ response.status_code, response.json['message']))
+
+ return response.json[resource_key]
+
+ def fetch_all_resources(self, resource_key, **pagination_kwargs):
+ resources = []
+
+ result = [None]
+ while len(result) != 0:
+ result = self.fetch_paginated_resources(resource_key, **pagination_kwargs)
+ resources += result
+ if 'page' in pagination_kwargs:
+ pagination_kwargs['page'] += 1
+ else:
+ pagination_kwargs['page'] = 2
+
+ return resources
+
+ def wait_to_complete_state_transition(self, resource, stable_states, force_wait=False):
+ wait = self.module.params["wait"]
+
+ if not (wait or force_wait):
+ return
+
+ wait_timeout = self.module.params["wait_timeout"]
+ wait_sleep_time = self.module.params["wait_sleep_time"]
+
+ # Prevent requesting the resource status too soon
+ time.sleep(wait_sleep_time)
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+
+ while datetime.datetime.utcnow() < end:
+ self.module.debug("We are going to wait for the resource to finish its transition")
+
+ state = self.fetch_state(resource)
+ if state in stable_states:
+ self.module.debug("It seems that the resource is not in transition anymore.")
+ self.module.debug("load-balancer in state: %s" % self.fetch_state(resource))
+ break
+
+ time.sleep(wait_sleep_time)
+ else:
+ self.module.fail_json(msg="Server takes too long to finish its transition")
+
+
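+# Illustrative sketch (hypothetical ``ServersAPI`` subclass; concrete
+# scaleway_* modules set ``name`` and ``api_path`` for their own resource):
+#
+#     class ServersAPI(Scaleway):
+#         def __init__(self, module):
+#             super(ServersAPI, self).__init__(module)
+#             self.name = 'servers'
+#             self.api_path = '/instance/v1/zones/fr-par-1/servers'
+#
+#     api = ServersAPI(module)
+#     all_servers = api.fetch_all_resources('servers', per_page=50, page=1)
+
+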
+SCALEWAY_LOCATION = {
+ 'par1': {
+ 'name': 'Paris 1',
+ 'country': 'FR',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
+ },
+
+ 'EMEA-FR-PAR1': {
+ 'name': 'Paris 1',
+ 'country': 'FR',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-1'
+ },
+
+ 'par2': {
+ 'name': 'Paris 2',
+ 'country': 'FR',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
+ },
+
+ 'EMEA-FR-PAR2': {
+ 'name': 'Paris 2',
+ 'country': 'FR',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-2',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2'
+ },
+
+ 'ams1': {
+ 'name': 'Amsterdam 1',
+ 'country': 'NL',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
+ },
+
+ 'EMEA-NL-EVS': {
+ 'name': 'Amsterdam 1',
+ 'country': 'NL',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1'
+ },
+
+ 'waw1': {
+ 'name': 'Warsaw 1',
+ 'country': 'PL',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
+ },
+
+ 'EMEA-PL-WAW1': {
+ 'name': 'Warsaw 1',
+ 'country': 'PL',
+ 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1',
+ 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-1'
+ },
+}
+
+SCALEWAY_ENDPOINT = "https://api.scaleway.com"
+
+SCALEWAY_REGIONS = [
+ "fr-par",
+ "nl-ams",
+ "pl-waw",
+]
+
+SCALEWAY_ZONES = [
+ "fr-par-1",
+ "fr-par-2",
+ "nl-ams-1",
+ "pl-waw-1",
+]
diff --git a/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py b/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
new file mode 100644
index 000000000..9a2736183
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/source_control/bitbucket.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+
+
+class BitbucketHelper:
+ BITBUCKET_API_URL = 'https://api.bitbucket.org'
+
+ def __init__(self, module):
+ self.module = module
+ self.access_token = None
+
+ @staticmethod
+ def bitbucket_argument_spec():
+ return dict(
+ client_id=dict(type='str', fallback=(env_fallback, ['BITBUCKET_CLIENT_ID'])),
+ client_secret=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_CLIENT_SECRET'])),
+ # TODO:
+ # - Rename user to username once current usage of username is removed
+ # - Alias user to username and deprecate it
+ user=dict(type='str', aliases=['username'], fallback=(env_fallback, ['BITBUCKET_USERNAME'])),
+ password=dict(type='str', no_log=True, fallback=(env_fallback, ['BITBUCKET_PASSWORD'])),
+ )
+
+ @staticmethod
+ def bitbucket_required_one_of():
+ return [['client_id', 'client_secret', 'user', 'password']]
+
+ @staticmethod
+ def bitbucket_required_together():
+ return [['client_id', 'client_secret'], ['user', 'password']]
+
+ def fetch_access_token(self):
+ if self.module.params['client_id'] and self.module.params['client_secret']:
+ headers = {
+ 'Authorization': basic_auth_header(self.module.params['client_id'], self.module.params['client_secret']),
+ }
+
+ info, content = self.request(
+ api_url='https://bitbucket.org/site/oauth2/access_token',
+ method='POST',
+ data='grant_type=client_credentials',
+ headers=headers,
+ )
+
+ if info['status'] == 200:
+ self.access_token = content['access_token']
+ else:
+ self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info))
+
+ def request(self, api_url, method, data=None, headers=None):
+ headers = headers or {}
+
+ if self.access_token:
+ headers.update({
+ 'Authorization': 'Bearer {0}'.format(self.access_token),
+ })
+ elif self.module.params['user'] and self.module.params['password']:
+ headers.update({
+ 'Authorization': basic_auth_header(self.module.params['user'], self.module.params['password']),
+ })
+
+ if isinstance(data, dict):
+ data = self.module.jsonify(data)
+ headers.update({
+ 'Content-type': 'application/json',
+ })
+
+ response, info = fetch_url(
+ module=self.module,
+ url=api_url,
+ method=method,
+ headers=headers,
+ data=data,
+ force=True,
+ )
+
+ content = {}
+
+ if response is not None:
+ body = to_text(response.read())
+ if body:
+ content = json.loads(body)
+
+ return info, content
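+
+
+# Illustrative sketch (assumed wiring; mirrors how bitbucket_* modules
+# typically use this helper, 2.0/repositories is a public Bitbucket endpoint):
+#
+#     from ansible.module_utils.basic import AnsibleModule
+#
+#     module = AnsibleModule(
+#         argument_spec=BitbucketHelper.bitbucket_argument_spec(),
+#         required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+#         required_together=BitbucketHelper.bitbucket_required_together(),
+#     )
+#     bitbucket = BitbucketHelper(module)
+#     bitbucket.fetch_access_token()
+#     info, content = bitbucket.request(
+#         api_url='{0}/2.0/repositories'.format(BitbucketHelper.BITBUCKET_API_URL),
+#         method='GET',
+#     )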
diff --git a/ansible_collections/community/general/plugins/module_utils/ssh.py b/ansible_collections/community/general/plugins/module_utils/ssh.py
new file mode 100644
index 000000000..082839e26
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/ssh.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Björn Andersson
+# Copyright (c) 2021, Ansible Project
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+import os
+
+
+def determine_config_file(user, config_file):
+ if user:
+ config_file = os.path.join(os.path.expanduser('~%s' % user), '.ssh', 'config')
+ elif config_file is None:
+ config_file = '/etc/ssh/ssh_config'
+ return config_file
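+
+
+# Illustrative behaviour (the '~user' expansion depends on the target host;
+# '/home/alice' assumes a typical Linux layout):
+#
+#     determine_config_file('alice', None)        # -> '/home/alice/.ssh/config'
+#     determine_config_file(None, None)           # -> '/etc/ssh/ssh_config'
+#     determine_config_file(None, '/tmp/config')  # -> '/tmp/config'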
diff --git a/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py b/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
new file mode 100644
index 000000000..2e391b8fb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/storage/emc/emc_vnx.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Luca 'remix_tj' Lorenzetto
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+emc_vnx_argument_spec = {
+ 'sp_address': dict(type='str', required=True),
+ 'sp_user': dict(type='str', required=False, default='sysadmin'),
+ 'sp_password': dict(type='str', required=False, default='sysadmin',
+ no_log=True),
+}
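+
+
+# Illustrative sketch (assumed wiring; emc_vnx_* modules merge this spec with
+# their own options):
+#
+#     from ansible.module_utils.basic import AnsibleModule
+#
+#     argument_spec = dict(name=dict(type='str', required=True))
+#     argument_spec.update(emc_vnx_argument_spec)
+#     module = AnsibleModule(argument_spec=argument_spec)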
diff --git a/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py b/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
new file mode 100644
index 000000000..3d164ce74
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/storage/hpe3par/hpe3par.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.module_utils import basic
+
+
+def convert_to_binary_multiple(size_with_unit):
+ if size_with_unit is None:
+ return -1
+ valid_units = ['MiB', 'GiB', 'TiB']
+ valid_unit = False
+ for unit in valid_units:
+ if size_with_unit.strip().endswith(unit):
+ valid_unit = True
+ size = size_with_unit.split(unit)[0]
+ if float(size) < 0:
+ return -1
+ if not valid_unit:
+ raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units))
+
+ size = size_with_unit.replace(" ", "").split('iB')[0]
+ size_bytes = basic.human_to_bytes(size)
+ return int(size_bytes / (1024 * 1024))
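+
+# Illustrative examples (returns the size converted to MiB, or -1 for
+# missing/negative input; an unknown unit raises ValueError):
+#
+#     convert_to_binary_multiple('512 MiB')  # -> 512
+#     convert_to_binary_multiple('4 GiB')    # -> 4096
+#     convert_to_binary_multiple(None)       # -> -1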
+
+
+storage_system_spec = {
+ "storage_system_ip": {
+ "required": True,
+ "type": "str"
+ },
+ "storage_system_username": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "storage_system_password": {
+ "required": True,
+ "type": "str",
+ "no_log": True
+ },
+ "secure": {
+ "type": "bool",
+ "default": False
+ }
+}
+
+
+def cpg_argument_spec():
+ spec = {
+ "state": {
+ "required": True,
+ "choices": ['present', 'absent'],
+ "type": 'str'
+ },
+ "cpg_name": {
+ "required": True,
+ "type": "str"
+ },
+ "domain": {
+ "type": "str"
+ },
+ "growth_increment": {
+ "type": "str",
+ },
+ "growth_limit": {
+ "type": "str",
+ },
+ "growth_warning": {
+ "type": "str",
+ },
+ "raid_type": {
+ "required": False,
+ "type": "str",
+ "choices": ['R0', 'R1', 'R5', 'R6']
+ },
+ "set_size": {
+ "required": False,
+ "type": "int"
+ },
+ "high_availability": {
+ "type": "str",
+ "choices": ['PORT', 'CAGE', 'MAG']
+ },
+ "disk_type": {
+ "type": "str",
+ "choices": ['FC', 'NL', 'SSD']
+ }
+ }
+ spec.update(storage_system_spec)
+ return spec
diff --git a/ansible_collections/community/general/plugins/module_utils/univention_umc.py b/ansible_collections/community/general/plugins/module_utils/univention_umc.py
new file mode 100644
index 000000000..b08f39e30
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/univention_umc.py
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""Univention Corporate Server (UCS) access module.
+
+Provides the following functions for working with a UCS server.
+
+ - ldap_search(filter, base=None, attr=None)
+ Search the LDAP via Univention's LDAP wrapper (ULDAP)
+
+ - config_registry()
+ Return the UCR registry object
+
+ - base_dn()
+ Return the configured Base DN according to the UCR
+
+ - uldap()
+ Return a handle to the ULDAP LDAP wrapper
+
+ - umc_module_for_add(module, container_dn, superordinate=None)
+ Return a UMC module for creating a new object of the given type
+
+ - umc_module_for_edit(module, object_dn, superordinate=None)
+ Return a UMC module for editing an existing object of the given type
+
+
+Any other module is not part of the "official" API and may change at any time.
+"""
+
+import re
+
+
+__all__ = [
+ 'ldap_search',
+ 'config_registry',
+ 'base_dn',
+ 'uldap',
+ 'umc_module_for_add',
+ 'umc_module_for_edit',
+]
+
+
+_singletons = {}
+
+
+def ldap_module():
+ import ldap as orig_ldap
+ return orig_ldap
+
+
+def _singleton(name, constructor):
+ if name in _singletons:
+ return _singletons[name]
+ _singletons[name] = constructor()
+ return _singletons[name]
+
+
+def config_registry():
+
+ def construct():
+ import univention.config_registry
+ ucr = univention.config_registry.ConfigRegistry()
+ ucr.load()
+ return ucr
+
+ return _singleton('config_registry', construct)
+
+
+def base_dn():
+ return config_registry()['ldap/base']
+
+
+def uldap():
+ "Return a configured univention uldap object"
+
+ def construct():
+ try:
+ secret_file = open('/etc/ldap.secret', 'r')
+ bind_dn = 'cn=admin,{0}'.format(base_dn())
+ except IOError: # pragma: no cover
+ secret_file = open('/etc/machine.secret', 'r')
+ bind_dn = config_registry()["ldap/hostdn"]
+ pwd_line = secret_file.readline()
+ pwd = re.sub('\n', '', pwd_line)
+
+ import univention.admin.uldap
+ return univention.admin.uldap.access(
+ host=config_registry()['ldap/master'],
+ base=base_dn(),
+ binddn=bind_dn,
+ bindpw=pwd,
+ start_tls=1,
+ )
+
+ return _singleton('uldap', construct)
+
+
+def config():
+ def construct():
+ import univention.admin.config
+ return univention.admin.config.config()
+ return _singleton('config', construct)
+
+
+def init_modules():
+ def construct():
+ import univention.admin.modules
+ univention.admin.modules.update()
+ return True
+ return _singleton('modules_initialized', construct)
+
+
+def position_base_dn():
+ def construct():
+ import univention.admin.uldap
+ return univention.admin.uldap.position(base_dn())
+ return _singleton('position_base_dn', construct)
+
+
+def ldap_dn_tree_parent(dn, count=1):
+ dn_array = dn.split(',')
+ dn_array[0:count] = []
+ return ','.join(dn_array)
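+
+# Illustrative examples (strips the first ``count`` RDNs from the DN):
+#
+#     ldap_dn_tree_parent('cn=box,cn=computers,dc=example,dc=com')
+#     # -> 'cn=computers,dc=example,dc=com'
+#     ldap_dn_tree_parent('cn=box,cn=computers,dc=example,dc=com', count=2)
+#     # -> 'dc=example,dc=com'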
+
+
+def ldap_search(filter, base=None, attr=None):
+ """Replaces uldaps search and uses a generator.
+ !! Arguments are not the same."""
+
+ if base is None:
+ base = base_dn()
+ msgid = uldap().lo.lo.search(
+ base,
+ ldap_module().SCOPE_SUBTREE,
+ filterstr=filter,
+ attrlist=attr
+ )
+ # A try/finally block used to be here, but a Python bug swallows the
+ # KeyboardInterrupt when it is used; without the finally, the abandon
+ # call below only runs when the loop exits cleanly.
+ while True:
+ result_type, result_data = uldap().lo.lo.result(msgid, all=0)
+ if not result_data:
+ break
+ if result_type is ldap_module().RES_SEARCH_RESULT: # pragma: no cover
+ break
+ else:
+ if result_type is ldap_module().RES_SEARCH_ENTRY:
+ for res in result_data:
+ yield res
+ uldap().lo.lo.abandon(msgid)
+
+
+def module_by_name(module_name_):
+ """Returns an initialized UMC module, identified by the given name.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ If the module does not exist, a KeyError is raised.
+
+ The modules are cached, so they won't be re-initialized
+ in subsequent calls.
+ """
+
+ def construct():
+ import univention.admin.modules
+ init_modules()
+ module = univention.admin.modules.get(module_name_)
+ univention.admin.modules.init(uldap(), position_base_dn(), module)
+ return module
+
+ return _singleton('module/%s' % module_name_, construct)
+
+
+def get_umc_admin_objects():
+ """Convenience accessor for getting univention.admin.objects.
+
+ This implements delayed importing, so the univention.* modules
+ are not loaded until this function is called.
+ """
+ import univention.admin
+ return univention.admin.objects
+
+
+def umc_module_for_add(module, container_dn, superordinate=None):
+ """Returns an UMC module object prepared for creating a new entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The container_dn MUST be the dn of the container (not of the object to
+ be created itself!).
+ """
+ mod = module_by_name(module)
+
+ position = position_base_dn()
+ position.setDn(container_dn)
+
+ # config, ldap objects from common module
+ obj = mod.object(config(), uldap(), position, superordinate=superordinate)
+ obj.open()
+
+ return obj
+
+
+def umc_module_for_edit(module, object_dn, superordinate=None):
+ """Returns an UMC module object prepared for editing an existing entry.
+
+ The module is a module specification according to the udm commandline.
+ Example values are:
+ * users/user
+ * shares/share
+ * groups/group
+
+ The object_dn MUST be the dn of the object itself, not the container!
+ """
+ mod = module_by_name(module)
+
+ objects = get_umc_admin_objects()
+
+ position = position_base_dn()
+ position.setDn(ldap_dn_tree_parent(object_dn))
+
+ obj = objects.get(
+ mod,
+ config(),
+ uldap(),
+ position=position,
+ superordinate=superordinate,
+ dn=object_dn
+ )
+ obj.open()
+
+ return obj
+
+
+def create_containers_and_parents(container_dn):
+ """Create a container and if needed the parents containers"""
+ import univention.admin.uexceptions as uexcp
+ if not container_dn.startswith("cn="):
+ raise AssertionError()
+ try:
+ parent = ldap_dn_tree_parent(container_dn)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
+ except uexcp.ldapError:
+ create_containers_and_parents(parent)
+ obj = umc_module_for_add(
+ 'container/cn',
+ parent
+ )
+ obj['name'] = container_dn.split(',')[0].split('=')[1]
+ obj['description'] = "container created by import"
diff --git a/ansible_collections/community/general/plugins/module_utils/utm_utils.py b/ansible_collections/community/general/plugins/module_utils/utm_utils.py
new file mode 100644
index 000000000..712450cd2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/utm_utils.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class UTMModuleConfigurationError(Exception):
+
+ def __init__(self, msg, **args):
+ super(UTMModuleConfigurationError, self).__init__(self, msg)
+ self.msg = msg
+ self.module_fail_args = args
+
+ def do_fail(self, module):
+ module.fail_json(msg=self.msg, other=self.module_fail_args)
+
+
+class UTMModule(AnsibleModule):
+ """
+ This is a helper class to construct any UTM module. It automatically adds the utm_host, utm_port, utm_token,
+ utm_protocol, validate_certs and state fields to the module. If you want to implement your own Sophos UTM module,
+ just initialize this UTMModule class and define the payload fields that are needed for your module.
+ See the other modules like utm_aaa_group for example.
+ """
+
+ def __init__(self, argument_spec, bypass_checks=False, no_log=False,
+ mutually_exclusive=None, required_together=None, required_one_of=None, add_file_common_args=False,
+ supports_check_mode=False, required_if=None):
+ default_specs = dict(
+ headers=dict(type='dict', required=False, default={}),
+ utm_host=dict(type='str', required=True),
+ utm_port=dict(type='int', default=4444),
+ utm_token=dict(type='str', required=True, no_log=True),
+ utm_protocol=dict(type='str', required=False, default="https", choices=["https", "http"]),
+ validate_certs=dict(type='bool', required=False, default=True),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ super(UTMModule, self).__init__(self._merge_specs(default_specs, argument_spec), bypass_checks, no_log,
+ mutually_exclusive, required_together, required_one_of,
+ add_file_common_args, supports_check_mode, required_if)
+
+ def _merge_specs(self, default_specs, custom_specs):
+ result = default_specs.copy()
+ result.update(custom_specs)
+ return result
+
+
+class UTM:
+
+ def __init__(self, module, endpoint, change_relevant_keys, info_only=False):
+ """
+ Initialize UTM Class
+ :param module: The Ansible module
+ :param endpoint: The corresponding endpoint to the module
+ :param change_relevant_keys: The keys of the object to check for changes
+ :param info_only: When implementing an info module, set this to true. Will allow access to the info method only
+ """
+ self.info_only = info_only
+ self.module = module
+ self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native(
+ module.params.get('utm_port')) + "/api/objects/" + endpoint + "/"
+
+ """
+ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated
+ """
+ self.change_relevant_keys = change_relevant_keys
+ self.module.params['url_username'] = 'token'
+ self.module.params['url_password'] = module.params.get('utm_token')
+ if not all(elem in module.params.keys() for elem in self.change_relevant_keys):
+ raise UTMModuleConfigurationError(
+ "The keys " + to_native(
+ self.change_relevant_keys) + " to check are not in the module's keys:\n" + to_native(
+ list(module.params.keys())))
+
+ def execute(self):
+ try:
+ if not self.info_only:
+ if self.module.params.get('state') == 'present':
+ self._add()
+ elif self.module.params.get('state') == 'absent':
+ self._remove()
+ else:
+ self._info()
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+ def _info(self):
+ """
+ returns the info for an object in utm
+ """
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+ self.module.fail_json(result=json.loads(info["body"]))
+ else:
+ if result is None:
+ self.module.exit_json(changed=False)
+ else:
+ self.module.exit_json(result=result, changed=False)
+
+ def _add(self):
+ """
+ adds or updates a host object on utm
+ """
+
+ combined_headers = self._combine_headers()
+
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if info["status"] >= 400:
+ self.module.fail_json(result=json.loads(info["body"]))
+ else:
+ data_as_json_string = self.module.jsonify(self.module.params)
+ if result is None:
+ response, info = fetch_url(self.module, self.request_url, method="POST",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ else:
+ if self._is_object_changed(self.change_relevant_keys, self.module, result):
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="PUT",
+ headers=combined_headers,
+ data=data_as_json_string)
+ if info['status'] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ is_changed = True
+ result = self._clean_result(json.loads(response.read()))
+ self.module.exit_json(result=result, changed=is_changed)
+
+ def _combine_headers(self):
+ """
+ This will combine a header default with headers that come from the module declaration
+ :return: A combined headers dict
+ """
+ default_headers = {"Accept": "application/json", "Content-type": "application/json"}
+ if self.module.params.get('headers') is not None:
+ result = default_headers.copy()
+ result.update(self.module.params.get('headers'))
+ else:
+ result = default_headers
+ return result
+
+ def _remove(self):
+ """
+ removes an object from utm
+ """
+ is_changed = False
+ info, result = self._lookup_entry(self.module, self.request_url)
+ if result is not None:
+ response, info = fetch_url(self.module, self.request_url + result['_ref'], method="DELETE",
+ headers={"Accept": "application/json", "X-Restd-Err-Ack": "all"},
+ data=self.module.jsonify(self.module.params))
+ if info["status"] >= 400:
+ self.module.fail_json(msg=json.loads(info["body"]))
+ else:
+ is_changed = True
+ self.module.exit_json(changed=is_changed)
+
+ def _lookup_entry(self, module, request_url):
+ """
+ Lookup for existing entry
+ :param module:
+ :param request_url:
+ :return:
+ """
+ response, info = fetch_url(module, request_url, method="GET", headers={"Accept": "application/json"})
+ result = None
+ if response is not None:
+ results = json.loads(response.read())
+ result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None)
+ return info, result
+
+ def _clean_result(self, result):
+ """
+ Will clean the result from irrelevant fields
+ :param result: The result from the query
+ :return: The modified result
+ """
+ del result['utm_host']
+ del result['utm_port']
+ del result['utm_token']
+ del result['utm_protocol']
+ del result['validate_certs']
+ del result['url_username']
+ del result['url_password']
+ del result['state']
+ return result
+
+ def _is_object_changed(self, keys, module, result):
+ """
+ Check if my object is changed
+ :param keys: The keys that will determine if an object is changed
+ :param module: The module
+ :param result: The result from the query
+ :return:
+ """
+ for key in keys:
+ if module.params.get(key) != result[key]:
+ return True
+ return False
diff --git a/ansible_collections/community/general/plugins/module_utils/version.py b/ansible_collections/community/general/plugins/module_utils/version.py
new file mode 100644
index 000000000..369988197
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/version.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+from ansible.module_utils.six import raise_from
+
+try:
+ from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ try:
+ from distutils.version import LooseVersion # noqa: F401, pylint: disable=unused-import
+ except ImportError as exc:
+ msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
+ raise_from(ImportError(msg), exc)
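+
+
+# Illustrative usage (segments compare numerically, so '2.10' sorts after '2.9'):
+#
+#     from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+#
+#     LooseVersion('2.10.1') > LooseVersion('2.9')  # -> True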
diff --git a/ansible_collections/community/general/plugins/module_utils/vexata.py b/ansible_collections/community/general/plugins/module_utils/vexata.py
new file mode 100644
index 000000000..2ea56a3b0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/vexata.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Sandeep Kasargod <sandeep@vexata.com>
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+HAS_VEXATAPI = True
+try:
+ from vexatapi.vexata_api_proxy import VexataAPIProxy
+except ImportError:
+ HAS_VEXATAPI = False
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import env_fallback
+
+VXOS_VERSION = None
+
+
+def get_version(iocs_json):
+ if not iocs_json:
+ raise Exception('Invalid IOC json')
+ active = [ioc for ioc in iocs_json if ioc['mgmtRole']]
+ if not active:
+ raise Exception('Unable to detect active IOC')
+ active = active[0]
+ ver = active['swVersion']
+ if ver[0] != 'v':
+ raise Exception('Illegal version string')
+ ver = ver[1:ver.find('-')]
+ return tuple(int(x) for x in ver.split('.'))
+
+
+def get_array(module):
+ """Return storage array object or fail"""
+ global VXOS_VERSION
+ array = module.params['array']
+ user = module.params.get('user', None)
+ password = module.params.get('password', None)
+ validate = module.params.get('validate_certs')
+
+ if not HAS_VEXATAPI:
+ module.fail_json(msg='vexatapi library is required for this module. '
+ 'To install, use `pip install vexatapi`')
+
+ if user and password:
+ system = VexataAPIProxy(array, user, password, verify_cert=validate)
+ else:
+ module.fail_json(msg='The user/password are required to be passed in to '
+ 'the module as arguments or by setting the '
+ 'VEXATA_USER and VEXATA_PASSWORD environment variables.')
+ try:
+ if system.test_connection():
+ VXOS_VERSION = get_version(system.iocs())
+ return system
+ else:
+ module.fail_json(msg='Test connection to array failed.')
+ except Exception as e:
+ module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e)))
+
+
+def argument_spec():
+ """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
+ return dict(
+ array=dict(type='str',
+ required=True),
+ user=dict(type='str',
+ fallback=(env_fallback, ['VEXATA_USER'])),
+ password=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['VEXATA_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=False),
+ )
+
+
+def required_together():
+ """Return the default list used for the required_together argument to AnsibleModule"""
+ return [['user', 'password']]
+
+
+def size_to_MiB(size):
+ """Convert a '<integer>[MGT]' string to MiB, return -1 on error."""
+ quant = size[:-1]
+ exponent = size[-1]
+ if not quant.isdigit() or exponent not in 'MGT':
+ return -1
+ quant = int(quant)
+ if exponent == 'G':
+ quant <<= 10
+ elif exponent == 'T':
+ quant <<= 20
+ return quant
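+
+
+# Illustrative examples (binary suffixes: G = 1024 MiB, T = 1024 GiB):
+#
+#     size_to_MiB('512M')  # -> 512
+#     size_to_MiB('100G')  # -> 102400
+#     size_to_MiB('2T')    # -> 2097152
+#     size_to_MiB('10k')   # -> -1  (unsupported unit)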
diff --git a/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py b/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py
new file mode 100644
index 000000000..d27e02d7b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/wdc_redfish_utils.py
@@ -0,0 +1,520 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import datetime
+import re
+import time
+import tarfile
+
+from ansible.module_utils.urls import fetch_file
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+
+
+class WdcRedfishUtils(RedfishUtils):
+ """Extension to RedfishUtils to support WDC enclosures."""
+ # Status codes returned by WDC FW Update Status
+ UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE = 0
+ UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS = 1
+ UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = 2
+ UPDATE_STATUS_CODE_FW_UPDATE_FAILED = 3
+
+ # Status messages returned by WDC FW Update Status
+ UPDATE_STATUS_MESSAGE_READY_FOR_FW_UDPATE = "Ready for FW update"
+ UDPATE_STATUS_MESSAGE_FW_UPDATE_IN_PROGRESS = "FW update in progress"
+ UPDATE_STATUS_MESSAGE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION = "FW update completed. Waiting for activation."
+ UPDATE_STATUS_MESSAGE_FW_UPDATE_FAILED = "FW update failed."
+
+ # Dict keys for resource bodies
+ # Standard keys
+ ACTIONS = "Actions"
+ OEM = "Oem"
+ WDC = "WDC"
+ TARGET = "target"
+
+ # Keys for specific operations
+ CHASSIS_LOCATE = "#Chassis.Locate"
+ CHASSIS_POWER_MODE = "#Chassis.PowerMode"
+
+ def __init__(self,
+ creds,
+ root_uris,
+ timeout,
+ module,
+ resource_id,
+ data_modification):
+ super(WdcRedfishUtils, self).__init__(creds=creds,
+ root_uri=root_uris[0],
+ timeout=timeout,
+ module=module,
+ resource_id=resource_id,
+ data_modification=data_modification)
+ # Update the root URI if we cannot perform a Redfish GET to the first one
+ self._set_root_uri(root_uris)
+
+ def _set_root_uri(self, root_uris):
+ """Set the root URI from a list of options.
+
+ If the current root URI is good, just keep it. Else cycle through our options until we find a good one.
+ A URI is considered good if we can GET uri/redfish/v1.
+ """
+ for root_uri in root_uris:
+ uri = root_uri + "/redfish/v1"
+ response = self.get_request(uri)
+ if response['ret']:
+ self.root_uri = root_uri
+ break
+
+ def _find_updateservice_resource(self):
+ """Find the update service resource as well as additional WDC-specific resources."""
+ response = super(WdcRedfishUtils, self)._find_updateservice_resource()
+ if not response['ret']:
+ return response
+ return self._find_updateservice_additional_uris()
+
+ def _is_enclosure_multi_tenant(self):
+ """Determine if the enclosure is multi-tenant.
+
+ The serial number of a multi-tenant enclosure will end in "-A" or "-B".
+
+ :return: True/False if the enclosure is multi-tenant or not; None if unable to determine.
+ """
+ response = self.get_request(self.root_uri + self.service_root + "Chassis/Enclosure")
+ if response['ret'] is False:
+ return None
+ pattern = r".*-[A,B]"
+ data = response['data']
+ return re.match(pattern, data['SerialNumber']) is not None
+
+ def _find_updateservice_additional_uris(self):
+ """Find & set WDC-specific update service URIs"""
+ response = self.get_request(self.root_uri + self._update_uri())
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Actions' not in data:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ if '#UpdateService.SimpleUpdate' not in data['Actions']:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ action = data['Actions']['#UpdateService.SimpleUpdate']
+ if 'target' not in action:
+ return {'ret': False, 'msg': 'Service does not support SimpleUpdate'}
+ self.simple_update_uri = action['target']
+
+ # Simple update status URI is not provided via GET /redfish/v1/UpdateService
+ # So we have to hard code it.
+ self.simple_update_status_uri = "{0}/Status".format(self.simple_update_uri)
+
+ # FWActivate URI
+ if 'Oem' not in data['Actions']:
+ return {'ret': False, 'msg': 'Service does not support OEM operations'}
+ if 'WDC' not in data['Actions']['Oem']:
+ return {'ret': False, 'msg': 'Service does not support WDC operations'}
+ if '#UpdateService.FWActivate' not in data['Actions']['Oem']['WDC']:
+ return {'ret': False, 'msg': 'Service does not support FWActivate'}
+ action = data['Actions']['Oem']['WDC']['#UpdateService.FWActivate']
+ if 'target' not in action:
+ return {'ret': False, 'msg': 'Service does not support FWActivate'}
+ self.firmware_activate_uri = action['target']
+ return {'ret': True}
+
+ def _simple_update_status_uri(self):
+ return self.simple_update_status_uri
+
+ def _firmware_activate_uri(self):
+ return self.firmware_activate_uri
+
+ def _update_uri(self):
+ return self.update_uri
+
+ def get_simple_update_status(self):
+ """Issue Redfish HTTP GET to return the simple update status"""
+ result = {}
+ response = self.get_request(self.root_uri + self._simple_update_status_uri())
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ result['entries'] = data
+ return result
+
+ def firmware_activate(self, update_opts):
+ """Perform FWActivate using Redfish HTTP API."""
+ creds = update_opts.get('update_creds')
+ payload = {}
+ if creds:
+ if creds.get('username'):
+ payload["Username"] = creds.get('username')
+ if creds.get('password'):
+ payload["Password"] = creds.get('password')
+
+ # Make sure the service supports FWActivate
+ response = self.get_request(self.root_uri + self._update_uri())
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'Actions' not in data:
+ return {'ret': False, 'msg': 'Service does not support FWActivate'}
+
+ response = self.post_request(self.root_uri + self._firmware_activate_uri(), payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "FWActivate requested"}
+
+ def _get_bundle_version(self,
+ bundle_uri):
+ """Get the firmware version from a bundle file, and whether or not it is multi-tenant.
+
+ Only supports HTTP at this time. Assumes URI exists and is a tarfile.
+ Looks for a file oobm-[version].pkg, such as 'oobm-4.0.13.pkg'. Extracts the version number
+ from that filename (in the above example, the version number is "4.0.13").
+
+ To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile,
+ and checks the appropriate byte in the file.
+
+ :param str bundle_uri: HTTP URI of the firmware bundle.
+ :return: Firmware version number contained in the bundle, and whether or not the bundle is multi-tenant.
+ Either value will be None if unable to determine.
+ :rtype: str or None, bool or None
+ """
+ bundle_temp_filename = fetch_file(module=self.module,
+ url=bundle_uri)
+ if not tarfile.is_tarfile(bundle_temp_filename):
+ return None, None
+ tf = tarfile.open(bundle_temp_filename)
+ pattern_pkg = r"oobm-(.+)\.pkg"
+ pattern_bin = r"(.*\.bin)"
+ bundle_version = None
+ is_multi_tenant = None
+ for filename in tf.getnames():
+ match_pkg = re.match(pattern_pkg, filename)
+ if match_pkg is not None:
+ bundle_version = match_pkg.group(1)
+ match_bin = re.match(pattern_bin, filename)
+ if match_bin is not None:
+ bin_filename = match_bin.group(1)
+ bin_file = tf.extractfile(bin_filename)
+ bin_file.seek(11)
+ byte_11 = bin_file.read(1)
+ is_multi_tenant = byte_11 == b'\x80'
+
+ return bundle_version, is_multi_tenant
+
+ @staticmethod
+ def uri_is_http(uri):
+ """Return True if the specified URI is http or https.
+
+ :param str uri: A URI.
+ :return: True if the URI is http or https, else False
+ :rtype: bool
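+
+ Example (illustrative values):
+
+ >>> WdcRedfishUtils.uri_is_http("https://example.com/bundle.tar")
+ True
+ >>> WdcRedfishUtils.uri_is_http("ftp://example.com/bundle.tar")
+ False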
+ """
+ parsed_bundle_uri = urlparse(uri)
+ return parsed_bundle_uri.scheme.lower() in ['http', 'https']
+
+ def update_and_activate(self, update_opts):
+ """Update and activate the firmware in a single action.
+
+ Orchestrates the firmware update so that everything can be done in a single command.
+ Compares the update version with the already-installed version -- skips update if they are the same.
+ Performs retries and handles timeouts as needed.
+
+ """
+ # Convert credentials to standard HTTP format
+ if update_opts.get("update_creds") is not None and "username" in update_opts["update_creds"] and "password" in update_opts["update_creds"]:
+ update_creds = update_opts["update_creds"]
+ parsed_url = urlparse(update_opts["update_image_uri"])
+ if update_creds:
+ original_netloc = parsed_url.netloc
+ parsed_url = parsed_url._replace(netloc="{0}:{1}@{2}".format(update_creds.get("username"),
+ update_creds.get("password"),
+ original_netloc))
+ update_opts["update_image_uri"] = urlunparse(parsed_url)
+ del update_opts["update_creds"]
+
+ # Make sure bundle URI is HTTP(s)
+ bundle_uri = update_opts["update_image_uri"]
+
+ if not self.uri_is_http(bundle_uri):
+ return {
+ 'ret': False,
+ 'msg': 'Bundle URI must be HTTP or HTTPS'
+ }
+ # Make sure IOM is ready for update
+ result = self.get_simple_update_status()
+ if result['ret'] is False:
+ return result
+ update_status = result['entries']
+ status_code = update_status['StatusCode']
+ status_description = update_status['Description']
+ if status_code not in [
+ self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE,
+ self.UPDATE_STATUS_CODE_FW_UPDATE_FAILED
+ ]:
+ return {
+ 'ret': False,
+ 'msg': 'Target is not ready for FW update. Current status: {0} ({1})'.format(
+ status_code, status_description
+ )}
+
+ # Check the FW version in the bundle file, and compare it to what is already on the IOMs
+
+ # Bundle version number
+ bundle_firmware_version, is_bundle_multi_tenant = self._get_bundle_version(bundle_uri)
+ if bundle_firmware_version is None or is_bundle_multi_tenant is None:
+ return {
+ 'ret': False,
+ 'msg': 'Unable to extract bundle version or multi-tenant status from update image tarfile'
+ }
+
+ # Verify that the bundle is correctly multi-tenant or not
+ is_enclosure_multi_tenant = self._is_enclosure_multi_tenant()
+ if is_enclosure_multi_tenant != is_bundle_multi_tenant:
+ return {
+ 'ret': False,
+ 'msg': 'Enclosure multi-tenant is {0} but bundle multi-tenant is {1}'.format(
+ is_enclosure_multi_tenant,
+ is_bundle_multi_tenant,
+ )
+ }
+
+ # Version number installed on IOMs
+ firmware_inventory = self.get_firmware_inventory()
+ if not firmware_inventory["ret"]:
+ return firmware_inventory
+ firmware_inventory_dict = {}
+ for entry in firmware_inventory["entries"]:
+ firmware_inventory_dict[entry["Id"]] = entry
+ iom_a_firmware_version = firmware_inventory_dict.get("IOModuleA_OOBM", {}).get("Version")
+ iom_b_firmware_version = firmware_inventory_dict.get("IOModuleB_OOBM", {}).get("Version")
+ # If version is None, we will proceed with the update, because we cannot tell
+ # for sure that we have a full version match.
+ if is_enclosure_multi_tenant:
+ # For multi-tenant, only one of the IOMs will be affected by the firmware update,
+ # so see if that IOM already has the same firmware version as the bundle.
+ firmware_already_installed = bundle_firmware_version == self._get_installed_firmware_version_of_multi_tenant_system(
+ iom_a_firmware_version,
+ iom_b_firmware_version)
+ else:
+ # For single-tenant, see if both IOMs already have the same firmware version as the bundle.
+ firmware_already_installed = bundle_firmware_version == iom_a_firmware_version == iom_b_firmware_version
+ # If this FW already installed, return changed: False, and do not update the firmware.
+ if firmware_already_installed:
+ return {
+ 'ret': True,
+ 'changed': False,
+ 'msg': 'Version {0} already installed'.format(bundle_firmware_version)
+ }
+
+ # Version numbers don't match the bundle -- proceed with update (unless we are in check mode)
+ if self.module.check_mode:
+ return {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'Update not performed in check mode.'
+ }
+ update_successful = False
+ retry_interval_seconds = 5
+ max_number_of_retries = 5
+ retry_number = 0
+ while retry_number < max_number_of_retries and not update_successful:
+ if retry_number != 0:
+ time.sleep(retry_interval_seconds)
+ retry_number += 1
+
+ result = self.simple_update(update_opts)
+ if result['ret'] is not True:
+ # Sometimes a timeout error is returned even though the update actually was requested.
+ # Check the update status to see if the update is in progress.
+ status_result = self.get_simple_update_status()
+ if status_result['ret'] is False:
+ continue
+ update_status = status_result['entries']
+ status_code = update_status['StatusCode']
+ if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS:
+ # Update is not in progress -- retry until max number of retries
+ continue
+ else:
+ update_successful = True
+ else:
+ update_successful = True
+ if not update_successful:
+ # Unable to get SimpleUpdate to work. Return the failure from the SimpleUpdate
+ return result
+
+ # Wait for "ready to activate"
+ max_wait_minutes = 30
+ polling_interval_seconds = 30
+ status_code = self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE
+ start_time = datetime.datetime.now()
+ # For a short time, target will still say "ready for firmware update" before it transitions
+ # to "update in progress"
+ status_codes_for_update_incomplete = [
+ self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS,
+ self.UPDATE_STATUS_CODE_READY_FOR_FW_UPDATE
+ ]
+ iteration = 0
+ while status_code in status_codes_for_update_incomplete \
+ and datetime.datetime.now() - start_time < datetime.timedelta(minutes=max_wait_minutes):
+ if iteration != 0:
+ time.sleep(polling_interval_seconds)
+ iteration += 1
+ result = self.get_simple_update_status()
+ if result['ret'] is False:
+ continue # We may get timeouts, just keep trying until we give up
+ update_status = result['entries']
+ status_code = update_status['StatusCode']
+ status_description = update_status['Description']
+ if status_code == self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS:
+ # Once it says update in progress, "ready for update" is no longer a valid status code
+ status_codes_for_update_incomplete = [self.UPDATE_STATUS_CODE_FW_UPDATE_IN_PROGRESS]
+
+ # Update no longer in progress -- verify that it finished
+ if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION:
+ return {
+ 'ret': False,
+ 'msg': 'Target is not ready for FW activation after update. Current status: {0} ({1})'.format(
+ status_code, status_description
+ )}
+
+ self.firmware_activate(update_opts)
+ return {'ret': True, 'changed': True,
+ 'msg': "Firmware updated and activation initiated."}
+
+ def _get_installed_firmware_version_of_multi_tenant_system(self,
+ iom_a_firmware_version,
+ iom_b_firmware_version):
+ """Return the version for the active IOM on a multi-tenant system.
+
+ Only call this on a multi-tenant system.
+ Given the installed firmware versions for IOM A and IOM B, this method will determine which IOM
+ is active for this tenant, and return that IOM's firmware version.
+ """
+ # To determine which IOM we are on, try to GET each IOM resource
+ # The one we are on will return valid data.
+ # The other will return an error with message "IOM Module A/B cannot be read"
+ which_iom_is_this = None
+ for iom_letter in ['A', 'B']:
+ iom_uri = "Chassis/IOModule{0}FRU".format(iom_letter)
+ response = self.get_request(self.root_uri + self.service_root + iom_uri)
+ if response['ret'] is False:
+ continue
+ data = response['data']
+ if "Id" in data: # Assume if there is an "Id", it is valid
+ which_iom_is_this = iom_letter
+ break
+ if which_iom_is_this == 'A':
+ return iom_a_firmware_version
+ elif which_iom_is_this == 'B':
+ return iom_b_firmware_version
+ else:
+ return None
+
+ @staticmethod
+ def _get_led_locate_uri(data):
+ """Get the LED locate URI given a resource body."""
+ if WdcRedfishUtils.ACTIONS not in data:
+ return None
+ if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]:
+ return None
+ if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]:
+ return None
+ if WdcRedfishUtils.CHASSIS_LOCATE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
+ return None
+ if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE]:
+ return None
+ return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_LOCATE][WdcRedfishUtils.TARGET]
+
+ @staticmethod
+ def _get_power_mode_uri(data):
+ """Get the Power Mode URI given a resource body."""
+ if WdcRedfishUtils.ACTIONS not in data:
+ return None
+ if WdcRedfishUtils.OEM not in data[WdcRedfishUtils.ACTIONS]:
+ return None
+ if WdcRedfishUtils.WDC not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM]:
+ return None
+ if WdcRedfishUtils.CHASSIS_POWER_MODE not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
+ return None
+ if WdcRedfishUtils.TARGET not in data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE]:
+ return None
+ return data[WdcRedfishUtils.ACTIONS][WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][WdcRedfishUtils.CHASSIS_POWER_MODE][WdcRedfishUtils.TARGET]
+
+ def manage_indicator_led(self, command, resource_uri):
+ key = 'IndicatorLED'
+
+ payloads = {'IndicatorLedOn': 'On', 'IndicatorLedOff': 'Off'}
+ current_led_status_map = {'IndicatorLedOn': 'Blinking', 'IndicatorLedOff': 'Off'}
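+ # Note: when locate is on, the service reports IndicatorLED as 'Blinking'
+ # rather than 'On', hence the separate current-status map.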
+
+ result = {}
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+ current_led_status = data[key]
+ if current_led_status == current_led_status_map[command]:
+ return {'ret': True, 'changed': False}
+
+ led_locate_uri = self._get_led_locate_uri(data)
+ if led_locate_uri is None:
+ return {'ret': False, 'msg': 'LED locate URI not found.'}
+
+ if command in payloads.keys():
+ payload = {'LocateState': payloads[command]}
+ response = self.post_request(self.root_uri + led_locate_uri, payload)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command'}
+
+ return result
+
+ def manage_chassis_power_mode(self, command):
+ return self.manage_power_mode(command, self.chassis_uri)
+
+ def manage_power_mode(self, command, resource_uri=None):
+ if resource_uri is None:
+ resource_uri = self.chassis_uri
+
+ payloads = {'PowerModeNormal': 'Normal', 'PowerModeLow': 'Low'}
+ requested_power_mode = payloads[command]
+
+ result = {}
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ # Make sure the response includes Oem.WDC.PowerMode, and get current power mode
+ power_mode = 'PowerMode'
+ if WdcRedfishUtils.OEM not in data or WdcRedfishUtils.WDC not in data[WdcRedfishUtils.OEM] or\
+ power_mode not in data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC]:
+ return {'ret': False, 'msg': 'Resource does not support Oem.WDC.PowerMode'}
+ current_power_mode = data[WdcRedfishUtils.OEM][WdcRedfishUtils.WDC][power_mode]
+ if current_power_mode == requested_power_mode:
+ return {'ret': True, 'changed': False}
+
+ power_mode_uri = self._get_power_mode_uri(data)
+ if power_mode_uri is None:
+ return {'ret': False, 'msg': 'Power Mode URI not found.'}
+
+ if command in payloads.keys():
+ payload = {'PowerMode': payloads[command]}
+ response = self.post_request(self.root_uri + power_mode_uri, payload)
+ if response['ret'] is False:
+ return response
+ else:
+ return {'ret': False, 'msg': 'Invalid command'}
+
+ return result
diff --git a/ansible_collections/community/general/plugins/module_utils/xenserver.py b/ansible_collections/community/general/plugins/module_utils/xenserver.py
new file mode 100644
index 000000000..3176b5628
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/xenserver.py
@@ -0,0 +1,862 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import atexit
+import time
+import re
+import traceback
+
+XENAPI_IMP_ERR = None
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ HAS_XENAPI = False
+ XENAPI_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import env_fallback, missing_required_lib
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+
+def xenserver_common_argument_spec():
+ return dict(
+ hostname=dict(type='str',
+ aliases=['host', 'pool'],
+ required=False,
+ default='localhost',
+ fallback=(env_fallback, ['XENSERVER_HOST']),
+ ),
+ username=dict(type='str',
+ aliases=['user', 'admin'],
+ required=False,
+ default='root',
+ fallback=(env_fallback, ['XENSERVER_USER'])),
+ password=dict(type='str',
+ aliases=['pass', 'pwd'],
+ required=False,
+ no_log=True,
+ fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
+ validate_certs=dict(type='bool',
+ required=False,
+ default=True,
+ fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
+ )
+
+
+def xapi_to_module_vm_power_state(power_state):
+ """Maps XAPI VM power states to module VM power states."""
+ module_power_state_map = {
+ "running": "poweredon",
+ "halted": "poweredoff",
+ "suspended": "suspended",
+ "paused": "paused"
+ }
+
+ return module_power_state_map.get(power_state)
+
+
+def module_to_xapi_vm_power_state(power_state):
+ """Maps module VM power states to XAPI VM power states."""
+ vm_power_state_map = {
+ "poweredon": "running",
+ "poweredoff": "halted",
+ "restarted": "running",
+ "suspended": "suspended",
+ "shutdownguest": "halted",
+ "rebootguest": "running",
+ }
+
+ return vm_power_state_map.get(power_state)
+
+
+def is_valid_ip_addr(ip_addr):
+ """Validates given string as IPv4 address for given string.
+
+ Args:
+ ip_addr (str): string to validate as IPv4 address.
+
+ Returns:
+ bool: True if string is valid IPv4 address, else False.
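+
+ Example (illustrative values):
+
+ >>> is_valid_ip_addr('192.168.0.1')
+ True
+ >>> is_valid_ip_addr('256.168.0.1')
+ False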
+ """
+ ip_addr_split = ip_addr.split('.')
+
+ if len(ip_addr_split) != 4:
+ return False
+
+ for ip_addr_octet in ip_addr_split:
+ if not ip_addr_octet.isdigit():
+ return False
+
+ ip_addr_octet_int = int(ip_addr_octet)
+
+ if ip_addr_octet_int < 0 or ip_addr_octet_int > 255:
+ return False
+
+ return True
+
+
+def is_valid_ip_netmask(ip_netmask):
+ """Validates given string as IPv4 netmask.
+
+ Args:
+ ip_netmask (str): string to validate as IPv4 netmask.
+
+ Returns:
+ bool: True if string is valid IPv4 netmask, else False.
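+
+ Example (illustrative values):
+
+ >>> is_valid_ip_netmask('255.255.240.0')
+ True
+ >>> is_valid_ip_netmask('255.0.255.0')
+ False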
+ """
+ ip_netmask_split = ip_netmask.split('.')
+
+ if len(ip_netmask_split) != 4:
+ return False
+
+ valid_octet_values = ['0', '128', '192', '224', '240', '248', '252', '254', '255']
+
+ for ip_netmask_octet in ip_netmask_split:
+ if ip_netmask_octet not in valid_octet_values:
+ return False
+
+ if ip_netmask_split[0] != '255' and (ip_netmask_split[1] != '0' or ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[1] != '255' and (ip_netmask_split[2] != '0' or ip_netmask_split[3] != '0'):
+ return False
+ elif ip_netmask_split[2] != '255' and ip_netmask_split[3] != '0':
+ return False
+
+ return True
+
+
+def is_valid_ip_prefix(ip_prefix):
+ """Validates given string as IPv4 prefix.
+
+ Args:
+ ip_prefix (str): string to validate as IPv4 prefix.
+
+ Returns:
+ bool: True if string is valid IPv4 prefix, else False.
+ """
+ if not ip_prefix.isdigit():
+ return False
+
+ ip_prefix_int = int(ip_prefix)
+
+ if ip_prefix_int < 0 or ip_prefix_int > 32:
+ return False
+
+ return True
+
+
+def ip_prefix_to_netmask(ip_prefix, skip_check=False):
+ """Converts IPv4 prefix to netmask.
+
+ Args:
+ ip_prefix (str): IPv4 prefix to convert.
+ skip_check (bool): Skip validation of IPv4 prefix
+ (default: False). Use if you are sure IPv4 prefix is valid.
+
+ Returns:
+ str: IPv4 netmask equivalent to given IPv4 prefix if
+ IPv4 prefix is valid, else an empty string.
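+
+ Example (illustrative values):
+
+ >>> ip_prefix_to_netmask('24')
+ '255.255.255.0'
+ >>> ip_prefix_to_netmask('33')
+ ''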
+ """
+ if skip_check:
+ ip_prefix_valid = True
+ else:
+ ip_prefix_valid = is_valid_ip_prefix(ip_prefix)
+
+ if ip_prefix_valid:
+ return '.'.join([str((0xffffffff << (32 - int(ip_prefix)) >> i) & 0xff) for i in [24, 16, 8, 0]])
+ else:
+ return ""
+
+
+def ip_netmask_to_prefix(ip_netmask, skip_check=False):
+ """Converts IPv4 netmask to prefix.
+
+ Args:
+ ip_netmask (str): IPv4 netmask to convert.
+ skip_check (bool): Skip validation of IPv4 netmask
+ (default: False). Use if you are sure IPv4 netmask is valid.
+
+ Returns:
+ str: IPv4 prefix equivalent to given IPv4 netmask if
+ IPv4 netmask is valid, else an empty string.
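+
+ Example (illustrative values):
+
+ >>> ip_netmask_to_prefix('255.255.255.0')
+ '24'
+ >>> ip_netmask_to_prefix('255.255.0.255')
+ ''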
+ """
+ if skip_check:
+ ip_netmask_valid = True
+ else:
+ ip_netmask_valid = is_valid_ip_netmask(ip_netmask)
+
+ if ip_netmask_valid:
+ return str(sum([bin(int(i)).count("1") for i in ip_netmask.split(".")]))
+ else:
+ return ""
+
+
+def is_valid_ip6_addr(ip6_addr):
+ """Validates given string as IPv6 address.
+
+ Args:
+ ip6_addr (str): string to validate as IPv6 address.
+
+ Returns:
+ bool: True if string is valid IPv6 address, else False.
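+
+ Example (illustrative values):
+
+ >>> is_valid_ip6_addr('fe80::1')
+ True
+ >>> is_valid_ip6_addr('fe80::1::2')
+ False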
+ """
+ ip6_addr = ip6_addr.lower()
+ ip6_addr_split = ip6_addr.split(':')
+
+ if ip6_addr_split[0] == "":
+ ip6_addr_split.pop(0)
+
+ if ip6_addr_split[-1] == "":
+ ip6_addr_split.pop(-1)
+
+ if len(ip6_addr_split) > 8:
+ return False
+
+ if ip6_addr_split.count("") > 1:
+ return False
+ elif ip6_addr_split.count("") == 1:
+ ip6_addr_split.remove("")
+ else:
+ if len(ip6_addr_split) != 8:
+ return False
+
+ ip6_addr_hextet_regex = re.compile('^[0-9a-f]{1,4}$')
+
+ for ip6_addr_hextet in ip6_addr_split:
+ if not bool(ip6_addr_hextet_regex.match(ip6_addr_hextet)):
+ return False
+
+ return True
+
+
+def is_valid_ip6_prefix(ip6_prefix):
+ """Validates given string as IPv6 prefix.
+
+ Args:
+ ip6_prefix (str): string to validate as IPv6 prefix.
+
+ Returns:
+ bool: True if string is valid IPv6 prefix, else False.
+ """
+ if not ip6_prefix.isdigit():
+ return False
+
+ ip6_prefix_int = int(ip6_prefix)
+
+ if ip6_prefix_int < 0 or ip6_prefix_int > 128:
+ return False
+
+ return True
+
+
+def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
+ """Finds and returns a reference to arbitrary XAPI object.
+
+ An object is searched by using either name (name_label) or UUID
+ with UUID taken precedence over name.
+
+ Args:
+ module: Reference to Ansible module object.
+ name (str): Name (name_label) of an object to search for.
+ uuid (str): UUID of an object to search for.
+ obj_type (str): Any valid XAPI object type. See XAPI docs.
+ fail (bool): Should function fail with error message if object
+ is not found or exit silently (default: True). The function
+ always fails if multiple objects with same name are found.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ XAPI reference to found object or None if object is not found
+ and fail=False.
+ """
+ xapi_session = XAPI.connect(module)
+
+ if obj_type in ["template", "snapshot"]:
+ real_obj_type = "VM"
+ elif obj_type == "home server":
+ real_obj_type = "host"
+ elif obj_type == "ISO image":
+ real_obj_type = "VDI"
+ else:
+ real_obj_type = obj_type
+
+ obj_ref = None
+
+ # UUID has precedence over name.
+ if uuid:
+ try:
+ # Find object by UUID. If no object is found using given UUID,
+ # an exception will be generated.
+ obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
+ except XenAPI.Failure as f:
+ if fail:
+ module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
+ elif name:
+ try:
+ # Find object by name (name_label).
+ obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ # If obj_ref_list is empty.
+ if not obj_ref_list:
+ if fail:
+ module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
+ # If obj_ref_list contains multiple object references.
+ elif len(obj_ref_list) > 1:
+ module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
+ # The obj_ref_list contains only one object reference.
+ else:
+ obj_ref = obj_ref_list[0]
+ else:
+ module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))
+
+ return obj_ref
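+
+# Usage sketch (hypothetical VM name; assumes an initialized AnsibleModule):
+#
+# vm_ref = get_object_ref(module, name="my-vm", obj_type="VM", fail=True,
+# msg_prefix="VM search: ")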
+
+
+def gather_vm_params(module, vm_ref):
+ """Gathers all VM parameters available in XAPI database.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+
+ Returns:
+ dict: VM parameters.
+ """
+ # We silently return empty vm_params if bad vm_ref was supplied.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ try:
+ vm_params = xapi_session.xenapi.VM.get_record(vm_ref)
+
+ # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.
+
+ # Affinity.
+ if vm_params['affinity'] != "OpaqueRef:NULL":
+ vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
+ vm_params['affinity'] = vm_affinity
+ else:
+ vm_params['affinity'] = {}
+
+ # VBDs.
+ vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]
+
+ # List of VBDs is usually sorted by userdevice but we sort just
+ # in case. We need this list sorted by userdevice so that we can
+ # make positional pairing with module.params['disks'].
+ vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
+ vm_params['VBDs'] = vm_vbd_params_list
+
+ # VDIs.
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
+ vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
+ else:
+ vm_vdi_params = {}
+
+ vm_vbd_params['VDI'] = vm_vdi_params
+
+ # VIFs.
+ vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]
+
+ # List of VIFs is usually sorted by device but we sort just
+ # in case. We need this list sorted by device so that we can
+ # make positional pairing with module.params['networks'].
+ vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
+ vm_params['VIFs'] = vm_vif_params_list
+
+ # Networks.
+ for vm_vif_params in vm_params['VIFs']:
+ if vm_vif_params['network'] != "OpaqueRef:NULL":
+ vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
+ else:
+ vm_network_params = {}
+
+ vm_vif_params['network'] = vm_network_params
+
+ # Guest metrics.
+ if vm_params['guest_metrics'] != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
+ vm_params['guest_metrics'] = vm_guest_metrics
+ else:
+ vm_params['guest_metrics'] = {}
+
+ # Detect customization agent.
+ xenserver_version = get_xenserver_version(module)
+
+ if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
+ "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
+ vm_params['customization_agent'] = "native"
+ else:
+ vm_params['customization_agent'] = "custom"
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_params
+
+
+def gather_vm_facts(module, vm_params):
+ """Gathers VM facts.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+
+ Returns:
+ dict: VM facts.
+ """
+ # We silently return empty vm_facts if no vm_params are available.
+ if not vm_params:
+ return {}
+
+ xapi_session = XAPI.connect(module)
+
+ # Gather facts.
+ vm_facts = {
+ "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
+ "name": vm_params['name_label'],
+ "name_desc": vm_params['name_description'],
+ "uuid": vm_params['uuid'],
+ "is_template": vm_params['is_a_template'],
+ "folder": vm_params['other_config'].get('folder', ''),
+ "hardware": {
+ "num_cpus": int(vm_params['VCPUs_max']),
+ "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
+ "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
+ },
+ "disks": [],
+ "cdrom": {},
+ "networks": [],
+ "home_server": vm_params['affinity'].get('name_label', ''),
+ "domid": vm_params['domid'],
+ "platform": vm_params['platform'],
+ "other_config": vm_params['other_config'],
+ "xenstore_data": vm_params['xenstore_data'],
+ "customization_agent": vm_params['customization_agent'],
+ }
+
+ for vm_vbd_params in vm_params['VBDs']:
+ if vm_vbd_params['type'] == "Disk":
+ vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])
+
+ vm_disk_params = {
+ "size": int(vm_vbd_params['VDI']['virtual_size']),
+ "name": vm_vbd_params['VDI']['name_label'],
+ "name_desc": vm_vbd_params['VDI']['name_description'],
+ "sr": vm_disk_sr_params['name_label'],
+ "sr_uuid": vm_disk_sr_params['uuid'],
+ "os_device": vm_vbd_params['device'],
+ "vbd_userdevice": vm_vbd_params['userdevice'],
+ }
+
+ vm_facts['disks'].append(vm_disk_params)
+ elif vm_vbd_params['type'] == "CD":
+ if vm_vbd_params['empty']:
+ vm_facts['cdrom'].update(type="none")
+ else:
+ vm_facts['cdrom'].update(type="iso")
+ vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])
+
+ for vm_vif_params in vm_params['VIFs']:
+ vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})
+
+ vm_network_params = {
+ "name": vm_vif_params['network']['name_label'],
+ "mac": vm_vif_params['MAC'],
+ "vif_device": vm_vif_params['device'],
+ "mtu": vm_vif_params['MTU'],
+ "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
+ "prefix": "",
+ "netmask": "",
+ "gateway": "",
+ "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
+ vm_vif_params['device'])],
+ "prefix6": "",
+ "gateway6": "",
+ }
+
+ if vm_params['customization_agent'] == "native":
+ if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])
+
+ vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']
+
+ if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]
+
+ vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
+
+ elif vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = vm_params['xenstore_data']
+
+ for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
+ vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")
+
+ vm_facts['networks'].append(vm_network_params)
+
+ return vm_facts
+
+
+def set_vm_power_state(module, vm_ref, power_state, timeout=300):
+ """Controls VM power state.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ power_state (str): Power state to put VM into. Accepted values:
+
+ - poweredon
+ - poweredoff
+ - restarted
+ - suspended
+ - shutdownguest
+ - rebootguest
+
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ tuple (bool, str): Bool element is True if VM power state has
+ changed by calling this function, else False. Str element carries
+ a value of resulting power state as defined by XAPI - 'running',
+ 'halted' or 'suspended'.
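+
+ Example (sketch only; depends on a live module and VM reference, so it
+ is not runnable as a standalone doctest):
+
+ >>> set_vm_power_state(module, vm_ref, "shutdown-guest", timeout=120) # doctest: +SKIP
+ (True, 'halted')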
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ power_state = power_state.replace('_', '').replace('-', '').lower()
+ vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)
+
+ state_changed = False
+
+ try:
+ # Get current state of the VM.
+ vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state_current != power_state:
+ if power_state == "poweredon":
+ if not module.check_mode:
+ # VM can be in either halted, suspended, paused or running state.
+ # For VM to be in running state, start has to be called on halted,
+ # resume on suspended and unpause on paused VM.
+ if vm_power_state_current == "poweredoff":
+ xapi_session.xenapi.VM.start(vm_ref, False, False)
+ elif vm_power_state_current == "suspended":
+ xapi_session.xenapi.VM.resume(vm_ref, False, False)
+ elif vm_power_state_current == "paused":
+ xapi_session.xenapi.VM.unpause(vm_ref)
+ elif power_state == "poweredoff":
+ if not module.check_mode:
+ # hard_shutdown will halt VM regardless of current state.
+ xapi_session.xenapi.VM.hard_shutdown(vm_ref)
+ elif power_state == "restarted":
+ # hard_reboot will restart VM only if VM is in paused or running state.
+ if vm_power_state_current in ["paused", "poweredon"]:
+ if not module.check_mode:
+ xapi_session.xenapi.VM.hard_reboot(vm_ref)
+ else:
+ module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "suspended":
+ # running state is required for suspend.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ xapi_session.xenapi.VM.suspend(vm_ref)
+ else:
+ module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
+ elif power_state == "shutdownguest":
+ # running state is required for guest shutdown.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_shutdown(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
+ elif power_state == "rebootguest":
+ # running state is required for guest reboot.
+ if vm_power_state_current == "poweredon":
+ if not module.check_mode:
+ if timeout == 0:
+ xapi_session.xenapi.VM.clean_reboot(vm_ref)
+ else:
+ task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
+ task_result = wait_for_task(module, task_ref, timeout)
+
+ if task_result:
+ module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
+ else:
+ module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
+ else:
+ module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)
+
+ state_changed = True
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return (state_changed, vm_power_state_resulting)
+
+
+def wait_for_task(module, task_ref, timeout=300):
+ """Waits for async XAPI task to finish.
+
+ Args:
+ module: Reference to Ansible module object.
+ task_ref (str): XAPI reference to task.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ str: failure message on failure, else an empty string.
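+
+ Example (sketch only; task_ref comes from an Async XAPI call):
+
+ >>> task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref) # doctest: +SKIP
+ >>> wait_for_task(module, task_ref, timeout=600) # doctest: +SKIP
+ ''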
+ """
+ # Fail if we don't have a valid task reference.
+ if not task_ref or task_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ interval = 2
+
+ result = ""
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ try:
+ while time_left > 0:
+ task_status = xapi_session.xenapi.task.get_status(task_ref).lower()
+
+ if task_status == "pending":
+ # Task is still running.
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+
+ continue
+ elif task_status == "success":
+ # Task is done.
+ break
+ else:
+ # Task failed.
+ result = task_status
+ break
+ else:
+ # We timed out.
+ result = "timeout"
+
+ xapi_session.xenapi.task.destroy(task_ref)
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return result
+
+
+def wait_for_vm_ip_address(module, vm_ref, timeout=300):
+ """Waits for VM to acquire an IP address.
+
+ Args:
+ module: Reference to Ansible module object.
+ vm_ref (str): XAPI reference to VM.
+ timeout (int): timeout in seconds (default: 300).
+
+ Returns:
+ dict: VM guest metrics as retrieved by
+ VM_guest_metrics.get_record() XAPI method with info
+ on IP address acquired.
+ """
+ # Fail if we don't have a valid VM reference.
+ if not vm_ref or vm_ref == "OpaqueRef:NULL":
+ module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")
+
+ xapi_session = XAPI.connect(module)
+
+ vm_guest_metrics = {}
+
+ try:
+ # We translate VM power state string so that error message can be
+ # consistent with module VM power states.
+ vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
+
+ if vm_power_state != 'poweredon':
+ module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)
+
+ interval = 2
+
+ # If we have to wait indefinitely, make time_left larger than 0 so we can
+ # enter while loop.
+ if timeout == 0:
+ time_left = 1
+ else:
+ time_left = timeout
+
+ while time_left > 0:
+ vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)
+
+ if vm_guest_metrics_ref != "OpaqueRef:NULL":
+ vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
+ vm_ips = vm_guest_metrics['networks']
+
+ if "0/ip" in vm_ips:
+ break
+
+ time.sleep(interval)
+
+ # We decrease time_left only if we don't wait indefinitely.
+ if timeout != 0:
+ time_left -= interval
+ else:
+ # We timed out.
+ module.fail_json(msg="Timed out waiting for VM IP address!")
+
+ except XenAPI.Failure as f:
+ module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return vm_guest_metrics
+
+
+def get_xenserver_version(module):
+ """Returns XenServer version.
+
+ Args:
+ module: Reference to Ansible module object.
+
+ Returns:
+ list: Element [0] is major version. Element [1] is minor version.
+ Element [2] is update number.
+ """
+ xapi_session = XAPI.connect(module)
+
+ host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
+
+ try:
+ xenserver_version = [int(version_number) for version_number in xapi_session.xenapi.host.get_software_version(host_ref)['product_version'].split('.')]
+ except ValueError:
+ xenserver_version = [0, 0, 0]
+
+ return xenserver_version
+
+
+class XAPI(object):
+ """Class for XAPI session management."""
+ _xapi_session = None
+
+ @classmethod
+ def connect(cls, module, disconnect_atexit=True):
+ """Establishes XAPI connection and returns session reference.
+
+ If no existing session is available, establishes a new one
+ and returns it, else returns existing one.
+
+ Args:
+ module: Reference to Ansible module object.
+ disconnect_atexit (bool): Controls if method should
+ register atexit handler to disconnect from XenServer
+ on module exit (default: True).
+
+ Returns:
+ XAPI session reference.
+ """
+ if cls._xapi_session is not None:
+ return cls._xapi_session
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+ ignore_ssl = not module.params['validate_certs']
+
+ if hostname == 'localhost':
+ cls._xapi_session = XenAPI.xapi_local()
+ username = ''
+ password = ''
+ else:
+ # If scheme is not specified we default to http:// because https://
+ # is problematic in most setups.
+ if not hostname.startswith("http://") and not hostname.startswith("https://"):
+ hostname = "http://%s" % hostname
+
+ try:
+ # ignore_ssl is supported in XenAPI library from XenServer 7.2
+ # SDK onward but there is no way to tell which version we
+ # are using. TypeError will be raised if ignore_ssl is not
+ # supported. Additionally, ignore_ssl requires Python 2.7.9
+ # or newer.
+ cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
+ except TypeError:
+ # Try without ignore_ssl.
+ cls._xapi_session = XenAPI.Session(hostname)
+
+ if not password:
+ password = ''
+
+ try:
+ cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
+ except XenAPI.Failure as f:
+ module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))
+
+ # Disabling atexit should be used in special cases only.
+ if disconnect_atexit:
+ atexit.register(cls._xapi_session.logout)
+
+ return cls._xapi_session
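+
+ # Usage sketch (assumes 'module' was built with
+ # xenserver_common_argument_spec()):
+ # session = XAPI.connect(module)
+ # vm_refs = session.xenapi.VM.get_all()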
+
+
+class XenServerObject(object):
+ """Base class for all XenServer objects.
+
+ This class contains active XAPI session reference and common
+ attributes with useful info about XenServer host/pool.
+
+ Attributes:
+ module: Reference to Ansible module object.
+ xapi_session: Reference to XAPI session.
+ pool_ref (str): XAPI reference to a pool currently connected to.
+ default_sr_ref (str): XAPI reference to a pool default
+ Storage Repository.
+ host_ref (str): XAPI reference to a host currently connected to.
+ xenserver_version (list of int): Contains XenServer major, minor,
+ and update version numbers.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerObject using common module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ if not HAS_XENAPI:
+ module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)
+
+ self.module = module
+ self.xapi_session = XAPI.connect(module)
+
+ try:
+ self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
+ self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
+ self.xenserver_version = get_xenserver_version(module)
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
diff --git a/ansible_collections/community/general/plugins/module_utils/xfconf.py b/ansible_collections/community/general/plugins/module_utils/xfconf.py
new file mode 100644
index 000000000..b63518d0c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/module_utils/xfconf.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+
+
+@fmt.unpack_args
+def _values_fmt(values, value_types):
+ result = []
+ for value, value_type in zip(values, value_types):
+ if value_type == 'bool':
+ value = 'true' if boolean(value) else 'false'
+ result.extend(['--type', '{0}'.format(value_type), '--set', '{0}'.format(value)])
+ return result
+
+
+def xfconf_runner(module, **kwargs):
+ runner = CmdRunner(
+ module,
+ command='xfconf-query',
+ arg_formats=dict(
+ channel=fmt.as_opt_val("--channel"),
+ property=fmt.as_opt_val("--property"),
+ force_array=fmt.as_bool("--force-array"),
+ reset=fmt.as_bool("--reset"),
+ create=fmt.as_bool("--create"),
+ list_arg=fmt.as_bool("--list"),
+ values_and_types=fmt.as_func(_values_fmt),
+ ),
+ **kwargs
+ )
+ return runner
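+
+
+# Usage sketch (assumes an AnsibleModule 'module' whose params carry the
+# arguments declared above; the argument order string is illustrative):
+#
+# runner = xfconf_runner(module)
+# with runner("channel property list_arg") as ctx:
+# rc, out, err = ctx.run()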
diff --git a/ansible_collections/community/general/plugins/modules/aerospike_migrations.py b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
new file mode 100644
index 000000000..1eee5b1a2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aerospike_migrations.py
@@ -0,0 +1,529 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""short_description: Check or wait for migrations between nodes"""
+
+# Copyright (c) 2018, Albert Autin
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: aerospike_migrations
+short_description: Check or wait for migrations between nodes
+description:
+ - This can be used to check for migrations in a cluster.
+ This makes it easy to do a rolling upgrade/update on Aerospike nodes.
+ - If waiting for migrations is not desired, simply poll until
+ port 3000 is available or C(asinfo -v status) returns ok.
+author: "Albert Autin (@Alb0t)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ host:
+ description:
+ - Which host to use as the seed for the info connection.
+ required: false
+ type: str
+ default: localhost
+ port:
+ description:
+ - Which port to connect to Aerospike on (service port).
+ required: false
+ type: int
+ default: 3000
+ connect_timeout:
+ description:
+ - How long to try to connect before giving up (milliseconds).
+ required: false
+ type: int
+ default: 1000
+ consecutive_good_checks:
+ description:
+ - How many times should the cluster report "no migrations"
+ consecutively before returning OK back to Ansible?
+ required: false
+ type: int
+ default: 3
+ sleep_between_checks:
+ description:
+ - How long to sleep between each check (seconds).
+ required: false
+ type: int
+ default: 60
+ tries_limit:
+ description:
+ - How many times do we poll before giving up and failing?
+ default: 300
+ required: false
+ type: int
+ local_only:
+ description:
+ - Do you wish to only check for migrations on the local node
+ before returning, or do you want all nodes in the cluster
+ to finish before returning?
+ required: true
+ type: bool
+ min_cluster_size:
+ description:
+ - Check will return bad until the cluster size is met
+ or until tries are exhausted.
+ required: false
+ type: int
+ default: 1
+ fail_on_cluster_change:
+ description:
+ - Fail if the cluster key changes.
+ If something else is changing the cluster, we may want to fail.
+ required: false
+ type: bool
+ default: true
+ migrate_tx_key:
+ description:
+ - The metric key used to determine if we have tx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: false
+ type: str
+ default: migrate_tx_partitions_remaining
+ migrate_rx_key:
+ description:
+ - The metric key used to determine if we have rx migrations
+ remaining. Changeable due to backwards compatibility.
+ required: false
+ type: str
+ default: migrate_rx_partitions_remaining
+ target_cluster_size:
+ description:
+ - When all aerospike builds in the cluster are version 4.3 or
+ newer, then the C(cluster-stable) info command will be used.
+ Inside this command, you can optionally specify what the target
+ cluster size is - but it is not necessary. You can still rely on
+ C(min_cluster_size) if you don't want to use this option.
+ - If this option is specified on a cluster that has at least one
+ host older than 4.3, then it will be ignored until the minimum
+ version reaches 4.3.
+ required: false
+ type: int
+'''
+EXAMPLES = '''
+# check for migrations on local node
+- name: Wait for migrations on local node before proceeding
+ community.general.aerospike_migrations:
+ host: "localhost"
+ connect_timeout: 2000
+ consecutive_good_checks: 5
+ sleep_between_checks: 15
+ tries_limit: 600
+ local_only: false
+
+# example playbook:
+- name: Upgrade aerospike
+ hosts: all
+ become: true
+ serial: 1
+ tasks:
+ - name: Install dependencies
+ ansible.builtin.apt:
+ name:
+ - python
+ - python-pip
+ - python-setuptools
+ state: latest
+ - name: Setup aerospike
+ ansible.builtin.pip:
+ name: aerospike
+# check for migrations every (sleep_between_checks)
+# If at least (consecutive_good_checks) checks come back OK in a row, then return OK.
+# Will exit on any exception, which can be caused by bad nodes,
+# nodes not returning data, or other reasons.
+# Maximum runtime before giving up in this case will be:
+# Tries Limit * Sleep Between Checks * delay * retries
+ - name: Wait for aerospike migrations
+ community.general.aerospike_migrations:
+ local_only: true
+ sleep_between_checks: 1
+ tries_limit: 5
+ consecutive_good_checks: 3
+ fail_on_cluster_change: true
+ min_cluster_size: 3
+ target_cluster_size: 4
+ register: migrations_check
+ until: migrations_check is succeeded
+ changed_when: false
+ delay: 60
+ retries: 120
+ - name: Another thing
+ ansible.builtin.shell: |
+ echo foo
+ - name: Reboot
+ ansible.builtin.reboot:
+'''
+
+RETURN = '''
+# Returns only a success/failure result. Changed is always false.
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+LIB_FOUND_ERR = None
+try:
+ import aerospike
+ from time import sleep
+ import re
+except ImportError as ie:
+ LIB_FOUND = False
+ LIB_FOUND_ERR = traceback.format_exc()
+else:
+ LIB_FOUND = True
+
+
+def run_module():
+ """run ansible module"""
+ module_args = dict(
+ host=dict(type='str', required=False, default='localhost'),
+ port=dict(type='int', required=False, default=3000),
+ connect_timeout=dict(type='int', required=False, default=1000),
+ consecutive_good_checks=dict(type='int', required=False, default=3),
+ sleep_between_checks=dict(type='int', required=False, default=60),
+ tries_limit=dict(type='int', required=False, default=300),
+ local_only=dict(type='bool', required=True),
+ min_cluster_size=dict(type='int', required=False, default=1),
+ target_cluster_size=dict(type='int', required=False, default=None),
+ fail_on_cluster_change=dict(type='bool', required=False, default=True),
+ migrate_tx_key=dict(type='str', required=False, no_log=False,
+ default="migrate_tx_partitions_remaining"),
+ migrate_rx_key=dict(type='str', required=False, no_log=False,
+ default="migrate_rx_partitions_remaining")
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not LIB_FOUND:
+ module.fail_json(msg=missing_required_lib('aerospike'),
+ exception=LIB_FOUND_ERR)
+
+ try:
+ if module.check_mode:
+ has_migrations, skip_reason = False, None
+ else:
+ migrations = Migrations(module)
+ has_migrations, skip_reason = migrations.has_migs(
+ module.params['local_only']
+ )
+
+ if has_migrations:
+ module.fail_json(msg="Failed.", skip_reason=skip_reason)
+ except Exception as e:
+ module.fail_json(msg="Error: {0}".format(e))
+
+ module.exit_json(**result)
+
+
+class Migrations:
+ """ Check or wait for migrations between nodes """
+
+ def __init__(self, module):
+ self.module = module
+ self._client = self._create_client().connect()
+ self._nodes = {}
+ self._update_nodes_list()
+ self._cluster_statistics = {}
+ self._update_cluster_statistics()
+ self._namespaces = set()
+ self._update_cluster_namespace_list()
+ self._build_list = set()
+ self._update_build_list()
+ self._start_cluster_key = \
+ self._cluster_statistics[self._nodes[0]]['cluster_key']
+
+ def _create_client(self):
+ """ TODO: add support for auth, tls, and other special features
+ I won't use those features, so I'll wait until somebody complains
+ or does it for me (Cross fingers)
+ create the client object"""
+ config = {
+ 'hosts': [
+ (self.module.params['host'], self.module.params['port'])
+ ],
+ 'policies': {
+ 'timeout': self.module.params['connect_timeout']
+ }
+ }
+ return aerospike.client(config)
+
+ def _info_cmd_helper(self, cmd, node=None, delimiter=';'):
+ """delimiter is for separate stats that come back, NOT for kv
+ separation which is ="""
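+ # Illustrative (hypothetical) raw response for cmd='statistics':
+ # "statistics\tcluster_key=ABC123;cluster_size=3\n"
+ # which this helper parses into:
+ # {'cluster_key': 'ABC123', 'cluster_size': '3'}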
+ if node is None: # If no node passed, use the first one (local)
+ node = self._nodes[0]
+ data = self._client.info_node(cmd, node)
+ data = data.split("\t")
+ if len(data) != 1 and len(data) != 2:
+ self.module.fail_json(
+ msg="Unexpected number of values returned in info command: " +
+ str(len(data))
+ )
+ # data will be in format 'command\toutput'
+ data = data[-1]
+ data = data.rstrip("\n\r")
+ data_arr = data.split(delimiter)
+
+ # some commands don't return in kv format
+ # so we don't want a dict from those.
+ if '=' in data:
+ retval = dict(
+ metric.split("=", 1) for metric in data_arr
+ )
+ else:
+ # if only 1 element found, and not kv, return just the value.
+ if len(data_arr) == 1:
+ retval = data_arr[0]
+ else:
+ retval = data_arr
+ return retval
+
+ def _update_build_list(self):
+ """creates self._build_list which is a unique list
+ of build versions."""
+ self._build_list = set()
+ for node in self._nodes:
+ build = self._info_cmd_helper('build', node)
+ self._build_list.add(build)
+
+ # just checks to see if the version is 4.3 or greater
+ def _can_use_cluster_stable(self):
+ # if version <4.3 we can't use cluster-stable info cmd
+ # regex hack to check for versions beginning with 0-3 or
+ # beginning with 4.0,4.1,4.2
+ if re.search(R'^([0-3]\.|4\.[0-2])', min(self._build_list)):
+ return False
+ return True
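+
+ # Illustrative results of the version gate above (made-up build strings):
+ # min build '4.2.0.4' -> regex matches -> False (cluster-stable unavailable)
+ # min build '4.3.1.5' -> no match -> True (cluster-stable available)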
+
+ def _update_cluster_namespace_list(self):
+ """ make a unique list of namespaces
+ TODO: does this work on a rolling namespace add/deletion?
+ thankfully if it doesn't, we don't need this on builds >=4.3"""
+ self._namespaces = set()
+ for node in self._nodes:
+ namespaces = self._info_cmd_helper('namespaces', node)
+ for namespace in namespaces:
+ self._namespaces.add(namespace)
+
+ def _update_cluster_statistics(self):
+ """create a dict of nodes with their related stats """
+ self._cluster_statistics = {}
+ for node in self._nodes:
+ self._cluster_statistics[node] = \
+ self._info_cmd_helper('statistics', node)
+
+ def _update_nodes_list(self):
+ """get a fresh list of all the nodes"""
+ self._nodes = self._client.get_nodes()
+ if not self._nodes:
+ self.module.fail_json("Failed to retrieve at least 1 node.")
+
+ def _namespace_has_migs(self, namespace, node=None):
+ """returns a True or False.
+ Does the namespace have migrations for the node passed?
+ If no node passed, uses the local node or the first one in the list"""
+ namespace_stats = self._info_cmd_helper("namespace/" + namespace, node)
+ try:
+ namespace_tx = \
+ int(namespace_stats[self.module.params['migrate_tx_key']])
+ namespace_rx = \
+ int(namespace_stats[self.module.params['migrate_rx_key']])
+ except KeyError:
+ self.module.fail_json(
+ msg="Did not find partition remaining key:" +
+ self.module.params['migrate_tx_key'] +
+ " or key:" +
+ self.module.params['migrate_rx_key'] +
+ " in 'namespace/" +
+ namespace +
+ "' output."
+ )
+ except TypeError:
+ self.module.fail_json(
+ msg="namespace stat returned was not numerical"
+ )
+ return namespace_tx != 0 or namespace_rx != 0
+
+ def _node_has_migs(self, node=None):
+ """just calls namespace_has_migs and
+ if any namespace has migs returns true"""
+ migs = 0
+ self._update_cluster_namespace_list()
+ for namespace in self._namespaces:
+ if self._namespace_has_migs(namespace, node):
+ migs += 1
+ return migs != 0
+
+ def _cluster_key_consistent(self):
+ """create a dictionary to store what each node
+ returns the cluster key as. we should end up with only 1 dict key,
+ with the key being the cluster key."""
+ cluster_keys = {}
+ for node in self._nodes:
+ cluster_key = self._cluster_statistics[node][
+ 'cluster_key']
+ if cluster_key not in cluster_keys:
+ cluster_keys[cluster_key] = 1
+ else:
+ cluster_keys[cluster_key] += 1
+ if len(cluster_keys.keys()) == 1 and \
+ self._start_cluster_key in cluster_keys:
+ return True
+ return False
+
+ def _cluster_migrates_allowed(self):
+ """ensure all nodes have 'migrate_allowed' in their stats output"""
+ for node in self._nodes:
+ node_stats = self._info_cmd_helper('statistics', node)
+ allowed = node_stats['migrate_allowed']
+ if allowed == "false":
+ return False
+ return True
+
+ def _cluster_has_migs(self):
+ """calls node_has_migs for each node"""
+ migs = 0
+ for node in self._nodes:
+ if self._node_has_migs(node):
+ migs += 1
+ if migs == 0:
+ return False
+ return True
+
+ def _has_migs(self, local):
+ if local:
+ return self._local_node_has_migs()
+ return self._cluster_has_migs()
+
+ def _local_node_has_migs(self):
+ return self._node_has_migs(None)
+
+ def _is_min_cluster_size(self):
+ """checks that all nodes in the cluster are returning the
+ minimum cluster size specified in their statistics output"""
+ sizes = set()
+ for node in self._cluster_statistics:
+ sizes.add(int(self._cluster_statistics[node]['cluster_size']))
+
+ if (len(sizes)) > 1: # if we are getting more than 1 size, let's say no
+ return False
+ if (min(sizes)) >= self.module.params['min_cluster_size']:
+ return True
+ return False
+
+ def _cluster_stable(self):
+ """Added 4.3:
+ cluster-stable:size=<target-cluster-size>;ignore-migrations=<yes/no>;namespace=<namespace-name>
+ Returns the current 'cluster_key' when the following are satisfied:
+
+ If 'size' is specified then the target node's 'cluster-size'
+ must match size.
+ If 'ignore-migrations' is either unspecified or 'false' then
+ the target node's migrations counts must be zero for the provided
+ 'namespace' or all namespaces if 'namespace' is not provided."""
+ cluster_key = set()
+ cluster_key.add(self._info_cmd_helper('statistics')['cluster_key'])
+ cmd = "cluster-stable:"
+ target_cluster_size = self.module.params['target_cluster_size']
+ if target_cluster_size is not None:
+ cmd = cmd + "size=" + str(target_cluster_size) + ";"
+ for node in self._nodes:
+ try:
+ cluster_key.add(self._info_cmd_helper(cmd, node))
+ except aerospike.exception.ServerError as e: # unstable-cluster is returned in the form of an exception
+ if 'unstable-cluster' in e.msg:
+ return False
+ raise e
+ if len(cluster_key) == 1:
+ return True
+ return False
+
+ def _cluster_good_state(self):
+ """checks a few things to make sure we're OK to say the cluster
+ has no migs. It could be in an unhealthy condition that does not allow
+ migs, or a split brain."""
+ if self._cluster_key_consistent() is not True:
+ return False, "Cluster key inconsistent."
+ if self._is_min_cluster_size() is not True:
+ return False, "Cluster min size not reached."
+ if self._cluster_migrates_allowed() is not True:
+ return False, "migrate_allowed is false somewhere."
+ return True, "OK."
+
+ def has_migs(self, local=True):
+ """returns a boolean, False if no migrations otherwise True"""
+ consecutive_good = 0
+ try_num = 0
+ skip_reason = list()
+ while \
+ try_num < int(self.module.params['tries_limit']) and \
+ consecutive_good < \
+ int(self.module.params['consecutive_good_checks']):
+
+ self._update_nodes_list()
+ self._update_cluster_statistics()
+
+ # These checks are done on each try because
+ # we probably want to skip & sleep instead of failing entirely
+ stable, reason = self._cluster_good_state()
+ if stable is not True:
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + reason
+ )
+ else:
+ if self._can_use_cluster_stable():
+ if self._cluster_stable():
+ consecutive_good += 1
+ else:
+ consecutive_good = 0
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " cluster_stable"
+ )
+ elif self._has_migs(local):
+ skip_reason.append(
+ "Skipping on try#" + str(try_num) +
+ " for reason:" + " migrations"
+ )
+ consecutive_good = 0
+ else:
+ consecutive_good += 1
+ if consecutive_good == self.module.params[
+ 'consecutive_good_checks']:
+ break
+ try_num += 1
+ sleep(self.module.params['sleep_between_checks'])
+ if consecutive_good == self.module.params['consecutive_good_checks']:
+ return False, None
+ return True, skip_reason
+
+
+def main():
+ """main method for ansible module"""
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/airbrake_deployment.py b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
new file mode 100644
index 000000000..42ac037e1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/airbrake_deployment.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: airbrake_deployment
+author:
+- "Bruce Pennypacker (@bpennypacker)"
+- "Patrick Humpal (@phumpal)"
+short_description: Notify Airbrake about app deployments
+description:
+  - Notify Airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ project_id:
+ description:
+    - Airbrake PROJECT_ID.
+ required: true
+ type: str
+ version_added: '0.2.0'
+ project_key:
+ description:
+ - Airbrake PROJECT_KEY.
+ required: true
+ type: str
+ version_added: '0.2.0'
+ environment:
+ description:
+ - The airbrake environment name, typically 'production', 'staging', etc.
+ required: true
+ type: str
+ user:
+ description:
+    - The username of the person doing the deployment.
+ required: false
+ type: str
+ repo:
+ description:
+    - URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+    - A hash, number, tag, or other identifier showing what revision from version control was deployed.
+ required: false
+ type: str
+ version:
+ description:
+    - A string identifying what version was deployed.
+ required: false
+ type: str
+ version_added: '1.0.0'
+ url:
+ description:
+ - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
+ required: false
+ default: "https://api.airbrake.io/api/v4/projects/"
+ type: str
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+ type: bool
+
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify Airbrake about an app deployment
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: '4.2'
+
+- name: Notify Airbrake about an app deployment, using git hash as revision
+ community.general.airbrake_deployment:
+ project_id: '12345'
+ project_key: 'AAAAAA'
+ environment: staging
+ user: ansible
+ revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15'
+ version: '0.2.0'
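+
+# Hypothetical example: the Errbit endpoint below is illustrative, not a real URL.
+- name: Notify a self-hosted Errbit instance instead of airbrake.io
+  community.general.airbrake_deployment:
+    project_id: '12345'
+    project_key: 'AAAAAA'
+    environment: production
+    user: ansible
+    url: 'https://errbit.example.com/api/v4/projects/'
+    validate_certs: false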
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True, no_log=True, type='str'),
+ project_key=dict(required=True, no_log=True, type='str'),
+ environment=dict(required=True, type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ version=dict(required=False, type='str'),
+ url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+    # Build dict of params
+ params = {}
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # v4 API documented at https://airbrake.io/docs/api/#create-deploy-v4
+ if module.params["environment"]:
+ params["environment"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["username"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["repository"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["revision"] = module.params["revision"]
+
+ if module.params["version"]:
+ params["version"] = module.params["version"]
+
+ # Build deploy url
+ url = module.params.get('url') + module.params["project_id"] + '/deploys?key=' + module.params["project_key"]
+ json_body = module.jsonify(params)
+
+ # Build header
+ headers = {'Content-Type': 'application/json'}
+
+ # Notify Airbrake of deploy
+ response, info = fetch_url(module, url, data=json_body,
+ headers=headers, method='POST')
+
+    if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_devices.py b/ansible_collections/community/general/plugins/modules/aix_devices.py
new file mode 100644
index 000000000..ef4ed4961
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_devices.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, 2018 Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Kairo Araujo (@kairoaraujo)
+module: aix_devices
+short_description: Manages AIX devices
+description:
+- This module discovers, defines, removes and modifies attributes of AIX devices.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ attributes:
+ description:
+ - A list of device attributes.
+ type: dict
+ device:
+ description:
+ - The name of the device.
+    - C(all) is valid to rescan all C(available) devices (AIX C(cfgmgr) command).
+ type: str
+ force:
+ description:
+ - Forces action.
+ type: bool
+ default: false
+ recursive:
+ description:
+ - Removes or defines a device and children devices.
+ type: bool
+ default: false
+ state:
+ description:
+ - Controls the device state.
+    - C(available) (alias C(present)) rescans a specific device or all devices (when C(device) is not specified).
+    - C(removed) (alias C(absent)) removes a device.
+    - C(defined) changes device to Defined state.
+ type: str
+ choices: [ available, defined, removed ]
+ default: available
+'''
+
+EXAMPLES = r'''
+- name: Scan new devices
+ community.general.aix_devices:
+ device: all
+ state: available
+
+- name: Scan new virtual devices (vio0)
+ community.general.aix_devices:
+ device: vio0
+ state: available
+
+- name: Removing IP alias from en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2
+ community.general.aix_devices:
+ device: ent2
+ state: removed
+
+- name: Put device en2 in Defined
+ community.general.aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent device).
+ community.general.aix_devices:
+ device: ent4
+ state: removed
+
+- name: Put device en4 in Defined (nonexistent device)
+ community.general.aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: true
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ community.general.aix_devices:
+ device: vscsi1
+ recursive: true
+ state: removed
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+      mtu: 9000
+ arp: 'off'
+ state: available
+
+- name: Configure IP, netmask and set en1 up.
+ community.general.aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: available
+
+- name: Adding IP alias to en0
+ community.general.aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: available
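+
+# Hypothetical example; the device name is illustrative.
+- name: Force removal of ent3 and its child devices
+  community.general.aix_devices:
+    device: ent3
+    force: true
+    recursive: true
+    state: removed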
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_device(module, device):
+ """
+ Check if device already exists and the state.
+ Args:
+ module: Ansible module.
+ device: device to be checked.
+
+ Returns: bool, device state
+
+ """
+ lsdev_cmd = module.get_bin_path('lsdev', True)
+ rc, lsdev_out, err = module.run_command(["%s" % lsdev_cmd, '-C', '-l', "%s" % device])
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lsdev", rc=rc, err=err)
+
+ if lsdev_out:
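+        # lsdev -C -l prints "<name> <state> <location> <description>"; field two is the device state.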
+ device_state = lsdev_out.split()[1]
+ return True, device_state
+
+ device_state = None
+ return False, device_state
+
+
+def _check_device_attr(module, device, attr):
+ """
+
+ Args:
+ module: Ansible module.
+ device: device to check attributes.
+ attr: attribute to be checked.
+
+ Returns:
+
+ """
+ lsattr_cmd = module.get_bin_path('lsattr', True)
+    rc, lsattr_out, err = module.run_command([lsattr_cmd, '-El', device, '-a', attr])
+
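+    # delalias4/delalias6 are not shown by lsattr; return '' for them so a
+    # requested change always looks different from the current value.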
+ hidden_attrs = ['delalias4', 'delalias6']
+
+ if rc == 255:
+
+ if attr in hidden_attrs:
+ current_param = ''
+ else:
+ current_param = None
+
+ return current_param
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run lsattr: %s" % err, rc=rc, err=err)
+
+ current_param = lsattr_out.split()[1]
+ return current_param
+
+
+def discover_device(module, device):
+    """ Discover AIX devices."""
+    cfgmgr_cmd = module.get_bin_path('cfgmgr', True)
+
+    # Build argv as a list; '-l' and the device must be separate arguments,
+    # and no '-l' argument at all is passed when rescanning all devices.
+    cmd = [cfgmgr_cmd]
+    if device is not None and device != 'all':
+        cmd += ['-l', device]
+
+    changed = True
+    msg = ''
+    if not module.check_mode:
+        rc, cfgmgr_out, err = module.run_command(cmd)
+        changed = True
+        msg = cfgmgr_out
+
+    return changed, msg
+
+
+def change_device_attr(module, attributes, device, force):
+ """ Change AIX device attribute. """
+
+ attr_changed = []
+ attr_not_changed = []
+ attr_invalid = []
+ chdev_cmd = module.get_bin_path('chdev', True)
+
+ for attr in list(attributes.keys()):
+ new_param = attributes[attr]
+ current_param = _check_device_attr(module, device, attr)
+
+ if current_param is None:
+ attr_invalid.append(attr)
+
+ elif current_param != new_param:
+            cmd = [chdev_cmd, '-l', device, '-a', "%s=%s" % (attr, attributes[attr])]
+            if force:
+                cmd.append(force)
+
+ if not module.check_mode:
+ rc, chdev_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.exit_json(msg="Failed to run chdev.", rc=rc, err=err)
+
+            attr_changed.append(attr)
+        else:
+            attr_not_changed.append(attr)
+
+ if len(attr_changed) > 0:
+ changed = True
+ attr_changed_msg = "Attributes changed: %s. " % ','.join(attr_changed)
+ else:
+ changed = False
+ attr_changed_msg = ''
+
+ if len(attr_not_changed) > 0:
+ attr_not_changed_msg = "Attributes already set: %s. " % ','.join(attr_not_changed)
+ else:
+ attr_not_changed_msg = ''
+
+ if len(attr_invalid) > 0:
+ attr_invalid_msg = "Invalid attributes: %s " % ', '.join(attr_invalid)
+ else:
+ attr_invalid_msg = ''
+
+ msg = "%s%s%s" % (attr_changed_msg, attr_not_changed_msg, attr_invalid_msg)
+
+ return changed, msg
+
+
+def remove_device(module, device, force, recursive, state):
+ """ Puts device in defined state or removes device. """
+
+ state_opt = {
+ 'removed': '-d',
+ 'absent': '-d',
+ 'defined': ''
+ }
+
+ recursive_opt = {
+ True: '-R',
+ False: ''
+ }
+
+ recursive = recursive_opt[recursive]
+ state = state_opt[state]
+
+ changed = True
+ msg = ''
+ rmdev_cmd = module.get_bin_path('rmdev', True)
+
+    if not module.check_mode:
+        cmd = [rmdev_cmd, "-l", device]
+        # Append only the flags that are set; empty strings must not be
+        # passed to rmdev, and '-d' is what actually deletes the device.
+        for flag in (state, recursive, force):
+            if flag:
+                cmd.append(flag)
+        rc, rmdev_out, err = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run rmdev", rc=rc, err=err)
+
+ msg = rmdev_out
+
+ return changed, msg
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ attributes=dict(type='dict'),
+ device=dict(type='str'),
+ force=dict(type='bool', default=False),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', default='available', choices=['available', 'defined', 'removed']),
+ ),
+ supports_check_mode=True,
+ )
+
+ force_opt = {
+ True: '-f',
+ False: '',
+ }
+
+ attributes = module.params['attributes']
+ device = module.params['device']
+ force = force_opt[module.params['force']]
+ recursive = module.params['recursive']
+ state = module.params['state']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'available' or state == 'present':
+ if attributes:
+ # change attributes on device
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ result['changed'], result['msg'] = change_device_attr(module, attributes, device, force)
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ # discovery devices (cfgmgr)
+ if device and device != 'all':
+ device_status, device_state = _check_device(module, device)
+ if device_status:
+ # run cfgmgr on specific device
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['changed'], result['msg'] = discover_device(module, device)
+
+ elif state == 'removed' or state == 'absent' or state == 'defined':
+ if not device:
+ result['msg'] = "device is required to removed or defined state."
+
+ else:
+ # Remove device
+ check_device, device_state = _check_device(module, device)
+ if check_device:
+ if state == 'defined' and device_state == 'Defined':
+ result['changed'] = False
+ result['msg'] = 'Device %s already in Defined' % device
+
+ else:
+ result['changed'], result['msg'] = remove_device(module, device, force, recursive, state)
+
+ else:
+ result['msg'] = "Device %s does not exist." % device
+
+ else:
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_filesystem.py b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
new file mode 100644
index 000000000..b1f363a93
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_filesystem.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_filesystem
+short_description: Configure LVM and NFS file systems for AIX
+description:
+  - This module creates, removes, mounts and unmounts LVM and NFS file systems
+    on AIX using C(/etc/filesystems).
+  - For LVM file systems, it is possible to resize a file system.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ account_subsystem:
+ description:
+ - Specifies whether the file system is to be processed by the accounting subsystem.
+ type: bool
+ default: false
+ attributes:
+ description:
+    - Specifies attributes for the file system, separated by commas.
+ type: list
+ elements: str
+ default:
+ - agblksize='4096'
+ - isnapshot='no'
+ auto_mount:
+ description:
+ - File system is automatically mounted at system restart.
+ type: bool
+ default: true
+ device:
+ description:
+    - Logical volume (LV) device name or remote export device to create an NFS file system.
+    - It is used to create a file system on an already existing logical volume or the exported NFS file system.
+    - If not mentioned, a new logical volume name will be created following AIX standards (LVM).
+ type: str
+ fs_type:
+ description:
+ - Specifies the virtual file system type.
+ type: str
+ default: jfs2
+ permissions:
+ description:
+ - Set file system permissions. C(rw) (read-write) or C(ro) (read-only).
+ type: str
+ choices: [ ro, rw ]
+ default: rw
+ mount_group:
+ description:
+ - Specifies the mount group.
+ type: str
+ filesystem:
+ description:
+ - Specifies the mount point, which is the directory where the file system will be mounted.
+ type: str
+ required: true
+ nfs_server:
+ description:
+ - Specifies a Network File System (NFS) server.
+ type: str
+ rm_mount_point:
+ description:
+ - Removes the mount point directory when used with state C(absent).
+ type: bool
+ default: false
+ size:
+ description:
+    - Specifies the file system size.
+    - If the file system is already C(present), it will be resized.
+    - Sizes are given in 512-byte blocks, megabytes or gigabytes. A value
+      suffixed with M is in megabytes, a value suffixed with G is in
+      gigabytes, and a value without a suffix is in 512-byte blocks.
+    - If the value begins with C(+), the size is added to the current size.
+    - If the value begins with C(-), the size is subtracted from the current size.
+    - Without C(+) or C(-), the value is the total requested size.
+    - Sizes are rounded according to AIX LVM standards.
+ type: str
+ state:
+ description:
+ - Controls the file system state.
+    - C(present) checks if the file system exists, and creates or resizes it.
+    - C(absent) removes the existing file system if it is already C(unmounted).
+    - C(mounted) checks if the file system is mounted, and mounts it if needed.
+    - C(unmounted) checks if the file system is unmounted, and unmounts it if needed.
+ type: str
+ choices: [ absent, mounted, present, unmounted ]
+ default: present
+ vg:
+ description:
+ - Specifies an existing volume group (VG).
+ type: str
+notes:
+  - For more C(attributes), please check the AIX C(crfs) manual.
+'''
+
+EXAMPLES = r'''
+- name: Create filesystem in a previously defined logical volume.
+ community.general.aix_filesystem:
+ device: testlv
+ filesystem: /testfs
+ state: present
+
+- name: Creating NFS filesystem from nfshost.
+ community.general.aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+ filesystem: /home/ftp
+ state: present
+
+- name: Creating a new file system without a previously defined logical volume.
+ community.general.aix_filesystem:
+ filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+- name: Unmounting /testfs.
+ community.general.aix_filesystem:
+ filesystem: /testfs
+ state: unmounted
+
+- name: Resizing /mksysb to +512M.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G.
+ community.general.aix_filesystem:
+ filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Remove NFS filesystem /home/ftp.
+ community.general.aix_filesystem:
+ filesystem: /home/ftp
+ rm_mount_point: true
+ state: absent
+
+- name: Remove /newfs.
+ community.general.aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: true
+ state: absent
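+
+# Illustrative example: mount a file system that is already defined in /etc/filesystems.
+- name: Mounting /testfs.
+  community.general.aix_filesystem:
+    filesystem: /testfs
+    state: mounted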
+'''
+
+RETURN = r'''
+changed:
+  description: Whether the aix_filesystem action made a change (true or false).
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils._mount import ismount
+import re
+
+
+def _fs_exists(module, filesystem):
+ """
+ Check if file system already exists on /etc/filesystems.
+
+ :param module: Ansible module.
+    :param filesystem: filesystem name.
+ :return: True or False.
+ """
+ lsfs_cmd = module.get_bin_path('lsfs', True)
+ rc, lsfs_out, err = module.run_command([lsfs_cmd, "-l", filesystem])
+ if rc == 1:
+ if re.findall("No record matching", err):
+ return False
+
+ else:
+ module.fail_json(msg="Failed to run lsfs. Error message: %s" % err)
+
+ else:
+
+ return True
+
+
+def _check_nfs_device(module, nfs_host, device):
+ """
+ Validate if NFS server is exporting the device (remote export).
+
+ :param module: Ansible module.
+ :param nfs_host: nfs_host parameter, NFS server.
+ :param device: device parameter, remote export.
+ :return: True or False.
+ """
+ showmount_cmd = module.get_bin_path('showmount', True)
+ rc, showmount_out, err = module.run_command([showmount_cmd, "-a", nfs_host])
+ if rc != 0:
+ module.fail_json(msg="Failed to run showmount. Error message: %s" % err)
+ else:
+ showmount_data = showmount_out.splitlines()
+ for line in showmount_data:
+ if line.split(':')[1] == device:
+ return True
+
+ return False
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"])
+ if rc != 0:
+ module.fail_json(msg="Failed executing %s command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group %s is in varyoff state." % vg
+ return False, msg
+ elif vg in current_active_vgs:
+ msg = "Volume group %s is in varyon state." % vg
+ return True, msg
+ else:
+ msg = "Volume group %s does not exist." % vg
+ return None, msg
+
+
+def resize_fs(module, filesystem, size):
+ """ Resize LVM file system. """
+
+ chfs_cmd = module.get_bin_path('chfs', True)
+ if not module.check_mode:
+ rc, chfs_out, err = module.run_command([chfs_cmd, "-a", "size=%s" % size, filesystem])
+
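+        # A return code of 28 from chfs is treated as "no change required".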
+ if rc == 28:
+ changed = False
+ return changed, chfs_out
+ elif rc != 0:
+ if re.findall('Maximum allocation for logical', err):
+ changed = False
+ return changed, err
+ else:
+ module.fail_json(msg="Failed to run chfs. Error message: %s" % err)
+
+ else:
+ if re.findall('The filesystem size is already', chfs_out):
+ changed = False
+ else:
+ changed = True
+
+ return changed, chfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount,
+ account_subsystem, permissions, nfs_server, attributes):
+ """ Create LVM file system or NFS remote mount point. """
+
+ attributes = ' -a '.join(attributes)
+
+ # Parameters definition.
+ account_subsys_opt = {
+ True: '-t yes',
+ False: '-t no'
+ }
+
+ if nfs_server is not None:
+ auto_mount_opt = {
+ True: '-A',
+ False: '-a'
+ }
+
+ else:
+ auto_mount_opt = {
+ True: '-A yes',
+ False: '-A no'
+ }
+
+ if size is None:
+ size = ''
+ else:
+ size = "-a size=%s" % size
+
+ if device is None:
+ device = ''
+ else:
+ device = "-d %s" % device
+
+ if vg is None:
+ vg = ''
+ else:
+ vg_state, msg = _validate_vg(module, vg)
+ if vg_state:
+ vg = "-g %s" % vg
+ else:
+ changed = False
+
+ return changed, msg
+
+ if mount_group is None:
+ mount_group = ''
+
+ else:
+ mount_group = "-u %s" % mount_group
+
+ auto_mount = auto_mount_opt[auto_mount]
+ account_subsystem = account_subsys_opt[account_subsystem]
+
+ if nfs_server is not None:
+ # Creates a NFS file system.
+ mknfsmnt_cmd = module.get_bin_path('mknfsmnt', True)
+ if not module.check_mode:
+            # Pass the command as a string so two-word options such as '-d <device>' are split correctly.
+            rc, mknfsmnt_out, err = module.run_command("%s -f %s %s -h %s -t %s %s -w bg" % (
+                mknfsmnt_cmd, filesystem, device, nfs_server, permissions, auto_mount))
+ if rc != 0:
+ module.fail_json(msg="Failed to run mknfsmnt. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "NFS file system %s created." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+ else:
+ # Creates a LVM file system.
+ crfs_cmd = module.get_bin_path('crfs', True)
+ if not module.check_mode:
+            # Pass the command as a string so two-word options ('-g <vg>', '-A yes', ...) are split correctly.
+            cmd = "%s -v %s -m %s %s %s %s %s %s -p %s %s -a %s" % (
+                crfs_cmd, fs_type, filesystem, vg, device, mount_group,
+                auto_mount, account_subsystem, permissions, size, attributes)
+            rc, crfs_out, err = module.run_command(cmd)
+
+ if rc == 10:
+ module.exit_json(
+ msg="Using a existent previously defined logical volume, "
+ "volume group needs to be empty. %s" % err)
+
+ elif rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+
+ else:
+ changed = True
+ return changed, crfs_out
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def remove_fs(module, filesystem, rm_mount_point):
+ """ Remove an LVM file system or NFS entry. """
+
+    rmfs_cmd = module.get_bin_path('rmfs', True)
+    if not module.check_mode:
+        # Pass -r only when the mount point should be removed as well.
+        cmd = [rmfs_cmd] + (['-r'] if rm_mount_point else []) + [filesystem]
+ rc, rmfs_out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="Failed to run %s. Error message: %s" % (cmd, err))
+ else:
+ changed = True
+ msg = rmfs_out
+ if not rmfs_out:
+ msg = "File system %s removed." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def mount_fs(module, filesystem):
+ """ Mount a file system. """
+ mount_cmd = module.get_bin_path('mount', True)
+
+ if not module.check_mode:
+ rc, mount_out, err = module.run_command([mount_cmd, filesystem])
+ if rc != 0:
+ module.fail_json(msg="Failed to run mount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s mounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def unmount_fs(module, filesystem):
+ """ Unmount a file system."""
+ unmount_cmd = module.get_bin_path('unmount', True)
+
+ if not module.check_mode:
+ rc, unmount_out, err = module.run_command([unmount_cmd, filesystem])
+ if rc != 0:
+ module.fail_json(msg="Failed to run unmount. Error message: %s" % err)
+ else:
+ changed = True
+ msg = "File system %s unmounted." % filesystem
+
+ return changed, msg
+ else:
+ changed = True
+ msg = ''
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_subsystem=dict(type='bool', default=False),
+ attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]),
+ auto_mount=dict(type='bool', default=True),
+ device=dict(type='str'),
+ filesystem=dict(type='str', required=True),
+ fs_type=dict(type='str', default='jfs2'),
+ permissions=dict(type='str', default='rw', choices=['rw', 'ro']),
+ mount_group=dict(type='str'),
+ nfs_server=dict(type='str'),
+ rm_mount_point=dict(type='bool', default=False),
+ size=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'mounted', 'present', 'unmounted']),
+ vg=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ account_subsystem = module.params['account_subsystem']
+ attributes = module.params['attributes']
+ auto_mount = module.params['auto_mount']
+ device = module.params['device']
+ fs_type = module.params['fs_type']
+ permissions = module.params['permissions']
+ mount_group = module.params['mount_group']
+ filesystem = module.params['filesystem']
+ nfs_server = module.params['nfs_server']
+ rm_mount_point = module.params['rm_mount_point']
+ size = module.params['size']
+ state = module.params['state']
+ vg = module.params['vg']
+
+ result = dict(
+ changed=False,
+ msg='',
+ )
+
+ if state == 'present':
+ fs_mounted = ismount(filesystem)
+ fs_exists = _fs_exists(module, filesystem)
+
+ # Check if fs is mounted or exists.
+ if fs_mounted or fs_exists:
+ result['msg'] = "File system %s already exists." % filesystem
+ result['changed'] = False
+
+ # If parameter size was passed, resize fs.
+ if size is not None:
+ result['changed'], result['msg'] = resize_fs(module, filesystem, size)
+
+ # If fs doesn't exist, create it.
+ else:
+ # Check if fs will be a NFS device.
+ if nfs_server is not None:
+ if device is None:
+ result['msg'] = 'Parameter "device" is required when "nfs_server" is defined.'
+ module.fail_json(**result)
+ else:
+ # Create a fs from NFS export.
+ if _check_nfs_device(module, nfs_server, device):
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is None:
+ if vg is None:
+ result['msg'] = 'Required parameter "device" and/or "vg" is missing for filesystem creation.'
+ module.fail_json(**result)
+ else:
+                # Create a fs in the given VG (a new LV will be created).
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ if device is not None and nfs_server is None:
+ # Create a fs from a previously lv device.
+ result['changed'], result['msg'] = create_fs(
+ module, fs_type, filesystem, vg, device, size, mount_group, auto_mount, account_subsystem, permissions, nfs_server, attributes)
+
+ elif state == 'absent':
+ if ismount(filesystem):
+ result['msg'] = "File system %s mounted." % filesystem
+
+ else:
+ fs_status = _fs_exists(module, filesystem)
+ if not fs_status:
+ result['msg'] = "File system %s does not exist." % filesystem
+ else:
+ result['changed'], result['msg'] = remove_fs(module, filesystem, rm_mount_point)
+
+ elif state == 'mounted':
+ if ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already mounted." % filesystem
+ else:
+ result['changed'], result['msg'] = mount_fs(module, filesystem)
+
+ elif state == 'unmounted':
+ if not ismount(filesystem):
+ result['changed'] = False
+ result['msg'] = "File system %s already unmounted." % filesystem
+ else:
+ result['changed'], result['msg'] = unmount_fs(module, filesystem)
+
+ else:
+ # Unreachable codeblock
+ result['msg'] = "Unexpected state %s." % state
+ module.fail_json(**result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_inittab.py b/ansible_collections/community/general/plugins/modules/aix_inittab.py
new file mode 100644
index 000000000..c2c968189
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_inittab.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Joris Weijters <joris.weijters@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Joris Weijters (@molekuul)
+module: aix_inittab
+short_description: Manages the inittab on AIX
+description:
+ - Manages the inittab on AIX.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the inittab entry.
+ type: str
+ required: true
+ aliases: [ service ]
+ runlevel:
+ description:
+ - Runlevel of the entry.
+ type: str
+ required: true
+ action:
+ description:
+    - The action that init has to take with this entry.
+ type: str
+ choices:
+ - boot
+ - bootwait
+ - hold
+ - initdefault
+ - 'off'
+ - once
+ - ondemand
+ - powerfail
+ - powerwait
+ - respawn
+ - sysinit
+ - wait
+ command:
+ description:
+ - What command has to run.
+ type: str
+ required: true
+ insertafter:
+ description:
+    - The inittab entry after which the new entry should be inserted.
+ type: str
+ state:
+ description:
+ - Whether the entry should be present or absent in the inittab file.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The changes are persistent across reboots.
+ - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands.
+ - Tested on AIX 7.1.
+requirements:
+- itertools
+'''
+
+EXAMPLES = '''
+# Add service startmyservice to the inittab, directly after service existingservice.
+- name: Add startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 4
+ action: once
+ command: echo hello
+ insertafter: existingservice
+ state: present
+ become: true
+
+# Change inittab entry startmyservice to runlevel "2" and processaction "wait".
+- name: Change startmyservice to inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: present
+ become: true
+
+- name: Remove startmyservice from inittab
+ community.general.aix_inittab:
+ name: startmyservice
+ runlevel: 2
+ action: wait
+ command: echo hello
+ state: absent
+ become: true
+'''
+
+RETURN = '''
+name:
+  description: Name of the adjusted inittab entry.
+ returned: always
+ type: str
+ sample: startmyservice
+msg:
+  description: Action done with the inittab entry.
+ returned: changed
+ type: str
+ sample: changed inittab entry startmyservice
+changed:
+  description: Whether the inittab changed or not.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+# Import necessary libraries
+try:
+ # python 2
+ from itertools import izip
+except ImportError:
+ izip = zip
+
+from ansible.module_utils.basic import AnsibleModule
+
+# end import modules
+# start defining the functions
+
+
+def check_current_entry(module):
+    # Check if the entry exists. If not, return {'exist': False};
+    # otherwise return the parsed entry with 'exist': True.
+ existsdict = {'exist': False}
+ lsitab = module.get_bin_path('lsitab')
+ (rc, out, err) = module.run_command([lsitab, module.params['name']])
+ if rc == 0:
+ keys = ('name', 'runlevel', 'action', 'command')
+ values = out.split(":")
+        # strip non-printable characters such as \n
+        values = [s.strip() for s in values]
+ existsdict = dict(izip(keys, values))
+ existsdict.update({'exist': True})
+ return existsdict
+
+
+def main():
+ # initialize
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['service']),
+ runlevel=dict(type='str', required=True),
+ action=dict(type='str', choices=[
+ 'boot',
+ 'bootwait',
+ 'hold',
+ 'initdefault',
+ 'off',
+ 'once',
+ 'ondemand',
+ 'powerfail',
+ 'powerwait',
+ 'respawn',
+ 'sysinit',
+ 'wait',
+ ]),
+ command=dict(type='str', required=True),
+ insertafter=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ result = {
+ 'name': module.params['name'],
+ 'changed': False,
+ 'msg': ""
+ }
+
+ # Find commandline strings
+ mkitab = module.get_bin_path('mkitab')
+ rmitab = module.get_bin_path('rmitab')
+ chitab = module.get_bin_path('chitab')
+ rc = 0
+
+ # check if the new entry exists
+ current_entry = check_current_entry(module)
+
+    # if state is present, install or change the entry
+ if module.params['state'] == 'present':
+
+ # create new entry string
+ new_entry = module.params['name'] + ":" + module.params['runlevel'] + \
+ ":" + module.params['action'] + ":" + module.params['command']
+
+        # If the entry does not exist, or any of its fields differ,
+        # it will be created or changed.
+ if (not current_entry['exist']) or (
+ module.params['runlevel'] != current_entry['runlevel'] or
+ module.params['action'] != current_entry['action'] or
+ module.params['command'] != current_entry['command']):
+
+ # If the entry does exist then change the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command([chitab, new_entry])
+ if rc != 0:
+ module.fail_json(
+ msg="could not change inittab", rc=rc, err=err)
+ result['msg'] = "changed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ # If the entry does not exist create the entry
+ elif not current_entry['exist']:
+ if module.params['insertafter']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, '-i', module.params['insertafter'], new_entry])
+ else:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [mkitab, new_entry])
+
+ if rc != 0:
+ module.fail_json(msg="could not adjust inittab", rc=rc, err=err)
+ result['msg'] = "add inittab entry" + " " + module.params['name']
+ result['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # If the action is remove and the entry exists then remove the entry
+ if current_entry['exist']:
+ if not module.check_mode:
+ (rc, out, err) = module.run_command(
+ [rmitab, module.params['name']])
+ if rc != 0:
+ module.fail_json(
+ msg="could not remove entry from inittab)", rc=rc, err=err)
+ result['msg'] = "removed inittab entry" + " " + current_entry['name']
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvg.py b/ansible_collections/community/general/plugins/modules/aix_lvg.py
new file mode 100644
index 000000000..d89c43de4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_lvg.py
@@ -0,0 +1,371 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Kairo Araujo (@kairoaraujo)
+module: aix_lvg
+short_description: Manage LVM volume groups on AIX
+description:
+  - This module creates, removes or resizes volume groups on AIX LVM.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ force:
+ description:
+ - Force volume group creation.
+ type: bool
+ default: false
+ pp_size:
+ description:
+ - The size of the physical partition in megabytes.
+ type: int
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or extending (C(present) state) the volume group.
+    - If not informed when reducing (C(absent) state), the whole volume group will be removed.
+ type: list
+ elements: str
+ state:
+ description:
+    - Controls whether the volume group exists and its AIX activation state, varyonvg C(varyon) or varyoffvg C(varyoff).
+ type: str
+ choices: [ absent, present, varyoff, varyon ]
+ default: present
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ vg_type:
+ description:
+ - The type of the volume group.
+ type: str
+ choices: [ big, normal, scalable ]
+ default: normal
+notes:
+- AIX permits removing a VG only if all LVs/file systems are not busy.
+- The module does not modify the PP size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ pp_size: 128
+ vg_type: scalable
+ state: present
+
+- name: Removing a volume group datavg
+ community.general.aix_lvg:
+ vg: datavg
+ state: absent
+
+- name: Extending rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: present
+
+- name: Reducing rootvg
+ community.general.aix_lvg:
+ vg: rootvg
+ pvs: hdisk1
+ state: absent
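+
+# Illustrative example: activate an existing volume group.
+- name: Varyon volume group datavg
+  community.general.aix_lvg:
+    vg: datavg
+    state: varyon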
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _validate_pv(module, vg, pvs):
+ """
+ Function to validate if the physical volume (PV) is not already in use by
+ another volume group or Oracle ASM.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume group name.
+ :param pvs: Physical volume list.
+ :return: [bool, message] or module.fail_json for errors.
+ """
+
+ lspv_cmd = module.get_bin_path('lspv', True)
+ rc, current_lspv, stderr = module.run_command([lspv_cmd])
+ if rc != 0:
+ module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
+
+    # Build a map of physical volumes: PV name -> owning volume group.
+    lspv_list = {}
+    for line in current_lspv.splitlines():
+        pv_data = line.split()
+        lspv_list[pv_data[0]] = pv_data[2]
+
+    for pv in pvs:
+
+ # Check if pv exists and is free.
+ if pv not in lspv_list.keys():
+ module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
+
+ if lspv_list[pv] == 'None':
+ # Disk None, looks free.
+ # Check if PV is not already in use by Oracle ASM.
+ lquerypv_cmd = module.get_bin_path('lquerypv', True)
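+                # lquerypv -h <dev> 20 10 hex-dumps the disk header region where
+                # Oracle ASM writes its ORCLDISK label (offsets are hex).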
+ rc, current_lquerypv, stderr = module.run_command([lquerypv_cmd, "-h", "/dev/%s" % pv, "20", "10"])
+ if rc != 0:
+ module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
+
+            if 'ORCLDISK' in current_lquerypv:
+                module.fail_json(msg="Physical volume '%s' is already used by Oracle ASM." % pv)
+
+            # This PV is free; keep validating the remaining ones.
+            continue
+
+        # Check if PV is already in use for the same vg.
+        elif vg != lspv_list[pv]:
+            module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
+
+        msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
+        return False, msg
+
+    msg = "Physical volume(s) '%s' are ok to be used." % ' '.join(pvs)
+    return True, msg
+
+
+def _validate_vg(module, vg):
+ """
+ Check the current state of volume group.
+
+ :param module: Ansible module argument spec.
+ :param vg: Volume Group name.
+ :return: True (VG in varyon state) or False (VG in varyoff state) or
+ None (VG does not exist), message.
+ """
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_active_vgs, err = module.run_command([lsvg_cmd, "-o"])
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ rc, current_all_vgs, err = module.run_command([lsvg_cmd])
+ if rc != 0:
+ module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
+
+ if vg in current_all_vgs and vg not in current_active_vgs:
+ msg = "Volume group '%s' is in varyoff state." % vg
+ return False, msg
+
+ if vg in current_active_vgs:
+ msg = "Volume group '%s' is in varyon state." % vg
+ return True, msg
+
+ msg = "Volume group '%s' does not exist." % vg
+ return None, msg
+
+
+def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
+ """ Creates or extend a volume group. """
+
+ # Command option parameters.
+ force_opt = {
+ True: '-f',
+ False: ''
+ }
+
+ vg_opt = {
+ 'normal': '',
+ 'big': '-B',
+ 'scalable': '-S',
+ }
+
+ # Validate if PV are not already in use.
+ pv_state, msg = _validate_pv(module, vg, pvs)
+ if not pv_state:
+ changed = False
+ return changed, msg
+
+ vg_state, msg = vg_validation
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is True:
+ # Volume group extension.
+ changed = True
+ msg = ""
+
+ if not module.check_mode:
+ extendvg_cmd = module.get_bin_path('extendvg', True)
+ rc, output, err = module.run_command([extendvg_cmd, vg] + pvs)
+ if rc != 0:
+ changed = False
+ msg = "Extending volume group '%s' has failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' extended." % vg
+ return changed, msg
+
+ elif vg_state is None:
+ # Volume group creation.
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+            mkvg_cmd = module.get_bin_path('mkvg', True)
+            # Drop empty option strings; they must not reach mkvg as arguments.
+            cmd = [mkvg_cmd] + [opt for opt in (vg_opt[vg_type], force_opt[force]) if opt] + pp_size + ["-y", vg] + pvs
+            rc, output, err = module.run_command(cmd)
+ if rc != 0:
+ changed = False
+ msg = "Creating volume group '%s' failed." % vg
+ return changed, msg
+
+ msg = "Volume group '%s' created." % vg
+ return changed, msg
+
+
+def reduce_vg(module, vg, pvs, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ elif vg_state is None:
+ changed = False
+ return changed, msg
+
+ # Define pvs_to_remove (list of physical volumes to be removed).
+ if pvs is None:
+        # Remove VG if pvs are not informed.
+        # Remark: AIX permits removal only if the VG has no LVs.
+ lsvg_cmd = module.get_bin_path('lsvg', True)
+ rc, current_pvs, err = module.run_command([lsvg_cmd, "-p", vg])
+ if rc != 0:
+ module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
+
+ pvs_to_remove = []
+ for line in current_pvs.splitlines()[2:]:
+ pvs_to_remove.append(line.split()[0])
+
+ reduce_msg = "Volume group '%s' removed." % vg
+ else:
+ pvs_to_remove = pvs
+ reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
+
+ # Reduce volume group.
+    if not pvs_to_remove:
+ changed = False
+ msg = "No physical volumes to remove."
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+ reducevg_cmd = module.get_bin_path('reducevg', True)
+ rc, stdout, stderr = module.run_command([reducevg_cmd, "-df", vg] + pvs_to_remove)
+ if rc != 0:
+ module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
+
+ msg = reduce_msg
+ return changed, msg
+
+
+def state_vg(module, vg, state, vg_validation):
+ vg_state, msg = vg_validation
+
+ if vg_state is None:
+ module.fail_json(msg=msg)
+
+ if state == 'varyon':
+ if vg_state is True:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+ if not module.check_mode:
+ varyonvg_cmd = module.get_bin_path('varyonvg', True)
+ rc, varyonvg_out, err = module.run_command([varyonvg_cmd, vg])
+ if rc != 0:
+ module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
+
+ msg = "Varyon volume group %s completed." % vg
+ return changed, msg
+
+ elif state == 'varyoff':
+ if vg_state is False:
+ changed = False
+ return changed, msg
+
+ changed = True
+ msg = ''
+
+ if not module.check_mode:
+            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
+            rc, varyoffvg_out, stderr = module.run_command([varyoffvg_cmd, vg])
+            if rc != 0:
+                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
+
+ msg = "Varyoff volume group %s completed." % vg
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool', default=False),
+ pp_size=dict(type='int'),
+ pvs=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
+ vg=dict(type='str', required=True),
+ vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
+ ),
+ supports_check_mode=True,
+ )
+
+ force = module.params['force']
+ pp_size = module.params['pp_size']
+ pvs = module.params['pvs']
+ state = module.params['state']
+ vg = module.params['vg']
+ vg_type = module.params['vg_type']
+
+    if pp_size is None:
+        pp_size = []
+    else:
+        # Pass the PP size as separate '-s <size>' arguments.
+        pp_size = ["-s", str(pp_size)]
+
+ vg_validation = _validate_vg(module, vg)
+
+ if state == 'present':
+ if not pvs:
+ changed = False
+ msg = "pvs is required to state 'present'."
+ module.fail_json(msg=msg)
+ else:
+ changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
+
+ elif state == 'absent':
+ changed, msg = reduce_vg(module, vg, pvs, vg_validation)
+
+ elif state == 'varyon' or state == 'varyoff':
+ changed, msg = state_vg(module, vg, state, vg_validation)
+
+ else:
+ changed = False
+ msg = "Unexpected state"
+
+ module.exit_json(changed=changed, msg=msg, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/aix_lvol.py b/ansible_collections/community/general/plugins/modules/aix_lvol.py
new file mode 100644
index 000000000..0a4a6eff5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/aix_lvol.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Alain Dejoux <adejoux@djouxtech.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Alain Dejoux (@adejoux)
+module: aix_lvol
+short_description: Configure AIX LVM logical volumes
+description:
+  - This module creates, removes or resizes AIX logical volumes. Inspired by the lvol module.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ vg:
+ description:
+ - The volume group this logical volume is part of.
+ type: str
+ required: true
+ lv:
+ description:
+ - The name of the logical volume.
+ type: str
+ required: true
+ lv_type:
+ description:
+ - The type of the logical volume.
+ type: str
+ default: jfs2
+ size:
+ description:
+ - The size of the logical volume with one of the [MGT] units.
+ type: str
+ copies:
+ description:
+ - The number of copies of the logical volume.
+    - The maximum number of copies is 3.
+ type: int
+ default: 1
+ policy:
+ description:
+    - Sets the inter-physical volume allocation policy.
+ - C(maximum) allocates logical partitions across the maximum number of physical volumes.
+ - C(minimum) allocates logical partitions across the minimum number of physical volumes.
+ type: str
+ choices: [ maximum, minimum ]
+ default: maximum
+ state:
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ opts:
+ description:
+ - Free-form options to be passed to the mklv command.
+ type: str
+ default: ''
+ pvs:
+ description:
+ - A list of physical volumes e.g. C(hdisk1,hdisk2).
+ type: list
+ elements: str
+ default: []
+'''
+
+EXAMPLES = r'''
+- name: Create a logical volume of 512M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+
+- name: Create a logical volume of 512M with disks hdisk1 and hdisk2
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test2lv
+ size: 512M
+ pvs: [ hdisk1, hdisk2 ]
+
+- name: Create a logical volume of 512M mirrored
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test3lv
+ size: 512M
+ copies: 2
+
+- name: Create a logical volume of 1G with a minimum placement policy
+ community.general.aix_lvol:
+ vg: rootvg
+ lv: test4lv
+ size: 1G
+ policy: minimum
+
+- name: Create a logical volume with special options like mirror pool
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ size: 512M
+ opts: -p copy1=poolA -p copy2=poolB
+
+- name: Extend the logical volume to 1200M
+ community.general.aix_lvol:
+ vg: testvg
+ lv: test4lv
+ size: 1200M
+
+- name: Remove the logical volume
+ community.general.aix_lvol:
+ vg: testvg
+ lv: testlv
+ state: absent
+'''
+
+RETURN = r'''
+msg:
+ type: str
+ description: A friendly message describing the task result.
+ returned: always
+ sample: Logical volume testlv created.
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def convert_size(module, size):
+ unit = size[-1].upper()
+ units = ['M', 'G', 'T']
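+    # Normalize to megabytes: M -> 1, G -> 1024, T -> 1024**2.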
+ try:
+ multiplier = 1024 ** units.index(unit)
+ except ValueError:
+ module.fail_json(msg="No valid size unit specified.")
+
+ return int(size[:-1]) * multiplier
+
+
+def round_ppsize(x, base=16):
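+    # Round x to a multiple of base (the PP size), never returning less than x.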
+ new_size = int(base * round(float(x) / base))
+ if new_size < x:
+ new_size += base
+ return new_size
+
+
+def parse_lv(data):
+ name = None
+
+ for line in data.splitlines():
+ match = re.search(r"LOGICAL VOLUME:\s+(\w+)\s+VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ vg = match.group(2)
+ continue
+ match = re.search(r"LPs:\s+(\d+).*PPs", line)
+ if match is not None:
+ lps = int(match.group(1))
+ continue
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+ match = re.search(r"INTER-POLICY:\s+(\w+)", line)
+ if match is not None:
+ policy = match.group(1)
+ continue
+
+ if not name:
+ return None
+
+ size = lps * pp_size
+
+ return {'name': name, 'vg': vg, 'size': size, 'policy': policy}
+
+
+def parse_vg(data):
+
+ for line in data.splitlines():
+
+ match = re.search(r"VOLUME GROUP:\s+(\w+)", line)
+ if match is not None:
+ name = match.group(1)
+ continue
+
+ match = re.search(r"TOTAL PP.*\((\d+)", line)
+ if match is not None:
+ size = int(match.group(1))
+ continue
+
+ match = re.search(r"PP SIZE:\s+(\d+)", line)
+ if match is not None:
+ pp_size = int(match.group(1))
+ continue
+
+ match = re.search(r"FREE PP.*\((\d+)", line)
+ if match is not None:
+ free = int(match.group(1))
+ continue
+
+ return {'name': name, 'size': size, 'free': free, 'pp_size': pp_size}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str', required=True),
+ lv_type=dict(type='str', default='jfs2'),
+ size=dict(type='str'),
+ opts=dict(type='str', default=''),
+ copies=dict(type='int', default=1),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ policy=dict(type='str', default='maximum', choices=['maximum', 'minimum']),
+ pvs=dict(type='list', elements='str', default=list())
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ lv_type = module.params['lv_type']
+ size = module.params['size']
+ opts = module.params['opts']
+ copies = module.params['copies']
+ policy = module.params['policy']
+ state = module.params['state']
+ pvs = module.params['pvs']
+
+ pv_list = ' '.join(pvs)
+
+ if policy == 'maximum':
+ lv_policy = 'x'
+ else:
+ lv_policy = 'm'
+
+ # Add echo command when running in check-mode
+ if module.check_mode:
+ test_opt = 'echo '
+ else:
+ test_opt = ''
+
+ # check if system commands are available
+ lsvg_cmd = module.get_bin_path("lsvg", required=True)
+ lslv_cmd = module.get_bin_path("lslv", required=True)
+
+ # Get information on volume group requested
+ rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, out=vg_info, err=err)
+
+ this_vg = parse_vg(vg_info)
+
+ if size is not None:
+ # Calculate pp size and round it up based on pp size.
+ lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size'])
+
+ # Get information on logical volume requested
+ rc, lv_info, err = module.run_command(
+ "%s %s" % (lslv_cmd, lv))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, msg="Logical Volume %s does not exist." % lv)
+
+ changed = False
+
+ this_lv = parse_lv(lv_info)
+
+ if state == 'present' and not size:
+ if this_lv is None:
+ module.fail_json(msg="No size given.")
+
+ if this_lv is None:
+ if state == 'present':
+ if lv_size > this_vg['free']:
+ module.fail_json(msg="Not enough free space in volume group %s: %s MB free." % (this_vg['name'], this_vg['free']))
+
+ # create LV
+ mklv_cmd = module.get_bin_path("mklv", required=True)
+
+ cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s created." % lv)
+ else:
+ module.fail_json(msg="Creating logical volume %s failed." % lv, rc=rc, out=out, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ rmlv_cmd = module.get_bin_path("rmlv", required=True)
+ rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s deleted." % lv)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s." % lv, rc=rc, out=out, err=err)
+ else:
+ if this_lv['policy'] != policy:
+ # change lv allocation policy
+ chlv_cmd = module.get_bin_path("chlv", required=True)
+ rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy))
+ else:
+ module.fail_json(msg="Failed to change logical volume %s policy." % lv, rc=rc, out=out, err=err)
+
+ if vg != this_lv['vg']:
+ module.fail_json(msg="Logical volume %s already exist in volume group %s" % (lv, this_lv['vg']))
+
+            # From here on, the only remaining action is resizing; if no size parameter was passed, do nothing.
+            if not size:
+                module.exit_json(changed=False, msg="Logical volume %s already exists." % lv)
+
+ # resize LV based on absolute values
+ if int(lv_size) > this_lv['size']:
+ extendlv_cmd = module.get_bin_path("extendlv", required=True)
+ cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size'])
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." % (lv, lv_size))
+ else:
+ module.fail_json(msg="Unable to resize %s to %sMB." % (lv, lv_size), rc=rc, out=out, err=err)
+ elif lv_size < this_lv['size']:
+ module.fail_json(msg="No shrinking of Logical Volume %s permitted. Current size: %s MB" % (lv, this_lv['size']))
+ else:
+ module.exit_json(changed=False, msg="Logical volume %s size is already %sMB." % (lv, lv_size))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/alerta_customer.py b/ansible_collections/community/general/plugins/modules/alerta_customer.py
new file mode 100644
index 000000000..120d98932
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/alerta_customer.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Christian Wollinger <@cwollinger>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: alerta_customer
+short_description: Manage customers in Alerta
+version_added: 4.8.0
+description:
+ - Create or delete customers in Alerta with the REST API.
+author: Christian Wollinger (@cwollinger)
+seealso:
+ - name: API documentation
+ description: Documentation for Alerta API
+ link: https://docs.alerta.io/api/reference.html#customers
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ customer:
+ description:
+ - Name of the customer.
+ required: true
+ type: str
+ match:
+ description:
+      - The matching logged-in user for the customer.
+ required: true
+ type: str
+ alerta_url:
+ description:
+ - The Alerta API endpoint.
+ required: true
+ type: str
+ api_username:
+ description:
+ - The username for the API using basic auth.
+ type: str
+ api_password:
+ description:
+ - The password for the API using basic auth.
+ type: str
+ api_key:
+ description:
+ - The access token for the API.
+ type: str
+ state:
+ description:
+ - Whether the customer should exist or not.
+ - Both I(customer) and I(match) identify a customer that should be added or removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = """
+- name: Create customer
+ community.general.alerta_customer:
+ alerta_url: https://alerta.example.com
+ api_username: admin@example.com
+ api_password: password
+ customer: Developer
+ match: dev@example.com
+
+- name: Delete customer
+ community.general.alerta_customer:
+ alerta_url: https://alerta.example.com
+ api_username: admin@example.com
+ api_password: password
+ customer: Developer
+ match: dev@example.com
+ state: absent
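+
+# A hypothetical variant using an API key instead of basic auth; api_key is
+# mutually exclusive with api_username/api_password in this module.
+- name: Create customer using an API key
+  community.general.alerta_customer:
+    alerta_url: https://alerta.example.com
+    api_key: demo-api-key
+    customer: Developer
+    match: dev@example.com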
+"""
+
+RETURN = """
+msg:
+ description:
+ - Success or failure message.
+ returned: always
+ type: str
+ sample: Customer customer1 created
+response:
+ description:
+ - The response from the API.
+ returned: always
+ type: dict
+"""
+
+from ansible.module_utils.urls import fetch_url, basic_auth_header
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AlertaInterface(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.customer = module.params['customer']
+ self.match = module.params['match']
+ self.alerta_url = module.params['alerta_url']
+ self.headers = {"Content-Type": "application/json"}
+
+ if module.params.get('api_key', None):
+ self.headers["Authorization"] = "Key %s" % module.params['api_key']
+ else:
+ self.headers["Authorization"] = basic_auth_header(module.params['api_username'], module.params['api_password'])
+
+ def send_request(self, url, data=None, method="GET"):
+ response, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
+
+ status_code = info["status"]
+ if status_code == 401:
+ self.module.fail_json(failed=True, response=info, msg="Unauthorized to request '%s' on '%s'" % (method, url))
+ elif status_code == 403:
+ self.module.fail_json(failed=True, response=info, msg="Permission Denied for '%s' on '%s'" % (method, url))
+ elif status_code == 404:
+ self.module.fail_json(failed=True, response=info, msg="Not found for request '%s' on '%s'" % (method, url))
+ elif status_code in (200, 201):
+ return self.module.from_json(response.read())
+ self.module.fail_json(failed=True, response=info, msg="Alerta API error with HTTP %d for %s" % (status_code, url))
+
+ def get_customers(self):
+ url = "%s/api/customers" % self.alerta_url
+ response = self.send_request(url)
+ pages = response["pages"]
+ if pages > 1:
+ for page in range(2, pages + 1):
+ page_url = url + '?page=' + str(page)
+ new_results = self.send_request(page_url)
+                # Merge the customer lists; a plain dict.update() would
+                # overwrite the pages fetched so far with the last page.
+                response['customers'].extend(new_results['customers'])
+ return response
+
+ def create_customer(self):
+ url = "%s/api/customer" % self.alerta_url
+
+ payload = {
+ 'customer': self.customer,
+ 'match': self.match,
+ }
+
+ payload = self.module.jsonify(payload)
+ response = self.send_request(url, payload, 'POST')
+ return response
+
+    def delete_customer(self, customer_id):
+        url = "%s/api/customer/%s" % (self.alerta_url, customer_id)
+
+ response = self.send_request(url, None, 'DELETE')
+ return response
+
+ def find_customer_id(self, customer):
+ for i in customer['customers']:
+ if self.customer == i['customer'] and self.match == i['match']:
+ return i['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ customer=dict(type='str', required=True),
+ match=dict(type='str', required=True),
+ alerta_url=dict(type='str', required=True),
+ api_username=dict(type='str'),
+ api_password=dict(type='str', no_log=True),
+ api_key=dict(type='str', no_log=True),
+ ),
+ required_together=[['api_username', 'api_password']],
+ mutually_exclusive=[['api_username', 'api_key']],
+ supports_check_mode=True
+ )
+
+ alerta_iface = AlertaInterface(module)
+
+ if alerta_iface.state == 'present':
+ response = alerta_iface.get_customers()
+ if alerta_iface.find_customer_id(response):
+ module.exit_json(changed=False, response=response, msg="Customer %s already exists" % alerta_iface.customer)
+ else:
+ if not module.check_mode:
+ response = alerta_iface.create_customer()
+ module.exit_json(changed=True, response=response, msg="Customer %s created" % alerta_iface.customer)
+ else:
+ response = alerta_iface.get_customers()
+        customer_id = alerta_iface.find_customer_id(response)
+        if customer_id:
+            if not module.check_mode:
+                alerta_iface.delete_customer(customer_id)
+            module.exit_json(changed=True, response=response, msg="Customer %s with id %s deleted" % (alerta_iface.customer, customer_id))
+ else:
+ module.exit_json(changed=False, response=response, msg="Customer %s does not exists" % alerta_iface.customer)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance.py b/ansible_collections/community/general/plugins/modules/ali_instance.py
new file mode 100644
index 000000000..232c21ee0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ali_instance.py
@@ -0,0 +1,1012 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance
+short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security Group
+description:
+ - Create, start, stop, restart, modify or terminate ecs instances.
+ - Add or remove ecs instances to/from security group.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - The state of the instance after operating.
+ default: 'present'
+ choices: ['present', 'running', 'stopped', 'restarted', 'absent']
+ type: str
+ availability_zone:
+ description:
+ - Aliyun availability zone ID in which to launch the instance.
+        If it is not specified, it will be allocated by the system automatically.
+ aliases: ['alicloud_zone', 'zone_id']
+ type: str
+ image_id:
+ description:
+ - Image ID used to launch instances. Required when I(state=present) and creating new ECS instances.
+ aliases: ['image']
+ type: str
+ instance_type:
+ description:
+ - Instance type used to launch instances. Required when I(state=present) and creating new ECS instances.
+ aliases: ['type']
+ type: str
+ security_groups:
+ description:
+ - A list of security group IDs.
+ aliases: ['group_ids']
+ type: list
+ elements: str
+ vswitch_id:
+ description:
+ - The subnet ID in which to launch the instances (VPC).
+ aliases: ['subnet_id']
+ type: str
+ instance_name:
+ description:
+ - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an
+ uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-".
+ It cannot begin with http:// or https://.
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://.
+ type: str
+ internet_charge_type:
+ description:
+ - Internet charge type of ECS instance.
+ default: 'PayByBandwidth'
+ choices: ['PayByBandwidth', 'PayByTraffic']
+ type: str
+ max_bandwidth_in:
+ description:
+ - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second).
+ default: 200
+ type: int
+ max_bandwidth_out:
+ description:
+ - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second).
+ Required when I(allocate_public_ip=true). Ignored when I(allocate_public_ip=false).
+ default: 0
+ type: int
+ host_name:
+ description:
+ - Instance host name. Ordered hostname is not supported.
+ type: str
+ unique_suffix:
+ description:
+ - Specifies whether to add sequential suffixes to the host_name.
+ The sequential suffix ranges from 001 to 999.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ password:
+ description:
+      - The password to log in to the instance. After rebooting instances, the modified password will take effect.
+ type: str
+ system_disk_category:
+ description:
+ - Category of the system disk.
+ default: 'cloud_efficiency'
+ choices: ['cloud_efficiency', 'cloud_ssd']
+ type: str
+ system_disk_size:
+ description:
+ - Size of the system disk, in GB. The valid values are 40~500.
+ default: 40
+ type: int
+ system_disk_name:
+ description:
+ - Name of the system disk.
+ type: str
+ system_disk_description:
+ description:
+ - Description of the system disk.
+ type: str
+ count:
+ description:
+      - The number of instances to manage. An integer value which indicates how many instances matching I(count_tag)
+ should be running. Instances are either created or terminated based on this value.
+ default: 1
+ type: int
+ count_tag:
+ description:
+      - I(count) determines how many instances matching a specific tag criterion should be present.
+ This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ The specified count_tag must already exist or be passed in as the I(tags) option.
+ If it is not specified, it will be replaced by I(instance_name).
+ type: str
+ allocate_public_ip:
+ description:
+      - Whether to allocate a public IP for the new instance.
+ default: false
+ aliases: [ 'assign_public_ip' ]
+ type: bool
+ instance_charge_type:
+ description:
+ - The charge type of the instance.
+ choices: ['PrePaid', 'PostPaid']
+ default: 'PostPaid'
+ type: str
+ period:
+ description:
+ - The charge duration of the instance, in months. Required when I(instance_charge_type=PrePaid).
+      - The valid values are [1-9, 12, 24, 36].
+ default: 1
+ type: int
+ auto_renew:
+ description:
+      - Whether to automatically renew the charge of the instance.
+ type: bool
+ default: false
+ auto_renew_period:
+ description:
+      - The duration of the automatic renewal of the instance charge. Required when I(auto_renew=true).
+ choices: [1, 2, 3, 6, 12]
+ type: int
+ instance_ids:
+ description:
+      - A list of instance IDs. It is required when you need to operate on existing instances.
+        If it is specified, I(count) is ignored.
+ type: list
+ elements: str
+ force:
+ description:
+      - Whether the current operation needs to be executed forcibly.
+ default: false
+ type: bool
+ tags:
+ description:
+      - A hash/dictionary of instance tags, to add to the new instance or for starting/stopping instances by tag. C({"key":"value"}).
+ aliases: ["instance_tags"]
+ type: dict
+ version_added: '0.2.0'
+ purge_tags:
+ description:
+ - Delete any tags not specified in the task that are on the instance.
+        If C(true), this means you have to specify all the desired tags on each task affecting an instance.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ key_name:
+ description:
+ - The name of key pair which is used to access ECS instance in SSH.
+ required: false
+ type: str
+ aliases: ['keypair']
+ user_data:
+ description:
+ - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance.
+        It only takes effect when launching new ECS instances.
+ required: false
+ type: str
+ ram_role_name:
+ description:
+ - The name of the instance RAM role.
+ type: str
+ version_added: '0.2.0'
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal
+ places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit.
+ type: float
+ version_added: '0.2.0'
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid.
+ choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']
+ default: 'NoSpot'
+ type: str
+ version_added: '0.2.0'
+ period_unit:
+ description:
+ - The duration unit that you will buy the resource. It is valid when I(instance_charge_type=PrePaid).
+ choices: ['Month', 'Week']
+ default: 'Month'
+ type: str
+ version_added: '0.2.0'
+ dry_run:
+ description:
+ - Specifies whether to send a dry-run request.
+      - If I(dry_run=true), only a dry-run request is sent and no instance is created. The system checks whether the
+        required parameters are set, and validates the request format, service permissions, and available ECS instances.
+        If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned.
+      - If I(dry_run=false), a request is sent. If the validation succeeds, the instance is created.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ include_data_disks:
+ description:
+      - Whether to change the charge type of the instance's disks when changing the instance charge type.
+ default: true
+ type: bool
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.19.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+# basic provisioning example vpc network
+- name: Basic provisioning example
+ hosts: localhost
+ vars:
+ alicloud_access_key: <your-alicloud-access-key-id>
+ alicloud_secret_key: <your-alicloud-access-secret-key>
+ alicloud_region: cn-beijing
+ image: ubuntu1404_64_40G_cloudinit_20160727.raw
+ instance_type: ecs.n4.small
+ vswitch_id: vsw-abcd1234
+ assign_public_ip: true
+ max_bandwidth_out: 10
+ host_name: myhost
+ password: mypassword
+ system_disk_category: cloud_efficiency
+ system_disk_size: 100
+ internet_charge_type: PayByBandwidth
+ security_groups: ["sg-f2rwnfh23r"]
+
+ instance_ids: ["i-abcd12346", "i-abcd12345"]
+ force: true
+
+ tasks:
+ - name: Launch ECS instance in VPC network
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ vswitch_id: '{{ vswitch_id }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: With count and count_tag to create a number of instances
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ image: '{{ image }}'
+ system_disk_category: '{{ system_disk_category }}'
+ system_disk_size: '{{ system_disk_size }}'
+ instance_type: '{{ instance_type }}'
+ assign_public_ip: '{{ assign_public_ip }}'
+ security_groups: '{{ security_groups }}'
+ internet_charge_type: '{{ internet_charge_type }}'
+ max_bandwidth_out: '{{ max_bandwidth_out }}'
+ tags:
+ Name: created_one
+ Version: 0.1
+ count: 2
+ count_tag:
+ Name: created_one
+ host_name: '{{ host_name }}'
+ password: '{{ password }}'
+
+ - name: Start instance
+ community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'running'
+
+ - name: Reboot instance forcibly
+      community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ state: 'restarted'
+ force: '{{ force }}'
+
+    - name: Add instances to a security group
+      community.general.ali_instance:
+ alicloud_access_key: '{{ alicloud_access_key }}'
+ alicloud_secret_key: '{{ alicloud_secret_key }}'
+ alicloud_region: '{{ alicloud_region }}'
+ instance_ids: '{{ instance_ids }}'
+ security_groups: '{{ security_groups }}'
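+
+    # A hypothetical teardown task for the state=absent choice documented
+    # above; force permits terminating instances that are still running.
+    - name: Terminate instances
+      community.general.ali_instance:
+        alicloud_access_key: '{{ alicloud_access_key }}'
+        alicloud_secret_key: '{{ alicloud_secret_key }}'
+        alicloud_region: '{{ alicloud_region }}'
+        instance_ids: '{{ instance_ids }}'
+        force: true
+        state: 'absent'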
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+      description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+          description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ instance_type_family:
+      description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+      description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+      description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+      description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+      description: The public IPv4 address or EIP address assigned to the instance.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ user_data:
+ description: User-defined data.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ spot_price_limit:
+ description:
+ - The maximum hourly price for the preemptible instance.
+ returned: always
+ type: float
+ sample: 0.97
+ spot_strategy:
+ description:
+ - The bidding mode of the pay-as-you-go instance.
+ returned: always
+ type: str
+ sample: NoSpot
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+import ast
+import re
+import time
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
+ ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
+)
+
+
+def get_instances_info(connection, ids):
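+    # Read back the full details of the given instances, attaching disk and
+    # user-data information to each one before serializing it with read().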
+ result = []
+ instances = connection.describe_instances(instance_ids=ids)
+ if len(instances) > 0:
+ for inst in instances:
+ volumes = connection.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ result.append(inst.read())
+ return result
+
+
+def run_instance(module, ecs, exact_count):
+ if exact_count <= 0:
+ return None
+ zone_id = module.params['availability_zone']
+ image_id = module.params['image_id']
+ instance_type = module.params['instance_type']
+ security_groups = module.params['security_groups']
+ vswitch_id = module.params['vswitch_id']
+ instance_name = module.params['instance_name']
+ description = module.params['description']
+ internet_charge_type = module.params['internet_charge_type']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ max_bandwidth_in = module.params['max_bandwidth_in']
+ host_name = module.params['host_name']
+ password = module.params['password']
+ system_disk_category = module.params['system_disk_category']
+ system_disk_size = module.params['system_disk_size']
+ system_disk_name = module.params['system_disk_name']
+ system_disk_description = module.params['system_disk_description']
+ allocate_public_ip = module.params['allocate_public_ip']
+ period = module.params['period']
+ auto_renew = module.params['auto_renew']
+ instance_charge_type = module.params['instance_charge_type']
+ auto_renew_period = module.params['auto_renew_period']
+ user_data = module.params['user_data']
+ key_name = module.params['key_name']
+ ram_role_name = module.params['ram_role_name']
+ spot_price_limit = module.params['spot_price_limit']
+ spot_strategy = module.params['spot_strategy']
+ unique_suffix = module.params['unique_suffix']
+    # check whether the required parameters were passed
+ if not image_id:
+ module.fail_json(msg='image_id is required for new instance')
+ if not instance_type:
+ module.fail_json(msg='instance_type is required for new instance')
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ if len(security_groups) <= 0:
+        module.fail_json(msg='Expected the parameter security_groups to be non-empty when creating new ECS instances, aborting')
+
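+    # The client token makes the RunInstances request idempotent on the API
+    # side; derive it from the module parameters and the current time.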
+ client_token = "Ansible-Alicloud-{0}-{1}".format(hash(str(module.params)), str(time.time()))
+
+ try:
+        # call footmark's run_instances method
+ instances = ecs.run_instances(image_id=image_id, instance_type=instance_type, security_group_id=security_groups[0],
+ zone_id=zone_id, instance_name=instance_name, description=description,
+ internet_charge_type=internet_charge_type, internet_max_bandwidth_out=max_bandwidth_out,
+ internet_max_bandwidth_in=max_bandwidth_in, host_name=host_name, password=password,
+ io_optimized='optimized', system_disk_category=system_disk_category,
+ system_disk_size=system_disk_size, system_disk_disk_name=system_disk_name,
+ system_disk_description=system_disk_description, vswitch_id=vswitch_id,
+ amount=exact_count, instance_charge_type=instance_charge_type, period=period, period_unit="Month",
+ auto_renew=auto_renew, auto_renew_period=auto_renew_period, key_pair_name=key_name,
+ user_data=user_data, client_token=client_token, ram_role_name=ram_role_name,
+ spot_price_limit=spot_price_limit, spot_strategy=spot_strategy, unique_suffix=unique_suffix)
+
+ except Exception as e:
+ module.fail_json(msg='Unable to create instance, error: {0}'.format(e))
+
+ return instances
+
+
+def modify_instance(module, instance):
+    # Modify some of the instance's attributes depending on the requested state
+ state = module.params["state"]
+ name = module.params['instance_name']
+ unique_suffix = module.params['unique_suffix']
+ if not name:
+ name = instance.name
+
+ description = module.params['description']
+ if not description:
+ description = instance.description
+
+ host_name = module.params['host_name']
+ if unique_suffix and host_name:
+ suffix = instance.host_name[-3:]
+ host_name = host_name + suffix
+
+ if not host_name:
+ host_name = instance.host_name
+
+ # password can be modified only when restart instance
+ password = ""
+ if state == "restarted":
+ password = module.params['password']
+
+ # userdata can be modified only when instance is stopped
+ setattr(instance, "user_data", instance.describe_user_data())
+ user_data = instance.user_data
+ if state == "stopped":
+ user_data = module.params['user_data'].encode()
+
+ try:
+ return instance.modify(name=name, description=description, host_name=host_name, password=password, user_data=user_data)
+ except Exception as e:
+ module.fail_json(msg="Modify instance {0} attribute got an error: {1}".format(instance.id, e))
+
+
+def wait_for_instance_modify_charge(ecs, instance_ids, charge_type, delay=10, timeout=300):
+ """
+    Wait until each instance's charge type matches the expected value after modifying the instance charge type.
+ """
+ try:
+ while True:
+ instances = ecs.describe_instances(instance_ids=instance_ids)
+ flag = True
+ for inst in instances:
+ if inst and inst.instance_charge_type != charge_type:
+ flag = False
+ if flag:
+ return
+ timeout -= delay
+ time.sleep(delay)
+ if timeout <= 0:
+ raise Exception("Timeout Error: Waiting for instance to {0}. ".format(charge_type))
+ except Exception as e:
+ raise e
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ security_groups=dict(type='list', elements='str', aliases=['group_ids']),
+ availability_zone=dict(type='str', aliases=['alicloud_zone', 'zone_id']),
+ instance_type=dict(type='str', aliases=['type']),
+ image_id=dict(type='str', aliases=['image']),
+ count=dict(type='int', default=1),
+ count_tag=dict(type='str'),
+ vswitch_id=dict(type='str', aliases=['subnet_id']),
+ instance_name=dict(type='str', aliases=['name']),
+ host_name=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ internet_charge_type=dict(type='str', default='PayByBandwidth', choices=['PayByBandwidth', 'PayByTraffic']),
+ max_bandwidth_in=dict(type='int', default=200),
+ max_bandwidth_out=dict(type='int', default=0),
+ system_disk_category=dict(type='str', default='cloud_efficiency', choices=['cloud_efficiency', 'cloud_ssd']),
+ system_disk_size=dict(type='int', default=40),
+ system_disk_name=dict(type='str'),
+ system_disk_description=dict(type='str'),
+ force=dict(type='bool', default=False),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ purge_tags=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'running', 'stopped', 'restarted', 'absent']),
+ description=dict(type='str'),
+ allocate_public_ip=dict(type='bool', aliases=['assign_public_ip'], default=False),
+ instance_charge_type=dict(type='str', default='PostPaid', choices=['PrePaid', 'PostPaid']),
+ period=dict(type='int', default=1),
+ auto_renew=dict(type='bool', default=False),
+ instance_ids=dict(type='list', elements='str'),
+ auto_renew_period=dict(type='int', choices=[1, 2, 3, 6, 12]),
+ key_name=dict(type='str', aliases=['keypair']),
+ user_data=dict(type='str'),
+ ram_role_name=dict(type='str'),
+ spot_price_limit=dict(type='float'),
+ spot_strategy=dict(type='str', default='NoSpot', choices=['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo']),
+ unique_suffix=dict(type='bool', default=False),
+ period_unit=dict(type='str', default='Month', choices=['Month', 'Week']),
+ dry_run=dict(type='bool', default=False),
+ include_data_disks=dict(type='bool', default=True)
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+ host_name = module.params['host_name']
+ state = module.params['state']
+ instance_ids = module.params['instance_ids']
+ count_tag = module.params['count_tag']
+ count = module.params['count']
+ instance_name = module.params['instance_name']
+ force = module.params['force']
+ zone_id = module.params['availability_zone']
+ key_name = module.params['key_name']
+ tags = module.params['tags']
+ max_bandwidth_out = module.params['max_bandwidth_out']
+ instance_charge_type = module.params['instance_charge_type']
+ if instance_charge_type == "PrePaid":
+ module.params['spot_strategy'] = ''
+ changed = False
+
+ instances = []
+ if instance_ids:
+ if not isinstance(instance_ids, list):
+ module.fail_json(msg='The parameter instance_ids should be a list, aborting')
+ instances = ecs.describe_instances(zone_id=zone_id, instance_ids=instance_ids)
+ if not instances:
+ module.fail_json(msg="There are no instances in our record based on instance_ids {0}. "
+ "Please check it and try again.".format(instance_ids))
+ elif count_tag:
+        # Parse the tag criteria with ast.literal_eval() instead of eval() to
+        # avoid executing arbitrary expressions from module input.
+        instances = ecs.describe_instances(zone_id=zone_id, tags=ast.literal_eval(count_tag))
+ elif instance_name:
+ instances = ecs.describe_instances(zone_id=zone_id, instance_name=instance_name)
+
+ ids = []
+ if state == 'absent':
+ if len(instances) < 1:
+            module.fail_json(msg='Please specify ECS instances that you want to operate on by using '
+                                 'parameters instance_ids, tags or instance_name, aborting')
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="Instance is running, and please stop it or set 'force' as True.")
+ targets.append(inst.id)
+ if ecs.delete_instances(instance_ids=targets, force=force):
+ changed = True
+ ids.extend(targets)
+
+ module.exit_json(changed=changed, ids=ids, instances=[])
+ except Exception as e:
+ module.fail_json(msg='Delete instance got an error: {0}'.format(e))
+
+ if module.params['allocate_public_ip'] and max_bandwidth_out < 0:
+ module.fail_json(msg="'max_bandwidth_out' should be greater than 0 when 'allocate_public_ip' is True.")
+ if not module.params['allocate_public_ip']:
+ module.params['max_bandwidth_out'] = 0
+
+ if state == 'present':
+ if not instance_ids:
+ if len(instances) > count:
+ for i in range(0, len(instances) - count):
+ inst = instances[len(instances) - 1]
+ if inst.status != 'stopped' and not force:
+ module.fail_json(msg="That to delete instance {0} is failed results from it is running, "
+ "and please stop it or set 'force' as True.".format(inst.id))
+ try:
+ if inst.terminate(force=force):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="Delete instance {0} got an error: {1}".format(inst.id, e))
+ instances.pop(len(instances) - 1)
+ else:
+ try:
+ if re.search(r"-\[\d+,\d+\]-", host_name):
+ module.fail_json(msg='Ordered hostname is not supported, If you want to add an ordered '
+ 'suffix to the hostname, you can set unique_suffix to True')
+ new_instances = run_instance(module, ecs, count - len(instances))
+ if new_instances:
+ changed = True
+ instances.extend(new_instances)
+ except Exception as e:
+ module.fail_json(msg="Create new instances got an error: {0}".format(e))
+
+ # Security Group join/leave begin
+ security_groups = module.params['security_groups']
+ if security_groups:
+ if not isinstance(security_groups, list):
+ module.fail_json(msg='The parameter security_groups should be a list, aborting')
+ for inst in instances:
+ existing = inst.security_group_ids['security_group_id']
+ remove = list(set(existing).difference(set(security_groups)))
+ add = list(set(security_groups).difference(set(existing)))
+ for sg in remove:
+ if inst.leave_security_group(sg):
+ changed = True
+ for sg in add:
+ if inst.join_security_group(sg):
+ changed = True
+ # Security Group join/leave ends here
+
+ # Attach/Detach key pair
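+    # An empty key_name ("") detaches the current key pair; a different
+    # non-empty key_name is attached to all collected instances in one call.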
+ inst_ids = []
+ for inst in instances:
+ if key_name is not None and key_name != inst.key_name:
+ if key_name == "":
+ if inst.detach_key_pair():
+ changed = True
+ else:
+ inst_ids.append(inst.id)
+ if inst_ids:
+ changed = ecs.attach_key_pair(instance_ids=inst_ids, key_pair_name=key_name)
+
+ # Modify instance attribute
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.id not in ids:
+ ids.append(inst.id)
+
+ # Modify instance charge type
+ ids = []
+ for inst in instances:
+ if inst.instance_charge_type != instance_charge_type:
+ ids.append(inst.id)
+ if ids:
+ params = {"instance_ids": ids, "instance_charge_type": instance_charge_type,
+ "include_data_disks": module.params['include_data_disks'], "dry_run": module.params['dry_run'],
+ "auto_pay": True}
+ if instance_charge_type == 'PrePaid':
+ params['period'] = module.params['period']
+ params['period_unit'] = module.params['period_unit']
+
+ if ecs.modify_instance_charge_type(**params):
+ changed = True
+ wait_for_instance_modify_charge(ecs, ids, instance_charge_type)
+
+ else:
+ if len(instances) < 1:
+            module.fail_json(msg='Please specify ECS instances that you want to operate on by using '
+                                 'parameters instance_ids, tags or instance_name, aborting')
+ if state == 'running':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ if inst.status != "running":
+ targets.append(inst.id)
+ ids.append(inst.id)
+ if targets and ecs.start_instances(instance_ids=targets):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Start instances got an error: {0}'.format(e))
+ elif state == 'stopped':
+ try:
+ targets = []
+ for inst in instances:
+ if inst.status != "stopped":
+ targets.append(inst.id)
+ if targets and ecs.stop_instances(instance_ids=targets, force_stop=force):
+ changed = True
+ ids.extend(targets)
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='Stop instances got an error: {0}'.format(e))
+ elif state == 'restarted':
+ try:
+ targets = []
+ for inst in instances:
+ if modify_instance(module, inst):
+ changed = True
+ targets.append(inst.id)
+ if ecs.reboot_instances(instance_ids=targets, force_stop=module.params['force']):
+ changed = True
+ ids.extend(targets)
+ except Exception as e:
+ module.fail_json(msg='Reboot instances got an error: {0}'.format(e))
+
+ tags = module.params['tags']
+ if module.params['purge_tags']:
+ for inst in instances:
+ if not tags:
+ tags = inst.tags
+ try:
+ if inst.remove_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+ if tags:
+ for inst in instances:
+ try:
+ if inst.add_tags(tags):
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="{0}".format(e))
+ module.exit_json(changed=changed, instances=get_instances_info(ecs, ids))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ali_instance_info.py b/ansible_collections/community/general/plugins/modules/ali_instance_info.py
new file mode 100644
index 000000000..e7ec7f395
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ali_instance_info.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <heguimin36@163.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see http://www.gnu.org/licenses/.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ali_instance_info
+short_description: Gather information on instances of Alibaba Cloud ECS
+description:
+ - This module fetches data from the Open API in Alicloud.
+ The module must be called from within the ECS instance itself.
+ - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name_prefix:
+ description:
+      - Use an instance name prefix to filter ECS instances.
+ type: str
+ version_added: '0.2.0'
+ tags:
+ description:
+ - A hash/dictionaries of instance tags. C({"key":"value"})
+ aliases: ["instance_tags"]
+ type: dict
+ filters:
+ description:
+      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be
+        any of the request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details.
+        Filter keys can be the same as the request parameter names, or lower case with underscores ("_") or dashes ("-")
+        connecting the words in one parameter. 'InstanceIds' should be a list.
+        'Tag.n.Key' and 'Tag.n.Value' should be expressed as a dict using I(tags) instead.
+ type: dict
+ version_added: '0.2.0'
+author:
+ - "He Guimin (@xiaozhu36)"
+requirements:
+ - "python >= 3.6"
+ - "footmark >= 1.13.0"
+extends_documentation_fragment:
+ - community.general.alicloud
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = '''
+# Fetch instances details according to setting different filters
+
+- name: Find all instances in the specified region
+ community.general.ali_instance_info:
+ register: all_instances
+
+- name: Find all instances based on the specified ids
+ community.general.ali_instance_info:
+    filters:
+      InstanceIds:
+        - "i-35b333d9"
+        - "i-ddav43kd"
+ register: instances_by_ids
+
+- name: Find all instances based on the specified name_prefix
+ community.general.ali_instance_info:
+ name_prefix: "ecs_instance_"
+ register: instances_by_name_prefix
+
+- name: Find instances based on tags
+ community.general.ali_instance_info:
+ tags:
+ Test: "add"
+'''
+
+RETURN = '''
+instances:
+ description: List of ECS instances
+ returned: always
+ type: complex
+ contains:
+ availability_zone:
+      description: The availability zone the instance is in.
+ returned: always
+ type: str
+ sample: cn-beijing-a
+ block_device_mappings:
+ description: Any block device mapping entries for the instance.
+ returned: always
+ type: complex
+ contains:
+ device_name:
+ description: The device name exposed to the instance (for example, /dev/xvda).
+ returned: always
+ type: str
+ sample: /dev/xvda
+ attach_time:
+          description: The time stamp when the attachment was initiated.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08:26Z"
+ delete_on_termination:
+ description: Indicates whether the volume is deleted on instance termination.
+ returned: always
+ type: bool
+ sample: true
+ status:
+ description: The attachment state.
+ returned: always
+ type: str
+ sample: in_use
+ volume_id:
+ description: The ID of the cloud disk.
+ returned: always
+ type: str
+ sample: d-2zei53pjsi117y6gf9t6
+ cpu:
+ description: The CPU core count of the instance.
+ returned: always
+ type: int
+ sample: 4
+ creation_time:
+ description: The time the instance was created.
+ returned: always
+ type: str
+ sample: "2018-06-25T04:08Z"
+ description:
+ description: The instance description.
+ returned: always
+ type: str
+ sample: "my ansible instance"
+ eip:
+ description: The attribution of EIP associated with the instance.
+ returned: always
+ type: complex
+ contains:
+ allocation_id:
+ description: The ID of the EIP.
+ returned: always
+ type: str
+ sample: eip-12345
+ internet_charge_type:
+ description: The internet charge type of the EIP.
+ returned: always
+ type: str
+ sample: "paybybandwidth"
+ ip_address:
+ description: EIP address.
+ returned: always
+ type: str
+ sample: 42.10.2.2
+ expired_time:
+ description: The time the instance will expire.
+ returned: always
+ type: str
+ sample: "2099-12-31T15:59Z"
+ gpu:
+ description: The attribution of instance GPU.
+ returned: always
+ type: complex
+ contains:
+ amount:
+ description: The count of the GPU.
+ returned: always
+ type: int
+ sample: 0
+ spec:
+ description: The specification of the GPU.
+ returned: always
+ type: str
+ sample: ""
+ host_name:
+ description: The host name of the instance.
+ returned: always
+ type: str
+ sample: iZ2zewaoZ
+ id:
+ description: Alias of instance_id.
+ returned: always
+ type: str
+ sample: i-abc12345
+ instance_id:
+ description: ECS instance resource ID.
+ returned: always
+ type: str
+ sample: i-abc12345
+ image_id:
+ description: The ID of the image used to launch the instance.
+ returned: always
+ type: str
+ sample: m-0011223344
+ inner_ip_address:
+ description: The inner IPv4 address of the classic instance.
+ returned: always
+ type: str
+ sample: 10.0.0.2
+ instance_charge_type:
+ description: The instance charge type.
+ returned: always
+ type: str
+ sample: PostPaid
+ instance_name:
+ description: The name of the instance.
+ returned: always
+ type: str
+ sample: my-ecs
+ instance_type_family:
+      description: The instance type family the instance belongs to.
+ returned: always
+ type: str
+ sample: ecs.sn1ne
+ instance_type:
+ description: The instance type of the running instance.
+ returned: always
+ type: str
+ sample: ecs.sn1ne.xlarge
+ internet_charge_type:
+ description: The billing method of the network bandwidth.
+ returned: always
+ type: str
+ sample: PayByBandwidth
+ internet_max_bandwidth_in:
+ description: Maximum incoming bandwidth from the internet network.
+ returned: always
+ type: int
+ sample: 200
+ internet_max_bandwidth_out:
+      description: Maximum outgoing bandwidth to the internet network.
+ returned: always
+ type: int
+ sample: 20
+ io_optimized:
+ description: Indicates whether the instance is optimized for EBS I/O.
+ returned: always
+ type: bool
+ sample: false
+ memory:
+ description: Memory size of the instance.
+ returned: always
+ type: int
+ sample: 8192
+ network_interfaces:
+ description: One or more network interfaces for the instance.
+ returned: always
+ type: complex
+ contains:
+ mac_address:
+ description: The MAC address.
+ returned: always
+ type: str
+ sample: "00:11:22:33:44:55"
+ network_interface_id:
+ description: The ID of the network interface.
+ returned: always
+ type: str
+ sample: eni-01234567
+ primary_ip_address:
+ description: The primary IPv4 address of the network interface within the vswitch.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ osname:
+      description: The operating system name of the instance.
+ returned: always
+ type: str
+ sample: CentOS
+ ostype:
+      description: The operating system type of the instance.
+ returned: always
+ type: str
+ sample: linux
+ private_ip_address:
+ description: The IPv4 address of the network interface within the subnet.
+ returned: always
+ type: str
+ sample: 10.0.0.1
+ public_ip_address:
+      description: The public IPv4 address or EIP address assigned to the instance.
+ returned: always
+ type: str
+ sample: 43.0.0.1
+ resource_group_id:
+ description: The id of the resource group to which the instance belongs.
+ returned: always
+ type: str
+ sample: my-ecs-group
+ security_groups:
+ description: One or more security groups for the instance.
+ returned: always
+ type: list
+ elements: dict
+ contains:
+ group_id:
+ description: The ID of the security group.
+ returned: always
+ type: str
+ sample: sg-0123456
+ group_name:
+ description: The name of the security group.
+ returned: always
+ type: str
+ sample: my-security-group
+ status:
+ description: The current status of the instance.
+ returned: always
+ type: str
+ sample: running
+ tags:
+ description: Any tags assigned to the instance.
+ returned: always
+ type: dict
+ sample:
+ vswitch_id:
+ description: The ID of the vswitch in which the instance is running.
+ returned: always
+ type: str
+ sample: vsw-dew00abcdef
+ vpc_id:
+ description: The ID of the VPC the instance is in.
+ returned: always
+ type: str
+ sample: vpc-0011223344
+ids:
+ description: List of ECS instance IDs
+ returned: always
+ type: list
+ sample: [i-12345er, i-3245fs]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import (
+ ecs_argument_spec, ecs_connect, FOOTMARK_IMP_ERR, HAS_FOOTMARK
+)
+
+
+def main():
+ argument_spec = ecs_argument_spec()
+ argument_spec.update(dict(
+ name_prefix=dict(type='str'),
+ tags=dict(type='dict', aliases=['instance_tags']),
+ filters=dict(type='dict')
+ )
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ if HAS_FOOTMARK is False:
+ module.fail_json(msg=missing_required_lib('footmark'), exception=FOOTMARK_IMP_ERR)
+
+ ecs = ecs_connect(module)
+
+ instances = []
+ instance_ids = []
+ ids = []
+ name_prefix = module.params['name_prefix']
+
+ filters = module.params['filters']
+ if not filters:
+ filters = {}
+    for key, value in list(filters.items()):
+        # Normalize any accepted instance-ID filter key into a flat,
+        # de-duplicated list of IDs.
+        if key in ["InstanceIds", "instance_ids", "instance-ids"] and isinstance(value, list):
+            for inst_id in value:
+                if inst_id not in ids:
+                    ids.append(inst_id)
+ if ids:
+ filters['instance_ids'] = ids
+ if module.params['tags']:
+ filters['tags'] = module.params['tags']
+
+ for inst in ecs.describe_instances(**filters):
+ if name_prefix:
+ if not str(inst.instance_name).startswith(name_prefix):
+ continue
+ volumes = ecs.describe_disks(instance_id=inst.id)
+ setattr(inst, 'block_device_mappings', volumes)
+ setattr(inst, 'user_data', inst.describe_user_data())
+ instances.append(inst.read())
+ instance_ids.append(inst.id)
+
+ module.exit_json(changed=False, ids=instance_ids, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/alternatives.py b/ansible_collections/community/general/plugins/modules/alternatives.py
new file mode 100644
index 000000000..97d4f51fb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/alternatives.py
@@ -0,0 +1,407 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Gabe Mulley <gabe.mulley@gmail.com>
+# Copyright (c) 2015, David Wittman <dwittman@gmail.com>
+# Copyright (c) 2022, Marius Rieder <marius.rieder@scs.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+ - Manages symbolic links using the 'update-alternatives' tool.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+author:
+ - Marius Rieder (@jiuka)
+ - David Wittman (@DavidWittman)
+ - Gabe Mulley (@mulby)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ name:
+ description:
+ - The generic name of the link.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ - This option is always required on RHEL-based distributions. On Debian-based distributions this option is
+ required when the alternative I(name) is unknown to the system.
+ type: path
+ priority:
+ description:
+      - The priority of the alternative. If no priority is given for creation, C(50) is used as a fallback.
+ type: int
+ state:
+ description:
+ - C(present) - install the alternative (if not already installed), but do
+ not set it as the currently selected alternative for the group.
+ - C(selected) - install the alternative (if not already installed), and
+ set it as the currently selected alternative for the group.
+ - C(auto) - install the alternative (if not already installed), and
+ set the group to auto mode. Added in community.general 5.1.0.
+ - C(absent) - removes the alternative. Added in community.general 5.1.0.
+ choices: [ present, selected, auto, absent ]
+ default: selected
+ type: str
+ version_added: 4.8.0
+ subcommands:
+ description:
+ - A list of subcommands.
+ - Each subcommand needs a name, a link and a path parameter.
+ - Subcommands are also named 'slaves' or 'followers', depending on the version
+ of alternatives.
+ type: list
+ elements: dict
+ aliases: ['slaves']
+ suboptions:
+ name:
+ description:
+ - The generic name of the subcommand.
+ type: str
+ required: true
+ path:
+ description:
+ - The path to the real executable that the subcommand should point to.
+ type: path
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real subcommand executable.
+ type: path
+ required: true
+ version_added: 5.1.0
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = r'''
+- name: Correct java version selected
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: Alternatives link created
+ community.general.alternatives:
+ name: hadoop-conf
+ link: /etc/hadoop/conf
+ path: /etc/hadoop/conf.ansible
+
+- name: Make java 32 bit an alternative with low priority
+ community.general.alternatives:
+ name: java
+ path: /usr/lib/jvm/java-7-openjdk-i386/jre/bin/java
+ priority: -10
+
+- name: Install Python 3.5 but do not select it
+ community.general.alternatives:
+ name: python
+ path: /usr/bin/python3.5
+ link: /usr/bin/python
+ state: present
+
+- name: Install Python 3.5 and reset selection to auto
+ community.general.alternatives:
+ name: python
+ path: /usr/bin/python3.5
+ link: /usr/bin/python
+ state: auto
+
+- name: keytool is a subcommand of java
+ community.general.alternatives:
+ name: java
+ link: /usr/bin/java
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ subcommands:
+ - name: keytool
+ link: /usr/bin/keytool
+ path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool
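+
+# A hypothetical removal example for the state=absent choice documented above.
+- name: Remove the java alternative
+  community.general.alternatives:
+    name: java
+    path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+    state: absent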
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class AlternativeState:
+ PRESENT = "present"
+ SELECTED = "selected"
+ ABSENT = "absent"
+ AUTO = "auto"
+
+ @classmethod
+ def to_list(cls):
+ return [cls.PRESENT, cls.SELECTED, cls.ABSENT, cls.AUTO]
+
+
+class AlternativesModule(object):
+ _UPDATE_ALTERNATIVES = None
+
+ def __init__(self, module):
+ self.module = module
+ self.result = dict(changed=False, diff=dict(before=dict(), after=dict()))
+ self.module.run_command_environ_update = {'LC_ALL': 'C'}
+ self.messages = []
+ self.run()
+
+ @property
+ def mode_present(self):
+ return self.module.params.get('state') in [AlternativeState.PRESENT, AlternativeState.SELECTED, AlternativeState.AUTO]
+
+ @property
+ def mode_selected(self):
+ return self.module.params.get('state') == AlternativeState.SELECTED
+
+ @property
+ def mode_auto(self):
+ return self.module.params.get('state') == AlternativeState.AUTO
+
+ def run(self):
+ self.parse()
+
+ if self.mode_present:
+ # Check if we need to (re)install
+ subcommands_parameter = self.module.params['subcommands']
+ priority_parameter = self.module.params['priority']
+ if (
+ self.path not in self.current_alternatives or
+ (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or
+ (subcommands_parameter is not None and (
+ not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or
+ not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter)
+ ))
+ ):
+ self.install()
+
+ # Check if we need to set the preference
+ if self.mode_selected and self.current_path != self.path:
+ self.set()
+
+ # Check if we need to reset to auto
+ if self.mode_auto and self.current_mode == 'manual':
+ self.auto()
+ else:
+ # Check if we need to uninstall
+ if self.path in self.current_alternatives:
+ self.remove()
+
+ self.result['msg'] = ' '.join(self.messages)
+ self.module.exit_json(**self.result)
+
+ def install(self):
+ if not os.path.exists(self.path):
+ self.module.fail_json(msg="Specified path %s does not exist" % self.path)
+ if not self.link:
+ self.module.fail_json(msg="Unable to install the alternative: the 'link' option is required but was not given and could not be determined from the system")
+
+ cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)]
+
+ if self.module.params['subcommands'] is not None:
+ subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands]
+ cmd += [item for sublist in subcommands for item in sublist]
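+ # Illustrative example of the resulting command (paths taken from the
+ # EXAMPLES section above; the actual command depends on the parameters):
+ #   update-alternatives --install /usr/bin/java java \
+ #       /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java 50 \
+ #       --slave /usr/bin/keytool keytool \
+ #       /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool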
+
+ self.result['changed'] = True
+ self.messages.append("Install alternative '%s' for '%s'." % (self.path, self.name))
+
+ if not self.module.check_mode:
+ self.module.run_command(cmd, check_rc=True)
+
+ if self.module._diff:
+ self.result['diff']['after'] = dict(
+ state=AlternativeState.PRESENT,
+ path=self.path,
+ priority=self.priority,
+ link=self.link,
+ )
+ if self.subcommands:
+ self.result['diff']['after'].update(dict(
+ subcommands=self.subcommands
+ ))
+
+ def remove(self):
+ cmd = [self.UPDATE_ALTERNATIVES, '--remove', self.name, self.path]
+ self.result['changed'] = True
+ self.messages.append("Remove alternative '%s' from '%s'." % (self.path, self.name))
+
+ if not self.module.check_mode:
+ self.module.run_command(cmd, check_rc=True)
+
+ if self.module._diff:
+ self.result['diff']['after'] = dict(state=AlternativeState.ABSENT)
+
+ def set(self):
+ cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path]
+ self.result['changed'] = True
+ self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name))
+
+ if not self.module.check_mode:
+ self.module.run_command(cmd, check_rc=True)
+
+ if self.module._diff:
+ self.result['diff']['after']['state'] = AlternativeState.SELECTED
+
+ def auto(self):
+ cmd = [self.UPDATE_ALTERNATIVES, '--auto', self.name]
+ self.messages.append("Set alternative to auto for '%s'." % (self.name))
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ self.module.run_command(cmd, check_rc=True)
+
+ if self.module._diff:
+ self.result['diff']['after']['state'] = AlternativeState.PRESENT
+
+ @property
+ def name(self):
+ return self.module.params.get('name')
+
+ @property
+ def path(self):
+ return self.module.params.get('path')
+
+ @property
+ def link(self):
+ return self.module.params.get('link') or self.current_link
+
+ @property
+ def priority(self):
+ if self.module.params.get('priority') is not None:
+ return self.module.params.get('priority')
+ return self.current_alternatives.get(self.path, {}).get('priority', 50)
+
+ @property
+ def subcommands(self):
+ if self.module.params.get('subcommands') is not None:
+ return self.module.params.get('subcommands')
+ elif self.path in self.current_alternatives and self.current_alternatives[self.path].get('subcommands'):
+ return self.current_alternatives[self.path].get('subcommands')
+ return None
+
+ @property
+ def UPDATE_ALTERNATIVES(self):
+ if self._UPDATE_ALTERNATIVES is None:
+ self._UPDATE_ALTERNATIVES = self.module.get_bin_path('update-alternatives', True)
+ return self._UPDATE_ALTERNATIVES
+
+ def parse(self):
+ self.current_mode = None
+ self.current_path = None
+ self.current_link = None
+ self.current_alternatives = {}
+
+ # Run `update-alternatives --display <name>` to find existing alternatives
+ (rc, display_output, dummy) = self.module.run_command(
+ [self.UPDATE_ALTERNATIVES, '--display', self.name]
+ )
+
+ if rc != 0:
+ self.module.debug("No current alternative found. '%s' exited with %s" % (self.UPDATE_ALTERNATIVES, rc))
+ return
+
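+ # The regular expressions below parse `update-alternatives --display` output
+ # of roughly the following shape (illustrative sample; the exact wording
+ # varies between Debian- and RHEL-style implementations):
+ #   java - auto mode
+ #     link best version is /usr/lib/jvm/java-11/bin/java
+ #     link currently points to /usr/lib/jvm/java-11/bin/java
+ #     link java is /usr/bin/java
+ #     slave keytool is /usr/bin/keytool
+ #   /usr/lib/jvm/java-11/bin/java - priority 1111
+ #     slave keytool: /usr/lib/jvm/java-11/bin/keytool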
+ current_mode_regex = re.compile(r'\s-\s(?:status\sis\s)?(\w*)(?:\smode|.)$', re.MULTILINE)
+ current_path_regex = re.compile(r'^\s*link currently points to (.*)$', re.MULTILINE)
+ current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE)
+ subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE)
+
+ alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE)
+ subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE)
+
+ match = current_mode_regex.search(display_output)
+ if not match:
+ self.module.debug("No current mode found in output")
+ return
+ self.current_mode = match.group(1)
+
+ match = current_path_regex.search(display_output)
+ if not match:
+ self.module.debug("No current path found in output")
+ else:
+ self.current_path = match.group(1)
+
+ match = current_link_regex.search(display_output)
+ if not match:
+ self.module.debug("No current link found in output")
+ else:
+ self.current_link = match.group(1)
+
+ subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output))
+ if not subcmd_path_map and self.subcommands:
+ subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands)
+
+ for path, prio, subcmd in alternative_regex.findall(display_output):
+ self.current_alternatives[path] = dict(
+ priority=int(prio),
+ subcommands=[dict(
+ name=name,
+ path=spath,
+ link=subcmd_path_map.get(name)
+ ) for name, spath in subcmd_regex.findall(subcmd) if spath != '(null)']
+ )
+
+ if self.module._diff:
+ if self.path in self.current_alternatives:
+ self.result['diff']['before'].update(dict(
+ state=AlternativeState.PRESENT,
+ path=self.path,
+ priority=self.current_alternatives[self.path].get('priority'),
+ link=self.current_link,
+ ))
+ if self.current_alternatives[self.path].get('subcommands'):
+ self.result['diff']['before'].update(dict(
+ subcommands=self.current_alternatives[self.path].get('subcommands')
+ ))
+ if self.current_mode == 'manual' and self.current_path != self.path:
+ self.result['diff']['before'].update(dict(
+ state=AlternativeState.SELECTED
+ ))
+ else:
+ self.result['diff']['before'].update(dict(
+ state=AlternativeState.ABSENT
+ ))
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path'),
+ priority=dict(type='int'),
+ state=dict(
+ type='str',
+ choices=AlternativeState.to_list(),
+ default=AlternativeState.SELECTED,
+ ),
+ subcommands=dict(type='list', elements='dict', aliases=['slaves'], options=dict(
+ name=dict(type='str', required=True),
+ path=dict(type='path', required=True),
+ link=dict(type='path', required=True),
+ )),
+ ),
+ supports_check_mode=True,
+ )
+
+ AlternativesModule(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
new file mode 100644
index 000000000..0f38eabdf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ansible_galaxy_install.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+module: ansible_galaxy_install
+author:
+ - "Alexei Znamensky (@russoz)"
+short_description: Install Ansible roles or collections using ansible-galaxy
+version_added: 3.5.0
+description:
+ - This module allows the installation of Ansible collections or roles using C(ansible-galaxy).
+notes:
+ - >
+ B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and
+ ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters.
+ - >
+ The module will try to run using the C(C.UTF-8) locale.
+ If that fails, it will try C(en_US.UTF-8).
+ If that one also fails, the module will fail.
+requirements:
+ - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ type:
+ description:
+ - The type of installation performed by C(ansible-galaxy).
+ - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections.
+ - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices."
+ - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)."
+ type: str
+ choices: [collection, role, both]
+ required: true
+ name:
+ description:
+ - Name of the collection or role being installed.
+ - >
+ Versions can be specified in the usual C(ansible-galaxy) formats.
+ For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0).
+ - I(name) and I(requirements_file) are mutually exclusive.
+ type: str
+ requirements_file:
+ description:
+ - Path to a file containing a list of requirements to be installed.
+ - It works when I(type) is C(collection) or C(role).
+ - I(name) and I(requirements_file) are mutually exclusive.
+ - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run."
+ type: path
+ dest:
+ description:
+ - The path to the directory containing your collections or roles, according to the value of I(type).
+ - >
+ Please note that C(ansible-galaxy) will not install collections with I(type=both) when I(requirements_file)
+ contains both roles and collections and I(dest) is specified.
+ type: path
+ no_deps:
+ description:
+ - Refrain from installing dependencies.
+ version_added: 4.5.0
+ type: bool
+ default: false
+ force:
+ description:
+ - Force overwriting an existing role or collection.
+ - Using I(force=true) is mandatory when downgrading.
+ - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections."
+ type: bool
+ default: false
+ ack_ansible29:
+ description:
+ - Acknowledge using Ansible 2.9 with its limitations, and prevent the module from generating warnings about them.
+ - This option is completely ignored if using a version of Ansible greater than C(2.9.x).
+ - Note that this option will be removed without any further deprecation warning once support
+ for Ansible 2.9 is removed from this module.
+ type: bool
+ default: false
+ ack_min_ansiblecore211:
+ description:
+ - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10.
+ - Support for those versions will be removed in community.general 8.0.0.
+ At the same time, this option will be removed without any deprecation warning!
+ - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11).
+ - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true).
+ type: bool
+ default: false
+"""
+
+EXAMPLES = """
+- name: Install collection community.network
+ community.general.ansible_galaxy_install:
+ type: collection
+ name: community.network
+
+- name: Install role at specific path
+ community.general.ansible_galaxy_install:
+ type: role
+ name: ansistrano.deploy
+ dest: /ansible/roles
+
+- name: Install collections and roles together
+ community.general.ansible_galaxy_install:
+ type: both
+ requirements_file: requirements.yml
+
+- name: Force-install collection community.network at specific version
+ community.general.ansible_galaxy_install:
+ type: collection
+ name: community.network:3.0.2
+ force: true
+
+"""
+
+RETURN = """
+ type:
+ description: The value of the I(type) parameter.
+ type: str
+ returned: always
+ name:
+ description: The value of the I(name) parameter.
+ type: str
+ returned: always
+ dest:
+ description: The value of the I(dest) parameter.
+ type: str
+ returned: always
+ requirements_file:
+ description: The value of the I(requirements_file) parameter.
+ type: str
+ returned: always
+ force:
+ description: The value of the I(force) parameter.
+ type: bool
+ returned: always
+ installed_roles:
+ description:
+ - If I(requirements_file) is specified instead, returns a dictionary with all the roles installed per path.
+ - If I(name) is specified, returns that role name and the version installed per path.
+ - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
+ type: dict
+ returned: always when installing roles
+ contains:
+ "<path>":
+ description: Roles and versions for that path.
+ type: dict
+ sample:
+ /home/user42/.ansible/roles:
+ ansistrano.deploy: 3.9.0
+ baztian.xfce: v0.0.3
+ /custom/ansible/roles:
+ ansistrano.deploy: 3.8.0
+ installed_collections:
+ description:
+ - If I(requirements_file) is specified instead, returns a dictionary with all the collections installed per path.
+ - If I(name) is specified, returns that collection name and the version installed per path.
+ - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand."
+ type: dict
+ returned: always when installing collections
+ contains:
+ "<path>":
+ description: Collections and versions for that path.
+ type: dict
+ sample:
+ /home/az/.ansible/collections/ansible_collections:
+ community.docker: 1.6.0
+ community.general: 3.0.2
+ /custom/ansible/ansible_collections:
+ community.general: 3.1.0
+ new_collections:
+ description: New collections installed by this module.
+ returned: success
+ type: dict
+ sample:
+ community.general: 3.1.0
+ community.docker: 1.6.1
+ new_roles:
+ description: New roles installed by this module.
+ returned: success
+ type: dict
+ sample:
+ ansistrano.deploy: 3.8.0
+ baztian.xfce: v0.0.3
+"""
+
+import re
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException
+
+
+class AnsibleGalaxyInstall(ModuleHelper):
+ _RE_GALAXY_VERSION = re.compile(r'^ansible-galaxy(?: \[core)? (?P<version>\d+\.\d+\.\d+)(?:\.\w+)?(?:\])?')
+ _RE_LIST_PATH = re.compile(r'^# (?P<path>.*)$')
+ _RE_LIST_COLL = re.compile(r'^(?P<elem>\w+\.\w+)\s+(?P<version>[\d\.]+)\s*$')
+ _RE_LIST_ROLE = re.compile(r'^- (?P<elem>\w+\.\w+),\s+(?P<version>[\d\.]+)\s*$')
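+ # The three list regexes above are meant to match `ansible-galaxy ... list`
+ # output such as the following (illustrative samples):
+ #   # /home/user/.ansible/collections/ansible_collections
+ #   community.general 6.4.0
+ # and, for roles:
+ #   # /home/user/.ansible/roles
+ #   - ansistrano.deploy, 3.8.0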
+ _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__()
+ ansible_version = None
+ is_ansible29 = None
+
+ output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps')
+ module = dict(
+ argument_spec=dict(
+ type=dict(type='str', choices=('collection', 'role', 'both'), required=True),
+ name=dict(type='str'),
+ requirements_file=dict(type='path'),
+ dest=dict(type='path'),
+ force=dict(type='bool', default=False),
+ no_deps=dict(type='bool', default=False),
+ ack_ansible29=dict(type='bool', default=False),
+ ack_min_ansiblecore211=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[('name', 'requirements_file')],
+ required_one_of=[('name', 'requirements_file')],
+ required_if=[('type', 'both', ['requirements_file'])],
+ supports_check_mode=False,
+ )
+
+ command = 'ansible-galaxy'
+ command_args_formats = dict(
+ type=fmt.as_func(lambda v: [] if v == 'both' else [v]),
+ galaxy_cmd=fmt.as_list(),
+ requirements_file=fmt.as_opt_val('-r'),
+ dest=fmt.as_opt_val('-p'),
+ force=fmt.as_bool("--force"),
+ no_deps=fmt.as_bool("--no-deps"),
+ version=fmt.as_bool("--version"),
+ name=fmt.as_list(),
+ )
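+ # With the argument formats above, a run such as
+ #   runner("type galaxy_cmd force no_deps dest requirements_file name")
+ # produces, illustratively:
+ #   ansible-galaxy collection install --force -r requirements.yml
+ # (flags are only emitted for parameters that are actually set).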
+
+ def _make_runner(self, lang):
+ return CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=lang, check_rc=True)
+
+ def _get_ansible_galaxy_version(self):
+ class UnsupportedLocale(ModuleHelperException):
+ pass
+
+ def process(rc, out, err):
+ if (rc != 0 and "unsupported locale setting" in err) or (rc == 0 and "cannot change locale" in err):
+ raise UnsupportedLocale(msg=err)
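+ # The first output line is expected to look like one of the following
+ # (illustrative samples accepted by _RE_GALAXY_VERSION):
+ #   ansible-galaxy 2.9.27
+ #   ansible-galaxy [core 2.14.1]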
+ line = out.splitlines()[0]
+ match = self._RE_GALAXY_VERSION.match(line)
+ if not match:
+ self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line))
+ version = match.group("version")
+ version = tuple(int(x) for x in version.split('.')[:3])
+ return version
+
+ try:
+ runner = self._make_runner("C.UTF-8")
+ with runner("version", check_rc=False, output_process=process) as ctx:
+ return runner, ctx.run(version=True)
+ except UnsupportedLocale as e:
+ runner = self._make_runner("en_US.UTF-8")
+ with runner("version", check_rc=True, output_process=process) as ctx:
+ return runner, ctx.run(version=True)
+
+ def __init_module__(self):
+ self.runner, self.ansible_version = self._get_ansible_galaxy_version()
+ if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211:
+ self.module.deprecate(
+ "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. "
+ "At the same time support for them is ended, also the ack_ansible29 option will be removed. "
+ "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to suppress this message.",
+ version="8.0.0",
+ collection_name="community.general",
+ )
+ self.is_ansible29 = self.ansible_version < (2, 10)
+ if self.is_ansible29:
+ self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P<collection>\w+\.\w+):(?P<cversion>[\d\.]+)'.*"
+ r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\)'
+ r' was installed successfully)$')
+ else:
+ # Collection install output changed:
+ # ansible-base 2.10: "coll.name (x.y.z)"
+ # ansible-core 2.11+: "coll.name:x.y.z"
+ self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P<collection>\w+\.\w+)(?: \(|:)(?P<cversion>[\d\.]+)\)?'
+ r'|- (?P<role>\w+\.\w+) \((?P<rversion>[\d\.]+)\))'
+ r' was installed successfully$')
+
+ def _list_element(self, _type, path_re, elem_re):
+ def process(rc, out, err):
+ return [] if "None of the provided paths were usable" in out else out.splitlines()
+
+ with self.runner('type galaxy_cmd dest', output_process=process, check_rc=False) as ctx:
+ elems = ctx.run(type=_type, galaxy_cmd='list')
+
+ elems_dict = {}
+ current_path = None
+ for line in elems:
+ if line.startswith("#"):
+ match = path_re.match(line)
+ if not match:
+ continue
+ if self.vars.dest is not None and match.group('path') != self.vars.dest:
+ current_path = None
+ continue
+ current_path = match.group('path')
+ elems_dict[current_path] = {}
+
+ elif current_path is not None:
+ match = elem_re.match(line)
+ if not match or (self.vars.name is not None and match.group('elem') != self.vars.name):
+ continue
+ elems_dict[current_path][match.group('elem')] = match.group('version')
+ return elems_dict
+
+ def _list_collections(self):
+ return self._list_element('collection', self._RE_LIST_PATH, self._RE_LIST_COLL)
+
+ def _list_roles(self):
+ return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE)
+
+ def _setup29(self):
+ self.vars.set("new_collections", {})
+ self.vars.set("new_roles", {})
+ self.vars.set("ansible29_change", False, change=True, output=False)
+ if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211):
+ self.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed")
+ if self.vars.requirements_file is not None and self.vars.type == 'both':
+ self.warn("Ansible 2.9 or older: will install only roles from requirement files")
+
+ def _setup210plus(self):
+ self.vars.set("new_collections", {}, change=True)
+ self.vars.set("new_roles", {}, change=True)
+ if self.vars.type != "collection":
+ self.vars.installed_roles = self._list_roles()
+ if self.vars.type != "roles":
+ self.vars.installed_collections = self._list_collections()
+
+ def __run__(self):
+ def process(rc, out, err):
+ for line in out.splitlines():
+ match = self._RE_INSTALL_OUTPUT.match(line)
+ if not match:
+ continue
+ if match.group("collection"):
+ self.vars.new_collections[match.group("collection")] = match.group("cversion")
+ if self.is_ansible29:
+ self.vars.ansible29_change = True
+ elif match.group("role"):
+ self.vars.new_roles[match.group("role")] = match.group("rversion")
+ if self.is_ansible29:
+ self.vars.ansible29_change = True
+
+ if self.is_ansible29:
+ if self.vars.type == 'both':
+ raise ValueError("Type 'both' not supported in Ansible 2.9")
+ self._setup29()
+ else:
+ self._setup210plus()
+ with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx:
+ ctx.run(galaxy_cmd="install")
+ if self.verbosity > 2:
+ self.vars.set("run_info", ctx.run_info)
+
+
+def main():
+ AnsibleGalaxyInstall.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
new file mode 100644
index 000000000..8f561e8ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/apache2_mod_proxy.py
@@ -0,0 +1,452 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Olivier Boukili <boukili.olivier@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_mod_proxy
+author: Olivier Boukili (@oboukili)
+short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
+description:
+ - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
+ pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
+ status page has to be enabled and accessible, as this module relies on parsing
+ this page. This module supports ansible check_mode, and requires BeautifulSoup
+ python module.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ balancer_url_suffix:
+ type: str
+ description:
+ - Suffix of the balancer pool url required to access the balancer pool
+ status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
+ default: /balancer-manager/
+ balancer_vhost:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
+ required: true
+ member_host:
+ type: str
+ description:
+ - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
+ Port number is autodetected and should not be specified here.
+ If undefined, the apache2_mod_proxy module will return a list of
+ dictionaries with the attributes of all current balancer pool members.
+ state:
+ type: str
+ description:
+ - Desired state of the member host. The states C(absent)/C(disabled), C(drained),
+ C(hot_standby) and C(ignore_errors) can be invoked simultaneously by
+ separating them with a comma (for example C(state=drained,ignore_errors)).
+ - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]'
+ tls:
+ description:
+ - Use https to access balancer management page.
+ type: bool
+ default: false
+ validate_certs:
+ description:
+ - Validate ssl/tls certificates.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+- name: Get all current balancer pool members attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: 10.0.0.2
+
+- name: Get a specific member attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: myws.mydomain.org
+ balancer_url_suffix: /lb/
+ member_host: node1.myws.mydomain.org
+
+# Enable all balancer pool members:
+- name: Get attributes
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ register: result
+
+- name: Enable all balancer pool members
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ myloadbalancer_host }}'
+ member_host: '{{ item.host }}'
+ state: present
+ with_items: '{{ result.members }}'
+
+# Gracefully disable a member from a loadbalancer node:
+- name: Step 1
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 2
+ ansible.builtin.wait_for:
+ host: '{{ member.host }}'
+ port: '{{ member.port }}'
+ state: drained
+ delegate_to: myloadbalancernode
+
+- name: Step 3
+ community.general.apache2_mod_proxy:
+ balancer_vhost: '{{ vhost_host }}'
+ member_host: '{{ member.host }}'
+ state: absent
+ delegate_to: myloadbalancernode
+'''
+
+RETURN = '''
+member:
+ description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
+ type: dict
+ returned: success
+ sample:
+ {"attributes":
+ {"Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ }
+members:
+ description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
+ returned: success
+ type: list
+ sample:
+ [{"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.20",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false
+ }
+ },
+ {"attributes": {
+ "Busy": "0",
+ "Elected": "42",
+ "Factor": "1",
+ "From": "136K",
+ "Load": "0",
+ "Route": null,
+ "RouteRedir": null,
+ "Set": "0",
+ "Status": "Init Ok ",
+ "To": " 47K",
+ "Worker URL": null
+ },
+ "balancer_url": "http://10.10.0.2/balancer-manager/",
+ "host": "10.10.0.21",
+ "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
+ "path": "/ws",
+ "port": 8080,
+ "protocol": "http",
+ "status": {
+ "disabled": false,
+ "drained": false,
+ "hot_standby": false,
+ "ignore_errors": false}
+ }
+ ]
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six import iteritems
+
+BEAUTIFUL_SOUP_IMP_ERR = None
+try:
+ from BeautifulSoup import BeautifulSoup
+except ImportError:
+ BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
+ HAS_BEAUTIFULSOUP = False
+else:
+ HAS_BEAUTIFULSOUP = True
+
+# balancer member attributes extraction regexp:
+EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
+# Apache2 server version extraction regexp:
+APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
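+ # Illustrative example of a member management URL query string that EXPRESSION
+ # is meant to parse (group 2 = balancer name, 3 = protocol, 4 = host,
+ # 5 = port, 6 = path); sample taken from the RETURN documentation above:
+ #   b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b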
+
+
+def regexp_extraction(string, _regexp, groups=1):
+ """ Returns the capture group (default=1) specified in the regexp, applied to the string """
+ regexp_search = re.search(string=str(string), pattern=str(_regexp))
+ if regexp_search:
+ if regexp_search.group(groups) != '':
+ return str(regexp_search.group(groups))
+ return None
+
+
+class BalancerMember(object):
+ """ Apache 2.4 mod_proxy LB balancer member.
+ attributes:
+ read-only:
+ host -> member host (string),
+ management_url -> member management url (string),
+ protocol -> member protocol (string)
+ port -> member port (string),
+ path -> member location (string),
+ balancer_url -> url of this member's parent balancer (string),
+ attributes -> whole member attributes (dictionary)
+ module -> ansible module instance (AnsibleModule object).
+ writable:
+ status -> status of the member (dictionary)
+ """
+
+ def __init__(self, management_url, balancer_url, module):
+ self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
+ self.management_url = str(management_url)
+ self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
+ self.port = regexp_extraction(management_url, EXPRESSION, 5)
+ self.path = regexp_extraction(management_url, EXPRESSION, 6)
+ self.balancer_url = str(balancer_url)
+ self.module = module
+
+ def get_member_attributes(self):
+ """ Returns a dictionary of a balancer member's attributes."""
+
+ balancer_member_page = fetch_url(self.module, self.management_url)
+
+ if balancer_member_page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1])
+ else:
+ try:
+ soup = BeautifulSoup(balancer_member_page[0])
+ except TypeError as exc:
+ self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc))
+ else:
+ subsoup = soup.findAll('table')[1].findAll('tr')
+ keys = subsoup[0].findAll('th')
+ for valuesset in subsoup[1::1]:
+ if re.search(pattern=self.host, string=str(valuesset)):
+ values = valuesset.findAll('td')
+ return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))
+
+ def get_member_status(self):
+ """ Returns a dictionary of a balancer member's status attributes."""
+ status_mapping = {'disabled': 'Dis',
+ 'drained': 'Drn',
+ 'hot_standby': 'Stby',
+ 'ignore_errors': 'Ign'}
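+ # The Status attribute holds abbreviated flags such as "Init Dis Ok "
+ # (illustrative); each mode is detected by a substring match below.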
+ actual_status = str(self.attributes['Status'])
+ status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping))
+ return status
+
+ def set_member_status(self, values):
+ """ Sets a balancer member's status attributes amongst pre-mapped values."""
+ values_mapping = {'disabled': '&w_status_D',
+ 'drained': '&w_status_N',
+ 'hot_standby': '&w_status_H',
+ 'ignore_errors': '&w_status_I'}
+
+ request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
+ values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in iteritems(values_mapping))
+ request_body = "{0}{1}".format(request_body, values_url)
+
+ response = fetch_url(self.module, self.management_url, data=request_body)
+ if response[1]['status'] != 200:
+ self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status'])
+
+ attributes = property(get_member_attributes)
+ status = property(get_member_status, set_member_status)
+
+
+class Balancer(object):
+ """ Apache httpd 2.4 mod_proxy balancer object"""
+
+ def __init__(self, host, suffix, module, members=None, tls=False):
+ if tls:
+ self.base_url = 'https://' + str(host)
+ self.url = 'https://' + str(host) + str(suffix)
+ else:
+ self.base_url = 'http://' + str(host)
+ self.url = 'http://' + str(host) + str(suffix)
+ self.module = module
+ self.page = self.fetch_balancer_page()
+ if members is None:
+ self._members = []
+
+ def fetch_balancer_page(self):
+ """ Returns the balancer management html page as a string for later parsing."""
+ page = fetch_url(self.module, str(self.url))
+ if page[1]['status'] != 200:
+ self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(page[1]['status']))
+ else:
+ content = page[0].read()
+ apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
+ if apache_version:
+ if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
+ self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
+ return content
+ else:
+ self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")
+
+ def get_balancer_members(self):
+ """ Returns members of the balancer as a generator object for later iteration."""
+ try:
+ soup = BeautifulSoup(self.page)
+ except TypeError:
+ self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
+ else:
+ for element in soup.findAll('a')[1::1]:
+ balancer_member_suffix = str(element.get('href'))
+ if not balancer_member_suffix:
+ self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")
+ else:
+ yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module)
+
+ members = property(get_balancer_members)
+
+
+def main():
+ """ Initiates module."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ balancer_vhost=dict(required=True, type='str'),
+ balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
+ member_host=dict(type='str'),
+ state=dict(type='str'),
+ tls=dict(default=False, type='bool'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ if HAS_BEAUTIFULSOUP is False:
+ module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)
+
+ if module.params['state'] is not None:
+ states = module.params['state'].split(',')
+ if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
+ module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
+ else:
+ for _state in states:
+ if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
+ else:
+ states = ['None']
+
+ mybalancer = Balancer(module.params['balancer_vhost'],
+ module.params['balancer_url_suffix'],
+ module=module,
+ tls=module.params['tls'])
+
+ if module.params['member_host'] is None:
+ json_output_list = []
+ for member in mybalancer.members:
+ json_output_list.append({
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ })
+ module.exit_json(
+ changed=False,
+ members=json_output_list
+ )
+ else:
+ changed = False
+ member_exists = False
+ member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
+ for mode in member_status.keys():
+ for state in states:
+ if mode == state:
+ member_status[mode] = True
+ elif mode == 'disabled' and state == 'absent':
+ member_status[mode] = True
+
+ for member in mybalancer.members:
+ if str(member.host) == str(module.params['member_host']):
+ member_exists = True
+ if module.params['state'] is not None:
+ member_status_before = member.status
+ if not module.check_mode:
+ member_status_after = member.status = member_status
+ else:
+ member_status_after = member_status
+ if member_status_before != member_status_after:
+ changed = True
+ json_output = {
+ "host": member.host,
+ "status": member.status,
+ "protocol": member.protocol,
+ "port": member.port,
+ "path": member.path,
+ "attributes": member.attributes,
+ "management_url": member.management_url,
+ "balancer_url": member.balancer_url
+ }
+ if member_exists:
+ module.exit_json(
+ changed=changed,
+ member=json_output
+ )
+ else:
+ module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/apache2_module.py b/ansible_collections/community/general/plugins/modules/apache2_module.py
new file mode 100644
index 000000000..2e2456d74
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/apache2_module.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013-2014, Christian Berendt <berendt@b1-systems.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+author:
+ - Christian Berendt (@berendt)
+ - Ralf Hertel (@n0trax)
+ - Robin Roth (@robinro)
+short_description: Enables/disables a module of the Apache2 webserver
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of the module to enable/disable as given to C(a2enmod/a2dismod).
+ required: true
+ identifier:
+ type: str
+ description:
+ - Identifier of the module as listed by C(apache2ctl -M).
+ This is optional and usually determined automatically by the common convention of
+ appending C(_module) to I(name), as well as custom exceptions for popular modules.
+ required: false
+ force:
+ description:
+ - Force disabling of default modules and override Debian warnings.
+ required: false
+ type: bool
+ default: false
+ state:
+ type: str
+ description:
+ - Desired state of the module.
+ choices: ['present', 'absent']
+ default: present
+ ignore_configcheck:
+ description:
+ - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
+ type: bool
+ default: false
+ warn_mpm_absent:
+ description:
+ - Control the behavior of the warning process for MPM modules.
+ type: bool
+ default: true
+ version_added: 6.3.0
+requirements: ["a2enmod","a2dismod"]
+notes:
+ - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions.
+ Whether it works on others depends on whether the C(a2enmod) and C(a2dismod) tools are available.
+'''
+
+EXAMPLES = '''
+- name: Enable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: present
+ name: wsgi
+
+ - name: Disable the Apache2 module wsgi
+ community.general.apache2_module:
+ state: absent
+ name: wsgi
+
+- name: Disable default modules for Debian
+ community.general.apache2_module:
+ state: absent
+ name: autoindex
+ force: true
+
+- name: Disable mpm_worker and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ state: absent
+ name: mpm_worker
+ ignore_configcheck: true
+
+- name: Disable mpm_event, enable mpm_prefork and ignore warnings about missing mpm module
+ community.general.apache2_module:
+ name: "{{ item.module }}"
+ state: "{{ item.state }}"
+ warn_mpm_absent: false
+ ignore_configcheck: true
+ loop:
+ - module: mpm_event
+ state: absent
+ - module: mpm_prefork
+ state: present
+
+- name: Enable dump_io module, which is identified as dumpio_module inside apache2
+ community.general.apache2_module:
+ state: present
+ name: dump_io
+ identifier: dumpio_module
+'''
+
+RETURN = '''
+result:
+ description: message about action taken
+ returned: always
+ type: str
+warnings:
+ description: list of warning messages
+ returned: when needed
+ type: list
+rc:
+ description: return code of underlying command
+ returned: failed
+ type: int
+stdout:
+ description: stdout of underlying command
+ returned: failed
+ type: str
+stderr:
+ description: stderr of underlying command
+ returned: failed
+ type: str
+'''
+
+import re
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+_re_threaded = re.compile(r'threaded: *yes')
+
+
+def _run_threaded(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command([control_binary, "-V"])
+
+ return bool(_re_threaded.search(stdout))
+
+
+def _get_ctl_binary(module):
+ for command in ['apache2ctl', 'apachectl']:
+ ctl_binary = module.get_bin_path(command)
+ if ctl_binary is not None:
+ return ctl_binary
+
+ module.fail_json(msg="Neither of apache2ctl nor apachctl found. At least one apache control binary is necessary.")
+
+
+def _module_is_enabled(module):
+ control_binary = _get_ctl_binary(module)
+ result, stdout, stderr = module.run_command([control_binary, "-M"])
+
+ if result != 0:
+ error_msg = "Error executing %s: %s" % (control_binary, stderr)
+ if module.params['ignore_configcheck']:
+ if 'AH00534' in stderr and 'mpm_' in module.params['name']:
+ if module.params['warn_mpm_absent']:
+ module.warnings.append(
+ "No MPM module loaded! apache2 reload AND other module actions"
+ " will fail if no MPM module is loaded immediately."
+ )
+ else:
+ module.warnings.append(error_msg)
+ return False
+ else:
+ module.fail_json(msg=error_msg)
+
+ searchstring = ' ' + module.params['identifier']
+ return searchstring in stdout
+
+
+def create_apache_identifier(name):
+ """
+ By convention if a module is loaded via name, it appears in apache2ctl -M as
+ name_module.
+
+ Some modules don't follow this convention and we use replacements for those."""
+
+ # a2enmod name replacement to apache2ctl -M names
+ text_workarounds = [
+ ('shib', 'mod_shib'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ]
+
+ # re expressions to extract subparts of names
+ re_workarounds = [
+ ('php', re.compile(r'^(php\d)\.')),
+ ]
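+ # Illustrative mappings produced by this function:
+ #   'ssl'     -> 'ssl_module'
+ #   'php7.4'  -> 'php7_module'
+ #   'evasive' -> 'evasive20_module'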
+
+ for a2enmod_spelling, module_name in text_workarounds:
+ if a2enmod_spelling in name:
+ return module_name
+
+ for search, reexpr in re_workarounds:
+ if search in name:
+ try:
+ rematch = reexpr.search(name)
+ return rematch.group(1) + '_module'
+ except AttributeError:
+ pass
+
+ return name + '_module'
+
+
+def _set_state(module, state):
+ name = module.params['name']
+ force = module.params['force']
+
+ want_enabled = state == 'present'
+ state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
+ a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
+ success_msg = "Module %s %s" % (name, state_string)
+
+ if _module_is_enabled(module) != want_enabled:
+ if module.check_mode:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+
+ a2mod_binary_path = module.get_bin_path(a2mod_binary)
+ if a2mod_binary_path is None:
+ module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary, a2mod_binary))
+
+ a2mod_binary_cmd = [a2mod_binary_path]
+
+ if not want_enabled and force:
+ # force exists only for a2dismod on debian
+ a2mod_binary_cmd.append('-f')
+
+ result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name])
+
+ if _module_is_enabled(module) == want_enabled:
+ module.exit_json(changed=True,
+ result=success_msg,
+ warnings=module.warnings)
+ else:
+ msg = (
+ 'Failed to set module {name} to {state}:\n'
+ '{stdout}\n'
+ 'Maybe the module identifier ({identifier}) was guessed incorrectly. '
+ 'Consider setting the "identifier" option.'
+ ).format(
+ name=name,
+ state=state_string,
+ stdout=stdout,
+ identifier=module.params['identifier']
+ )
+ module.fail_json(msg=msg,
+ rc=result,
+ stdout=stdout,
+ stderr=stderr)
+ else:
+ module.exit_json(changed=False,
+ result=success_msg,
+ warnings=module.warnings)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ identifier=dict(type='str'),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['absent', 'present']),
+ ignore_configcheck=dict(type='bool', default=False),
+ warn_mpm_absent=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.warnings = []
+
+ name = module.params['name']
+ if name == 'cgi' and _run_threaded(module):
+ module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.")
+
+ if not module.params['identifier']:
+ module.params['identifier'] = create_apache_identifier(module.params['name'])
+
+ if module.params['state'] in ['present', 'absent']:
+ _set_state(module, module.params['state'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/apk.py b/ansible_collections/community/general/plugins/modules/apk.py
new file mode 100644
index 000000000..e56b2165d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/apk.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Kevin Brebanov <https://github.com/kbrebanov>
+# Based on pacman (Afterburn <https://github.com/afterburn>, Aaron Bull Schaefer <aaron@elasticdog.com>)
+# and apt (Matthew Williams <matthew@flowroute.com>) modules.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: apk
+short_description: Manages apk packages
+description:
+ - Manages I(apk) packages for Alpine Linux.
+author: "Kevin Brebanov (@kbrebanov)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ available:
+ description:
+ - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them)
+ if the currently installed package is no longer available from any repository.
+ type: bool
+ default: false
+ name:
+ description:
+ - A package name, like C(foo), or multiple packages, like C(foo, bar).
+ type: list
+ elements: str
+ no_cache:
+ description:
+ - Do not use any local cache path.
+ type: bool
+ default: false
+ version_added: 1.0.0
+ repository:
+ description:
+ - A package repository or multiple repositories.
+ Unlike with the underlying apk command, this list will override the system repositories rather than supplement them.
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package(s) state.
+ - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias.
+ - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias.
+ - C(latest) ensures the package(s) is/are present and the latest version(s).
+ default: present
+ choices: [ "present", "absent", "latest", "installed", "removed" ]
+ type: str
+ update_cache:
+ description:
+ - Update repository indexes. Can be run with other steps or on its own.
+ type: bool
+ default: false
+ upgrade:
+ description:
+ - Upgrade all installed packages to their latest version.
+ type: bool
+ default: false
+ world:
+ description:
+ - Use a custom world file when checking for explicitly installed packages.
+ type: str
+ default: /etc/apk/world
+ version_added: 5.4.0
+notes:
+ - 'I(name) and I(upgrade) are mutually exclusive.'
+ - When used with a C(loop:), each package is processed individually; it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Update repositories and install foo package
+ community.general.apk:
+ name: foo
+ update_cache: true
+
+- name: Update repositories and install foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ update_cache: true
+
+- name: Remove foo package
+ community.general.apk:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar packages
+ community.general.apk:
+ name: foo,bar
+ state: absent
+
+- name: Install the package foo
+ community.general.apk:
+ name: foo
+ state: present
+
+- name: Install the packages foo and bar
+ community.general.apk:
+ name: foo,bar
+ state: present
+
+- name: Update repositories and update package foo to latest version
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: true
+
+- name: Update repositories and update packages foo and bar to latest versions
+ community.general.apk:
+ name: foo,bar
+ state: latest
+ update_cache: true
+
+- name: Update all installed packages to the latest versions
+ community.general.apk:
+ upgrade: true
+
+- name: Upgrade / replace / downgrade / uninstall all installed packages to the latest versions available
+ community.general.apk:
+ available: true
+ upgrade: true
+
+- name: Update repositories as a separate step
+ community.general.apk:
+ update_cache: true
+
+- name: Install package from a specific repository
+ community.general.apk:
+ name: foo
+ state: latest
+ update_cache: true
+ repository: http://dl-3.alpinelinux.org/alpine/edge/main
+
+- name: Install package without using cache
+ community.general.apk:
+ name: foo
+ state: latest
+ no_cache: true
+
+- name: Install package checking a custom world
+ community.general.apk:
+ name: foo
+ state: latest
+ world: /etc/apk/world.custom
+'''
+
+RETURN = '''
+packages:
+ description: a list of packages that have been changed
+ returned: when packages have changed
+ type: list
+ sample: ['package', 'other-package']
+'''
+
+import re
+# Import module snippets.
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_for_packages(stdout):
+ packages = []
+ data = stdout.split('\n')
+ regex = re.compile(r'^\(\d+/\d+\)\s+\S+\s+(\S+)')
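+ # The regex above captures package names from apk output lines such as
+ # (illustrative): "(1/4) Installing libfoo (1.2.3-r0)"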
+ for line in data:
+ p = regex.search(line)
+ if p:
+ packages.append(p.group(1))
+ return packages
+
+
+def update_package_db(module, exit):
+ cmd = "%s update" % (APK_PATH)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr)
+ elif exit:
+ module.exit_json(changed=True, msg='updated repository indexes', stdout=stdout, stderr=stderr)
+ else:
+ return True
+
+
+def query_toplevel(module, name, world):
+ # world contains a list of top-level packages separated by ' ' or \n
+ # packages may contain repository (@) or version (=<>~) separator characters or start with negation !
+ regex = re.compile(r'^' + re.escape(name) + r'([@=<>~].+)?$')
+ with open(world) as f:
+ content = f.read().split()
+ for p in content:
+ if regex.search(p):
+ return True
+ return False
+
+
+def query_package(module, name):
+ cmd = "%s -v info --installed %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_latest(module, name):
+ cmd = "%s version %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name))
+ match = re.search(search_pattern, stdout)
+ if match and match.group(2) == "<":
+ return False
+ return True
+
+
+def query_virtual(module, name):
+ cmd = "%s -v info --description %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ search_pattern = r"^%s: virtual meta package" % (re.escape(name))
+ if re.search(search_pattern, stdout):
+ return True
+ return False
+
+
+def get_dependencies(module, name):
+ cmd = "%s -v info --depends %s" % (APK_PATH, name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ dependencies = stdout.split()
+ if len(dependencies) > 1:
+ return dependencies[1:]
+ else:
+ return []
+
+
+def upgrade_packages(module, available):
+ if module.check_mode:
+ cmd = "%s upgrade --simulate" % (APK_PATH)
+ else:
+ cmd = "%s upgrade" % (APK_PATH)
+ if available:
+ cmd = "%s --available" % cmd
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to upgrade packages", stdout=stdout, stderr=stderr, packages=packagelist)
+ if re.search(r'^OK', stdout):
+ module.exit_json(changed=False, msg="packages already upgraded", stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="upgraded packages", stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def install_packages(module, names, state, world):
+ upgrade = False
+ to_install = []
+ to_upgrade = []
+ for name in names:
+ # Check if virtual package
+ if query_virtual(module, name):
+ # Get virtual package dependencies
+ dependencies = get_dependencies(module, name)
+ for dependency in dependencies:
+ if state == 'latest' and not query_latest(module, dependency):
+ to_upgrade.append(dependency)
+ else:
+ if not query_toplevel(module, name, world):
+ to_install.append(name)
+ elif state == 'latest' and not query_latest(module, name):
+ to_upgrade.append(name)
+ if to_upgrade:
+ upgrade = True
+ if not to_install and not upgrade:
+ module.exit_json(changed=False, msg="package(s) already installed")
+ packages = " ".join(to_install + to_upgrade)
+ if upgrade:
+ if module.check_mode:
+ cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add --upgrade %s" % (APK_PATH, packages)
+ else:
+ if module.check_mode:
+ cmd = "%s add --simulate %s" % (APK_PATH, packages)
+ else:
+ cmd = "%s add %s" % (APK_PATH, packages)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="installed %s package(s)" % (packages), stdout=stdout, stderr=stderr, packages=packagelist)
+
+
+def remove_packages(module, names):
+ installed = []
+ for name in names:
+ if query_package(module, name):
+ installed.append(name)
+ if not installed:
+ module.exit_json(changed=False, msg="package(s) already removed")
+ names = " ".join(installed)
+ if module.check_mode:
+ cmd = "%s del --purge --simulate %s" % (APK_PATH, names)
+ else:
+ cmd = "%s del --purge %s" % (APK_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ packagelist = parse_for_packages(stdout)
+ # Check to see if packages are still present because of dependencies
+ for name in installed:
+ if query_package(module, name):
+ rc = 1
+ break
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+ module.exit_json(changed=True, msg="removed %s package(s)" % (names), stdout=stdout, stderr=stderr, packages=packagelist)
+
+# ==========================================
+# Main control flow.
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'installed', 'absent', 'removed', 'latest']),
+ name=dict(type='list', elements='str'),
+ no_cache=dict(default=False, type='bool'),
+ repository=dict(type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ available=dict(default=False, type='bool'),
+ world=dict(default='/etc/apk/world', type='str'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ mutually_exclusive=[['name', 'upgrade']],
+ supports_check_mode=True
+ )
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ global APK_PATH
+ APK_PATH = module.get_bin_path('apk', required=True)
+
+ p = module.params
+
+ if p['no_cache']:
+ APK_PATH = "%s --no-cache" % (APK_PATH, )
+
+    # append repository options to the apk command; --repositories-file /dev/null
+    # makes apk use only the repositories given here
+ if p['repository']:
+ for r in p['repository']:
+ APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r)
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ if p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_package_db(module, not p['name'] and not p['upgrade'])
+
+ if p['upgrade']:
+ upgrade_packages(module, p['available'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['name'], p['state'], p['world'])
+ elif p['state'] == 'absent':
+ remove_packages(module, p['name'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/apt_repo.py b/ansible_collections/community/general/plugins/modules/apt_repo.py
new file mode 100644
index 000000000..556039027
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/apt_repo.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Mikhail Gordeev
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_repo
+short_description: Manage APT repositories via apt-repo
+description:
+  - Manages APT repositories using the apt-repo tool.
+  - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo.
+notes:
+  - This module works on ALT-based distributions.
+  - Does NOT support check mode, due to a limitation in the apt-repo tool.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ repo:
+ description:
+ - Name of the repository to add or remove.
+ required: true
+ type: str
+ state:
+ description:
+ - Indicates the desired repository state.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ remove_others:
+ description:
+      - Remove all other repositories except the one being added.
+      - Only used when I(state=present).
+ type: bool
+ default: false
+ update:
+ description:
+ - Update the package database after changing repositories.
+ type: bool
+ default: false
+author:
+- Mikhail Gordeev (@obirvalger)
+'''
+
+EXAMPLES = '''
+- name: Remove all repositories
+ community.general.apt_repo:
+ repo: all
+ state: absent
+
+- name: Add repository `Sisyphus` and remove other repositories
+  community.general.apt_repo:
+    repo: Sisyphus
+ state: present
+ remove_others: true
+
+- name: Add local repository `/space/ALT/Sisyphus` and update package cache
+ community.general.apt_repo:
+ repo: copy:///space/ALT/Sisyphus
+ state: present
+ update: true
+'''
+
+RETURN = ''' # '''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_REPO_PATH = "/usr/bin/apt-repo"
+
+
+def apt_repo(module, *args):
+ """run apt-repo with args and return its output"""
+    # convert the args tuple to a list so it can be concatenated below
+ args = list(args)
+ rc, out, err = module.run_command([APT_REPO_PATH] + args)
+
+ if rc != 0:
+ module.fail_json(msg="'%s' failed: %s" % (' '.join(['apt-repo'] + args), err))
+
+ return out
+
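+# Editor's sketch (hypothetical call, not part of the original module):
+#
+#     out = apt_repo(module, 'add', 'Sisyphus')
+#
+# runs "/usr/bin/apt-repo add Sisyphus", fails the task with the captured
+# stderr if the command exits non-zero, and returns stdout on success.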
+
+def add_repo(module, repo):
+ """add a repository"""
+ apt_repo(module, 'add', repo)
+
+
+def rm_repo(module, repo):
+ """remove a repository"""
+ apt_repo(module, 'rm', repo)
+
+
+def set_repo(module, repo):
+    """add a repository and remove all other repositories"""
+    # add the repository first to validate it; failing here leaves the
+    # existing configuration untouched
+    apt_repo(module, 'add', repo)
+    # drop everything (including the repository just added), then re-add it
+    # so it ends up as the only configured repository
+    apt_repo(module, 'rm', 'all')
+    apt_repo(module, 'add', repo)
+
+
+def update(module):
+ """update package cache"""
+ apt_repo(module, 'update')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ remove_others=dict(type='bool', default=False),
+ update=dict(type='bool', default=False),
+ ),
+ )
+
+ if not os.path.exists(APT_REPO_PATH):
+ module.fail_json(msg='cannot find /usr/bin/apt-repo')
+
+ params = module.params
+ repo = params['repo']
+ state = params['state']
+ old_repositories = apt_repo(module)
+
+ if state == 'present':
+ if params['remove_others']:
+ set_repo(module, repo)
+ else:
+ add_repo(module, repo)
+ elif state == 'absent':
+ rm_repo(module, repo)
+
+ if params['update']:
+ update(module)
+
+ new_repositories = apt_repo(module)
+ changed = old_repositories != new_repositories
+ module.exit_json(changed=changed, repo=repo, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/apt_rpm.py b/ansible_collections/community/general/plugins/modules/apt_rpm.py
new file mode 100644
index 000000000..8749086bb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/apt_rpm.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov <evg@altlinux.org>
+# Based on urpmi module written by Philippe Makowski <philippem@mageia.org>
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: APT-RPM package manager
+description:
+  - Manages packages with I(apt-rpm). Both the low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries are required.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ package:
+ description:
+ - List of packages to install, upgrade, or remove.
+ aliases: [ name, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
+ - Default is not to update the cache.
+ type: bool
+ default: false
+ clean:
+ description:
+ - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything but
+ the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/).
+ - Can be run as part of the package installation (clean runs before install) or as a separate step.
+ type: bool
+ default: false
+ version_added: 6.5.0
+ dist_upgrade:
+ description:
+      - If true, performs an C(apt-get dist-upgrade) to upgrade the system.
+ type: bool
+ default: false
+ version_added: 6.5.0
+ update_kernel:
+ description:
+      - If true, performs an C(update-kernel) to upgrade the kernel packages.
+ type: bool
+ default: false
+ version_added: 6.5.0
+author:
+- Evgenii Terechkov (@evgkrsk)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: present
+
+- name: Install packages foo and bar
+ community.general.apt_rpm:
+ pkg:
+ - foo
+ - bar
+ state: present
+
+- name: Remove package foo
+ community.general.apt_rpm:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.apt_rpm:
+ pkg: foo,bar
+ state: absent
+
+# bar will be updated if a newer version exists
+- name: Update the package database and install bar
+ community.general.apt_rpm:
+ name: bar
+ state: present
+ update_cache: true
+
+- name: Run the equivalent of "apt-get clean" as a separate step
+ community.general.apt_rpm:
+ clean: true
+
+- name: Perform cache update and complete system upgrade (includes kernel)
+ community.general.apt_rpm:
+ update_cache: true
+ dist_upgrade: true
+ update_kernel: true
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+APT_PATH = "/usr/bin/apt-get"
+RPM_PATH = "/usr/bin/rpm"
+APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
+UPDATE_KERNEL_ZERO = "\nTry to install new kernel "
+
+
+def query_package(module, name):
+    # rpm -q returns 0 if the package is installed,
+    # 1 if it is not installed
+    rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name))
+    return rc == 0
+
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name))
+ return rc == 0
+
+
+def update_package_db(module):
+ rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"})
+ return (False, update_out)
+
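+# Editor's note: update_package_db() always reports "not changed", since
+# refreshing the package cache is not treated as a state change; check_rc=True
+# makes run_command() fail the module on a non-zero exit code.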
+
+def dir_size(module, path):
+    total_size = 0
+    # use a distinct loop variable so the path argument is not shadowed
+    for dir_path, dummy, files in os.walk(path):
+        for f in files:
+            total_size += os.path.getsize(os.path.join(dir_path, f))
+    return total_size
+
+
+def clean(module):
+ t = dir_size(module, "/var/cache/apt/archives")
+ rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True)
+ return (t != dir_size(module, "/var/cache/apt/archives"), out)
+
+
+def dist_upgrade(module):
+ rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"})
+ return (APT_GET_ZERO not in out, out)
+
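+# Editor's sketch: change detection scans apt-get's summary line. A no-op run
+# prints "0 upgraded, 0 newly installed, ..." (matched via APT_GET_ZERO):
+#
+#     changed, out = dist_upgrade(module)  # hypothetical call
+#     # changed is False when the summary reports nothing was upgraded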
+
+def update_kernel(module):
+ rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"})
+ return (UPDATE_KERNEL_ZERO not in out, out)
+
+
+def remove_packages(module, packages):
+
+ if packages is None:
+ return (False, "Empty package list")
+
+ remove_c = 0
+    # iterate one package at a time so that, on error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package), environ_update={"LANG": "C"})
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, err))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c)
+
+ return (False, "package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+    if pkgspec is None:
+        return (False, "Empty package list")
+
+    packages = ""
+    for package in pkgspec:
+        if not query_package_provides(module, package):
+            packages += "'%s' " % package
+
+    if packages:
+
+        rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages), environ_update={"LANG": "C"})
+
+        installed = True
+        for package in pkgspec:
+            if not query_package_provides(module, package):
+                installed = False
+
+        # apt-rpm always exits with code 0 if --force is used, so re-query
+        # the packages to verify that the installation actually succeeded
+        if rc or not installed:
+            module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+        else:
+            return (True, "installed %s package(s)" % packages)
+    else:
+        return (False, "Nothing to install")
+
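+# Editor's sketch (hypothetical values): install_packages(module, ['foo', 'bar'])
+# with only 'bar' missing runs
+#
+#     apt-get -y install 'bar'
+#
+# and then re-queries rpm for every requested package, because apt-get may
+# exit with code 0 even when the installation failed.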
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ dist_upgrade=dict(type='bool', default=False),
+ update_kernel=dict(type='bool', default=False),
+ package=dict(type='list', elements='str', aliases=['name', 'pkg']),
+ ),
+ )
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+ modified = False
+ output = ""
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['clean']:
+ (m, out) = clean(module)
+ modified = modified or m
+
+ if p['dist_upgrade']:
+ (m, out) = dist_upgrade(module)
+ modified = modified or m
+ output += out
+
+ if p['update_kernel']:
+ (m, out) = update_kernel(module)
+ modified = modified or m
+ output += out
+
+ packages = p['package']
+ if p['state'] in ['installed', 'present']:
+ (m, out) = install_packages(module, packages)
+ modified = modified or m
+ output += out
+
+ if p['state'] in ['absent', 'removed']:
+ (m, out) = remove_packages(module, packages)
+ modified = modified or m
+ output += out
+
+ # Return total modification status and output of all commands
+ module.exit_json(changed=modified, msg=output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/archive.py b/ansible_collections/community/general/plugins/modules/archive.py
new file mode 100644
index 000000000..8748fb8a3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/archive.py
@@ -0,0 +1,686 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Ben Doherty <bendohmv@gmail.com>
+# Sponsored by Oomph, Inc. http://www.oomphinc.com
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: archive
+short_description: Creates a compressed archive of one or more files or trees
+extends_documentation_fragment:
+ - files
+ - community.general.attributes
+description:
+ - Creates or extends an archive.
+ - The source and archive are on the remote host, and the archive I(is not) copied to the local host.
+ - Source files can be deleted after archival by specifying I(remove=True).
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
+ type: list
+ elements: path
+ required: true
+ format:
+ description:
+ - The type of compression to use.
+ - Support for xz was added in Ansible 2.5.
+ type: str
+ choices: [ bz2, gz, tar, xz, zip ]
+ default: gz
+ dest:
+ description:
+      - The file name of the destination archive. The parent directory must exist on the remote host.
+      - This is required when C(path) refers to multiple files by either specifying a glob, a directory, or multiple paths in a list.
+ - If the destination archive already exists, it will be truncated and overwritten.
+ type: path
+ exclude_path:
+ description:
+ - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion.
+ - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list.
+ type: list
+ elements: path
+ default: []
+ exclusion_patterns:
+ description:
+ - Glob style patterns to exclude files or directories from the resulting archive.
+ - This differs from I(exclude_path) which applies only to the source paths from I(path).
+ type: list
+ elements: path
+ version_added: 3.2.0
+ force_archive:
+ description:
+ - Allows you to force the module to treat this as an archive even if only a single file is specified.
+      - By default, when a single file is specified, it is compressed only (not archived).
+ - Enable this if you want to use M(ansible.builtin.unarchive) on an archive of a single file created with this module.
+ type: bool
+ default: false
+ remove:
+ description:
+ - Remove any added source files and trees after adding to archive.
+ type: bool
+ default: false
+notes:
+ - Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives.
+ - This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives.
+ These are part of the Python standard library for Python 2 and 3.
+requirements:
+ - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format.
+seealso:
+ - module: ansible.builtin.unarchive
+author:
+ - Ben Doherty (@bendoh)
+'''
+
+EXAMPLES = r'''
+- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
+ community.general.archive:
+ path: /path/to/foo
+ dest: /path/to/foo.tgz
+
+- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
+ community.general.archive:
+ path: /path/to/foo
+ remove: true
+
+- name: Create a zip archive of /path/to/foo
+ community.general.archive:
+ path: /path/to/foo
+ format: zip
+
+- name: Create a bz2 archive of multiple files, rooted at /path
+ community.general.archive:
+ path:
+ - /path/to/foo
+ - /path/wong/foo
+ dest: /path/file.tar.bz2
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/bar
+ - /path/to/foo/baz
+ format: bz2
+
+- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
+ community.general.archive:
+ path:
+ - /path/to/foo/*
+ dest: /path/file.tar.bz2
+ exclude_path:
+ - /path/to/foo/ba*
+ format: bz2
+
+- name: Use gzip to compress a single file (i.e. don't archive it first with tar)
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.gz
+ format: gz
+
+- name: Create a tar.gz archive of a single file.
+ community.general.archive:
+ path: /path/to/foo/single.file
+ dest: /path/file.tar.gz
+ format: gz
+ force_archive: true
+'''
+
+RETURN = r'''
+state:
+ description:
+ The state of the input C(path).
+ type: str
+ returned: always
+dest_state:
+ description:
+ - The state of the I(dest) file.
+ - C(absent) when the file does not exist.
+ - C(archive) when the file is an archive.
+ - C(compress) when the file is compressed, but not an archive.
+ - C(incomplete) when the file is an archive, but some files under I(path) were not found.
+ type: str
+ returned: success
+ version_added: 3.4.0
+missing:
+ description: Any files that were missing from the source.
+ type: list
+ returned: success
+archived:
+ description: Any files that were compressed or added to the archive.
+ type: list
+ returned: success
+arcroot:
+ description: The archive root.
+ type: str
+ returned: always
+expanded_paths:
+ description: The list of matching paths from paths argument.
+ type: list
+ returned: always
+expanded_exclude_paths:
+ description: The list of matching exclude paths from the exclude_path argument.
+ type: list
+ returned: always
+'''
+
+import abc
+import bz2
+import glob
+import gzip
+import io
+import os
+import re
+import shutil
+import tarfile
+import zipfile
+from fnmatch import fnmatch
+from sys import version_info
+from traceback import format_exc
+from zlib import crc32
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils import six
+
+try: # python 3.2+
+ from zipfile import BadZipFile # type: ignore[attr-defined]
+except ImportError: # older python
+ from zipfile import BadZipfile as BadZipFile
+
+LZMA_IMP_ERR = None
+if six.PY3:
+ try:
+ import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+else:
+ try:
+ from backports import lzma
+ HAS_LZMA = True
+ except ImportError:
+ LZMA_IMP_ERR = format_exc()
+ HAS_LZMA = False
+
+PY27 = version_info[0:2] >= (2, 7)
+
+STATE_ABSENT = 'absent'
+STATE_ARCHIVED = 'archive'
+STATE_COMPRESSED = 'compress'
+STATE_INCOMPLETE = 'incomplete'
+
+
+def common_path(paths):
+ empty = b'' if paths and isinstance(paths[0], six.binary_type) else ''
+
+ return os.path.join(
+ os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty
+ )
+
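+# Worked example (editor's note): common_path([b'/data/logs/app/a.log',
+# b'/data/logs/b.log']) returns b'/data/logs/': the common string prefix is
+# cut back to a directory boundary so a partial directory name never becomes
+# the archive root.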
+
+def expand_paths(paths):
+ expanded_path = []
+ is_globby = False
+ for path in paths:
+ b_path = _to_bytes(path)
+ if b'*' in b_path or b'?' in b_path:
+ e_paths = glob.glob(b_path)
+ is_globby = True
+ else:
+ e_paths = [b_path]
+ expanded_path.extend(e_paths)
+ return expanded_path, is_globby
+
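+# Editor's sketch (hypothetical values): expand_paths(['/tmp/*.txt', '/etc/hosts'])
+# globs only the first entry and returns (glob_matches + [b'/etc/hosts'], True);
+# the boolean records that a glob was seen, which later forces must_archive.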
+
+def matches_exclusion_patterns(path, exclusion_patterns):
+ return any(fnmatch(path, p) for p in exclusion_patterns)
+
+
+def is_archive(path):
+ return re.search(br'\.(tar|tar\.(gz|bz2|xz)|tgz|tbz2|zip)$', os.path.basename(path), re.IGNORECASE)
+
+
+def strip_prefix(prefix, string):
+ return string[len(prefix):] if string.startswith(prefix) else string
+
+
+def _to_bytes(s):
+ return to_bytes(s, errors='surrogate_or_strict')
+
+
+def _to_native(s):
+ return to_native(s, errors='surrogate_or_strict')
+
+
+def _to_native_ascii(s):
+ return to_native(s, errors='surrogate_or_strict', encoding='ascii')
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Archive(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.destination = _to_bytes(module.params['dest']) if module.params['dest'] else None
+ self.exclusion_patterns = module.params['exclusion_patterns'] or []
+ self.format = module.params['format']
+ self.must_archive = module.params['force_archive']
+ self.remove = module.params['remove']
+
+ self.changed = False
+ self.destination_state = STATE_ABSENT
+ self.errors = []
+ self.file = None
+ self.successes = []
+ self.targets = []
+ self.not_found = []
+
+ paths = module.params['path']
+ self.expanded_paths, has_globs = expand_paths(paths)
+ self.expanded_exclude_paths = expand_paths(module.params['exclude_path'])[0]
+
+ self.paths = sorted(set(self.expanded_paths) - set(self.expanded_exclude_paths))
+
+ if not self.paths:
+ module.fail_json(
+ path=', '.join(paths),
+ expanded_paths=_to_native(b', '.join(self.expanded_paths)),
+ expanded_exclude_paths=_to_native(b', '.join(self.expanded_exclude_paths)),
+ msg='Error, no source paths were found'
+ )
+
+ self.root = common_path(self.paths)
+
+ if not self.must_archive:
+ self.must_archive = any([has_globs, os.path.isdir(self.paths[0]), len(self.paths) > 1])
+
+ if not self.destination and not self.must_archive:
+ self.destination = b'%s.%s' % (self.paths[0], _to_bytes(self.format))
+
+ if self.must_archive and not self.destination:
+ module.fail_json(
+ dest=_to_native(self.destination),
+ path=', '.join(paths),
+ msg='Error, must specify "dest" when archiving multiple files or trees'
+ )
+
+ if self.remove:
+ self._check_removal_safety()
+
+ self.original_checksums = self.destination_checksums()
+ self.original_size = self.destination_size()
+
+ def add(self, path, archive_name):
+ try:
+ self._add(_to_native_ascii(path), _to_native(archive_name))
+ if self.contains(_to_native(archive_name)):
+ self.successes.append(path)
+ except Exception as e:
+ self.errors.append('%s: %s' % (_to_native_ascii(path), _to_native(e)))
+
+ def add_single_target(self, path):
+ if self.format in ('zip', 'tar'):
+ self.open()
+ self.add(path, strip_prefix(self.root, path))
+ self.close()
+ self.destination_state = STATE_ARCHIVED
+ else:
+ try:
+ f_out = self._open_compressed_file(_to_native_ascii(self.destination), 'wb')
+ with open(path, 'rb') as f_in:
+ shutil.copyfileobj(f_in, f_out)
+ f_out.close()
+ self.successes.append(path)
+ self.destination_state = STATE_COMPRESSED
+ except (IOError, OSError) as e:
+ self.module.fail_json(
+ path=_to_native(path),
+ dest=_to_native(self.destination),
+ msg='Unable to write to compressed file: %s' % _to_native(e), exception=format_exc()
+ )
+
+ def add_targets(self):
+ self.open()
+ try:
+ for target in self.targets:
+ if os.path.isdir(target):
+ for directory_path, directory_names, file_names in os.walk(target, topdown=True):
+ for directory_name in directory_names:
+ full_path = os.path.join(directory_path, directory_name)
+ self.add(full_path, strip_prefix(self.root, full_path))
+
+ for file_name in file_names:
+ full_path = os.path.join(directory_path, file_name)
+ self.add(full_path, strip_prefix(self.root, full_path))
+ else:
+ self.add(target, strip_prefix(self.root, target))
+ except Exception as e:
+ if self.format in ('zip', 'tar'):
+ archive_format = self.format
+ else:
+ archive_format = 'tar.' + self.format
+ self.module.fail_json(
+ msg='Error when writing %s archive at %s: %s' % (
+ archive_format, _to_native(self.destination), _to_native(e)
+ ),
+ exception=format_exc()
+ )
+ self.close()
+
+ if self.errors:
+ self.module.fail_json(
+ msg='Errors when writing archive at %s: %s' % (_to_native(self.destination), '; '.join(self.errors))
+ )
+
+ def is_different_from_original(self):
+ if self.original_checksums is None:
+ return self.original_size != self.destination_size()
+ else:
+ return self.original_checksums != self.destination_checksums()
+
+ def destination_checksums(self):
+ if self.destination_exists() and self.destination_readable():
+ return self._get_checksums(self.destination)
+ return None
+
+ def destination_exists(self):
+ return self.destination and os.path.exists(self.destination)
+
+ def destination_readable(self):
+ return self.destination and os.access(self.destination, os.R_OK)
+
+ def destination_size(self):
+ return os.path.getsize(self.destination) if self.destination_exists() else 0
+
+ def find_targets(self):
+ for path in self.paths:
+ if not os.path.lexists(path):
+ self.not_found.append(path)
+ else:
+ self.targets.append(path)
+
+ def has_targets(self):
+ return bool(self.targets)
+
+ def has_unfound_targets(self):
+ return bool(self.not_found)
+
+ def remove_single_target(self, path):
+ try:
+ os.remove(path)
+ except OSError as e:
+ self.module.fail_json(
+ path=_to_native(path),
+ msg='Unable to remove source file: %s' % _to_native(e), exception=format_exc()
+ )
+
+ def remove_targets(self):
+ for path in self.successes:
+ if os.path.exists(path):
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+ except OSError:
+ self.errors.append(_to_native(path))
+ for path in self.paths:
+ try:
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+ except OSError:
+ self.errors.append(_to_native(path))
+
+ if self.errors:
+ self.module.fail_json(
+ dest=_to_native(self.destination), msg='Error deleting some source files: ', files=self.errors
+ )
+
+ def update_permissions(self):
+ file_args = self.module.load_file_common_arguments(self.module.params, path=self.destination)
+ self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed)
+
+ @property
+ def result(self):
+ return {
+ 'archived': [_to_native(p) for p in self.successes],
+ 'dest': _to_native(self.destination),
+ 'dest_state': self.destination_state,
+ 'changed': self.changed,
+ 'arcroot': _to_native(self.root),
+ 'missing': [_to_native(p) for p in self.not_found],
+ 'expanded_paths': [_to_native(p) for p in self.expanded_paths],
+ 'expanded_exclude_paths': [_to_native(p) for p in self.expanded_exclude_paths],
+ }
+
+ def _check_removal_safety(self):
+ for path in self.paths:
+ if os.path.isdir(path) and self.destination.startswith(os.path.join(path, b'')):
+ self.module.fail_json(
+ path=b', '.join(self.paths),
+ msg='Error, created archive can not be contained in source paths when remove=true'
+ )
+
+ def _open_compressed_file(self, path, mode):
+ f = None
+ if self.format == 'gz':
+ f = gzip.open(path, mode)
+ elif self.format == 'bz2':
+ f = bz2.BZ2File(path, mode)
+ elif self.format == 'xz':
+ f = lzma.LZMAFile(path, mode)
+ else:
+ self.module.fail_json(msg="%s is not a valid format" % self.format)
+
+ return f
+
+ @abc.abstractmethod
+ def close(self):
+ pass
+
+ @abc.abstractmethod
+ def contains(self, name):
+ pass
+
+ @abc.abstractmethod
+ def open(self):
+ pass
+
+ @abc.abstractmethod
+ def _add(self, path, archive_name):
+ pass
+
+ @abc.abstractmethod
+ def _get_checksums(self, path):
+ pass
+
+
+class ZipArchive(Archive):
+ def __init__(self, module):
+ super(ZipArchive, self).__init__(module)
+
+ def close(self):
+ self.file.close()
+
+ def contains(self, name):
+ try:
+ self.file.getinfo(name)
+ except KeyError:
+ return False
+ return True
+
+ def open(self):
+        self.file = zipfile.ZipFile(_to_native_ascii(self.destination), 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
+
+ def _add(self, path, archive_name):
+ if not matches_exclusion_patterns(path, self.exclusion_patterns):
+ self.file.write(path, archive_name)
+
+ def _get_checksums(self, path):
+ try:
+ archive = zipfile.ZipFile(_to_native_ascii(path), 'r')
+ checksums = set((info.filename, info.CRC) for info in archive.infolist())
+ archive.close()
+ except BadZipFile:
+ checksums = set()
+ return checksums
+
+
+class TarArchive(Archive):
+ def __init__(self, module):
+ super(TarArchive, self).__init__(module)
+ self.fileIO = None
+
+ def close(self):
+ self.file.close()
+ if self.format == 'xz':
+ with lzma.open(_to_native(self.destination), 'wb') as f:
+ f.write(self.fileIO.getvalue())
+ self.fileIO.close()
+
+ def contains(self, name):
+ try:
+ self.file.getmember(name)
+ except KeyError:
+ return False
+ return True
+
+ def open(self):
+ if self.format in ('gz', 'bz2'):
+ self.file = tarfile.open(_to_native_ascii(self.destination), 'w|' + self.format)
+ # python3 tarfile module allows xz format but for python2 we have to create the tarfile
+ # in memory and then compress it with lzma.
+ elif self.format == 'xz':
+ self.fileIO = io.BytesIO()
+ self.file = tarfile.open(fileobj=self.fileIO, mode='w')
+ elif self.format == 'tar':
+ self.file = tarfile.open(_to_native_ascii(self.destination), 'w')
+ else:
+ self.module.fail_json(msg="%s is not a valid archive format" % self.format)
+
+ def _add(self, path, archive_name):
+ def py27_filter(tarinfo):
+ return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo
+
+ def py26_filter(path):
+ return matches_exclusion_patterns(path, self.exclusion_patterns)
+
+ if PY27:
+ self.file.add(path, archive_name, recursive=False, filter=py27_filter)
+ else:
+ self.file.add(path, archive_name, recursive=False, exclude=py26_filter)
+
+ def _get_checksums(self, path):
+ if HAS_LZMA:
+ LZMAError = lzma.LZMAError
+ else:
+ # Just picking another exception that's also listed below
+ LZMAError = tarfile.ReadError
+ try:
+ if self.format == 'xz':
+ with lzma.open(_to_native_ascii(path), 'r') as f:
+ archive = tarfile.open(fileobj=f)
+ checksums = set((info.name, info.chksum) for info in archive.getmembers())
+ archive.close()
+ else:
+ archive = tarfile.open(_to_native_ascii(path), 'r|' + self.format)
+ checksums = set((info.name, info.chksum) for info in archive.getmembers())
+ archive.close()
+ except (LZMAError, tarfile.ReadError, tarfile.CompressionError):
+ try:
+ # The python implementations of gzip, bz2, and lzma do not support restoring compressed files
+ # to their original names so only file checksum is returned
+ f = self._open_compressed_file(_to_native_ascii(path), 'r')
+ checksum = 0
+ while True:
+ chunk = f.read(16 * 1024 * 1024)
+ if not chunk:
+ break
+ checksum = crc32(chunk, checksum)
+ checksums = set([(b'', checksum)])
+ f.close()
+ except Exception:
+ checksums = set()
+ return checksums
+
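+# Editor's note: for plain compressed files (no tar members to compare), the
+# fallback above computes one rolling CRC over the decompressed stream,
+# equivalent to:
+#
+#     checksum = 0
+#     for chunk in iter(lambda: f.read(16 * 1024 * 1024), b''):
+#         checksum = crc32(chunk, checksum)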
+
+def get_archive(module):
+ if module.params['format'] == 'zip':
+ return ZipArchive(module)
+ else:
+ return TarArchive(module)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path', default=[]),
+ exclusion_patterns=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ if not HAS_LZMA and module.params['format'] == 'xz':
+ module.fail_json(
+ msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR
+ )
+
+ check_mode = module.check_mode
+
+ archive = get_archive(module)
+ archive.find_targets()
+
+ if not archive.has_targets():
+ if archive.destination_exists():
+ archive.destination_state = STATE_ARCHIVED if is_archive(archive.destination) else STATE_COMPRESSED
+    elif archive.must_archive:  # has_targets() is true on this branch
+ if check_mode:
+ archive.changed = True
+ else:
+ archive.add_targets()
+ archive.destination_state = STATE_INCOMPLETE if archive.has_unfound_targets() else STATE_ARCHIVED
+ archive.changed |= archive.is_different_from_original()
+ if archive.remove:
+ archive.remove_targets()
+ else:
+ if check_mode:
+ if not archive.destination_exists():
+ archive.changed = True
+ else:
+ path = archive.paths[0]
+ archive.add_single_target(path)
+ archive.changed |= archive.is_different_from_original()
+ if archive.remove:
+ archive.remove_single_target(path)
+
+ if archive.destination_exists():
+ archive.update_permissions()
+
+ module.exit_json(**archive.result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_container.py b/ansible_collections/community/general/plugins/modules/atomic_container.py
new file mode 100644
index 000000000..c26510296
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_container.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: atomic_container
+short_description: Manage the containers on the atomic host platform
+description:
+ - Manage the containers on the atomic host platform.
+  - Allows managing the lifecycle of a container on the atomic host platform.
+author: "Giuseppe Scrivano (@giuseppe)"
+notes:
+  - Host should support the C(atomic) command.
+requirements:
+ - atomic
+ - "python >= 2.6"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ backend:
+ description:
+ - Define the backend to use for the container.
+ required: true
+ choices: ["docker", "ostree"]
+ type: str
+ name:
+ description:
+ - Name of the container.
+ required: true
+ type: str
+ image:
+ description:
+ - The image to use to install the container.
+ required: true
+ type: str
+ rootfs:
+ description:
+ - Define the rootfs of the image.
+ type: str
+ state:
+ description:
+ - State of the container.
+ choices: ["absent", "latest", "present", "rollback"]
+ default: "latest"
+ type: str
+ mode:
+ description:
+      - Define whether it is a user or a system container.
+ choices: ["user", "system"]
+ type: str
+ values:
+ description:
+ - Values for the installation of the container.
+      - This option is permitted only with I(mode=user) or I(mode=system).
+      - The values specified here will be used at installation time as C(--set) arguments for C(atomic install).
+ type: list
+ elements: str
+ default: []
+'''
+
+EXAMPLES = r'''
+
+- name: Install the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: latest
+ mode: system
+ values:
+ - ETCD_NAME=etcd.server
+
+- name: Uninstall the etcd system container
+ community.general.atomic_container:
+ name: etcd
+ image: rhel/etcd
+ backend: ostree
+ state: absent
+ mode: system
+'''
+
+RETURN = r'''
+msg:
+    description: The command's standard output.
+ returned: always
+ type: str
+ sample: 'Using default tag: latest ...'
+'''
+
+# import module snippets
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def do_install(module, mode, rootfs, container, image, values_list, backend):
+ system_list = ["--system"] if mode == 'system' else []
+ user_list = ["--user"] if mode == 'user' else []
+ rootfs_list = ["--rootfs=%s" % rootfs] if rootfs else []
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'install', "--storage=%s" % backend, '--name=%s' % container] + system_list + user_list + rootfs_list + values_list + [image]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
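+# Editor's note: change detection here is heuristic. The atomic CLI does not
+# signal "nothing to do" with a distinct exit code, so do_install() and
+# do_update() look for "Extracting" or "Copying blob" in stdout to decide
+# whether an image was actually pulled.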
+
+def do_update(module, container, image, values_list):
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'containers', 'update', "--rebase=%s" % image] + values_list + [container]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def do_uninstall(module, name, backend):
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'uninstall', "--storage=%s" % backend, name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ module.exit_json(msg=out, changed=True)
+
+
+def do_rollback(module, name):
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'containers', 'rollback', name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Rolling back" in out
+ module.exit_json(msg=out, changed=changed)
+
+
+def core(module):
+ mode = module.params['mode']
+ name = module.params['name']
+ image = module.params['image']
+ rootfs = module.params['rootfs']
+ values = module.params['values']
+ backend = module.params['backend']
+ state = module.params['state']
+
+ atomic_bin = module.get_bin_path('atomic')
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ values_list = ["--set=%s" % x for x in values] if values else []
+
+ args = [atomic_bin, 'containers', 'list', '--no-trunc', '-n', '--all', '-f', 'backend=%s' % backend, '-f', 'container=%s' % name]
+ rc, out, err = module.run_command(args, check_rc=False)
+ if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ return
+ present = name in out
+
+ if state == 'present' and present:
+ module.exit_json(msg=out, changed=False)
+ elif (state in ['latest', 'present']) and not present:
+ do_install(module, mode, rootfs, name, image, values_list, backend)
+ elif state == 'latest':
+ do_update(module, name, image, values_list)
+ elif state == 'absent':
+ if not present:
+ module.exit_json(msg="The container is not present", changed=False)
+ else:
+ do_uninstall(module, name, backend)
+ elif state == 'rollback':
+ do_rollback(module, name)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mode=dict(choices=['user', 'system']),
+ name=dict(required=True),
+ image=dict(required=True),
+ rootfs=dict(),
+ state=dict(default='latest', choices=['present', 'absent', 'latest', 'rollback']),
+ backend=dict(required=True, choices=['docker', 'ostree']),
+ values=dict(type='list', default=[], elements='str'),
+ ),
+ )
+
+    if module.params['values'] and module.params['mode'] is None:
+        module.fail_json(msg="values is supported only with user or system mode")
+
+ # Verify that the platform supports atomic command
+ dummy = module.get_bin_path('atomic', required=True)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg='Unanticipated error running atomic: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_host.py b/ansible_collections/community/general/plugins/modules/atomic_host.py
new file mode 100644
index 000000000..bb44c4489
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_host.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform.
+  - Rebooting the atomic host platform should be done outside this module.
+author:
+- Saravanan KR (@krsacme)
+notes:
+  - Host should be an atomic platform (verified by the existence of the C(/run/ostree-booted) file).
+requirements:
+ - atomic
+ - python >= 2.6
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed.
+ - Providing C(latest) will upgrade to the latest available version.
+ default: 'latest'
+ aliases: [ version ]
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Upgrade the atomic host platform to the latest version (atomic host upgrade)
+ community.general.atomic_host:
+ revision: latest
+
+- name: Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+ community.general.atomic_host:
+ revision: 23.130
+'''
+
+RETURN = r'''
+msg:
+    description: The command's standard output.
+ returned: always
+ type: str
+ sample: 'Already on latest'
+    sample: 'Already on latest'
+'''
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def core(module):
+ revision = module.params['revision']
+ atomic_bin = module.get_bin_path('atomic', required=True)
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ if revision == 'latest':
+ args = [atomic_bin, 'host', 'upgrade']
+ else:
+ args = [atomic_bin, 'host', 'deploy', revision]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+ if rc == 77 and revision == 'latest':
+ module.exit_json(msg="Already on latest", changed=False)
+ elif rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ revision=dict(type='str', default='latest', aliases=["version"]),
+ ),
+ )
+
+ # Verify that the platform is atomic host
+ if not os.path.exists("/run/ostree-booted"):
+ module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/atomic_image.py b/ansible_collections/community/general/plugins/modules/atomic_image.py
new file mode 100644
index 000000000..65aec1e9d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/atomic_image.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform.
+  - Allows executing the commands specified by the RUN label in the container image, when present.
+author:
+- Saravanan KR (@krsacme)
+notes:
+ - Host should support C(atomic) command.
+requirements:
+ - atomic
+ - python >= 2.6
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ backend:
+ description:
+ - Define the backend where the image is pulled.
+ choices: [ 'docker', 'ostree' ]
+ type: str
+ name:
+ description:
+ - Name of the container image.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of the container image.
+      - The state C(latest) will ensure the container image is upgraded to the latest version, forcefully restarting the container if it is running.
+ choices: [ 'absent', 'latest', 'present' ]
+ default: 'latest'
+ type: str
+ started:
+ description:
+      - Start or stop the container.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = r'''
+- name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+ community.general.atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+- name: Pull busybox to the OSTree backend
+ community.general.atomic_image:
+ name: busybox
+ state: latest
+ backend: ostree
+'''
+
+RETURN = r'''
+msg:
+    description: The command's standard output.
+ returned: always
+ type: str
+ sample: 'Using default tag: latest ...'
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def do_upgrade(module, image):
+ atomic_bin = module.get_bin_path('atomic')
+ args = [atomic_bin, 'update', '--force', image]
+ rc, out, err = module.run_command(args, check_rc=False)
+    if rc != 0:  # something went wrong; emit the error message
+ module.fail_json(rc=rc, msg=err)
+ elif 'Image is up to date' in out:
+ return False
+
+ return True
+
+
+def core(module):
+ image = module.params['name']
+ state = module.params['state']
+ started = module.params['started']
+ backend = module.params['backend']
+ is_upgraded = False
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ atomic_bin = module.get_bin_path('atomic')
+    out = ""
+    err = ""
+    rc = 0
+
+ if backend:
+ if state == 'present' or state == 'latest':
+ args = [atomic_bin, 'pull', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+            if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ out_run = ""
+ if started:
+ args = [atomic_bin, 'run', "--storage=%s" % backend, image]
+ rc, out_run, err = module.run_command(args, check_rc=False)
+                if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+
+ changed = "Extracting" in out or "Copying blob" in out
+ module.exit_json(msg=(out + out_run), changed=changed)
+ elif state == 'absent':
+ args = [atomic_bin, 'images', 'delete', "--storage=%s" % backend, image]
+ rc, out, err = module.run_command(args, check_rc=False)
+            if rc != 0:
+ module.fail_json(rc=rc, msg=err)
+ else:
+ changed = "Unable to find" not in out
+ module.exit_json(msg=out, changed=changed)
+ return
+
+ if state == 'present' or state == 'latest':
+ if state == 'latest':
+ is_upgraded = do_upgrade(module, image)
+
+ if started:
+ args = [atomic_bin, 'run', image]
+ else:
+ args = [atomic_bin, 'install', image]
+ elif state == 'absent':
+ args = [atomic_bin, 'uninstall', image]
+
+ rc, out, err = module.run_command(args, check_rc=False)
+
+    if rc == 1 and 'already present' in err:
+        module.exit_json(result=err, changed=is_upgraded)
+    elif rc != 0:
+        module.fail_json(rc=rc, msg=err)
+    elif started and 'Container is running' in out:
+        module.exit_json(result=out, changed=is_upgraded)
+    else:
+        module.exit_json(msg=out, changed=True)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ backend=dict(type='str', choices=['docker', 'ostree']),
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='latest', choices=['absent', 'latest', 'present']),
+ started=dict(type='bool', default=True),
+ ),
+ )
+
+ # Verify that the platform supports atomic command
+ dummy = module.get_bin_path('atomic', required=True)
+
+ try:
+ core(module)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/awall.py b/ansible_collections/community/general/plugins/modules/awall.py
new file mode 100644
index 000000000..da1b29f70
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/awall.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ted Trask <ttrask01@yahoo.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: awall
+short_description: Manage awall policies
+author: Ted Trask (@tdtrask) <ttrask01@yahoo.com>
+description:
+  - This module allows enabling, disabling, and activating C(awall) policies.
+ - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files
+ and activates the configuration on the system.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - One or more policy names.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether the policies should be enabled or disabled.
+ type: str
+ choices: [ disabled, enabled ]
+ default: enabled
+ activate:
+ description:
+ - Activate the new firewall rules.
+ - Can be run with other steps or on its own.
+ - Idempotency is affected if I(activate=true), as the module will always report a changed state.
+ type: bool
+ default: false
+notes:
+ - At least one of I(name) and I(activate) is required.
+'''
+
+EXAMPLES = r'''
+- name: Enable "foo" and "bar" policy
+ community.general.awall:
+    name: [ foo, bar ]
+ state: enabled
+
+- name: Disable "foo" and "bar" policy and activate new rules
+ community.general.awall:
+ name:
+ - foo
+ - bar
+ state: disabled
+ activate: false
+
+- name: Activate currently enabled firewall rules
+ community.general.awall:
+ activate: true
+'''
+
+RETURN = ''' # '''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def activate(module):
+ cmd = "%s activate --force" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ module.fail_json(msg="could not activate new rules", stdout=stdout, stderr=stderr)
+
+
+def is_policy_enabled(module, name):
+ cmd = "%s list" % (AWALL_PATH)
+ rc, stdout, stderr = module.run_command(cmd)
+ if re.search(r"^%s\s+enabled" % name, stdout, re.MULTILINE):
+ return True
+ return False
+
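+# Editor's sketch of the "awall list" output format assumed by the regex
+# above, for example:
+#
+#     foo    enabled
+#     bar    disabled
+#
+# is_policy_enabled() matches r"^<name>\s+enabled" in multiline mode, so any
+# whitespace-separated status column works.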
+
+def enable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if not is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already enabled")
+ names = " ".join(policies)
+    if module.check_mode:
+        # run a harmless read-only command in check mode
+        cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s enable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to enable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="enabled awall policy(ies): %s" % names)
+
+
+def disable_policy(module, names, act):
+ policies = []
+ for name in names:
+ if is_policy_enabled(module, name):
+ policies.append(name)
+ if not policies:
+ module.exit_json(changed=False, msg="policy(ies) already disabled")
+ names = " ".join(policies)
+    if module.check_mode:
+        # run a harmless read-only command in check mode
+        cmd = "%s list" % (AWALL_PATH)
+ else:
+ cmd = "%s disable %s" % (AWALL_PATH, names)
+ rc, stdout, stderr = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg="failed to disable %s" % names, stdout=stdout, stderr=stderr)
+ if act and not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="disabled awall policy(ies): %s" % names)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='enabled', choices=['disabled', 'enabled']),
+ name=dict(type='list', elements='str'),
+ activate=dict(type='bool', default=False),
+ ),
+ required_one_of=[['name', 'activate']],
+ supports_check_mode=True,
+ )
+
+ global AWALL_PATH
+ AWALL_PATH = module.get_bin_path('awall', required=True)
+
+ p = module.params
+
+ if p['name']:
+ if p['state'] == 'enabled':
+ enable_policy(module, p['name'], p['activate'])
+ elif p['state'] == 'disabled':
+ disable_policy(module, p['name'], p['activate'])
+
+ if p['activate']:
+ if not module.check_mode:
+ activate(module)
+ module.exit_json(changed=True, msg="activated awall rules")
+
+ module.fail_json(msg="no action defined")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/beadm.py b/ansible_collections/community/general/plugins/modules/beadm.py
new file mode 100644
index 000000000..8857fd846
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/beadm.py
@@ -0,0 +1,415 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: beadm
+short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems
+description:
+ - Create, delete or activate ZFS boot environments.
+ - Mount and unmount ZFS boot environments.
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - ZFS boot environment name.
+ type: str
+ required: true
+ aliases: [ "be" ]
+ snapshot:
+ description:
+ - If specified, the new boot environment will be cloned from the given
+ snapshot or inactive boot environment.
+ type: str
+ description:
+ description:
+ - Associate a description with a new boot environment. This option is
+ available only on Solarish platforms.
+ type: str
+ options:
+ description:
+ - Create the datasets for new BE with specific ZFS properties.
+ - Multiple options can be specified.
+ - This option is available only on Solarish platforms.
+ type: str
+ mountpoint:
+ description:
+ - Path where to mount the ZFS boot environment.
+ type: path
+ state:
+ description:
+ - Create or delete ZFS boot environment.
+ type: str
+ choices: [ absent, activated, mounted, present, unmounted ]
+ default: present
+ force:
+ description:
+ - Specifies if the unmount should be forced.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: present
+
+- name: Create ZFS boot environment from existing inactive boot environment
+ community.general.beadm:
+ name: upgrade-be
+ snapshot: be@old
+ state: present
+
+- name: Create ZFS boot environment with compression enabled and description "upgrade"
+ community.general.beadm:
+ name: upgrade-be
+ options: "compression=on"
+ description: upgrade
+ state: present
+
+- name: Delete ZFS boot environment
+ community.general.beadm:
+ name: old-be
+ state: absent
+
+- name: Mount ZFS boot environment on /tmp/be
+ community.general.beadm:
+ name: BE
+ mountpoint: /tmp/be
+ state: mounted
+
+- name: Unmount ZFS boot environment
+ community.general.beadm:
+ name: BE
+ state: unmounted
+
+- name: Activate ZFS boot environment
+ community.general.beadm:
+ name: upgrade-be
+ state: activated
+'''
+
+RETURN = r'''
+name:
+ description: BE name
+ returned: always
+ type: str
+ sample: pre-upgrade
+snapshot:
+ description: ZFS snapshot to create BE from
+ returned: always
+ type: str
+ sample: rpool/ROOT/oi-hipster@fresh
+description:
+ description: BE description
+ returned: always
+ type: str
+ sample: Upgrade from 9.0 to 10.0
+options:
+ description: BE additional options
+ returned: always
+ type: str
+ sample: compression=on
+mountpoint:
+ description: BE mountpoint
+ returned: always
+ type: str
+ sample: /mnt/be
+state:
+ description: state of the target
+ returned: always
+ type: str
+ sample: present
+force:
+ description: If forced action is wanted
+ returned: always
+ type: bool
+ sample: false
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BE(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.snapshot = module.params['snapshot']
+ self.description = module.params['description']
+ self.options = module.params['options']
+ self.mountpoint = module.params['mountpoint']
+ self.state = module.params['state']
+ self.force = module.params['force']
+ self.is_freebsd = os.uname()[0] == 'FreeBSD'
+
+ def _beadm_list(self):
+ cmd = [self.module.get_bin_path('beadm'), 'list', '-H']
+ if '@' in self.name:
+ cmd.append('-s')
+ return self.module.run_command(cmd)
+
+ def _find_be_by_name(self, out):
+ if '@' in self.name:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if not check:
+ continue
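+ # The first column holds a full dataset path; compare only its final component.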
+ full_name = check[0].split('/')
+ if full_name == []:
+ continue
+ check[0] = full_name[-1]
+ if check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ else:
+ for line in out.splitlines():
+ if self.is_freebsd:
+ check = line.split()
+ if check and check[0] == self.name:
+ return check
+ else:
+ check = line.split(';')
+ if check[0] == self.name:
+ return check
+ return None
+
+ def exists(self):
+ (rc, out, dummy) = self._beadm_list()
+
+ return rc == 0 and self._find_be_by_name(out) is not None
+
+ def is_activated(self):
+ (rc, out, dummy) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ if 'R' in line[1]:
+ return True
+ else:
+ if 'R' in line[2]:
+ return True
+
+ return False
+
+ def activate_be(self):
+ cmd = [self.module.get_bin_path('beadm'), 'activate', self.name]
+ return self.module.run_command(cmd)
+
+ def create_be(self):
+ cmd = [self.module.get_bin_path('beadm'), 'create']
+
+ if self.snapshot:
+ cmd.extend(['-e', self.snapshot])
+ if not self.is_freebsd:
+ if self.description:
+ cmd.extend(['-d', self.description])
+ if self.options:
+ cmd.extend(['-o', self.options])
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+ def destroy_be(self):
+ cmd = [self.module.get_bin_path('beadm'), 'destroy', '-F', self.name]
+ return self.module.run_command(cmd)
+
+ def is_mounted(self):
+ (rc, out, dummy) = self._beadm_list()
+
+ if rc == 0:
+ line = self._find_be_by_name(out)
+ if line is None:
+ return False
+ if self.is_freebsd:
+ # On FreeBSD, we exclude currently mounted BE on /, as it is
+ # special and can be activated even if it is mounted. That is not
+ # possible with non-root BEs.
+ if line[2] != '-' and line[2] != '/':
+ return True
+ else:
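+ # On Solarish, the fourth field is the mountpoint; it is empty when the BE is not mounted.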
+ if line[3]:
+ return True
+
+ return False
+
+ def mount_be(self):
+ cmd = [self.module.get_bin_path('beadm'), 'mount', self.name]
+
+ if self.mountpoint:
+ cmd.append(self.mountpoint)
+
+ return self.module.run_command(cmd)
+
+ def unmount_be(self):
+ cmd = [self.module.get_bin_path('beadm'), 'unmount']
+ if self.force:
+ cmd.append('-f')
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True, aliases=['be']),
+ snapshot=dict(type='str'),
+ description=dict(type='str'),
+ options=dict(type='str'),
+ mountpoint=dict(type='path'),
+ state=dict(type='str', default='present', choices=['absent', 'activated', 'mounted', 'present', 'unmounted']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ be = BE(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = be.name
+ result['state'] = be.state
+
+ if be.snapshot:
+ result['snapshot'] = be.snapshot
+
+ if be.description:
+ result['description'] = be.description
+
+ if be.options:
+ result['options'] = be.options
+
+ if be.mountpoint:
+ result['mountpoint'] = be.mountpoint
+
+ if be.state == 'absent':
+ # beadm delete behaviour differs between platforms: FreeBSD refuses to
+ # delete an activated BE, while Solarish systems refuse to delete a
+ # mounted BE. We add the mount check for both platforms, as a BE should
+ # be explicitly unmounted before being deleted. On FreeBSD, we also
+ # check whether the BE is activated.
+ if be.exists():
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if be.is_freebsd:
+ if be.is_activated():
+ module.fail_json(msg='Unable to remove active BE!')
+
+ (rc, out, err) = be.destroy_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while destroying BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ else:
+ module.fail_json(msg='Unable to remove BE as it is mounted!')
+
+ elif be.state == 'present':
+ if not be.exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.create_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while creating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'activated':
+ if not be.is_activated():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # On FreeBSD, beadm is unable to activate mounted BEs, so we add
+ # an explicit check for that case.
+ if be.is_freebsd:
+ if be.is_mounted():
+ module.fail_json(msg='Unable to activate mounted BE!')
+
+ (rc, out, err) = be.activate_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while activating BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+ elif be.state == 'mounted':
+ if not be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.mount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while mounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ elif be.state == 'unmounted':
+ if be.is_mounted():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ (rc, out, err) = be.unmount_be()
+
+ if rc != 0:
+ module.fail_json(msg='Error while unmounting BE: "%s"' % err,
+ name=be.name,
+ stderr=err,
+ rc=rc)
+
+ result['changed'] = rc is not None
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bearychat.py b/ansible_collections/community/general/plugins/modules/bearychat.py
new file mode 100644
index 000000000..28f1f8fcd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bearychat.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: bearychat
+short_description: Send BearyChat notifications
+description:
+ - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com)
+ via the Incoming Robot integration.
+author: "Jiangge Zhang (@tonyseek)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ url:
+ type: str
+ description:
+ - BearyChat WebHook URL. This authenticates you to the BearyChat
+ service. It looks like
+ C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
+ required: true
+ text:
+ type: str
+ description:
+ - Message to send.
+ markdown:
+ description:
+ - If C(true), text will be parsed as markdown.
+ default: true
+ type: bool
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the
+ default channel selected by the I(url).
+ attachments:
+ type: list
+ elements: dict
+ description:
+ - Define a list of attachments. For more information, see
+ U(https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments).
+'''
+
+EXAMPLES = """
+- name: Send notification message via BearyChat
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via BearyChat all options
+ local_action:
+ module: bearychat
+ url: |
+ https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
+ text: "{{ inventory_hostname }} completed"
+ markdown: false
+ channel: "#ansible"
+ attachments:
+ - title: "Ansible on {{ inventory_hostname }}"
+ text: "May the Force be with you."
+ color: "#ffffff"
+ images:
+ - http://example.com/index.png
+"""
+
+RETURN = """
+msg:
+ description: Execution result.
+ returned: success
+ type: str
+ sample: "OK"
+"""
+
+try:
+ from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
+ HAS_URLPARSE = True
+except Exception:
+ HAS_URLPARSE = False
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def build_payload_for_bearychat(module, text, markdown, channel, attachments):
+ payload = {}
+ if text is not None:
+ payload['text'] = text
+ if markdown is not None:
+ payload['markdown'] = markdown
+ if channel is not None:
+ payload['channel'] = channel
+ if attachments is not None:
+ payload.setdefault('attachments', []).extend(
+ build_payload_for_bearychat_attachment(
+ module, item.get('title'), item.get('text'), item.get('color'),
+ item.get('images'))
+ for item in attachments)
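+ # The incoming robot endpoint expects a form-encoded body of the form payload=<JSON>.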
+ payload = 'payload=%s' % module.jsonify(payload)
+ return payload
+
+
+def build_payload_for_bearychat_attachment(module, title, text, color, images):
+ attachment = {}
+ if title is not None:
+ attachment['title'] = title
+ if text is not None:
+ attachment['text'] = text
+ if color is not None:
+ attachment['color'] = color
+ if images is not None:
+ target_images = attachment.setdefault('images', [])
+ if not isinstance(images, (list, tuple)):
+ images = [images]
+ for image in images:
+ if isinstance(image, dict) and 'url' in image:
+ image = {'url': image['url']}
+ elif hasattr(image, 'startswith') and image.startswith('http'):
+ image = {'url': image}
+ else:
+ module.fail_json(
+ msg="BearyChat doesn't have support for this kind of "
+ "attachment image")
+ target_images.append(image)
+ return attachment
+
+
+def do_notify_bearychat(module, url, payload):
+ response, info = fetch_url(module, url, data=payload)
+ if info['status'] != 200:
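+ # Obscure the webhook path in the error message, since it embeds the access credential.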
+ url_info = urlparse(url)
+ obscured_incoming_webhook = urlunparse(
+ (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
+ module.fail_json(
+ msg=" failed to send %s to %s: %s" % (
+ payload, obscured_incoming_webhook, info['msg']))
+
+
+def main():
+ module = AnsibleModule(argument_spec={
+ 'url': dict(type='str', required=True, no_log=True),
+ 'text': dict(type='str'),
+ 'markdown': dict(default=True, type='bool'),
+ 'channel': dict(type='str'),
+ 'attachments': dict(type='list', elements='dict'),
+ })
+
+ if not HAS_URLPARSE:
+ module.fail_json(msg='urlparse is not installed')
+
+ url = module.params['url']
+ text = module.params['text']
+ markdown = module.params['markdown']
+ channel = module.params['channel']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_bearychat(
+ module, text, markdown, channel, attachments)
+ do_notify_bearychat(module, url, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bigpanda.py b/ansible_collections/community/general/plugins/modules/bigpanda.py
new file mode 100644
index 000000000..bab200bc4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bigpanda.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bigpanda
+author: "Hagai Kariti (@hkariti)"
+short_description: Notify BigPanda about deployments
+description:
+ - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ component:
+ type: str
+ description:
+ - "The name of the component being deployed. Ex: billing"
+ required: true
+ aliases: ['name']
+ version:
+ type: str
+ description:
+ - The deployment version.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ state:
+ type: str
+ description:
+ - State of the deployment.
+ required: true
+ choices: ['started', 'finished', 'failed']
+ hosts:
+ type: str
+ description:
+ - Name of the affected host. Can be a list.
+ - If not specified, it defaults to the remote system's hostname.
+ required: false
+ aliases: ['host']
+ env:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: false
+ owner:
+ type: str
+ description:
+ - The person responsible for the deployment.
+ required: false
+ description:
+ type: str
+ description:
+ - Free text description of the deployment.
+ required: false
+ url:
+ type: str
+ description:
+ - Base URL of the API server.
+ required: false
+ default: https://api.bigpanda.io
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target URL will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+ type: bool
+ deployment_message:
+ type: str
+ description:
+ - Message about the deployment.
+ version_added: '0.2.0'
+ source_system:
+ type: str
+ description:
+ - Source system used in the requests to the API.
+ default: ansible
+
+# informational: requirements for nodes
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: started
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ state: finished
+
+# If outside servers aren't reachable from your machine, use delegate_to and override hosts:
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: myapp
+ version: '1.3'
+ token: '{{ bigpanda_token }}'
+ hosts: '{{ ansible_hostname }}'
+ state: started
+ delegate_to: localhost
+ register: deployment
+
+- name: Notify BigPanda about a deployment
+ community.general.bigpanda:
+ component: '{{ deployment.component }}'
+ version: '{{ deployment.version }}'
+ token: '{{ deployment.token }}'
+ state: finished
+ delegate_to: localhost
+'''
+
+# ===========================================
+# Module execution.
+#
+import json
+import socket
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ component=dict(required=True, aliases=['name']),
+ version=dict(required=True),
+ token=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['started', 'finished', 'failed']),
+ hosts=dict(required=False, aliases=['host']),
+ env=dict(required=False),
+ owner=dict(required=False),
+ description=dict(required=False),
+ deployment_message=dict(required=False),
+ source_system=dict(required=False, default='ansible'),
+ validate_certs=dict(default=True, type='bool'),
+ url=dict(required=False, default='https://api.bigpanda.io'),
+ ),
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ state = module.params['state']
+ url = module.params['url']
+
+ # Build the common request body
+ body = dict()
+ for k in ('component', 'version', 'hosts'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+ if body.get('hosts') is None:
+ body['hosts'] = [socket.gethostname()]
+
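+ # The API expects hosts to be a list; wrap a single hostname accordingly.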
+ if not isinstance(body['hosts'], list):
+ body['hosts'] = [body['hosts']]
+
+ # Insert state-specific attributes to body
+ if state == 'started':
+ for k in ('source_system', 'env', 'owner', 'description'):
+ v = module.params[k]
+ if v is not None:
+ body[k] = v
+
+ request_url = url + '/data/events/deployments/start'
+ else:
+ message = module.params['deployment_message']
+ if message is not None:
+ body['errorMessage'] = message
+
+ if state == 'finished':
+ body['status'] = 'success'
+ else:
+ body['status'] = 'failure'
+
+ request_url = url + '/data/events/deployments/end'
+
+ # Build the deployment object we return
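+ # It mirrors the request body plus token/url, so later tasks can feed it back (see EXAMPLES).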
+ deployment = dict(token=token, url=url)
+ deployment.update(body)
+ if 'errorMessage' in deployment:
+ message = deployment.pop('errorMessage')
+ deployment['message'] = message
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True, **deployment)
+
+ # Send the data to bigpanda
+ data = json.dumps(body)
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+ try:
+ response, info = fetch_url(module, request_url, data=data, headers=headers)
+ if info['status'] == 200:
+ module.exit_json(changed=True, **deployment)
+ else:
+ module.fail_json(msg=json.dumps(info))
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
new file mode 100644
index 000000000..5ef199f7a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_access_key.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_access_key
+short_description: Manages Bitbucket repository access keys
+description:
+ - Manages Bitbucket repository access keys (also called deploy keys).
+author:
+ - Evgeniy Krysanov (@catcombo)
+extends_documentation_fragment:
+ - community.general.bitbucket
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ workspace:
+ description:
+ - The repository owner.
+ - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ type: str
+ required: true
+ key:
+ description:
+ - The SSH public key.
+ type: str
+ label:
+ description:
+ - The key label.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the access key.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories.
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create access key
+ community.general.bitbucket_access_key:
+ repository: 'bitbucket-repo'
+ workspace: bitbucket_workspace
+ key: '{{ lookup("file", "bitbucket.pub") }}'
+ label: 'Bitbucket'
+ state: present
+
+- name: Delete access key
+ community.general.bitbucket_access_key:
+ repository: bitbucket-repo
+ workspace: bitbucket_workspace
+ label: Bitbucket
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_key': '`key` is required when the `state` is `present`',
+ 'required_permission': 'OAuth consumer `client_id` should have permissions to read and administrate the repository',
+ 'invalid_workspace_or_repo': 'Invalid `repository` or `workspace`',
+ 'invalid_key': 'Invalid SSH key or key is already in use',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'deploy-key-list': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'deploy-key-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/deploy-keys/{key_id}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_deploy_key(module, bitbucket):
+ """
+ Search for an existing deploy key on Bitbucket
+ with the label specified in module param `label`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing deploy key or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all response pages in search of the deploy key we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info))
+
+ res = next(filter(lambda v: v['label'] == module.params['label'], content['values']), None)
+
+ if res is not None:
+ return res
+
+ return None
+
+
+def create_deploy_key(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['key'],
+ 'label': module.params['label'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] == 400:
+ module.fail_json(msg=error_messages['invalid_key'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def delete_deploy_key(module, bitbucket, key_id):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['deploy-key-detail'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ key_id=key_id,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_workspace_or_repo'])
+
+ if info['status'] == 403:
+ module.fail_json(msg=error_messages['required_permission'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete deploy key `{label}`: {info}'.format(
+ label=module.params['label'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ workspace=dict(type='str', required=True),
+ key=dict(type='str', no_log=False),
+ label=dict(type='str', required=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+ required_together=BitbucketHelper.bitbucket_required_together(),
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ key = module.params['key']
+ state = module.params['state']
+
+ # Check parameters
+ if (key is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_key'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing deploy key (if any)
+ existing_deploy_key = get_existing_deploy_key(module, bitbucket)
+ changed = False
+
+ # Create a new deploy key in case it does not exist yet
+ if not existing_deploy_key and (state == 'present'):
+ if not module.check_mode:
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Update deploy key if the old value does not match the new one
+ elif existing_deploy_key and (state == 'present'):
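+ # Compare by prefix: the stored key may lack the trailing comment present in the supplied key.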
+ if not key.startswith(existing_deploy_key.get('key')):
+ if not module.check_mode:
+ # Bitbucket does not support updating a key under the same label,
+ # so we need to delete the old one first
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ create_deploy_key(module, bitbucket)
+ changed = True
+
+ # Delete deploy key
+ elif existing_deploy_key and (state == 'absent'):
+ if not module.check_mode:
+ delete_deploy_key(module, bitbucket, existing_deploy_key['id'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
new file mode 100644
index 000000000..d39c054b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_key_pair.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_key_pair
+short_description: Manages Bitbucket pipeline SSH key pair
+description:
+ - Manages Bitbucket pipeline SSH key pair.
+author:
+ - Evgeniy Krysanov (@catcombo)
+extends_documentation_fragment:
+ - community.general.bitbucket
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ workspace:
+ description:
+ - The repository owner.
+ - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ type: str
+ required: true
+ public_key:
+ description:
+ - The public key.
+ type: str
+ private_key:
+ description:
+ - The private key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the key pair.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create or update SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: 'bitbucket-repo'
+ workspace: bitbucket_workspace
+ public_key: '{{ lookup("file", "bitbucket.pub") }}'
+ private_key: '{{ lookup("file", "bitbucket") }}'
+ state: present
+
+- name: Remove SSH key pair
+ community.general.bitbucket_pipeline_key_pair:
+ repository: bitbucket-repo
+ workspace: bitbucket_workspace
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account, repository or SSH key pair was not found',
+ 'required_keys': '`public_key` and `private_key` are required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'ssh-key-pair': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/key_pair' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_ssh_key_pair(module, bitbucket):
+ """
+ Retrieves an existing SSH key pair from the repository
+ specified in module param `repository`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing key pair or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...2E8HAeT",
+ "type": "pipeline_ssh_key_pair"
+ }
+ """
+ api_url = BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ )
+
+ info, content = bitbucket.request(
+ api_url=api_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ # Account, repository or SSH key pair was not found.
+ return None
+
+ return content
+
+
+def update_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='PUT',
+ data={
+ 'private_key': module.params['private_key'],
+ 'public_key': module.params['public_key'],
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to create or update pipeline SSH key pair: {0}'.format(info))
+
+
+def delete_ssh_key_pair(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['ssh-key-pair'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline SSH key pair: {0}'.format(info))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ workspace=dict(type='str', required=True),
+ public_key=dict(type='str'),
+ private_key=dict(type='str', no_log=True),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+ required_together=BitbucketHelper.bitbucket_required_together(),
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ state = module.params['state']
+ public_key = module.params['public_key']
+ private_key = module.params['private_key']
+
+ # Check parameters
+ if ((public_key is None) or (private_key is None)) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_keys'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing ssh key
+ key_pair = get_existing_ssh_key_pair(module, bitbucket)
+ changed = False
+
+ # Create or update key pair
+ if (not key_pair or (key_pair.get('public_key') != public_key)) and (state == 'present'):
+ if not module.check_mode:
+ update_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ # Delete key pair
+ elif key_pair and (state == 'absent'):
+ if not module.check_mode:
+ delete_ssh_key_pair(module, bitbucket)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
new file mode 100644
index 000000000..28ff48739
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_known_host.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_known_host
+short_description: Manages Bitbucket pipeline known hosts
+description:
+ - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu.
+ - The host fingerprint will be retrieved automatically, but in case of an error, one can use the I(key) field to specify it manually.
+author:
+ - Evgeniy Krysanov (@catcombo)
+extends_documentation_fragment:
+ - community.general.bitbucket
+ - community.general.attributes
+requirements:
+ - paramiko
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ workspace:
+ description:
+ - The repository owner.
+ - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ type: str
+ required: true
+ name:
+ description:
+ - The FQDN of the known host.
+ type: str
+ required: true
+ key:
+ description:
+ - The public key.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the record.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Check mode is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create known hosts from the list
+ community.general.bitbucket_pipeline_known_host:
+ repository: 'bitbucket-repo'
+ workspace: bitbucket_workspace
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - bitbucket.org
+ - example.com
+
+- name: Remove known host
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ workspace: bitbucket_workspace
+ name: bitbucket.org
+ state: absent
+
+- name: Specify public key file
+ community.general.bitbucket_pipeline_known_host:
+ repository: bitbucket-repo
+ workspace: bitbucket_workspace
+ name: bitbucket.org
+ key: '{{ lookup("file", "bitbucket.pub") }}'
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import socket
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'invalid_params': 'Account or repository was not found',
+ 'unknown_key_type': 'Public key type is unknown',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'known-host-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'known-host-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/ssh/known_hosts/{known_host_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_known_host(module, bitbucket):
+ """
+ Search for a host in Bitbucket pipelines known hosts
+ with the name specified in module param `name`
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing host or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ },
+ }
+ """
+ content = {
+ 'next': BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ )
+ }
+
+ # Look through all response pages in search of the hostname we need
+ while 'next' in content:
+ info, content = bitbucket.request(
+ api_url=content['next'],
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `workspace`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info))
+
+ host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None)
+
+ if host is not None:
+ return host
+
+ return None
+
+
+def get_host_key(module, hostname):
+ """
+ Fetches public key for specified host
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param hostname: host name
+ :return: key type and key content
+ :rtype: tuple
+
+ Return example::
+
+ (
+ 'ssh-rsa',
+ 'AAAAB3NzaC1yc2EAAAABIwAAA...SBne8+seeFVBoGqzHM9yXw==',
+ )
+ """
+ try:
+ sock = socket.socket()
+ sock.connect((hostname, 22))
+ except socket.error:
+ module.fail_json(msg='Error opening socket to {0}'.format(hostname))
+
+ try:
+ trans = paramiko.transport.Transport(sock)
+ trans.start_client()
+ host_key = trans.get_remote_server_key()
+ except paramiko.SSHException:
+ module.fail_json(msg='SSH error on retrieving {0} server key'.format(hostname))
+
+ trans.close()
+ sock.close()
+
+ key_type = host_key.get_name()
+ key = host_key.get_base64()
+
+ return key_type, key
+
+
+def create_known_host(module, bitbucket):
+ hostname = module.params['name']
+ key_param = module.params['key']
+
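+ # A user-supplied key must look like "<key_type> <base64 key>"; with no key given, it is fetched from the host over SSH.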
+ if key_param is None:
+ key_type, key = get_host_key(module, hostname)
+ elif ' ' in key_param:
+ key_type, key = key_param.split(' ', 1)
+ else:
+ module.fail_json(msg=error_messages['unknown_key_type'])
+
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'hostname': hostname,
+ 'public_key': {
+ 'key_type': key_type,
+ 'key': key,
+ }
+ },
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create known host `{hostname}`: {info}'.format(
+ hostname=hostname,
+ info=info,
+ ))
+
+
+def delete_known_host(module, bitbucket, known_host_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['known-host-detail'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ known_host_uuid=known_host_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg=error_messages['invalid_params'])
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete known host `{hostname}`: {info}'.format(
+ hostname=module.params['name'],
+ info=info,
+ ))
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ workspace=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ key=dict(type='str', no_log=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+ required_together=BitbucketHelper.bitbucket_required_together(),
+ )
+
+ if (module.params['key'] is None) and (not HAS_PARAMIKO):
+ module.fail_json(msg='`paramiko` package not found, please install it.')
+
+ bitbucket = BitbucketHelper(module)
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing known host
+ existing_host = get_existing_known_host(module, bitbucket)
+ state = module.params['state']
+ changed = False
+
+ # Create a new host in case it does not exist yet
+ if not existing_host and (state == 'present'):
+ if not module.check_mode:
+ create_known_host(module, bitbucket)
+ changed = True
+
+ # Delete host
+ elif existing_host and (state == 'absent'):
+ if not module.check_mode:
+ delete_known_host(module, bitbucket, existing_host['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
new file mode 100644
index 000000000..eac0d18dd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bitbucket_pipeline_variable.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Evgeniy Krysanov <evgeniy.krysanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: bitbucket_pipeline_variable
+short_description: Manages Bitbucket pipeline variables
+description:
+ - Manages Bitbucket pipeline variables.
+author:
+ - Evgeniy Krysanov (@catcombo)
+extends_documentation_fragment:
+ - community.general.bitbucket
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ repository:
+ description:
+ - The repository name.
+ type: str
+ required: true
+ workspace:
+ description:
+ - The repository owner.
+ - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user).
+ type: str
+ required: true
+ name:
+ description:
+ - The pipeline variable name.
+ type: str
+ required: true
+ value:
+ description:
+ - The pipeline variable value.
+ type: str
+ secured:
+ description:
+ - Whether to encrypt the variable value.
+ type: bool
+ default: false
+ state:
+ description:
+ - Indicates desired state of the variable.
+ type: str
+ required: true
+ choices: [ absent, present ]
+notes:
+ - Check mode is supported.
+ - For secured values, the return parameter C(changed) is always C(True).
+'''
+
+EXAMPLES = r'''
+- name: Create or update pipeline variables from the list
+ community.general.bitbucket_pipeline_variable:
+ repository: 'bitbucket-repo'
+ workspace: bitbucket_workspace
+ name: '{{ item.name }}'
+ value: '{{ item.value }}'
+ secured: '{{ item.secured }}'
+ state: present
+ with_items:
+ - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false }
+ - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true }
+
+- name: Remove pipeline variable
+ community.general.bitbucket_pipeline_variable:
+ repository: bitbucket-repo
+ workspace: bitbucket_workspace
+ name: AWS_ACCESS_KEY
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule, _load_params
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+
+error_messages = {
+ 'required_value': '`value` is required when the `state` is `present`',
+}
+
+BITBUCKET_API_ENDPOINTS = {
+ 'pipeline-variable-list': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/' % BitbucketHelper.BITBUCKET_API_URL,
+ 'pipeline-variable-detail': '%s/2.0/repositories/{workspace}/{repo_slug}/pipelines_config/variables/{variable_uuid}' % BitbucketHelper.BITBUCKET_API_URL,
+}
+
+
+def get_existing_pipeline_variable(module, bitbucket):
+ """
+ Search for a pipeline variable
+
+ :param module: instance of the :class:`AnsibleModule`
+ :param bitbucket: instance of the :class:`BitbucketHelper`
+ :return: existing variable or None if not found
+ :rtype: dict or None
+
+ Return example::
+
+ {
+ 'name': 'AWS_ACCESS_OBKEY_ID',
+ 'value': 'x7HU80-a2',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-5464f15128127}'
+ }
+
+ The `value` key is absent from the dict in the case of a secured variable.
+ """
+ variables_base_url = BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ )
+ # Look through all response pages in search of the variable we need
+ page = 1
+ while True:
+ next_url = "%s?page=%s" % (variables_base_url, page)
+ info, content = bitbucket.request(
+ api_url=next_url,
+ method='GET',
+ )
+
+ if info['status'] == 404:
+ module.fail_json(msg='Invalid `repository` or `workspace`.')
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to retrieve the list of pipeline variables: {0}'.format(info))
+
+ # We are at the end of the list
+ if 'pagelen' in content and content['pagelen'] == 0:
+ return None
+
+ page += 1
+ var = next(filter(lambda v: v['key'] == module.params['name'], content['values']), None)
+
+ if var is not None:
+ var['name'] = var.pop('key')
+ return var
+
+
+def create_pipeline_variable(module, bitbucket):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-list'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ ),
+ method='POST',
+ data={
+ 'key': module.params['name'],
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 201:
+ module.fail_json(msg='Failed to create pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def update_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='PUT',
+ data={
+ 'value': module.params['value'],
+ 'secured': module.params['secured'],
+ },
+ )
+
+ if info['status'] != 200:
+ module.fail_json(msg='Failed to update pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+def delete_pipeline_variable(module, bitbucket, variable_uuid):
+ info, content = bitbucket.request(
+ api_url=BITBUCKET_API_ENDPOINTS['pipeline-variable-detail'].format(
+ workspace=module.params['workspace'],
+ repo_slug=module.params['repository'],
+ variable_uuid=variable_uuid,
+ ),
+ method='DELETE',
+ )
+
+ if info['status'] != 204:
+ module.fail_json(msg='Failed to delete pipeline variable `{name}`: {info}'.format(
+ name=module.params['name'],
+ info=info,
+ ))
+
+
+class BitBucketPipelineVariable(AnsibleModule):
+ def __init__(self, *args, **kwargs):
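+ # Peek at the raw parameters before AnsibleModule validates them: when the
+ # variable is secured, its value must be marked no_log up front so it is
+ # censored from the very first log output.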
+ params = _load_params() or {}
+ if params.get('secured'):
+ kwargs['argument_spec']['value'].update({'no_log': True})
+ super(BitBucketPipelineVariable, self).__init__(*args, **kwargs)
+
+
+def main():
+ argument_spec = BitbucketHelper.bitbucket_argument_spec()
+ argument_spec.update(
+ repository=dict(type='str', required=True),
+ workspace=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ secured=dict(type='bool', default=False),
+ state=dict(type='str', choices=['present', 'absent'], required=True),
+ )
+ module = BitBucketPipelineVariable(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=BitbucketHelper.bitbucket_required_one_of(),
+ required_together=BitbucketHelper.bitbucket_required_together(),
+ )
+
+ bitbucket = BitbucketHelper(module)
+
+ value = module.params['value']
+ state = module.params['state']
+ secured = module.params['secured']
+
+ # Check parameters
+ if (value is None) and (state == 'present'):
+ module.fail_json(msg=error_messages['required_value'])
+
+ # Retrieve access token for authorized API requests
+ bitbucket.fetch_access_token()
+
+ # Retrieve existing pipeline variable (if any)
+ existing_variable = get_existing_pipeline_variable(module, bitbucket)
+ changed = False
+
+ # Create a new variable in case it does not exist yet
+ if not existing_variable and (state == 'present'):
+ if not module.check_mode:
+ create_pipeline_variable(module, bitbucket)
+ changed = True
+
+ # Update variable if it is secured or the old value does not match the new one
+ elif existing_variable and (state == 'present'):
+ if (existing_variable['secured'] != secured) or (existing_variable.get('value') != value):
+ if not module.check_mode:
+ update_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ # Delete variable
+ elif existing_variable and (state == 'absent'):
+ if not module.check_mode:
+ delete_pipeline_variable(module, bitbucket, existing_variable['uuid'])
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bower.py b/ansible_collections/community/general/plugins/modules/bower.py
new file mode 100644
index 000000000..1824e68bb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bower.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bower
+short_description: Manage bower packages with bower
+description:
+ - Manage bower packages with bower.
+author: "Michael Warkentin (@mwarkentin)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - The name of a bower package to install
+ offline:
+ description:
+ - Install packages from local cache, if the packages were installed before
+ type: bool
+ default: false
+ production:
+ description:
+ - Install with --production flag
+ type: bool
+ default: false
+ path:
+ type: path
+ description:
+ - The base path where to install the bower packages
+ required: true
+ relative_execpath:
+ type: path
+ description:
+ - Relative path to bower executable from install path
+ state:
+ type: str
+ description:
+ - The state of the bower package
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ version:
+ type: str
+ description:
+ - The version to be installed
+'''
+
+EXAMPLES = '''
+- name: Install "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+
+- name: Install "bootstrap" bower package on version 3.1.1.
+ community.general.bower:
+ name: bootstrap
+ version: '3.1.1'
+
+- name: Remove the "bootstrap" bower package.
+ community.general.bower:
+ name: bootstrap
+ state: absent
+
+- name: Install packages based on bower.json.
+ community.general.bower:
+ path: /app/location
+
+- name: Update packages based on bower.json to their latest version.
+ community.general.bower:
+ path: /app/location
+ state: latest
+
+# install bower locally and run from there
+- npm:
+ path: /app/location
+ name: bower
+ global: false
+- community.general.bower:
+ path: /app/location
+ relative_execpath: node_modules/.bin
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bower(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs['name']
+ self.offline = kwargs['offline']
+ self.production = kwargs['production']
+ self.path = kwargs['path']
+ self.relative_execpath = kwargs['relative_execpath']
+ self.version = kwargs['version']
+
+ if kwargs['version']:
+ self.name_version = self.name + '#' + self.version
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True):
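+ # Only execute commands outside check mode, unless the caller explicitly opts in (used by the read-only 'list').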
+ if not self.module.check_mode or run_in_check_mode:
+ cmd = []
+
+ if self.relative_execpath:
+ cmd.append(os.path.join(self.path, self.relative_execpath, "bower"))
+ if not os.path.isfile(cmd[-1]):
+ self.module.fail_json(msg="bower not found at relative path %s" % self.relative_execpath)
+ else:
+ cmd.append("bower")
+
+ cmd.extend(args)
+ cmd.extend(['--config.interactive=false', '--allow-root'])
+
+ if self.name:
+ cmd.append(self.name_version)
+
+ if self.offline:
+ cmd.append('--offline')
+
+ if self.production:
+ cmd.append('--production')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json']
+
+ installed = list()
+ missing = list()
+ outdated = list()
+ data = json.loads(self._exec(cmd, True, False))
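+ # Sort each reported dependency into installed/missing/outdated buckets based on bower's flags.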
+ if 'dependencies' in data:
+ for dep in data['dependencies']:
+ dep_data = data['dependencies'][dep]
+ if dep_data.get('missing', False):
+ missing.append(dep)
+ elif ('version' in dep_data['pkgMeta'] and
+ 'update' in dep_data and
+ dep_data['pkgMeta']['version'] != dep_data['update']['latest']):
+ outdated.append(dep)
+ elif dep_data.get('incompatible', False):
+ outdated.append(dep)
+ else:
+ installed.append(dep)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing, outdated
+
+ def install(self):
+ return self._exec(['install'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ offline=dict(default=False, type='bool'),
+ production=dict(default=False, type='bool'),
+ path=dict(required=True, type='path'),
+ relative_execpath=dict(default=None, required=False, type='path'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=arg_spec
+ )
+
+ name = module.params['name']
+ offline = module.params['offline']
+ production = module.params['production']
+ path = module.params['path']
+ relative_execpath = module.params['relative_execpath']
+ state = module.params['state']
+ version = module.params['version']
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ bower = Bower(module, name=name, offline=offline, production=production, path=path, relative_execpath=relative_execpath, version=version)
+
+ changed = False
+ if state == 'present':
+ installed, missing, outdated = bower.list()
+ if missing:
+ changed = True
+ bower.install()
+ elif state == 'latest':
+ installed, missing, outdated = bower.list()
+ if missing or outdated:
+ changed = True
+ bower.update()
+ else: # Absent
+ installed, missing, outdated = bower.list()
+ if name in installed:
+ changed = True
+ bower.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/btrfs_info.py b/ansible_collections/community/general/plugins/modules/btrfs_info.py
new file mode 100644
index 000000000..c367b9ed1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/btrfs_info.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: btrfs_info
+short_description: Query btrfs filesystem info
+version_added: "6.6.0"
+description: Query status of available btrfs filesystems, including uuid, label, subvolumes and mountpoints.
+
+author:
+ - Gregory Furlong (@gnfzdz)
+
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = r'''
+
+- name: Query information about mounted btrfs filesystems
+ community.general.btrfs_info:
+ register: my_btrfs_info
+
+'''
+
+RETURN = r'''
+
+filesystems:
+ description: Summaries of the current state for all btrfs filesystems found on the target host.
+ type: list
+ elements: dict
+ returned: success
+ contains:
+ uuid:
+ description: A unique identifier assigned to the filesystem.
+ type: str
+ sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+ label:
+ description: An optional label assigned to the filesystem.
+ type: str
+ sample: Tank
+ devices:
+ description: A list of devices assigned to the filesystem.
+ type: list
+ sample:
+ - /dev/sda1
+ - /dev/sdb1
+ default_subvolume:
+ description: The id of the filesystem's default subvolume.
+ type: int
+ sample: 5
+ subvolumes:
+ description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: An identifier assigned to the subvolume, unique within the containing filesystem.
+ type: int
+ sample: 256
+ mountpoints:
+ description: Paths where the subvolume is mounted on the targeted host.
+ type: list
+ sample: ['/home']
+ parent:
+ description: The identifier of this subvolume's parent.
+ type: int
+ sample: 5
+ path:
+ description: The full path of the subvolume relative to the btrfs filesystem's root.
+ type: str
+ sample: /@home
+
+'''
+
+
+from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ module_args = dict()
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ provider = BtrfsFilesystemsProvider(module)
+ filesystems = [x.get_summary() for x in provider.get_filesystems()]
+ result = {
+ "filesystems": filesystems,
+ }
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
new file mode 100644
index 000000000..cd2ac6f97
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/btrfs_subvolume.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: btrfs_subvolume
+short_description: Manage btrfs subvolumes
+version_added: "6.6.0"
+
+description: Creates, updates and deletes btrfs subvolumes and snapshots.
+
+options:
+ automount:
+ description:
+ - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make any required changes.
+ type: bool
+ default: false
+ default:
+ description:
+ - Make the subvolume specified by I(name) the filesystem's default subvolume.
+ type: bool
+ default: false
+ filesystem_device:
+ description:
+ - A block device contained within the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: path
+ filesystem_label:
+ description:
+ - A descriptive label assigned to the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: str
+ filesystem_uuid:
+ description:
+ - A unique identifier assigned to the btrfs filesystem to be targeted.
+ - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+ type: str
+ name:
+ description:
+ - Name of the subvolume/snapshot to be targeted.
+ required: true
+ type: str
+ recursive:
+ description:
+ - When true, indicates that parent/child subvolumes should be created/removed as necessary
+ to complete the operation (for I(state=present) and I(state=absent) respectively).
+ type: bool
+ default: false
+ snapshot_source:
+ description:
+ - Identifies the source subvolume for the created snapshot.
+ - Infers that the created subvolume is a snapshot.
+ type: str
+ snapshot_conflict:
+ description:
+ - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
+ - C(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that no change is required.
+ Warning, this option does not yet verify that the target subvolume was generated from a snapshot of the requested source.
+ - C(clobber) - If a subvolume already exists at the requested location, delete it first.
+ This option is not idempotent and will result in a new snapshot being generated on every execution.
+ - C(error) - If a subvolume already exists at the requested location, return an error.
+ This option is not idempotent and will result in an error on replay of the module.
+ type: str
+ choices: [ skip, clobber, error ]
+ default: skip
+ state:
+ description:
+ - Indicates the desired state of the targeted subvolume.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+notes:
+ - If any of the I(filesystem_device), I(filesystem_label) or I(filesystem_uuid) options are provided, there is expected
+ to be a matching btrfs filesystem. If none are provided and exactly one btrfs filesystem exists or exactly one
+ btrfs filesystem is mounted, that filesystem will be used; otherwise, the module will take no action and return an error.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - In some scenarios it may erroneously report intermediate subvolumes being created.
+ After mounting, if a directory-like file is found where the subvolume would have been created, the operation is skipped.
+ diff_mode:
+ support: none
+
+author:
+ - Gregory Furlong (@gnfzdz)
+'''
+
+EXAMPLES = r'''
+
+- name: Create a @home subvolume under the root subvolume
+ community.general.btrfs_subvolume:
+ name: /@home
+ filesystem_device: /dev/vda2
+
+- name: Remove the @home subvolume if it exists
+ community.general.btrfs_subvolume:
+ name: /@home
+ state: absent
+ filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume named @
+ community.general.btrfs_subvolume:
+ name: /@
+ snapshot_source: /
+ filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the root subvolume and make it the new default subvolume
+ community.general.btrfs_subvolume:
+ name: /@
+ snapshot_source: /
+ default: true
+ filesystem_device: /dev/vda2
+
+- name: Create a snapshot of the /@ subvolume, recursively creating intermediate subvolumes as required
+ community.general.btrfs_subvolume:
+ name: /@snapshots/@2022_06_09
+ snapshot_source: /@
+ recursive: true
+ filesystem_device: /dev/vda2
+
+- name: Remove the /@snapshots/@2022_06_09 subvolume, recursively deleting child subvolumes as required
+ community.general.btrfs_subvolume:
+ name: /@snapshots/@2022_06_09
+ state: absent
+ recursive: true
+ filesystem_device: /dev/vda2
+
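+# A hedged illustration (paths are hypothetical): with snapshot_conflict=clobber,
+# any subvolume already present at the target path is deleted first, so a fresh
+# snapshot is taken and the task reports changed on every run.
+- name: Snapshot /@ to /@snapshots/current, replacing any previous snapshot
+ community.general.btrfs_subvolume:
+ name: /@snapshots/current
+ snapshot_source: /@
+ snapshot_conflict: clobber
+ filesystem_device: /dev/vda2
+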
+'''
+
+RETURN = r'''
+
+filesystem:
+ description:
+ - A summary of the final state of the targeted btrfs filesystem.
+ type: dict
+ returned: success
+ contains:
+ uuid:
+ description: A unique identifier assigned to the filesystem.
+ returned: success
+ type: str
+ sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+ label:
+ description: An optional label assigned to the filesystem.
+ returned: success
+ type: str
+ sample: Tank
+ devices:
+ description: A list of devices assigned to the filesystem.
+ returned: success
+ type: list
+ sample:
+ - /dev/sda1
+ - /dev/sdb1
+ default_subvolume:
+ description: The ID of the filesystem's default subvolume.
+ returned: success and if filesystem is mounted
+ type: int
+ sample: 5
+ subvolumes:
+ description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+ returned: success and if filesystem is mounted
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: An identifier assigned to the subvolume, unique within the containing filesystem.
+ type: int
+ sample: 256
+ mountpoints:
+ description: Paths where the subvolume is mounted on the targeted host.
+ type: list
+ sample: ['/home']
+ parent:
+ description: The identifier of this subvolume's parent.
+ type: int
+ sample: 5
+ path:
+ description: The full path of the subvolume relative to the btrfs filesystem's root.
+ type: str
+ sample: /@home
+
+modifications:
+ description:
+ - A list where each element describes a change made to the target btrfs filesystem.
+ type: list
+ returned: success
+ elements: str
+
+target_subvolume_id:
+ description:
+ - The ID of the subvolume specified with the I(name) parameter, either pre-existing or created as part of module execution.
+ type: int
+ sample: 257
+ returned: success and subvolume exists after module execution
+'''
+
+from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException
+from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path
+from ansible.module_utils.basic import AnsibleModule
+import os
+import tempfile
+
+
+class BtrfsSubvolumeModule(object):
+
+ __BTRFS_ROOT_SUBVOLUME = '/'
+ __BTRFS_ROOT_SUBVOLUME_ID = 5
+ __BTRFS_SUBVOLUME_INODE_NUMBER = 256
+
+ __CREATE_SUBVOLUME_OPERATION = 'create'
+ __CREATE_SNAPSHOT_OPERATION = 'snapshot'
+ __DELETE_SUBVOLUME_OPERATION = 'delete'
+ __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default'
+
+ __UNKNOWN_SUBVOLUME_ID = '?'
+
+ def __init__(self, module):
+ self.module = module
+ self.__btrfs_api = BtrfsCommands(module)
+ self.__provider = BtrfsFilesystemsProvider(module)
+
+ # module parameters
+ name = self.module.params['name']
+ self.__name = normalize_subvolume_path(name) if name is not None else None
+ self.__state = self.module.params['state']
+
+ self.__automount = self.module.params['automount']
+ self.__default = self.module.params['default']
+ self.__filesystem_device = self.module.params['filesystem_device']
+ self.__filesystem_label = self.module.params['filesystem_label']
+ self.__filesystem_uuid = self.module.params['filesystem_uuid']
+ self.__recursive = self.module.params['recursive']
+ self.__snapshot_conflict = self.module.params['snapshot_conflict']
+ snapshot_source = self.module.params['snapshot_source']
+ self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None
+
+ # execution state
+ self.__filesystem = None
+ self.__required_mounts = []
+ self.__unit_of_work = []
+ self.__completed_work = []
+ self.__temporary_mounts = dict()
+
+ def run(self):
+ error = None
+ try:
+ self.__load_filesystem()
+ self.__prepare_unit_of_work()
+
+ if not self.module.check_mode:
+ # check required mounts & mount
+ if len(self.__unit_of_work) > 0:
+ self.__execute_unit_of_work()
+ self.__filesystem.refresh()
+ else:
+ # check required mounts
+ self.__completed_work.extend(self.__unit_of_work)
+ except Exception as e:
+ error = e
+ finally:
+ self.__cleanup_mounts()
+ if self.__filesystem is not None:
+ self.__filesystem.refresh_mountpoints()
+
+ return (error, self.get_results())
+
+ # Identify the targeted filesystem and obtain the current state
+ def __load_filesystem(self):
+ if self.__has_filesystem_criteria():
+ filesystem = self.__find_matching_filesystem()
+ else:
+ filesystem = self.__find_default_filesystem()
+
+ # The filesystem must be mounted to obtain the current state (subvolumes, default, etc)
+ if not filesystem.is_mounted():
+ if not self.__automount:
+ raise BtrfsModuleException(
+ "Target filesystem uuid=%s is not currently mounted and automount=False."
+ "Mount explicitly before module execution or pass automount=True" % filesystem.uuid)
+ elif self.module.check_mode:
+ # TODO is failing the module an appropriate outcome in this scenario?
+ raise BtrfsModuleException(
+ "Target filesystem uuid=%s is not currently mounted. Unable to validate the current"
+ "state while running with check_mode=True" % filesystem.uuid)
+ else:
+ self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID)
+ filesystem.refresh()
+ self.__filesystem = filesystem
+
+ def __has_filesystem_criteria(self):
+ return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None
+
+ def __find_matching_filesystem(self):
+ criteria = {
+ 'uuid': self.__filesystem_uuid,
+ 'label': self.__filesystem_label,
+ 'device': self.__filesystem_device,
+ }
+ return self.__provider.get_matching_filesystem(criteria)
+
+ def __find_default_filesystem(self):
+ filesystems = self.__provider.get_filesystems()
+ filesystem = None
+
+ if len(filesystems) == 1:
+ filesystem = filesystems[0]
+ else:
+ mounted_filesystems = [x for x in filesystems if x.is_mounted()]
+ if len(mounted_filesystems) == 1:
+ filesystem = mounted_filesystems[0]
+
+ if filesystem is not None:
+ return filesystem
+ else:
+ raise BtrfsModuleException(
+ "Failed to automatically identify targeted filesystem. "
+ "No explicit device indicated and found %d available filesystems." % len(filesystems)
+ )
+
+ # Prepare unit of work
+ def __prepare_unit_of_work(self):
+ if self.__state == "present":
+ if self.__snapshot_source is None:
+ self.__prepare_subvolume_present()
+ else:
+ self.__prepare_snapshot_present()
+
+ if self.__default:
+ self.__prepare_set_default()
+ elif self.__state == "absent":
+ self.__prepare_subvolume_absent()
+
+ def __prepare_subvolume_present(self):
+ subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+ if subvolume is None:
+ self.__prepare_before_create_subvolume(self.__name)
+ self.__stage_create_subvolume(self.__name)
+
+ def __prepare_before_create_subvolume(self, subvolume_name):
+ closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name)
+ self.__stage_required_mount(closest_parent)
+ if self.__recursive:
+ self.__prepare_create_intermediates(closest_parent, subvolume_name)
+
+ def __prepare_create_intermediates(self, closest_subvolume, subvolume_name):
+ relative_path = closest_subvolume.get_child_relative_path(self.__name)
+ missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0]
+ if len(missing_subvolumes) > 1:
+ current = closest_subvolume.path
+ for s in missing_subvolumes[:-1]:
+ separator = os.path.sep if current[-1] != os.path.sep else ""
+ current = current + separator + s
+ self.__stage_create_subvolume(current, True)
+
+ def __prepare_snapshot_present(self):
+ source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source)
+ subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+ subvolume_exists = subvolume is not None
+
+ if subvolume_exists:
+ if self.__snapshot_conflict == "skip":
+ # No change required
+ return
+ elif self.__snapshot_conflict == "error":
+ raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name)
+
+ if source_subvolume is None:
+ raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source)
+ elif subvolume is not None and source_subvolume.id == subvolume.id:
+ raise BtrfsModuleException("Snapshot source and target are the same.")
+ else:
+ self.__stage_required_mount(source_subvolume)
+
+ if subvolume_exists and self.__snapshot_conflict == "clobber":
+ self.__prepare_delete_subvolume_tree(subvolume)
+ elif not subvolume_exists:
+ self.__prepare_before_create_subvolume(self.__name)
+
+ self.__stage_create_snapshot(source_subvolume, self.__name)
+
+ def __prepare_subvolume_absent(self):
+ subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+ if subvolume is not None:
+ self.__prepare_delete_subvolume_tree(subvolume)
+
+ def __prepare_delete_subvolume_tree(self, subvolume):
+ if subvolume.is_filesystem_root():
+ raise BtrfsModuleException("Can not delete the filesystem's root subvolume")
+ if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0:
+ raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False."
+ "Either explicitly delete the child subvolumes first or pass "
+ "parameter recursive=True." % subvolume.path)
+
+ self.__stage_required_mount(subvolume.get_parent_subvolume())
+ queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume]
+ # prepare unit of work
+ for s in queue:
+ if s.is_mounted():
+ # TODO potentially unmount the subvolume if automount=True ?
+ raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path)
+ if s.is_filesystem_default():
+ self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID)
+ self.__stage_delete_subvolume(s)
+
+ def __prepare_recursive_delete_order(self, subvolume):
+ """Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors"""
+ pending = [subvolume]
+ ordered = []
+ while len(pending) > 0:
+ current = pending.pop()
+ ordered.append(current)
+ pending.extend(current.get_child_subvolumes())
+ ordered.reverse() # reverse to ensure children are deleted before their parent
+ return ordered
+
+ def __prepare_set_default(self):
+ subvolume = self.__filesystem.get_subvolume_by_name(self.__name)
+ subvolume_id = subvolume.id if subvolume is not None else None
+
+ if self.__filesystem.default_subvolid != subvolume_id:
+ self.__stage_set_default_subvolume(self.__name, subvolume_id)
+
+ # Stage operations to the unit of work
+ def __stage_required_mount(self, subvolume):
+ if subvolume.get_mounted_path() is None:
+ if self.__automount:
+ self.__required_mounts.append(subvolume)
+ else:
+ raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path)
+
+ def __stage_create_subvolume(self, subvolume_path, intermediate=False):
+ """
+ Add creation of a subvolume to the unit of work.
+ If intermediate is true, the action will be skipped if a directory-like file is found at the target
+ after mounting a parent subvolume.
+ """
+ self.__unit_of_work.append({
+ 'action': self.__CREATE_SUBVOLUME_OPERATION,
+ 'target': subvolume_path,
+ 'intermediate': intermediate,
+ })
+
+ def __stage_create_snapshot(self, source_subvolume, target_subvolume_path):
+ """Add creation of a snapshot from source to target to the unit of work"""
+ self.__unit_of_work.append({
+ 'action': self.__CREATE_SNAPSHOT_OPERATION,
+ 'source': source_subvolume.path,
+ 'source_id': source_subvolume.id,
+ 'target': target_subvolume_path,
+ })
+
+ def __stage_delete_subvolume(self, subvolume):
+ """Add deletion of the target subvolume to the unit of work"""
+ self.__unit_of_work.append({
+ 'action': self.__DELETE_SUBVOLUME_OPERATION,
+ 'target': subvolume.path,
+ 'target_id': subvolume.id,
+ })
+
+ def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None):
+ """Add update of the filesystem's default subvolume to the unit of work"""
+ self.__unit_of_work.append({
+ 'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION,
+ 'target': subvolume_path,
+ 'target_id': subvolume_id,
+ })
+
+ # Execute the unit of work
+ def __execute_unit_of_work(self):
+ self.__check_required_mounts()
+ for op in self.__unit_of_work:
+ if op['action'] == self.__CREATE_SUBVOLUME_OPERATION:
+ self.__execute_create_subvolume(op)
+ elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION:
+ self.__execute_create_snapshot(op)
+ elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION:
+ self.__execute_delete_subvolume(op)
+ elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
+ self.__execute_set_default_subvolume(op)
+ else:
+ raise ValueError("Unknown operation type '%s'" % op['action'])
+
+ def __execute_create_subvolume(self, operation):
+ target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+ if not self.__is_existing_directory_like(target_mounted_path):
+ self.__btrfs_api.subvolume_create(target_mounted_path)
+ self.__completed_work.append(operation)
+
+ def __execute_create_snapshot(self, operation):
+ source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source'])
+ source_mounted_path = source_subvolume.get_mounted_path()
+ target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+
+ self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path)
+ self.__completed_work.append(operation)
+
+ def __execute_delete_subvolume(self, operation):
+ target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target'])
+ self.__btrfs_api.subvolume_delete(target_mounted_path)
+ self.__completed_work.append(operation)
+
+ def __execute_set_default_subvolume(self, operation):
+ target = operation['target']
+ target_id = operation['target_id']
+
+ if target_id is None:
+ target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+
+ if target_subvolume is None:
+ self.__filesystem.refresh() # the target may have been created earlier in module execution
+ target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+
+ if target_subvolume is None:
+ raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target)
+ else:
+ target_id = target_subvolume.id
+
+ self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id)
+ self.__completed_work.append(operation)
+
+ def __is_existing_directory_like(self, path):
+ return os.path.exists(path) and (
+ os.path.isdir(path) or
+ os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER
+ )
+
+ def __check_required_mounts(self):
+ filtered = self.__filter_child_subvolumes(self.__required_mounts)
+ if len(filtered) > 0:
+ for subvolume in filtered:
+ self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id)
+ self.__filesystem.refresh_mountpoints()
+
+ def __filter_child_subvolumes(self, subvolumes):
+ """Filter the provided list of subvolumes to remove any that are a child of another item in the list"""
+ filtered = []
+ last = None
+ ordered = sorted(subvolumes, key=lambda x: x.path)
+ for subvolume in ordered:
+ # a plain prefix test would wrongly treat siblings like '/@b' as children
+ # of '/@', so require an exact match or a path-separator boundary
+ covered = last is not None and (subvolume.path == last or subvolume.path.startswith(last.rstrip(os.path.sep) + os.path.sep))
+ if not covered:
+ filtered.append(subvolume)
+ last = subvolume.path
+ return filtered
+
+ # Create/cleanup temporary mountpoints
+ def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid):
+ # this check should be redundant
+ if self.module.check_mode or not self.__automount:
+ raise BtrfsModuleException("Unable to temporarily mount required subvolumes"
+ "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode))
+
+ cache_key = "%s:%d" % (filesystem.uuid, subvolid)
+ # The subvolume was already mounted, so return the current path
+ if cache_key in self.__temporary_mounts:
+ return self.__temporary_mounts[cache_key]
+
+ device = filesystem.devices[0]
+ mountpoint = tempfile.mkdtemp(dir="/tmp")
+ self.__temporary_mounts[cache_key] = mountpoint
+
+ mount = self.module.get_bin_path("mount", required=True)
+ command = "%s -o noatime,subvolid=%d %s %s " % (mount,
+ subvolid,
+ device,
+ mountpoint)
+ result = self.module.run_command(command, check_rc=True)
+
+ return mountpoint
+
+ def __cleanup_mounts(self):
+ for key in self.__temporary_mounts.keys():
+ self.__cleanup_mount(self.__temporary_mounts[key])
+
+ def __cleanup_mount(self, mountpoint):
+ umount = self.module.get_bin_path("umount", required=True)
+ result = self.module.run_command("%s %s" % (umount, mountpoint))
+ if result[0] == 0:
+ rmdir = self.module.get_bin_path("rmdir", required=True)
+ self.module.run_command("%s %s" % (rmdir, mountpoint))
+
+ # Format and return results
+ def get_results(self):
+ target = self.__filesystem.get_subvolume_by_name(self.__name)
+ return dict(
+ changed=len(self.__completed_work) > 0,
+ filesystem=self.__filesystem.get_summary(),
+ modifications=self.__get_formatted_modifications(),
+ target_subvolume_id=(target.id if target is not None else None)
+ )
+
+ def __get_formatted_modifications(self):
+ return [self.__format_operation_result(op) for op in self.__completed_work]
+
+ def __format_operation_result(self, operation):
+ action_type = operation['action']
+ if action_type == self.__CREATE_SUBVOLUME_OPERATION:
+ return self.__format_create_subvolume_result(operation)
+ elif action_type == self.__CREATE_SNAPSHOT_OPERATION:
+ return self.__format_create_snapshot_result(operation)
+ elif action_type == self.__DELETE_SUBVOLUME_OPERATION:
+ return self.__format_delete_subvolume_result(operation)
+ elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION:
+ return self.__format_set_default_subvolume_result(operation)
+ else:
+ raise ValueError("Unknown operation type '%s'" % operation['action'])
+
+ def __format_create_subvolume_result(self, operation):
+ target = operation['target']
+ target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+ target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
+ return "Created subvolume '%s' (%s)" % (target, target_id)
+
+ def __format_create_snapshot_result(self, operation):
+ source = operation['source']
+ source_id = operation['source_id']
+
+ target = operation['target']
+ target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+ target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
+ return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id)
+
+ def __format_delete_subvolume_result(self, operation):
+ target = operation['target']
+ target_id = operation['target_id']
+ return "Deleted subvolume '%s' (%s)" % (target, target_id)
+
+ def __format_set_default_subvolume_result(self, operation):
+ target = operation['target']
+ if 'target_id' in operation:
+ target_id = operation['target_id']
+ else:
+ target_subvolume = self.__filesystem.get_subvolume_by_name(target)
+ target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID
+ return "Updated default subvolume to '%s' (%s)" % (target, target_id)
+
+
+def run_module():
+ module_args = dict(
+ automount=dict(type='bool', required=False, default=False),
+ default=dict(type='bool', required=False, default=False),
+ filesystem_device=dict(type='path', required=False),
+ filesystem_label=dict(type='str', required=False),
+ filesystem_uuid=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ recursive=dict(type='bool', default=False),
+ state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
+ snapshot_source=dict(type='str', required=False),
+ snapshot_conflict=dict(type='str', required=False, default='skip', choices=['skip', 'clobber', 'error'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ subvolume = BtrfsSubvolumeModule(module)
+ error, result = subvolume.run()
+ if error is not None:
+ module.fail_json(msg=str(error), **result)
+ else:
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bundler.py b/ansible_collections/community/general/plugins/modules/bundler.py
new file mode 100644
index 000000000..682dd334a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bundler.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Tim Hoiberg <tim.hoiberg@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: bundler
+short_description: Manage Ruby Gem dependencies with Bundler
+description:
+ - Manage installation and Gem version dependencies for Ruby using the Bundler gem.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ executable:
+ type: str
+ description:
+ - The path to the bundler executable.
+ state:
+ type: str
+ description:
+ - The desired state of the Gem bundle. C(latest) updates gems to the most recent acceptable version.
+ choices: [present, latest]
+ default: present
+ chdir:
+ type: path
+ description:
+ - The directory to execute the bundler commands from. This directory
+ needs to contain a valid Gemfile or .bundle/ directory.
+ - If not specified, it will default to the temporary working directory.
+ exclude_groups:
+ type: list
+ elements: str
+ description:
+ - A list of Gemfile groups to exclude during operations. This only
+ applies when state is C(present). Bundler considers this
+ a 'remembered' property for the Gemfile and will automatically exclude
+ groups in future operations even if C(exclude_groups) is not set.
+ clean:
+ description:
+ - Only applies if state is C(present). If set, removes any gems on the
+ target host that are not in the gemfile.
+ type: bool
+ default: false
+ gemfile:
+ type: path
+ description:
+ - Only applies if state is C(present). The path to the gemfile to use to install gems.
+ - If not specified it will default to the Gemfile in current directory
+ local:
+ description:
+ - If set, only installs gems from the cache on the target host.
+ type: bool
+ default: false
+ deployment_mode:
+ description:
+ - Only applies if state is C(present). If set, it will install gems in
+ ./vendor/bundle instead of the default location. Requires a Gemfile.lock
+ file to have been created beforehand.
+ type: bool
+ default: false
+ user_install:
+ description:
+ - Only applies if state is C(present). Installs gems in the local user's cache or for all users.
+ type: bool
+ default: true
+ gem_path:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install the gems into. If C(chdir) is set then this path is relative to
+ C(chdir).
+ - If not specified, the default RubyGems gem paths will be used.
+ binstub_directory:
+ type: path
+ description:
+ - Only applies if state is C(present). Specifies the directory to
+ install any gem bin files to. When executed, the bin files will run
+ within the context of the Gemfile and fail if any required gem
+ dependencies are not installed. If C(chdir) is set then this path is
+ relative to C(chdir).
+ extra_args:
+ type: str
+ description:
+ - A space separated string of additional commands that can be applied to
+ the Bundler command. Refer to the Bundler documentation for more
+ information.
+author: "Tim Hoiberg (@thoiberg)"
+'''
+
+EXAMPLES = '''
+- name: Install gems from a Gemfile in the current directory
+ community.general.bundler:
+ state: present
+ executable: ~/.rvm/gems/2.1.5/bin/bundle
+
+- name: Exclude the production group from installing
+ community.general.bundler:
+ state: present
+ exclude_groups: production
+
+- name: Install gems into ./vendor/bundle
+ community.general.bundler:
+ state: present
+ deployment_mode: true
+
+- name: Install gems using a Gemfile in another directory
+ community.general.bundler:
+ state: present
+ gemfile: ../rails_project/Gemfile
+
+- name: Update Gemfile in another directory
+ community.general.bundler:
+ state: latest
+ chdir: ~/rails_project
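+
+# A hedged sketch: exclude several Gemfile groups at install time. Bundler
+# treats the exclusions as 'remembered' and keeps applying them on later runs
+# even when exclude_groups is not set.
+- name: Install gems, excluding the development and test groups
+ community.general.bundler:
+ state: present
+ exclude_groups:
+ - development
+ - test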
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_bundler_executable(module):
+ if module.params.get('executable'):
+ result = module.params.get('executable').split(' ')
+ else:
+ result = [module.get_bin_path('bundle', True)]
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(default=None, required=False),
+ state=dict(default='present', required=False, choices=['present', 'latest']),
+ chdir=dict(default=None, required=False, type='path'),
+ exclude_groups=dict(default=None, required=False, type='list', elements='str'),
+ clean=dict(default=False, required=False, type='bool'),
+ gemfile=dict(default=None, required=False, type='path'),
+ local=dict(default=False, required=False, type='bool'),
+ deployment_mode=dict(default=False, required=False, type='bool'),
+ user_install=dict(default=True, required=False, type='bool'),
+ gem_path=dict(default=None, required=False, type='path'),
+ binstub_directory=dict(default=None, required=False, type='path'),
+ extra_args=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params.get('state')
+ chdir = module.params.get('chdir')
+ exclude_groups = module.params.get('exclude_groups')
+ clean = module.params.get('clean')
+ gemfile = module.params.get('gemfile')
+ local = module.params.get('local')
+ deployment_mode = module.params.get('deployment_mode')
+ user_install = module.params.get('user_install')
+ gem_path = module.params.get('gem_path')
+ binstub_directory = module.params.get('binstub_directory')
+ extra_args = module.params.get('extra_args')
+
+ cmd = get_bundler_executable(module)
+
+ if module.check_mode:
+ cmd.append('check')
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=False)
+
+ module.exit_json(changed=rc != 0, state=state, stdout=out, stderr=err)
+
+ if state == 'present':
+ cmd.append('install')
+ if exclude_groups:
+ cmd.extend(['--without', ':'.join(exclude_groups)])
+ if clean:
+ cmd.append('--clean')
+ if gemfile:
+ cmd.extend(['--gemfile', gemfile])
+ if local:
+ cmd.append('--local')
+ if deployment_mode:
+ cmd.append('--deployment')
+ if not user_install:
+ cmd.append('--system')
+ if gem_path:
+ cmd.extend(['--path', gem_path])
+ if binstub_directory:
+ cmd.extend(['--binstubs', binstub_directory])
+ else:
+ cmd.append('update')
+ if local:
+ cmd.append('--local')
+
+ if extra_args:
+ cmd.extend(extra_args.split(' '))
+
+ rc, out, err = module.run_command(cmd, cwd=chdir, check_rc=True)
+
+ module.exit_json(changed='Installing' in out, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/bzr.py b/ansible_collections/community/general/plugins/modules/bzr.py
new file mode 100644
index 000000000..e7aca7c6b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/bzr.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, André Paramés <git@andreparames.com>
+# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: bzr
+author:
+- André Paramés (@andreparames)
+short_description: Deploy software (or files) from bzr branches
+description:
+ - Manage I(bzr) branches to deploy files or software.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - SSH or HTTP protocol address of the parent branch.
+ aliases: [ parent ]
+ required: true
+ type: str
+ dest:
+ description:
+ - Absolute path of where the branch should be cloned to.
+ required: true
+ type: path
+ version:
+ description:
+ - What version of the branch to clone. This can be the
+ bzr revno or revid.
+ default: head
+ type: str
+ force:
+ description:
+ - If C(true), any modified files in the working
+ tree will be discarded. Before 1.9 the default
+ value was C(true).
+ type: bool
+ default: false
+ executable:
+ description:
+ - Path to bzr executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Checkout
+ community.general.bzr:
+ name: bzr+ssh://foosball.example.org/path/to/branch
+ dest: /srv/checkout
+ version: 22
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Bzr(object):
+ def __init__(self, module, parent, dest, version, bzr_path):
+ self.module = module
+ self.parent = parent
+ self.dest = dest
+ self.version = version
+ self.bzr_path = bzr_path
+
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
+ return (rc, out, err)
+
+ def get_version(self):
+ '''samples the version of the bzr branch'''
+
+ cmd = "%s revno" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
+ return revno
+
+ def clone(self):
+ '''makes a new bzr branch if it does not already exist'''
+ dest_dirname = os.path.dirname(self.dest)
+ try:
+ os.makedirs(dest_dirname)
+ except Exception:
+ pass
+ if self.version.lower() != 'head':
+ args_list = ["branch", "-r", self.version, self.parent, self.dest]
+ else:
+ args_list = ["branch", self.parent, self.dest]
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
+
+ def has_local_mods(self):
+
+ cmd = "%s status -S" % self.bzr_path
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
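+ # 'bzr status -S' emits one short-form line per change; lines matching '??'
+ # are treated here as unknown (unversioned) files, not local modifications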
+ lines = list(filter(lambda c: not re.search('^\\?\\?.*$', c), lines))
+ return len(lines) > 0
+
+ def reset(self, force):
+ '''
+ Resets the index and working tree to head.
+ Discards any changes to tracked files in the working
+ tree since that commit.
+ '''
+ if not force and self.has_local_mods():
+ self.module.fail_json(msg="Local modifications exist in branch (force=false).")
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
+
+ def fetch(self):
+ '''updates branch from remote sources'''
+ if self.version.lower() != 'head':
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
+ else:
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
+ if rc != 0:
+ self.module.fail_json(msg="Failed to pull")
+ return (rc, out, err)
+
+ def switch_version(self):
+ '''once pulled, switch to a particular revno or revid'''
+ if self.version.lower() != 'head':
+ args_list = ["revert", "-r", self.version]
+ else:
+ args_list = ["revert"]
+ return self._command(args_list, check_rc=True, cwd=self.dest)
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', required=True),
+ name=dict(type='str', required=True, aliases=['parent']),
+ version=dict(type='str', default='head'),
+ force=dict(type='bool', default=False),
+ executable=dict(type='str'),
+ )
+ )
+
+ dest = module.params['dest']
+ parent = module.params['name']
+ version = module.params['version']
+ force = module.params['force']
+ bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
+
+ bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
+
+ rc, out, err = (0, None, None)
+
+ bzr = Bzr(module, parent, dest, version, bzr_path)
+
+ # if there is no bzr configuration, do a branch operation
+ # else pull and switch the version
+ before = None
+ local_mods = False
+ if not os.path.exists(bzrconfig):
+ (rc, out, err) = bzr.clone()
+
+ else:
+ # else do a pull
+ local_mods = bzr.has_local_mods()
+ before = bzr.get_version()
+ (rc, out, err) = bzr.reset(force)
+ if rc != 0:
+ module.fail_json(msg=err)
+ (rc, out, err) = bzr.fetch()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ # switch to version specified regardless of whether
+ # we cloned or pulled
+ (rc, out, err) = bzr.switch_version()
+
+ # determine if we changed anything
+ after = bzr.get_version()
+ changed = False
+
+ if before != after or local_mods:
+ changed = True
+
+ module.exit_json(changed=changed, before=before, after=after)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/campfire.py b/ansible_collections/community/general/plugins/modules/campfire.py
new file mode 100644
index 000000000..1e0f1ecea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/campfire.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: campfire
+short_description: Send a message to Campfire
+description:
+ - Send a message to Campfire.
+ - Messages with newlines will result in a "Paste" message being sent.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ subscription:
+ type: str
+ description:
+ - The subscription name to use.
+ required: true
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - Room number to which the message should be sent.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ notify:
+ type: str
+ description:
+ - Send a notification sound before the message.
+ required: false
+ choices: ["56k", "bell", "bezos", "bueller", "clowntown",
+ "cottoneyejoe", "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama", "greatjob", "greyjoy",
+ "guarantee", "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins", "makeitso", "noooo",
+ "nyan", "ohmy", "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret", "sexyback",
+ "story", "tada", "tmyk", "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah", "yodel"]
+
+# informational: requirements for nodes
+requirements: [ ]
+author: "Adam Garside (@fabulops)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ msg: Task completed.
+
+- name: Send a message to Campfire
+ community.general.campfire:
+ subscription: foo
+ token: 12345
+ room: 123
+ notify: loggins
+ msg: Task completed ... with feeling.
+'''
+
+try:
+ from html import escape as html_escape
+except ImportError:
+ # html.escape is unavailable before Python 3.2; fall back to cgi.escape
+ import cgi
+
+ def html_escape(text, quote=True):
+ return cgi.escape(text, quote)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ subscription=dict(required=True),
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ notify=dict(required=False,
+ choices=["56k", "bell", "bezos", "bueller",
+ "clowntown", "cottoneyejoe",
+ "crickets", "dadgummit", "dangerzone",
+ "danielsan", "deeper", "drama",
+ "greatjob", "greyjoy", "guarantee",
+ "heygirl", "horn", "horror",
+ "inconceivable", "live", "loggins",
+ "makeitso", "noooo", "nyan", "ohmy",
+ "ohyeah", "pushit", "rimshot",
+ "rollout", "rumble", "sax", "secret",
+ "sexyback", "story", "tada", "tmyk",
+ "trololo", "trombone", "unix",
+ "vuvuzela", "what", "whoomp", "yeah",
+ "yodel"]),
+ ),
+ supports_check_mode=False
+ )
+
+ subscription = module.params["subscription"]
+ token = module.params["token"]
+ room = module.params["room"]
+ msg = module.params["msg"]
+ notify = module.params["notify"]
+
+ URI = "https://%s.campfirenow.com" % subscription
+ NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
+ MSTR = "<message><body>%s</body></message>"
+ AGENT = "Ansible/1.2"
+
+ # Hack to add basic auth username and password the way fetch_url expects
+ module.params['url_username'] = token
+ module.params['url_password'] = 'X'
+
+ target_url = '%s/room/%s/speak.xml' % (URI, room)
+ headers = {'Content-Type': 'application/xml',
+ 'User-agent': AGENT}
+
+ # Send some audible notification if requested
+ if notify:
+ response, info = fetch_url(module, target_url, data=NSTR % html_escape(notify), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (notify, info['status']))
+
+ # Send the message
+ response, info = fetch_url(module, target_url, data=MSTR % html_escape(msg), headers=headers)
+ if info['status'] not in [200, 201]:
+ module.fail_json(msg="unable to send msg: '%s', campfire api"
+ " returned error code: '%s'" %
+ (msg, info['status']))
+
+ module.exit_json(changed=True, room=room, msg=msg, notify=notify)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/capabilities.py b/ansible_collections/community/general/plugins/modules/capabilities.py
new file mode 100644
index 000000000..9b72ac6ea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/capabilities.py
@@ -0,0 +1,188 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+ - This module manipulates file privileges using the Linux capabilities(7) system.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ type: str
+ required: true
+ aliases: [ key ]
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)).
+ type: str
+ required: true
+ aliases: [ cap ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags into the effective set,
+ so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
+ - This module does not attempt to determine the final operator and flags to compare,
+ so you will want to ensure that your capabilities argument matches the final capabilities.
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Set cap_sys_chroot+ep on /foo
+ community.general.capabilities:
+ path: /foo
+ capability: cap_sys_chroot+ep
+ state: present
+
+- name: Remove cap_net_bind_service from /bar
+ community.general.capabilities:
+ path: /bar
+ capability: cap_net_bind_service
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+OPS = ('=', '-', '+')
+
+
+class CapabilitiesModule(object):
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [cap[0] for cap in current]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
+ # add new cap with correct op/flags
+ current.append(self.capability_tup)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+ # If the file does not exist, the stderr will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or stderr != "":
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append((subcap, op, flags))
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([''.join(cap) for cap in caps])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
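+ # e.g. _parse_cap('cap_net_raw+ep') -> ('cap_net_raw', '+', 'ep'); with
+ # op_required=False, a bare 'cap_net_raw' -> ('cap_net_raw', None, None)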
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except Exception:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+
+# ==============================================================
+# main
+
+def main():
+ # defining module
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='str', required=True, aliases=['key']),
+ capability=dict(type='str', required=True, aliases=['cap']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ CapabilitiesModule(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cargo.py b/ansible_collections/community/general/plugins/modules/cargo.py
new file mode 100644
index 000000000..24be43741
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cargo.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Radek Sprta <mail@radeksprta.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: cargo
+short_description: Manage Rust packages with cargo
+version_added: 4.3.0
+description:
+ - Manage Rust packages with cargo.
+author: "Radek Sprta (@radek-sprta)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of a Rust package to install.
+ type: list
+ elements: str
+ required: true
+ path:
+ description:
+ - >
+ The base path in which to install the Rust packages. Cargo automatically appends
+ C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin).
+ type: path
+ version:
+ description:
+ - >
+ The version to install. If I(name) contains multiple values, the module will
+ try to install all of them in this version.
+ type: str
+ required: false
+ state:
+ description:
+ - The state of the Rust package.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+ - cargo installed in bin path (recommended /usr/local/bin)
+"""
+
+EXAMPLES = r"""
+- name: Install "ludusavi" Rust package
+ community.general.cargo:
+ name: ludusavi
+
+- name: Install "ludusavi" Rust package in version 0.10.0
+ community.general.cargo:
+ name: ludusavi
+ version: '0.10.0'
+
+- name: Install "ludusavi" Rust package to global location
+ community.general.cargo:
+ name: ludusavi
+ path: /usr/local
+
+- name: Remove "ludusavi" Rust package
+ community.general.cargo:
+ name: ludusavi
+ state: absent
+
+- name: Update "ludusavi" Rust package its latest version
+ community.general.cargo:
+ name: ludusavi
+ state: latest
+"""
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Cargo(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.name = kwargs["name"]
+ self.path = kwargs["path"]
+ self.state = kwargs["state"]
+ self.version = kwargs["version"]
+
+ self.executable = [module.get_bin_path("cargo", True)]
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if path is not None and not os.path.isdir(path):
+ self.module.fail_json(msg="Path %s is not a directory" % path)
+ self._path = path
+
+ def _exec(
+ self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
+ ):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+ cmd = self.executable + args
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
+ return out, err
+ return "", ""
+
+ def get_installed(self):
+ cmd = ["install", "--list"]
+ data, dummy = self._exec(cmd, True, False, False)
+
+ package_regex = re.compile(r"^([\w\-]+) v(.+):$")
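+ # 'cargo install --list' prints header lines like 'ludusavi v0.10.0:' followed
+ # by indented binary names; only the header lines match this pattern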
+ installed = {}
+ for line in data.splitlines():
+ package_info = package_regex.match(line)
+ if package_info:
+ installed[package_info.group(1)] = package_info.group(2)
+
+ return installed
+
+ def install(self, packages=None):
+ cmd = ["install"]
+ cmd.extend(packages or self.name)
+ if self.path:
+ cmd.append("--root")
+ cmd.append(self.path)
+ if self.version:
+ cmd.append("--version")
+ cmd.append(self.version)
+ return self._exec(cmd)
+
+ def is_outdated(self, name):
+ installed_version = self.get_installed().get(name)
+
+ cmd = ["search", name, "--limit", "1"]
+ data, dummy = self._exec(cmd, True, False, False)
+
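+ # 'cargo search' output begins with a line like: ludusavi = "0.10.0" # <description>
+ # so the first quoted string is taken as the latest published version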
+ match = re.search(r'"(.+)"', data)
+ if match:
+ latest_version = match.group(1)
+
+ return installed_version != latest_version
+
+ def uninstall(self, packages=None):
+ cmd = ["uninstall"]
+ cmd.extend(packages or self.name)
+ return self._exec(cmd)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True, type="list", elements="str"),
+ path=dict(default=None, type="path"),
+ state=dict(default="present", choices=["present", "absent", "latest"]),
+ version=dict(default=None, type="str"),
+ )
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params["name"]
+ path = module.params["path"]
+ state = module.params["state"]
+ version = module.params["version"]
+
+ if not name:
+ module.fail_json(msg="Package name must be specified")
+
+ # Set LANG env since we parse stdout
+ module.run_command_environ_update = dict(
+ LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
+ )
+
+ cargo = Cargo(module, name=name, path=path, state=state, version=version)
+ changed, out, err = False, None, None
+ installed_packages = cargo.get_installed()
+ if state == "present":
+ to_install = [
+ n
+ for n in name
+ if (n not in installed_packages)
+ or (version and version != installed_packages[n])
+ ]
+ if to_install:
+ changed = True
+ out, err = cargo.install(to_install)
+ elif state == "latest":
+ to_update = [
+ n for n in name if n not in installed_packages or cargo.is_outdated(n)
+ ]
+ if to_update:
+ changed = True
+ out, err = cargo.install(to_update)
+ else: # absent
+ to_uninstall = [n for n in name if n in installed_packages]
+ if to_uninstall:
+ changed = True
+ out, err = cargo.uninstall(to_uninstall)
+
+ module.exit_json(changed=changed, stdout=out, stderr=err)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/catapult.py b/ansible_collections/community/general/plugins/modules/catapult.py
new file mode 100644
index 000000000..a3bbef6c4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/catapult.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Jonathan Mainguy <jon@soh.re>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# basis of code taken from the ansible twillio and nexmo modules
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: catapult
+short_description: Send an SMS / MMS using the Catapult Bandwidth API
+description:
+ - Allows notifications to be sent using SMS / MMS via the Catapult Bandwidth API.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ src:
+ type: str
+ description:
+ - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)).
+ required: true
+ dest:
+ type: list
+ elements: str
+ description:
+ - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)).
+ required: true
+ msg:
+ type: str
+ description:
+ - The contents of the text message (must be 2048 characters or less).
+ required: true
+ media:
+ type: str
+ description:
+ - For MMS messages, a media url to the location of the media to be sent with the message.
+  user_id:
+    type: str
+    description:
+      - User ID from the API account page.
+    required: true
+  api_token:
+    type: str
+    description:
+      - API token from the API account page.
+    required: true
+  api_secret:
+    type: str
+    description:
+      - API secret from the API account page.
+    required: true
+
+author: "Jonathan Mainguy (@Jmainguy)"
+notes:
+ - Will return changed even if the media url is wrong.
+ - Will return changed if the destination number is invalid.
+
+'''
+
+EXAMPLES = '''
+- name: Send a mms to multiple users
+ community.general.catapult:
+ src: "+15035555555"
+ dest:
+ - "+12525089000"
+ - "+12018994225"
+ media: "http://example.com/foobar.jpg"
+ msg: "Task is complete"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+- name: Send a sms to a single user
+ community.general.catapult:
+ src: "+15035555555"
+ dest: "+12018994225"
+ msg: "Consider yourself notified"
+ user_id: "{{ user_id }}"
+ api_token: "{{ api_token }}"
+ api_secret: "{{ api_secret }}"
+
+'''
+
+RETURN = '''
+changed:
+  description: Whether the API accepted the message.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def send(module, src, dest, msg, media, user_id, api_token, api_secret):
+ """
+ Send the message
+ """
+ AGENT = "Ansible"
+ URI = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
+ data = {'from': src, 'to': dest, 'text': msg}
+ if media:
+ data['media'] = media
+
+ headers = {'User-Agent': AGENT, 'Content-type': 'application/json'}
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = api_token.replace('\n', '')
+ module.params['url_password'] = api_secret.replace('\n', '')
+
+ return fetch_url(module, URI, data=json.dumps(data), headers=headers, method="post")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(required=True),
+ dest=dict(required=True, type='list', elements='str'),
+ msg=dict(required=True),
+ user_id=dict(required=True),
+ api_token=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ media=dict(default=None, required=False),
+ ),
+ )
+
+ src = module.params['src']
+ dest = module.params['dest']
+ msg = module.params['msg']
+ media = module.params['media']
+ user_id = module.params['user_id']
+ api_token = module.params['api_token']
+ api_secret = module.params['api_secret']
+
+ for number in dest:
+ rc, info = send(module, src, number, msg, media, user_id, api_token, api_secret)
+ if info["status"] != 201:
+ body = json.loads(info["body"])
+ fail_msg = body["message"]
+ module.fail_json(msg=fail_msg)
+
+ changed = True
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
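
Because `fetch_url` takes no direct Basic-auth arguments here, `send()` above injects `url_username`/`url_password` into `module.params` and lets `fetch_url` build the `Authorization` header. A rough standalone sketch of the request that ends up on the wire (the explicit header construction is an assumption about how Basic auth is encoded, not code from this patch):

    import base64
    import json

    def build_catapult_request(user_id, api_token, api_secret, src, dest, msg, media=None):
        # mirrors send() above: JSON body plus HTTP Basic auth from the API token/secret
        url = "https://api.catapult.inetwork.com/v1/users/%s/messages" % user_id
        body = {"from": src, "to": dest, "text": msg}
        if media:
            body["media"] = media
        credentials = "%s:%s" % (api_token.replace("\n", ""), api_secret.replace("\n", ""))
        headers = {
            "User-Agent": "Ansible",
            "Content-type": "application/json",
            # fetch_url derives an equivalent header from url_username/url_password
            "Authorization": "Basic " + base64.b64encode(credentials.encode()).decode(),
        }
        return url, json.dumps(body), headers

    url, body, headers = build_catapult_request(
        "u-1", "tok", "sec", "+15035555555", "+12018994225", "hi")
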
diff --git a/ansible_collections/community/general/plugins/modules/circonus_annotation.py b/ansible_collections/community/general/plugins/modules/circonus_annotation.py
new file mode 100644
index 000000000..937610776
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/circonus_annotation.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2014-2015, Epic Games, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: circonus_annotation
+short_description: Create an annotation in Circonus
+description:
+    - Create an annotation event with a given category, title and description. Optionally, start and end timestamps or a duration can be provided.
+author: "Nick Harring (@NickatEpic)"
+requirements:
+ - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
+notes:
+ - Check mode isn't supported.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ api_key:
+ type: str
+ description:
+ - Circonus API key
+ required: true
+ category:
+ type: str
+ description:
+ - Annotation Category
+ required: true
+ description:
+ type: str
+ description:
+ - Description of annotation
+ required: true
+ title:
+ type: str
+ description:
+ - Title of annotation
+ required: true
+ start:
+ type: int
+ description:
+ - Unix timestamp of event start
+ - If not specified, it defaults to I(now).
+ stop:
+ type: int
+ description:
+ - Unix timestamp of event end
+ - If not specified, it defaults to I(now) + I(duration).
+ duration:
+ type: int
+ description:
+ - Duration in seconds of annotation
+ default: 0
+'''
+EXAMPLES = '''
+- name: Create a simple annotation event, defaults to start and end time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+
+- name: Create an annotation with a duration of 5 minutes and a default start time of now
+ community.general.circonus_annotation:
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ description: This is a detailed description of the config change
+ category: This category groups like annotations
+ duration: 300
+
+- name: Create an annotation with explicit start and stop timestamps
+  community.general.circonus_annotation:
+    api_key: XXXXXXXXXXXXXXXXX
+    title: App Config Change
+    description: This is a detailed description of the config change
+    category: This category groups like annotations
+    start: 1395940006
+    stop: 1395954407
+'''
+
+RETURN = '''
+annotation:
+ description: details about the created annotation
+ returned: success
+ type: complex
+ contains:
+ _cid:
+ description: annotation identifier
+ returned: success
+ type: str
+ sample: /annotation/100000
+ _created:
+ description: creation timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified:
+ description: last modification timestamp
+ returned: success
+ type: int
+ sample: 1502236928
+ _last_modified_by:
+ description: last modified by
+ returned: success
+ type: str
+ sample: /user/1000
+ category:
+ description: category of the created annotation
+ returned: success
+ type: str
+ sample: alerts
+ title:
+ description: title of the created annotation
+ returned: success
+ type: str
+ sample: WARNING
+ description:
+ description: description of the created annotation
+ returned: success
+ type: str
+ sample: Host is down.
+        start:
+            description: Unix timestamp at which the annotation starts
+            returned: success
+            type: int
+            sample: 1502236928
+        stop:
+            description: Unix timestamp at which the annotation ends
+            returned: success
+            type: int
+            sample: 1502236928
+ rel_metrics:
+            description: Array of metrics related to this annotation, each metric is a string.
+ returned: success
+ type: list
+ sample:
+ - 54321_kbps
+'''
+import json
+import time
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+ HAS_REQUESTS = True
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ HAS_REQUESTS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six import PY3
+from ansible.module_utils.common.text.converters import to_native
+
+
+def check_requests_dep(module):
+ """Check if an adequate requests version is available"""
+ if not HAS_REQUESTS:
+ module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ else:
+ required_version = '2.0.0' if PY3 else '1.0.0'
+ if LooseVersion(requests.__version__) < LooseVersion(required_version):
+ module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
+
+
+def post_annotation(annotation, api_key):
+ ''' Takes annotation dict and api_key string'''
+ base_url = 'https://api.circonus.com/v2'
+    annotate_post_endpoint = '/annotation'
+    resp = requests.post(base_url + annotate_post_endpoint,
+ headers=build_headers(api_key), data=json.dumps(annotation))
+ resp.raise_for_status()
+ return resp
+
+
+def create_annotation(module):
+ ''' Takes ansible module object '''
+ annotation = {}
+ duration = module.params['duration']
+ if module.params['start'] is not None:
+ start = module.params['start']
+ else:
+ start = int(time.time())
+ if module.params['stop'] is not None:
+ stop = module.params['stop']
+ else:
+ stop = int(time.time()) + duration
+ annotation['start'] = start
+ annotation['stop'] = stop
+ annotation['category'] = module.params['category']
+ annotation['description'] = module.params['description']
+ annotation['title'] = module.params['title']
+ return annotation
+
+
+def build_headers(api_token):
+ '''Takes api token, returns headers with it included.'''
+ headers = {'X-Circonus-App-Name': 'ansible',
+ 'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
+ 'Accept': 'application/json'}
+ return headers
+
+
+def main():
+ '''Main function, dispatches logic'''
+ module = AnsibleModule(
+ argument_spec=dict(
+ start=dict(type='int'),
+ stop=dict(type='int'),
+ category=dict(required=True),
+ title=dict(required=True),
+ description=dict(required=True),
+ duration=dict(default=0, type='int'),
+ api_key=dict(required=True, no_log=True)
+ )
+ )
+
+ check_requests_dep(module)
+
+ annotation = create_annotation(module)
+ try:
+ resp = post_annotation(annotation, module.params['api_key'])
+ except requests.exceptions.RequestException as e:
+ module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=True, annotation=resp.json())
+
+
+if __name__ == '__main__':
+ main()
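
One subtlety in `create_annotation` above: when only `duration` is given, both defaults are anchored at the current time, so `stop` is computed as now + duration rather than start + duration. A small standalone sketch of that defaulting:

    import time

    def annotation_window(start=None, stop=None, duration=0):
        # mirrors create_annotation: both defaults come from the current clock
        now = int(time.time())
        if start is None:
            start = now
        if stop is None:
            stop = now + duration  # note: now + duration, not start + duration
        return start, stop

    # with only a duration, the window is [now, now + 300]
    print(annotation_window(duration=300))
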
diff --git a/ansible_collections/community/general/plugins/modules/cisco_webex.py b/ansible_collections/community/general/plugins/modules/cisco_webex.py
new file mode 100644
index 000000000..2e5cb50ea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cisco_webex.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cisco_webex
+short_description: Send a message to a Cisco Webex Teams Room or Individual
+description:
+ - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting.
+author: Drew Russell (@drew-russell)
+notes:
+  - The C(recipient_type) must be valid for the supplied C(recipient_id).
+ - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics).
+
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ recipient_type:
+ description:
+      - The type of recipient to send the message to.
+ - Messages can be sent to either a room or individual (by ID or E-Mail).
+ required: true
+ choices: ['roomId', 'toPersonEmail', 'toPersonId']
+ type: str
+
+ recipient_id:
+ description:
+ - The unique identifier associated with the supplied C(recipient_type).
+ required: true
+ type: str
+
+ msg_type:
+ description:
+ - Specifies how you would like the message formatted.
+ default: text
+ choices: ['text', 'markdown']
+ type: str
+ aliases: ['message_type']
+
+ personal_token:
+ description:
+      - Your personal access token required to authenticate to the Webex Teams API.
+ required: true
+ aliases: ['token']
+ type: str
+
+ msg:
+ description:
+ - The message you would like to send.
+ required: true
+ type: str
+'''
+
+EXAMPLES = """
+# Note: The following examples assume a variable file has been imported
+# that contains the appropriate information.
+
+- name: Cisco Webex Teams - Markdown Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: markdown
+ personal_token: "{{ token }}"
+ msg: "**Cisco Webex Teams Ansible Module - Room Message in Markdown**"
+
+- name: Cisco Webex Teams - Text Message to a Room
+ community.general.cisco_webex:
+ recipient_type: roomId
+ recipient_id: "{{ room_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Room Message in Text"
+
+- name: Cisco Webex Teams - Text Message by an Individual's ID
+  community.general.cisco_webex:
+    recipient_type: toPersonId
+    recipient_id: "{{ person_id }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by ID"
+
+- name: Cisco Webex Teams - Text Message by an Individual's E-Mail Address
+ community.general.cisco_webex:
+ recipient_type: toPersonEmail
+ recipient_id: "{{ person_email }}"
+ msg_type: text
+ personal_token: "{{ token }}"
+ msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail"
+
+"""
+
+RETURN = """
+status_code:
+ description:
+ - The Response Code returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: int
+ sample: 200
+
+message:
+ description:
+ - The Response Message returned by the Webex Teams API.
+ - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics).
+ returned: always
+ type: str
+ sample: OK (585 bytes)
+"""
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def webex_msg(module):
+ """When check mode is specified, establish a read only connection, that does not return any user specific
+ data, to validate connectivity. In regular mode, send a message to a Cisco Webex Teams Room or Individual"""
+
+ # Ansible Specific Variables
+ results = {}
+ ansible = module.params
+
+ headers = {
+ 'Authorization': 'Bearer {0}'.format(ansible['personal_token']),
+ 'content-type': 'application/json'
+ }
+
+ if module.check_mode:
+ url = "https://webexapis.com/v1/people/me"
+ payload = None
+
+ else:
+ url = "https://webexapis.com/v1/messages"
+
+ payload = {
+ ansible['recipient_type']: ansible['recipient_id'],
+ ansible['msg_type']: ansible['msg']
+ }
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers)
+
+ status_code = info['status']
+ msg = info['msg']
+
+ # Module will fail if the response is not 200
+ if status_code != 200:
+ results['failed'] = True
+ results['status_code'] = status_code
+ results['message'] = msg
+ else:
+ results['failed'] = False
+ results['status_code'] = status_code
+
+ if module.check_mode:
+ results['message'] = 'Authentication Successful.'
+ else:
+ results['message'] = msg
+
+ return results
+
+
+def main():
+ '''Ansible main. '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']),
+ recipient_id=dict(required=True, no_log=True),
+ msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']),
+ personal_token=dict(required=True, no_log=True, aliases=['token']),
+ msg=dict(required=True),
+ ),
+
+ supports_check_mode=True
+ )
+
+ results = webex_msg(module)
+
+ module.exit_json(**results)
+
+
+if __name__ == "__main__":
+ main()
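
The interesting branch in `webex_msg` above is check mode: instead of posting a message, the module hits the read-only `people/me` endpoint, so a 200 proves the token works without any side effects. A condensed sketch of the URL and payload selection:

    def webex_request(params, check_mode):
        # check mode: read-only endpoint, no payload, used purely to validate the token
        if check_mode:
            return "https://webexapis.com/v1/people/me", None
        # normal mode: the recipient_type choice becomes the JSON key itself
        payload = {
            params["recipient_type"]: params["recipient_id"],  # e.g. roomId: <id>
            params["msg_type"]: params["msg"],                 # text or markdown
        }
        return "https://webexapis.com/v1/messages", payload

    url, payload = webex_request(
        {"recipient_type": "roomId", "recipient_id": "abc123",
         "msg_type": "text", "msg": "hello"},
        check_mode=False,
    )
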
diff --git a/ansible_collections/community/general/plugins/modules/clc_aa_policy.py b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
new file mode 100644
index 000000000..05135bd95
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_aa_policy.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ type: str
+ required: true
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ required: false
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ community.general.clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk:
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'),
+ exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'),
+ exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ location=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_policies_for_datacenter(p)
+
+ if p['state'] == "absent":
+ changed, policy = self._ensure_policy_is_absent(p)
+ else:
+ changed, policy = self._ensure_policy_is_present(p)
+
+ if hasattr(policy, 'data'):
+ policy = policy.data
+ elif hasattr(policy, '__dict__'):
+ policy = policy.__dict__
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_policies_for_datacenter(self, p):
+ """
+ Get the Policies for a datacenter by calling the CLC API.
+        :param p: the module parameters (including the datacenter location)
+ :return: policies in the datacenter
+ """
+ response = {}
+
+ policies = self.clc.v2.AntiAffinity.GetAll(location=p['location'])
+
+ for policy in policies:
+ response[policy.name] = policy
+ return response
+
+ def _create_policy(self, p):
+ """
+ Create an Anti Affinity Policy using the CLC API.
+        :param p: the module parameters (policy name and datacenter location)
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ return self.clc.v2.AntiAffinity.Create(
+ name=p['name'],
+ location=p['location'])
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _delete_policy(self, p):
+ """
+ Delete an Anti Affinity Policy using the CLC API.
+        :param p: the module parameters (including the policy name)
+ :return: none
+ """
+ try:
+ policy = self.policy_dict[p['name']]
+ policy.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
+ p['name'], ex.response_text
+ ))
+
+ def _policy_exists(self, policy_name):
+ """
+ Check to see if an Anti Affinity Policy exists
+ :param policy_name: name of the policy
+        :return: the policy object if it exists, otherwise False
+ """
+ if policy_name in self.policy_dict:
+ return self.policy_dict.get(policy_name)
+
+ return False
+
+ def _ensure_policy_is_absent(self, p):
+ """
+ Makes sure that a policy is absent
+ :param p: dictionary of policy name
+        :return: tuple of whether a deletion occurred and None
+ """
+ changed = False
+ if self._policy_exists(policy_name=p['name']):
+ changed = True
+ if not self.module.check_mode:
+ self._delete_policy(p)
+ return changed, None
+
+ def _ensure_policy_is_present(self, p):
+ """
+ Ensures that a policy is present
+ :param p: dictionary of a policy name
+        :return: tuple of whether an addition occurred and the policy object
+ """
+ changed = False
+ policy = self._policy_exists(policy_name=p['name'])
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_policy(p)
+ return changed, policy
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+ clc_aa_policy = ClcAntiAffinityPolicy(module)
+ clc_aa_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
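
All of the clc_* modules in this patch share the same credential bootstrap: an explicit API token plus account alias wins over username/password, and having neither pair is fatal. A standalone sketch of that precedence (the return shape is illustrative only):

    import os

    def resolve_clc_credentials(env=None):
        # mirrors _set_clc_credentials_from_env: token+alias beats username/password
        env = os.environ if env is None else env
        token = env.get("CLC_V2_API_TOKEN")
        alias = env.get("CLC_ACCT_ALIAS")
        user = env.get("CLC_V2_API_USERNAME")
        passwd = env.get("CLC_V2_API_PASSWD")
        if token and alias:
            return {"mode": "token", "alias": alias}
        if user and passwd:
            return {"mode": "password", "username": user}
        raise RuntimeError("Set CLC_V2_API_USERNAME/CLC_V2_API_PASSWD or "
                           "CLC_V2_API_TOKEN/CLC_ACCT_ALIAS")

    print(resolve_clc_credentials({"CLC_V2_API_TOKEN": "t", "CLC_ACCT_ALIAS": "wfad"}))
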
diff --git a/ansible_collections/community/general/plugins/modules/clc_alert_policy.py b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
new file mode 100644
index 000000000..b77c83e3b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_alert_policy.py
@@ -0,0 +1,536 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: true
+ name:
+ description:
+      - The name of the alert policy. This is mutually exclusive with I(id).
+ type: str
+ id:
+ description:
+      - The alert policy id. This is mutually exclusive with I(name).
+ type: str
+ alert_recipients:
+ description:
+      - A list of recipient email addresses to notify for the alert.
+ This is required for state 'present'
+ type: list
+ elements: str
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ type: str
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time in minutes that the condition must exceed the threshold.
+ This is required for state 'present'
+ type: str
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+        This is required for state 'present'.
+        This number represents a percentage and must be a value between 5.0 and 95.0 that is a multiple of 5.0.
+ type: int
+ state:
+ description:
+ - Whether to create or delete the policy.
+ type: str
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ community.general.clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: Debug
+ ansible.builtin.debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ self.policy_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ id=dict(),
+ alias=dict(required=True),
+ alert_recipients=dict(type='list', elements='str'),
+ metric=dict(
+ choices=[
+ 'cpu',
+ 'memory',
+ 'disk']),
+ duration=dict(type='str'),
+ threshold=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ mutually_exclusive = [
+ ['name', 'id']
+ ]
+ return {'argument_spec': argument_spec,
+ 'mutually_exclusive': mutually_exclusive}
+
+ # Module Behavior Goodness
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+
+ self._set_clc_credentials_from_env()
+ self.policy_dict = self._get_alert_policies(p['alias'])
+
+ if p['state'] == 'present':
+ changed, policy = self._ensure_alert_policy_is_present()
+ else:
+ changed, policy = self._ensure_alert_policy_is_absent()
+
+ self.module.exit_json(changed=changed, policy=policy)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_alert_policy_is_present(self):
+ """
+ Ensures that the alert policy is present
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+ policy: the created/updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ policy_name = p.get('name')
+
+ if not policy_name:
+            self.module.fail_json(msg='Policy name is required')
+ policy = self._alert_policy_exists(policy_name)
+ if not policy:
+ changed = True
+ policy = None
+ if not self.module.check_mode:
+ policy = self._create_alert_policy()
+ else:
+ changed_u, policy = self._ensure_alert_policy_is_updated(policy)
+ if changed_u:
+ changed = True
+ return changed, policy
+
+ def _ensure_alert_policy_is_absent(self):
+ """
+ Ensures that the alert policy is absent
+ :return: (changed, None)
+ changed: A flag representing if anything is modified
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = p.get('id')
+ alert_policy_name = p.get('name')
+ alias = p.get('alias')
+ if not alert_policy_id and not alert_policy_name:
+ self.module.fail_json(
+ msg='Either alert policy id or policy name is required')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id(
+ self.module,
+ alert_policy_name)
+ if alert_policy_id and alert_policy_id in self.policy_dict:
+ changed = True
+ if not self.module.check_mode:
+ self._delete_alert_policy(alias, alert_policy_id)
+ return changed, None
+
+ def _ensure_alert_policy_is_updated(self, alert_policy):
+ """
+ Ensures the alert policy is updated if anything is changed in the alert policy configuration
+ :param alert_policy: the target alert policy
+ :return: (changed, policy)
+ changed: A flag representing if anything is modified
+            policy: the updated alert policy
+ """
+ changed = False
+ p = self.module.params
+ alert_policy_id = alert_policy.get('id')
+ email_list = p.get('alert_recipients')
+ metric = p.get('metric')
+ duration = p.get('duration')
+ threshold = p.get('threshold')
+ policy = alert_policy
+ if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
+ (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
+ (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
+ changed = True
+ elif email_list:
+ t_email_list = list(
+ alert_policy.get('actions')[0].get('settings').get('recipients'))
+ if set(email_list) != set(t_email_list):
+ changed = True
+ if changed and not self.module.check_mode:
+ policy = self._update_alert_policy(alert_policy_id)
+ return changed, policy
+
+ def _get_alert_policies(self, alias):
+ """
+ Get the alert policies for account alias by calling the CLC API.
+ :param alias: the account alias
+ :return: the alert policies for the account alias
+ """
+ response = {}
+
+ policies = self.clc.v2.API.Call('GET',
+ '/v2/alertPolicies/%s'
+ % alias)
+
+ for policy in policies.get('items'):
+ response[policy.get('id')] = policy
+ return response
+
+ def _create_alert_policy(self):
+ """
+ Create an alert Policy using the CLC API.
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST',
+ '/v2/alertPolicies/%s' % alias,
+ arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to create alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _update_alert_policy(self, alert_policy_id):
+ """
+ Update alert policy using the CLC API.
+ :param alert_policy_id: The clc alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ p = self.module.params
+ alias = p['alias']
+ email_list = p['alert_recipients']
+ metric = p['metric']
+ duration = p['duration']
+ threshold = p['threshold']
+ policy_name = p['name']
+ arguments = json.dumps(
+ {
+ 'name': policy_name,
+ 'actions': [{
+ 'action': 'email',
+ 'settings': {
+ 'recipients': email_list
+ }
+ }],
+ 'triggers': [{
+ 'metric': metric,
+ 'duration': duration,
+ 'threshold': threshold
+ }]
+ }
+ )
+ try:
+ result = self.clc.v2.API.Call(
+ 'PUT', '/v2/alertPolicies/%s/%s' %
+ (alias, alert_policy_id), arguments)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to update alert policy "{0}". {1}'.format(
+ policy_name, str(e.response_text)))
+ return result
+
+ def _delete_alert_policy(self, alias, policy_id):
+ """
+ Delete an alert policy using the CLC API.
+ :param alias : the account alias
+ :param policy_id: the alert policy id
+ :return: response dictionary from the CLC API.
+ """
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/alertPolicies/%s/%s' %
+ (alias, policy_id), None)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to delete alert policy id "{0}". {1}'.format(
+ policy_id, str(e.response_text)))
+ return result
+
+ def _alert_policy_exists(self, policy_name):
+ """
+ Check to see if an alert policy exists
+ :param policy_name: name of the alert policy
+        :return: the policy dictionary if it exists, otherwise False
+ """
+ result = False
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == policy_name:
+ result = self.policy_dict.get(policy_id)
+ return result
+
+ def _get_alert_policy_id(self, module, alert_policy_name):
+ """
+ retrieves the alert policy id of the account based on the name of the policy
+ :param module: the AnsibleModule object
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ for policy_id in self.policy_dict:
+ if self.policy_dict.get(policy_id).get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy_id
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcAlertPolicy._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_alert_policy = ClcAlertPolicy(module)
+ clc_alert_policy.process_request()
+
+
+if __name__ == '__main__':
+ main()
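
The drift check in `_ensure_alert_policy_is_updated` above compares the single trigger's metric/duration/threshold as strings or floats, and only falls back to comparing recipient sets when the trigger fields already match. A condensed standalone sketch of that comparison:

    def alert_policy_changed(policy, metric=None, duration=None,
                             threshold=None, recipients=None):
        # mirrors the module: trigger fields first, recipients only as a fallback
        trigger = policy["triggers"][0]
        if ((metric and metric != str(trigger["metric"]))
                or (duration and duration != str(trigger["duration"]))
                or (threshold and float(threshold) != float(trigger["threshold"]))):
            return True
        if recipients:
            current = policy["actions"][0]["settings"]["recipients"]
            return set(recipients) != set(current)
        return False

    policy = {"triggers": [{"metric": "disk", "duration": "00:05:00", "threshold": 80.0}],
              "actions": [{"settings": {"recipients": ["a@example.com"]}}]}
    print(alert_policy_changed(policy, metric="cpu"))  # True
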
diff --git a/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
new file mode 100644
index 000000000..672e06780
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_blueprint_package.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: Deploys a blueprint package on a set of servers in CenturyLink Cloud
+description:
+    - An Ansible module to deploy a blueprint package on a set of servers in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ server_ids:
+ description:
+      - A list of server Ids on which to deploy the blueprint package.
+ type: list
+ required: true
+ elements: str
+ package_id:
+ description:
+      - The package id of the blueprint.
+ type: str
+ required: true
+ package_params:
+ description:
+      - The dictionary of arguments required to deploy the blueprint.
+ type: dict
+ default: {}
+ required: false
+ state:
+ description:
+      - Whether to install or uninstall the package. Currently it supports only C(present) for the install action.
+ type: str
+ required: false
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: str
+ default: 'True'
+ required: false
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Deploy package
+ community.general.clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ changed = False
+ changed_server_ids = []
+ self._set_clc_credentials_from_env()
+ server_ids = p['server_ids']
+ package_id = p['package_id']
+ package_params = p['package_params']
+ state = p['state']
+ if state == 'present':
+ changed, changed_server_ids, request_list = self.ensure_package_installed(
+ server_ids, package_id, package_params)
+ self._wait_for_requests_to_complete(request_list)
+ self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec required for
+        the package module
+        :return: the argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', elements='str', required=True),
+ package_id=dict(required=True),
+ package_params=dict(type='dict', default={}),
+ wait=dict(default=True), # @FIXME should be bool?
+ state=dict(default='present', choices=['present'])
+ )
+ return argument_spec
+
+ def ensure_package_installed(self, server_ids, package_id, package_params):
+ """
+ Ensure the package is installed in the given list of servers
+ :param server_ids: the server list where the package needs to be installed
+ :param package_id: the blueprint package id
+ :param package_params: the package arguments
+ :return: (changed, server_ids, request_list)
+ changed: A flag indicating if a change was made
+ server_ids: The list of servers modified
+ request_list: The list of request objects from clc-sdk
+ """
+ changed = False
+ request_list = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to get servers from CLC')
+ for server in servers:
+ if not self.module.check_mode:
+ request = self.clc_install_package(
+ server,
+ package_id,
+ package_params)
+ request_list.append(request)
+ changed = True
+ return changed, server_ids, request_list
+
+ def clc_install_package(self, server, package_id, package_params):
+ """
+ Install the package to a given clc server
+ :param server: The server object where the package needs to be installed
+ :param package_id: The blue print package id
+ :param package_params: the required argument dict for the package installation
+ :return: The result object from the CLC API call
+ """
+ result = None
+ try:
+ result = server.ExecutePackage(
+ package_id=package_id,
+ parameters=package_params)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
+ package_id, server.id, ex.message
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, request_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param request_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in request_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process package install request')
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: the list of server ids
+ :param message: the error message to raise if there is any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcBlueprintPackage.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_blueprint_package = ClcBlueprintPackage(module)
+ clc_blueprint_package.process_request()
+
+
+if __name__ == '__main__':
+ main()
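
Note the `@FIXME should be bool?` next to `wait` in the argument spec above: without `type='bool'`, Ansible passes the option through as a string, and any non-empty string, including 'False', is truthy, so the module waits regardless. A two-line illustration of the pitfall:

    # without type='bool' in the argument spec, wait can arrive as a string
    raw_wait = "False"
    print(bool(raw_wait))  # True -- the module would still wait on requests
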
diff --git a/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
new file mode 100644
index 000000000..c832571d3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_firewall_policy.py
@@ -0,0 +1,596 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+    - Create, delete, or update firewall policies on CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ type: str
+ required: true
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+ This is required when state is 'present'
+ type: list
+ elements: str
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ type: list
+ elements: str
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])."
+ type: list
+ elements: str
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ type: str
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ type: str
+ required: true
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ type: str
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: str
+ default: 'True'
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ type: str
+ choices: ['True', 'False']
+ default: 'True'
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+    - To use this module, it is required to set the below environment variables, which enable access to the
+      CenturyLink Cloud
+    - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+    - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+    - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+      CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+    - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+    - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+      community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+    - name: Delete a Firewall Policy at CenturyLink Cloud
+      community.general.clc_firewall_policy:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+    description: The firewall policy id
+ returned: success
+ type: str
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+    description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from time import sleep
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcFirewallPolicy:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.firewall_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ location=dict(required=True),
+ source_account_alias=dict(required=True),
+ destination_account_alias=dict(),
+ firewall_policy_id=dict(),
+ ports=dict(type='list', elements='str'),
+ source=dict(type='list', elements='str'),
+ destination=dict(type='list', elements='str'),
+ wait=dict(default=True), # @FIXME type=bool
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(default=True, choices=[True, False])
+ )
+ return argument_spec
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ firewall_policy = None
+ location = self.module.params.get('location')
+ source_account_alias = self.module.params.get('source_account_alias')
+ destination_account_alias = self.module.params.get(
+ 'destination_account_alias')
+ firewall_policy_id = self.module.params.get('firewall_policy_id')
+ ports = self.module.params.get('ports')
+ source = self.module.params.get('source')
+ destination = self.module.params.get('destination')
+ wait = self.module.params.get('wait')
+ state = self.module.params.get('state')
+ enabled = self.module.params.get('enabled')
+
+ self.firewall_dict = {
+ 'location': location,
+ 'source_account_alias': source_account_alias,
+ 'destination_account_alias': destination_account_alias,
+ 'firewall_policy_id': firewall_policy_id,
+ 'ports': ports,
+ 'source': source,
+ 'destination': destination,
+ 'wait': wait,
+ 'state': state,
+ 'enabled': enabled}
+
+ self._set_clc_credentials_from_env()
+
+ if state == 'absent':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
+ source_account_alias, location, self.firewall_dict)
+
+ elif state == 'present':
+ changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
+ source_account_alias, location, self.firewall_dict)
+
+ return self.module.exit_json(
+ changed=changed,
+ firewall_policy_id=firewall_policy_id,
+ firewall_policy=firewall_policy)
+
+ @staticmethod
+ def _get_policy_id_from_response(response):
+ """
+ Method to parse out the policy id from creation response
+ :param response: response from firewall creation API call
+ :return: policy_id: firewall policy id from creation call
+ """
+ url = response.get('links')[0]['href']
+ path = urlparse(url).path
+ path_list = os.path.split(path)
+ policy_id = path_list[-1]
+ return policy_id
+
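+ # Illustrative sketch (not part of the module), using a hypothetical
+ # creation response: the parsing above keeps only the final path
+ # component of the policy's self link.
+ #
+ # >>> resp = {'links': [{'href': 'http://api.ctl.io/v2-experimental'
+ # ... '/firewallPolicies/wfad/uc1/fc36f1b', 'rel': 'self'}]}
+ # >>> ClcFirewallPolicy._get_policy_id_from_response(resp)
+ # 'fc36f1b'
+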
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
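+ # Illustrative sketch (not part of the module), with hypothetical values:
+ # either pair of variables below satisfies the credential lookup above.
+ #
+ # export CLC_V2_API_USERNAME=jdoe CLC_V2_API_PASSWD=s3cret
+ # # or, token based:
+ # export CLC_V2_API_TOKEN=<token> CLC_ACCT_ALIAS=WFAD
+ # # optionally, point the SDK at a different CLC endpoint:
+ # export CLC_V2_API_URL=https://api.ctl.io/
+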
+ def _ensure_firewall_policy_is_present(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: (changed, firewall_policy_id, firewall_policy)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was created/updated
+ firewall_policy: The firewall_policy object
+ """
+ firewall_policy = None
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+
+ if firewall_policy_id is None:
+ if not self.module.check_mode:
+ response = self._create_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_dict)
+ firewall_policy_id = self._get_policy_id_from_response(
+ response)
+ changed = True
+ else:
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if not firewall_policy:
+ return self.module.fail_json(
+ msg='Unable to find the firewall policy id: {0}'.format(
+ firewall_policy_id))
+ changed = self._compare_get_request_with_dict(
+ firewall_policy,
+ firewall_dict)
+ if not self.module.check_mode and changed:
+ self._update_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict)
+ if changed and firewall_policy_id:
+ firewall_policy = self._wait_for_requests_to_complete(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ return changed, firewall_policy_id, firewall_policy
+
+ def _ensure_firewall_policy_is_absent(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Ensures that a given firewall policy is removed if present
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: firewall policy to delete
+ :return: (changed, firewall_policy_id, response)
+ changed: flag for if a change occurred
+ firewall_policy_id: the firewall policy id that was deleted
+ response: response from CLC API call
+ """
+ changed = False
+ response = []
+ firewall_policy_id = firewall_dict.get('firewall_policy_id')
+ result = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ if result:
+ if not self.module.check_mode:
+ response = self._delete_firewall_policy(
+ source_account_alias,
+ location,
+ firewall_policy_id)
+ changed = True
+ return changed, firewall_policy_id, response
+
+ def _create_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_dict):
+ """
+ Creates the firewall policy for the given account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response from CLC API call
+ """
+ payload = {
+ 'destinationAccount': firewall_dict.get('destination_account_alias'),
+ 'source': firewall_dict.get('source'),
+ 'destination': firewall_dict.get('destination'),
+ 'ports': firewall_dict.get('ports')}
+ try:
+ response = self.clc.v2.API.Call(
+ 'POST', '/v2-experimental/firewallPolicies/%s/%s' %
+ (source_account_alias, location), payload)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to create firewall policy. %s" %
+ str(e.response_text))
+ return response
+
+ def _delete_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Deletes a given firewall policy for an account alias in a datacenter
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to delete
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to delete the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _update_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ firewall_dict):
+ """
+ Updates a firewall policy for a given datacenter and account alias
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: firewall policy id to update
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: response: response from CLC API call
+ """
+ try:
+ response = self.clc.v2.API.Call(
+ 'PUT',
+ '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias,
+ location,
+ firewall_policy_id),
+ firewall_dict)
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg="Unable to update the firewall policy id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ @staticmethod
+ def _compare_get_request_with_dict(response, firewall_dict):
+ """
+ Helper method to compare the json response for getting the firewall policy with the request parameters
+ :param response: response from the get method
+ :param firewall_dict: dictionary of request parameters for firewall policy
+ :return: changed: Boolean that returns true if there are differences between
+ the response parameters and the playbook parameters
+ """
+
+ changed = False
+
+ response_dest_account_alias = response.get('destinationAccount')
+ response_enabled = response.get('enabled')
+ response_source = response.get('source')
+ response_dest = response.get('destination')
+ response_ports = response.get('ports')
+ request_dest_account_alias = firewall_dict.get(
+ 'destination_account_alias')
+ request_enabled = firewall_dict.get('enabled')
+ if request_enabled is None:
+ request_enabled = True
+ request_source = firewall_dict.get('source')
+ request_dest = firewall_dict.get('destination')
+ request_ports = firewall_dict.get('ports')
+
+ if (
+ response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
+ response_enabled != request_enabled) or (
+ response_source and response_source != request_source) or (
+ response_dest and response_dest != request_dest) or (
+ response_ports and response_ports != request_ports):
+ changed = True
+ return changed
+
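+ # Illustrative sketch (not part of the module): a difference in any one
+ # of the compared fields is enough to flag the policy as changed, e.g.
+ # for hypothetical values:
+ #
+ # >>> response = {'enabled': True, 'ports': ['any']}
+ # >>> request = {'enabled': True, 'ports': ['tcp/80']}
+ # >>> ClcFirewallPolicy._compare_get_request_with_dict(response, request)
+ # True
+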
+ def _get_firewall_policy(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id):
+ """
+ Get back details for a particular firewall policy
+ :param source_account_alias: the source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: id of the firewall policy to get
+ :return: response - The response from CLC API call
+ """
+ response = None
+ try:
+ response = self.clc.v2.API.Call(
+ 'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
+ (source_account_alias, location, firewall_policy_id))
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ self.module.fail_json(
+ msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
+ firewall_policy_id, str(e.response_text)))
+ return response
+
+ def _wait_for_requests_to_complete(
+ self,
+ source_account_alias,
+ location,
+ firewall_policy_id,
+ wait_limit=50):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param source_account_alias: The source account alias for the firewall policy
+ :param location: datacenter of the firewall policy
+ :param firewall_policy_id: The firewall policy id
+ :param wait_limit: The number of times to check the status for completion
+ :return: the firewall_policy object
+ """
+ wait = self.module.params.get('wait')
+ count = 0
+ firewall_policy = None
+ while wait:
+ count += 1
+ firewall_policy = self._get_firewall_policy(
+ source_account_alias, location, firewall_policy_id)
+ status = firewall_policy.get('status')
+ if status == 'active' or count > wait_limit:
+ wait = False
+ else:
+ # wait for 2 seconds
+ sleep(2)
+ return firewall_policy
+
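+ # Illustrative note (not part of the module): with the defaults above,
+ # the loop polls at 2-second intervals and gives up after wait_limit
+ # (50) checks, so a policy that never reaches 'active' is returned
+ # as-is after roughly 100 seconds.
+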
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_firewall = ClcFirewallPolicy(module)
+ clc_firewall.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_group.py b/ansible_collections/community/general/plugins/modules/clc_group.py
new file mode 100644
index 000000000..88aef2d63
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_group.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at CenturyLink Cloud
+description:
+ - Create or delete Server Groups at CenturyLink Cloud
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the Server Group
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the Server Group
+ type: str
+ required: false
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, it creates the group at top level.
+ type: str
+ required: false
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ type: str
+ required: false
+ state:
+ description:
+ - Whether to create or delete the group
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: true
+ required: false
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the following environment variables, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+# Delete a Server Group
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ community.general.clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcGroup(object):
+
+ clc = None
+ root_group = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ location = self.module.params.get('location')
+ group_name = self.module.params.get('name')
+ parent_name = self.module.params.get('parent')
+ group_description = self.module.params.get('description')
+ state = self.module.params.get('state')
+
+ self._set_clc_credentials_from_env()
+ self.group_dict = self._get_group_tree_for_datacenter(
+ datacenter=location)
+
+ if state == "absent":
+ changed, group, requests = self._ensure_group_is_absent(
+ group_name=group_name, parent_name=parent_name)
+ if requests:
+ self._wait_for_requests_to_complete(requests)
+ else:
+ changed, group = self._ensure_group_is_present(
+ group_name=group_name, parent_name=parent_name, group_description=group_description)
+ try:
+ group = group.data
+ except AttributeError:
+ group = group_name
+ self.module.exit_json(changed=changed, group=group)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ parent=dict(),
+ location=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=True))
+
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _ensure_group_is_absent(self, group_name, parent_name):
+ """
+ Ensure that group_name is absent by deleting it if necessary
+ :param group_name: string - the name of the clc server group to delete
+ :param parent_name: string - the name of the parent group for group_name
+ :return: (changed, group, results) - changed flag, deleted group names, and delete responses
+ """
+ changed = False
+ group = []
+ results = []
+
+ if self._group_exists(group_name=group_name, parent_name=parent_name):
+ if not self.module.check_mode:
+ group.append(group_name)
+ result = self._delete_group(group_name)
+ results.append(result)
+ changed = True
+ return changed, group, results
+
+ def _delete_group(self, group_name):
+ """
+ Delete the provided server group
+ :param group_name: string - the server group to delete
+ :return: none
+ """
+ response = None
+ group, parent = self.group_dict.get(group_name)
+ try:
+ response = group.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete group: {0}. {1}'.format(
+ group_name, ex.response_text
+ ))
+ return response
+
+ def _ensure_group_is_present(
+ self,
+ group_name,
+ parent_name,
+ group_description):
+ """
+ Checks to see if a server group exists, creates it if it doesn't.
+ :param group_name: the name of the group to validate/create
+ :param parent_name: the name of the parent group for group_name
+ :param group_description: a short description of the server group (used when creating)
+ :return: (changed, group) -
+ changed: Boolean- whether a change was made,
+ group: A clc group object for the group
+ """
+ if not self.root_group:
+ raise AssertionError("Implementation Error: Root Group not set")
+ parent = parent_name if parent_name is not None else self.root_group.name
+ description = group_description
+ changed = False
+ group = group_name
+
+ parent_exists = self._group_exists(group_name=parent, parent_name=None)
+ child_exists = self._group_exists(
+ group_name=group_name,
+ parent_name=parent)
+
+ if parent_exists and child_exists:
+ group, parent = self.group_dict[group_name]
+ changed = False
+ elif parent_exists and not child_exists:
+ if not self.module.check_mode:
+ group = self._create_group(
+ group=group,
+ parent=parent,
+ description=description)
+ changed = True
+ else:
+ self.module.fail_json(
+ msg="parent group: " +
+ parent +
+ " does not exist")
+
+ return changed, group
+
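+ # Illustrative note (not part of the module): the three outcomes above
+ # are (parent and child exist) -> no change, (parent exists, child
+ # missing) -> create the child under the parent, (parent missing) ->
+ # fail; the datacenter's root group stands in when no parent is given.
+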
+ def _create_group(self, group, parent, description):
+ """
+ Create the provided server group
+ :param group: clc_sdk.Group - the group to create
+ :param parent: clc_sdk.Parent - the parent group for {group}
+ :param description: string - a text description of the group
+ :return: clc_sdk.Group - the created group
+ """
+ response = None
+ (parent, grandparent) = self.group_dict[parent]
+ try:
+ response = parent.Create(name=group, description=description)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to create group: {0}. {1}'.format(
+ group, ex.response_text))
+ return response
+
+ def _group_exists(self, group_name, parent_name):
+ """
+ Check to see if a group exists
+ :param group_name: string - the group to check
+ :param parent_name: string - the parent of group_name
+ :return: boolean - whether the group exists
+ """
+ result = False
+ if group_name in self.group_dict:
+ (group, parent) = self.group_dict[group_name]
+ if parent_name is None or parent_name == parent.name:
+ result = True
+ return result
+
+ def _get_group_tree_for_datacenter(self, datacenter=None):
+ """
+ Walk the tree of groups for a datacenter
+ :param datacenter: string - the datacenter to walk (ex: 'UC1')
+ :return: a dictionary of groups and parents
+ """
+ self.root_group = self.clc.v2.Datacenter(
+ location=datacenter).RootGroup()
+ return self._walk_groups_recursive(
+ parent_group=None,
+ child_group=self.root_group)
+
+ def _walk_groups_recursive(self, parent_group, child_group):
+ """
+ Walk a parent-child tree of groups, starting with the provided child group
+ :param parent_group: clc_sdk.Group - the parent group to start the walk
+ :param child_group: clc_sdk.Group - the child group to start the walk
+ :return: a dictionary of groups and parents
+ """
+ result = {str(child_group): (child_group, parent_group)}
+ groups = child_group.Subgroups().groups
+ if len(groups) > 0:
+ for group in groups:
+ if group.type != 'default':
+ continue
+
+ result.update(self._walk_groups_recursive(child_group, group))
+ return result
+
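+ # Illustrative sketch (not part of the module): for a hypothetical tree
+ # Root -> Default Group -> web, the walk above flattens it into a
+ # name -> (group, parent) mapping:
+ #
+ # {'Root': (<Root>, None),
+ # 'Default Group': (<Default Group>, <Root>),
+ # 'web': (<web>, <Default Group>)}
+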
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process group request')
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcGroup._define_module_argument_spec(),
+ supports_check_mode=True)
+
+ clc_group = ClcGroup(module)
+ clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
new file mode 100644
index 000000000..675cc1100
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_loadbalancer.py
@@ -0,0 +1,945 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create or delete shared load balancers in CenturyLink Cloud
+description:
+ - An Ansible module to create or delete shared load balancers in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ type: str
+ required: true
+ description:
+ description:
+ - A description for the loadbalancer
+ type: str
+ alias:
+ description:
+ - The alias of your CLC Account
+ type: str
+ required: true
+ location:
+ description:
+ - The location of the datacenter the load balancer resides in
+ type: str
+ required: true
+ method:
+ description:
+ - The balancing method for the load balancer pool
+ type: str
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ type: str
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ type: str
+ choices: ['80', '443']
+ nodes:
+ description:
+ - A list of nodes to be added to the load balancer pool
+ type: list
+ default: []
+ elements: dict
+ status:
+ description:
+ - The status of the loadbalancer
+ type: str
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ type: str
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the following environment variables, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ community.general.clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+from time import sleep
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcLoadBalancer:
+
+ clc = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.lb_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Execute the main code path, and handle the request
+ :return: none
+ """
+ changed = False
+ result_lb = None
+ loadbalancer_name = self.module.params.get('name')
+ loadbalancer_alias = self.module.params.get('alias')
+ loadbalancer_location = self.module.params.get('location')
+ loadbalancer_description = self.module.params.get('description')
+ loadbalancer_port = self.module.params.get('port')
+ loadbalancer_method = self.module.params.get('method')
+ loadbalancer_persistence = self.module.params.get('persistence')
+ loadbalancer_nodes = self.module.params.get('nodes')
+ loadbalancer_status = self.module.params.get('status')
+ state = self.module.params.get('state')
+
+ if loadbalancer_description is None:
+ loadbalancer_description = loadbalancer_name
+
+ self._set_clc_credentials_from_env()
+
+ self.lb_dict = self._get_loadbalancer_list(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ if state == 'present':
+ changed, result_lb, lb_id = self.ensure_loadbalancer_present(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ description=loadbalancer_description,
+ status=loadbalancer_status)
+ if loadbalancer_port:
+ changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
+ lb_id=lb_id,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ method=loadbalancer_method,
+ persistence=loadbalancer_persistence,
+ port=loadbalancer_port)
+
+ if loadbalancer_nodes:
+ changed, result_nodes = self.ensure_lbpool_nodes_set(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+ elif state == 'absent':
+ changed, result_lb = self.ensure_loadbalancer_absent(
+ name=loadbalancer_name,
+ alias=loadbalancer_alias,
+ location=loadbalancer_location)
+
+ elif state == 'port_absent':
+ changed, result_lb = self.ensure_loadbalancerpool_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port)
+
+ elif state == 'nodes_present':
+ changed, result_lb = self.ensure_lbpool_nodes_present(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ elif state == 'nodes_absent':
+ changed, result_lb = self.ensure_lbpool_nodes_absent(
+ alias=loadbalancer_alias,
+ location=loadbalancer_location,
+ name=loadbalancer_name,
+ port=loadbalancer_port,
+ nodes=loadbalancer_nodes)
+
+ self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, group, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "LB Doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool
+ and sets the pool's node list if any of them are missing
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool and adds any missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exists for the pool and removes any that are found
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ lb_id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ lb_id = lb.get('id')
+ return lb_id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to fetch the load balancer pools for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ # a single missing node means the set does not fully exist yet
+ return False
+ return True
+
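+ # Illustrative sketch (not part of the module): nodes compare as whole
+ # dicts, with a missing 'status' defaulted to 'enabled', e.g. for
+ # hypothetical values:
+ #
+ # pool nodes: [{'ipAddress': '10.0.0.1', 'privatePort': 80, 'status': 'enabled'}]
+ # to check: [{'ipAddress': '10.0.0.1', 'privatePort': 80}]
+ # -> True, because the defaulted status makes the dicts equal
+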
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
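+ # Illustrative note (not part of the module): the PUT above replaces the
+ # pool's node list wholesale; add_lbpool_nodes and remove_lbpool_nodes
+ # below therefore merge into, or prune from, the currently configured
+ # list and re-submit the whole thing.
+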
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node not in nodes:
+ changed = True
+ nodes.append(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed is True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[], elements='dict'),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(),
+ supports_check_mode=True)
+ clc_loadbalancer = ClcLoadBalancer(module)
+ clc_loadbalancer.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_modify_server.py b/ansible_collections/community/general/plugins/modules/clc_modify_server.py
new file mode 100644
index 000000000..b375d9d47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_modify_server.py
@@ -0,0 +1,975 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: Modify servers in CenturyLink Cloud
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ server_ids:
+ description:
+ - A list of server IDs to modify.
+ type: list
+ required: true
+ elements: str
+ cpu:
+ description:
+ - How many CPUs to update on the server
+ type: str
+ memory:
+ description:
+ - Memory (in GB) to set to the server.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ type: str
+ state:
+ description:
+ - The state to ensure the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: true
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, you must set the following environment variables, which enable access to the
+ CenturyLink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+ - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Set the cpu count to 4 on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: Set the memory to 8GB on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: Set the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: Remove the anti affinity policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: Add the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: Remove the alert policy on a server
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: Set the memory to 16GB and cpu to 8 cores on a list of servers
+ community.general.clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
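+        # Illustrative example (not part of the upstream module) of why the
+        # gate above uses LooseVersion rather than a string comparison:
+        #
+        #   LooseVersion('2.10.0') < LooseVersion('2.5.0')   # False (correct)
+        #   '2.10.0' < '2.5.0'                               # True  (wrong)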
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True, elements='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+        Modify the configuration of the servers in the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
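+
+    # Illustrative sketch (not part of the upstream module) of the
+    # queue-then-wait shape used above; needs_change, start_change,
+    # wait_for_all and refresh are hypothetical stand-ins for the helpers:
+    #
+    #   pending = [start_change(s) for s in servers if needs_change(s)]
+    #   wait_for_all(pending)       # block until queued CLC requests finish
+    #   refresh(changed_servers)    # re-read state so return data is current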
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
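+
+    # Illustrative sketch (not part of the upstream module): the method above
+    # is a compare-then-set idempotency check -- fall back to the server's
+    # current values, compare, and only call the API outside check mode:
+    #
+    #   desired_cpu = cpu or server.cpu
+    #   desired_memory = memory or server.memory
+    #   if (desired_cpu, desired_memory) != (server.cpu, server.memory):
+    #       changed = True    # reported even in check mode; API call skipped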
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
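+
+    # For reference, the PATCH body serialized above has the shape:
+    #
+    #   [{"op": "set", "member": "memory", "value": <memory>},
+    #    {"op": "set", "member": "cpu", "value": <cpu>}]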
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+ ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+                msg='Unable to modify anti affinity policy for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+                msg='Unable to delete anti affinity policy from server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
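+
+    # Illustrative sketch (not part of the upstream module): the scan above is
+    # a "find exactly one by name" lookup; the same logic in compact form:
+    #
+    #   matches = [p.get('id') for p in aa_policies.get('items')
+    #              if p.get('name') == aa_policy_name]
+    #   # fail on len(matches) != 1, otherwise use matches[0]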
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+        :return: changed - Boolean whether a change was made
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        add the alert policy to the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ retrieves the alert policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+ :return: True: if the given alert policy id associated to the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
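+
+    # Illustrative example (assuming __version__ expands to '1.0.0'): after
+    # _set_user_agent runs, requests made through the session carry headers
+    # along the lines of
+    #
+    #   Api-Client: ClcAnsibleModule/1.0.0
+    #   User-Agent: python-requests/2.x.y ClcAnsibleModule/1.0.0
+    #
+    # where python-requests/2.x.y is the requests library's default agent.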
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_publicip.py b/ansible_collections/community/general/plugins/modules/clc_publicip.py
new file mode 100644
index 000000000..c1bffcea0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_publicip.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and delete public IPs on servers in CenturyLink Cloud
+description:
+  - An Ansible module to add or delete public IP addresses on an existing server or servers in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ type: str
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ ports:
+ description:
+      - A list of ports to expose. This is required when state is 'present'.
+ type: list
+ elements: int
+ server_ids:
+ description:
+      - A list of servers to create public IPs on.
+ type: list
+ required: true
+ elements: str
+ state:
+ description:
+      - Determine whether to create or delete public IPs. If 'present', the module will not create a second public IP
+        if one already exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ type: bool
+ default: true
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables which enable access to the
+    CenturyLink Cloud
+  - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+  - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+    CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+  - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+  - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ community.general.clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+    - name: Delete Public IP From Servers
+ community.general.clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: Debug
+ ansible.builtin.debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcPublicIp(object):
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+ params = self.module.params
+ server_ids = params['server_ids']
+ ports = params['ports']
+ protocol = params['protocol']
+ state = params['state']
+
+ if state == 'present':
+ changed, changed_server_ids, requests = self.ensure_public_ip_present(
+ server_ids=server_ids, protocol=protocol, ports=ports)
+ elif state == 'absent':
+ changed, changed_server_ids, requests = self.ensure_public_ip_absent(
+ server_ids=server_ids)
+ else:
+ return self.module.fail_json(msg="Unknown State: " + state)
+ self._wait_for_requests_to_complete(requests)
+ return self.module.exit_json(changed=changed,
+ server_ids=changed_server_ids)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True, elements='str'),
+ protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
+ ports=dict(type='list', elements='int'),
+ wait=dict(type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ return argument_spec
+
+ def ensure_public_ip_present(self, server_ids, protocol, ports):
+ """
+        Ensures the given servers have a public IP available
+ :param server_ids: the list of server ids
+ :param protocol: the ip protocol
+ :param ports: the list of ports to expose
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) == 0]
+ ports_to_expose = [{'protocol': protocol, 'port': port}
+ for port in ports]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._add_publicip_to_server(server, ports_to_expose)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
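+
+    # For reference, with protocol='TCP' and ports=[80, 443] the
+    # ports_to_expose list built above is:
+    #
+    #   [{'protocol': 'TCP', 'port': 80}, {'protocol': 'TCP', 'port': 443}]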
+
+ def _add_publicip_to_server(self, server, ports_to_expose):
+ result = None
+ try:
+ result = server.PublicIPs().Add(ports_to_expose)
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_public_ip_absent(self, server_ids):
+ """
+        Ensures any public IP is removed from the given servers
+ :param server_ids: the list of server ids
+ :return: (changed, changed_server_ids, results)
+ changed: A flag indicating if there is any change
+ changed_server_ids : the list of server ids that are changed
+ results: The result list from clc public ip call
+ """
+ changed = False
+ results = []
+ changed_server_ids = []
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.PublicIPs().public_ips) > 0]
+ for server in servers_to_change:
+ if not self.module.check_mode:
+ result = self._remove_publicip_from_server(server)
+ results.append(result)
+ changed_server_ids.append(server.id)
+ changed = True
+ return changed, changed_server_ids, results
+
+ def _remove_publicip_from_server(self, server):
+ result = None
+ try:
+ for ip_address in server.PublicIPs().public_ips:
+ result = ip_address.Delete()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process public ip request')
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_ids, message):
+ """
+        Gets the list of servers from the CLC API
+ """
+ try:
+ return self.clc.v2.Servers(server_ids).servers
+ except CLCException as exception:
+ self.module.fail_json(msg=message + ': %s' % exception)
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ module = AnsibleModule(
+ argument_spec=ClcPublicIp._define_module_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_public_ip = ClcPublicIp(module)
+ clc_public_ip.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_server.py b/ansible_collections/community/general/plugins/modules/clc_server.py
new file mode 100644
index 000000000..d2d019ff0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_server.py
@@ -0,0 +1,1570 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ type: list
+ elements: dict
+ default: []
+ add_public_ip:
+ description:
+      - Whether to add a public IP to the server.
+ type: bool
+ default: false
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ type: str
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ type: str
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ type: str
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ type: str
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ type: str
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ default: 1
+ type: int
+ count_group:
+ description:
+      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ type: str
+ cpu:
+ description:
+      - How many CPUs to provision on the server.
+ default: 1
+ type: int
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ type: str
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ type: list
+ default: []
+ elements: dict
+ description:
+ description:
+ - The description to set for the server.
+ type: str
+ exact_count:
+ description:
+      - Run in idempotent mode. Will ensure that this exact number of servers are running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ type: int
+ group:
+ description:
+ - The Server Group to create servers under.
+ type: str
+ default: 'Default Group'
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ type: str
+ location:
+ description:
+ - The Datacenter to create servers in.
+ type: str
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ type: bool
+ default: false
+ required: false
+ memory:
+ description:
+ - Memory in GB.
+ type: int
+ default: 1
+ name:
+ description:
+      - A 1 to 6 character identifier to use for the server. This is required when state is 'present'.
+ type: str
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ type: str
+ packages:
+ description:
+      - The list of blueprint packages to run on the server after it is created.
+ type: list
+ elements: dict
+ default: []
+ password:
+ description:
+ - Password for the administrator / root user
+ type: str
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ type: str
+ public_ip_protocol:
+ description:
+      - The protocol to use for the public IP if add_public_ip is set to True.
+ type: str
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ public_ip_ports:
+ description:
+      - A list of ports to allow on the firewall to the server's public IP, if add_public_ip is set to True.
+ type: list
+ elements: dict
+ default: []
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ type: str
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+        A list of server IDs to ensure are started, stopped, or absent.
+ type: list
+ default: []
+ elements: str
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ type: str
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+        This is required when state is 'present'.
+ type: str
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ type: str
+ type:
+ description:
+ - The type of server to create.
+ type: str
+ default: 'standard'
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ type: str
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ type: str
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ type: bool
+ default: true
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables which enable access to the
+    CenturyLink Cloud
+  - CLC_V2_API_USERNAME, the account login id for the CenturyLink Cloud
+  - CLC_V2_API_PASSWD, the account password for the CenturyLink Cloud
+  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+    CLC account login and password via the HTTP API call at https://api.ctl.io/v2/authentication/login
+  - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+  - CLC_ACCT_ALIAS, the account alias associated with the CenturyLink Cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ community.general.clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ community.general.clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+import json
+import os
+import time
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ changed = False
+ new_server_ids = []
+ server_dict_array = []
+
+ self._set_clc_credentials_from_env()
+ self.module.params = self._validate_module_params(
+ self.clc,
+ self.module)
+ p = self.module.params
+ state = p.get('state')
+
+ #
+ # Handle each state
+ #
+ partial_servers_ids = []
+ if state == 'absent':
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to delete: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._delete_servers(module=self.module,
+ clc=self.clc,
+ server_ids=server_ids)
+
+ elif state in ('started', 'stopped'):
+ server_ids = p.get('server_ids')
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of servers to run: %s' %
+ server_ids)
+
+ (changed,
+ server_dict_array,
+ new_server_ids) = self._start_stop_servers(self.module,
+ self.clc,
+ server_ids)
+
+ elif state == 'present':
+ # Changed is always set to true when provisioning new instances
+ if not p.get('template') and p.get('type') != 'bareMetal':
+ return self.module.fail_json(
+ msg='template parameter is required for new instance')
+
+ if p.get('exact_count') is None:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._create_servers(self.module,
+ self.clc)
+ else:
+ (server_dict_array,
+ new_server_ids,
+ partial_servers_ids,
+ changed) = self._enforce_count(self.module,
+ self.clc)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=new_server_ids,
+ partially_created_server_ids=partial_servers_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(),
+ cpu=dict(default=1, type='int'),
+ memory=dict(default=1, type='int'),
+ alias=dict(),
+ password=dict(no_log=True),
+ ip_address=dict(),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(),
+ secondary_dns=dict(),
+ additional_disks=dict(type='list', default=[], elements='dict'),
+ custom_fields=dict(type='list', default=[], elements='dict'),
+ ttl=dict(),
+ managed_os=dict(type='bool', default=False),
+ description=dict(),
+ source_server_password=dict(no_log=True),
+ cpu_autoscale_policy_id=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ packages=dict(type='list', default=[], elements='dict'),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', ),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[], elements='str'),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[], elements='dict'),
+ configuration_id=dict(),
+ os_type=dict(choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _validate_module_params(clc, module):
+ """
+        Validate the module params, and look up default values.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: dictionary of validated params
+ """
+ params = module.params
+ datacenter = ClcServer._find_datacenter(clc, module)
+
+ ClcServer._validate_types(module)
+ ClcServer._validate_name(module)
+
+ params['alias'] = ClcServer._find_alias(clc, module)
+ params['cpu'] = ClcServer._find_cpu(clc, module)
+ params['memory'] = ClcServer._find_memory(clc, module)
+ params['description'] = ClcServer._find_description(module)
+ params['ttl'] = ClcServer._find_ttl(clc, module)
+ params['template'] = ClcServer._find_template_id(module, datacenter)
+ params['group'] = ClcServer._find_group(module, datacenter).id
+ params['network_id'] = ClcServer._find_network_id(module, datacenter)
+ params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
+ clc,
+ module)
+ params['alert_policy_id'] = ClcServer._find_alert_policy_id(
+ clc,
+ module)
+
+ return params
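+
+    # Illustrative sketch (not part of the upstream module): each _find_*
+    # helper above follows the same "use the given value or look up a
+    # default" shape, e.g.:
+    #
+    #   def find_or_default(given, lookup):
+    #       return given if given else lookup()
+    #
+    # find_or_default is hypothetical and only summarizes the pattern; the
+    # real helpers also validate the value and fail_json on bad input.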
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException:
+ module.fail_json(msg="Unable to find location: {0}".format(location))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+        :return: string - the account alias
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+        if ttl:
+            # ttl is a string in the argument spec; convert before comparing
+            ttl = int(ttl)
+            if ttl <= 3600:
+                return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+            else:
+                ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
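+
+    # Illustrative example: with ttl='7200' the value converts to 7200
+    # seconds and the call above becomes SecondsToZuluTS(<now> + 7200),
+    # i.e. the absolute expiry time rendered as the Zulu timestamp string
+    # the CLC API expects.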
+
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ type = module.params.get('type')
+ result = None
+
+ if state == 'present' and type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+        Validate that the anti affinity policy exists for the given name and fail if not
+        :param clc: the clc-sdk instance
+        :param module: the module to validate
+        :return: aa_policy_id: the anti affinity policy id for the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+                msg='No anti affinity policy was found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+        Validate that the alert policy exists for the given name and fail if not
+        :param clc: the clc-sdk instance
+        :param module: the module to validate
+        :return: alert_policy_id: the alert policy id for the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+                    msg='No alert policy exists with name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+        :return: a tuple of (server_dict_array, created_server_ids, partial_created_servers_ids, changed)
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+        changed = count > 0
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+        Enforce that there is the right number of servers in the provided group.
+        Creates or deletes servers as necessary.
+        :param module: the AnsibleModule object
+        :param clc: the clc-sdk instance to use
+        :return: a tuple of (server_dict_array, changed_server_ids, partial_servers_ids, changed)
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+        # on a group, as this may lead to an undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ """
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+            'Failed to associate alert policy to the server: {0} with error {1}'.format(
+                server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+                    msg='multiple alert policies were found with policy name: %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: a list of dictionaries with server information about the servers that were deleted
+ """
+ terminated_server_ids = []
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+                if result and hasattr(result, 'requests') and result.requests:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
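+    # Editorial note: for any state other than 'started' the method first
+    # attempts a guest-OS ShutDown(); only when that call yields no queued
+    # request does it fall back to a hard PowerOff().
+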
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+        # The search above only acts on the main group level, so fall back
+        # to a recursive search of the subgroup tree
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+ :return: list of groups
+ """
+ result = None
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
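+    # Editorial sketch (hypothetical group names): given a tree
+    # 'Default Group' -> 'Web' -> 'Frontend', looking up 'Frontend' first
+    # tries Groups().Get() at the top level, then _find_group_recursive
+    # descends into 'Default Group' and 'Web' until Subgroups().Get()
+    # succeeds or the tree is exhausted.
+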
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+        # Patch the Request object so that it returns a valid server
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+ # Change the request server method to a _find_server_by_uuid closure so
+ # that it will work
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+        Retrieve the anti affinity policy id for the given policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+                    msg='multiple anti affinity policies were found with policy name: %s' % aa_policy_name)
+ return aa_policy_id
+
+    #
+    # This is the function that gets patched onto the Request object's Server
+    # attribute using a lambda closure
+    #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+        :param alias: the Account Alias to search
+        :param retries: the number of retry attempts to make before failing. default is 5
+        :param back_out: the initial back-off delay in seconds, doubled after each retry. default is 2
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+                    return module.fail_json(
+                        msg='Unable to reach the CLC API after multiple attempts')
+ time.sleep(back_out)
+ back_out *= 2
+
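+    # Editorial note: with the defaults (retries=5, back_out=2) the loop
+    # above sleeps 2, 4, 8 and 16 seconds between 404 responses before
+    # giving up on the fifth attempt.
+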
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
new file mode 100644
index 000000000..82b2a9956
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/clc_server_snapshot.py
@@ -0,0 +1,419 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015 CenturyLink
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ type: list
+ required: true
+ elements: str
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ type: int
+ default: 7
+ required: false
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ type: str
+ default: 'present'
+ required: false
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: 'True'
+ required: false
+ type: str
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+  - To use this module, it is required to set the below environment variables which enable access to the
+    Centurylink Cloud
+  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+  - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: Create server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: true
+ state: present
+
+- name: Restore server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: true
+ state: restore
+
+- name: Delete server snapshot
+ community.general.clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: true
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+    description: The list of server IDs that were changed.
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REQUESTS_IMP_ERR = None
+try:
+ import requests
+except ImportError:
+ REQUESTS_IMP_ERR = traceback.format_exc()
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+CLC_IMP_ERR = None
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_IMP_ERR = traceback.format_exc()
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
+ if not REQUESTS_FOUND:
+ self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
+ if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
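+    # Editorial note: only servers that currently have no snapshot are
+    # changed above, so repeated runs with state=present stay idempotent.
+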
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to create snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to delete snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+            self.module.fail_json(msg='Failed to restore snapshot for server: {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+        This function defines the argument spec dictionary required for
+        the snapshot module
+        :return: the argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True, elements='str'),
+ expiration_days=dict(default=7, type='int'),
+ wait=dict(default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+        :param message: The error message to report on failure
+        :return: the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
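+    # Editorial note: credential precedence above is CLC_V2_API_TOKEN plus
+    # CLC_ACCT_ALIAS first, then CLC_V2_API_USERNAME plus CLC_V2_API_PASSWD;
+    # CLC_V2_API_URL optionally overrides the API endpoint in either case.
+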
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ Main function
+ :return: None
+ """
+ module = AnsibleModule(
+ argument_spec=ClcSnapshot.define_argument_spec(),
+ supports_check_mode=True
+ )
+ clc_snapshot = ClcSnapshot(module)
+ clc_snapshot.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
new file mode 100644
index 000000000..d8209cc61
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cloud_init_data_facts.py
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cloud_init_data_facts
+short_description: Retrieve facts of cloud-init
+description:
+ - Gathers facts by reading the status.json and result.json of cloud-init.
+author: René Moser (@resmo)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ filter:
+ description:
+      - Filter facts to return only the C(status) or the C(result) data.
+ type: str
+ choices: [ status, result ]
+notes:
+ - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
+'''
+
+EXAMPLES = '''
+- name: Gather all facts of cloud init
+ community.general.cloud_init_data_facts:
+ register: result
+
+- ansible.builtin.debug:
+ var: result
+
+- name: Wait for cloud init to finish
+ community.general.cloud_init_data_facts:
+ filter: status
+ register: res
+ until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
+ retries: 50
+ delay: 5
+'''
+
+RETURN = '''
+---
+cloud_init_data_facts:
+ description: Facts of result and status.
+ returned: success
+ type: dict
+  sample: '{
+    "status": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "errors": []
+        }
+    },
+    "result": {
+        "v1": {
+            "datasource": "DataSourceCloudStack",
+            "init": {
+                "errors": [],
+                "finished": 1522066377.0185432,
+                "start": 1522066375.2648022
+            },
+            "init-local": {
+                "errors": [],
+                "finished": 1522066373.70919,
+                "start": 1522066373.4726632
+            },
+            "modules-config": {
+                "errors": [],
+                "finished": 1522066380.9097016,
+                "start": 1522066379.0011985
+            },
+            "modules-final": {
+                "errors": [],
+                "finished": 1522066383.56594,
+                "start": 1522066382.3449218
+            },
+            "stage": null
+        }
+    }
+  }'
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+
+CLOUD_INIT_PATH = "/var/lib/cloud/data"
+
+
+def gather_cloud_init_data_facts(module):
+ res = {
+ 'cloud_init_data_facts': dict()
+ }
+
+    filter_param = module.params.get('filter')
+    for i in ['result', 'status']:
+        if filter_param is None or filter_param == i:
+ res['cloud_init_data_facts'][i] = dict()
+ json_file = os.path.join(CLOUD_INIT_PATH, i + '.json')
+
+ if os.path.exists(json_file):
+            with open(json_file, 'rb') as f:
+                contents = to_text(f.read(), errors='surrogate_or_strict')
+
+ if contents:
+ res['cloud_init_data_facts'][i] = module.from_json(contents)
+ return res
+
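+# Editorial note: the files read above resolve to
+# /var/lib/cloud/data/result.json and /var/lib/cloud/data/status.json;
+# a missing or empty file simply leaves the corresponding facts entry empty.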
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ filter=dict(choices=['result', 'status']),
+ ),
+ supports_check_mode=True,
+ )
+
+ facts = gather_cloud_init_data_facts(module)
+ result = dict(changed=False, ansible_facts=facts, **facts)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cloudflare_dns.py b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
new file mode 100644
index 000000000..8f45fcef3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cloudflare_dns.py
@@ -0,0 +1,893 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cloudflare_dns
+author:
+- Michael Gruener (@mgruener)
+requirements:
+ - python >= 2.6
+short_description: Manage Cloudflare DNS records
+description:
+ - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_token:
+ description:
+ - API token.
+ - Required for api token authentication.
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
+ - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0.
+ type: str
+ required: false
+ version_added: '0.2.0'
+ account_api_key:
+ description:
+ - Account API key.
+ - Required for api keys authentication.
+ - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)."
+ type: str
+ required: false
+ aliases: [ account_api_token ]
+ account_email:
+ description:
+ - Account email. Required for API keys authentication.
+ type: str
+ required: false
+ algorithm:
+ description:
+ - Algorithm number.
+ - Required for I(type=DS) and I(type=SSHFP) when I(state=present).
+ type: int
+ cert_usage:
+ description:
+ - Certificate usage number.
+ - Required for I(type=TLSA) when I(state=present).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ hash_type:
+ description:
+ - Hash type number.
+ - Required for I(type=DS), I(type=SSHFP) and I(type=TLSA) when I(state=present).
+ type: int
+ choices: [ 1, 2 ]
+ key_tag:
+ description:
+ - DNSSEC key tag.
+ - Needed for I(type=DS) when I(state=present).
+ type: int
+ port:
+ description:
+ - Service port.
+ - Required for I(type=SRV) and I(type=TLSA).
+ type: int
+ priority:
+ description:
+ - Record priority.
+      - Required for I(type=MX) and I(type=SRV).
+ default: 1
+ type: int
+ proto:
+ description:
+ - Service protocol. Required for I(type=SRV) and I(type=TLSA).
+ - Common values are TCP and UDP.
+ - Before Ansible 2.6 only TCP and UDP were available.
+ type: str
+ proxied:
+ description:
+ - Proxy through Cloudflare network or just use DNS.
+ type: bool
+ default: false
+ record:
+ description:
+ - Record to add.
+ - Required if I(state=present).
+    - Default is C(@) (i.e. the zone name).
+ type: str
+ default: '@'
+ aliases: [ name ]
+ selector:
+ description:
+ - Selector number.
+ - Required for I(type=TLSA) when I(state=present).
+ choices: [ 0, 1 ]
+ type: int
+ service:
+ description:
+ - Record service.
+ - Required for I(type=SRV).
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+ - Only use with I(state=present).
+ - This will delete all other records with the same record name and type.
+ type: bool
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ timeout:
+ description:
+ - Timeout for Cloudflare API calls.
+ type: int
+ default: 30
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic.
+ type: int
+ default: 1
+ type:
+ description:
+ - The type of DNS record to create. Required if I(state=present).
+ - I(type=DS), I(type=SSHFP) and I(type=TLSA) added in Ansible 2.7.
+ type: str
+ choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ]
+ value:
+ description:
+ - The record value.
+ - Required for I(state=present).
+ type: str
+ aliases: [ content ]
+ weight:
+ description:
+ - Service weight.
+ - Required for I(type=SRV).
+ type: int
+ default: 1
+ zone:
+ description:
+ - The name of the Zone to work with (e.g. "example.com").
+ - The Zone must already exist.
+ type: str
+ required: true
+ aliases: [ domain ]
+'''
+
+EXAMPLES = r'''
+- name: Create a test.example.net A record to point to 127.0.0.1
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ register: record
+
+- name: Create a record using api token
+ community.general.cloudflare_dns:
+ zone: example.net
+ record: test
+ type: A
+ value: 127.0.0.1
+ api_token: dummyapitoken
+
+- name: Create a example.net CNAME record to example.com
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ ttl: 600
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: absent
+
+- name: Create a example.net CNAME record to example.com and proxy through Cloudflare's network
+ community.general.cloudflare_dns:
+ zone: example.net
+ type: CNAME
+ value: example.com
+ proxied: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+# This deletes all other TXT records named "test.example.net"
+- name: Create TXT record "test.example.net" with value "unique value"
+ community.general.cloudflare_dns:
+ domain: example.net
+ record: test
+ type: TXT
+ value: unique value
+ solo: true
+ account_email: test@example.com
+ account_api_key: dummyapitoken
+ state: present
+
+- name: Create an SRV record _foo._tcp.example.net
+ community.general.cloudflare_dns:
+ domain: example.net
+ service: foo
+ proto: tcp
+ port: 3500
+ priority: 10
+ weight: 20
+ type: SRV
+ value: fooserver.example.net
+
+- name: Create a SSHFP record login.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: login
+ type: SSHFP
+ algorithm: 4
+ hash_type: 2
+ value: 9dc1d6742696d2f51ca1f1a78b3d16a840f7d111eb9454239e70db31363f33e1
+
+- name: Create a TLSA record _25._tcp.mail.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: mail
+ port: 25
+ proto: tcp
+ type: TLSA
+ cert_usage: 3
+ selector: 1
+ hash_type: 1
+ value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3
+
+- name: Create a DS record for subdomain.example.com
+ community.general.cloudflare_dns:
+ zone: example.com
+ record: subdomain
+ type: DS
+ key_tag: 5464
+ algorithm: 8
+ hash_type: 2
+ value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: complex
+ contains:
+ content:
+ description: The record content (details depend on record type).
+ returned: success
+ type: str
+ sample: 192.0.2.91
+ created_on:
+ description: The record creation date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ data:
+ description: Additional record data.
+ returned: success, if type is SRV, DS, SSHFP or TLSA
+ type: dict
+ sample: {
+ name: "jabber",
+ port: 8080,
+ priority: 10,
+ proto: "_tcp",
+ service: "_xmpp",
+ target: "jabberhost.sample.com",
+ weight: 5,
+ }
+ id:
+ description: The record ID.
+ returned: success
+ type: str
+ sample: f9efb0549e96abcb750de63b38c9576e
+ locked:
+ description: No documentation available.
+ returned: success
+ type: bool
+ sample: false
+ meta:
+ description: No documentation available.
+ returned: success
+ type: dict
+ sample: { auto_added: false }
+ modified_on:
+ description: Record modification date.
+ returned: success
+ type: str
+ sample: "2016-03-25T19:09:42.516553Z"
+ name:
+ description: The record name as FQDN (including _service and _proto for SRV).
+ returned: success
+ type: str
+ sample: www.sample.com
+ priority:
+ description: Priority of the MX record.
+ returned: success, if type is MX
+ type: int
+ sample: 10
+ proxiable:
+ description: Whether this record can be proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: false
+ proxied:
+ description: Whether the record is proxied through Cloudflare.
+ returned: success
+ type: bool
+ sample: false
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ zone_id:
+ description: The ID of the zone containing the record.
+ returned: success
+ type: str
+ sample: abcede0bf9f0066f94029d2e6b73856a
+ zone_name:
+ description: The name of the zone containing the record.
+ returned: success
+ type: str
+ sample: sample.com
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.urls import fetch_url
+
+
+def lowercase_string(param):
+ if not isinstance(param, str):
+ return param
+ return param.lower()
+
+
+class CloudflareAPI(object):
+
+ cf_api_endpoint = 'https://api.cloudflare.com/client/v4'
+ changed = False
+
+ def __init__(self, module):
+ self.module = module
+ self.api_token = module.params['api_token']
+ self.account_api_key = module.params['account_api_key']
+ self.account_email = module.params['account_email']
+ self.algorithm = module.params['algorithm']
+ self.cert_usage = module.params['cert_usage']
+ self.hash_type = module.params['hash_type']
+ self.key_tag = module.params['key_tag']
+ self.port = module.params['port']
+ self.priority = module.params['priority']
+ self.proto = lowercase_string(module.params['proto'])
+ self.proxied = module.params['proxied']
+ self.selector = module.params['selector']
+ self.record = lowercase_string(module.params['record'])
+ self.service = lowercase_string(module.params['service'])
+ self.is_solo = module.params['solo']
+ self.state = module.params['state']
+ self.timeout = module.params['timeout']
+ self.ttl = module.params['ttl']
+ self.type = module.params['type']
+ self.value = module.params['value']
+ self.weight = module.params['weight']
+ self.zone = lowercase_string(module.params['zone'])
+
+ if self.record == '@':
+ self.record = self.zone
+
+ if (self.type in ['CNAME', 'NS', 'MX', 'SRV']) and (self.value is not None):
+ self.value = self.value.rstrip('.').lower()
+
+ if (self.type == 'AAAA') and (self.value is not None):
+ self.value = self.value.lower()
+
+ if (self.type == 'SRV'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.service is not None) and (not self.service.startswith('_')):
+ self.service = '_' + self.service
+
+ if (self.type == 'TLSA'):
+ if (self.proto is not None) and (not self.proto.startswith('_')):
+ self.proto = '_' + self.proto
+ if (self.port is not None):
+ self.port = '_' + str(self.port)
+
+ if not self.record.endswith(self.zone):
+ self.record = self.record + '.' + self.zone
+
+ if (self.type == 'DS'):
+ if self.record == self.zone:
+ self.module.fail_json(msg="DS records only apply to subdomains.")
+
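+    # Editorial sketch (hypothetical values): with zone 'example.net',
+    # record 'test', type 'SRV', service 'xmpp' and proto 'tcp', the
+    # normalization above yields self.record == 'test.example.net',
+    # self.service == '_xmpp' and self.proto == '_tcp'; a record of '@'
+    # would have mapped to the zone apex instead.
+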
+ def _cf_simple_api_call(self, api_call, method='GET', payload=None):
+ if self.api_token:
+ headers = {
+ 'Authorization': 'Bearer ' + self.api_token,
+ 'Content-Type': 'application/json',
+ }
+ else:
+ headers = {
+ 'X-Auth-Email': self.account_email,
+ 'X-Auth-Key': self.account_api_key,
+ 'Content-Type': 'application/json',
+ }
+ data = None
+ if payload:
+ try:
+ data = json.dumps(payload)
+ except Exception as e:
+ self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e))
+
+ resp, info = fetch_url(self.module,
+ self.cf_api_endpoint + api_call,
+ headers=headers,
+ data=data,
+ method=method,
+ timeout=self.timeout)
+
+ if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
+ self.module.fail_json(msg="Failed API call {0}; got unexpected HTTP code {1}: {2}".format(api_call, info['status'], info.get('msg')))
+
+ error_msg = ''
+        if info['status'] == 401:
+            # Unauthorized
+            error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+        elif info['status'] == 403:
+            # Forbidden
+            error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 429:
+ # Too many requests
+ error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 405:
+ # Method not allowed
+ error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 415:
+ # Unsupported Media Type
+ error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+ elif info['status'] == 400:
+ # Bad Request
+ error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(info['status'], method, api_call)
+
+        result = None
+        content = None  # avoid a NameError when resp is None and the body is empty
+        try:
+            content = resp.read()
+ except AttributeError:
+ if info['body']:
+ content = info['body']
+ else:
+ error_msg += "; The API response was empty"
+
+ if content:
+ try:
+ result = json.loads(to_text(content, errors='surrogate_or_strict'))
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content)
+
+ # Without a valid/parsed JSON response no more error processing can be done
+ if result is None:
+ self.module.fail_json(msg=error_msg)
+
+ if 'success' not in result:
+ error_msg += "; Unexpected error details: {0}".format(result.get('error'))
+ self.module.fail_json(msg=error_msg)
+
+ if not result['success']:
+ error_msg += "; Error details: "
+ for error in result['errors']:
+ error_msg += "code: {0}, error: {1}; ".format(error['code'], error['message'])
+ if 'error_chain' in error:
+ for chain_error in error['error_chain']:
+ error_msg += "code: {0}, error: {1}; ".format(chain_error['code'], chain_error['message'])
+ self.module.fail_json(msg=error_msg)
+
+ return result, info['status']
+
+ def _cf_api_call(self, api_call, method='GET', payload=None):
+ result, status = self._cf_simple_api_call(api_call, method, payload)
+
+ data = result['result']
+
+ if 'result_info' in result:
+ pagination = result['result_info']
+ if pagination['total_pages'] > 1:
+ next_page = int(pagination['page']) + 1
+ parameters = ['page={0}'.format(next_page)]
+ # strip "page" parameter from call parameters (if there are any)
+ if '?' in api_call:
+ raw_api_call, query = api_call.split('?', 1)
+ parameters += [param for param in query.split('&') if not param.startswith('page')]
+ else:
+ raw_api_call = api_call
+                while next_page <= pagination['total_pages']:
+                    # rebuild the URL for every page rather than appending to it,
+                    # and advance the page parameter on each iteration
+                    parameters[0] = 'page={0}'.format(next_page)
+                    paged_api_call = raw_api_call + '?' + '&'.join(parameters)
+                    result, status = self._cf_simple_api_call(paged_api_call, method, payload)
+                    data += result['result']
+                    next_page += 1
+
+ return data, status
+
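+    # Editorial sketch (hypothetical call): a GET of
+    # '/zones/<id>/dns_records?type=A' reporting total_pages == 3 is
+    # re-issued as '...?page=2&type=A' and '...?page=3&type=A', with each
+    # page's 'result' list appended to the combined data.
+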
+ def _get_zone_id(self, zone=None):
+ if not zone:
+ zone = self.zone
+
+ zones = self.get_zones(zone)
+ if len(zones) > 1:
+ self.module.fail_json(msg="More than one zone matches {0}".format(zone))
+
+ if len(zones) < 1:
+ self.module.fail_json(msg="No zone found with name {0}".format(zone))
+
+ return zones[0]['id']
+
+ def get_zones(self, name=None):
+ if not name:
+ name = self.zone
+ param = ''
+ if name:
+ param = '?' + urlencode({'name': name})
+ zones, status = self._cf_api_call('/zones' + param)
+ return zones
+
+ def get_dns_records(self, zone_name=None, type=None, record=None, value=''):
+ if not zone_name:
+ zone_name = self.zone
+ if not type:
+ type = self.type
+ if not record:
+ record = self.record
+        # the default value of '' falls back to the module's value parameter;
+        # passing value=None explicitly skips that fallback
+ if (not value) and (value is not None):
+ value = self.value
+
+ zone_id = self._get_zone_id()
+ api_call = '/zones/{0}/dns_records'.format(zone_id)
+ query = {}
+ if type:
+ query['type'] = type
+ if record:
+ query['name'] = record
+ if value:
+ query['content'] = value
+ if query:
+ api_call += '?' + urlencode(query)
+
+ records, status = self._cf_api_call(api_call)
+ return records
+
+ def delete_dns_records(self, **kwargs):
+ params = {}
+ for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ records = []
+ content = params['value']
+ search_record = params['record']
+ if params['type'] == 'SRV':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+ elif params['type'] == 'DS':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'SSHFP':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+ elif params['type'] == 'TLSA':
+ if not (params['value'] is None or params['value'] == ''):
+ content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+            search_record = str(params['port']) + '.' + params['proto'] + '.' + params['record']
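+        # Content for these types is matched against a tab-separated form, e.g.
+        # (hypothetical values) an SRV record with weight=5, port=443 and
+        # value='target.example.com' matches content '5\t443\ttarget.example.com'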
+ if params['solo']:
+ search_value = None
+ else:
+ search_value = content
+
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+
+ for rr in records:
+ if params['solo']:
+ if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)):
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ else:
+ self.changed = True
+ if not self.module.check_mode:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE')
+ return self.changed
+
+ def ensure_dns_record(self, **kwargs):
+ params = {}
+ for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone',
+ 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']:
+ if param in kwargs:
+ params[param] = kwargs[param]
+ else:
+ params[param] = getattr(self, param)
+
+ search_value = params['value']
+ search_record = params['record']
+ new_record = None
+ if (params['type'] is None) or (params['record'] is None):
+ self.module.fail_json(msg="You must provide a type and a record to create a new record")
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']):
+ if not params['value']:
+ self.module.fail_json(msg="You must provide a non-empty value to create this record type")
+
+ # there can only be one CNAME per record
+ # ignoring the value when searching for existing
+ # CNAME records allows us to update the value if it
+ # changes
+ if params['type'] == 'CNAME':
+ search_value = None
+
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "ttl": params['ttl']
+ }
+
+ if (params['type'] in ['A', 'AAAA', 'CNAME']):
+ new_record["proxied"] = params["proxied"]
+
+ if params['type'] == 'MX':
+ for attr in [params['priority'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide priority and a value to create this record type")
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ "content": params['value'],
+ "priority": params['priority'],
+ "ttl": params['ttl']
+ }
+
+ if params['type'] == 'SRV':
+ for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type")
+ srv_data = {
+ "target": params['value'],
+ "port": params['port'],
+ "weight": params['weight'],
+ "priority": params['priority'],
+ "name": params['record'],
+ "proto": params['proto'],
+ "service": params['service']
+ }
+
+ new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data}
+ search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value']
+ search_record = params['service'] + '.' + params['proto'] + '.' + params['record']
+
+ if params['type'] == 'DS':
+ for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type")
+ ds_data = {
+ "key_tag": params['key_tag'],
+ "algorithm": params['algorithm'],
+ "digest_type": params['hash_type'],
+ "digest": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': ds_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'SSHFP':
+ for attr in [params['algorithm'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type")
+ sshfp_data = {
+ "fingerprint": params['value'],
+ "type": params['hash_type'],
+ "algorithm": params['algorithm'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": params['record'],
+ 'data': sshfp_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ if params['type'] == 'TLSA':
+ for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]:
+ if (attr is None) or (attr == ''):
+ self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type")
+        search_record = str(params['port']) + '.' + params['proto'] + '.' + params['record']
+ tlsa_data = {
+ "usage": params['cert_usage'],
+ "selector": params['selector'],
+ "matching_type": params['hash_type'],
+ "certificate": params['value'],
+ }
+ new_record = {
+ "type": params['type'],
+ "name": search_record,
+ 'data': tlsa_data,
+ "ttl": params['ttl'],
+ }
+ search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value']
+
+ zone_id = self._get_zone_id(params['zone'])
+ records = self.get_dns_records(params['zone'], params['type'], search_record, search_value)
+        # in theory this should be impossible, as Cloudflare does not allow
+        # the creation of duplicate records, but let's cover it anyway
+ if len(records) > 1:
+ self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!")
+ # record already exists, check if it must be updated
+ if len(records) == 1:
+ cur_record = records[0]
+ do_update = False
+ if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']):
+ do_update = True
+ if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']):
+ do_update = True
+ if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']):
+ do_update = True
+ if ('data' in new_record) and ('data' in cur_record):
+ if (cur_record['data'] != new_record['data']):
+ do_update = True
+ if (params['type'] == 'CNAME') and (cur_record['content'] != new_record['content']):
+ do_update = True
+ if do_update:
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, records[0]['id']), 'PUT', new_record)
+ self.changed = True
+ return result, self.changed
+ else:
+ return records, self.changed
+ if self.module.check_mode:
+ result = new_record
+ else:
+ result, info = self._cf_api_call('/zones/{0}/dns_records'.format(zone_id), 'POST', new_record)
+ self.changed = True
+ return result, self.changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_token=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]),
+ ),
+ account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']),
+ account_email=dict(type='str', required=False),
+ algorithm=dict(type='int'),
+ cert_usage=dict(type='int', choices=[0, 1, 2, 3]),
+ hash_type=dict(type='int', choices=[1, 2]),
+ key_tag=dict(type='int', no_log=False),
+ port=dict(type='int'),
+ priority=dict(type='int', default=1),
+ proto=dict(type='str'),
+ proxied=dict(type='bool', default=False),
+ record=dict(type='str', default='@', aliases=['name']),
+ selector=dict(type='int', choices=[0, 1]),
+ service=dict(type='str'),
+ solo=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ timeout=dict(type='int', default=30),
+ ttl=dict(type='int', default=1),
+ type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']),
+ value=dict(type='str', aliases=['content']),
+ weight=dict(type='int', default=1),
+ zone=dict(type='str', required=True, aliases=['domain']),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['record', 'type', 'value']),
+ ('state', 'absent', ['record']),
+ ('type', 'SRV', ['proto', 'service']),
+ ('type', 'TLSA', ['proto', 'port']),
+ ],
+ )
+
+ if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']):
+ module.fail_json(msg="Either api_token or account_api_key and account_email params are required.")
+ if module.params['type'] == 'SRV':
+ if not ((module.params['weight'] is not None and module.params['port'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['weight'] is None and module.params['port'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SRV records the params weight, port and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'SSHFP':
+ if not ((module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For SSHFP records the params algorithm, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'TLSA':
+ if not ((module.params['cert_usage'] is not None and module.params['selector'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['cert_usage'] is None and module.params['selector'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.")
+
+ if module.params['type'] == 'DS':
+ if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None
+ and not (module.params['value'] is None or module.params['value'] == ''))
+ or (module.params['key_tag'] is None and module.params['algorithm'] is None and module.params['hash_type'] is None
+ and (module.params['value'] is None or module.params['value'] == ''))):
+ module.fail_json(msg="For DS records the params key_tag, algorithm, hash_type and value all need to be defined, or not at all.")
+
+ changed = False
+ cf_api = CloudflareAPI(module)
+
+ # sanity checks
+ if cf_api.is_solo and cf_api.state == 'absent':
+ module.fail_json(msg="solo=true can only be used with state=present")
+
+    # perform add, delete or update of one or more records
+ if cf_api.state == 'present':
+ # delete all records matching record name + type
+ if cf_api.is_solo:
+ changed = cf_api.delete_dns_records(solo=cf_api.is_solo)
+ result, changed = cf_api.ensure_dns_record()
+ if isinstance(result, list):
+ module.exit_json(changed=changed, result={'record': result[0]})
+
+ module.exit_json(changed=changed, result={'record': result})
+ else:
+ # force solo to False, just to be sure
+ changed = cf_api.delete_dns_records(solo=False)
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_sync.py b/ansible_collections/community/general/plugins/modules/cobbler_sync.py
new file mode 100644
index 000000000..d7acf4be6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cobbler_sync.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_sync
+short_description: Sync Cobbler
+description:
+ - Sync Cobbler to commit changes.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+    - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On Python 2.7.8 and older (for example on RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Commit Cobbler changes
+ community.general.cobbler_sync:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ run_once: true
+ delegate_to: localhost
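+
+# A sketch only; the host and port values below are assumptions for illustration.
+- name: Commit Cobbler changes on a server with a self-signed certificate
+  community.general.cobbler_sync:
+    host: cobbler01.example.com
+    port: 8443
+    username: cobbler
+    password: MySuperSecureP4sswOrd
+    validate_certs: false
+  run_once: true
+  delegate_to: localhost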
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils.common.text.converters import to_text
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=True,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try:
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError:
+ # Legacy Python that doesn't verify HTTPS certificates by default
+ pass
+ else:
+ # Handle target environment that doesn't support HTTPS verification
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
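+    # e.g. with the defaults this resolves to https://127.0.0.1:443/cobbler_api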
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e)))
+
+ if not module.check_mode:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e)))
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cobbler_system.py b/ansible_collections/community/general/plugins/modules/cobbler_system.py
new file mode 100644
index 000000000..c30b4f1c1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cobbler_system.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cobbler_system
+short_description: Manage system objects in Cobbler
+description:
+  - Add, modify or remove systems in Cobbler.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ host:
+ description:
+ - The name or IP address of the Cobbler system.
+ default: 127.0.0.1
+ type: str
+ port:
+ description:
+ - Port number to be used for REST connection.
+ - The default value depends on parameter C(use_ssl).
+ type: int
+ username:
+ description:
+ - The username to log in to Cobbler.
+ default: cobbler
+ type: str
+ password:
+ description:
+ - The password to log in to Cobbler.
+ type: str
+ use_ssl:
+ description:
+ - If C(false), an HTTP connection will be used instead of the default HTTPS connection.
+ type: bool
+ default: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+    - This should only be set to C(false) when used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ name:
+ description:
+ - The system name to manage.
+ type: str
+ properties:
+ description:
+ - A dictionary with system properties.
+ type: dict
+ interfaces:
+ description:
+    - A dictionary of dictionaries containing interface options, keyed by interface name.
+ type: dict
+ sync:
+ description:
+ - Sync on changes.
+ - Concurrently syncing Cobbler is bound to fail.
+ type: bool
+ default: false
+ state:
+ description:
+    - Whether the system should be present or absent, or whether a query should be made.
+ choices: [ absent, present, query ]
+ default: present
+ type: str
+author:
+- Dag Wieers (@dagwieers)
+notes:
+- Concurrently syncing Cobbler is bound to fail with weird errors.
+- On Python 2.7.8 and older (for example on RHEL7) you may need to tweak the Python behaviour to disable certificate validation.
+ More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753).
+'''
+
+EXAMPLES = r'''
+- name: Ensure the system exists in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ properties:
+ profile: CentOS6-x86_64
+ name_servers: [ 2.3.4.5, 3.4.5.6 ]
+ name_servers_search: foo.com, bar.com
+ interfaces:
+ eth0:
+ macaddress: 00:01:02:03:04:05
+ ipaddress: 1.2.3.4
+ delegate_to: localhost
+
+- name: Enable network boot in Cobbler
+ community.general.cobbler_system:
+ host: bdsol-aci-cobbler-01
+ username: cobbler
+ password: ins3965!
+ name: bdsol-aci51-apic1.cisco.com
+ properties:
+ netboot_enabled: true
+ state: present
+ delegate_to: localhost
+
+- name: Query all systems in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ state: query
+ register: cobbler_systems
+ delegate_to: localhost
+
+- name: Query a specific system in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: '{{ inventory_hostname }}'
+ state: query
+ register: cobbler_properties
+ delegate_to: localhost
+
+- name: Ensure the system does not exist in Cobbler
+ community.general.cobbler_system:
+ host: cobbler01
+ username: cobbler
+ password: MySuperSecureP4sswOrd
+ name: myhost
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+systems:
+  description: List of systems.
+ returned: I(state=query) and I(name) is not provided
+ type: list
+system:
+  description: (Resulting) information about the system we are working with.
+ returned: when I(name) is provided
+ type: dict
+'''
+
+import datetime
+import ssl
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible.module_utils.common.text.converters import to_text
+
+IFPROPS_MAPPING = dict(
+ bondingopts='bonding_opts',
+ bridgeopts='bridge_opts',
+ connected_mode='connected_mode',
+ cnames='cnames',
+ dhcptag='dhcp_tag',
+ dnsname='dns_name',
+ ifgateway='if_gateway',
+ interfacetype='interface_type',
+ interfacemaster='interface_master',
+ ipaddress='ip_address',
+ ipv6address='ipv6_address',
+ ipv6defaultgateway='ipv6_default_gateway',
+ ipv6mtu='ipv6_mtu',
+ ipv6prefix='ipv6_prefix',
+    ipv6secondaries='ipv6_secondaries',
+ ipv6staticroutes='ipv6_static_routes',
+ macaddress='mac_address',
+ management='management',
+ mtu='mtu',
+ netmask='netmask',
+ static='static',
+ staticroutes='static_routes',
+ virtbridge='virt_bridge',
+)
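+# For example, an interface option 'macaddress' from the playbook is sent to
+# Cobbler as 'mac_address' via this mapping.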
+
+
+def getsystem(conn, name, token):
+ system = dict()
+ if name:
+ # system = conn.get_system(name, token)
+ systems = conn.find_system(dict(name=name), token)
+ if systems:
+ system = systems[0]
+ return system
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int'),
+ username=dict(type='str', default='cobbler'),
+ password=dict(type='str', no_log=True),
+ use_ssl=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ name=dict(type='str'),
+ interfaces=dict(type='dict'),
+ properties=dict(type='dict'),
+ sync=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
+ ),
+ supports_check_mode=True,
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ port = module.params['port']
+ use_ssl = module.params['use_ssl']
+ validate_certs = module.params['validate_certs']
+
+ name = module.params['name']
+ state = module.params['state']
+
+ module.params['proto'] = 'https' if use_ssl else 'http'
+ if not port:
+ module.params['port'] = '443' if use_ssl else '80'
+
+ result = dict(
+ changed=False,
+ )
+
+ start = datetime.datetime.utcnow()
+
+ ssl_context = None
+ if not validate_certs:
+ try:
+ ssl_context = ssl._create_unverified_context()
+ except AttributeError:
+ # Legacy Python that doesn't verify HTTPS certificates by default
+ pass
+ else:
+ # Handle target environment that doesn't support HTTPS verification
+ ssl._create_default_https_context = ssl._create_unverified_context
+
+ url = '{proto}://{host}:{port}/cobbler_api'.format(**module.params)
+ if ssl_context:
+ conn = xmlrpc_client.ServerProxy(url, context=ssl_context)
+ else:
+ conn = xmlrpc_client.Server(url)
+
+ try:
+ token = conn.login(username, password)
+ except xmlrpc_client.Fault as e:
+ module.fail_json(msg="Failed to log in to Cobbler '{url}' as '{username}'. {error}".format(url=url, error=to_text(e), **module.params))
+ except Exception as e:
+ module.fail_json(msg="Connection to '{url}' failed. {error}".format(url=url, error=to_text(e), **module.params))
+
+ system = getsystem(conn, name, token)
+ # result['system'] = system
+
+ if state == 'query':
+ if name:
+ result['system'] = system
+ else:
+ # Turn it into a dictionary of dictionaries
+ # all_systems = conn.get_systems()
+ # result['systems'] = { system['name']: system for system in all_systems }
+
+ # Return a list of dictionaries
+ result['systems'] = conn.get_systems()
+
+ elif state == 'present':
+
+ if system:
+ # Update existing entry
+ system_id = conn.get_system_handle(name, token)
+
+            for key, value in iteritems(module.params['properties'] or {}):
+                if key not in system:
+                    module.warn("Property '{0}' is not a valid system property.".format(key))
+                    continue
+                if system[key] != value:
+ try:
+ conn.modify_system(system_id, key, value, token)
+ result['changed'] = True
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ else:
+ # Create a new entry
+ system_id = conn.new_system(token)
+ conn.modify_system(system_id, 'name', name, token)
+ result['changed'] = True
+
+ if module.params['properties']:
+ for key, value in iteritems(module.params['properties']):
+ try:
+ conn.modify_system(system_id, key, value, token)
+ except Exception as e:
+ module.fail_json(msg="Unable to change '{0}' to '{1}'. {2}".format(key, value, e))
+
+ # Add interface properties
+ interface_properties = dict()
+ if module.params['interfaces']:
+ for device, values in iteritems(module.params['interfaces']):
+ for key, value in iteritems(values):
+ if key == 'name':
+ continue
+                if key not in IFPROPS_MAPPING:
+                    module.warn("Property '{0}' is not a valid interface property.".format(key))
+                    continue
+                if not system or system['interfaces'].get(device, {}).get(IFPROPS_MAPPING[key]) != value:
+ result['changed'] = True
+ interface_properties['{0}-{1}'.format(key, device)] = value
+
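+        # e.g. interfaces={'eth0': {'macaddress': '00:01:02:03:04:05'}} yields
+        # {'macaddress-eth0': '00:01:02:03:04:05'}, the flattened form expected
+        # by Cobbler's modify_interface call below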
+ if result['changed'] is True:
+ conn.modify_system(system_id, "modify_interface", interface_properties, token)
+
+ # Only save when the entry was changed
+ if not module.check_mode and result['changed']:
+ conn.save_system(system_id, token)
+
+ elif state == 'absent':
+
+ if system:
+ if not module.check_mode:
+ conn.remove_system(name, token)
+ result['changed'] = True
+
+ if not module.check_mode and module.params['sync'] and result['changed']:
+ try:
+ conn.sync(token)
+ except Exception as e:
+ module.fail_json(msg="Failed to sync Cobbler. {0}".format(to_text(e)))
+
+ if state in ('absent', 'present'):
+ result['system'] = getsystem(conn, name, token)
+
+ if module._diff:
+ result['diff'] = dict(before=system, after=result['system'])
+
+ elapsed = datetime.datetime.utcnow() - start
+ module.exit_json(elapsed=elapsed.seconds, **result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/composer.py b/ansible_collections/community/general/plugins/modules/composer.py
new file mode 100644
index 000000000..793abcda1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/composer.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: composer
+author:
+ - "Dimitrios Tydeas Mengidis (@dmtrs)"
+ - "René Moser (@resmo)"
+short_description: Dependency Manager for PHP
+description:
+ - >
+ Composer is a tool for dependency management in PHP. It allows you to
+ declare the dependent libraries your project needs and it will install
+ them in your project for you.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ command:
+ type: str
+ description:
+ - Composer command like "install", "update" and so on.
+ default: install
+ arguments:
+ type: str
+ description:
+ - Composer arguments like required package, version and so on.
+ default: ''
+ executable:
+ type: path
+ description:
+ - Path to PHP Executable on the remote host, if PHP is not in PATH.
+ aliases: [ php_path ]
+ working_dir:
+ type: path
+ description:
+ - Directory of your project (see --working-dir). This is required when
+ the command is not run globally.
+ - Will be ignored if I(global_command=true).
+ global_command:
+ description:
+ - Runs the specified command globally.
+ type: bool
+ default: false
+ prefer_source:
+ description:
+ - Forces installation from package sources when possible (see --prefer-source).
+ default: false
+ type: bool
+ prefer_dist:
+ description:
+ - Forces installation from package dist even for dev versions (see --prefer-dist).
+ default: false
+ type: bool
+ no_dev:
+ description:
+ - Disables installation of require-dev packages (see --no-dev).
+ default: true
+ type: bool
+ no_scripts:
+ description:
+ - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ default: false
+ type: bool
+ no_plugins:
+ description:
+ - Disables all plugins (see --no-plugins).
+ default: false
+ type: bool
+ optimize_autoloader:
+ description:
+ - Optimize autoloader during autoloader dump (see --optimize-autoloader).
+ - Convert PSR-0/4 autoloading to classmap to get a faster autoloader.
+ - Recommended especially for production, but can take a bit of time to run.
+ default: true
+ type: bool
+ classmap_authoritative:
+ description:
+ - Autoload classes from classmap only.
+      - Implicitly enables I(optimize_autoloader).
+ - Recommended especially for production, but can take a bit of time to run.
+ default: false
+ type: bool
+ apcu_autoloader:
+ description:
+      - Uses APCu to cache found/not-found classes.
+ default: false
+ type: bool
+ ignore_platform_reqs:
+ description:
+ - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
+ default: false
+ type: bool
+ composer_executable:
+ type: path
+ description:
+ - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed.
+ version_added: 3.2.0
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable)
+notes:
+ - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
+ - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues.
+'''
+
+EXAMPLES = '''
+- name: Download and install all libs and dependencies outlined in /path/to/project/composer.lock
+ community.general.composer:
+ command: install
+ working_dir: /path/to/project
+
+- name: Install a new package
+ community.general.composer:
+ command: require
+ arguments: my/package
+ working_dir: /path/to/project
+
+- name: Clone and install a project with all dependencies
+ community.general.composer:
+ command: create-project
+ arguments: package/package /path/to/project ~1.0
+ working_dir: /path/to/project
+ prefer_dist: true
+
+- name: Install a package globally
+ community.general.composer:
+ command: require
+ global_command: true
+ arguments: my/package
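+
+# A sketch only; the project path is an assumption for illustration.
+- name: Update dependencies including require-dev packages
+  community.general.composer:
+    command: update
+    working_dir: /path/to/project
+    no_dev: false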
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_out(string):
+ return re.sub(r"\s+", " ", string).strip()
+
+
+def has_changed(string):
+ for no_change in ["Nothing to install or update", "Nothing to install, update or remove"]:
+ if no_change in string:
+ return False
+
+ return True
+
+
+def get_available_options(module, command='install'):
+ # get all available options from a composer command using composer help to json
+ rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json")
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+
+ command_help_json = module.from_json(out)
+ return command_help_json['definition']['options']
+
+
+def composer_command(module, command, arguments="", options=None, global_command=False):
+ if options is None:
+ options = []
+
+ if module.params['executable'] is None:
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ else:
+ php_path = module.params['executable']
+
+ if module.params['composer_executable'] is None:
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ else:
+ composer_path = module.params['composer_executable']
+
+ cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments)
+ return module.run_command(cmd)
+
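+# For example (hypothetical paths), composer_command(module, "install",
+# options=["--no-interaction"]) runs a command line similar to:
+#   /usr/bin/php /usr/local/bin/composer  install --no-interaction
+# (the doubled space is the empty "global" segment of the format string)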
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(default="install", type="str"),
+ arguments=dict(default="", type="str"),
+ executable=dict(type="path", aliases=["php_path"]),
+ working_dir=dict(type="path"),
+ global_command=dict(default=False, type="bool"),
+ prefer_source=dict(default=False, type="bool"),
+ prefer_dist=dict(default=False, type="bool"),
+ no_dev=dict(default=True, type="bool"),
+ no_scripts=dict(default=False, type="bool"),
+ no_plugins=dict(default=False, type="bool"),
+ apcu_autoloader=dict(default=False, type="bool"),
+ optimize_autoloader=dict(default=True, type="bool"),
+ classmap_authoritative=dict(default=False, type="bool"),
+ ignore_platform_reqs=dict(default=False, type="bool"),
+ composer_executable=dict(type="path"),
+ ),
+ required_if=[('global_command', False, ['working_dir'])],
+ supports_check_mode=True
+ )
+
+    # Get the composer command to run
+ command = module.params['command']
+ if re.search(r"\s", command):
+ module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
+
+ arguments = module.params['arguments']
+ global_command = module.params['global_command']
+ available_options = get_available_options(module=module, command=command)
+
+ options = []
+
+ # Default options
+ default_options = [
+ 'no-ansi',
+ 'no-interaction',
+ 'no-progress',
+ ]
+
+ for option in default_options:
+ if option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if not global_command:
+ options.extend(['--working-dir', "'%s'" % module.params['working_dir']])
+
+ option_params = {
+ 'prefer_source': 'prefer-source',
+ 'prefer_dist': 'prefer-dist',
+ 'no_dev': 'no-dev',
+ 'no_scripts': 'no-scripts',
+ 'no_plugins': 'no-plugins',
+        'apcu_autoloader': 'apcu-autoloader',
+ 'optimize_autoloader': 'optimize-autoloader',
+ 'classmap_authoritative': 'classmap-authoritative',
+ 'ignore_platform_reqs': 'ignore-platform-reqs',
+ }
+
+ for param, option in option_params.items():
+ if module.params.get(param) and option in available_options:
+ option = "--%s" % option
+ options.append(option)
+
+ if module.check_mode:
+ if 'dry-run' in available_options:
+ options.append('--dry-run')
+ else:
+ module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command)
+
+ rc, out, err = composer_command(module, command, arguments, options, global_command)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output, stdout=err)
+ else:
+        # Composer version > 1.0.0-alpha9 now uses stderr for standard notification messages
+ output = parse_out(out + err)
+ module.exit_json(changed=has_changed(output), msg=output, stdout=out + err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul.py b/ansible_collections/community/general/plugins/modules/consul.py
new file mode 100644
index 000000000..cc599be36
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul.py
@@ -0,0 +1,635 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul
+short_description: Add, modify & delete services within a consul cluster
+description:
+  - Registers services and checks for an agent with a consul cluster.
+    A service is some process running on the agent node that should be advertised by
+    consul's discovery mechanism. It may optionally supply a check definition,
+    a periodic service test to notify the consul cluster of the service's health.
+  - "Checks may also be registered per node, e.g. for disk or cpu usage, and
+    report the health of the entire node to the cluster.
+    Service level checks do not require a check name or id as these are derived
+    by Consul from the Service name and id respectively by appending 'service:'.
+    Node level checks require a I(check_name) and optionally a I(check_id)."
+  - Currently, there is no complete way to retrieve the script, interval or ttl
+    metadata for a registered check. Without this metadata it is not possible to
+    tell if the data supplied with ansible represents a change to a check. As a
+    result this module does not attempt to determine changes and will always
+    report that a change occurred. An API method is planned to supply this
+    metadata, so change management will be added at that stage.
+ - "See U(http://consul.io) for more details."
+requirements:
+ - python-consul
+ - requests
+author: "Steve Gargan (@sgargan)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+      - Register or deregister the consul service; defaults to C(present).
+ default: present
+ choices: ['present', 'absent']
+ service_name:
+ type: str
+ description:
+      - Unique name for the service on a node; required if registering a service.
+        May be omitted if registering a node level check.
+ service_id:
+ type: str
+ description:
+ - The ID for the service, must be unique per node. If I(state=absent),
+ defaults to the service name if supplied.
+ host:
+ type: str
+ description:
+      - Host of the consul agent; defaults to C(localhost).
+ default: localhost
+ port:
+ type: int
+ description:
+ - The port on which the consul agent is running.
+ default: 8500
+ scheme:
+ type: str
+ description:
+ - The protocol scheme on which the consul agent is running.
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: true
+ notes:
+ type: str
+ description:
+ - Notes to attach to check when registering it.
+ service_port:
+ type: int
+ description:
+ - The port on which the service is listening. Can optionally be supplied for
+ registration of a service, i.e. if I(service_name) or I(service_id) is set.
+ service_address:
+ type: str
+ description:
+ - The address to advertise that the service will be listening on.
+ This value will be passed as the I(address) parameter to Consul's
+ C(/v1/agent/service/register) API method, so refer to the Consul API
+ documentation for further details.
+ tags:
+ type: list
+ elements: str
+ description:
+ - Tags that will be attached to the service registration.
+ script:
+ type: str
+ description:
+ - The script/command that will be run periodically to check the health of the service.
+ - Requires I(interval) to be provided.
+ interval:
+ type: str
+ description:
+ - The interval at which the service check will be run.
+        This is a number with a C(s) or C(m) suffix to signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+ - Required if one of the parameters I(script), I(http), or I(tcp) is specified.
+ check_id:
+ type: str
+ description:
+ - An ID for the service check. If I(state=absent), defaults to
+ I(check_name). Ignored if part of a service definition.
+ check_name:
+ type: str
+ description:
+ - Name for the service check. Required if standalone, ignored if
+ part of service definition.
+ ttl:
+ type: str
+ description:
+      - Checks can be registered with a TTL instead of a I(script) and I(interval).
+        This means that the service will check in with the agent before the TTL
+        expires; if it does not, the check will be considered failed.
+        Required if registering a check when I(script) and I(interval) are missing.
+        Similar to the interval, this is a number with a C(s) or C(m) suffix to
+        signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+        If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+ tcp:
+ type: str
+ description:
+ - Checks can be registered with a TCP port. This means that consul
+ will check if the connection attempt to that port is successful (that is, the port is currently accepting connections).
+ The format is C(host:port), for example C(localhost:80).
+ - Requires I(interval) to be provided.
+ version_added: '1.3.0'
+ http:
+ type: str
+ description:
+ - Checks can be registered with an HTTP endpoint. This means that consul
+ will check that the http endpoint returns a successful HTTP status.
+ - Requires I(interval) to be provided.
+ timeout:
+ type: str
+ description:
+ - A custom HTTP check timeout. The consul default is 10 seconds.
+ Similar to the interval this is a number with a C(s) or C(m) suffix to
+ signify the units of seconds or minutes, e.g. C(15s) or C(1m).
+ If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s).
+ token:
+ type: str
+ description:
+ - The token key identifying an ACL rule set. May be required to register services.
+ ack_params_state_absent:
+ type: bool
+ description:
+ - Disable deprecation warning when using parameters incompatible with I(state=absent).
+'''
+
+EXAMPLES = '''
+- name: Register nginx service with the local consul agent
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+
+- name: Register nginx service with curl check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ script: curl http://localhost
+ interval: 60s
+
+- name: Register nginx with a TCP check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ tcp: localhost:80
+
+- name: Register nginx with an http check
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ interval: 60s
+ http: http://localhost:80/status
+
+- name: Register external service nginx available at 10.1.5.23
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ service_address: 10.1.5.23
+
+- name: Register nginx with some service tags
+ community.general.consul:
+ service_name: nginx
+ service_port: 80
+ tags:
+ - prod
+ - webservers
+
+- name: Remove nginx service
+ community.general.consul:
+ service_name: nginx
+ state: absent
+
+- name: Register celery worker service
+ community.general.consul:
+ service_name: celery-worker
+ tags:
+ - prod
+ - worker
+
+- name: Create a node level check to test disk usage
+ community.general.consul:
+ check_name: Disk usage
+ check_id: disk_usage
+ script: /opt/disk_usage.py
+ interval: 5m
+
+- name: Register an http check against a service that's already registered
+ community.general.consul:
+ check_name: nginx-check2
+ check_id: nginx-check2
+ service_id: nginx
+ interval: 60s
+ http: http://localhost:80/morestatus
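+
+# A sketch; the check name and id below are illustrative.
+- name: Register a TTL check that the service must refresh before it expires
+  community.general.consul:
+    check_name: celery-heartbeat
+    check_id: celery_heartbeat
+    ttl: 30s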
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+
+ class PatchedConsulAgentService(consul.Consul.Agent.Service):
+ def deregister(self, service_id, token=None):
+ params = {}
+ if token:
+ params['token'] = token
+ return self.agent.http.put(consul.base.CB.bool(),
+ '/v1/agent/service/deregister/%s' % service_id,
+ params=params)
+
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def register_with_consul(module):
+ state = module.params['state']
+
+ if state == 'present':
+ add(module)
+ else:
+ remove(module)
+
+
+def add(module):
+ ''' adds a service or a check depending on supplied configuration'''
+ check = parse_check(module)
+ service = parse_service(module)
+
+ if not service and not check:
+ module.fail_json(msg='a name and port are required to register a service')
+
+ if service:
+ if check:
+ service.add_check(check)
+ add_service(module, service)
+ elif check:
+ add_check(module, check)
+
+
+def remove(module):
+ ''' removes a service or a check '''
+ service_id = module.params['service_id'] or module.params['service_name']
+ check_id = module.params['check_id'] or module.params['check_name']
+ if service_id:
+ remove_service(module, service_id)
+ else:
+ remove_check(module, check_id)
+
+
+def add_check(module, check):
+    ''' registers a check with the given agent. Currently there is no way to
+    retrieve the full metadata of an existing check through the consul api.
+    Without this we can't compare to the supplied check and so we must assume
+    a change. '''
+ if not check.name and not check.service_id:
+ module.fail_json(msg='a check name is required for a node level check, one not attached to a service')
+
+ consul_api = get_consul_api(module)
+ check.register(consul_api)
+
+ module.exit_json(changed=True,
+ check_id=check.check_id,
+ check_name=check.name,
+ script=check.script,
+ interval=check.interval,
+ ttl=check.ttl,
+ tcp=check.tcp,
+ http=check.http,
+ timeout=check.timeout,
+ service_id=check.service_id)
+
+
+def remove_check(module, check_id):
+ ''' removes a check using its id '''
+ consul_api = get_consul_api(module)
+
+ if check_id in consul_api.agent.checks():
+ consul_api.agent.check.deregister(check_id)
+ module.exit_json(changed=True, id=check_id)
+
+ module.exit_json(changed=False, id=check_id)
+
+
+def add_service(module, service):
+ ''' registers a service with the current agent '''
+ result = service
+ changed = False
+
+ consul_api = get_consul_api(module)
+ existing = get_service_by_id_or_name(consul_api, service.id)
+
+ # there is no way to retrieve the details of checks so if a check is present
+ # in the service it must be re-registered
+ if service.has_checks() or not existing or not existing == service:
+
+ service.register(consul_api)
+ # check that it registered correctly
+ registered = get_service_by_id_or_name(consul_api, service.id)
+ if registered:
+ result = registered
+ changed = True
+
+ module.exit_json(changed=changed,
+ service_id=result.id,
+ service_name=result.name,
+ service_port=result.port,
+ checks=[check.to_dict() for check in service.checks()],
+ tags=result.tags)
+
+
+def remove_service(module, service_id):
+ ''' deregister a service from the given agent using its service id '''
+ consul_api = get_consul_api(module)
+ service = get_service_by_id_or_name(consul_api, service_id)
+ if service:
+ consul_api.agent.service.deregister(service_id, token=module.params['token'])
+ module.exit_json(changed=True, id=service_id)
+
+ module.exit_json(changed=False, id=service_id)
+
+
+def get_consul_api(module):
+ consulClient = consul.Consul(host=module.params['host'],
+ port=module.params['port'],
+ scheme=module.params['scheme'],
+ verify=module.params['validate_certs'],
+ token=module.params['token'])
+ consulClient.agent.service = PatchedConsulAgentService(consulClient)
+ return consulClient
+
+
+def get_service_by_id_or_name(consul_api, service_id_or_name):
+ ''' iterate the registered services and find one with the given id '''
+ for dummy, service in consul_api.agent.services().items():
+ if service_id_or_name in (service['ID'], service['Service']):
+ return ConsulService(loaded=service)
+
+
+def parse_check(module):
+ _checks = [module.params[p] for p in ('script', 'ttl', 'tcp', 'http') if module.params[p]]
+
+ if len(_checks) > 1:
+ module.fail_json(
+            msg='checks are either script, tcp, http or ttl driven; supplying more than one does not make sense')
+
+ if module.params['check_id'] or _checks:
+ return ConsulCheck(
+ module.params['check_id'],
+ module.params['check_name'],
+ module.params['check_node'],
+ module.params['check_host'],
+ module.params['script'],
+ module.params['interval'],
+ module.params['ttl'],
+ module.params['notes'],
+ module.params['tcp'],
+ module.params['http'],
+ module.params['timeout'],
+ module.params['service_id'],
+ )
+
+
+def parse_service(module):
+ return ConsulService(
+ module.params['service_id'],
+ module.params['service_name'],
+ module.params['service_address'],
+ module.params['service_port'],
+ module.params['tags'],
+ )
+
+
+class ConsulService(object):
+
+ def __init__(self, service_id=None, name=None, address=None, port=-1,
+ tags=None, loaded=None):
+ self.id = self.name = name
+ if service_id:
+ self.id = service_id
+ self.address = address
+ self.port = port
+ self.tags = tags
+ self._checks = []
+ if loaded:
+ self.id = loaded['ID']
+ self.name = loaded['Service']
+ self.port = loaded['Port']
+ self.tags = loaded['Tags']
+
+ def register(self, consul_api):
+ optional = {}
+
+ if self.port:
+ optional['port'] = self.port
+
+ if len(self._checks) > 0:
+ optional['check'] = self._checks[0].check
+
+ consul_api.agent.service.register(
+ self.name,
+ service_id=self.id,
+ address=self.address,
+ tags=self.tags,
+ **optional)
+
+ def add_check(self, check):
+ self._checks.append(check)
+
+ def checks(self):
+ return self._checks
+
+ def has_checks(self):
+ return len(self._checks) > 0
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.id == other.id and
+ self.name == other.name and
+ self.port == other.port and
+ self.tags == other.tags)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {'id': self.id, "name": self.name}
+ if self.port:
+ data['port'] = self.port
+ if self.tags and len(self.tags) > 0:
+ data['tags'] = self.tags
+ if len(self._checks) > 0:
+ data['check'] = self._checks[0].to_dict()
+ return data
+
+
+class ConsulCheck(object):
+
+ def __init__(self, check_id, name, node=None, host='localhost',
+ script=None, interval=None, ttl=None, notes=None, tcp=None, http=None, timeout=None, service_id=None):
+ self.check_id = self.name = name
+ if check_id:
+ self.check_id = check_id
+ self.service_id = service_id
+ self.notes = notes
+ self.node = node
+ self.host = host
+
+ self.interval = self.validate_duration('interval', interval)
+ self.ttl = self.validate_duration('ttl', ttl)
+ self.script = script
+ self.tcp = tcp
+ self.http = http
+ self.timeout = self.validate_duration('timeout', timeout)
+
+ self.check = None
+
+ if script:
+ self.check = consul.Check.script(script, self.interval)
+
+ if ttl:
+ self.check = consul.Check.ttl(self.ttl)
+
+ if http:
+ if interval is None:
+ raise Exception('http check must specify interval')
+
+ self.check = consul.Check.http(http, self.interval, self.timeout)
+
+ if tcp:
+ if interval is None:
+ raise Exception('tcp check must specify interval')
+
+ regex = r"(?P<host>.*):(?P<port>(?:[0-9]+))$"
+ match = re.match(regex, tcp)
+
+ if not match:
+ raise Exception('tcp check must be in host:port format')
+
+ self.check = consul.Check.tcp(match.group('host').strip('[]'), int(match.group('port')), self.interval)
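+            # e.g. tcp='localhost:80' checks ('localhost', 80); IPv6 targets
+            # such as '[::1]:80' have the brackets stripped from the host part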
+
+ def validate_duration(self, name, duration):
+ if duration:
+ duration_units = ['ns', 'us', 'ms', 's', 'm', 'h']
+ if not any(duration.endswith(suffix) for suffix in duration_units):
+ duration = "{0}s".format(duration)
+ return duration
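+        # e.g. validate_duration('interval', '10') returns '10s', while values
+        # that already carry a unit such as '1m' or '500ms' pass through unchanged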
+
+ def register(self, consul_api):
+ consul_api.agent.check.register(self.name, check_id=self.check_id, service_id=self.service_id,
+ notes=self.notes,
+ check=self.check)
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and
+ self.check_id == other.check_id and
+ self.service_id == other.service_id and
+ self.name == other.name and
+ self.script == other.script and
+ self.interval == other.interval)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_dict(self):
+ data = {}
+ self._add(data, 'id', attr='check_id')
+ self._add(data, 'name', attr='check_name')
+ self._add(data, 'script')
+ self._add(data, 'node')
+ self._add(data, 'notes')
+ self._add(data, 'host')
+ self._add(data, 'interval')
+ self._add(data, 'ttl')
+ self._add(data, 'tcp')
+ self._add(data, 'http')
+ self._add(data, 'timeout')
+ self._add(data, 'service_id')
+ return data
+
+ def _add(self, data, key, attr=None):
+ try:
+ if attr is None:
+ attr = key
+ data[key] = getattr(self, attr)
+ except Exception:
+ pass
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(default='localhost'),
+ port=dict(default=8500, type='int'),
+ scheme=dict(default='http'),
+ validate_certs=dict(default=True, type='bool'),
+ check_id=dict(),
+ check_name=dict(),
+ check_node=dict(),
+ check_host=dict(),
+ notes=dict(),
+ script=dict(),
+ service_id=dict(),
+ service_name=dict(),
+ service_address=dict(type='str'),
+ service_port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ interval=dict(type='str'),
+ ttl=dict(type='str'),
+ tcp=dict(type='str'),
+ http=dict(type='str'),
+ timeout=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ token=dict(no_log=True),
+ ack_params_state_absent=dict(type='bool'),
+ ),
+ required_if=[
+ ('state', 'present', ['service_name']),
+ ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True),
+ ],
+ supports_check_mode=False,
+ )
+ p = module.params
+
+ test_dependencies(module)
+ if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']) and not p['ack_params_state_absent']:
+ module.deprecate(
+ "The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is deprecated. "
+ "In community.general 8.0.0 their use will become an error. "
+ "To suppress this deprecation notice, set parameter ack_params_state_absent=true.",
+ version="8.0.0",
+ collection_name="community.general",
+ )
+ # When reaching c.g 8.0.0:
+ # - Replace the deprecation with a fail_json(), remove the "ack_params_state_absent" condition from the "if"
+ # - Add mutually_exclusive for ('script', 'ttl', 'tcp', 'http'), then remove that validation from parse_check()
+ # - Add required_by {'script': 'interval', 'http': 'interval', 'tcp': 'interval'}, then remove checks for 'interval' in ConsulCheck.__init__()
+ # - Deprecate the parameter ack_params_state_absent
+
+ try:
+ register_with_consul(module)
+ except SystemExit:
+ raise
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (p['host'], p['port'], str(e)))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_acl.py b/ansible_collections/community/general/plugins/modules/consul_acl.py
new file mode 100644
index 000000000..91f955228
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_acl.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_acl
+short_description: Manipulate Consul ACL keys and rules
+description:
+ - Allows the addition, modification and deletion of ACL keys and associated
+ rules in a consul cluster via the agent. For more details on using and
+ configuring ACLs, see https://www.consul.io/docs/guides/acl.html.
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+  mgmt_token:
+    description:
+      - A management token is required to manipulate the ACL lists.
+    required: true
+    type: str
+  state:
+    description:
+      - Whether the ACL pair should be present or absent.
+    required: false
+    choices: ['present', 'absent']
+    default: present
+    type: str
+  token_type:
+    description:
+      - The type of token that should be created.
+    choices: ['client', 'management']
+    default: client
+    type: str
+  name:
+    description:
+      - The name that should be associated with the ACL key; this is opaque
+        to Consul.
+    required: false
+    type: str
+  token:
+    description:
+      - The token key identifying an ACL rule set. If generated by consul
+        this will be a UUID.
+    required: false
+    type: str
+  rules:
+    type: list
+    elements: dict
+    description:
+      - Rules that should be associated with a given token.
+    required: false
+  host:
+    description:
+      - Host of the consul agent; defaults to C(localhost).
+    required: false
+    default: localhost
+    type: str
+  port:
+    type: int
+    description:
+      - The port on which the consul agent is running.
+    required: false
+    default: 8500
+  scheme:
+    description:
+      - The protocol scheme on which the consul agent is running.
+    required: false
+    default: http
+    type: str
+  validate_certs:
+    type: bool
+    description:
+      - Whether to verify the TLS certificate of the consul agent.
+    required: false
+    default: true
+requirements:
+ - python-consul
+ - pyhcl
+ - requests
+'''
+
+EXAMPLES = """
+- name: Create an ACL with rules
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ rules:
+ - key: "foo"
+ policy: read
+ - key: "private/foo"
+ policy: deny
+
+- name: Create an ACL with a specific token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: my-token
+ rules:
+ - key: "foo"
+ policy: read
+
+- name: Update the rules associated to an ACL token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ name: Foo access
+ token: some_client_token
+ rules:
+ - event: "bbq"
+ policy: write
+ - key: "foo"
+ policy: read
+ - key: "private"
+ policy: deny
+ - keyring: write
+ - node: "hgs4"
+ policy: write
+ - operator: read
+ - query: ""
+ policy: write
+ - service: "consul"
+ policy: write
+ - session: "standup"
+ policy: write
+
+- name: Remove a token
+ community.general.consul_acl:
+ host: consul1.example.com
+ mgmt_token: some_management_acl
+ token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e
+ state: absent
+"""
+
+RETURN = """
+token:
+ description: the token associated to the ACL (the ACL's ID)
+ returned: success
+ type: str
+ sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da
+rules:
+ description: the HCL JSON representation of the rules associated to the ACL, in the format described in the
+ Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification).
+ returned: I(state) == "present"
+ type: dict
+ sample: {
+ "key": {
+ "foo": {
+ "policy": "write"
+ },
+ "bar": {
+ "policy": "deny"
+ }
+ }
+ }
+operation:
+ description: the operation performed on the ACL
+ returned: changed
+ type: str
+ sample: update
+"""
+
+
+try:
+ import consul
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+try:
+ import hcl
+ pyhcl_installed = True
+except ImportError:
+ pyhcl_installed = False
+
+try:
+ from requests.exceptions import ConnectionError
+ has_requests = True
+except ImportError:
+ has_requests = False
+
+from collections import defaultdict
+from ansible.module_utils.basic import to_text, AnsibleModule
+
+
+RULE_SCOPES = [
+ "agent",
+ "agent_prefix",
+ "event",
+ "event_prefix",
+ "key",
+ "key_prefix",
+ "keyring",
+ "node",
+ "node_prefix",
+ "operator",
+ "query",
+ "query_prefix",
+ "service",
+ "service_prefix",
+ "session",
+ "session_prefix",
+]
+
+MANAGEMENT_PARAMETER_NAME = "mgmt_token"
+HOST_PARAMETER_NAME = "host"
+SCHEME_PARAMETER_NAME = "scheme"
+VALIDATE_CERTS_PARAMETER_NAME = "validate_certs"
+NAME_PARAMETER_NAME = "name"
+PORT_PARAMETER_NAME = "port"
+RULES_PARAMETER_NAME = "rules"
+STATE_PARAMETER_NAME = "state"
+TOKEN_PARAMETER_NAME = "token"
+TOKEN_TYPE_PARAMETER_NAME = "token_type"
+
+PRESENT_STATE_VALUE = "present"
+ABSENT_STATE_VALUE = "absent"
+
+CLIENT_TOKEN_TYPE_VALUE = "client"
+MANAGEMENT_TOKEN_TYPE_VALUE = "management"
+
+REMOVE_OPERATION = "remove"
+UPDATE_OPERATION = "update"
+CREATE_OPERATION = "create"
+
+_POLICY_JSON_PROPERTY = "policy"
+_RULES_JSON_PROPERTY = "Rules"
+_TOKEN_JSON_PROPERTY = "ID"
+_TOKEN_TYPE_JSON_PROPERTY = "Type"
+_NAME_JSON_PROPERTY = "Name"
+_POLICY_YML_PROPERTY = "policy"
+_POLICY_HCL_PROPERTY = "policy"
+
+_ARGUMENT_SPEC = {
+ MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True),
+ HOST_PARAMETER_NAME: dict(default='localhost'),
+ SCHEME_PARAMETER_NAME: dict(default='http'),
+ VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True),
+ NAME_PARAMETER_NAME: dict(),
+ PORT_PARAMETER_NAME: dict(default=8500, type='int'),
+ RULES_PARAMETER_NAME: dict(type='list', elements='dict'),
+ STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]),
+ TOKEN_PARAMETER_NAME: dict(no_log=False),
+ TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE],
+ default=CLIENT_TOKEN_TYPE_VALUE)
+}
+
+
+def set_acl(consul_client, configuration):
+ """
+ Sets an ACL based on the given configuration.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of setting the ACL
+ """
+ acls_as_json = decode_acls_as_json(consul_client.acl.list())
+ existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None)
+ existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json)
+ if None in existing_acls_mapped_by_token:
+ raise AssertionError("expecting ACL list to be associated to a token: %s" %
+ existing_acls_mapped_by_token[None])
+
+ if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name:
+ # No token but name given so can get token from name
+ configuration.token = existing_acls_mapped_by_name[configuration.name].token
+
+ if configuration.token and configuration.token in existing_acls_mapped_by_token:
+ return update_acl(consul_client, configuration)
+ else:
+ if configuration.token in existing_acls_mapped_by_token:
+ raise AssertionError()
+ if configuration.name in existing_acls_mapped_by_name:
+ raise AssertionError()
+ return create_acl(consul_client, configuration)
+
+
+def update_acl(consul_client, configuration):
+ """
+ Updates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the update
+ """
+ existing_acl = load_acl_with_token(consul_client, configuration.token)
+ changed = existing_acl.rules != configuration.rules
+
+ if changed:
+ name = configuration.name if configuration.name is not None else existing_acl.name
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules)
+ updated_token = consul_client.acl.update(
+ configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl)
+ if updated_token != configuration.token:
+ raise AssertionError()
+
+ return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION)
+
+
+def create_acl(consul_client, configuration):
+ """
+ Creates an ACL.
+ :param consul_client: the consul client
+ :param configuration: the run configuration
+ :return: the output of the creation
+ """
+ rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None
+ token = consul_client.acl.create(
+ name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token)
+ rules = configuration.rules
+ return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION)
+
+
+def remove_acl(consul, configuration):
+ """
+ Removes an ACL.
+ :param consul: the consul client
+ :param configuration: the run configuration
+ :return: the output of the removal
+ """
+ token = configuration.token
+ changed = consul.acl.info(token) is not None
+ if changed:
+ consul.acl.destroy(token)
+ return Output(changed=changed, token=token, operation=REMOVE_OPERATION)
+
+
+def load_acl_with_token(consul, token):
+ """
+ Loads the ACL with the given token (token == rule ID).
+ :param consul: the consul client
+ :param token: the ACL "token"/ID (not name)
+ :return: the ACL associated to the given token
+ :exception ConsulACLTokenNotFoundException: raised if the given token does not exist
+ """
+ acl_as_json = consul.acl.info(token)
+ if acl_as_json is None:
+ raise ConsulACLNotFoundException(token)
+ return decode_acl_as_json(acl_as_json)
+
+
+def encode_rules_as_hcl_string(rules):
+ """
+ Converts the given rules into the equivalent HCL (string) representation.
+ :param rules: the rules
+ :return: the equivalent HCL (string) representation of the rules. Will be None if there are no rules (see internal
+ note for justification)
+ """
+ if len(rules) == 0:
+ # Note: empty string is not valid HCL according to `hcl.load`; however, the ACL `Rules` property will be an empty
+ # string if there are no rules...
+ return None
+ rules_as_hcl = ""
+ for rule in rules:
+ rules_as_hcl += encode_rule_as_hcl_string(rule)
+ return rules_as_hcl
+
+
+def encode_rule_as_hcl_string(rule):
+ """
+ Converts the given rule into the equivalent HCL (string) representation.
+ :param rule: the rule
+ :return: the equivalent HCL (string) representation of the rule
+ """
+ if rule.pattern is not None:
+ return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy)
+ else:
+ return '%s = "%s"\n' % (rule.scope, rule.policy)
+
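+# Illustrative sketch (assumed values): encode_rule_as_hcl_string(Rule("key", "read", "foo")) yields
+#
+# key "foo" {
+# policy = "read"
+# }
+#
+# while a scope-wide rule such as Rule("operator", "read") encodes to: operator = "read"
+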
+
+def decode_rules_as_hcl_string(rules_as_hcl):
+ """
+ Converts the given HCL (string) representation of rules into a list of rule domain models.
+ :param rules_as_hcl: the HCL (string) representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules_as_hcl = to_text(rules_as_hcl)
+ rules_as_json = hcl.loads(rules_as_hcl)
+ return decode_rules_as_json(rules_as_json)
+
+
+def decode_rules_as_json(rules_as_json):
+ """
+ Converts the given JSON representation of rules into a list of rule domain models.
+ :param rules_as_json: the JSON representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ for scope in rules_as_json:
+ if not isinstance(rules_as_json[scope], dict):
+ rules.add(Rule(scope, rules_as_json[scope]))
+ else:
+ for pattern, policy in rules_as_json[scope].items():
+ rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern))
+ return rules
+
+
+def encode_rules_as_json(rules):
+ """
+ Converts the given rules into the equivalent JSON representation according to the documentation:
+ https://www.consul.io/docs/guides/acl.html#rule-specification.
+ :param rules: the rules
+ :return: JSON representation of the given rules
+ """
+ rules_as_json = defaultdict(dict)
+ for rule in rules:
+ if rule.pattern is not None:
+ if rule.pattern in rules_as_json[rule.scope]:
+ raise AssertionError()
+ rules_as_json[rule.scope][rule.pattern] = {
+ _POLICY_JSON_PROPERTY: rule.policy
+ }
+ else:
+ if rule.scope in rules_as_json:
+ raise AssertionError()
+ rules_as_json[rule.scope] = rule.policy
+ return rules_as_json
+
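+# Illustrative sketch (assumed input): encode_rules_as_json() over
+# [Rule("key", "read", "foo"), Rule("operator", "read")] produces
+# {"key": {"foo": {"policy": "read"}}, "operator": "read"},
+# matching the rule-specification format linked above.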
+
+def decode_rules_as_yml(rules_as_yml):
+ """
+ Converts the given YAML representation of rules into a list of rule domain models.
+ :param rules_as_yml: the YAML representation of a collection of rules
+ :return: the equivalent domain model to the given rules
+ """
+ rules = RuleCollection()
+ if rules_as_yml:
+ for rule_as_yml in rules_as_yml:
+ rule_added = False
+ for scope in RULE_SCOPES:
+ if scope in rule_as_yml:
+ if rule_as_yml[scope] is None:
+ raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope)
+ policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \
+ else rule_as_yml[scope]
+ pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None
+ rules.add(Rule(scope, policy, pattern))
+ rule_added = True
+ break
+ if not rule_added:
+ raise ValueError("A rule requires one of %s and a policy." % ('/'.join(RULE_SCOPES)))
+ return rules
+
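+# Illustrative sketch (assumed input): decode_rules_as_yml() turns the module-level
+# rules parameter [{"key": "foo", "policy": "read"}, {"operator": "read"}] into
+# Rule("key", "read", "foo") and Rule("operator", "read"): an entry with a "policy"
+# key treats the scope value as the pattern, while an entry without one treats the
+# scope value as the policy itself.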
+
+def decode_acl_as_json(acl_as_json):
+ """
+ Converts the given JSON representation of an ACL into the equivalent domain model.
+ :param acl_as_json: the JSON representation of an ACL
+ :return: the equivalent domain model to the given ACL
+ """
+ rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY]
+ rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \
+ else RuleCollection()
+ return ACL(
+ rules=rules,
+ token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY],
+ token=acl_as_json[_TOKEN_JSON_PROPERTY],
+ name=acl_as_json[_NAME_JSON_PROPERTY]
+ )
+
+
+def decode_acls_as_json(acls_as_json):
+ """
+ Converts the given JSON representation of ACLs into a list of ACL domain models.
+ :param acls_as_json: the JSON representation of a collection of ACLs
+ :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same)
+ """
+ return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json]
+
+
+class ConsulACLNotFoundException(Exception):
+ """
+ Exception raised if an ACL is not found.
+ """
+
+
+class Configuration:
+ """
+ Configuration for this module.
+ """
+
+ def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None,
+ rules=None, state=None, token=None, token_type=None):
+ self.management_token = management_token # type: str
+ self.host = host # type: str
+ self.scheme = scheme # type: str
+ self.validate_certs = validate_certs # type: bool
+ self.name = name # type: str
+ self.port = port # type: int
+ self.rules = rules # type: RuleCollection
+ self.state = state # type: str
+ self.token = token # type: str
+ self.token_type = token_type # type: str
+
+
+class Output:
+ """
+ Output of an action of this module.
+ """
+
+ def __init__(self, changed=None, token=None, rules=None, operation=None):
+ self.changed = changed # type: bool
+ self.token = token # type: str
+ self.rules = rules # type: RuleCollection
+ self.operation = operation # type: str
+
+
+class ACL:
+ """
+ Consul ACL. See: https://www.consul.io/docs/guides/acl.html.
+ """
+
+ def __init__(self, rules, token_type, token, name):
+ self.rules = rules
+ self.token_type = token_type
+ self.token = token
+ self.name = name
+
+ def __eq__(self, other):
+ return other \
+ and isinstance(other, self.__class__) \
+ and self.rules == other.rules \
+ and self.token_type == other.token_type \
+ and self.token == other.token \
+ and self.name == other.name
+
+ def __hash__(self):
+ return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name)
+
+
+class Rule:
+ """
+ ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope.
+ """
+
+ def __init__(self, scope, policy, pattern=None):
+ self.scope = scope
+ self.policy = policy
+ self.pattern = pattern
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and self.scope == other.scope \
+ and self.policy == other.policy \
+ and self.pattern == other.pattern
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern)
+
+ def __str__(self):
+ return encode_rule_as_hcl_string(self)
+
+
+class RuleCollection:
+ """
+ Collection of ACL rules, which are part of a Consul ACL.
+ """
+
+ def __init__(self):
+ self._rules = {}
+ for scope in RULE_SCOPES:
+ self._rules[scope] = {}
+
+ def __iter__(self):
+ all_rules = []
+ for scope, pattern_keyed_rules in self._rules.items():
+ for pattern, rule in pattern_keyed_rules.items():
+ all_rules.append(rule)
+ return iter(all_rules)
+
+ def __len__(self):
+ count = 0
+ for scope in RULE_SCOPES:
+ count += len(self._rules[scope])
+ return count
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) \
+ and set(self) == set(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __str__(self):
+ return encode_rules_as_hcl_string(self)
+
+ def add(self, rule):
+ """
+ Adds the given rule to this collection.
+ :param rule: model of a rule
+ :raises ValueError: raised if there already exists a rule for a given scope and pattern
+ """
+ if rule.pattern in self._rules[rule.scope]:
+ pattern_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else ""
+ raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, pattern_info))
+ self._rules[rule.scope][rule.pattern] = rule
+
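+# Illustrative usage sketch (assumed values): RuleCollection compares by rule set,
+# so ordering does not matter when detecting changes:
+#
+# rules = RuleCollection()
+# rules.add(Rule("key", "read", "foo"))
+# rules.add(Rule("key", "read", "foo")) # would raise ValueError (duplicate scope/pattern)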
+
+def get_consul_client(configuration):
+ """
+ Gets a Consul client for the given configuration.
+
+ Does not check if the Consul client can connect.
+ :param configuration: the run configuration
+ :return: Consul client
+ """
+ token = configuration.management_token
+ if token is None:
+ token = configuration.token
+ if token is None:
+ raise AssertionError("Expecting the management token to always be set")
+ return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme,
+ verify=configuration.validate_certs, token=token)
+
+
+def check_dependencies():
+ """
+ Checks that the required dependencies have been imported.
+ :exception ImportError: if it is detected that any of the required dependencies have not been imported
+ """
+ if not python_consul_installed:
+ raise ImportError("python-consul required for this module. "
+ "See: https://python-consul.readthedocs.io/en/latest/#installation")
+
+ if not pyhcl_installed:
+ raise ImportError("pyhcl required for this module. "
+ "See: https://pypi.org/project/pyhcl/")
+
+ if not has_requests:
+ raise ImportError("requests required for this module. See https://pypi.org/project/requests/")
+
+
+def main():
+ """
+ Main method.
+ """
+ module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False)
+
+ try:
+ check_dependencies()
+ except ImportError as e:
+ module.fail_json(msg=str(e))
+
+ configuration = Configuration(
+ management_token=module.params.get(MANAGEMENT_PARAMETER_NAME),
+ host=module.params.get(HOST_PARAMETER_NAME),
+ scheme=module.params.get(SCHEME_PARAMETER_NAME),
+ validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME),
+ name=module.params.get(NAME_PARAMETER_NAME),
+ port=module.params.get(PORT_PARAMETER_NAME),
+ rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)),
+ state=module.params.get(STATE_PARAMETER_NAME),
+ token=module.params.get(TOKEN_PARAMETER_NAME),
+ token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME)
+ )
+ consul_client = get_consul_client(configuration)
+
+ try:
+ if configuration.state == PRESENT_STATE_VALUE:
+ output = set_acl(consul_client, configuration)
+ else:
+ output = remove_acl(consul_client, configuration)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ configuration.host, configuration.port, str(e)))
+ raise
+
+ return_values = dict(changed=output.changed, token=output.token, operation=output.operation)
+ if output.rules is not None:
+ return_values["rules"] = encode_rules_as_json(output.rules)
+ module.exit_json(**return_values)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_kv.py b/ansible_collections/community/general/plugins/modules/consul_kv.py
new file mode 100644
index 000000000..a4457f244
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_kv.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# Copyright (c) 2018 Genome Research Ltd.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: consul_kv
+short_description: Manipulate entries in the key/value store of a consul cluster
+description:
+ - Allows the retrieval, addition, modification and deletion of key/value entries in a
+ consul cluster via the agent. The entire contents of the record, including
+ the indices, flags and session are returned as C(value).
+ - If the C(key) represents a prefix, then note that when a value is removed, the existing
+ value, if any, is returned as part of the results.
+ - See http://www.consul.io/docs/agent/http.html#kv for more details.
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+ - Colin Nolan (@colin-nolan)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - The action to take with the supplied key and value. If the state is C(present) and I(value) is set, the key
+ contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was
+ different to the current contents. If the state is C(present) and I(value) is not set, the existing value
+ associated to the key will be returned. The state C(absent) will remove the key/value pair;
+ again, C(changed) will be set to C(true) only if the key actually existed
+ prior to the removal. An attempt can be made to obtain or free the
+ lock associated with a key/value pair with the states C(acquire) or
+ C(release) respectively. A valid session must be supplied to make the
+ attempt; C(changed) will be C(true) if the attempt is successful, C(false)
+ otherwise.
+ type: str
+ choices: [ absent, acquire, present, release ]
+ default: present
+ key:
+ description:
+ - The key at which the value should be stored.
+ type: str
+ required: true
+ value:
+ description:
+ - The value to be associated with the given key; required if C(state)
+ is C(present).
+ type: str
+ recurse:
+ description:
+ - If the key represents a prefix, each entry with the prefix can be
+ retrieved by setting this to C(true).
+ type: bool
+ retrieve:
+ description:
+ - If the I(state) is C(present) and I(value) is set, perform a
+ read after setting the value and return this value.
+ default: true
+ type: bool
+ session:
+ description:
+ - The session that should be used to acquire or release a lock
+ associated with a key/value pair.
+ type: str
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to
+ the key/value pair.
+ type: str
+ cas:
+ description:
+ - Used when acquiring a lock with a session. If the C(cas) is C(0), then
+ Consul will only put the key if it does not already exist. If the
+ C(cas) value is non-zero, then the key is only set if the index matches
+ the ModifyIndex of that key.
+ type: str
+ flags:
+ description:
+ - Opaque positive integer value that can be passed when setting a value.
+ type: str
+ host:
+ description:
+ - Host of the consul agent.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the tls certificate of the consul agent.
+ type: bool
+ default: true
+'''
+
+
+EXAMPLES = '''
+# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
+# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
+- name: Retrieve a value from the key/value store
+ community.general.consul_kv:
+ key: somekey
+ register: retrieved_key
+
+- name: Add or update the value associated with a key in the key/value store
+ community.general.consul_kv:
+ key: somekey
+ value: somevalue
+
+- name: Remove a key from the store
+ community.general.consul_kv:
+ key: somekey
+ state: absent
+
+- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
+ community.general.consul_kv:
+ key: ansible/groups/dc1/somenode
+ value: top_secret
+
+- name: Register a key/value pair with an associated session
+ community.general.consul_kv:
+ key: stg/node/server_birthday
+ value: 20160509
+ session: "{{ sessionid }}"
+ state: acquire
+'''
+
+from ansible.module_utils.common.text.converters import to_text
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
+# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call,
+# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
+NOT_SET = None
+
+
+def _has_value_changed(consul_client, key, target_value):
+ """
+ Uses the given Consul client to determine if the value associated to the given key is different to the given target
+ value.
+ :param consul_client: Consul connected client
+ :param key: key in Consul
+ :param target_value: value to be associated to the key
+ :return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
+ value has changed (i.e. the stored value is not the target value)
+ """
+ index, existing = consul_client.kv.get(key)
+ if not existing:
+ return index, True
+ try:
+ changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
+ return index, changed
+ except UnicodeError:
+ # Existing value was not decodable but all values we set are valid utf-8
+ return index, True
+
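+# Illustrative sketch (assumed values): with "bar" already stored under "somekey",
+#
+# index, changed = _has_value_changed(consul_client, "somekey", "bar")
+#
+# returns changed=False; a missing key or an undecodable stored value is
+# reported as changed=True so the module will (re)write it.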
+
+def execute(module):
+ state = module.params.get('state')
+
+ if state == 'acquire' or state == 'release':
+ lock(module, state)
+ elif state == 'present':
+ if module.params.get('value') is NOT_SET:
+ get_value(module)
+ else:
+ set_value(module)
+ elif state == 'absent':
+ remove_value(module)
+ else:
+ module.fail_json(msg="Unsupported state: %s" % (state, ))
+
+
+def lock(module, state):
+
+ consul_api = get_consul_api(module)
+
+ session = module.params.get('session')
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if not session:
+ module.fail_json(
+ msg='%s of lock for %s requested but no session supplied' %
+ (state, key))
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ if state == 'acquire':
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ acquire=session,
+ flags=module.params.get('flags'))
+ else:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ release=session,
+ flags=module.params.get('flags'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key)
+
+
+def get_value(module):
+ consul_api = get_consul_api(module)
+ key = module.params.get('key')
+
+ index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
+
+ module.exit_json(changed=False, index=index, data=existing_value)
+
+
+def set_value(module):
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+ value = module.params.get('value')
+
+ if value is NOT_SET:
+ raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
+
+ index, changed = _has_value_changed(consul_api, key, value)
+
+ if changed and not module.check_mode:
+ changed = consul_api.kv.put(key, value,
+ cas=module.params.get('cas'),
+ flags=module.params.get('flags'))
+
+ stored = None
+ if module.params.get('retrieve'):
+ index, stored = consul_api.kv.get(key)
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=stored)
+
+
+def remove_value(module):
+ ''' remove the value associated with the given key. if the recurse parameter
+ is set then any key prefixed with the given key will be removed. '''
+ consul_api = get_consul_api(module)
+
+ key = module.params.get('key')
+
+ index, existing = consul_api.kv.get(
+ key, recurse=module.params.get('recurse'))
+
+ changed = existing is not None
+ if changed and not module.check_mode:
+ consul_api.kv.delete(key, module.params.get('recurse'))
+
+ module.exit_json(changed=changed,
+ index=index,
+ key=key,
+ data=existing)
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cas=dict(type='str'),
+ flags=dict(type='str'),
+ key=dict(type='str', required=True, no_log=False),
+ host=dict(type='str', default='localhost'),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ port=dict(type='int', default=8500),
+ recurse=dict(type='bool'),
+ retrieve=dict(type='bool', default=True),
+ state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
+ token=dict(type='str', no_log=True),
+ value=dict(type='str', default=NOT_SET),
+ session=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/consul_session.py b/ansible_collections/community/general/plugins/modules/consul_session.py
new file mode 100644
index 000000000..246d13846
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/consul_session.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Steve Gargan <steve.gargan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: consul_session
+short_description: Manipulate consul sessions
+description:
+ - Allows the addition, modification and deletion of sessions in a consul
+ cluster. These sessions can then be used in conjunction with key value pairs
+ to implement distributed locks. In-depth documentation for working with
+ sessions can be found at http://www.consul.io/docs/internals/sessions.html
+requirements:
+ - python-consul
+ - requests
+author:
+ - Steve Gargan (@sgargan)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+ - ID of the session, required when I(state) is either C(info) or
+ C(absent).
+ type: str
+ state:
+ description:
+ - Whether the session should be present, i.e. created if it doesn't
+ exist, or absent, i.e. removed if present. If created, the I(id) for the
+ session is returned in the output. If C(absent), I(id) is
+ required to remove the session. Info for a single session, all the
+ sessions for a node or all available sessions can be retrieved by
+ specifying C(info), C(node) or C(list) for the I(state); for C(node)
+ or C(info), the node I(name) or session I(id) is required as parameter.
+ choices: [ absent, info, list, node, present ]
+ type: str
+ default: present
+ name:
+ description:
+ - The name that should be associated with the session. Required when
+ I(state=node) is used.
+ type: str
+ delay:
+ description:
+ - The optional lock delay that can be attached to the session when it
+ is created. Locks for invalidated sessions are blocked from being
+ acquired until this delay has expired. Durations are in seconds.
+ type: int
+ default: 15
+ node:
+ description:
+ - The name of the node with which the session will be associated.
+ By default this is the name of the agent.
+ type: str
+ datacenter:
+ description:
+ - The name of the datacenter in which the session exists or should be
+ created.
+ type: str
+ checks:
+ description:
+ - Checks that will be used to verify the session health. If
+ all the checks fail, the session will be invalidated and any locks
+ associated with the session will be released and can be acquired once
+ the associated lock delay has expired.
+ type: list
+ elements: str
+ host:
+ description:
+ - The host of the consul agent defaults to localhost.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The port on which the consul agent is running.
+ type: int
+ default: 8500
+ scheme:
+ description:
+ - The protocol scheme on which the consul agent is running.
+ type: str
+ default: http
+ validate_certs:
+ description:
+ - Whether to verify the TLS certificate of the consul agent.
+ type: bool
+ default: true
+ behavior:
+ description:
+ - The optional behavior that can be attached to the session when it
+ is created. This controls the behavior when a session is invalidated.
+ choices: [ delete, release ]
+ type: str
+ default: release
+ ttl:
+ description:
+ - Specifies the duration of a session in seconds (between 10 and 86400).
+ type: int
+ version_added: 5.4.0
+ token:
+ description:
+ - The token key identifying an ACL rule set that controls access to
+ the key value pair.
+ type: str
+ version_added: 5.6.0
+'''
+
+EXAMPLES = '''
+- name: Register basic session with consul
+ community.general.consul_session:
+ name: session1
+
+- name: Register a session with an existing check
+ community.general.consul_session:
+ name: session_with_check
+ checks:
+ - existing_check_name
+
+- name: Register a session with lock_delay
+ community.general.consul_session:
+ name: session_with_delay
+ delay: 20
+
+- name: Retrieve info about session by id
+ community.general.consul_session:
+ id: session_id
+ state: info
+
+- name: Retrieve active sessions
+ community.general.consul_session:
+ state: list
+
+- name: Register session with a ttl
+ community.general.consul_session:
+ name: session-with-ttl
+ ttl: 600 # sec
+'''
+
+try:
+ import consul
+ from requests.exceptions import ConnectionError
+ python_consul_installed = True
+except ImportError:
+ python_consul_installed = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def execute(module):
+
+ state = module.params.get('state')
+
+ if state in ['info', 'list', 'node']:
+ lookup_sessions(module)
+ elif state == 'present':
+ update_session(module)
+ else:
+ remove_session(module)
+
+
+def lookup_sessions(module):
+
+ datacenter = module.params.get('datacenter')
+
+ state = module.params.get('state')
+ consul_client = get_consul_api(module)
+ try:
+ if state == 'list':
+ sessions_list = consul_client.session.list(dc=datacenter)
+ # Ditch the index, this can be grabbed from the results
+ if sessions_list and len(sessions_list) >= 2:
+ sessions_list = sessions_list[1]
+ module.exit_json(changed=True,
+ sessions=sessions_list)
+ elif state == 'node':
+ node = module.params.get('node')
+ sessions = consul_client.session.node(node, dc=datacenter)
+ module.exit_json(changed=True,
+ node=node,
+ sessions=sessions)
+ elif state == 'info':
+ session_id = module.params.get('id')
+
+ session_by_id = consul_client.session.info(session_id, dc=datacenter)
+ module.exit_json(changed=True,
+ session_id=session_id,
+ sessions=session_by_id)
+
+ except Exception as e:
+ module.fail_json(msg="Could not retrieve session info %s" % e)
+
+
+def update_session(module):
+
+ name = module.params.get('name')
+ delay = module.params.get('delay')
+ checks = module.params.get('checks')
+ datacenter = module.params.get('datacenter')
+ node = module.params.get('node')
+ behavior = module.params.get('behavior')
+ ttl = module.params.get('ttl')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ session = consul_client.session.create(
+ name=name,
+ behavior=behavior,
+ ttl=ttl,
+ node=node,
+ lock_delay=delay,
+ dc=datacenter,
+ checks=checks
+ )
+ module.exit_json(changed=True,
+ session_id=session,
+ name=name,
+ behavior=behavior,
+ ttl=ttl,
+ delay=delay,
+ checks=checks,
+ node=node)
+ except Exception as e:
+ module.fail_json(msg="Could not create/update session %s" % e)
+
+
+def remove_session(module):
+ session_id = module.params.get('id')
+
+ consul_client = get_consul_api(module)
+
+ try:
+ consul_client.session.destroy(session_id)
+
+ module.exit_json(changed=True,
+ session_id=session_id)
+ except Exception as e:
+ module.fail_json(msg="Could not remove session with id '%s' %s" % (
+ session_id, e))
+
+
+def get_consul_api(module):
+ return consul.Consul(host=module.params.get('host'),
+ port=module.params.get('port'),
+ scheme=module.params.get('scheme'),
+ verify=module.params.get('validate_certs'),
+ token=module.params.get('token'))
+
+
+def test_dependencies(module):
+ if not python_consul_installed:
+ module.fail_json(msg="python-consul required for this module. "
+ "see https://python-consul.readthedocs.io/en/latest/#installation")
+
+
+def main():
+ argument_spec = dict(
+ checks=dict(type='list', elements='str'),
+ delay=dict(type='int', default=15),
+ behavior=dict(type='str', default='release', choices=['release', 'delete']),
+ ttl=dict(type='int'),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8500),
+ scheme=dict(type='str', default='http'),
+ validate_certs=dict(type='bool', default=True),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ node=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
+ datacenter=dict(type='str'),
+ token=dict(type='str', no_log=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'node', ['name']),
+ ('state', 'info', ['id']),
+ ('state', 'absent', ['id']),
+ ],
+ supports_check_mode=False
+ )
+
+ test_dependencies(module)
+
+ try:
+ execute(module)
+ except ConnectionError as e:
+ module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
+ module.params.get('host'), module.params.get('port'), e))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/copr.py b/ansible_collections/community/general/plugins/modules/copr.py
new file mode 100644
index 000000000..965c2a935
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/copr.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Silvie Chlupova <schlupov@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: copr
+short_description: Manage one of the Copr repositories
+version_added: 2.0.0
+description: This module can enable, disable or remove the specified repository.
+author: Silvie Chlupova (@schlupov) <schlupov@redhat.com>
+requirements:
+ - dnf
+ - dnf-plugins-core
+notes:
+ - Supports C(check_mode).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ host:
+ description: The Copr host to work with.
+ default: copr.fedorainfracloud.org
+ type: str
+ protocol:
+ description: This indicates which protocol to use with the host.
+ default: https
+ type: str
+ name:
+ description: Copr directory name, for example C(@copr/copr-dev).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to set this project as C(enabled), C(disabled) or C(absent).
+ default: enabled
+ type: str
+ choices: [absent, enabled, disabled]
+ chroot:
+ description:
+ - The name of the chroot that you want to enable/disable/remove in the project,
+ for example C(epel-7-x86_64). Default chroot is determined by the operating system,
+ version of the operating system, and architecture on which the module is run.
+ type: str
+"""
+
+EXAMPLES = r"""
+- name: Enable project Test of the user schlupov
+ community.general.copr:
+ host: copr.fedorainfracloud.org
+ state: enabled
+ name: schlupov/Test
+ chroot: fedora-31-x86_64
+
+- name: Remove project integration_tests of the group copr
+ community.general.copr:
+ state: absent
+ name: '@copr/integration_tests'
+"""
+
+RETURN = r"""
+repo_filename:
+ description: The name of the repo file in which the copr project information is stored.
+ returned: success
+ type: str
+ sample: _copr:copr.fedorainfracloud.org:group_copr:integration_tests.repo
+
+repo:
+ description: Path to the project on the host.
+ returned: success
+ type: str
+ sample: copr.fedorainfracloud.org/group_copr/integration_tests
+"""
+
+import stat
+import os
+import traceback
+
+try:
+ import dnf
+ import dnf.cli
+ import dnf.repodict
+ from dnf.conf import Conf
+ HAS_DNF_PACKAGES = True
+ DNF_IMP_ERR = None
+except ImportError:
+ DNF_IMP_ERR = traceback.format_exc()
+ HAS_DNF_PACKAGES = False
+
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.basic import missing_required_lib
+from ansible.module_utils import distro # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error
+from ansible.module_utils.urls import open_url # pylint: disable=import-error
+
+
+class CoprModule(object):
+ """The class represents a copr module.
+
+ The class contains methods that take care of the repository state of a project,
+ whether the project is enabled, disabled or missing.
+ """
+
+ ansible_module = None
+
+ def __init__(self, host, name, state, protocol, chroot=None, check_mode=False):
+ self.host = host
+ self.name = name
+ self.state = state
+ self.chroot = chroot
+ self.protocol = protocol
+ self.check_mode = check_mode
+ if not chroot:
+ self.chroot = self.chroot_conf()
+ else:
+ self.chroot = chroot
+ self.get_base()
+
+ @property
+ def short_chroot(self):
+ """str: Chroot (distribution-version-architecture) shorten to distribution-version."""
+ return self.chroot.rsplit('-', 1)[0]
+
+ @property
+ def arch(self):
+ """str: Target architecture."""
+ chroot_parts = self.chroot.split("-")
+ return chroot_parts[-1]
+
+ @property
+ def user(self):
+ """str: Copr user (this can also be the name of the group)."""
+ return self._sanitize_username(self.name.split("/")[0])
+
+ @property
+ def project(self):
+ """str: The name of the copr project."""
+ return self.name.split("/")[1]
+
+ @classmethod
+ def need_root(cls):
+ """Check if the module was run as root."""
+ if os.geteuid() != 0:
+ cls.raise_exception("This command has to be run under the root user.")
+
+ @classmethod
+ def get_base(cls):
+ """Initialize the configuration from dnf.
+
+ Returns:
+ An instance of the BaseCli class.
+ """
+ cls.base = dnf.cli.cli.BaseCli(Conf())
+ return cls.base
+
+ @classmethod
+ def raise_exception(cls, msg):
+ """Raise either an ansible exception or a python exception.
+
+ Args:
+ msg: The message to be displayed when an exception is thrown.
+ """
+ if cls.ansible_module:
+ raise cls.ansible_module.fail_json(msg=msg, changed=False)
+ raise Exception(msg)
+
+ def _get(self, chroot):
+ """Send a get request to the server to obtain the necessary data.
+
+ Args:
+ chroot: Chroot in the form of distribution-version.
+
+ Returns:
+ Info about a repository and status code of the get request.
+ """
+ repo_info = None
+ url = "{0}://{1}/coprs/{2}/repo/{3}/dnf.repo?arch={4}".format(
+ self.protocol, self.host, self.name, chroot, self.arch
+ )
+ try:
+ r = open_url(url)
+ status_code = r.getcode()
+ repo_info = r.read().decode("utf-8")
+ except HTTPError as e:
+ status_code = e.getcode()
+ return repo_info, status_code
+
+ def _download_repo_info(self):
+ """Download information about the repository.
+
+ Returns:
+ Information about the repository.
+ """
+ distribution, version = self.short_chroot.split('-', 1)
+ chroot = self.short_chroot
+ while True:
+ repo_info, status_code = self._get(chroot)
+ if repo_info:
+ return repo_info
+ if distribution == "rhel":
+ chroot = "centos-stream-8"
+ distribution = "centos"
+ elif distribution == "centos":
+ if version == "stream-8":
+ version = "8"
+ elif version == "stream-9":
+ version = "9"
+ chroot = "epel-{0}".format(version)
+ distribution = "epel"
+ else:
+ if str(status_code) != "404":
+ self.raise_exception(
+ "This repository does not have any builds yet so you cannot enable it now."
+ )
+ else:
+ self.raise_exception(
+ "Chroot {0} does not exist in {1}".format(self.chroot, self.name)
+ )
+
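+ # Illustrative walk-through of _download_repo_info (assumed chroot
+ # "rhel-9-x86_64"): if no repo file exists for the exact chroot, the
+ # lookup falls back to "centos-stream-8" and then "epel-9" before
+ # failing with either "no builds yet" or "chroot does not exist".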
+ def _enable_repo(self, repo_filename_path, repo_content=None):
+ """Write information to a repo file.
+
+ Args:
+ repo_filename_path: Path to repository.
+ repo_content: Repository information from the host.
+
+ Returns:
+ True, if the information in the repo file matches that stored on the host,
+ False otherwise.
+ """
+ if not repo_content:
+ repo_content = self._download_repo_info()
+ if self._compare_repo_content(repo_filename_path, repo_content):
+ return False
+ if not self.check_mode:
+ with open(repo_filename_path, "w+") as file:
+ file.write(repo_content)
+ os.chmod(
+ repo_filename_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH,
+ )
+ return True
+
+ def _get_repo_with_old_id(self):
+ """Try to get a repository with the old name."""
+ repo_id = "{0}-{1}".format(self.user, self.project)
+ if repo_id in self.base.repos and "_copr" in self.base.repos[repo_id].repofile:
+ file_name = self.base.repos[repo_id].repofile.split("/")[-1]
+ try:
+ copr_hostname = file_name.rsplit(":", 2)[0].split(":", 1)[1]
+ if copr_hostname != self.host:
+ return None
+ return file_name
+ except IndexError:
+ return file_name
+ return None
+
+ def _read_all_repos(self, repo_id=None):
+ """The method is used to initialize the base variable by
+ repositories using the RepoReader class from dnf.
+
+ Args:
+ repo_id: Repo id of the repository we want to work with.
+ """
+ reader = dnf.conf.read.RepoReader(self.base.conf, None)
+ for repo in reader:
+ try:
+ if repo_id:
+ if repo.id == repo_id:
+ self.base.repos.add(repo)
+ break
+ else:
+ self.base.repos.add(repo)
+ except dnf.exceptions.ConfigError as e:
+ self.raise_exception(str(e))
+
+ def _get_copr_repo(self):
+ """Return one specific repository from all repositories on the system.
+
+ Returns:
+ The repository that a user wants to enable, disable, or remove.
+ """
+ repo_id = "copr:{0}:{1}:{2}".format(self.host, self.user, self.project)
+ if repo_id not in self.base.repos:
+ if self._get_repo_with_old_id() is None:
+ return None
+ return self.base.repos[repo_id]
+
+ def _disable_repo(self, repo_filename_path):
+ """Disable the repository.
+
+ Args:
+ repo_filename_path: Path to repository.
+
+ Returns:
+ False, if the repository is already disabled on the system,
+ True otherwise.
+ """
+ self._read_all_repos()
+ repo = self._get_copr_repo()
+ if repo is None:
+ if self.check_mode:
+ return True
+ self._enable_repo(repo_filename_path)
+ self._read_all_repos("copr:{0}:{1}:{2}".format(self.host, self.user, self.project))
+ repo = self._get_copr_repo()
+ for repo_id in repo.cfg.sections():
+ repo_content_api = self._download_repo_info()
+ with open(repo_filename_path, "r") as file:
+ repo_content_file = file.read()
+ if repo_content_file != repo_content_api:
+ if not self.resolve_differences(
+ repo_content_file, repo_content_api, repo_filename_path
+ ):
+ return False
+ if not self.check_mode:
+ self.base.conf.write_raw_configfile(
+ repo.repofile, repo_id, self.base.conf.substitutions, {"enabled": "0"},
+ )
+ return True
+
+ def resolve_differences(self, repo_content_file, repo_content_api, repo_filename_path):
+ """Detect differences between the contents of the repository stored on the
+ system and the information about the repository on the server.
+
+ Args:
+ repo_content_file: The contents of the repository stored on the system.
+ repo_content_api: The information about the repository from the server.
+ repo_filename_path: Path to repository.
+
+ Returns:
+ False, if the contents of the repo file and the information on the server match,
+ True otherwise.
+ """
+ repo_file_lines = repo_content_file.split("\n")
+ repo_api_lines = repo_content_api.split("\n")
+ repo_api_lines.remove("enabled=1")
+ if "enabled=0" in repo_file_lines:
+ repo_file_lines.remove("enabled=0")
+ if " ".join(repo_api_lines) == " ".join(repo_file_lines):
+ return False
+ if not self.check_mode:
+ os.remove(repo_filename_path)
+ self._enable_repo(repo_filename_path, repo_content_api)
+ else:
+ repo_file_lines.remove("enabled=1")
+ if " ".join(repo_api_lines) != " ".join(repo_file_lines):
+ if not self.check_mode:
+ os.remove(repo_filename_path)
+ self._enable_repo(repo_filename_path, repo_content_api)
+ return True
+
+ def _remove_repo(self):
+ """Remove the required repository.
+
+ Returns:
+ True, if the repository has been removed, False otherwise.
+ """
+ self._read_all_repos()
+ repo = self._get_copr_repo()
+ if not repo:
+ return False
+ if not self.check_mode:
+ try:
+ os.remove(repo.repofile)
+ except OSError as e:
+ self.raise_exception(str(e))
+ return True
+
+ def run(self):
+ """The method uses methods of the CoprModule class to change the state of the repository.
+
+ Returns:
+ Dictionary with information that the ansible module displays to the user at the end of the run.
+ """
+ self.need_root()
+ state = dict()
+ repo_filename = "_copr:{0}:{1}:{2}.repo".format(self.host, self.user, self.project)
+ state["repo"] = "{0}/{1}/{2}".format(self.host, self.user, self.project)
+ state["repo_filename"] = repo_filename
+ repo_filename_path = "{0}/_copr:{1}:{2}:{3}.repo".format(
+ self.base.conf.get_reposdir, self.host, self.user, self.project
+ )
+ if self.state == "enabled":
+ enabled = self._enable_repo(repo_filename_path)
+ state["msg"] = "enabled"
+ state["state"] = bool(enabled)
+ elif self.state == "disabled":
+ disabled = self._disable_repo(repo_filename_path)
+ state["msg"] = "disabled"
+ state["state"] = bool(disabled)
+ elif self.state == "absent":
+ removed = self._remove_repo()
+ state["msg"] = "absent"
+ state["state"] = bool(removed)
+ return state
+
+ @staticmethod
+ def _compare_repo_content(repo_filename_path, repo_content_api):
+ """Compare the contents of the stored repository with the information from the server.
+
+ Args:
+ repo_filename_path: Path to repository.
+ repo_content_api: The information about the repository from the server.
+
+ Returns:
+ True, if the information matches, False otherwise.
+ """
+ if not os.path.isfile(repo_filename_path):
+ return False
+ with open(repo_filename_path, "r") as file:
+ repo_content_file = file.read()
+ return repo_content_file == repo_content_api
+
+ @staticmethod
+ def chroot_conf():
+ """Obtain information about the distribution, version, and architecture of the target.
+
+ Returns:
+ Chroot info in the form of distribution-version-architecture.
+ """
+ (distribution, version, codename) = distro.linux_distribution(full_distribution_name=False)
+ base = CoprModule.get_base()
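+ # Illustrative sketch (assumed host): on a Fedora 31 x86_64 machine this
+ # typically resolves to "fedora-31-x86_64".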
+ return "{0}-{1}-{2}".format(distribution, version, base.conf.arch)
+
+ @staticmethod
+ def _sanitize_username(user):
+ """Modify the group name.
+
+ Args:
+ user: User name.
+
+ Returns:
+ Modified user name if it is a group name with @.
+ """
+ if user[0] == "@":
+ return "group_{0}".format(user[1:])
+ return user
+
+
+def run_module():
+ """The function takes care of the functioning of the whole ansible copr module."""
+ module_args = dict(
+ host=dict(type="str", default="copr.fedorainfracloud.org"),
+ protocol=dict(type="str", default="https"),
+ name=dict(type="str", required=True),
+ state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"),
+ chroot=dict(type="str"),
+ )
+ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+ params = module.params
+
+ if not HAS_DNF_PACKAGES:
+ module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR)
+
+ CoprModule.ansible_module = module
+ copr_module = CoprModule(
+ host=params["host"],
+ name=params["name"],
+ state=params["state"],
+ protocol=params["protocol"],
+ chroot=params["chroot"],
+ check_mode=module.check_mode,
+ )
+ state = copr_module.run()
+
+ info = "Please note that this repository is not part of the main distribution"
+
+ if params["state"] == "enabled" and state["state"]:
+ module.exit_json(
+ changed=state["state"],
+ msg=state["msg"],
+ repo=state["repo"],
+ repo_filename=state["repo_filename"],
+ info=info,
+ )
+ module.exit_json(
+ changed=state["state"],
+ msg=state["msg"],
+ repo=state["repo"],
+ repo_filename=state["repo_filename"],
+ )
+
+
+def main():
+ """Launches ansible Copr module."""
+ run_module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cpanm.py b/ansible_collections/community/general/plugins/modules/cpanm.py
new file mode 100644
index 000000000..6260992df
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cpanm.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Franck Cuny <franck@lumberjaph.net>
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: cpanm
+short_description: Manages Perl library dependencies
+description:
+ - Manage Perl library dependencies using cpanminus.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - The Perl library to install. Valid values change according to the I(mode); see notes for more details.
+ - Note that for installing from a local path the parameter I(from_path) should be used.
+ aliases: [pkg]
+ from_path:
+ type: path
+ description:
+ - The local directory or C(tar.gz) file to install from.
+ notest:
+ description:
+ - Do not run unit tests.
+ type: bool
+ default: false
+ locallib:
+ description:
+ - Specify the install base to install modules.
+ type: path
+ mirror:
+ description:
+ - Specifies the base URL for the CPAN mirror to use.
+ type: str
+ mirror_only:
+ description:
+ - Use the mirror's index file instead of the CPAN Meta DB.
+ type: bool
+ default: false
+ installdeps:
+ description:
+ - Only install dependencies.
+ type: bool
+ default: false
+ version:
+ description:
+ - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted.
+ type: str
+ executable:
+ description:
+ - Override the path to the cpanm executable.
+ type: path
+ mode:
+ description:
+ - Controls the module behavior. See notes below for more details.
+ type: str
+ choices: [compatibility, new]
+ default: compatibility
+ version_added: 3.0.0
+ name_check:
+ description:
+ - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified).
+ type: str
+ version_added: 3.0.0
+notes:
+ - Please note that C(cpanm) (U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm)) must be installed on the remote host.
+ - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)."
+ - "C(compatibility) mode:"
+ - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode.
+ - I(name) must be either a module name or a distribution file.
+ - >
+ If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens.
+ Otherwise, it will be installed using the C(cpanm) executable.
+ - I(name) cannot be a URL or a git URL.
+ - C(cpanm) version specifiers do not work in this mode.
+ - "C(new) mode:"
+ - "When using C(new) mode, the module will behave differently"
+ - >
+ The I(name) parameter may refer to a module name, a distribution file,
+ an HTTP URL or a git repository URL, as described in the C(cpanminus) documentation.
+ - C(cpanm) version specifiers are recognized.
+author:
+ - "Franck Cuny (@fcuny)"
+ - "Alexei Znamensky (@russoz)"
+'''
+
+EXAMPLES = '''
+- name: Install Dancer perl package
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install version 0.99_05 of the Plack perl package
+ community.general.cpanm:
+ name: MIYAGAWA/Plack-0.99_05.tar.gz
+
+- name: Install Dancer into the specified locallib
+ community.general.cpanm:
+ name: Dancer
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install perl dependencies from local directory
+ community.general.cpanm:
+ from_path: /srv/webapps/my_app/src/
+
+- name: Install Dancer perl package without running the unit tests in indicated locallib
+ community.general.cpanm:
+ name: Dancer
+ notest: true
+ locallib: /srv/webapps/my_app/extlib
+
+- name: Install Dancer perl package from a specific mirror
+ community.general.cpanm:
+ name: Dancer
+ mirror: 'http://cpan.cpantesters.org/'
+
+- name: Install Dancer perl package into the system root path
+ become: true
+ community.general.cpanm:
+ name: Dancer
+
+- name: Install Dancer if it is not already installed OR the installed version is older than version 1.0
+ community.general.cpanm:
+ name: Dancer
+ version: '1.0'
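+
+# A sketch of C(mode=new): in this mode, I(name) may also be an HTTP URL or a git
+# repository URL, and cpanm version specifiers are honoured. The repository URL
+# and the pinned ref below are purely illustrative.
+- name: Install Dancer2 from a git repository, pinned to a tag
+  community.general.cpanm:
+    name: https://github.com/PerlDancer/Dancer2.git
+    version: '@0.400000'
+    mode: new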
+'''
+
+import os
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+class CPANMinus(ModuleHelper):
+ output_params = ['name', 'version']
+ module = dict(
+ argument_spec=dict(
+ name=dict(type='str', aliases=['pkg']),
+ version=dict(type='str'),
+ from_path=dict(type='path'),
+ notest=dict(type='bool', default=False),
+ locallib=dict(type='path'),
+ mirror=dict(type='str'),
+ mirror_only=dict(type='bool', default=False),
+ installdeps=dict(type='bool', default=False),
+ executable=dict(type='path'),
+ mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'),
+ name_check=dict(type='str')
+ ),
+ required_one_of=[('name', 'from_path')],
+
+ )
+ command = 'cpanm'
+ command_args_formats = dict(
+ notest=cmd_runner_fmt.as_bool("--notest"),
+ locallib=cmd_runner_fmt.as_opt_val('--local-lib'),
+ mirror=cmd_runner_fmt.as_opt_val('--mirror'),
+ mirror_only=cmd_runner_fmt.as_bool("--mirror-only"),
+ installdeps=cmd_runner_fmt.as_bool("--installdeps"),
+ pkg_spec=cmd_runner_fmt.as_list(),
+ )
+
+ def __init_module__(self):
+ v = self.vars
+ if v.mode == "compatibility":
+ if v.name_check:
+ self.do_raise("Parameter name_check can only be used with mode=new")
+ else:
+ if v.name and v.from_path:
+ self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'")
+
+ self.command = self.get_bin_path(v.executable if v.executable else self.command)
+ self.vars.set("binary", self.command)
+
+ def _is_package_installed(self, name, locallib, version):
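+        # Probe with: perl -le 'use <name> <version>;'
+        # A zero exit status means the module is installed at <version> or newer.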
+ def process(rc, out, err):
+ return rc == 0
+
+ if name is None or name.endswith('.tar.gz'):
+ return False
+ version = "" if version is None else " " + version
+
+ env = {"PERL5LIB": "%s/lib/perl5" % locallib} if locallib else {}
+ runner = CmdRunner(self.module, ["perl", "-le"], {"mod": cmd_runner_fmt.as_list()}, check_rc=False, environ_update=env)
+ with runner("mod", output_process=process) as ctx:
+ return ctx.run(mod='use %s%s;' % (name, version))
+
+ def sanitize_pkg_spec_version(self, pkg_spec, version):
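+        # Combine the package spec with cpanm's version operators: '@' pins an
+        # exact version (or a git ref), '~' means "this version or newer".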
+ if version is None:
+ return pkg_spec
+ if pkg_spec.endswith('.tar.gz'):
+ self.do_raise(msg="parameter 'version' must not be used when installing from a file")
+ if os.path.isdir(pkg_spec):
+ self.do_raise(msg="parameter 'version' must not be used when installing from a directory")
+ if pkg_spec.endswith('.git'):
+ if version.startswith('~'):
+ self.do_raise(msg="operator '~' not allowed in version parameter when installing from git repository")
+ version = version if version.startswith('@') else '@' + version
+ elif version[0] not in ('@', '~'):
+ version = '~' + version
+ return pkg_spec + version
+
+ def __run__(self):
+ def process(rc, out, err):
+ if self.vars.mode == "compatibility" and rc != 0:
+ self.do_raise(msg=err, cmd=self.vars.cmd_args)
+ return 'is up to date' not in err and 'is up to date' not in out
+
+ runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True)
+
+ v = self.vars
+ pkg_param = 'from_path' if v.from_path else 'name'
+
+ if v.mode == 'compatibility':
+ if self._is_package_installed(v.name, v.locallib, v.version):
+ return
+ pkg_spec = v[pkg_param]
+ else:
+ installed = self._is_package_installed(v.name_check, v.locallib, v.version) if v.name_check else False
+ if installed:
+ return
+ pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version)
+
+ with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx:
+ self.changed = ctx.run(pkg_spec=pkg_spec)
+
+
+def main():
+ CPANMinus.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/cronvar.py b/ansible_collections/community/general/plugins/modules/cronvar.py
new file mode 100644
index 000000000..7effed2ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/cronvar.py
@@ -0,0 +1,431 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Cronvar Plugin: The goal of this plugin is to provide an idempotent
+# method for setting cron variable values. It should play well with the
+# existing cron module as well as allow for manually added variables.
+# Each variable entered will be preceded by a comment describing the
+# variable so that it can be found later. This comment is required to be
+# present in order for this plugin to find/modify the variable.
+
+# This module is based on the crontab module.
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: cronvar
+short_description: Manage variables in crontabs
+description:
+ - Use this module to manage crontab variables.
+ - This module allows you to create, update, or delete cron variable definitions.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the crontab variable.
+ type: str
+ required: true
+ value:
+ description:
+ - The value to set this variable to.
+ - Required if I(state=present).
+ type: str
+ insertafter:
+ description:
+ - If specified, the variable will be inserted after the variable specified.
+ - Used with I(state=present).
+ type: str
+ insertbefore:
+ description:
+ - Used with I(state=present). If specified, the variable will be inserted
+ just before the variable specified.
+ type: str
+ state:
+ description:
+ - Whether to ensure that the variable is present or absent.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - The specific user whose crontab should be modified.
+ - This parameter defaults to C(root) when unset.
+ type: str
+ cron_file:
+ description:
+ - If specified, uses this file instead of an individual user's crontab.
+      - Without a leading C(/), this is assumed to be in C(/etc/cron.d).
+ - With a leading C(/), this is taken as absolute.
+ type: str
+ backup:
+ description:
+ - If set, create a backup of the crontab before it is modified.
+ The location of the backup is returned in the C(backup) variable by this module.
+ type: bool
+ default: false
+requirements:
+ - cron
+author:
+- Doug Luce (@dougluce)
+'''
+
+EXAMPLES = r'''
+- name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists
+ community.general.cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+
+- name: Ensure a variable does not exist. This may remove any variable named "LEGACY"
+ community.general.cronvar:
+ name: LEGACY
+ state: absent
+
+- name: Add a variable to a file under /etc/cron.d
+ community.general.cronvar:
+ name: LOGFILE
+ value: /var/log/yum-autoupdate.log
+ user: root
+ cron_file: ansible_yum-autoupdate
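+
+# A sketch of I(insertafter): place the new variable directly below an existing
+# one (the variable names here are illustrative).
+- name: Add MAILTO directly after the SHELL variable
+  community.general.cronvar:
+    name: MAILTO
+    value: doug@ansibmod.con.com
+    insertafter: SHELL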
+'''
+
+import os
+import platform
+import pwd
+import re
+import shlex
+import sys
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+class CronVarError(Exception):
+ pass
+
+
+class CronVar(object):
+ """
+ CronVar object to write variables to crontabs.
+
+ user - the user of the crontab (defaults to root)
+ cron_file - a cron file under /etc/cron.d
+ """
+
+ def __init__(self, module, user=None, cron_file=None):
+ self.module = module
+ self.user = user
+ self.lines = None
+ self.wordchars = ''.join(chr(x) for x in range(128) if chr(x) not in ('=', "'", '"',))
+ self.cron_cmd = self.module.get_bin_path('crontab', required=True)
+
+ if cron_file:
+ self.cron_file = ""
+ if os.path.isabs(cron_file):
+ self.cron_file = cron_file
+ else:
+ self.cron_file = os.path.join('/etc/cron.d', cron_file)
+ else:
+ self.cron_file = None
+
+ self.read()
+
+ def read(self):
+ # Read in the crontab from the system
+ self.lines = []
+ if self.cron_file:
+ # read the cronfile
+ try:
+ f = open(self.cron_file, 'r')
+ self.lines = f.read().splitlines()
+ f.close()
+ except IOError:
+ # cron file does not exist
+ return
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+ else:
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
+
+ if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
+ raise CronVarError("Unable to read crontab")
+
+ lines = out.splitlines()
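+            # Some crontab implementations prepend three header comment lines
+            # to 'crontab -l' output; the regular expressions below skip them.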
+ count = 0
+ for l in lines:
+ if count > 2 or (not re.match(r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l
+ ) and not re.match(r'# \(/tmp/.*installed on.*\)', l) and not re.match(r'# \(.*version.*\)', l)):
+ self.lines.append(l)
+ count += 1
+
+ def log_message(self, message):
+ self.module.debug('ansible: "%s"' % message)
+
+ def write(self, backup_file=None):
+ """
+ Write the crontab to the system. Saves all information.
+ """
+ if backup_file:
+ fileh = open(backup_file, 'w')
+ elif self.cron_file:
+ fileh = open(self.cron_file, 'w')
+ else:
+ filed, path = tempfile.mkstemp(prefix='crontab')
+ fileh = os.fdopen(filed, 'w')
+
+ fileh.write(self.render())
+ fileh.close()
+
+ # return if making a backup
+ if backup_file:
+ return
+
+ # Add the entire crontab back to the user crontab
+ if not self.cron_file:
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
+ os.unlink(path)
+
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ def remove_variable_file(self):
+ try:
+ os.unlink(self.cron_file)
+ return True
+ except OSError:
+ # cron file does not exist
+ return False
+ except Exception:
+ raise CronVarError("Unexpected error:", sys.exc_info()[0])
+
+ def parse_for_var(self, line):
+ lexer = shlex.shlex(line)
+ lexer.wordchars = self.wordchars
+ varname = lexer.get_token()
+ is_env_var = lexer.get_token() == '='
+ value = ''.join(lexer)
+ if is_env_var:
+ return (varname, value)
+ raise CronVarError("Not a variable.")
+
+ def find_variable(self, name):
+ for l in self.lines:
+ try:
+ (varname, value) = self.parse_for_var(l)
+ if varname == name:
+ return value
+ except CronVarError:
+ pass
+ return None
+
+ def get_var_names(self):
+ var_names = []
+ for l in self.lines:
+ try:
+ var_name, dummy = self.parse_for_var(l)
+ var_names.append(var_name)
+ except CronVarError:
+ pass
+ return var_names
+
+ def add_variable(self, name, value, insertbefore, insertafter):
+ if insertbefore is None and insertafter is None:
+ # Add the variable to the top of the file.
+ self.lines.insert(0, "%s=%s" % (name, value))
+ else:
+ newlines = []
+ for l in self.lines:
+ try:
+ varname, dummy = self.parse_for_var(l) # Throws if not a var line
+ if varname == insertbefore:
+ newlines.append("%s=%s" % (name, value))
+ newlines.append(l)
+ elif varname == insertafter:
+ newlines.append(l)
+ newlines.append("%s=%s" % (name, value))
+ else:
+ raise CronVarError # Append.
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def remove_variable(self, name):
+ self.update_variable(name, None, remove=True)
+
+ def update_variable(self, name, value, remove=False):
+ newlines = []
+ for l in self.lines:
+ try:
+ varname, dummy = self.parse_for_var(l) # Throws if not a var line
+ if varname != name:
+ raise CronVarError # Append.
+ if not remove:
+ newlines.append("%s=%s" % (name, value))
+ except CronVarError:
+ newlines.append(l)
+
+ self.lines = newlines
+
+ def render(self):
+ """
+ Render a proper crontab
+ """
+ result = '\n'.join(self.lines)
+ if result and result[-1] not in ['\n', '\r']:
+ result += '\n'
+ return result
+
+ def _read_user_execute(self):
+ """
+ Returns the command line for reading a crontab
+ """
+ user = ''
+
+ if self.user:
+ if platform.system() == 'SunOS':
+ return "su %s -c '%s -l'" % (shlex_quote(self.user), shlex_quote(self.cron_cmd))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (shlex_quote(self.cron_cmd), shlex_quote(self.user))
+ elif platform.system() == 'HP-UX':
+ return "%s %s %s" % (self.cron_cmd, '-l', shlex_quote(self.user))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, '-l')
+
+ def _write_execute(self, path):
+ """
+ Return the command line for writing a crontab
+ """
+ user = ''
+ if self.user:
+ if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (
+ shlex_quote(self.user), shlex_quote(path), shlex_quote(self.user), self.cron_cmd, shlex_quote(path))
+ elif pwd.getpwuid(os.getuid())[0] != self.user:
+ user = '-u %s' % shlex_quote(self.user)
+ return "%s %s %s" % (self.cron_cmd, user, shlex_quote(path))
+
+
+# ==================================================
+
+def main():
+ # The following example playbooks:
+ #
+ # - community.general.cronvar: name="SHELL" value="/bin/bash"
+ #
+ # - name: Set the email
+ # community.general.cronvar: name="EMAILTO" value="doug@ansibmod.con.com"
+ #
+ # - name: Get rid of the old new host variable
+ # community.general.cronvar: name="NEW_HOST" state=absent
+ #
+ # Would produce:
+ # SHELL = /bin/bash
+ # EMAILTO = doug@ansibmod.con.com
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str'),
+ user=dict(type='str'),
+ cron_file=dict(type='str'),
+ insertafter=dict(type='str'),
+ insertbefore=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ backup=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['insertbefore', 'insertafter']],
+ supports_check_mode=False,
+ )
+
+ name = module.params['name']
+ value = module.params['value']
+ user = module.params['user']
+ cron_file = module.params['cron_file']
+ insertafter = module.params['insertafter']
+ insertbefore = module.params['insertbefore']
+ state = module.params['state']
+ backup = module.params['backup']
+ ensure_present = state == 'present'
+
+ changed = False
+ res_args = dict()
+
+ # Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
+ os.umask(int('022', 8))
+ cronvar = CronVar(module, user, cron_file)
+
+ module.debug('cronvar instantiated - name: "%s"' % name)
+
+ # --- user input validation ---
+
+ if name is None and ensure_present:
+ module.fail_json(msg="You must specify 'name' to insert a new cron variable")
+
+ if value is None and ensure_present:
+ module.fail_json(msg="You must specify 'value' to insert a new cron variable")
+
+ if name is None and not ensure_present:
+ module.fail_json(msg="You must specify 'name' to remove a cron variable")
+
+ # if requested make a backup before making a change
+ if backup:
+ dummy, backup_file = tempfile.mkstemp(prefix='cronvar')
+ cronvar.write(backup_file)
+
+ if cronvar.cron_file and not name and not ensure_present:
+        changed = cronvar.remove_variable_file()
+ module.exit_json(changed=changed, cron_file=cron_file, state=state)
+
+ old_value = cronvar.find_variable(name)
+
+ if ensure_present:
+ if old_value is None:
+ cronvar.add_variable(name, value, insertbefore, insertafter)
+ changed = True
+ elif old_value != value:
+ cronvar.update_variable(name, value)
+ changed = True
+ else:
+ if old_value is not None:
+ cronvar.remove_variable(name)
+ changed = True
+
+ res_args = {
+ "vars": cronvar.get_var_names(),
+ "changed": changed
+ }
+
+ if changed:
+ cronvar.write()
+
+ # retain the backup only if crontab or cron file have changed
+ if backup:
+ if changed:
+ res_args['backup_file'] = backup_file
+ else:
+ os.unlink(backup_file)
+
+ if cron_file:
+ res_args['cron_file'] = cron_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/crypttab.py b/ansible_collections/community/general/plugins/modules/crypttab.py
new file mode 100644
index 000000000..6aea362e7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/crypttab.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Steve <yo@groks.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: crypttab
+short_description: Encrypted Linux block devices
+description:
+ - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or
+ optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/)
+ will be stripped from I(name).
+ type: str
+ required: true
+ state:
+ description:
+      - Use C(present) to add a line to C(/etc/crypttab) or update its definition
+        if already present.
+      - Use C(absent) to remove a line with matching I(name).
+      - Use C(opts_present) to add options to those already present; options with
+        different values will be updated.
+      - Use C(opts_absent) to remove options from the existing set.
+ type: str
+ required: true
+ choices: [ absent, opts_absent, opts_present, present ]
+ backing_device:
+ description:
+ - Path to the underlying block device or file, or the UUID of a block-device
+ prefixed with I(UUID=).
+ type: str
+ password:
+ description:
+ - Encryption password, the path to a file containing the password, or
+ C(-) or unset if the password should be entered at boot.
+ type: path
+ opts:
+ description:
+      - A comma-delimited list of options. See C(crypttab(5)) for details.
+ type: str
+ path:
+ description:
+ - Path to file to use instead of C(/etc/crypttab).
+ - This might be useful in a chroot environment.
+ type: path
+ default: /etc/crypttab
+author:
+- Steve (@groks)
+'''
+
+EXAMPLES = r'''
+- name: Set the options explicitly for a device which must already exist
+ community.general.crypttab:
+ name: luks-home
+ state: present
+ opts: discard,cipher=aes-cbc-essiv:sha256
+
+- name: Add the 'discard' option to any existing options for all devices
+ community.general.crypttab:
+ name: '{{ item.device }}'
+ state: opts_present
+ opts: discard
+ loop: '{{ ansible_mounts }}'
+ when: "'/dev/mapper/luks-' in {{ item.device }}"
+'''
+
+import os
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'opts_absent', 'opts_present', 'present']),
+ backing_device=dict(type='str'),
+ password=dict(type='path'),
+ opts=dict(type='str'),
+ path=dict(type='path', default='/etc/crypttab')
+ ),
+ supports_check_mode=True,
+ )
+
+ backing_device = module.params['backing_device']
+ password = module.params['password']
+ opts = module.params['opts']
+ state = module.params['state']
+ path = module.params['path']
+ name = module.params['name']
+ if name.startswith('/dev/mapper/'):
+ name = name[len('/dev/mapper/'):]
+
+ if state != 'absent' and backing_device is None and password is None and opts is None:
+ module.fail_json(msg="expected one or more of 'backing_device', 'password' or 'opts'",
+ **module.params)
+
+ if 'opts' in state and (backing_device is not None or password is not None):
+ module.fail_json(msg="cannot update 'backing_device' or 'password' when state=%s" % state,
+ **module.params)
+
+ for arg_name, arg in (('name', name),
+ ('backing_device', backing_device),
+ ('password', password),
+ ('opts', opts)):
+ if (arg is not None and (' ' in arg or '\t' in arg or arg == '')):
+ module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name,
+ **module.params)
+
+ try:
+ crypttab = Crypttab(path)
+ existing_line = crypttab.match(name)
+ except Exception as e:
+ module.fail_json(msg="failed to open and parse crypttab file: %s" % to_native(e),
+ exception=traceback.format_exc(), **module.params)
+
+ if 'present' in state and existing_line is None and backing_device is None:
+ module.fail_json(msg="'backing_device' required to add a new entry",
+ **module.params)
+
+ changed, reason = False, '?'
+
+ if state == 'absent':
+ if existing_line is not None:
+ changed, reason = existing_line.remove()
+
+ elif state == 'present':
+ if existing_line is not None:
+ changed, reason = existing_line.set(backing_device, password, opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_present':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.add(opts)
+ else:
+ changed, reason = crypttab.add(Line(None, name, backing_device, password, opts))
+
+ elif state == 'opts_absent':
+ if existing_line is not None:
+ changed, reason = existing_line.opts.remove(opts)
+
+    if changed and not module.check_mode:
+        f = open(path, 'wb')
+        try:
+            f.write(to_bytes(crypttab, errors='surrogate_or_strict'))
+        finally:
+            f.close()
+
+ module.exit_json(changed=changed, msg=reason, **module.params)
+
+
+class Crypttab(object):
+
+    def __init__(self, path):
+        self.path = path
+        # Parsed lines are kept per instance; a class-level list would leak
+        # state between Crypttab objects.
+        self._lines = []
+        if not os.path.exists(path):
+            if not os.path.exists(os.path.dirname(path)):
+                os.makedirs(os.path.dirname(path))
+            open(path, 'a').close()
+
+        f = open(path, 'r')
+        try:
+            for line in f.readlines():
+                self._lines.append(Line(line))
+        finally:
+            f.close()
+
+ def add(self, line):
+ self._lines.append(line)
+ return True, 'added line'
+
+ def lines(self):
+ for line in self._lines:
+ if line.valid():
+ yield line
+
+ def match(self, name):
+ for line in self.lines():
+ if line.name == name:
+ return line
+ return None
+
+ def __str__(self):
+ lines = []
+ for line in self._lines:
+ lines.append(str(line))
+ crypttab = '\n'.join(lines)
+ if len(crypttab) == 0:
+ crypttab += '\n'
+ if crypttab[-1] != '\n':
+ crypttab += '\n'
+ return crypttab
+
+
+class Line(object):
+ def __init__(self, line=None, name=None, backing_device=None, password=None, opts=None):
+ self.line = line
+ self.name = name
+ self.backing_device = backing_device
+ self.password = password
+ self.opts = Options(opts)
+
+ if line is not None:
+ self.line = self.line.rstrip('\n')
+ if self._line_valid(line):
+ self.name, backing_device, password, opts = self._split_line(line)
+
+ self.set(backing_device, password, opts)
+
+ def set(self, backing_device, password, opts):
+ changed = False
+
+ if backing_device is not None and self.backing_device != backing_device:
+ self.backing_device = backing_device
+ changed = True
+
+ if password is not None and self.password != password:
+ self.password = password
+ changed = True
+
+ if opts is not None:
+ opts = Options(opts)
+ if opts != self.opts:
+ self.opts = opts
+ changed = True
+
+ return changed, 'updated line'
+
+ def _line_valid(self, line):
+ if not line.strip() or line.startswith('#') or len(line.split()) not in (2, 3, 4):
+ return False
+ return True
+
+ def _split_line(self, line):
+ fields = line.split()
+ try:
+ field2 = fields[2]
+ except IndexError:
+ field2 = None
+ try:
+ field3 = fields[3]
+ except IndexError:
+ field3 = None
+
+ return (fields[0],
+ fields[1],
+ field2,
+ field3)
+
+ def remove(self):
+ self.line, self.name, self.backing_device = '', None, None
+ return True, 'removed line'
+
+ def valid(self):
+ if self.name is not None and self.backing_device is not None:
+ return True
+ return False
+
+ def __str__(self):
+ if self.valid():
+ fields = [self.name, self.backing_device]
+ if self.password is not None or self.opts:
+ if self.password is not None:
+ fields.append(self.password)
+ else:
+ fields.append('none')
+ if self.opts:
+ fields.append(str(self.opts))
+ return ' '.join(fields)
+ return self.line
+
+
+class Options(dict):
+ """opts_string looks like: 'discard,foo=bar,baz=greeble' """
+
+ def __init__(self, opts_string):
+ super(Options, self).__init__()
+ self.itemlist = []
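+        # itemlist preserves option insertion order, since this module still
+        # supports Python versions whose plain dicts are unordered.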
+ if opts_string is not None:
+ for opt in opts_string.split(','):
+ kv = opt.split('=')
+ if len(kv) > 1:
+ k, v = (kv[0], kv[1])
+ else:
+ k, v = (kv[0], None)
+ self[k] = v
+
+ def add(self, opts_string):
+ changed = False
+ for k, v in Options(opts_string).items():
+ if k in self:
+ if self[k] != v:
+ changed = True
+ else:
+ changed = True
+ self[k] = v
+ return changed, 'updated options'
+
+ def remove(self, opts_string):
+ changed = False
+ for k in Options(opts_string):
+ if k in self:
+ del self[k]
+ changed = True
+ return changed, 'removed options'
+
+ def keys(self):
+ return self.itemlist
+
+ def values(self):
+ return [self[key] for key in self]
+
+ def items(self):
+ return [(key, self[key]) for key in self]
+
+ def __iter__(self):
+ return iter(self.itemlist)
+
+ def __setitem__(self, key, value):
+ if key not in self:
+ self.itemlist.append(key)
+ super(Options, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self.itemlist.remove(key)
+ super(Options, self).__delitem__(key)
+
+ def __ne__(self, obj):
+ return not (isinstance(obj, Options) and sorted(self.items()) == sorted(obj.items()))
+
+ def __str__(self):
+ ret = []
+ for k, v in self.items():
+ if v is None:
+ ret.append(k)
+ else:
+ ret.append('%s=%s' % (k, v))
+ return ','.join(ret)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/datadog_downtime.py b/ansible_collections/community/general/plugins/modules/datadog_downtime.py
new file mode 100644
index 000000000..6e506eb85
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/datadog_downtime.py
@@ -0,0 +1,315 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Datadog, Inc
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: datadog_downtime
+short_description: Manages Datadog downtimes
+version_added: 2.0.0
+description:
+ - Manages downtimes within Datadog.
+ - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/).
+author:
+ - Datadog (@Datadog)
+requirements:
+ - datadog-api-client
+ - Python 3.6+
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API.
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ default: https://api.datadoghq.com
+ type: str
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the downtime.
+ required: false
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ id:
+ description:
+ - The identifier of the downtime.
+      - If empty, a new downtime gets created, otherwise it is either updated or deleted, depending on the C(state).
+ - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup.
+ type: int
+ monitor_tags:
+ description:
+ - A list of monitor tags to which the downtime applies.
+ - The resulting downtime applies to monitors that match ALL provided monitor tags.
+ type: list
+ elements: str
+ scope:
+ description:
+ - A list of scopes to which the downtime applies.
+      - The resulting downtime applies to sources that match ALL provided scopes.
+ type: list
+ elements: str
+ monitor_id:
+ description:
+ - The ID of the monitor to mute. If not provided, the downtime applies to all monitors.
+ type: int
+ downtime_message:
+ description:
+ - A message to include with notifications for this downtime.
+ - Email notifications can be sent to specific users by using the same "@username" notation as events.
+ type: str
+ start:
+ type: int
+ description:
+ - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
+ end:
+ type: int
+ description:
+ - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it.
+ timezone:
+ description:
+ - The timezone for the downtime.
+ type: str
+ rrule:
+ description:
+ - The C(RRULE) standard for defining recurring events.
+ - For example, to have a recurring event on the first day of each month,
+ select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1).
+ - Most common rrule options from the iCalendar Spec are supported.
+ - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)).
+ type: str
+"""
+
+EXAMPLES = """
+ - name: Create a downtime
+ register: downtime_var
+ community.general.datadog_downtime:
+ state: present
+ monitor_tags:
+ - "foo:bar"
+ downtime_message: "Downtime for foo:bar"
+ scope: "test"
+ api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+      # Look up the id in the file, ignoring errors if the file does not exist, so a new downtime gets created
+ id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}"
+ - name: Save downtime id to file for later updates and idempotence
+ delegate_to: localhost
+ copy:
+ content: "{{ downtime.downtime.id }}"
+ dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}"
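+
+  # A sketch of cancelling the downtime created above, reading the saved
+  # identifier back from the same file.
+  - name: Cancel the downtime
+    community.general.datadog_downtime:
+      state: absent
+      id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt') }}"
+      api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+      app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"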
+"""
+
+RETURN = """
+# Returns the downtime JSON dictionary from the API response under the C(downtime) key.
+# See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details.
+downtime:
+ description: The downtime returned by the API.
+ type: dict
+ returned: always
+ sample: {
+ "active": true,
+ "canceled": null,
+ "creator_id": 1445416,
+ "disabled": false,
+ "downtime_type": 2,
+ "end": null,
+ "id": 1055751000,
+ "message": "Downtime for foo:bar",
+ "monitor_id": null,
+ "monitor_tags": [
+ "foo:bar"
+ ],
+ "parent_id": null,
+ "recurrence": null,
+ "scope": [
+ "test"
+ ],
+ "start": 1607015009,
+ "timezone": "UTC",
+ "updater_id": null
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+# Import Datadog
+
+DATADOG_IMP_ERR = None
+HAS_DATADOG = True
+try:
+ from datadog_api_client.v1 import Configuration, ApiClient, ApiException
+ from datadog_api_client.v1.api.downtimes_api import DowntimesApi
+ from datadog_api_client.v1.model.downtime import Downtime
+ from datadog_api_client.v1.model.downtime_recurrence import DowntimeRecurrence
+except ImportError:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(required=False, default="https://api.datadoghq.com"),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=False, choices=["present", "absent"], default="present"),
+ monitor_tags=dict(required=False, type="list", elements="str"),
+ scope=dict(required=False, type="list", elements="str"),
+ monitor_id=dict(required=False, type="int"),
+ downtime_message=dict(required=False, no_log=True),
+ start=dict(required=False, type="int"),
+ end=dict(required=False, type="int"),
+ timezone=dict(required=False, type="str"),
+ rrule=dict(required=False, type="str"),
+ id=dict(required=False, type="int"),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib("datadog-api-client"), exception=DATADOG_IMP_ERR)
+
+ configuration = Configuration(
+ host=module.params["api_host"],
+ api_key={
+ "apiKeyAuth": module.params["api_key"],
+ "appKeyAuth": module.params["app_key"]
+ }
+ )
+ with ApiClient(configuration) as api_client:
+ api_client.user_agent = "ansible_collection/community_general (module_name datadog_downtime) {0}".format(
+ api_client.user_agent
+ )
+ api_instance = DowntimesApi(api_client)
+
+ # Validate api and app keys
+ try:
+ api_instance.list_downtimes(current_only=True)
+ except ApiException as e:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key: {0}".format(e))
+
+ if module.params["state"] == "present":
+ schedule_downtime(module, api_client)
+ elif module.params["state"] == "absent":
+ cancel_downtime(module, api_client)
+
+
+def _get_downtime(module, api_client):
+ api = DowntimesApi(api_client)
+ downtime = None
+ if module.params["id"]:
+ try:
+ downtime = api.get_downtime(module.params["id"])
+ except ApiException as e:
+ module.fail_json(msg="Failed to retrieve downtime with id {0}: {1}".format(module.params["id"], e))
+ return downtime
+
+
+def build_downtime(module):
+ downtime = Downtime()
+ if module.params["monitor_tags"]:
+ downtime.monitor_tags = module.params["monitor_tags"]
+ if module.params["scope"]:
+ downtime.scope = module.params["scope"]
+ if module.params["monitor_id"]:
+ downtime.monitor_id = module.params["monitor_id"]
+ if module.params["downtime_message"]:
+ downtime.message = module.params["downtime_message"]
+ if module.params["start"]:
+ downtime.start = module.params["start"]
+ if module.params["end"]:
+ downtime.end = module.params["end"]
+ if module.params["timezone"]:
+ downtime.timezone = module.params["timezone"]
+ if module.params["rrule"]:
+ downtime.recurrence = DowntimeRecurrence(
+ rrule=module.params["rrule"]
+ )
+ return downtime
+
+
+def _post_downtime(module, api_client):
+ api = DowntimesApi(api_client)
+ downtime = build_downtime(module)
+ try:
+ resp = api.create_downtime(downtime)
+ module.params["id"] = resp.id
+ module.exit_json(changed=True, downtime=resp.to_dict())
+ except ApiException as e:
+ module.fail_json(msg="Failed to create downtime: {0}".format(e))
+
+
+def _equal_dicts(a, b, ignore_keys):
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_downtime(module, current_downtime, api_client):
+ api = DowntimesApi(api_client)
+ downtime = build_downtime(module)
+ try:
+ if current_downtime.disabled:
+ resp = api.create_downtime(downtime)
+ else:
+ resp = api.update_downtime(module.params["id"], downtime)
+ if _equal_dicts(
+ resp.to_dict(),
+ current_downtime.to_dict(),
+ ["active", "creator_id", "updater_id"]
+ ):
+ module.exit_json(changed=False, downtime=resp.to_dict())
+ else:
+ module.exit_json(changed=True, downtime=resp.to_dict())
+ except ApiException as e:
+ module.fail_json(msg="Failed to update downtime: {0}".format(e))
+
+
+def schedule_downtime(module, api_client):
+ downtime = _get_downtime(module, api_client)
+ if downtime is None:
+ _post_downtime(module, api_client)
+ else:
+ _update_downtime(module, downtime, api_client)
+
+
+def cancel_downtime(module, api_client):
+ downtime = _get_downtime(module, api_client)
+ api = DowntimesApi(api_client)
+ if downtime is None:
+ module.exit_json(changed=False)
+ try:
+ api.cancel_downtime(downtime["id"])
+ except ApiException as e:
+ module.fail_json(msg="Failed to create downtime: {0}".format(e))
+
+ module.exit_json(changed=True)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/datadog_event.py b/ansible_collections/community/general/plugins/modules/datadog_event.py
new file mode 100644
index 000000000..b8161eca6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/datadog_event.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Author: Artūras 'arturaz' Šlajus <x11@arturaz.net>
+# Author: Naoya Nakazawa <naoya.n@gmail.com>
+#
+# This module is proudly sponsored by iGeolise (www.igeolise.com) and
+# Tiny Lab Productions (www.tinylabproductions.com).
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_event
+short_description: Posts events to Datadog service
+description:
+ - "Allows to post events to Datadog (www.datadoghq.com) service."
+ - "Uses http://docs.datadoghq.com/api/#events API."
+author:
+ - "Artūras 'arturaz' Šlajus (@arturaz)"
+ - "Naoya Nakazawa (@n0ts)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ api_key:
+ type: str
+ description: ["Your DataDog API key."]
+ required: true
+ app_key:
+ type: str
+ description: ["Your DataDog app key."]
+ required: true
+ title:
+ type: str
+ description: ["The event title."]
+ required: true
+ text:
+ type: str
+ description: ["The body of the event."]
+ required: true
+ date_happened:
+ type: int
+ description:
+ - POSIX timestamp of the event.
+      - Defaults to the current time if not specified.
+ priority:
+ type: str
+ description: ["The priority of the event."]
+ default: normal
+ choices: [normal, low]
+ host:
+ type: str
+ description:
+ - Host name to associate with the event.
+ - If not specified, it defaults to the remote system's hostname.
+ api_host:
+ type: str
+ description:
+ - DataDog API endpoint URL.
+ version_added: '3.3.0'
+ tags:
+ type: list
+ elements: str
+ description: ["Comma separated list of tags to apply to the event."]
+ alert_type:
+ type: str
+ description: ["Type of alert."]
+ default: info
+ choices: ['error', 'warning', 'info', 'success']
+ aggregation_key:
+ type: str
+ description: ["An arbitrary string to use for aggregation."]
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+- name: Post an event with low priority
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ priority: low
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+
+- name: Post an event with several tags
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ tags: 'aa,bb,#host:{{ inventory_hostname }}'
+
+- name: Post an event with several tags to another endpoint
+ community.general.datadog_event:
+ title: Testing from ansible
+ text: Test
+ api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+ app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+ api_host: 'https://example.datadoghq.eu'
+ tags:
+ - aa
+ - b
+ - '#host:{{ inventory_hostname }}'
+
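+# A sketch combining I(date_happened) and I(aggregation_key) so that related
+# events roll up together; the timestamp and key values are illustrative.
+- name: Post a backdated event with an aggregation key
+  community.general.datadog_event:
+    title: Deploy finished
+    text: Deployed revision abc123
+    date_happened: 1607015009
+    aggregation_key: deploy-webapp
+    api_key: 9775a026f1ca7d1c6c5af9d94d9595a4
+    app_key: j4JyCYfefWHhgFgiZUqRm63AXHNZQyPGBfJtAzmN
+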
+'''
+
+import platform
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ app_key=dict(required=True, no_log=True),
+ api_host=dict(type='str'),
+ title=dict(required=True),
+ text=dict(required=True),
+ date_happened=dict(type='int'),
+ priority=dict(default='normal', choices=['normal', 'low']),
+ host=dict(),
+ tags=dict(type='list', elements='str'),
+ alert_type=dict(default='info', choices=['error', 'warning', 'info', 'success']),
+ aggregation_key=dict(no_log=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'app_key': module.params['app_key'],
+ }
+ if module.params['api_host'] is not None:
+ options['api_host'] = module.params['api_host']
+
+ initialize(**options)
+
+ _post_event(module)
+
+
+def _post_event(module):
+ try:
+ if module.params['host'] is None:
+ module.params['host'] = platform.node().split('.')[0]
+        # date_happened is declared in the argument spec; pass it through so
+        # backdated events actually work.
+        msg = api.Event.create(title=module.params['title'],
+                               text=module.params['text'],
+                               host=module.params['host'],
+                               tags=module.params['tags'],
+                               priority=module.params['priority'],
+                               date_happened=module.params['date_happened'],
+                               alert_type=module.params['alert_type'],
+                               aggregation_key=module.params['aggregation_key'],
+                               source_type_name='ansible')
+ if msg['status'] != 'ok':
+ module.fail_json(msg=msg)
+
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/datadog_monitor.py b/ansible_collections/community/general/plugins/modules/datadog_monitor.py
new file mode 100644
index 000000000..f58df358b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/datadog_monitor.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Sebastian Kornehl <sebastian.kornehl@asideas.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: datadog_monitor
+short_description: Manages Datadog monitors
+description:
+ - Manages monitors within Datadog.
+  - Options as described on U(https://docs.datadoghq.com/api/).
+ - The type C(event-v2) was added in community.general 4.8.0.
+author: Sebastian Kornehl (@skornehl)
+requirements: [datadog]
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ api_key:
+ description:
+ - Your Datadog API key.
+ required: true
+ type: str
+ api_host:
+ description:
+ - The URL to the Datadog API. Default value is C(https://api.datadoghq.com).
+ - This value can also be set with the C(DATADOG_HOST) environment variable.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ app_key:
+ description:
+ - Your Datadog app key.
+ required: true
+ type: str
+ state:
+ description:
+ - The designated state of the monitor.
+ required: true
+ choices: ['present', 'absent', 'mute', 'unmute']
+ type: str
+ tags:
+ description:
+ - A list of tags to associate with your monitor when creating or updating.
+ - This can help you categorize and filter monitors.
+ type: list
+ elements: str
+ type:
+ description:
+ - The type of the monitor.
+ - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0.
+ - The type C(composite) was added in community.general 3.4.0.
+ choices:
+ - metric alert
+ - service check
+ - event alert
+ - event-v2 alert
+ - process alert
+ - log alert
+ - query alert
+ - trace-analytics alert
+ - rum alert
+ - composite
+ type: str
+ query:
+ description:
+ - The monitor query to notify on.
+ - Syntax varies depending on what type of monitor you are creating.
+ type: str
+ name:
+ description:
+ - The name of the alert.
+ required: true
+ type: str
+ notification_message:
+ description:
+ - A message to include with notifications for this monitor.
+ - Email notifications can be sent to specific users by using the same '@username' notation as events.
+      - Monitor message template variables can be accessed by using double square brackets, that is, C([[) and C(]]).
+ type: str
+ silenced:
+ type: dict
+ description:
+ - Dictionary of scopes to silence, with timestamps or None.
+ - Each scope will be muted until the given POSIX timestamp or forever if the value is None.
+ notify_no_data:
+ description:
+ - Whether this monitor will notify when data stops reporting.
+ type: bool
+ default: false
+ no_data_timeframe:
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting.
+ - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
+ - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service.
+ type: str
+ timeout_h:
+ description:
+ - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
+ type: str
+ renotify_interval:
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status.
+ - It will only re-notify if it is not resolved.
+ type: str
+ escalation_message:
+ description:
+      - A message to include with a re-notification. Supports the '@username' notification notation used elsewhere.
+ - Not applicable if I(renotify_interval=None).
+ type: str
+ notify_audit:
+ description:
+ - Whether tagged users will be notified on changes to this monitor.
+ type: bool
+ default: false
+ thresholds:
+ type: dict
+ description:
+ - A dictionary of thresholds by status.
+ - Only available for service checks and metric alerts.
+ - Because each of them can have multiple thresholds, we do not define them directly in the query.
+ - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})."
+ locked:
+ description:
+ - Whether changes to this monitor should be restricted to the creator or admins.
+ type: bool
+ default: false
+ require_full_window:
+ description:
+ - Whether this monitor needs a full window of data before it gets evaluated.
+      - We highly recommend you set this to C(false) for sparse metrics, otherwise some evaluations will be skipped.
+ type: bool
+ new_host_delay:
+ description:
+ - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+ - This gives the host time to fully initialize.
+ type: str
+ evaluation_delay:
+ description:
+ - Time to delay evaluation (in seconds).
+ - Effective for sparse values.
+ type: str
+ id:
+ description:
+ - The ID of the alert.
+ - If set, will be used instead of the name to locate the alert.
+ type: str
+ include_tags:
+ description:
+      - Whether notifications from this monitor automatically insert its triggering tags into the title.
+ type: bool
+ default: true
+ version_added: 1.3.0
+ priority:
+ description:
+ - Integer from 1 (high) to 5 (low) indicating alert severity.
+ type: int
+ version_added: 4.6.0
+'''
+
+EXAMPLES = '''
+- name: Create a metric monitor
+ community.general.datadog_monitor:
+ type: "metric alert"
+ name: "Test monitor"
+ state: "present"
+ query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
+ notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Deletes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Mutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "mute"
+ silenced: '{"*":None}'
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Unmutes a monitor
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "unmute"
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+
+- name: Use datadoghq.eu platform instead of datadoghq.com
+ community.general.datadog_monitor:
+ name: "Test monitor"
+ state: "absent"
+ api_host: https://api.datadoghq.eu
+ api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
+ app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
+'''
+import traceback
+
+# Import Datadog
+DATADOG_IMP_ERR = None
+try:
+ from datadog import initialize, api
+ HAS_DATADOG = True
+except Exception:
+ DATADOG_IMP_ERR = traceback.format_exc()
+ HAS_DATADOG = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_host=dict(),
+ app_key=dict(required=True, no_log=True),
+ state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
+ type=dict(choices=['metric alert', 'service check', 'event alert', 'event-v2 alert', 'process alert',
+ 'log alert', 'query alert', 'trace-analytics alert',
+ 'rum alert', 'composite']),
+ name=dict(required=True),
+ query=dict(),
+ notification_message=dict(no_log=True),
+ silenced=dict(type='dict'),
+ notify_no_data=dict(default=False, type='bool'),
+ no_data_timeframe=dict(),
+ timeout_h=dict(),
+ renotify_interval=dict(),
+ escalation_message=dict(),
+ notify_audit=dict(default=False, type='bool'),
+ thresholds=dict(type='dict', default=None),
+ tags=dict(type='list', elements='str', default=None),
+ locked=dict(default=False, type='bool'),
+ require_full_window=dict(type='bool'),
+ new_host_delay=dict(),
+ evaluation_delay=dict(),
+ id=dict(),
+ include_tags=dict(required=False, default=True, type='bool'),
+ priority=dict(type='int'),
+ )
+ )
+
+ # Prepare Datadog
+ if not HAS_DATADOG:
+ module.fail_json(msg=missing_required_lib('datadogpy'), exception=DATADOG_IMP_ERR)
+
+ options = {
+ 'api_key': module.params['api_key'],
+ 'api_host': module.params['api_host'],
+ 'app_key': module.params['app_key']
+ }
+
+ initialize(**options)
+
+ # Check if api_key and app_key is correct or not
+ # if not, then fail here.
+ response = api.Monitor.get_all()
+ if isinstance(response, dict):
+ msg = response.get('errors', None)
+ if msg:
+ module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
+
+ if module.params['state'] == 'present':
+ install_monitor(module)
+ elif module.params['state'] == 'absent':
+ delete_monitor(module)
+ elif module.params['state'] == 'mute':
+ mute_monitor(module)
+ elif module.params['state'] == 'unmute':
+ unmute_monitor(module)
+
+
+def _fix_template_vars(message):
+ if message:
+ return message.replace('[[', '{{').replace(']]', '}}')
+ return message
+
+
+def _get_monitor(module):
+ if module.params['id'] is not None:
+ monitor = api.Monitor.get(module.params['id'])
+ if 'errors' in monitor:
+ module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
+ return monitor
+ else:
+ monitors = api.Monitor.get_all()
+ for monitor in monitors:
+ if monitor['name'] == _fix_template_vars(module.params['name']):
+ return monitor
+ return {}
+
+
+def _post_monitor(module, options):
+ try:
+ kwargs = dict(type=module.params['type'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ priority=module.params['priority'],
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.create(**kwargs)
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def _equal_dicts(a, b, ignore_keys):
+ ka = set(a).difference(ignore_keys)
+ kb = set(b).difference(ignore_keys)
+ return ka == kb and all(a[k] == b[k] for k in ka)
+
+
+def _update_monitor(module, monitor, options):
+ try:
+ kwargs = dict(id=monitor['id'], query=module.params['query'],
+ name=_fix_template_vars(module.params['name']),
+ message=_fix_template_vars(module.params['notification_message']),
+ escalation_message=_fix_template_vars(module.params['escalation_message']),
+ priority=module.params['priority'],
+ options=options)
+ if module.params['tags'] is not None:
+ kwargs['tags'] = module.params['tags']
+ msg = api.Monitor.update(**kwargs)
+
+ if 'errors' in msg:
+ module.fail_json(msg=str(msg['errors']))
+ elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
+ module.exit_json(changed=False, msg=msg)
+ else:
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def install_monitor(module):
+ options = {
+ "silenced": module.params['silenced'],
+ "notify_no_data": module.boolean(module.params['notify_no_data']),
+ "no_data_timeframe": module.params['no_data_timeframe'],
+ "timeout_h": module.params['timeout_h'],
+ "renotify_interval": module.params['renotify_interval'],
+ "escalation_message": module.params['escalation_message'],
+ "notify_audit": module.boolean(module.params['notify_audit']),
+ "locked": module.boolean(module.params['locked']),
+ "require_full_window": module.params['require_full_window'],
+ "new_host_delay": module.params['new_host_delay'],
+ "evaluation_delay": module.params['evaluation_delay'],
+ "include_tags": module.params['include_tags'],
+ }
+
+ if module.params['type'] == "service check":
+ options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
+ if module.params['type'] in ["metric alert", "log alert", "query alert", "trace-analytics alert", "rum alert"] and module.params['thresholds'] is not None:
+ options["thresholds"] = module.params['thresholds']
+
+ monitor = _get_monitor(module)
+ if not monitor:
+ _post_monitor(module, options)
+ else:
+ _update_monitor(module, monitor, options)
+
+
+def delete_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.delete(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def mute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif monitor['options']['silenced']:
+ module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
+ elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
+ module.exit_json(changed=False)
+ try:
+ if module.params['silenced'] is None or module.params['silenced'] == "":
+ msg = api.Monitor.mute(id=monitor['id'])
+ else:
+ msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+def unmute_monitor(module):
+ monitor = _get_monitor(module)
+ if not monitor:
+ module.fail_json(msg="Monitor %s not found!" % module.params['name'])
+ elif not monitor['options']['silenced']:
+ module.exit_json(changed=False)
+ try:
+ msg = api.Monitor.unmute(monitor['id'])
+ module.exit_json(changed=True, msg=msg)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dconf.py b/ansible_collections/community/general/plugins/modules/dconf.py
new file mode 100644
index 000000000..8c325486c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dconf.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Branko Majic <branko@majic.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: dconf
+author:
+ - "Branko Majic (@azaghal)"
+short_description: Modify and read dconf database
+description:
+  - This module allows modifications and reading of the C(dconf) database. The module
+    is implemented as a wrapper around the C(dconf) tool. Please see the dconf(1) man
+    page for more details.
+ - Since C(dconf) requires a running D-Bus session to change values, the module
+ will try to detect an existing session and reuse it, or run the tool via
+ C(dbus-run-session).
+requirements:
+ - Optionally the C(gi.repository) Python library (usually included in the OS
+ on hosts which have C(dconf)); this will become a non-optional requirement
+ in a future major release of community.general.
+notes:
+  - This module depends on the C(psutil) Python library (version 4.0.0 and
+    upwards), and on the C(dconf), C(dbus-send), and C(dbus-run-session)
+    binaries. Depending on the distribution you are using, you may need to
+    install additional packages to have these available.
+ - This module uses the C(gi.repository) Python library when available for
+ accurate comparison of values in C(dconf) to values specified in Ansible
+ code. C(gi.repository) is likely to be present on most systems which have
+ C(dconf) but may not be present everywhere. When it is missing, a simple
+ string comparison between values is used, and there may be false positives,
+ that is, Ansible may think that a value is being changed when it is not.
+ This fallback will be removed in a future version of this module, at which
+ point the module will stop working on hosts without C(gi.repository).
+  - Detection of an existing, running D-Bus session, required to change
+    settings via C(dconf), is not 100% reliable due to implementation details
+    of the D-Bus daemon itself. This might lead to running applications not
+    picking up changes on the fly if options are changed via Ansible and
+    C(dbus-run-session).
+ - Keep in mind that the C(dconf) CLI tool, which this module wraps around,
+ utilises an unusual syntax for the values (GVariant). For example, if you
+ wanted to provide a string value, the correct syntax would be
+ I(value="'myvalue'") - with single quotes as part of the Ansible parameter
+ value.
+ - When using loops in combination with a value like
+ "[('xkb', 'us'), ('xkb', 'se')]", you need to be aware of possible
+ type conversions. Applying a filter C({{ item.value | string }})
+ to the parameter variable can avoid potential conversion problems.
+  - The easiest way to figure out the exact syntax/value you need to provide for
+    a key is to make the configuration change in the application affected by the
+    key, and then have a look at the value set via the commands C(dconf dump
+    /path/to/dir/) or C(dconf read /path/to/key).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ key:
+ type: str
+ required: true
+ description:
+ - A dconf key to modify or read from the dconf database.
+ value:
+ type: raw
+ required: false
+ description:
+ - Value to set for the specified dconf key. Value should be specified in
+ GVariant format. Due to complexity of this format, it is best to have a
+ look at existing values in the dconf database.
+ - Required for I(state=present).
+ - Although the type is specified as "raw", it should typically be
+ specified as a string. However, boolean values in particular are
+ handled properly even when specified as booleans rather than strings
+ (in fact, handling booleans properly is why the type of this parameter
+ is "raw").
+ state:
+ type: str
+ required: false
+ default: present
+ choices: [ 'read', 'present', 'absent' ]
+ description:
+ - The action to take upon the key/value.
+'''
+
+RETURN = r"""
+value:
+  description: Value associated with the requested key.
+ returned: success, state was "read"
+ type: str
+ sample: "'Default'"
+"""
+
+EXAMPLES = r"""
+- name: Configure available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ value: "[('xkb', 'us'), ('xkb', 'se')]"
+ state: present
+
+- name: Read currently available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Gnome
+ community.general.dconf:
+ key: "/org/gnome/desktop/input-sources/sources"
+ state: absent
+
+- name: Configure available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ value: "['us', 'se']"
+ state: present
+
+- name: Read currently available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: read
+ register: keyboard_layouts
+
+- name: Reset the available keyboard layouts in Cinnamon
+ community.general.dconf:
+ key: "/org/gnome/libgnomekbd/keyboard/layouts"
+ state: absent
+
+- name: Disable desktop effects in Cinnamon
+ community.general.dconf:
+ key: "/org/cinnamon/desktop-effects"
+ value: "false"
+ state: present
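+
+# Illustrative only: the key and value below are examples, not taken from any
+# particular schema. String values must carry their GVariant quotes, so the
+# single quotes are part of the value itself.
+- name: Set a string value (note the nested single quotes)
+  community.general.dconf:
+    key: "/org/gnome/desktop/interface/gtk-theme"
+    value: "'Adwaita'"
+    state: present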
+"""
+
+
+import os
+import sys
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.respawn import (
+ has_respawned,
+ probe_interpreters_for_module,
+ respawn_module,
+)
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils import deps
+
+glib_module_name = 'gi.repository.GLib'
+
+try:
+ from gi.repository.GLib import Variant, GError
+except ImportError:
+ Variant = None
+ GError = AttributeError
+
+with deps.declare("psutil"):
+ import psutil
+
+
+class DBusWrapper(object):
+ """
+ Helper class that can be used for running a command with a working D-Bus
+ session.
+
+    If possible, the command will be run against an existing D-Bus session;
+    otherwise, a session will be spawned via dbus-run-session.
+
+ Example usage:
+
+ dbus_wrapper = DBusWrapper(ansible_module)
+ dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
+ """
+
+ def __init__(self, module):
+ """
+ Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+ """
+
+ # Store passed-in arguments and set-up some defaults.
+ self.module = module
+
+ # Try to extract existing D-Bus session address.
+ self.dbus_session_bus_address = self._get_existing_dbus_session()
+
+ # If no existing D-Bus session was detected, check if dbus-run-session
+ # is available.
+ if self.dbus_session_bus_address is None:
+ self.dbus_run_session_cmd = self.module.get_bin_path('dbus-run-session', required=True)
+
+ def _get_existing_dbus_session(self):
+ """
+ Detects and returns an existing D-Bus session bus address.
+
+ :returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
+ """
+
+ # We'll be checking the processes of current user only.
+ uid = os.getuid()
+
+ # Go through all the pids for this user, try to extract the D-Bus
+ # session bus address from environment, and ensure it is possible to
+ # connect to it.
+ self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
+
+ for pid in psutil.pids():
+ try:
+ process = psutil.Process(pid)
+ process_real_uid, dummy, dummy = process.uids()
+ if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
+ dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
+ self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
+ dbus_send_cmd = self.module.get_bin_path('dbus-send', required=True)
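+                    # Probe the candidate address by emitting a harmless
+                    # signal; a zero return code means the bus is reachable.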
+ command = [dbus_send_cmd, '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
+ rc, dummy, dummy = self.module.run_command(command)
+
+ if rc == 0:
+ self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
+
+ return dbus_session_bus_address_candidate
+
+ # This can happen with things like SSH sessions etc.
+ except psutil.AccessDenied:
+ pass
+ # Process has disappeared while inspecting it
+ except psutil.NoSuchProcess:
+ pass
+
+ self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
+
+ return None
+
+ def run_command(self, command):
+ """
+        Runs the specified command within a functional D-Bus session. The
+        command is effectively passed on to the AnsibleModule.run_command()
+        method, wrapped with dbus-run-session if necessary.
+
+ :param command: Command to run, including parameters. Each element of the list should be a string.
+        :type command: list
+
+ :returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
+ """
+
+ if self.dbus_session_bus_address is None:
+ self.module.debug("Using dbus-run-session wrapper for running commands.")
+ command = [self.dbus_run_session_cmd] + command
+ rc, out, err = self.module.run_command(command)
+
+ if self.dbus_session_bus_address is None and rc == 127:
+ self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
+ else:
+ extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
+ rc, out, err = self.module.run_command(command, environ_update=extra_environment)
+
+ return rc, out, err
+
+
+class DconfPreference(object):
+
+ def __init__(self, module, check_mode=False):
+ """
+        Initialises an instance of the class.
+
+ :param module: Ansible module instance used to signal failures and run commands.
+ :type module: AnsibleModule
+
+        :param check_mode: Specify whether to only check if a change should be made or to actually make the change.
+ :type check_mode: bool
+ """
+
+ self.module = module
+ self.check_mode = check_mode
+ # Check if dconf binary exists
+ self.dconf_bin = self.module.get_bin_path('dconf', required=True)
+
+ @staticmethod
+ def variants_are_equal(canonical_value, user_value):
+ """Compare two string GVariant representations for equality.
+
+ Assumes `canonical_value` is "canonical" in the sense that the type of
+ the variant is specified explicitly if it cannot be inferred; this is
+ true for textual representations of variants generated by the `dconf`
+ command. The type of `canonical_value` is used to parse `user_value`,
+ so the latter does not need to be explicitly typed.
+
+ Returns True if the two values are equal.
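+
+        For example (illustrative):
+
+            variants_are_equal("'Default'", "'Default'")  # True
+            variants_are_equal(None, "'Default'")         # False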
+ """
+ if canonical_value is None:
+ # It's unset in dconf database, so anything the user is trying to
+ # set is a change.
+ return False
+ try:
+ variant1 = Variant.parse(None, canonical_value)
+ variant2 = Variant.parse(variant1.get_type(), user_value)
+ return variant1 == variant2
+ except GError:
+ return canonical_value == user_value
+
+ def read(self, key):
+ """
+ Retrieves current value associated with the dconf key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+        :returns: string -- Value assigned to the provided key. If the value is not set for the specified key, returns None.
+ """
+ command = [self.dconf_bin, "read", key]
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err,
+ out=out,
+ err=err)
+
+ if out == '':
+ value = None
+ else:
+ value = out.rstrip('\n')
+
+ return value
+
+ def write(self, key, value):
+ """
+ Writes the value for specified key.
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key for which the value should be set. Should be a full path.
+ :type key: str
+
+ :param value: Value to set for the specified dconf key. Should be specified in GVariant format.
+ :type value: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+ # If no change is needed (or won't be done due to check_mode), notify
+ # caller straight away.
+ if self.variants_are_equal(self.read(key), value):
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the write
+        # operation, run the dconf command through a working D-Bus session.
+ command = [self.dconf_bin, "write", key, value]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+ self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err),
+ out=out,
+ err=err)
+
+ # Value was changed.
+ return True
+
+ def reset(self, key):
+ """
+        Resets the value for the specified key (removes it from the user configuration).
+
+ If an error occurs, a call will be made to AnsibleModule.fail_json.
+
+ :param key: dconf key to reset. Should be a full path.
+ :type key: str
+
+ :returns: bool -- True if a change was made, False if no change was required.
+ """
+
+ # Read the current value first.
+ current_value = self.read(key)
+
+        # If the key is not set at all, no change is needed; in check mode,
+        # just notify the caller that a change would be made.
+ if current_value is None:
+ return False
+ elif self.check_mode:
+ return True
+
+        # Set up the command to run. Since D-Bus is needed for the reset
+        # operation, run the dconf command through a working D-Bus session.
+ command = [self.dconf_bin, "reset", key]
+
+ # Run the command and fetch standard return code, stdout, and stderr.
+ dbus_wrapper = DBusWrapper(self.module)
+ rc, out, err = dbus_wrapper.run_command(command)
+
+ if rc != 0:
+            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err,
+ out=out,
+ err=err)
+
+ # Value was changed.
+ return True
+
+
+def main():
+ # Setup the Ansible module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent', 'read']),
+ key=dict(required=True, type='str', no_log=False),
+ # Converted to str below after special handling of bool.
+ value=dict(required=False, default=None, type='raw'),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['value']),
+ ],
+ )
+
+ if Variant is None:
+ # This interpreter can't see the GLib module. To try to fix that, we'll
+ # look in common locations for system-owned interpreters that can see
+ # it; if we find one, we'll respawn under it. Otherwise we'll proceed
+ # with degraded performance, without the ability to parse GVariants.
+ # Later (in a different PR) we'll actually deprecate this degraded
+ # performance level and fail with an error if the library can't be
+ # found.
+
+ if has_respawned():
+ # This shouldn't be possible; short-circuit early if it happens.
+ module.fail_json(
+ msg="%s must be installed and visible from %s." %
+ (glib_module_name, sys.executable))
+
+ interpreters = ['/usr/bin/python3', '/usr/bin/python2',
+ '/usr/bin/python']
+
+ interpreter = probe_interpreters_for_module(
+ interpreters, glib_module_name)
+
+ if interpreter:
+ # Found the Python bindings; respawn this module under the
+ # interpreter where we found them.
+ respawn_module(interpreter)
+ # This is the end of the line for this process, it will exit here
+ # once the respawned module has completed.
+
+ # Try to be forgiving about the user specifying a boolean as the value, or
+ # more accurately about the fact that YAML and Ansible are quite insistent
+ # about converting strings that look like booleans into booleans. Convert
+ # the boolean into a string of the type dconf will understand. Any type for
+ # the value other than boolean is just converted into a string directly.
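+    # For example, a YAML 'value: true' arrives here as Python True and is
+    # converted to the GVariant literal string 'true'.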
+ if module.params['value'] is not None:
+ if isinstance(module.params['value'], bool):
+ module.params['value'] = 'true' if module.params['value'] else 'false'
+ else:
+ module.params['value'] = to_native(
+ module.params['value'], errors='surrogate_or_strict')
+
+ if Variant is None:
+ module.warn(
+ 'WARNING: The gi.repository Python library is not available; '
+ 'using string comparison to check value equality. This fallback '
+ 'will be deprecated in a future version of community.general.')
+
+ deps.validate(module)
+
+ # Create wrapper instance.
+ dconf = DconfPreference(module, module.check_mode)
+
+ # Process based on different states.
+ if module.params['state'] == 'read':
+ value = dconf.read(module.params['key'])
+ module.exit_json(changed=False, value=value)
+ elif module.params['state'] == 'present':
+ changed = dconf.write(module.params['key'], module.params['value'])
+ module.exit_json(changed=changed)
+ elif module.params['state'] == 'absent':
+ changed = dconf.reset(module.params['key'])
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/deploy_helper.py b/ansible_collections/community/general/plugins/modules/deploy_helper.py
new file mode 100644
index 000000000..f0246cae6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/deploy_helper.py
@@ -0,0 +1,535 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
+# Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: deploy_helper
+author: "Ramon de la Fuente (@ramondelafuente)"
+short_description: Manages some of the steps common in deploying projects
+description:
+ - The Deploy Helper manages some of the steps common in deploying software.
+ It creates a folder structure, manages a symlink for the current release
+ and cleans up old releases.
+ - "Running it with the I(state=query) or I(state=present) will return the C(deploy_helper) fact.
+ C(project_path), whatever you set in the I(path) parameter,
+ C(current_path), the path to the symlink that points to the active release,
+ C(releases_path), the path to the folder to keep releases in,
+ C(shared_path), the path to the folder to keep shared resources in,
+ C(unfinished_filename), the file to check for to recognize unfinished builds,
+ C(previous_release), the release the 'current' symlink is pointing to,
+ C(previous_release_path), the full path to the 'current' symlink target,
+ C(new_release), either the 'release' parameter or a generated timestamp,
+ C(new_release_path), the path to the new release folder (not created by the module)."
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ path:
+ type: path
+ required: true
+ aliases: ['dest']
+ description:
+ - The root path of the project.
+ Returned in the C(deploy_helper.project_path) fact.
+
+ state:
+ type: str
+ description:
+ - The state of the project.
+ C(query) will only gather facts,
+ C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
+ C(finalize) will remove the unfinished_filename file, create a symlink to the newly
+ deployed release and optionally clean old releases,
+ C(clean) will remove failed & old releases,
+      C(absent) will remove the project folder (equivalent to the M(ansible.builtin.file) module with I(state=absent)).
+ choices: [ present, finalize, absent, clean, query ]
+ default: present
+
+ release:
+ type: str
+ description:
+      - The release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (for example, '20141119223359').
+ This parameter is optional during I(state=present), but needs to be set explicitly for I(state=finalize).
+ You can use the generated fact I(release={{ deploy_helper.new_release }}).
+
+ releases_path:
+ type: str
+ description:
+ - The name of the folder that will hold the releases. This can be relative to I(path) or absolute.
+ Returned in the C(deploy_helper.releases_path) fact.
+ default: releases
+
+ shared_path:
+ type: path
+ description:
+ - The name of the folder that will hold the shared resources. This can be relative to I(path) or absolute.
+ If this is set to an empty string, no shared folder will be created.
+ Returned in the C(deploy_helper.shared_path) fact.
+ default: shared
+
+ current_path:
+ type: path
+ description:
+ - The name of the symlink that is created when the deploy is finalized. Used in I(finalize) and I(clean).
+ Returned in the C(deploy_helper.current_path) fact.
+ default: current
+
+ unfinished_filename:
+ type: str
+ description:
+ - The name of the file that indicates a deploy has not finished. All folders in the I(releases_path) that
+ contain this file will be deleted on I(state=finalize) with I(clean=True), or I(state=clean). This file is
+ automatically deleted from the I(new_release_path) during I(state=finalize).
+ default: DEPLOY_UNFINISHED
+
+ clean:
+ description:
+ - Whether to run the clean procedure in case of I(state=finalize).
+ type: bool
+ default: true
+
+ keep_releases:
+ type: int
+ description:
+ - The number of old releases to keep when cleaning. Used in I(finalize) and I(clean). Any unfinished builds
+ will be deleted first, so only correct releases will count. The current version will not count.
+ default: 5
+
+notes:
+ - Facts are only returned for I(state=query) and I(state=present). If you use both, you should pass any overridden
+ parameters to both calls, otherwise the second call will overwrite the facts of the first one.
+ - When using I(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
+ new naming strategy without problems.
+ - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
+ unless you pass your own release name with I(release). Due to the nature of deploying software, this should not
+ be much of a problem.
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+
+# General explanation, starting with an example folder structure for a project:
+
+# root:
+# releases:
+# - 20140415234508
+# - 20140415235146
+# - 20140416082818
+#
+# shared:
+# - sessions
+# - uploads
+#
+# current: releases/20140416082818
+
+
+# The 'releases' folder holds all the available releases. A release is a complete build of the application being
+# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
+# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
+# git tags or commit hashes.
+#
+# During a deploy, a new folder should be created in the releases folder and any build steps required should be
+# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
+# with a link to this build.
+#
+# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
+# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
+# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
+#
+# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
+# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
+# release is reduced to the time it takes to switch the link.
+#
+# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
+# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
+# procedure to remove it during cleanup.
+
+
+# Typical usage
+- name: Initialize the deploy root and gather facts
+ community.general.deploy_helper:
+ path: /path/to/root
+- name: Clone the project to the new release folder
+ ansible.builtin.git:
+    repo: git://foosball.example.org/path/to/repo.git
+ dest: '{{ deploy_helper.new_release_path }}'
+ version: v1.1.1
+- name: Add an unfinished file, to allow cleanup on successful finalize
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
+ state: touch
+- name: Perform some build steps, like running your dependency manager for example
+ composer:
+ command: install
+ working_dir: '{{ deploy_helper.new_release_path }}'
+- name: Create some folders in the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.shared_path }}/{{ item }}'
+ state: directory
+ with_items:
+ - sessions
+ - uploads
+- name: Add symlinks from the new release to the shared folder
+ ansible.builtin.file:
+ path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
+ src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
+ state: link
+ with_items:
+ - path: app/sessions
+ src: sessions
+ - path: web/uploads
+ src: uploads
+- name: Finalize the deploy, removing the unfinished file and switching the symlink
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Retrieving facts before running a deploy
+- name: Run 'state=query' to gather facts without changing anything
+ community.general.deploy_helper:
+ path: /path/to/root
+ state: query
+# Remember to set the 'release' parameter when you actually call 'state=present' later
+- name: Initialize the deploy root
+ community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: present
+
+# All paths can be absolute or relative (to the 'path' parameter)
+- community.general.deploy_helper:
+ path: /path/to/root
+ releases_path: /var/www/project/releases
+ shared_path: /var/www/shared
+ current_path: /var/www/active
+
+# Using your own naming strategy for releases (a version tag in this case):
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: v1.1.1
+ state: present
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Using a different unfinished_filename:
+- community.general.deploy_helper:
+ path: /path/to/root
+ unfinished_filename: README.md
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+
+# Postponing the cleanup of older builds:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ clean: false
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+# Or running the cleanup ahead of the new deploy
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: present
+
+# Keeping more old releases:
+- community.general.deploy_helper:
+ path: /path/to/root
+ release: '{{ deploy_helper.new_release }}'
+ state: finalize
+ keep_releases: 10
+# Or, if you use 'clean=false' on finalize:
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: clean
+ keep_releases: 10
+
+# Removing the entire project root folder
+- community.general.deploy_helper:
+ path: /path/to/root
+ state: absent
+
+# Debugging the facts returned by the module
+- community.general.deploy_helper:
+ path: /path/to/root
+- ansible.builtin.debug:
+ var: deploy_helper
+'''
+import os
+import shutil
+import time
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class DeployHelper(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.file_args = module.load_file_common_arguments(module.params)
+
+ self.clean = module.params['clean']
+ self.current_path = module.params['current_path']
+ self.keep_releases = module.params['keep_releases']
+ self.path = module.params['path']
+ self.release = module.params['release']
+ self.releases_path = module.params['releases_path']
+ self.shared_path = module.params['shared_path']
+ self.state = module.params['state']
+ self.unfinished_filename = module.params['unfinished_filename']
+
+ def gather_facts(self):
+ current_path = os.path.join(self.path, self.current_path)
+ releases_path = os.path.join(self.path, self.releases_path)
+ if self.shared_path:
+ shared_path = os.path.join(self.path, self.shared_path)
+ else:
+ shared_path = None
+
+ previous_release, previous_release_path = self._get_last_release(current_path)
+
+ if not self.release and (self.state == 'query' or self.state == 'present'):
+ self.release = time.strftime("%Y%m%d%H%M%S")
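+            # For example: '20141119223359' for 2014-11-19 22:33:59.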
+
+ if self.release:
+ new_release_path = os.path.join(releases_path, self.release)
+ else:
+ new_release_path = None
+
+ return {
+ 'project_path': self.path,
+ 'current_path': current_path,
+ 'releases_path': releases_path,
+ 'shared_path': shared_path,
+ 'previous_release': previous_release,
+ 'previous_release_path': previous_release_path,
+ 'new_release': self.release,
+ 'new_release_path': new_release_path,
+ 'unfinished_filename': self.unfinished_filename
+ }
+
+ def delete_path(self, path):
+ if not os.path.lexists(path):
+ return False
+
+ if not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ if not self.module.check_mode:
+ try:
+ shutil.rmtree(path, ignore_errors=False)
+ except Exception as e:
+ self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
+
+ return True
+
+ def create_path(self, path):
+ changed = False
+
+ if not os.path.lexists(path):
+ changed = True
+ if not self.module.check_mode:
+ os.makedirs(path)
+
+ elif not os.path.isdir(path):
+ self.module.fail_json(msg="%s exists but is not a directory" % path)
+
+ changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
+
+ return changed
+
+ def check_link(self, path):
+ if os.path.lexists(path):
+ if not os.path.islink(path):
+ self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
+
+ def create_link(self, source, link_name):
+ if os.path.islink(link_name):
+ norm_link = os.path.normpath(os.path.realpath(link_name))
+ norm_source = os.path.normpath(os.path.realpath(source))
+ if norm_link == norm_source:
+ changed = False
+ else:
+ changed = True
+ if not self.module.check_mode:
+ if not os.path.lexists(source):
+ self.module.fail_json(msg="the symlink target %s doesn't exists" % source)
+ tmp_link_name = link_name + '.' + self.unfinished_filename
+ if os.path.islink(tmp_link_name):
+ os.unlink(tmp_link_name)
+ os.symlink(source, tmp_link_name)
+ os.rename(tmp_link_name, link_name)
+ else:
+ changed = True
+ if not self.module.check_mode:
+ os.symlink(source, link_name)
+
+ return changed
+
+ def remove_unfinished_file(self, new_release_path):
+ changed = False
+ unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
+ if os.path.lexists(unfinished_file_path):
+ changed = True
+ if not self.module.check_mode:
+ os.remove(unfinished_file_path)
+
+ return changed
+
+ def remove_unfinished_builds(self, releases_path):
+ changes = 0
+
+ for release in os.listdir(releases_path):
+ if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
+ if self.module.check_mode:
+ changes += 1
+ else:
+ changes += self.delete_path(os.path.join(releases_path, release))
+
+ return changes
+
+ def remove_unfinished_link(self, path):
+ changed = False
+
+ if not self.release:
+ return changed
+
+ tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
+ if not self.module.check_mode and os.path.exists(tmp_link_name):
+ changed = True
+ os.remove(tmp_link_name)
+
+ return changed
+
+ def cleanup(self, releases_path, reserve_version):
+ changes = 0
+
+ if os.path.lexists(releases_path):
+ releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
+ try:
+ releases.remove(reserve_version)
+ except ValueError:
+ pass
+
+ if not self.module.check_mode:
+ releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
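+                # For example, with keep_releases=5 and 8 releases on disk,
+                # the 3 oldest releases (by ctime) are deleted.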
+ for release in releases[self.keep_releases:]:
+ changes += self.delete_path(os.path.join(releases_path, release))
+ elif len(releases) > self.keep_releases:
+ changes += (len(releases) - self.keep_releases)
+
+ return changes
+
+ def _get_file_args(self, path):
+ file_args = self.file_args.copy()
+ file_args['path'] = path
+ return file_args
+
+ def _get_last_release(self, current_path):
+ previous_release = None
+ previous_release_path = None
+
+ if os.path.lexists(current_path):
+ previous_release_path = os.path.realpath(current_path)
+ previous_release = os.path.basename(previous_release_path)
+
+ return previous_release, previous_release_path
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(aliases=['dest'], required=True, type='path'),
+ release=dict(type='str'),
+ releases_path=dict(type='str', default='releases'),
+ shared_path=dict(type='path', default='shared'),
+ current_path=dict(type='path', default='current'),
+ keep_releases=dict(type='int', default=5),
+ clean=dict(type='bool', default=True),
+ unfinished_filename=dict(type='str', default='DEPLOY_UNFINISHED'),
+ state=dict(choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
+ ),
+ required_if=[
+ ('state', 'finalize', ['release']),
+ ],
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ deploy_helper = DeployHelper(module)
+ facts = deploy_helper.gather_facts()
+
+ result = {
+ 'state': deploy_helper.state
+ }
+
+ changes = 0
+
+ if deploy_helper.state == 'query':
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'present':
+ deploy_helper.check_link(facts['current_path'])
+ changes += deploy_helper.create_path(facts['project_path'])
+ changes += deploy_helper.create_path(facts['releases_path'])
+ if deploy_helper.shared_path:
+ changes += deploy_helper.create_path(facts['shared_path'])
+
+ result['ansible_facts'] = {'deploy_helper': facts}
+
+ elif deploy_helper.state == 'finalize':
+ if deploy_helper.keep_releases <= 0:
+ module.fail_json(msg="'keep_releases' should be at least 1")
+
+ changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
+ changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
+ if deploy_helper.clean:
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'clean':
+ changes += deploy_helper.remove_unfinished_link(facts['project_path'])
+ changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
+ changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
+
+ elif deploy_helper.state == 'absent':
+ # destroy the facts
+ result['ansible_facts'] = {'deploy_helper': []}
+ changes += deploy_helper.delete_path(facts['project_path'])
+
+ if changes > 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_network.py b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
new file mode 100644
index 000000000..8c1469063
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dimensiondata_network.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Dimension Data
+# Authors:
+# - Aimon Bustardo <aimon.bustardo@dimensiondata.com>
+# - Bert Diwa <Lamberto.Diwa@dimensiondata.com>
+# - Adam Friedman <tintoy@tintoy.io>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_network
+short_description: Create, update, and delete MCP 1.0 & 2.0 networks
+extends_documentation_fragment:
+ - community.general.dimensiondata
+ - community.general.dimensiondata_wait
+ - community.general.attributes
+
+description:
+  - Create, update, and delete MCP 1.0 & 2.0 networks.
+author: 'Aimon Bustardo (@aimonb)'
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the network domain to create.
+ required: true
+ type: str
+ description:
+ description:
+ - Additional description of the network domain.
+ required: false
+ type: str
+ service_plan:
+ description:
+ - The service plan, either "ESSENTIALS" or "ADVANCED".
+ - MCP 2.0 Only.
+ choices: [ESSENTIALS, ADVANCED]
+ default: ESSENTIALS
+ type: str
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create an MCP 1.0 network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA5
+ name: mynet
+
+- name: Create an MCP 2.0 network
+ community.general.dimensiondata_network:
+ region: na
+ mcp_user: my_user
+ mcp_password: my_password
+ location: NA9
+ name: mynet
+ service_plan: ADVANCED
+
+- name: Delete a network
+ community.general.dimensiondata_network:
+ region: na
+ location: NA1
+ name: mynet
+ state: absent
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state=present).
+ type: complex
+ contains:
+ id:
+ description: Network ID.
+ type: str
+ sample: "8c787000-a000-4050-a215-280893411a7d"
+ name:
+ description: Network name.
+ type: str
+ sample: "My network"
+ description:
+ description: Network description.
+ type: str
+ sample: "My network description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ status:
+ description: Network status. (MCP 2.0 only)
+ type: str
+ sample: NORMAL
+ private_net:
+ description: Private network subnet. (MCP 1.0 only)
+ type: str
+ sample: "10.2.3.0"
+ multicast:
+ description: Multicast enabled? (MCP 1.0 only)
+ type: bool
+ sample: false
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import HAS_LIBCLOUD, DimensionDataModule
+from ansible.module_utils.common.text.converters import to_native
+
+if HAS_LIBCLOUD:
+ from libcloud.compute.base import NodeLocation
+ from libcloud.common.dimensiondata import DimensionDataAPIException
+
+
+class DimensionDataNetworkModule(DimensionDataModule):
+ """
+ The dimensiondata_network module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data network module.
+ """
+
+ super(DimensionDataNetworkModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(type='str', required=True),
+ description=dict(type='str', required=False),
+ service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']),
+ state=dict(default='present', choices=['present', 'absent'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.service_plan = self.module.params['service_plan']
+ self.state = self.module.params['state']
+
+ def state_present(self):
+ network = self._get_network()
+
+ if network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network already exists',
+ network=self._network_to_dict(network)
+ )
+
+ network = self._create_network()
+
+ self.module.exit_json(
+ changed=True,
+ msg='Created network "%s" in datacenter "%s".' % (self.name, self.location),
+ network=self._network_to_dict(network)
+ )
+
+ def state_absent(self):
+ network = self._get_network()
+
+ if not network:
+ self.module.exit_json(
+ changed=False,
+ msg='Network "%s" does not exist' % self.name,
+ network=self._network_to_dict(network)
+ )
+
+ self._delete_network(network)
+
+ def _get_network(self):
+ if self.mcp_version == '1.0':
+ networks = self.driver.list_networks(location=self.location)
+ else:
+ networks = self.driver.ex_list_network_domains(location=self.location)
+
+ matched_network = [network for network in networks if network.name == self.name]
+ if matched_network:
+ return matched_network[0]
+
+ return None
+
+ def _network_to_dict(self, network):
+ network_dict = dict(
+ id=network.id,
+ name=network.name,
+ description=network.description
+ )
+
+ if isinstance(network.location, NodeLocation):
+ network_dict['location'] = network.location.id
+ else:
+ network_dict['location'] = network.location
+
+ if self.mcp_version == '1.0':
+ network_dict['private_net'] = network.private_net
+ network_dict['multicast'] = network.multicast
+ network_dict['status'] = None
+ else:
+ network_dict['private_net'] = None
+ network_dict['multicast'] = None
+ network_dict['status'] = network.status
+
+ return network_dict
+
+ def _create_network(self):
+
+ # Make sure service_plan argument is defined
+ if self.mcp_version == '2.0' and 'service_plan' not in self.module.params:
+ self.module.fail_json(
+                msg='service_plan is required when creating a network in an MCP 2.0 location'
+ )
+
+ # Create network
+ try:
+ if self.mcp_version == '1.0':
+ network = self.driver.ex_create_network(
+ self.location,
+ self.name,
+ description=self.description
+ )
+ else:
+ network = self.driver.ex_create_network_domain(
+ self.location,
+ self.name,
+ self.module.params['service_plan'],
+ description=self.description
+ )
+ except DimensionDataAPIException as e:
+
+ self.module.fail_json(
+ msg="Failed to create new network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ if self.module.params['wait'] is True:
+ network = self._wait_for_network_state(network.id, 'NORMAL')
+
+ return network
+
+ def _delete_network(self, network):
+ try:
+ if self.mcp_version == '1.0':
+ deleted = self.driver.ex_delete_network(network)
+ else:
+ deleted = self.driver.ex_delete_network_domain(network)
+
+ if deleted:
+ self.module.exit_json(
+ changed=True,
+ msg="Deleted network with id %s" % network.id
+ )
+
+            self.module.fail_json(
+                msg="Unexpected failure deleting network with id %s" % network.id
+            )
+
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+ msg="Failed to delete network: %s" % to_native(e), exception=traceback.format_exc()
+ )
+
+ def _wait_for_network_state(self, net_id, state_to_wait_for):
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_network_domain,
+ self.module.params['wait_poll_interval'],
+ self.module.params['wait_time'],
+ net_id
+ )
+ except DimensionDataAPIException as e:
+ self.module.fail_json(
+                msg='Network did not reach %s state in time: %s' % (state_to_wait_for, to_native(e)),
+ exception=traceback.format_exc()
+ )
+
+
+def main():
+ module = DimensionDataNetworkModule()
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
new file mode 100644
index 000000000..7d83ddc69
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dimensiondata_vlan.py
@@ -0,0 +1,564 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016 Dimension Data
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Authors:
+# - Adam Friedman <tintoy@tintoy.io>
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: dimensiondata_vlan
+short_description: Manage a VLAN in a Cloud Control network domain
+extends_documentation_fragment:
+ - community.general.dimensiondata
+ - community.general.dimensiondata_wait
+ - community.general.attributes
+
+description:
+ - Manage VLANs in Cloud Control network domains.
+author: 'Adam Friedman (@tintoy)'
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the target VLAN.
+ type: str
+ required: true
+ description:
+ description:
+ - A description of the VLAN.
+ type: str
+ default: ''
+ network_domain:
+ description:
+ - The Id or name of the target network domain.
+ required: true
+ type: str
+ private_ipv4_base_address:
+ description:
+ - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0).
+ type: str
+ default: ''
+ private_ipv4_prefix_size:
+ description:
+      - The size of the IPv4 address space, for example 24.
+ - Required, if C(private_ipv4_base_address) is specified.
+ type: int
+ default: 0
+ state:
+ description:
+ - The desired state for the target VLAN.
+ - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist).
+ choices: [present, absent, readonly]
+ default: present
+ type: str
+ allow_expand:
+ description:
+ - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses.
+ - If C(False), the module will fail under these conditions.
+ - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible).
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Add or update VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ description: A test VLAN
+ private_ipv4_base_address: 192.168.23.0
+ private_ipv4_prefix_size: 24
+ state: present
+ wait: true
+
+- name: Read / get VLAN details
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan1
+ state: readonly
+ wait: true
+
+- name: Delete a VLAN
+ community.general.dimensiondata_vlan:
+ region: na
+ location: NA5
+ network_domain: test_network
+ name: my_vlan_1
+ state: absent
+ wait: true
+'''
+
+RETURN = '''
+vlan:
+ description: Dictionary describing the VLAN.
+ returned: On success when I(state) is 'present'
+ type: complex
+ contains:
+ id:
+ description: VLAN ID.
+ type: str
+ sample: "aaaaa000-a000-4050-a215-2808934ccccc"
+ name:
+ description: VLAN name.
+ type: str
+ sample: "My VLAN"
+ description:
+ description: VLAN description.
+ type: str
+ sample: "My VLAN description"
+ location:
+ description: Datacenter location.
+ type: str
+ sample: NA3
+ private_ipv4_base_address:
+ description: The base address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.0
+ private_ipv4_prefix_size:
+ description: The prefix size for the VLAN's private IPV4 network.
+ type: int
+ sample: 24
+ private_ipv4_gateway_address:
+ description: The gateway address for the VLAN's private IPV4 network.
+ type: str
+ sample: 192.168.23.1
+ private_ipv6_base_address:
+ description: The base address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:0
+ private_ipv6_prefix_size:
+ description: The prefix size for the VLAN's IPV6 network.
+ type: int
+ sample: 64
+ private_ipv6_gateway_address:
+ description: The gateway address for the VLAN's IPV6 network.
+ type: str
+ sample: 2402:9900:111:1195:0:0:0:1
+ status:
+ description: VLAN status.
+ type: str
+ sample: NORMAL
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError
+
+try:
+ from libcloud.common.dimensiondata import DimensionDataVlan, DimensionDataAPIException
+
+ HAS_LIBCLOUD = True
+
+except ImportError:
+ DimensionDataVlan = None
+
+ HAS_LIBCLOUD = False
+
+
+class DimensionDataVlanModule(DimensionDataModule):
+ """
+ The dimensiondata_vlan module for Ansible.
+ """
+
+ def __init__(self):
+ """
+ Create a new Dimension Data VLAN module.
+ """
+
+ super(DimensionDataVlanModule, self).__init__(
+ module=AnsibleModule(
+ argument_spec=DimensionDataModule.argument_spec_with_wait(
+ name=dict(required=True, type='str'),
+ description=dict(default='', type='str'),
+ network_domain=dict(required=True, type='str'),
+ private_ipv4_base_address=dict(default='', type='str'),
+ private_ipv4_prefix_size=dict(default=0, type='int'),
+ allow_expand=dict(required=False, default=False, type='bool'),
+ state=dict(default='present', choices=['present', 'absent', 'readonly'])
+ ),
+ required_together=DimensionDataModule.required_together()
+ )
+ )
+
+ self.name = self.module.params['name']
+ self.description = self.module.params['description']
+ self.network_domain_selector = self.module.params['network_domain']
+ self.private_ipv4_base_address = self.module.params['private_ipv4_base_address']
+ self.private_ipv4_prefix_size = self.module.params['private_ipv4_prefix_size']
+ self.state = self.module.params['state']
+ self.allow_expand = self.module.params['allow_expand']
+
+ if self.wait and self.state != 'present':
+ self.module.fail_json(
+ msg='The wait parameter is only supported when state is "present".'
+ )
+
+ def state_present(self):
+ """
+ Ensure that the target VLAN is present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}" (should be present).'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ vlan = self._create_vlan(network_domain)
+ self.module.exit_json(
+ msg='Created VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+ else:
+ diff = VlanDiff(vlan, self.module.params)
+ if not diff.has_changes():
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (no changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+
+ return
+
+ try:
+ diff.ensure_legal_change()
+ except InvalidVlanChangeError as invalid_vlan_change:
+ self.module.fail_json(
+ msg='Unable to update VLAN "{0}" in network domain "{1}": {2}'.format(
+ self.name, self.network_domain_selector, invalid_vlan_change
+ )
+ )
+
+ if diff.needs_expand() and not self.allow_expand:
+ self.module.fail_json(
+ msg='The configured private IPv4 network size ({0}-bit prefix) for '.format(
+ self.private_ipv4_prefix_size
+ ) + 'the VLAN differs from its current network size ({0}-bit prefix) '.format(
+ vlan.private_ipv4_range_size
+ ) + 'and needs to be expanded. Use allow_expand=true if this is what you want.'
+ )
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (changes detected).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ if diff.needs_edit():
+ vlan.name = self.name
+ vlan.description = self.description
+
+ self.driver.ex_update_vlan(vlan)
+
+ if diff.needs_expand():
+ vlan.private_ipv4_range_size = self.private_ipv4_prefix_size
+ self.driver.ex_expand_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Updated VLAN "{0}" in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ def state_readonly(self):
+ """
+ Read the target VLAN's state.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if vlan:
+ self.module.exit_json(
+ vlan=vlan_to_dict(vlan),
+ changed=False
+ )
+ else:
+ self.module.fail_json(
+ msg='VLAN "{0}" does not exist in network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ )
+ )
+
+ def state_absent(self):
+ """
+ Ensure that the target VLAN is not present.
+ """
+
+ network_domain = self._get_network_domain()
+
+ vlan = self._get_vlan(network_domain)
+ if not vlan:
+ self.module.exit_json(
+ msg='VLAN "{0}" is absent from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=False
+ )
+
+ return
+
+ if self.module.check_mode:
+ self.module.exit_json(
+ msg='VLAN "{0}" is present in network domain "{1}" (should be absent).'.format(
+ self.name, self.network_domain_selector
+ ),
+ vlan=vlan_to_dict(vlan),
+ changed=True
+ )
+
+ self._delete_vlan(vlan)
+
+ self.module.exit_json(
+ msg='Deleted VLAN "{0}" from network domain "{1}".'.format(
+ self.name, self.network_domain_selector
+ ),
+ changed=True
+ )
+
+ def _get_vlan(self, network_domain):
+ """
+ Retrieve the target VLAN details from CloudControl.
+
+ :param network_domain: The target network domain.
+ :return: The VLAN, or None if the target VLAN was not found.
+ :rtype: DimensionDataVlan
+ """
+
+ vlans = self.driver.ex_list_vlans(
+ location=self.location,
+ network_domain=network_domain
+ )
+ matching_vlans = [vlan for vlan in vlans if vlan.name == self.name]
+ if matching_vlans:
+ return matching_vlans[0]
+
+ return None
+
+ def _create_vlan(self, network_domain):
+ vlan = self.driver.ex_create_vlan(
+ network_domain,
+ self.name,
+ self.private_ipv4_base_address,
+ self.description,
+ self.private_ipv4_prefix_size
+ )
+
+ if self.wait:
+ vlan = self._wait_for_vlan_state(vlan.id, 'NORMAL')
+
+ return vlan
+
+ def _delete_vlan(self, vlan):
+ try:
+ self.driver.ex_delete_vlan(vlan)
+
+        # Not currently supported for deletes due to a bug in libcloud (the
+        # module will error out if "wait" is specified when "state" is not
+        # "present").
+ if self.wait:
+ self._wait_for_vlan_state(vlan, 'NOT_FOUND')
+
+ except DimensionDataAPIException as api_exception:
+ self.module.fail_json(
+ msg='Failed to delete VLAN "{0}" due to unexpected error from the CloudControl API: {1}'.format(
+ vlan.id, api_exception.msg
+ )
+ )
+
+ def _wait_for_vlan_state(self, vlan, state_to_wait_for):
+ network_domain = self._get_network_domain()
+
+ wait_poll_interval = self.module.params['wait_poll_interval']
+ wait_time = self.module.params['wait_time']
+
+        # Bizarre bug in libcloud when checking status after delete;
+        # socket.error is too generic to catch in this context, so for now
+        # we don't even try.
+
+ try:
+ return self.driver.connection.wait_for_state(
+ state_to_wait_for,
+ self.driver.ex_get_vlan,
+ wait_poll_interval,
+ wait_time,
+ vlan
+ )
+
+ except DimensionDataAPIException as api_exception:
+ if api_exception.code != 'RESOURCE_NOT_FOUND':
+ raise
+
+ return DimensionDataVlan(
+ id=vlan.id,
+ status='NOT_FOUND',
+ name='',
+ description='',
+ private_ipv4_range_address='',
+ private_ipv4_range_size=0,
+ ipv4_gateway='',
+ ipv6_range_address='',
+ ipv6_range_size=0,
+ ipv6_gateway='',
+ location=self.location,
+ network_domain=network_domain
+ )
+
+ def _get_network_domain(self):
+ """
+ Retrieve the target network domain from the Cloud Control API.
+
+ :return: The network domain.
+ """
+
+ try:
+ return self.get_network_domain(
+ self.network_domain_selector, self.location
+ )
+ except UnknownNetworkError:
+ self.module.fail_json(
+ msg='Cannot find network domain "{0}" in datacenter "{1}".'.format(
+ self.network_domain_selector, self.location
+ )
+ )
+
+ return None
+
+
+class InvalidVlanChangeError(Exception):
+ """
+ Error raised when an illegal change to VLAN state is attempted.
+ """
+
+ pass
+
+
+class VlanDiff(object):
+ """
+ Represents differences between VLAN information (from CloudControl) and module parameters.
+ """
+
+ def __init__(self, vlan, module_params):
+ """
+
+ :param vlan: The VLAN information from CloudControl.
+ :type vlan: DimensionDataVlan
+ :param module_params: The module parameters.
+ :type module_params: dict
+ """
+
+ self.vlan = vlan
+ self.module_params = module_params
+
+ self.name_changed = module_params['name'] != vlan.name
+ self.description_changed = module_params['description'] != vlan.description
+ self.private_ipv4_base_address_changed = module_params['private_ipv4_base_address'] != vlan.private_ipv4_range_address
+ self.private_ipv4_prefix_size_changed = module_params['private_ipv4_prefix_size'] != vlan.private_ipv4_range_size
+
+ # Is configured prefix size greater than or less than the actual prefix size?
+ private_ipv4_prefix_size_difference = module_params['private_ipv4_prefix_size'] - vlan.private_ipv4_range_size
+ self.private_ipv4_prefix_size_increased = private_ipv4_prefix_size_difference > 0
+ self.private_ipv4_prefix_size_decreased = private_ipv4_prefix_size_difference < 0
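+        # For example, going from a /24 to a /23 decreases the prefix size and
+        # expands the network (a legal change); going from /24 to /25 would
+        # shrink it, which is not supported.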
+
+ def has_changes(self):
+ """
+ Does the VlanDiff represent any changes between the VLAN and module configuration?
+
+        :return: True, if there are changes; otherwise, False.
+ """
+
+ return self.needs_edit() or self.needs_expand()
+
+ def ensure_legal_change(self):
+ """
+ Ensure the change (if any) represented by the VlanDiff represents a legal change to VLAN state.
+
+ - private_ipv4_base_address cannot be changed
+ - private_ipv4_prefix_size must be greater than or equal to the VLAN's existing private_ipv4_range_size
+
+ :raise InvalidVlanChangeError: The VlanDiff does not represent a legal change to VLAN state.
+ """
+
+ # Cannot change base address for private IPv4 network.
+ if self.private_ipv4_base_address_changed:
+            raise InvalidVlanChangeError('Cannot change the private IPv4 base address for an existing VLAN.')
+
+ # Cannot shrink private IPv4 network (by increasing prefix size).
+ if self.private_ipv4_prefix_size_increased:
+            raise InvalidVlanChangeError('Cannot shrink the private IPv4 network for an existing VLAN (only expansion is supported).')
+
+ def needs_edit(self):
+ """
+ Is an Edit operation required to resolve the differences between the VLAN information and the module parameters?
+
+ :return: True, if an Edit operation is required; otherwise, False.
+ """
+
+ return self.name_changed or self.description_changed
+
+ def needs_expand(self):
+ """
+ Is an Expand operation required to resolve the differences between the VLAN information and the module parameters?
+
+ The VLAN's network is expanded by reducing the size of its network prefix.
+
+ :return: True, if an Expand operation is required; otherwise, False.
+ """
+
+ return self.private_ipv4_prefix_size_decreased
+
+
+def vlan_to_dict(vlan):
+ return {
+ 'id': vlan.id,
+ 'name': vlan.name,
+ 'description': vlan.description,
+ 'location': vlan.location.id,
+ 'private_ipv4_base_address': vlan.private_ipv4_range_address,
+ 'private_ipv4_prefix_size': vlan.private_ipv4_range_size,
+ 'private_ipv4_gateway_address': vlan.ipv4_gateway,
+ 'ipv6_base_address': vlan.ipv6_range_address,
+ 'ipv6_prefix_size': vlan.ipv6_range_size,
+ 'ipv6_gateway_address': vlan.ipv6_gateway,
+ 'status': vlan.status
+ }
+
+
+def main():
+ module = DimensionDataVlanModule()
+
+ if module.state == 'present':
+ module.state_present()
+ elif module.state == 'readonly':
+ module.state_readonly()
+ elif module.state == 'absent':
+ module.state_absent()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/discord.py b/ansible_collections/community/general/plugins/modules/discord.py
new file mode 100644
index 000000000..8b5391d44
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/discord.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: discord
+short_description: Send Discord messages
+version_added: 3.1.0
+description:
+ - Sends a message to a Discord channel using the Discord webhook API.
+author: Christian Wollinger (@cwollinger)
+seealso:
+ - name: API documentation
+ description: Documentation for Discord API
+ link: https://discord.com/developers/docs/resources/webhook#execute-webhook
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ webhook_id:
+ description:
+ - The webhook ID.
+ - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
+ required: true
+ type: str
+ webhook_token:
+ description:
+ - The webhook token.
+ - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})."
+ required: true
+ type: str
+ content:
+ description:
+ - Content of the message to the Discord channel.
+ - At least one of I(content) and I(embeds) must be specified.
+ type: str
+ username:
+ description:
+ - Overrides the default username of the webhook.
+ type: str
+ avatar_url:
+ description:
+ - Overrides the default avatar of the webhook.
+ type: str
+ tts:
+ description:
+ - Set this to C(true) if this is a TTS (Text to Speech) message.
+ type: bool
+ default: false
+ embeds:
+ description:
+ - Send messages as Embeds to the Discord channel.
+ - Embeds can have a colored border, embedded images, text fields and more.
+ - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)"
+ - At least one of I(content) and I(embeds) must be specified.
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = """
+- name: Send a message to the Discord channel
+ community.general.discord:
+ webhook_id: "00000"
+ webhook_token: "XXXYYY"
+ content: "This is a message from ansible"
+
+- name: Send a message to the Discord channel with specific username and avatar
+ community.general.discord:
+ webhook_id: "00000"
+ webhook_token: "XXXYYY"
+ content: "This is a message from ansible"
+ username: Ansible
+ avatar_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+
+- name: Send an embedded message to the Discord channel
+ community.general.discord:
+ webhook_id: "00000"
+ webhook_token: "XXXYYY"
+ embeds:
+ - title: "Embedded message"
+ description: "This is an embedded message"
+ footer:
+ text: "Author: Ansible"
+ image:
+ url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+
+- name: Send two embedded messages
+ community.general.discord:
+ webhook_id: "00000"
+ webhook_token: "XXXYYY"
+ embeds:
+ - title: "First message"
+ description: "This is my first embedded message"
+ footer:
+ text: "Author: Ansible"
+ image:
+ url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+ - title: "Second message"
+ description: "This is my first second message"
+ footer:
+ text: "Author: Ansible"
+ icon_url: "https://docs.ansible.com/ansible/latest/_static/images/logo_invert.png"
+ fields:
+ - name: "Field 1"
+ value: "Value of my first field"
+ - name: "Field 2"
+ value: "Value of my second field"
+ timestamp: "{{ ansible_date_time.iso8601 }}"
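+
+# An additional illustrative example based on the options documented above;
+# tts is part of this module's argument spec.
+- name: Send a text-to-speech message to the Discord channel
+  community.general.discord:
+    webhook_id: "00000"
+    webhook_token: "XXXYYY"
+    content: "This is a TTS message from ansible"
+    tts: true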
+"""
+
+RETURN = """
+http_code:
+ description:
+    - Response code returned by the Discord API.
+ returned: always
+ type: int
+ sample: 204
+"""
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+
+
+def discord_check_mode(module):
+
+ webhook_id = module.params['webhook_id']
+ webhook_token = module.params['webhook_token']
+
+ headers = {
+ 'content-type': 'application/json'
+ }
+
+ url = "https://discord.com/api/webhooks/%s/%s" % (
+ webhook_id, webhook_token)
+
+ response, info = fetch_url(module, url, method='GET', headers=headers)
+ return response, info
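+
+# Informational note: in check mode the module only performs the GET above to
+# verify that the webhook exists (HTTP 200); an actual message POST is expected
+# to return HTTP 204, as checked in main() below.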
+
+
+def discord_text_msg(module):
+
+ webhook_id = module.params['webhook_id']
+ webhook_token = module.params['webhook_token']
+ content = module.params['content']
+ user = module.params['username']
+ avatar_url = module.params['avatar_url']
+ tts = module.params['tts']
+ embeds = module.params['embeds']
+
+ headers = {
+ 'content-type': 'application/json'
+ }
+
+ url = "https://discord.com/api/webhooks/%s/%s" % (
+ webhook_id, webhook_token)
+
+ payload = {
+ 'content': content,
+ 'username': user,
+ 'avatar_url': avatar_url,
+ 'tts': tts,
+ 'embeds': embeds,
+ }
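+
+    # For illustration (assumed values): a plain text message serializes to
+    # something like {"content": "hello", "username": null, "avatar_url": null,
+    # "tts": false, "embeds": null}.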
+
+ payload = module.jsonify(payload)
+
+ response, info = fetch_url(module, url, data=payload, headers=headers, method='POST')
+ return response, info
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook_id=dict(type='str', required=True),
+ webhook_token=dict(type='str', required=True, no_log=True),
+ content=dict(type='str'),
+ username=dict(type='str'),
+ avatar_url=dict(type='str'),
+ tts=dict(type='bool', default=False),
+ embeds=dict(type='list', elements='dict'),
+ ),
+ required_one_of=[['content', 'embeds']],
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False,
+ http_code='',
+ )
+
+ if module.check_mode:
+ response, info = discord_check_mode(module)
+ if info['status'] != 200:
+ try:
+ module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
+ except Exception:
+ module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
+ else:
+ module.exit_json(msg=info['msg'], changed=False, http_code=info['status'], response=module.from_json(response.read()))
+ else:
+ response, info = discord_text_msg(module)
+ if info['status'] != 204:
+ try:
+ module.fail_json(http_code=info['status'], msg=info['msg'], response=module.from_json(info['body']), info=info)
+ except Exception:
+ module.fail_json(http_code=info['status'], msg=info['msg'], info=info)
+ else:
+ module.exit_json(msg=info['msg'], changed=True, http_code=info['status'])
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/django_manage.py b/ansible_collections/community/general/plugins/modules/django_manage.py
new file mode 100644
index 000000000..537cf0fa7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/django_manage.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2013, Scott Anderson <scottanderson42@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: django_manage
+short_description: Manages a Django application
+description:
+ - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the
+ I(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ command:
+ description:
+      - The name of the Django management command to run. The commands listed below are built into this module and have some basic parameter validation.
+ - >
+ C(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be
+ removed in community.general 9.0.0. Use C(clearsessions) instead.
+ - C(collectstatic) - Collects the static files into C(STATIC_ROOT).
+ - C(createcachetable) - Creates the cache tables for use with the database cache backend.
+ - C(flush) - Removes all data from the database.
+ - C(loaddata) - Searches for and loads the contents of the named I(fixtures) into the database.
+ - C(migrate) - Synchronizes the database state with models and migrations.
+ - >
+ C(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7).
+ This parameter will be removed in community.general 9.0.0. Use C(migrate) instead.
+ - C(test) - Runs tests for all installed apps.
+ - >
+ C(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be
+ removed in community.general 9.0.0. Use C(check) instead.
+      - Other commands can be entered, but will fail if they are unknown to Django. Commands that
+        prompt for user input should be run with the C(--noinput) flag.
+ type: str
+ required: true
+ project_path:
+ description:
+ - The path to the root of the Django application where C(manage.py) lives.
+ type: path
+ required: true
+ aliases: [app_path, chdir]
+ settings:
+ description:
+ - The Python path to the application's settings module, such as C(myapp.settings).
+ type: path
+ required: false
+ pythonpath:
+ description:
+ - A directory to add to the Python path. Typically used to include the settings module if it is located
+ external to the application directory.
+ - This would be equivalent to adding I(pythonpath)'s value to the C(PYTHONPATH) environment variable.
+ type: path
+ required: false
+ aliases: [python_path]
+ virtualenv:
+ description:
+ - An optional path to a C(virtualenv) installation to use while running the manage application.
+ type: path
+ aliases: [virtual_env]
+ apps:
+ description:
+      - A space-delimited list of apps to target. Used by the C(test) command.
+ type: str
+ required: false
+ cache_table:
+ description:
+ - The name of the table used for database-backed caching. Used by the C(createcachetable) command.
+ type: str
+ required: false
+ clear:
+ description:
+ - Clear the existing files before trying to copy or link the original file.
+ - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically.
+ required: false
+ default: false
+ type: bool
+ database:
+ description:
+ - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb),
+ and C(migrate) commands.
+ type: str
+ required: false
+ failfast:
+ description:
+ - Fail the command immediately if a test fails. Used by the C(test) command.
+ required: false
+ default: false
+ type: bool
+ aliases: [fail_fast]
+ fixtures:
+ description:
+ - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command.
+ type: str
+ required: false
+ skip:
+ description:
+      - Will skip over out-of-order missing migrations. This parameter can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ merge:
+ description:
+      - Will run out-of-order or missing migrations as they are not rollback migrations. This parameter
+        can only be used with the C(migrate) command.
+ required: false
+ type: bool
+ link:
+ description:
+      - Will create links to the files instead of copying them. This parameter can only be used with the
+        C(collectstatic) command.
+ required: false
+ type: bool
+ testrunner:
+ description:
+ - Controls the test runner class that is used to execute tests.
+ - This parameter is passed as-is to C(manage.py).
+ type: str
+ required: false
+ aliases: [test_runner]
+ ack_venv_creation_deprecation:
+ description:
+ - >-
+      When a I(virtualenv) is set but the virtual environment does not exist, the current behavior is
+      to create a new virtual environment. That behavior is deprecated; if that happens, a
+      deprecation warning is generated. Set this flag to C(true) to suppress the deprecation warning.
+      - Please note that you will receive no further warning about this being removed until the module
+        starts failing in such cases, from community.general 9.0.0 onwards.
+ type: bool
+ version_added: 5.8.0
+
+notes:
+ - >
+ B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in
+ community.general version 9.0.0 (estimated to be released in May 2024).
+ Please notice that Django 4.1 requires Python 3.8 or greater.
+ - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter
+ is specified. This requirement is deprecated and will be removed in community.general version 9.0.0.
+ - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already
+ exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0.
+ - The parameter I(virtualenv) will remain in use, but it will require the specified virtualenv to exist.
+ The recommended way to create one in Ansible is by using M(ansible.builtin.pip).
+ - This module assumes English error messages for the C(createcachetable) command to detect table existence,
+ unfortunately.
+  - To be able to use the C(migrate) command with Django versions < 1.7, you must have C(south) installed and added
+    as an app in your settings.
+ - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings.
+ - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang,
+ i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter.
+seealso:
+ - name: django-admin and manage.py Reference
+ description: Reference for C(django-admin) or C(manage.py) commands.
+ link: https://docs.djangoproject.com/en/4.1/ref/django-admin/
+ - name: Django Download page
+ description: The page showing how to get Django and the timeline of supported releases.
+ link: https://www.djangoproject.com/download/
+ - name: What Python version can I use with Django?
+ description: From the Django FAQ, the response to Python requirements for the framework.
+ link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django
+requirements: [ "virtualenv", "django" ]
+author:
+ - Alexei Znamensky (@russoz)
+ - Scott Anderson (@tastychutney)
+'''
+
+EXAMPLES = """
+- name: Run cleanup on the application installed in django_dir
+ community.general.django_manage:
+ command: cleanup
+ project_path: "{{ django_dir }}"
+
+- name: Load the initial_data fixture into the application
+ community.general.django_manage:
+ command: loaddata
+ project_path: "{{ django_dir }}"
+ fixtures: "{{ initial_data }}"
+
+- name: Run syncdb on the application
+ community.general.django_manage:
+ command: syncdb
+ project_path: "{{ django_dir }}"
+ settings: "{{ settings_app_name }}"
+ pythonpath: "{{ settings_dir }}"
+ virtualenv: "{{ virtualenv_dir }}"
+
+- name: Run the SmokeTest test case from the main app. Useful for testing deploys
+ community.general.django_manage:
+ command: test
+ project_path: "{{ django_dir }}"
+ apps: main.SmokeTest
+
+- name: Create an initial superuser
+ community.general.django_manage:
+ command: "createsuperuser --noinput --username=admin --email=admin@example.com"
+ project_path: "{{ django_dir }}"
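+
+# A further illustrative example based on the options documented above; clear
+# is supported for the collectstatic command.
+- name: Collect static files, clearing the existing files first
+  community.general.django_manage:
+    command: collectstatic
+    project_path: "{{ django_dir }}"
+    clear: true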
+"""
+
+import os
+import sys
+import shlex
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _fail(module, cmd, out, err, **kwargs):
+ msg = ''
+ if out:
+ msg += "stdout: %s" % (out, )
+ if err:
+        msg += "\nstderr: %s" % (err, )
+ module.fail_json(cmd=cmd, msg=msg, **kwargs)
+
+
+def _ensure_virtualenv(module):
+
+ venv_param = module.params['virtualenv']
+ if venv_param is None:
+ return
+
+ vbin = os.path.join(venv_param, 'bin')
+ activate = os.path.join(vbin, 'activate')
+
+ if not os.path.exists(activate):
+ # In version 9.0.0, if the venv is not found, it should fail_json() here.
+ if not module.params['ack_venv_creation_deprecation']:
+ module.deprecate(
+ 'The behavior of "creating the virtual environment when missing" is being '
+ 'deprecated and will be removed in community.general version 9.0.0. '
+ 'Set the module parameter `ack_venv_creation_deprecation: true` to '
+ 'prevent this message from showing up when creating a virtualenv.',
+ version='9.0.0',
+ collection_name='community.general',
+ )
+
+ virtualenv = module.get_bin_path('virtualenv', True)
+ vcmd = [virtualenv, venv_param]
+ rc, out_venv, err_venv = module.run_command(vcmd)
+ if rc != 0:
+ _fail(module, vcmd, out_venv, err_venv)
+
+ os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
+ os.environ["VIRTUAL_ENV"] = venv_param
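+
+    # Illustration: with virtualenv=/opt/venv (an assumed path), PATH becomes
+    # "/opt/venv/bin:<previous PATH>", so the "#!/usr/bin/env python" shebang of
+    # manage.py resolves to the virtualenv's interpreter.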
+
+
+def createcachetable_check_changed(output):
+ return "already exists" not in output
+
+
+def flush_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def loaddata_filter_output(line):
+ return "Installed" in line and "Installed 0 object" not in line
+
+
+def syncdb_filter_output(line):
+ return ("Creating table " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line)
+
+
+def migrate_filter_output(line):
+ return ("Migrating forwards " in line) \
+ or ("Installed" in line and "Installed 0 object" not in line) \
+ or ("Applying" in line)
+
+
+def collectstatic_filter_output(line):
+ return line and "0 static files" not in line
+
+
+def main():
+ command_allowed_param_map = dict(
+ cleanup=(),
+ createcachetable=('cache_table', 'database', ),
+ flush=('database', ),
+ loaddata=('database', 'fixtures', ),
+ syncdb=('database', ),
+ test=('failfast', 'testrunner', 'apps', ),
+ validate=(),
+ migrate=('apps', 'skip', 'merge', 'database',),
+ collectstatic=('clear', 'link', ),
+ )
+
+ command_required_param_map = dict(
+ loaddata=('fixtures', ),
+ )
+
+ # forces --noinput on every command that needs it
+ noinput_commands = (
+ 'flush',
+ 'syncdb',
+ 'migrate',
+ 'test',
+ 'collectstatic',
+ )
+
+ # These params are allowed for certain commands only
+ specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'testrunner')
+
+ # These params are automatically added to the command if present
+ general_params = ('settings', 'pythonpath', 'database',)
+ specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
+ end_of_command_params = ('apps', 'cache_table', 'fixtures')
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=True, type='str'),
+ project_path=dict(required=True, type='path', aliases=['app_path', 'chdir']),
+ settings=dict(type='path'),
+ pythonpath=dict(type='path', aliases=['python_path']),
+ virtualenv=dict(type='path', aliases=['virtual_env']),
+
+ apps=dict(),
+ cache_table=dict(type='str'),
+ clear=dict(default=False, type='bool'),
+ database=dict(type='str'),
+ failfast=dict(default=False, type='bool', aliases=['fail_fast']),
+ fixtures=dict(type='str'),
+ testrunner=dict(type='str', aliases=['test_runner']),
+ skip=dict(type='bool'),
+ merge=dict(type='bool'),
+ link=dict(type='bool'),
+ ack_venv_creation_deprecation=dict(type='bool'),
+ ),
+ )
+
+ command_split = shlex.split(module.params['command'])
+ command_bin = command_split[0]
+ project_path = module.params['project_path']
+ virtualenv = module.params['virtualenv']
+
+ try:
+ _deprecation = dict(
+ cleanup="clearsessions",
+ syncdb="migrate",
+ validate="check",
+ )
+ module.deprecate(
+            'The command {0} has been deprecated as it is no longer supported in recent Django versions. '
+            'Please use the command {1} instead, which provides similar capability.'.format(command_bin, _deprecation[command_bin]),
+ version='9.0.0',
+ collection_name='community.general'
+ )
+ except KeyError:
+ pass
+
+ for param in specific_params:
+ value = module.params[param]
+ if value and param not in command_allowed_param_map[command_bin]:
+ module.fail_json(msg='%s param is incompatible with command=%s' % (param, command_bin))
+
+ for param in command_required_param_map.get(command_bin, ()):
+ if not module.params[param]:
+ module.fail_json(msg='%s param is required for command=%s' % (param, command_bin))
+
+ _ensure_virtualenv(module)
+
+ run_cmd_args = ["./manage.py"] + command_split
+
+ if command_bin in noinput_commands and '--noinput' not in command_split:
+ run_cmd_args.append("--noinput")
+
+ for param in general_params:
+ if module.params[param]:
+ run_cmd_args.append('--%s=%s' % (param, module.params[param]))
+
+ for param in specific_boolean_params:
+ if module.params[param]:
+ run_cmd_args.append('--%s' % param)
+
+ # these params always get tacked on the end of the command
+ for param in end_of_command_params:
+ if module.params[param]:
+ if param in ('fixtures', 'apps'):
+ run_cmd_args.extend(shlex.split(module.params[param]))
+ else:
+ run_cmd_args.append(module.params[param])
+
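+    # Illustration of the assembled argv (assumed parameter values): command=migrate
+    # with database=default and merge=true yields
+    #   ['./manage.py', 'migrate', '--noinput', '--database=default', '--merge']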
+ rc, out, err = module.run_command(run_cmd_args, cwd=project_path)
+ if rc != 0:
+ if command_bin == 'createcachetable' and 'table' in err and 'already exists' in err:
+ out = 'already exists.'
+ else:
+ if "Unknown command:" in err:
+ _fail(module, run_cmd_args, err, "Unknown django command: %s" % command_bin)
+ _fail(module, run_cmd_args, out, err, path=os.environ["PATH"], syspath=sys.path)
+
+ changed = False
+
+ lines = out.split('\n')
+ filt = globals().get(command_bin + "_filter_output", None)
+ if filt:
+ filtered_output = list(filter(filt, lines))
+ if len(filtered_output):
+ changed = True
+ check_changed = globals().get("{0}_check_changed".format(command_bin), None)
+ if check_changed:
+ changed = check_changed(out)
+
+ module.exit_json(changed=changed, out=out, cmd=run_cmd_args, app_path=project_path, project_path=project_path,
+ virtualenv=virtualenv, settings=module.params['settings'], pythonpath=module.params['pythonpath'])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dnf_versionlock.py b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py
new file mode 100644
index 000000000..fac3ad78d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dnf_versionlock.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Roberto Moreda <moreda@allenta.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: dnf_versionlock
+version_added: '4.0.0'
+short_description: Locks package versions in C(dnf) based systems
+description:
+- Locks package versions using the C(versionlock) plugin in C(dnf) based
+  systems. This plugin takes a set of names and versions for packages and
+  excludes all other versions of those packages. This allows you to, for
+  example, protect packages from being updated by newer versions. The state
+  of the plugin that reflects the locking of packages is the C(locklist).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: partial
+ details:
+      - The logic of the C(versionlock) plugin for corner cases can be
+        confusing, so please take into account that this module will do its
+        best to give a C(check_mode) prediction of what is going to happen. In
+        case of doubt, check the documentation of the plugin.
+      - Sometimes the module may predict changes in C(check_mode) that do not
+        happen, because C(versionlock) concludes that an entry in the
+        C(locklist) already matches.
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Package name spec to add or exclude to or delete from the C(locklist)
+ using the format expected by the C(dnf repoquery) command.
+ - This parameter is mutually exclusive with I(state=clean).
+ type: list
+ required: false
+ elements: str
+ default: []
+ raw:
+ description:
+      - Do not resolve package name specs to NEVRAs to find the specific versions
+        to lock to. Instead, the package name specs are used as they are. This
+        enables locking to versions of the package that are not yet available.
+ type: bool
+ default: false
+ state:
+ description:
+ - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or
+ C(clean)) from the C(locklist).
+      - C(present) will add a package name spec to the C(locklist). If there is an
+        installed package that matches, then only that version will be added.
+        Otherwise, all available package versions will be added.
+ - C(excluded) will add a package name spec as excluded to the
+ C(locklist). It means that packages represented by the package name
+ spec will be excluded from transaction operations. All available
+ package versions will be added.
+ - C(absent) will delete entries in the C(locklist) that match the
+ package name spec.
+ - C(clean) will delete all entries in the C(locklist). This option is
+ mutually exclusive with C(name).
+ choices: [ 'absent', 'clean', 'excluded', 'present' ]
+ type: str
+ default: present
+notes:
+  - In an ideal world, the C(versionlock) plugin would have a dry-run option to
+    know for sure what is going to happen. So far, we have to work with a best
+    guess, as close as possible to the behaviour inferred from its code.
+  - For most cases where you want to lock and unlock specific versions of a
+    package, this works fairly well.
+requirements:
+ - dnf
+ - dnf-plugin-versionlock
+author:
+ - Roberto Moreda (@moreda) <moreda@allenta.com>
+'''
+
+EXAMPLES = r'''
+- name: Prevent installed nginx from being updated
+ community.general.dnf_versionlock:
+ name: nginx
+ state: present
+
+- name: Prevent multiple packages from being updated
+ community.general.dnf_versionlock:
+ name:
+ - nginx
+ - haproxy
+ state: present
+
+- name: Remove lock from nginx to be updated again
+ community.general.dnf_versionlock:
+    name: nginx
+ state: absent
+
+- name: Exclude bind 32:9.11 from installs or updates
+ community.general.dnf_versionlock:
+    name: bind-32:9.11*
+ state: excluded
+
+- name: Keep bash package in major version 4
+ community.general.dnf_versionlock:
+ name: bash-0:4.*
+ raw: true
+ state: present
+
+- name: Delete all entries in the locklist of versionlock
+ community.general.dnf_versionlock:
+ state: clean
+'''
+
+RETURN = r'''
+locklist_pre:
+ description: Locklist before module execution.
+ returned: success
+ type: list
+ elements: str
+ sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ]
+locklist_post:
+ description: Locklist after module execution.
+ returned: success and (not check mode or state is clean)
+ type: list
+ elements: str
+ sample: [ 'bash-0:4.4.20-1.el8_4.*' ]
+specs_toadd:
+ description: Package name specs meant to be added by versionlock.
+ returned: success
+ type: list
+ elements: str
+ sample: [ 'bash' ]
+specs_todelete:
+ description: Package name specs meant to be deleted by versionlock.
+ returned: success
+ type: list
+ elements: str
+ sample: [ 'bind' ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import fnmatch
+import os
+import re
+
+DNF_BIN = "/usr/bin/dnf"
+VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf"
+# NEVRA regex.
+NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-"
+ r"(?P<release>.+)\.(?P<arch>.+)$")
+
+
+def do_versionlock(module, command, patterns=None, raw=False):
+ patterns = [] if not patterns else patterns
+ raw_parameter = ["--raw"] if raw else []
+    # Call dnf versionlock using just one full NEVR package-name-spec at a
+    # time, because multiple package-name-specs and globs are not well supported.
+ #
+ # This is a workaround for two alleged bugs in the dnf versionlock plugin:
+ # * Multiple package-name-spec arguments don't lock correctly
+ # (https://bugzilla.redhat.com/show_bug.cgi?id=2013324).
+ # * Locking a version of a not installed package disallows locking other
+ # versions later (https://bugzilla.redhat.com/show_bug.cgi?id=2013332)
+ #
+    # NOTE: This is suboptimal in terms of performance if there are more than a
+    # few package-name-spec patterns to lock, because there is one command
+    # execution per pattern. This will improve by changing the strategy once the
+    # mentioned alleged bugs in the dnf versionlock plugin are fixed.
+ if patterns:
+ outs = []
+ for p in patterns:
+ rc, out, err = module.run_command(
+ [DNF_BIN, "-q", "versionlock", command] + raw_parameter + [p],
+ check_rc=True)
+ outs.append(out)
+ out = "\n".join(outs)
+ else:
+ rc, out, err = module.run_command(
+ [DNF_BIN, "-q", "versionlock", command], check_rc=True)
+ return out
+
+
+# This is equivalent to the _match function of the versionlock plugin.
+def match(entry, pattern):
+ entry = entry.lstrip('!')
+ if entry == pattern:
+ return True
+ m = NEVRA_RE.match(entry)
+ if not m:
+ return False
+ for name in (
+ '%s' % m["name"],
+ '%s.%s' % (m["name"], m["arch"]),
+ '%s-%s' % (m["name"], m["version"]),
+ '%s-%s-%s' % (m["name"], m["version"], m["release"]),
+ '%s-%s:%s' % (m["name"], m["epoch"], m["version"]),
+ '%s-%s-%s.%s' % (m["name"], m["version"], m["release"], m["arch"]),
+ '%s-%s:%s-%s' % (m["name"], m["epoch"], m["version"], m["release"]),
+ '%s:%s-%s-%s.%s' % (m["epoch"], m["name"], m["version"], m["release"],
+ m["arch"]),
+ '%s-%s:%s-%s.%s' % (m["name"], m["epoch"], m["version"], m["release"],
+ m["arch"])
+ ):
+ if fnmatch.fnmatch(name, pattern):
+ return True
+ return False
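+
+# Illustration: match("bash-0:4.4.20-1.el8_4.*", "bash-0:4.*") returns True,
+# because the name-epoch:version form "bash-0:4.4.20" matches the glob.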
+
+
+def get_packages(module, patterns, only_installed=False):
+ packages_available_map_name_evrs = {}
+ rc, out, err = module.run_command(
+ [DNF_BIN, "-q", "repoquery"] +
+ (["--installed"] if only_installed else []) +
+ patterns,
+ check_rc=True)
+
+ for p in out.split():
+ # Extract the NEVRA pattern.
+ m = NEVRA_RE.match(p)
+ if not m:
+ module.fail_json(
+ msg="failed to parse nevra for %s" % p,
+ rc=rc, out=out, err=err)
+
+ evr = "%s:%s-%s" % (m["epoch"],
+ m["version"],
+ m["release"])
+
+ packages_available_map_name_evrs.setdefault(m["name"], set())
+ packages_available_map_name_evrs[m["name"]].add(evr)
+ return packages_available_map_name_evrs
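+
+# Illustration (assumed repoquery output): a line "bash-0:4.4.20-1.el8_4.x86_64"
+# parses to name "bash" and EVR "0:4.4.20-1.el8_4", so get_packages() would
+# return {"bash": {"0:4.4.20-1.el8_4"}}.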
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type="list", elements="str", default=[]),
+ raw=dict(type="bool", default=False),
+ state=dict(type="str", default="present",
+ choices=["present", "absent", "excluded", "clean"]),
+ ),
+ supports_check_mode=True,
+ )
+
+ patterns = module.params["name"]
+ raw = module.params["raw"]
+ state = module.params["state"]
+ changed = False
+ msg = ""
+
+ # Check module pre-requisites.
+ if not os.path.exists(DNF_BIN):
+ module.fail_json(msg="%s was not found" % DNF_BIN)
+ if not os.path.exists(VERSIONLOCK_CONF):
+ module.fail_json(msg="plugin versionlock is required")
+
+ # Check incompatible options.
+ if state == "clean" and patterns:
+ module.fail_json(msg="clean state is incompatible with a name list")
+ if state != "clean" and not patterns:
+ module.fail_json(msg="name list is required for %s state" % state)
+
+ locklist_pre = do_versionlock(module, "list").split()
+
+ specs_toadd = []
+ specs_todelete = []
+
+ if state in ["present", "excluded"]:
+
+ if raw:
+ # Add raw patterns as specs to add.
+ for p in patterns:
+ if ((p if state == "present" else "!" + p)
+ not in locklist_pre):
+ specs_toadd.append(p)
+ else:
+ # Get available packages that match the patterns.
+ packages_map_name_evrs = get_packages(
+ module,
+ patterns)
+
+ # Get installed packages that match the patterns.
+ packages_installed_map_name_evrs = get_packages(
+ module,
+ patterns,
+ only_installed=True)
+
+            # Obtain the list of package specs that require an entry in the
+            # locklist. This list is composed of:
+            #   a) the non-installed packages, with all available versions
+            #   b) the installed packages
+ packages_map_name_evrs.update(packages_installed_map_name_evrs)
+ for name in packages_map_name_evrs:
+ for evr in packages_map_name_evrs[name]:
+ locklist_entry = "%s-%s.*" % (name, evr)
+
+ if (locklist_entry if state == "present"
+ else "!%s" % locklist_entry) not in locklist_pre:
+ specs_toadd.append(locklist_entry)
+
+ if specs_toadd and not module.check_mode:
+ cmd = "add" if state == "present" else "exclude"
+ msg = do_versionlock(module, cmd, patterns=specs_toadd, raw=raw)
+
+ elif state == "absent":
+
+ if raw:
+ # Add raw patterns as specs to delete.
+ for p in patterns:
+ if p in locklist_pre:
+ specs_todelete.append(p)
+
+ else:
+            # Get patterns that match some line in the locklist.
+ for p in patterns:
+ for e in locklist_pre:
+ if match(e, p):
+ specs_todelete.append(p)
+
+ if specs_todelete and not module.check_mode:
+ msg = do_versionlock(
+ module, "delete", patterns=specs_todelete, raw=raw)
+
+ elif state == "clean":
+ specs_todelete = locklist_pre
+
+ if specs_todelete and not module.check_mode:
+ msg = do_versionlock(module, "clear")
+
+ if specs_toadd or specs_todelete:
+ changed = True
+
+ response = {
+ "changed": changed,
+ "msg": msg,
+ "locklist_pre": locklist_pre,
+ "specs_toadd": specs_toadd,
+ "specs_todelete": specs_todelete
+ }
+ if not module.check_mode:
+ response["locklist_post"] = do_versionlock(module, "list").split()
+ else:
+ if state == "clean":
+ response["locklist_post"] = []
+
+ module.exit_json(**response)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dnsimple.py b/ansible_collections/community/general/plugins/modules/dnsimple.py
new file mode 100644
index 000000000..df41f73a6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dnsimple.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+short_description: Interface with dnsimple.com (a DNS hosting service)
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ account_email:
+ description:
+ - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for.
+ - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)."
+ - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0"
+ type: str
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for more information.
+ type: str
+ domain:
+ description:
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple.
+ - If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ type: str
+ record:
+ description:
+      - Record to add. If blank, a record for the domain will be created. Supports the wildcard (*).
+ type: str
+ record_ids:
+ description:
+      - List of record IDs to ensure either exist or do not exist.
+ type: list
+ elements: str
+ type:
+ description:
+ - The type of DNS record to create.
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ]
+ type: str
+ ttl:
+ description:
+ - The TTL to give the new record in seconds.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Record value.
+ - Must be specified when trying to ensure a record exists.
+ type: str
+ priority:
+ description:
+ - Record priority.
+ type: int
+ state:
+ description:
+      - Whether the record should exist or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name.
+      - Only use when C(state) is set to C(present) on a record.
+ type: 'bool'
+ default: false
+ sandbox:
+ description:
+ - Use the DNSimple sandbox environment.
+ - Requires a dedicated account in the dnsimple sandbox environment.
+ - Check U(https://developer.dnsimple.com/sandbox/) for more information.
+ type: 'bool'
+ default: false
+ version_added: 3.5.0
+requirements:
+ - "dnsimple >= 2.0.0"
+author: "Alex Coomans (@drcapulet)"
+'''
+
+EXAMPLES = '''
+- name: Authenticate using email and API token and fetch all domains
+ community.general.dnsimple:
+ account_email: test@example.com
+ account_api_token: dummyapitoken
+ delegate_to: localhost
+
+- name: Delete a domain
+ community.general.dnsimple:
+ domain: my.com
+ state: absent
+ delegate_to: localhost
+
+- name: Create a test.my.com A record to point to 127.0.0.1
+ community.general.dnsimple:
+ domain: my.com
+ record: test
+ type: A
+ value: 127.0.0.1
+ delegate_to: localhost
+ register: record
+
+- name: Delete record using record_ids
+ community.general.dnsimple:
+ domain: my.com
+ record_ids: '{{ record["id"] }}'
+ state: absent
+ delegate_to: localhost
+
+- name: Create a my.com CNAME record to example.com
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: present
+ delegate_to: localhost
+
+- name: Change TTL value for a record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ ttl: 600
+ state: present
+ delegate_to: localhost
+
+- name: Delete the record
+ community.general.dnsimple:
+ domain: my.com
+ record: ''
+ type: CNAME
+ value: example.com
+ state: absent
+ delegate_to: localhost
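+
+# An additional illustrative example based on the options documented above;
+# solo is part of this module's argument spec.
+- name: Ensure test.my.com A record is the only record for that name and type
+  community.general.dnsimple:
+    domain: my.com
+    record: test
+    type: A
+    value: 127.0.0.1
+    solo: true
+    state: present
+  delegate_to: localhost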
+'''
+
+RETURN = r"""# """
+
+import traceback
+import re
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class DNSimpleV2():
+ """class which uses dnsimple-python >= 2"""
+
+ def __init__(self, account_email, account_api_token, sandbox, module):
+ """init"""
+ self.module = module
+ self.account_email = account_email
+ self.account_api_token = account_api_token
+ self.sandbox = sandbox
+ self.pagination_per_page = 30
+ self.dnsimple_client()
+ self.dnsimple_account()
+
+ def dnsimple_client(self):
+ """creates a dnsimple client object"""
+ if self.account_email and self.account_api_token:
+ client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general")
+ else:
+ msg = "Option account_email or account_api_token not provided. " \
+ "Dnsimple authentiction with a .dnsimple config file is not " \
+ "supported with dnsimple-python>=2.0.0"
+ raise DNSimpleException(msg)
+ client.identity.whoami()
+ self.client = client
+
+ def dnsimple_account(self):
+ """select a dnsimple account. If a user token is used for authentication,
+ this user must only have access to a single account"""
+ account = self.client.identity.whoami().data.account
+ # user supplied a user token instead of account api token
+ if not account:
+ accounts = Accounts(self.client).list_accounts().data
+ if len(accounts) != 1:
+ msg = "The provided dnsimple token is a user token with multiple accounts." \
+ "Use an account token or a user token with access to a single account." \
+ "See https://support.dnsimple.com/articles/api-access-token/"
+ raise DNSimpleException(msg)
+ account = accounts[0]
+ self.account = account
+
+ def get_all_domains(self):
+ """returns a list of all domains"""
+ domain_list = self._get_paginated_result(self.client.domains.list_domains, account_id=self.account.id)
+ return [d.__dict__ for d in domain_list]
+
+ def get_domain(self, domain):
+ """returns a single domain by name or id"""
+ try:
+ dr = self.client.domains.get_domain(self.account.id, domain).data.__dict__
+ except DNSimpleException as e:
+ exception_string = str(e.message)
+ if re.match(r"^Domain .+ not found$", exception_string):
+ dr = None
+ else:
+ raise
+ return dr
+
+ def create_domain(self, domain):
+ """create a single domain"""
+ return self.client.domains.create_domain(self.account.id, domain).data.__dict__
+
+ def delete_domain(self, domain):
+ """delete a single domain"""
+ self.client.domains.delete_domain(self.account.id, domain)
+
+ def get_records(self, zone, dnsimple_filter=None):
+ """return dns ressource records which match a specified filter"""
+ records_list = self._get_paginated_result(self.client.zones.list_records,
+ account_id=self.account.id,
+ zone=zone, filter=dnsimple_filter)
+ return [d.__dict__ for d in records_list]
+
+ def delete_record(self, domain, rid):
+ """delete a single dns ressource record"""
+ self.client.zones.delete_record(self.account.id, domain, rid)
+
+ def update_record(self, domain, rid, ttl=None, priority=None):
+ """update a single dns ressource record"""
+ zr = ZoneRecordUpdateInput(ttl=ttl, priority=priority)
+ result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__
+ return result
+
+ def create_record(self, domain, name, record_type, content, ttl=None, priority=None):
+ """create a single dns ressource record"""
+ zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority)
+ return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__
+
+ def _get_paginated_result(self, operation, **options):
+ """return all results of a paginated api response"""
+ records_pagination = operation(per_page=self.pagination_per_page, **options).pagination
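+        # Illustration: with 65 matching records and per_page=30, the API reports
+        # total_pages=3, so the loop below fetches pages 1 through 3 and
+        # concatenates their data.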
+ result_list = []
+ for page in range(1, records_pagination.total_pages + 1):
+ page_data = operation(per_page=self.pagination_per_page, page=page, **options).data
+ result_list.extend(page_data)
+ return result_list
+
+
+DNSIMPLE_IMP_ERR = []
+HAS_DNSIMPLE = False
+try:
+ # try to import dnsimple >= 2.0.0
+ from dnsimple import Client, DNSimpleException
+ from dnsimple.service import Accounts
+ from dnsimple.version import version as dnsimple_version
+ from dnsimple.struct.zone_record import ZoneRecordUpdateInput, ZoneRecordInput
+ HAS_DNSIMPLE = True
+except ImportError:
+ DNSIMPLE_IMP_ERR.append(traceback.format_exc())
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_email=dict(type='str', fallback=(env_fallback, ['DNSIMPLE_EMAIL'])),
+ account_api_token=dict(type='str',
+ no_log=True,
+ fallback=(env_fallback, ['DNSIMPLE_API_TOKEN'])),
+ domain=dict(type='str'),
+ record=dict(type='str'),
+ record_ids=dict(type='list', elements='str'),
+ type=dict(type='str', choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF',
+ 'URL', 'TXT', 'NS', 'SRV', 'NAPTR',
+ 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL', 'CAA']),
+ ttl=dict(type='int', default=3600),
+ value=dict(type='str'),
+ priority=dict(type='int'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ solo=dict(type='bool', default=False),
+ sandbox=dict(type='bool', default=False),
+ ),
+ required_together=[
+ ['record', 'value']
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_DNSIMPLE:
+ module.fail_json(msg=missing_required_lib('dnsimple'), exception=DNSIMPLE_IMP_ERR[0])
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+ sandbox = module.params.get('sandbox')
+
+ DNSIMPLE_MAJOR_VERSION = LooseVersion(dnsimple_version).version[0]
+
+ try:
+ if DNSIMPLE_MAJOR_VERSION < 2:
+ module.fail_json(
+ msg='Support for python-dnsimple < 2 has been removed in community.general 5.0.0. Update python-dnsimple to version >= 2.0.0.')
+ ds = DNSimpleV2(account_email, account_api_token, sandbox, module)
+ # Let's figure out what operation we want to do
+ # No domain, return a list
+ if not domain:
+ all_domains = ds.get_all_domains()
+ module.exit_json(changed=False, result=all_domains)
+
+ # Domain & No record
+ if record is None and not record_ids:
+ if domain.isdigit():
+ typed_domain = int(domain)
+ else:
+ typed_domain = str(domain)
+ dr = ds.get_domain(typed_domain)
+ # domain does not exist
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ response = ds.create_domain(domain)
+ module.exit_json(changed=True, result=response)
+ # state is absent
+ else:
+ if dr:
+ if not module.check_mode:
+ ds.delete_domain(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # need the not none check since record could be an empty string
+ if record is not None:
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ records_list = ds.get_records(domain, dnsimple_filter={'name': record})
+ rr = next((r for r in records_list if r['name'] == record and r['type'] == record_type and r['content'] == value), None)
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records_list if r['name'] == record and r['type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ ds.delete_record(domain, rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['priority'] != priority:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ response = ds.update_record(domain, rr['id'], ttl, priority)
+ module.exit_json(changed=True, result=response)
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ response = ds.create_record(domain, record, record_type, value, ttl, priority)
+ module.exit_json(changed=True, result=response)
+ # state is absent
+ else:
+ if rr:
+ if not module.check_mode:
+ ds.delete_record(domain, rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ # Make sure these record_ids either all exist or none
+ if record_ids:
+ current_records = ds.get_records(domain, dnsimple_filter=None)
+ current_record_ids = [str(d['id']) for d in current_records]
+ wanted_record_ids = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_record_ids) - set(current_record_ids))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+ # state is absent
+ else:
+ difference = list(set(wanted_record_ids) & set(current_record_ids))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ ds.delete_record(domain, rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+ except DNSimpleException as e:
+ if DNSIMPLE_MAJOR_VERSION > 1:
+ module.fail_json(msg="DNSimple exception: %s" % e.message)
+ else:
+ module.fail_json(msg="DNSimple exception: %s" % str(e.args[0]['message']))
+ module.fail_json(msg="Unknown what you wanted me to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dnsimple_info.py b/ansible_collections/community/general/plugins/modules/dnsimple_info.py
new file mode 100644
index 000000000..52fd53303
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dnsimple_info.py
@@ -0,0 +1,329 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Edward Hilgendorf, <edward@hilgendorf.me>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dnsimple_info
+
+short_description: Pull basic info from DNSimple API
+
+version_added: "4.2.0"
+
+description: Retrieve existing records and domains from DNSimple API.
+
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ name:
+ description:
+ - The domain name to retrieve info from.
+ - Will return all associated records for this domain if specified.
+ - If not specified, will return all domains associated with the account ID.
+ type: str
+
+ account_id:
+ description: The account ID to query.
+ required: true
+ type: str
+
+ api_key:
+ description: The API key to use.
+ required: true
+ type: str
+
+ record:
+ description:
+ - The record to find.
+ - If specified, only this record will be returned instead of all records.
+ required: false
+ type: str
+
+ sandbox:
+ description: Whether or not to use sandbox environment.
+ required: false
+ default: false
+ type: bool
+
+author:
+ - Edward Hilgendorf (@edhilgendorf)
+'''
+
+EXAMPLES = r'''
+- name: Get all domains from an account
+ community.general.dnsimple_info:
+ account_id: "1234"
+ api_key: "1234"
+
+- name: Get all records from a domain
+ community.general.dnsimple_info:
+ name: "example.com"
+ account_id: "1234"
+ api_key: "1234"
+
+- name: Get all info from a matching record
+ community.general.dnsimple_info:
+ name: "example.com"
+ record: "subdomain"
+ account_id: "1234"
+ api_key: "1234"
+'''
+
+RETURN = r'''
+dnsimple_domain_info:
+ description: Returns a list of dictionaries of all domains associated with the supplied account ID.
+ type: list
+ elements: dict
+ returned: success when I(name) is not specified
+ sample:
+ - account_id: 1234
+ created_at: '2021-10-16T21:25:42Z'
+ id: 123456
+ last_transferred_at:
+ name: example.com
+ reverse: false
+ secondary: false
+ updated_at: '2021-11-10T20:22:50Z'
+ contains:
+ account_id:
+ description: The account ID.
+ type: int
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ last_transferred_at:
+ description: Date the domain was transferred, or empty if not.
+ type: str
+ name:
+ description: Name of the record.
+ type: str
+ reverse:
+ description: Whether or not it is a reverse zone record.
+ type: bool
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
+
+dnsimple_records_info:
+ description: Returns a list of dictionaries with all records for the domain supplied.
+ type: list
+ elements: dict
+ returned: success when I(name) is specified, but I(record) is not
+ sample:
+ - content: ns1.dnsimple.com admin.dnsimple.com
+ created_at: '2021-10-16T19:07:34Z'
+ id: 12345
+ name: 'catheadbiscuit'
+ parent_id: null
+ priority: null
+ regions:
+ - global
+ system_record: true
+ ttl: 3600
+ type: SOA
+ updated_at: '2021-11-15T23:55:51Z'
+ zone_id: example.com
+ contains:
+ content:
+ description: Content of the returned record.
+ type: str
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ name:
+ description: Name of the record.
+ type: str
+ parent_id:
+ description: Parent record or null.
+ type: int
+ priority:
+ description: Priority setting of the record.
+ type: str
+ regions:
+ description: List of regions where the record is available.
+ type: list
+ system_record:
+ description: Whether or not it is a system record.
+ type: bool
+ ttl:
+ description: Record TTL.
+ type: int
+ type:
+ description: Record type.
+ type: str
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
+ zone_id:
+ description: ID of the zone that the record is associated with.
+ type: str
+dnsimple_record_info:
+ description: Returns a list of dictionaries that match the record supplied.
+ returned: success when I(name) and I(record) are specified
+ type: list
+ elements: dict
+ sample:
+ - content: 1.2.3.4
+ created_at: '2021-11-15T23:55:51Z'
+ id: 123456
+ name: catheadbiscuit
+ parent_id: null
+ priority: null
+ regions:
+ - global
+ system_record: false
+ ttl: 3600
+ type: A
+ updated_at: '2021-11-15T23:55:51Z'
+ zone_id: example.com
+ contains:
+ content:
+ description: Content of the returned record.
+ type: str
+ created_at:
+ description: When the domain entry was created.
+ type: str
+ id:
+ description: ID of the entry.
+ type: int
+ name:
+ description: Name of the record.
+ type: str
+ parent_id:
+ description: Parent record or null.
+ type: int
+ priority:
+ description: Priority setting of the record.
+ type: str
+ regions:
+ description: List of regions where the record is available.
+ type: list
+ system_record:
+ description: Whether or not it is a system record.
+ type: bool
+ ttl:
+ description: Record TTL.
+ type: int
+ type:
+ description: Record type.
+ type: str
+ updated_at:
+ description: When the domain entry was updated.
+ type: str
+ zone_id:
+ description: ID of the zone that the record is associated with.
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
+
+with deps.declare("requests"):
+ from requests import Request, Session
+
+
+def build_url(account, key, is_sandbox):
+ headers = {'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + key}
+ url = 'https://api{sandbox}.dnsimple.com/'.format(
+ sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account
+ req = Request(url=url, headers=headers)
+ prepped_request = req.prepare()
+ return prepped_request
+
+
+def iterate_data(module, request_object):
+ base_url = request_object.url
+ response = Session().send(request_object)
+ if 'pagination' in response.json():
+ data = response.json()["data"]
+ pages = response.json()["pagination"]["total_pages"]
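+        # Illustration: with 250 records and per_page=100 in the request URL,
+        # the first response reports total_pages=3, so '&page=2' and '&page=3'
+        # are fetched below and merged into data.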
+ if int(pages) > 1:
+ for page in range(1, pages):
+ page = page + 1
+ request_object.url = base_url + '&page=' + str(page)
+ new_results = Session().send(request_object)
+ data = data + new_results.json()["data"]
+ return data
+ else:
+        module.fail_json(msg='API call failed; check the account_id, api_key, and sandbox values')
+
+
+def record_info(dnsimple_mod, req_obj):
+ req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET'
+ return iterate_data(dnsimple_mod, req_obj)
+
+
+def domain_info(dnsimple_mod, req_obj):
+ req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?per_page=100', 'GET'
+ return iterate_data(dnsimple_mod, req_obj)
+
+
+def account_info(dnsimple_mod, req_obj):
+ req_obj.url, req_obj.method = req_obj.url + '/zones/?per_page=100', 'GET'
+ return iterate_data(dnsimple_mod, req_obj)
+
+
+def main():
+ # define available arguments/parameters a user can pass to the module
+ fields = {
+ "account_id": {"required": True, "type": "str"},
+ "api_key": {"required": True, "type": "str", "no_log": True},
+ "name": {"required": False, "type": "str"},
+ "record": {"required": False, "type": "str"},
+ "sandbox": {"required": False, "type": "bool", "default": False}
+ }
+
+ result = {
+ 'changed': False
+ }
+
+ module = AnsibleModule(
+ argument_spec=fields,
+ supports_check_mode=True
+ )
+
+ params = module.params
+ req = build_url(params['account_id'],
+ params['api_key'],
+ params['sandbox'])
+
+ deps.validate(module)
+
+ # At minimum we need account and key
+ if params['account_id'] and params['api_key']:
+ # If we have a record return info on that record
+ if params['name'] and params['record']:
+ result['dnsimple_record_info'] = record_info(module, req)
+ module.exit_json(**result)
+
+ # If we have the account only and domain, return records for the domain
+ elif params['name']:
+ result['dnsimple_records_info'] = domain_info(module, req)
+ module.exit_json(**result)
+
+ # If we have the account only, return domains
+ else:
+ result['dnsimple_domain_info'] = account_info(module, req)
+ module.exit_json(**result)
+ else:
+ module.fail_json(msg="Need at least account_id and api_key")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
new file mode 100644
index 000000000..44587ca39
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dnsmadeeasy.py
@@ -0,0 +1,724 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: dnsmadeeasy
+short_description: Interface with dnsmadeeasy.com (a DNS hosting service)
+description:
+ - >
+ Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+ monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ account_key:
+ description:
+ - Account API Key.
+ required: true
+ type: str
+
+ account_secret:
+ description:
+ - Account Secret Key.
+ required: true
+ type: str
+
+ domain:
+ description:
+      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
+ required: true
+ type: str
+
+ sandbox:
+ description:
+ - Decides if the sandbox API should be used. Otherwise (default) the production API of DNS Made Easy is used.
+ type: bool
+ default: false
+
+ record_name:
+ description:
+      - Record name to get/create/delete/update. If record_name is not specified, all records for the domain will be returned in "result" regardless
+        of the state argument.
+ type: str
+
+ record_type:
+ description:
+ - Record type.
+ choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
+ type: str
+
+ record_value:
+ description:
+ - >
+ Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
+ SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
+ - >
+      If record_value is not specified, no changes will be made and the record will be returned in 'result'
+      (in other words, this module can be used to fetch a record's current id, type, and ttl).
+ type: str
+
+ record_ttl:
+ description:
+ - record's "Time to live". Number of seconds the record remains cached in DNS servers.
+ default: 1800
+ type: int
+
+ state:
+ description:
+      - Whether the record should exist or not.
+ required: true
+ choices: [ 'present', 'absent' ]
+ type: str
+
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+
+ monitor:
+ description:
+ - If C(true), add or change the monitor. This is applicable only for A records.
+ type: bool
+ default: false
+
+ systemDescription:
+ description:
+ - Description used by the monitor.
+ default: ''
+ type: str
+
+ maxEmails:
+ description:
+ - Number of emails sent to the contact list by the monitor.
+ default: 1
+ type: int
+
+ protocol:
+ description:
+ - Protocol used by the monitor.
+ default: 'HTTP'
+ choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
+ type: str
+
+ port:
+ description:
+ - Port used by the monitor.
+ default: 80
+ type: int
+
+ sensitivity:
+ description:
+      - Number of checks the monitor performs before a failover occurs, where Low = 8, Medium = 5, and High = 3.
+ default: 'Medium'
+ choices: ['Low', 'Medium', 'High']
+ type: str
+
+ contactList:
+ description:
+ - Name or id of the contact list that the monitor will notify.
+ - The default C('') means the Account Owner.
+ type: str
+
+ httpFqdn:
+ description:
+ - The fully qualified domain name used by the monitor.
+ type: str
+
+ httpFile:
+ description:
+ - The file at the Fqdn that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ httpQueryString:
+ description:
+ - The string in the httpFile that the monitor queries for HTTP or HTTPS.
+ type: str
+
+ failover:
+ description:
+ - If C(true), add or change the failover. This is applicable only for A records.
+ type: bool
+ default: false
+
+ autoFailover:
+ description:
+ - If true, fallback to the primary IP address is manual after a failover.
+ - If false, fallback to the primary IP address is automatic after a failover.
+ type: bool
+ default: false
+
+ ip1:
+ description:
+ - Primary IP address for the failover.
+ - Required if adding or changing the monitor or failover.
+ type: str
+
+ ip2:
+ description:
+ - Secondary IP address for the failover.
+ - Required if adding or changing the failover.
+ type: str
+
+ ip3:
+ description:
+ - Tertiary IP address for the failover.
+ type: str
+
+ ip4:
+ description:
+ - Quaternary IP address for the failover.
+ type: str
+
+ ip5:
+ description:
+ - Quinary IP address for the failover.
+ type: str
+
+notes:
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
+ - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
+    These values can be registered and used in your playbooks.
+ - Only A records can have a monitor or failover.
+ - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
+ - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
+ - The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
+
+requirements: [ hashlib, hmac ]
+author: "Brice Burgess (@briceburg)"
+'''
+
+EXAMPLES = '''
+- name: Fetch my.com domain records
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ register: response
+
+- name: Create a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+
+- name: Update the previously created record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_value: 192.0.2.23
+
+- name: Fetch a specific record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ register: response
+
+- name: Delete a record
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ record_type: A
+ state: absent
+ record_name: test
+
+- name: Add a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: true
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+
+- name: Add a failover with additional IP addresses
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: true
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ ip3: 127.0.0.4
+ ip4: 127.0.0.5
+ ip5: 127.0.0.6
+
+- name: Add a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: true
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: my contact list
+
+- name: Add a monitor with http options
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: true
+ ip1: 127.0.0.2
+ protocol: HTTP # default
+ port: 80 # default
+ maxEmails: 1
+ systemDescription: Monitor Test A record
+ contactList: 1174 # contact list id
+ httpFqdn: http://my.com
+ httpFile: example
+ httpQueryString: some string
+
+- name: Add a monitor and a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: true
+ ip1: 127.0.0.2
+ ip2: 127.0.0.3
+ monitor: true
+ protocol: HTTPS
+ port: 443
+ maxEmails: 1
+ systemDescription: monitoring my.com status
+ contactList: emergencycontacts
+
+- name: Remove a failover
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ failover: false
+
+- name: Remove a monitor
+ community.general.dnsmadeeasy:
+ account_key: key
+ account_secret: secret
+ domain: my.com
+ state: present
+ record_name: test
+ record_type: A
+ record_value: 127.0.0.1
+ monitor: false
+'''
+
+# ============================================
+# DNSMadeEasy module specific support methods.
+#
+
+import json
+import hashlib
+import hmac
+import locale
+from time import strftime, gmtime
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six import string_types
+
+
+class DME2(object):
+
+ def __init__(self, apikey, secret, domain, sandbox, module):
+ self.module = module
+
+ self.api = apikey
+ self.secret = secret
+
+ if sandbox:
+ self.baseurl = 'https://api.sandbox.dnsmadeeasy.com/V2.0/'
+ self.module.warn(warning="Sandbox is enabled. All actions are made against the URL %s" % self.baseurl)
+ else:
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
+
+ self.domain = str(domain)
+ self.domain_map = None # ["domain_name"] => ID
+ self.record_map = None # ["record_name"] => ID
+ self.records = None # ["record_ID"] => <record>
+ self.all_records = None
+ self.contactList_map = None # ["contactList_name"] => ID
+
+ # Lookup the domain ID if passed as a domain name vs. ID
+ if not self.domain.isdigit():
+ self.domain = self.getDomainByName(self.domain)['id']
+
+ self.record_url = 'dns/managed/' + str(self.domain) + '/records'
+ self.monitor_url = 'monitor'
+ self.contactList_url = 'contactList'
+
+ def _headers(self):
+ currTime = self._get_date()
+ hashstring = self._create_hash(currTime)
+ headers = {'x-dnsme-apiKey': self.api,
+ 'x-dnsme-hmac': hashstring,
+ 'x-dnsme-requestDate': currTime,
+ 'content-type': 'application/json'}
+ return headers
+
+ def _get_date(self):
+ locale.setlocale(locale.LC_TIME, 'C')
+ return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
+
+ def _create_hash(self, rightnow):
+ return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
+
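+    # Illustrative sketch of the resulting authentication headers:
+    #   x-dnsme-apiKey:      <account_key>
+    #   x-dnsme-requestDate: Mon, 15 Nov 2021 23:55:51 GMT
+    #   x-dnsme-hmac:        hex digest of HMAC-SHA1(account_secret, requestDate)
+    # The API rejects requests whose requestDate drifts too far from real
+    # time, hence the NTP note in the module documentation.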
+ def query(self, resource, method, data=None):
+ url = self.baseurl + resource
+ if data and not isinstance(data, string_types):
+ data = urlencode(data)
+
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
+
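+        # A successful DELETE typically returns an empty body, hence the
+        # fallback to an empty dict below.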
+ try:
+ return json.load(response)
+ except Exception:
+ return {}
+
+ def getDomain(self, domain_id):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.domains.get(domain_id, False)
+
+ def getDomainByName(self, domain_name):
+ if not self.domain_map:
+ self._instMap('domain')
+
+ return self.getDomain(self.domain_map.get(domain_name, 0))
+
+ def getDomains(self):
+ return self.query('dns/managed', 'GET')['data']
+
+ def getRecord(self, record_id):
+ if not self.record_map:
+ self._instMap('record')
+
+ return self.records.get(record_id, False)
+
+ # Try to find a single record matching this one.
+ # How we do this depends on the type of record. For instance, there
+ # can be several MX records for a single record_name while there can
+ # only be a single CNAME for a particular record_name. Note also that
+ # there can be several records with different types for a single name.
+ def getMatchingRecord(self, record_name, record_type, record_value):
+ # Get all the records if not already cached
+ if not self.all_records:
+ self.all_records = self.getRecords()
+
+ if record_type in ["CNAME", "ANAME", "HTTPRED", "PTR"]:
+ for result in self.all_records:
+ if result['name'] == record_name and result['type'] == record_type:
+ return result
+ return False
+ elif record_type in ["A", "AAAA", "MX", "NS", "TXT", "SRV"]:
+ for result in self.all_records:
+ if record_type == "MX":
+ value = record_value.split(" ")[1]
+ # Note that TXT records are surrounded by quotes in the API response.
+ elif record_type == "TXT":
+ value = '"{0}"'.format(record_value)
+ elif record_type == "SRV":
+ value = record_value.split(" ")[3]
+ else:
+ value = record_value
+ if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
+ return result
+ return False
+ else:
+ raise Exception('record_type not yet supported')
+
+ def getRecords(self):
+ return self.query(self.record_url, 'GET')['data']
+
+ def _instMap(self, type):
+ # @TODO cache this call so it's executed only once per ansible execution
+ map = {}
+ results = {}
+
+ # iterate over e.g. self.getDomains() || self.getRecords()
+ for result in getattr(self, 'get' + type.title() + 's')():
+
+ map[result['name']] = result['id']
+ results[result['id']] = result
+
+ # e.g. self.domain_map || self.record_map
+ setattr(self, type + '_map', map)
+ setattr(self, type + 's', results) # e.g. self.domains || self.records
+
+ def prepareRecord(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def createRecord(self, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url, 'POST', data)
+
+ def updateRecord(self, record_id, data):
+        # @TODO update the cache w/ resultant record + id when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
+
+ def deleteRecord(self, record_id):
+        # @TODO remove record from the cache when implemented
+ return self.query(self.record_url + '/' + str(record_id), 'DELETE')
+
+ def getMonitor(self, record_id):
+ return self.query(self.monitor_url + '/' + str(record_id), 'GET')
+
+ def updateMonitor(self, record_id, data):
+ return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
+
+ def prepareMonitor(self, data):
+ return json.dumps(data, separators=(',', ':'))
+
+ def getContactList(self, contact_list_id):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.contactLists.get(contact_list_id, False)
+
+ def getContactlists(self):
+ return self.query(self.contactList_url, 'GET')['data']
+
+ def getContactListByName(self, name):
+ if not self.contactList_map:
+ self._instMap('contactList')
+
+ return self.getContactList(self.contactList_map.get(name, 0))
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_key=dict(required=True, no_log=True),
+ account_secret=dict(required=True, no_log=True),
+ domain=dict(required=True),
+ sandbox=dict(default=False, type='bool'),
+ state=dict(required=True, choices=['present', 'absent']),
+ record_name=dict(required=False),
+ record_type=dict(required=False, choices=[
+ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
+ record_value=dict(required=False),
+ record_ttl=dict(required=False, default=1800, type='int'),
+ monitor=dict(default=False, type='bool'),
+ systemDescription=dict(default=''),
+ maxEmails=dict(default=1, type='int'),
+ protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
+ port=dict(default=80, type='int'),
+ sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
+ contactList=dict(default=None),
+ httpFqdn=dict(required=False),
+ httpFile=dict(required=False),
+ httpQueryString=dict(required=False),
+ failover=dict(default=False, type='bool'),
+ autoFailover=dict(default=False, type='bool'),
+ ip1=dict(required=False),
+ ip2=dict(required=False),
+ ip3=dict(required=False),
+ ip4=dict(required=False),
+ ip5=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_together=[
+ ['record_value', 'record_ttl', 'record_type']
+ ],
+ required_if=[
+ ['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
+ ['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
+ ]
+ )
+
+ protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
+ sensitivities = dict(Low=8, Medium=5, High=3)
+
+ DME = DME2(module.params["account_key"], module.params[
+ "account_secret"], module.params["domain"], module.params["sandbox"], module)
+ state = module.params["state"]
+ record_name = module.params["record_name"]
+ record_type = module.params["record_type"]
+ record_value = module.params["record_value"]
+
+ # Follow Keyword Controlled Behavior
+ if record_name is None:
+ domain_records = DME.getRecords()
+ if not domain_records:
+ module.fail_json(
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
+ module.exit_json(changed=False, result=domain_records)
+
+ # Fetch existing record + Build new one
+ current_record = DME.getMatchingRecord(record_name, record_type, record_value)
+ new_record = {'name': record_name}
+ for i in ["record_value", "record_type", "record_ttl"]:
+ if not module.params[i] is None:
+ new_record[i[len("record_"):]] = module.params[i]
+    # Special handling for MX records
+    if new_record.get("type") == "MX":
+ new_record["mxLevel"] = new_record["value"].split(" ")[0]
+ new_record["value"] = new_record["value"].split(" ")[1]
+
+ # Special handling for SRV records
+ if new_record["type"] == "SRV":
+ new_record["priority"] = new_record["value"].split(" ")[0]
+ new_record["weight"] = new_record["value"].split(" ")[1]
+ new_record["port"] = new_record["value"].split(" ")[2]
+ new_record["value"] = new_record["value"].split(" ")[3]
+
+ # Fetch existing monitor if the A record indicates it should exist and build the new monitor
+ current_monitor = dict()
+ new_monitor = dict()
+ if current_record and current_record['type'] == 'A' and current_record.get('monitor'):
+ current_monitor = DME.getMonitor(current_record['id'])
+
+ # Build the new monitor
+ for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
+ 'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
+ 'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
+ if module.params[i] is not None:
+ if i == 'protocol':
+ # The API requires protocol to be a numeric in the range 1-6
+ new_monitor['protocolId'] = protocols[module.params[i]]
+ elif i == 'sensitivity':
+ # The API requires sensitivity to be a numeric of 8, 5, or 3
+ new_monitor[i] = sensitivities[module.params[i]]
+ elif i == 'contactList':
+ # The module accepts either the name or the id of the contact list
+ contact_list_id = module.params[i]
+ if not contact_list_id.isdigit() and contact_list_id != '':
+ contact_list = DME.getContactListByName(contact_list_id)
+ if not contact_list:
+ module.fail_json(msg="Contact list {0} does not exist".format(contact_list_id))
+ contact_list_id = contact_list.get('id', '')
+ new_monitor['contactListId'] = contact_list_id
+ else:
+ # The module option names match the API field names
+ new_monitor[i] = module.params[i]
+
+ # Compare new record against existing one
+ record_changed = False
+ if current_record:
+ for i in new_record:
+ # Remove leading and trailing quote character from values because TXT records
+ # are surrounded by quotes.
+ if str(current_record[i]).strip('"') != str(new_record[i]):
+ record_changed = True
+ new_record['id'] = str(current_record['id'])
+
+ monitor_changed = False
+ if current_monitor:
+ for i in new_monitor:
+ if str(current_monitor.get(i)) != str(new_monitor[i]):
+ monitor_changed = True
+
+ # Follow Keyword Controlled Behavior
+ if state == 'present':
+ # return the record if no value is specified
+ if "value" not in new_record:
+ if not current_record:
+ module.fail_json(
+ msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ # create record and monitor as the record does not exist
+ if not current_record:
+ record = DME.createRecord(DME.prepareRecord(new_record))
+ if new_monitor.get('monitor') and record_type == "A":
+ monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
+ module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
+ else:
+ module.exit_json(changed=True, result=dict(record=record, monitor=current_monitor))
+
+ # update the record
+ updated = False
+ if record_changed:
+ DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
+ updated = True
+ if monitor_changed:
+ DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
+ updated = True
+ if updated:
+ module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
+
+ # return the record (no changes)
+ module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
+
+ elif state == 'absent':
+ changed = False
+ # delete the record (and the monitor/failover) if it exists
+ if current_record:
+ DME.deleteRecord(current_record['id'])
+ module.exit_json(changed=True)
+
+ # record does not exist, return w/o change.
+ module.exit_json(changed=changed)
+
+ else:
+ module.fail_json(
+ msg="'%s' is an unknown value for the state argument" % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/dpkg_divert.py b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
new file mode 100644
index 000000000..4a1651f51
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/dpkg_divert.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2020, Yann Amar <quidame@poivron.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: dpkg_divert
+short_description: Override a Debian package's version of a file
+version_added: '0.2.0'
+author:
+ - quidame (@quidame)
+description:
+ - A diversion is for C(dpkg) the knowledge that only a given package
+ (or the local administrator) is allowed to install a file at a given
+ location. Other packages shipping their own version of this file will
+ be forced to I(divert) it, i.e. to install it at another location. It
+    allows one to keep changes in a file provided by a Debian package by
+    preventing it from being overwritten on package upgrade.
+  - This module manages diversions of Debian package files using the
+ C(dpkg-divert) commandline tool. It can either create or remove a
+ diversion for a given file, but also update an existing diversion
+ to modify its I(holder) and/or its I(divert) location.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ path:
+ description:
+ - The original and absolute path of the file to be diverted or
+ undiverted. This path is unique, i.e. it is not possible to get
+ two diversions for the same I(path).
+ required: true
+ type: path
+ state:
+ description:
+ - When I(state=absent), remove the diversion of the specified
+ I(path); when I(state=present), create the diversion if it does
+ not exist, or update its package I(holder) or I(divert) location,
+ if it already exists.
+ type: str
+ default: present
+ choices: [absent, present]
+ holder:
+ description:
+ - The name of the package whose copy of file is not diverted, also
+ known as the diversion holder or the package the diversion belongs
+ to.
+ - The actual package does not have to be installed or even to exist
+      for its name to be valid. If not specified, the diversion is held
+      by 'LOCAL', which is reserved by/for dpkg for local diversions.
+ - This parameter is ignored when I(state=absent).
+ type: str
+ divert:
+ description:
+ - The location where the versions of file will be diverted.
+ - Default is to add suffix C(.distrib) to the file path.
+ - This parameter is ignored when I(state=absent).
+ type: path
+ rename:
+ description:
+ - Actually move the file aside (when I(state=present)) or back (when
+ I(state=absent)), but only when changing the state of the diversion.
+ This parameter has no effect when attempting to add a diversion that
+      already exists or when removing a nonexistent one.
+ - Unless I(force=true), renaming fails if the destination file already
+ exists (this lock being a dpkg-divert feature, and bypassing it being
+ a module feature).
+ type: bool
+ default: false
+ force:
+ description:
+ - When I(rename=true) and I(force=true), renaming is performed even if
+ the target of the renaming exists, i.e. the existing contents of the
+ file at this location will be lost.
+ - This parameter is ignored when I(rename=false).
+ type: bool
+ default: false
+requirements:
+ - dpkg-divert >= 1.15.0 (Debian family)
+'''
+
+EXAMPLES = r'''
+- name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+
+- name: Divert /usr/bin/busybox by package 'branding'
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ holder: branding
+
+- name: Divert and rename busybox to busybox.dpkg-divert
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ divert: /usr/bin/busybox.dpkg-divert
+ rename: true
+
+- name: Remove the busybox diversion and move the diverted file back
+ community.general.dpkg_divert:
+ path: /usr/bin/busybox
+ state: absent
+ rename: true
+ force: true
+'''
+
+RETURN = r'''
+commands:
+  description: The dpkg-divert commands run internally by the module.
+ type: list
+ returned: on_success
+ elements: str
+ sample: "/usr/bin/dpkg-divert --no-rename --remove /etc/foobarrc"
+messages:
+ description: The dpkg-divert relevant messages (stdout or stderr).
+ type: list
+ returned: on_success
+ elements: str
+ sample: "Removing 'local diversion of /etc/foobarrc to /etc/foobarrc.distrib'"
+diversion:
+ description: The status of the diversion after task execution.
+ type: dict
+ returned: always
+ contains:
+ divert:
+ description: The location of the diverted file.
+ type: str
+ holder:
+ description: The package holding the diversion.
+ type: str
+ path:
+ description: The path of the file to divert/undivert.
+ type: str
+ state:
+ description: The state of the diversion.
+ type: str
+ sample:
+ {
+ "divert": "/etc/foobarrc.distrib",
+ "holder": "LOCAL",
+ "path": "/etc/foobarrc",
+ "state": "present"
+ }
+'''
+
+
+import re
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+def diversion_state(module, command, path):
+ diversion = dict(path=path, state='absent', divert=None, holder=None)
+ rc, out, err = module.run_command([command, '--listpackage', path], check_rc=True)
+ if out:
+ diversion['state'] = 'present'
+ diversion['holder'] = out.rstrip()
+ rc, out, err = module.run_command([command, '--truename', path], check_rc=True)
+ diversion['divert'] = out.rstrip()
+ return diversion
+
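+# For illustration, the two dpkg-divert queries used above behave like:
+#   $ dpkg-divert --listpackage /etc/foobarrc
+#   LOCAL
+#   $ dpkg-divert --truename /etc/foobarrc
+#   /etc/foobarrc.distrib
+# An empty --listpackage output means no diversion exists for the path.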
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ state=dict(required=False, type='str', default='present', choices=['absent', 'present']),
+ holder=dict(required=False, type='str'),
+ divert=dict(required=False, type='path'),
+ rename=dict(required=False, type='bool', default=False),
+ force=dict(required=False, type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ state = module.params['state']
+ holder = module.params['holder']
+ divert = module.params['divert']
+ rename = module.params['rename']
+ force = module.params['force']
+
+ diversion_wanted = dict(path=path, state=state)
+ changed = False
+
+ DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
+ MAINCOMMAND = [DPKG_DIVERT]
+
+ # Option --listpackage is needed and comes with 1.15.0
+ rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'], check_rc=True)
+ [current_version] = [x for x in stdout.splitlines()[0].split() if re.match('^[0-9]+[.][0-9]', x)]
+ if LooseVersion(current_version) < LooseVersion("1.15.0"):
+ module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
+ no_rename_is_supported = (LooseVersion(current_version) >= LooseVersion("1.19.1"))
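+    # For reference, 'dpkg-divert --version' prints a first line roughly like
+    # 'Debian dpkg-divert version 1.19.7 (amd64).'; the expression above
+    # extracts the '1.19.7' token from it.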
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+ path_exists = os.path.exists(b_path)
+ # Used for things not doable with a single dpkg-divert command (as forced
+ # renaming of files, and diversion's 'holder' or 'divert' updates).
+ target_exists = False
+ truename_exists = False
+
+ diversion_before = diversion_state(module, DPKG_DIVERT, path)
+ if diversion_before['state'] == 'present':
+ b_divert = to_bytes(diversion_before['divert'], errors='surrogate_or_strict')
+ truename_exists = os.path.exists(b_divert)
+
+ # Append options as requested in the task parameters, but ignore some of
+ # them when removing the diversion.
+ if rename:
+ MAINCOMMAND.append('--rename')
+ elif no_rename_is_supported:
+ MAINCOMMAND.append('--no-rename')
+
+ if state == 'present':
+ if holder and holder != 'LOCAL':
+ MAINCOMMAND.extend(['--package', holder])
+ diversion_wanted['holder'] = holder
+ else:
+ MAINCOMMAND.append('--local')
+ diversion_wanted['holder'] = 'LOCAL'
+
+ if divert:
+ MAINCOMMAND.extend(['--divert', divert])
+ target = divert
+ else:
+ target = '%s.distrib' % path
+
+ MAINCOMMAND.extend(['--add', path])
+ diversion_wanted['divert'] = target
+ b_target = to_bytes(target, errors='surrogate_or_strict')
+ target_exists = os.path.exists(b_target)
+
+ else:
+ MAINCOMMAND.extend(['--remove', path])
+ diversion_wanted['divert'] = None
+ diversion_wanted['holder'] = None
+
+ # Start to populate the returned objects.
+ diversion = diversion_before.copy()
+ maincommand = ' '.join(MAINCOMMAND)
+ commands = [maincommand]
+
+ if module.check_mode or diversion_wanted == diversion_before:
+ MAINCOMMAND.insert(1, '--test')
+ diversion_after = diversion_wanted
+
+ # Just try and see
+ rc, stdout, stderr = module.run_command(MAINCOMMAND)
+
+ if rc == 0:
+ messages = [stdout.rstrip()]
+
+ # else... cases of failure with dpkg-divert are:
+ # - The diversion does not belong to the same package (or LOCAL)
+ # - The divert filename is not the same (e.g. path.distrib != path.divert)
+ # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
+ # diverted file exist)
+
+ elif state != diversion_before['state']:
+ # There should be no case with 'divert' and 'holder' when creating the
+ # diversion from none, and they're ignored when removing the diversion.
+ # So this is all about renaming...
+ if rename and path_exists and (
+ (state == 'absent' and truename_exists) or
+ (state == 'present' and target_exists)):
+ if not force:
+ msg = "Set 'force' param to True to force renaming of files."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ else:
+ msg = "Unexpected error while changing state of the diversion."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+ to_remove = path
+ if state == 'present':
+ to_remove = target
+
+ if not module.check_mode:
+ try:
+ b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
+ os.unlink(b_remove)
+ except OSError as e:
+ msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+
+ messages = [stdout.rstrip()]
+
+ # The situation is that we want to modify the settings (holder or divert)
+ # of an existing diversion. dpkg-divert does not handle this, and we have
+ # to remove the existing diversion first, and then set a new one.
+ else:
+ RMDIVERSION = [DPKG_DIVERT, '--remove', path]
+ if no_rename_is_supported:
+ RMDIVERSION.insert(1, '--no-rename')
+ rmdiversion = ' '.join(RMDIVERSION)
+
+ if module.check_mode:
+ RMDIVERSION.insert(1, '--test')
+
+ if rename:
+ MAINCOMMAND.remove('--rename')
+ if no_rename_is_supported:
+ MAINCOMMAND.insert(1, '--no-rename')
+ maincommand = ' '.join(MAINCOMMAND)
+
+ commands = [rmdiversion, maincommand]
+ rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)
+
+ if module.check_mode:
+ messages = [rmdout.rstrip(), 'Running in check mode']
+ else:
+ rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
+ messages = [rmdout.rstrip(), stdout.rstrip()]
+
+ # Avoid if possible to orphan files (i.e. to dereference them in diversion
+ # database but let them in place), but do not make renaming issues fatal.
+ # BTW, this module is not about state of files involved in the diversion.
+ old = diversion_before['divert']
+ new = diversion_wanted['divert']
+ if new != old:
+ b_old = to_bytes(old, errors='surrogate_or_strict')
+ b_new = to_bytes(new, errors='surrogate_or_strict')
+ if os.path.exists(b_old) and not os.path.exists(b_new):
+ try:
+ os.rename(b_old, b_new)
+            except OSError:
+                pass
+
+ if not module.check_mode:
+ diversion_after = diversion_state(module, DPKG_DIVERT, path)
+
+ diversion = diversion_after.copy()
+ diff = dict()
+ if module._diff:
+ diff['before'] = diversion_before
+ diff['after'] = diversion_after
+
+ if diversion_after != diversion_before:
+ changed = True
+
+ if diversion_after == diversion_wanted:
+ module.exit_json(changed=changed, diversion=diversion,
+ commands=commands, messages=messages, diff=diff)
+ else:
+ msg = "Unexpected error: see stdout and stderr for details."
+ module.fail_json(changed=changed, cmd=maincommand, rc=rc, msg=msg,
+ stderr=stderr, stdout=stdout, diversion=diversion)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/easy_install.py b/ansible_collections/community/general/plugins/modules/easy_install.py
new file mode 100644
index 000000000..564493180
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/easy_install.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: easy_install
+short_description: Installs Python libraries
+description:
+  - Installs Python libraries, optionally in a I(virtualenv).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - A Python library name
+ required: true
+ virtualenv:
+ type: str
+ description:
+      - An optional I(virtualenv) directory path to install into. If the
+        I(virtualenv) does not exist, it is created automatically.
+ virtualenv_site_packages:
+ description:
+ - Whether the virtual environment will inherit packages from the
+ global site-packages directory. Note that if this setting is
+ changed on an already existing virtual environment it will not
+ have any effect, the environment must be deleted and newly
+ created.
+ type: bool
+ default: false
+ virtualenv_command:
+ type: str
+ description:
+ - The command to create the virtual environment with. For example
+ C(pyvenv), C(virtualenv), C(virtualenv2).
+ default: virtualenv
+ executable:
+ type: str
+ description:
+ - The explicit executable or a pathname to the executable to be used to
+ run easy_install for a specific version of Python installed in the
+ system. For example C(easy_install-3.3), if there are both Python 2.7
+ and 3.3 installations in the system and you want to run easy_install
+ for the Python 3.3 installation.
+ default: easy_install
+ state:
+ type: str
+ description:
+ - The desired state of the library. C(latest) ensures that the latest version is installed.
+ choices: [present, latest]
+ default: present
+notes:
+ - Please note that the C(easy_install) module can only install Python
+ libraries. Thus this module is not able to remove libraries. It is
+ generally recommended to use the M(ansible.builtin.pip) module which you can first install
+ using M(community.general.easy_install).
+ - Also note that I(virtualenv) must be installed on the remote host if the
+ C(virtualenv) parameter is specified.
+requirements: [ "virtualenv" ]
+author: "Matt Wright (@mattupstate)"
+'''
+
+EXAMPLES = '''
+- name: Install or update pip
+ community.general.easy_install:
+ name: pip
+ state: latest
+
+- name: Install Bottle into the specified virtualenv
+ community.general.easy_install:
+ name: bottle
+ virtualenv: /webapps/myapp/venv
+'''
+
+import os
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+
+
+def install_package(module, name, easy_install, executable_arguments):
+ cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name)
+ rc, out, err = module.run_command(cmd)
+ return rc, out, err
+
+
+def _is_package_installed(module, name, easy_install, executable_arguments):
+ # Copy and add to the arguments
+ executable_arguments = executable_arguments[:]
+ executable_arguments.append('--dry-run')
+ rc, out, err = install_package(module, name, easy_install, executable_arguments)
+ if rc:
+ module.fail_json(msg=err)
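+    # Heuristic: with --dry-run, easy_install prints 'Downloading ...' for
+    # packages it would have to fetch, i.e. ones not already installed.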
+ return 'Downloading' not in out
+
+
+def _get_easy_install(module, env=None, executable=None):
+ candidate_easy_inst_basenames = ['easy_install']
+ easy_install = None
+ if executable is not None:
+ if os.path.isabs(executable):
+ easy_install = executable
+ else:
+ candidate_easy_inst_basenames.insert(0, executable)
+ if easy_install is None:
+ if env is None:
+ opt_dirs = []
+ else:
+ # Try easy_install with the virtualenv directory first.
+ opt_dirs = ['%s/bin' % env]
+ for basename in candidate_easy_inst_basenames:
+ easy_install = module.get_bin_path(basename, False, opt_dirs)
+ if easy_install is not None:
+ break
+ # easy_install should have been found by now. The final call to
+ # get_bin_path will trigger fail_json.
+ if easy_install is None:
+ basename = candidate_easy_inst_basenames[0]
+ easy_install = module.get_bin_path(basename, True, opt_dirs)
+ return easy_install
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'latest'],
+ type='str'),
+ virtualenv=dict(default=None, required=False),
+ virtualenv_site_packages=dict(default=False, type='bool'),
+ virtualenv_command=dict(default='virtualenv', required=False),
+ executable=dict(default='easy_install', required=False),
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ env = module.params['virtualenv']
+ executable = module.params['executable']
+ site_packages = module.params['virtualenv_site_packages']
+ virtualenv_command = module.params['virtualenv_command']
+ executable_arguments = []
+ if module.params['state'] == 'latest':
+ executable_arguments.append('--upgrade')
+
+ rc = 0
+ err = ''
+ out = ''
+
+ if env:
+ virtualenv = module.get_bin_path(virtualenv_command, True)
+
+ if not os.path.exists(os.path.join(env, 'bin', 'activate')):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ command = '%s %s' % (virtualenv, env)
+ if site_packages:
+ command += ' --system-site-packages'
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
+
+ rc += rc_venv
+ out += out_venv
+ err += err_venv
+
+ easy_install = _get_easy_install(module, env, executable)
+
+ cmd = None
+ changed = False
+ installed = _is_package_installed(module, name, easy_install, executable_arguments)
+
+ if not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc_easy_inst, out_easy_inst, err_easy_inst = install_package(module, name, easy_install, executable_arguments)
+
+ rc += rc_easy_inst
+ out += out_easy_inst
+ err += err_easy_inst
+
+ changed = True
+
+ if rc != 0:
+ module.fail_json(msg=err, cmd=cmd)
+
+ module.exit_json(changed=changed, binary=easy_install,
+ name=name, virtualenv=env)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ejabberd_user.py b/ansible_collections/community/general/plugins/modules/ejabberd_user.py
new file mode 100644
index 000000000..397207ae6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ejabberd_user.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013, Peter Sprygada <sprygada@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ejabberd_user
+author: "Peter Sprygada (@privateip)"
+short_description: Manages users for ejabberd servers
+requirements:
+ - ejabberd with mod_admin_extra
+description:
+ - This module provides user management for ejabberd servers
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ username:
+ type: str
+ description:
+      - The name of the user to manage.
+ required: true
+ host:
+ type: str
+ description:
+      - The ejabberd host associated with this username.
+ required: true
+ password:
+ type: str
+ description:
+      - The password to assign to the username.
+ required: false
+ logging:
+ description:
+      - Enables or disables the local syslog facility for this module.
+ required: false
+ default: false
+ type: bool
+ state:
+ type: str
+ description:
+      - Describes the desired state of the user to be managed.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+notes:
+  - The password parameter is required for I(state=present) only.
+  - Passwords must be stored in clear text for this release.
+ - The ejabberd configuration file must include mod_admin_extra as a module.
+'''
+EXAMPLES = '''
+# Example playbook entries using the ejabberd_user module to manage users state.
+
+- name: Create a user if it does not exist
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ password: password
+
+- name: Delete a user if it exists
+ community.general.ejabberd_user:
+ username: test
+ host: server
+ state: absent
+'''
+
+import syslog
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class EjabberdUser(object):
+ """ This object represents a user resource for an ejabberd server. The
+ object manages user creation and deletion using ejabberdctl. The following
+ commands are currently supported:
+ * ejabberdctl register
+        * ejabberdctl unregister
+ """
+
+ def __init__(self, module):
+ self.module = module
+ self.logging = module.params.get('logging')
+ self.state = module.params.get('state')
+ self.host = module.params.get('host')
+ self.user = module.params.get('username')
+ self.pwd = module.params.get('password')
+
+ @property
+ def changed(self):
+ """ This method will check the current user and see if the password has
+ changed. It will return True if the user does not match the supplied
+ credentials and False if it does not
+ """
+ return self.run_command('check_password', [self.user, self.host, self.pwd])
+
+ @property
+ def exists(self):
+ """ This method will check to see if the supplied username exists for
+ host specified. If the user exists True is returned, otherwise False
+ is returned
+ """
+ return self.run_command('check_account', [self.user, self.host])
+
+ def log(self, entry):
+ """ This method will log information to the local syslog facility """
+ if self.logging:
+ syslog.openlog('ansible-%s' % self.module._name)
+ syslog.syslog(syslog.LOG_NOTICE, entry)
+
+ def run_command(self, cmd, options):
+ """ This method will run the any command specified and return the
+ returns using the Ansible common module
+ """
+ cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options
+ self.log('command: %s' % " ".join(cmd))
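+        # For example, creating a user ends up running something like:
+        #   ejabberdctl register test server password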
+ return self.module.run_command(cmd)
+
+ def update(self):
+ """ The update method will update the credentials for the user provided
+ """
+ return self.run_command('change_password', [self.user, self.host, self.pwd])
+
+ def create(self):
+ """ The create method will create a new user on the host with the
+ password provided
+ """
+ return self.run_command('register', [self.user, self.host, self.pwd])
+
+ def delete(self):
+ """ The delete method will delete the user from the host
+ """
+ return self.run_command('unregister', [self.user, self.host])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ username=dict(required=True, type='str'),
+ password=dict(type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ logging=dict(default=False, type='bool') # deprecate in favour of c.g.syslogger?
+ ),
+ required_if=[
+ ('state', 'present', ['password']),
+ ],
+ supports_check_mode=True,
+ )
+
+ obj = EjabberdUser(module)
+
+ rc = None
+ result = dict(changed=False)
+
+ if obj.state == 'absent':
+ if obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.delete()
+ if rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ elif obj.state == 'present':
+ if not obj.exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.create()
+ elif obj.changed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = obj.update()
+ if rc is not None and rc != 0:
+ module.fail_json(msg=err, rc=rc)
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
new file mode 100644
index 000000000..cd4bb45de
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/elasticsearch_plugin.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015, Mathew Davies <thepixeldeveloper@googlemail.com>
+# Copyright (c) 2017, Sam Doran <sdoran@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elasticsearch_plugin
+short_description: Manage Elasticsearch plugins
+description:
+ - Manages Elasticsearch plugins.
+author:
+ - Mathew Davies (@ThePixelDeveloper)
+ - Sam Doran (@samdoran)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: true
+ type: str
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ src:
+ description:
+ - Optionally set the source location to retrieve the plugin from. This can be a file://
+ URL to install from a local file, or a remote URL. If this is not set, the plugin
+ location is just based on the name.
+ - The name parameter must match the descriptor in the plugin ZIP specified.
+      - It is only used if the state would change, which is checked based solely on the name
+        parameter. If, for example, the plugin is already installed, changing this has no
+        effect.
+      - For ES 1.x, use I(url) instead.
+ required: false
+ type: str
+ url:
+ description:
+      - Set the exact URL to download the plugin from (only works for ES 1.x).
+      - For ES 2.x and higher, use I(src) instead.
+ required: false
+ type: str
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h..."
+      - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch >= 5.0.
+ default: 1m
+ type: str
+ force:
+ description:
+ - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
+ default: false
+ type: bool
+ plugin_bin:
+ description:
+ - Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
+ - The default changed in Ansible 2.4 to None.
+ type: path
+ plugin_dir:
+ description:
+      - Your configured plugin directory as specified in Elasticsearch.
+ default: /usr/share/elasticsearch/plugins/
+ type: path
+ proxy_host:
+ description:
+      - Proxy host to use during plugin installation.
+ type: str
+ proxy_port:
+ description:
+      - Proxy port to use during plugin installation.
+ type: str
+ version:
+ description:
+ - Version of the plugin to be installed.
+        If the plugin already exists with a previous version, it will NOT be updated.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch Head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: present
+
+- name: Install a specific version of Elasticsearch Head in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ version: 2.0.0
+
+- name: Uninstall Elasticsearch head plugin in Elasticsearch 2.x
+ community.general.elasticsearch_plugin:
+ name: mobz/elasticsearch-head
+ state: absent
+
+- name: Install a specific plugin in Elasticsearch >= 5.0
+ community.general.elasticsearch_plugin:
+ name: analysis-icu
+ state: present
+
+- name: Install the ingest-geoip plugin with a forced installation
+ community.general.elasticsearch_plugin:
+ name: ingest-geoip
+ state: present
+ force: true
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+PLUGIN_BIN_PATHS = tuple([
+ '/usr/share/elasticsearch/bin/elasticsearch-plugin',
+ '/usr/share/elasticsearch/bin/plugin'
+])
+
+
+def parse_plugin_repo(string):
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+    # Remove the elasticsearch- or es- prefix, if present.
+    for prefix in ("elasticsearch-", "es-"):
+        if repo.startswith(prefix):
+            return repo[len(prefix):]
+
+ return repo
+
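+# Illustrative examples of the parsing above:
+#   parse_plugin_repo("mobz/elasticsearch-head") -> "head"
+#   parse_plugin_repo("analysis-icu")            -> "analysis-icu"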
+
+def is_plugin_present(plugin_name, plugin_dir):
+ return os.path.isdir(os.path.join(plugin_dir, plugin_name))
+
+
+def parse_error(string):
+ reason = "ERROR: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
+ is_old_command = (os.path.basename(plugin_bin) == 'plugin')
+
+ # Timeout and version are only valid for plugin, not elasticsearch-plugin
+ if is_old_command:
+ if timeout:
+ cmd_args.append("--timeout %s" % timeout)
+
+        if version:
+            # The versioned plugin name is appended to cmd_args at the end of
+            # this function; do not overwrite the timeout argument here.
+            plugin_name = plugin_name + '/' + version
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ # Legacy ES 1.x
+ if url:
+ cmd_args.append("--url %s" % url)
+
+ if force:
+ cmd_args.append("--batch")
+ if src:
+ cmd_args.append(src)
+ else:
+ cmd_args.append(plugin_name)
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)
+
+ return True, cmd, out, err
+
+
+def get_plugin_bin(module, plugin_bin=None):
+ # Use the plugin_bin that was supplied first before trying other options
+ valid_plugin_bin = None
+ if plugin_bin and os.path.isfile(plugin_bin):
+ valid_plugin_bin = plugin_bin
+
+ else:
+ # Add the plugin_bin passed into the module to the top of the list of paths to test,
+ # testing for that binary name first before falling back to the default paths.
+ bin_paths = list(PLUGIN_BIN_PATHS)
+ if plugin_bin and plugin_bin not in bin_paths:
+ bin_paths.insert(0, plugin_bin)
+
+ # Get separate lists of dirs and binary names from the full paths to the
+ # plugin binaries.
+ plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths]))
+ plugin_bins = list(set([os.path.basename(x) for x in bin_paths]))
+
+ # Check for the binary names in the default system paths as well as the path
+ # specified in the module arguments.
+ for bin_file in plugin_bins:
+ valid_plugin_bin = module.get_bin_path(bin_file, opt_dirs=plugin_dirs)
+ if valid_plugin_bin:
+ break
+
+ if not valid_plugin_bin:
+ module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)
+
+ return valid_plugin_bin
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
+ src=dict(default=None),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ force=dict(type='bool', default=False),
+ plugin_bin=dict(type="path"),
+ plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
+ proxy_host=dict(default=None),
+ proxy_port=dict(default=None),
+ version=dict(default=None)
+ ),
+ mutually_exclusive=[("src", "url")],
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ src = module.params["src"]
+ timeout = module.params["timeout"]
+ force = module.params["force"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ # Search provided path and system paths for valid binary
+ plugin_bin = get_plugin_bin(module, plugin_bin)
+
+ repo = parse_plugin_repo(name)
+ present = is_plugin_present(repo, plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
new file mode 100644
index 000000000..487b6feef
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/emc_vnx_sg_member.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Luca 'remix_tj' Lorenzetto <lorenzetto.luca@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: emc_vnx_sg_member
+
+short_description: Manage storage group member on EMC VNX
+
+
+description:
+ - "This module manages the members of an existing storage group."
+
+extends_documentation_fragment:
+ - community.general.emc.emc_vnx
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+            - Name of the storage group to manage.
+ required: true
+ type: str
+ lunid:
+ description:
+            - LUN ID to be added.
+ required: true
+ type: int
+ state:
+ description:
+            - Indicates the desired I(lunid) state.
+            - C(present) ensures the specified I(lunid) is present in the storage group.
+            - C(absent) ensures the specified I(lunid) is absent from the storage group.
+ default: present
+        choices: [ "present", "absent" ]
+ type: str
+
+
+author:
+ - Luca 'remix_tj' Lorenzetto (@remixtj)
+'''
+
+EXAMPLES = '''
+- name: Add lun to storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: present
+
+- name: Remove lun from storage group
+ community.general.emc_vnx_sg_member:
+ name: sg01
+ sp_address: sp1a.fqdn
+ sp_user: sysadmin
+ sp_password: sysadmin
+ lunid: 100
+ state: absent
+'''
+
+RETURN = '''
+hluid:
+    description: The LUN ID that hosts attached to the storage group will see.
+ type: int
+ returned: success
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.storage.emc.emc_vnx import emc_vnx_argument_spec
+
+LIB_IMP_ERR = None
+try:
+ from storops import VNXSystem
+ from storops.exception import VNXCredentialError, VNXStorageGroupError, \
+ VNXAluAlreadyAttachedError, VNXAttachAluError, VNXDetachAluNotFoundError
+ HAS_LIB = True
+except Exception:
+ LIB_IMP_ERR = traceback.format_exc()
+ HAS_LIB = False
+
+
+def run_module():
+ module_args = dict(
+ name=dict(type='str', required=True),
+ lunid=dict(type='int', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+
+ module_args.update(emc_vnx_argument_spec)
+
+ result = dict(
+ changed=False,
+ hluid=None
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ if not HAS_LIB:
+ module.fail_json(msg=missing_required_lib('storops >= 0.5.10'),
+ exception=LIB_IMP_ERR)
+
+ sp_user = module.params['sp_user']
+ sp_address = module.params['sp_address']
+ sp_password = module.params['sp_password']
+ alu = module.params['lunid']
+
+ # if the user is working with this module in only check mode we do not
+ # want to make any changes to the environment, just return the current
+ # state with no modifications
+ if module.check_mode:
+ return result
+
+ try:
+ vnx = VNXSystem(sp_address, sp_user, sp_password)
+ sg = vnx.get_sg(module.params['name'])
+ if sg.existed:
+ if module.params['state'] == 'present':
+ if not sg.has_alu(alu):
+ try:
+ result['hluid'] = sg.attach_alu(alu)
+ result['changed'] = True
+ except VNXAluAlreadyAttachedError:
+ result['hluid'] = sg.get_hlu(alu)
+ except (VNXAttachAluError, VNXStorageGroupError) as e:
+ module.fail_json(msg='Error attaching {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ result['hluid'] = sg.get_hlu(alu)
+ if module.params['state'] == 'absent' and sg.has_alu(alu):
+ try:
+ sg.detach_alu(alu)
+ result['changed'] = True
+ except VNXDetachAluNotFoundError:
+ # being not attached when using absent is OK
+ pass
+ except VNXStorageGroupError as e:
+ module.fail_json(msg='Error detaching alu {0}: '
+ '{1} '.format(alu, to_native(e)),
+ **result)
+ else:
+ module.fail_json(msg='No such storage group named '
+ '{0}'.format(module.params['name']),
+ **result)
+ except VNXCredentialError as e:
+ module.fail_json(msg='{0}'.format(to_native(e)), **result)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/etcd3.py b/ansible_collections/community/general/plugins/modules/etcd3.py
new file mode 100644
index 000000000..9cd027406
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/etcd3.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: etcd3
+short_description: Set or delete key value pairs from an etcd3 cluster
+requirements:
+ - etcd3
+description:
+    - Sets or deletes values in an etcd3 cluster using its v3 API.
+    - Needs the Python C(etcd3) library to work.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ key:
+ type: str
+ description:
+            - The key where the information is stored in the cluster.
+ required: true
+ value:
+ type: str
+ description:
+            - The information stored.
+ required: true
+ host:
+ type: str
+ description:
+            - The IP address of the cluster.
+ default: 'localhost'
+ port:
+ type: int
+ description:
+            - The port number used to connect to the cluster.
+ default: 2379
+ state:
+ type: str
+ description:
+            - The state of the value for the key.
+            - Can be C(present) or C(absent).
+ required: true
+ choices: [ present, absent ]
+ user:
+ type: str
+ description:
+ - The etcd user to authenticate with.
+ password:
+ type: str
+ description:
+ - The password to use for authentication.
+ - Required if I(user) is defined.
+ ca_cert:
+ type: path
+ description:
+ - The Certificate Authority to use to verify the etcd host.
+ - Required if I(client_cert) and I(client_key) are defined.
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - Required if I(client_key) is defined.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - Required if I(client_cert) is defined.
+ timeout:
+ type: int
+ description:
+ - The socket level timeout in seconds.
+author:
+ - Jean-Philippe Evrard (@evrardjp)
+ - Victor Fauth (@vfauth)
+'''
+
+EXAMPLES = """
+- name: Store a value "baz3" under the key "foo" for a cluster located at "http://localhost:2379"
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ host: "localhost"
+ port: 2379
+ state: "present"
+
+- name: Authenticate using user/password combination with a timeout of 10 seconds
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ user: "someone"
+ password: "password123"
+ timeout: 10
+
+- name: Authenticate using TLS certificates
+ community.general.etcd3:
+ key: "foo"
+ value: "baz3"
+ state: "present"
+ ca_cert: "/etc/ssl/certs/CA_CERT.pem"
+ client_cert: "/etc/ssl/certs/cert.crt"
+ client_key: "/etc/ssl/private/key.pem"
+"""
+
+RETURN = '''
+key:
+    description: The key that was queried.
+ returned: always
+ type: str
+old_value:
+ description: The previous value in the cluster
+    description: The previous value in the cluster.
+ type: str
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+try:
+ import etcd3
+ HAS_ETCD = True
+ ETCD_IMP_ERR = None
+except ImportError:
+ ETCD_IMP_ERR = traceback.format_exc()
+ HAS_ETCD = False
+
+
+def run_module():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ key=dict(type='str', required=True, no_log=False),
+ value=dict(type='str', required=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=2379),
+ state=dict(type='str', required=True, choices=['present', 'absent']),
+ user=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ ca_cert=dict(type='path'),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ timeout=dict(type='int'),
+ )
+
+ # seed the result dict in the object
+ # we primarily care about changed and state
+ # change is if this module effectively modified the target
+ # state will include any data that you want your module to pass back
+ # for consumption, for example, in a subsequent task
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[['client_cert', 'client_key'], ['user', 'password']],
+ )
+
+ # It is possible to set `ca_cert` to verify the server identity without
+ # setting `client_cert` or `client_key` to authenticate the client
+ # so required_together is enough
+ # Due to `required_together=[['client_cert', 'client_key']]`, checking the presence
+ # of either `client_cert` or `client_key` is enough
+ if module.params['ca_cert'] is None and module.params['client_cert'] is not None:
+ module.fail_json(msg="The 'ca_cert' parameter must be defined when 'client_cert' and 'client_key' are present.")
+
+ result['key'] = module.params.get('key')
+ module.params['cert_cert'] = module.params.pop('client_cert')
+ module.params['cert_key'] = module.params.pop('client_key')
+
+ if not HAS_ETCD:
+ module.fail_json(msg=missing_required_lib('etcd3'), exception=ETCD_IMP_ERR)
+
+ allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key',
+ 'timeout', 'user', 'password']
+ # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is
+ # the minimum supported version
+ # client_params = {key: value for key, value in module.params.items() if key in allowed_keys}
+ client_params = dict()
+ for key, value in module.params.items():
+ if key in allowed_keys:
+ client_params[key] = value
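+    # With the module defaults, client_params now resembles (illustrative):
+    #   {'host': 'localhost', 'port': 2379, 'ca_cert': None, 'cert_cert': None,
+    #    'cert_key': None, 'timeout': None, 'user': None, 'password': None}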
+ try:
+ etcd = etcd3.client(**client_params)
+ except Exception as exp:
+ module.fail_json(msg='Cannot connect to etcd cluster: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+ try:
+ cluster_value = etcd.get(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot reach data: %s' % (to_native(exp)),
+ exception=traceback.format_exc())
+
+ # Make the cluster_value[0] a string for string comparisons
+ result['old_value'] = to_native(cluster_value[0])
+
+ if module.params['state'] == 'absent':
+ if cluster_value[0] is not None:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.delete(module.params['key'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot delete %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ elif module.params['state'] == 'present':
+ if result['old_value'] != module.params['value']:
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ try:
+ etcd.put(module.params['key'], module.params['value'])
+ except Exception as exp:
+ module.fail_json(msg='Cannot add or edit key %s: %s' % (module.params['key'], to_native(exp)),
+ exception=traceback.format_exc())
+ else:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="State not recognized")
+
+    # During the execution of the module, if there is an exception or a
+    # conditional state that effectively causes a failure, we run
+    # AnsibleModule.fail_json() to pass in the message and the result.
+
+    # In the event of a successful module execution, we simply call
+    # AnsibleModule.exit_json(), passing the key/value results.
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/facter.py b/ansible_collections/community/general/plugins/modules/facter.py
new file mode 100644
index 000000000..e7cf52e20
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/facter.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: facter
+short_description: Runs the discovery program I(facter) on the remote system
+description:
+ - Runs the C(facter) discovery program
+ (U(https://github.com/puppetlabs/facter)) on the remote system, returning
+ JSON data that can be useful for inventory purposes.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ arguments:
+ description:
+ - Specifies arguments for facter.
+ type: list
+ elements: str
+requirements:
+ - facter
+ - ruby-json
+author:
+ - Ansible Core Team
+ - Michael DeHaan
+'''
+
+EXAMPLES = '''
+# Example command-line invocation
+# ansible www.example.net -m facter
+
+- name: Execute facter with no arguments
+ community.general.facter:
+
+- name: Execute facter with arguments
+ community.general.facter:
+ arguments:
+ - -p
+ - system_uptime
+ - timezone
+ - is_virtual
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arguments=dict(required=False, type='list', elements='str')
+ )
+ )
+
+ facter_path = module.get_bin_path(
+ 'facter',
+ opt_dirs=['/opt/puppetlabs/bin'])
+
+ cmd = [facter_path, "--json"]
+ if module.params['arguments']:
+ cmd += module.params['arguments']
+
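+    # Illustrative: with arguments=['-p', 'system_uptime'], cmd is now
+    # ['/usr/bin/facter', '--json', '-p', 'system_uptime'] (the path depends
+    # on where get_bin_path() found facter).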
+ rc, out, err = module.run_command(cmd, check_rc=True)
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/filesize.py b/ansible_collections/community/general/plugins/modules/filesize.py
new file mode 100644
index 000000000..b3eb90d61
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/filesize.py
@@ -0,0 +1,492 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: filesize
+
+short_description: Create a file with a given size, or resize it if it exists
+
+description:
+ - This module is a simple wrapper around C(dd) to create, extend or truncate
+ a file, given its size. It can be used to manage swap files (that require
+ contiguous blocks) or alternatively, huge sparse files.
+
+author:
+ - quidame (@quidame)
+
+version_added: "3.0.0"
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ path:
+ description:
+ - Path of the regular file to create or resize.
+ type: path
+ required: true
+ size:
+ description:
+ - Requested size of the file.
+ - The value is a number (either C(int) or C(float)) optionally followed
+ by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or
+ C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB),
+ and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of
+ C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB);
+ C(G), C(g) or C(GiB) (= 1024MiB); and so on.
+ - If the multiplicative suffix is not provided, the value is treated as
+ an integer number of blocks of I(blocksize) bytes each (float values
+ are rounded to the closest integer).
+      - When the I(size) value is equal to the current file size, the module
+        does nothing.
+      - When the I(size) value is bigger than the current file size, bytes from
+        I(source) (if I(sparse) is not C(true)) are appended to the file
+        without truncating it, in other words, without modifying the existing
+        bytes of the file.
+ - When the I(size) value is smaller than the current file size, it is
+ truncated to the requested value without modifying bytes before this
+ value.
+ - That means that a file of any arbitrary size can be grown to any other
+ arbitrary size, and then resized down to its initial size without
+ modifying its initial content.
+ type: raw
+ required: true
+ blocksize:
+ description:
+ - Size of blocks, in bytes if not followed by a multiplicative suffix.
+ - The numeric value (before the unit) C(MUST) be an integer (or a C(float)
+ if it equals an integer).
+ - If not set, the size of blocks is guessed from the OS and commonly
+ results in C(512) or C(4096) bytes, that is used internally by the
+ module or when I(size) has no unit.
+ type: raw
+ source:
+ description:
+ - Device or file that provides input data to provision the file.
+ - This parameter is ignored when I(sparse=true).
+ type: path
+ default: /dev/zero
+ force:
+ description:
+ - Whether or not to overwrite the file if it exists, in other words, to
+ truncate it from 0. When C(true), the module is not idempotent, that
+ means it always reports I(changed=true).
+ - I(force=true) and I(sparse=true) are mutually exclusive.
+ type: bool
+ default: false
+ sparse:
+ description:
+ - Whether or not the file to create should be a sparse file.
+ - This option is effective only on newly created files, or when growing a
+ file, only for the bytes to append.
+ - This option is not supported on OSes or filesystems not supporting sparse files.
+ - I(force=true) and I(sparse=true) are mutually exclusive.
+ type: bool
+ default: false
+ unsafe_writes:
+ description:
+ - This option is silently ignored. This module always modifies file
+ size in-place.
+
+requirements:
+ - dd (Data Duplicator) in PATH
+
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+
+seealso:
+ - name: dd(1) manpage for Linux
+ description: Manual page of the GNU/Linux's dd implementation (from GNU coreutils).
+ link: https://man7.org/linux/man-pages/man1/dd.1.html
+
+ - name: dd(1) manpage for IBM AIX
+ description: Manual page of the IBM AIX's dd implementation.
+ link: https://www.ibm.com/support/knowledgecenter/ssw_aix_72/d_commands/dd.html
+
+ - name: dd(1) manpage for Mac OSX
+ description: Manual page of the Mac OSX's dd implementation.
+ link: https://www.unix.com/man-page/osx/1/dd/
+
+ - name: dd(1M) manpage for Solaris
+ description: Manual page of the Oracle Solaris's dd implementation.
+ link: https://docs.oracle.com/cd/E36784_01/html/E36871/dd-1m.html
+
+ - name: dd(1) manpage for FreeBSD
+ description: Manual page of the FreeBSD's dd implementation.
+ link: https://www.freebsd.org/cgi/man.cgi?dd(1)
+
+ - name: dd(1) manpage for OpenBSD
+ description: Manual page of the OpenBSD's dd implementation.
+ link: https://man.openbsd.org/dd
+
+ - name: dd(1) manpage for NetBSD
+ description: Manual page of the NetBSD's dd implementation.
+ link: https://man.netbsd.org/dd.1
+
+ - name: busybox(1) manpage for Linux
+ description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation.
+ link: https://www.unix.com/man-page/linux/1/busybox
+'''
+
+EXAMPLES = r'''
+- name: Create a file of 1G filled with null bytes
+ community.general.filesize:
+ path: /var/bigfile
+ size: 1G
+
+- name: Extend the file to 2G (2*1024^3)
+ community.general.filesize:
+ path: /var/bigfile
+ size: 2G
+
+- name: Reduce the file to 2GB (2*1000^3)
+ community.general.filesize:
+ path: /var/bigfile
+ size: 2GB
+
+- name: Fill a file with random bytes for backing a LUKS device
+ community.general.filesize:
+ path: ~/diskimage.luks
+ size: 512.0 MiB
+ source: /dev/urandom
+
+- name: Take a backup of MBR boot code into a file, overwriting it if it exists
+ community.general.filesize:
+ path: /media/sdb1/mbr.bin
+ size: 440B
+ source: /dev/sda
+ force: true
+
+- name: Create/resize a sparse file of/to 8TB
+ community.general.filesize:
+ path: /var/local/sparsefile
+ size: 8TB
+ sparse: true
+
+- name: Create a file with specific size and attributes, to be used as swap space
+ community.general.filesize:
+ path: /var/swapfile
+ size: 2G
+ blocksize: 512B
+ mode: u=rw,go=
+ owner: root
+ group: root
+'''
+
+RETURN = r'''
+cmd:
+ description: Command executed to create or resize the file.
+ type: str
+ returned: when changed or failed
+ sample: /usr/bin/dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024
+
+filesize:
+ description: Dictionary of sizes related to the file.
+ type: dict
+ returned: always
+ contains:
+ blocks:
+ description: Number of blocks in the file.
+ type: int
+ sample: 500
+ blocksize:
+ description: Size of the blocks in bytes.
+ type: int
+ sample: 1024
+ bytes:
+ description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize).
+ type: int
+ sample: 512000
+ iec:
+ description: Size of the file, in human-readable format, following IEC standard.
+ type: str
+ sample: 500.0 KiB
+ si:
+ description: Size of the file, in human-readable format, following SI standard.
+ type: str
+ sample: 512.0 kB
+
+size_diff:
+ description: Difference (positive or negative) between old size and new size, in bytes.
+ type: int
+ sample: -1234567890
+ returned: always
+
+path:
+  description: Realpath of the file if it is a symlink, otherwise the same as the module's I(path) parameter.
+ type: str
+ sample: /var/swap0
+ returned: always
+'''
+
+
+import re
+import os
+import math
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+# These are the multiplicative suffixes understood (or returned) by dd and
+# others (ls, df, lvresize, lsblk...).
+SIZE_UNITS = dict(
+ B=1,
+ kB=1000**1, KB=1000**1, KiB=1024**1, K=1024**1, k=1024**1,
+ MB=1000**2, mB=1000**2, MiB=1024**2, M=1024**2, m=1024**2,
+ GB=1000**3, gB=1000**3, GiB=1024**3, G=1024**3, g=1024**3,
+ TB=1000**4, tB=1000**4, TiB=1024**4, T=1024**4, t=1024**4,
+ PB=1000**5, pB=1000**5, PiB=1024**5, P=1024**5, p=1024**5,
+ EB=1000**6, eB=1000**6, EiB=1024**6, E=1024**6, e=1024**6,
+ ZB=1000**7, zB=1000**7, ZiB=1024**7, Z=1024**7, z=1024**7,
+ YB=1000**8, yB=1000**8, YiB=1024**8, Y=1024**8, y=1024**8,
+)
+
+
+def bytes_to_human(size, iec=False):
+ """Return human-readable size (with SI or IEC suffix) from bytes. This is
+ only to populate the returned result of the module, not to handle the
+ file itself (we only rely on bytes for that).
+ """
+ unit = 'B'
+ for (u, v) in SIZE_UNITS.items():
+ if size < v:
+ continue
+ if iec:
+ if 'i' not in u or size / v >= 1024:
+ continue
+ else:
+ if v % 5 or size / v >= 1000:
+ continue
+ unit = u
+
+ hsize = round(size / SIZE_UNITS[unit], 2)
+ if unit == 'B':
+ hsize = int(hsize)
+
+ unit = re.sub(r'^(.)', lambda m: m.expand(r'\1').upper(), unit)
+ if unit == 'KB':
+ unit = 'kB'
+
+ return '%s %s' % (str(hsize), unit)
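+
+# Illustrative values, consistent with the RETURN samples of this module:
+#   bytes_to_human(512000)       -> '512.0 kB'  (SI)
+#   bytes_to_human(512000, True) -> '500.0 KiB' (IEC)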
+
+
+def smart_blocksize(size, unit, product, bsize):
+ """Ensure the total size can be written as blocks*blocksize, with blocks
+ and blocksize being integers.
+ """
+ if not product % bsize:
+ return bsize
+
+    # Basically, for a file of 8 kB (=8000 B), a system block size of 4096
+    # bytes is not usable. The smallest integer number of kB that works with
+    # 512 B blocks is 64; the next ones are 128, 192, 256, and so on.
+
+ unit_size = SIZE_UNITS[unit]
+
+ if size == int(size):
+ if unit_size > SIZE_UNITS['MiB']:
+ if unit_size % 5:
+ return SIZE_UNITS['MiB']
+ return SIZE_UNITS['MB']
+ return unit_size
+
+ if unit == 'B':
+ raise AssertionError("byte is the smallest unit and requires an integer value")
+
+ if 0 < product < bsize:
+ return product
+
+ for bsz in (1024, 1000, 512, 256, 128, 100, 64, 32, 16, 10, 8, 4, 2):
+ if not product % bsz:
+ return bsz
+ return 1
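+
+# Illustrative: for the 8 kB example above, smart_blocksize(8.0, 'kB', 8000, 4096)
+# returns 1000, so the file is written as 8 blocks of 1000 bytes each.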
+
+
+def split_size_unit(string, isint=False):
+ """Split a string between the size value (int or float) and the unit.
+ Support optional space(s) between the numeric value and the unit.
+ """
+ unit = re.sub(r'(\d|\.)', r'', string).strip()
+ value = float(re.sub(r'%s' % unit, r'', string).strip())
+ if isint and unit in ('B', ''):
+ if int(value) != value:
+ raise AssertionError("invalid blocksize value: bytes require an integer value")
+
+ if not unit:
+ unit = None
+ product = int(round(value))
+ else:
+ if unit not in SIZE_UNITS.keys():
+ raise AssertionError("invalid size unit (%s): unit must be one of %s, or none." %
+ (unit, ', '.join(sorted(SIZE_UNITS, key=SIZE_UNITS.get))))
+ product = int(round(value * SIZE_UNITS[unit]))
+ return value, unit, product
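+
+# Illustrative:
+#   split_size_unit('512.0 MiB') -> (512.0, 'MiB', 536870912)
+#   split_size_unit('1024')      -> (1024.0, None, 1024)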
+
+
+def size_string(value):
+ """Convert a raw value to a string, but only if it is an integer, a float
+ or a string itself.
+ """
+ if not isinstance(value, (int, float, str)):
+ raise AssertionError("invalid value type (%s): size must be integer, float or string" % type(value))
+ return str(value)
+
+
+def size_spec(args):
+ """Return a dictionary with size specifications, especially the size in
+ bytes (after rounding it to an integer number of blocks).
+ """
+ blocksize_in_bytes = split_size_unit(args['blocksize'], True)[2]
+ if blocksize_in_bytes == 0:
+ raise AssertionError("block size cannot be equal to zero")
+
+ size_value, size_unit, size_result = split_size_unit(args['size'])
+ if not size_unit:
+ blocks = int(math.ceil(size_value))
+ else:
+ blocksize_in_bytes = smart_blocksize(size_value, size_unit, size_result, blocksize_in_bytes)
+ blocks = int(math.ceil(size_result / blocksize_in_bytes))
+
+ args['size_diff'] = round_bytes = int(blocks * blocksize_in_bytes)
+ args['size_spec'] = dict(blocks=blocks, blocksize=blocksize_in_bytes, bytes=round_bytes,
+ iec=bytes_to_human(round_bytes, True),
+ si=bytes_to_human(round_bytes))
+ return args['size_spec']
+
+
+def current_size(args):
+ """Return the size of the file at the given location if it exists, or None."""
+ path = args['path']
+ if os.path.exists(path):
+ if not os.path.isfile(path):
+ raise AssertionError("%s exists but is not a regular file" % path)
+ args['file_size'] = os.stat(path).st_size
+ else:
+ args['file_size'] = None
+ return args['file_size']
+
+
+def complete_dd_cmdline(args, dd_cmd):
+ """Compute dd options to grow or truncate a file."""
+ if args['file_size'] == args['size_spec']['bytes'] and not args['force']:
+ # Nothing to do.
+ return list()
+
+ bs = args['size_spec']['blocksize']
+
+ # For sparse files (create, truncate, grow): write count=0 block.
+ if args['sparse']:
+ seek = args['size_spec']['blocks']
+ elif args['force'] or not os.path.exists(args['path']): # Create file
+ seek = 0
+ elif args['size_diff'] < 0: # Truncate file
+ seek = args['size_spec']['blocks']
+ elif args['size_diff'] % bs: # Grow file
+ seek = int(args['file_size'] / bs) + 1
+ else:
+ seek = int(args['file_size'] / bs)
+
+ count = args['size_spec']['blocks'] - seek
+ dd_cmd += ['bs=%s' % str(bs), 'seek=%s' % str(seek), 'count=%s' % str(count)]
+
+ return dd_cmd
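+
+# Illustrative: growing /var/swapfile from 3 GiB to 4 GiB with a 1 MiB
+# blocksize yields the command shown in the RETURN section above:
+#   dd if=/dev/zero of=/var/swapfile bs=1048576 seek=3072 count=1024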
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ size=dict(type='raw', required=True),
+ blocksize=dict(type='raw'),
+ source=dict(type='path', default='/dev/zero'),
+ sparse=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+ args = dict(**module.params)
+ diff = dict(before=dict(), after=dict())
+
+ if args['sparse'] and args['force']:
+        module.fail_json(msg='parameter values are mutually exclusive: force=true|sparse=true')
+    if not os.path.exists(os.path.dirname(args['path'])):
+        module.fail_json(msg='parent directory of the file must exist prior to running this module')
+ if not args['blocksize']:
+ args['blocksize'] = str(os.statvfs(os.path.dirname(args['path'])).f_frsize)
+
+ try:
+ args['size'] = size_string(args['size'])
+ args['blocksize'] = size_string(args['blocksize'])
+ initial_filesize = current_size(args)
+ size_descriptors = size_spec(args)
+ except AssertionError as err:
+ module.fail_json(msg=to_native(err))
+
+ expected_filesize = size_descriptors['bytes']
+ if initial_filesize:
+ args['size_diff'] = expected_filesize - initial_filesize
+ diff['after']['size'] = expected_filesize
+ diff['before']['size'] = initial_filesize
+
+ result = dict(
+ changed=args['force'],
+ size_diff=args['size_diff'],
+ path=args['path'],
+ filesize=size_descriptors)
+
+ dd_bin = module.get_bin_path('dd', True)
+ dd_cmd = [dd_bin, 'if=%s' % args['source'], 'of=%s' % args['path']]
+
+ if expected_filesize != initial_filesize or args['force']:
+ result['cmd'] = ' '.join(complete_dd_cmdline(args, dd_cmd))
+ if module.check_mode:
+ result['changed'] = True
+ else:
+ result['rc'], dummy, result['stderr'] = module.run_command(dd_cmd)
+
+ diff['after']['size'] = result_filesize = result['size_diff'] = current_size(args)
+ if initial_filesize:
+ result['size_diff'] = result_filesize - initial_filesize
+ if not args['force']:
+ result['changed'] = result_filesize != initial_filesize
+
+ if result['rc']:
+ msg = "dd error while creating file %s with size %s from source %s: see stderr for details" % (
+ args['path'], args['size'], args['source'])
+ module.fail_json(msg=msg, **result)
+ if result_filesize != expected_filesize:
+ msg = "module error while creating file %s with size %s from source %s: file is %s bytes long" % (
+ args['path'], args['size'], args['source'], result_filesize)
+ module.fail_json(msg=msg, **result)
+
+ # dd follows symlinks, and so does this module, while file module doesn't.
+ # If we call it, this is to manage file's mode, owner and so on, not the
+ # symlink's ones.
+ file_params = dict(**module.params)
+ if os.path.islink(args['path']):
+ file_params['path'] = result['path'] = os.path.realpath(args['path'])
+
+ if args['file_size'] is not None:
+ file_args = module.load_file_common_arguments(file_params)
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+ result['diff'] = diff
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/filesystem.py b/ansible_collections/community/general/plugins/modules/filesystem.py
new file mode 100644
index 000000000..0e6b815b4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/filesystem.py
@@ -0,0 +1,606 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, quidame <quidame@poivron.org>
+# Copyright (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Alexander Bulimov (@abulimov)
+ - quidame (@quidame)
+module: filesystem
+short_description: Makes a filesystem
+description:
+ - This module creates a filesystem.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - If I(state=present), the filesystem is created if it doesn't already
+ exist, that is the default behaviour if I(state) is omitted.
+ - If I(state=absent), filesystem signatures on I(dev) are wiped if it
+ contains a filesystem (as known by C(blkid)).
+ - When I(state=absent), all other options but I(dev) are ignored, and the
+ module doesn't fail if the device I(dev) doesn't actually exist.
+ type: str
+ choices: [ present, absent ]
+ default: present
+ version_added: 1.3.0
+ fstype:
+ choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ]
+ description:
+ - Filesystem type to be created. This option is required with
+ I(state=present) (or if I(state) is omitted).
+ - ufs support has been added in community.general 3.4.0.
+ type: str
+ aliases: [type]
+ dev:
+ description:
+ - Target path to block device (Linux) or character device (FreeBSD) or
+ regular file (both).
+ - When setting Linux-specific filesystem types on FreeBSD, this module
+ only works when applying to regular files, aka disk images.
+ - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support
+ a regular file as their target I(dev).
+ - Support for character devices on FreeBSD has been added in community.general 3.4.0.
+ type: path
+ required: true
+ aliases: [device]
+ force:
+ description:
+      - If C(true), allows creating a new filesystem on a device that already has a filesystem.
+ type: bool
+ default: false
+ resizefs:
+ description:
+ - If C(true), if the block device and filesystem size differ, grow the filesystem into the space.
+ - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems.
+ Attempts to resize other filesystem types will fail.
+      - XFS will only grow if mounted. Currently, the module is based on commands
+        from the C(util-linux) package to perform operations, so resizing of XFS is
+        not supported on FreeBSD systems.
+ - vFAT will likely fail if C(fatresize < 1.04).
+ type: bool
+ default: false
+ opts:
+ description:
+ - List of options to be passed to C(mkfs) command.
+ type: str
+requirements:
+ - Uses specific tools related to the I(fstype) for creating or resizing a
+ filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on).
+ - Uses generic tools mostly related to the Operating System (Linux or
+ FreeBSD) or available on both, as C(blkid).
+ - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required.
+notes:
+ - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid)
+ is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also
+ unable to detect a filesystem), this filesystem is overwritten even if
+ I(force) is C(false).
+ - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide
+ a C(blkid) command that is compatible with this module. However, these
+ packages conflict with each other, and only the C(util-linux) package
+ provides the command required to not fail when I(state=absent).
+seealso:
+ - module: community.general.filesize
+ - module: ansible.posix.mount
+'''
+
+EXAMPLES = '''
+- name: Create a ext2 filesystem on /dev/sdb1
+ community.general.filesystem:
+ fstype: ext2
+ dev: /dev/sdb1
+
+- name: Create a ext4 filesystem on /dev/sdb1 and check disk blocks
+ community.general.filesystem:
+ fstype: ext4
+ dev: /dev/sdb1
+ opts: -cc
+
+- name: Blank filesystem signature on /dev/sdb1
+ community.general.filesystem:
+ dev: /dev/sdb1
+ state: absent
+
+- name: Create a filesystem on top of a regular file
+ community.general.filesystem:
+ dev: /path/to/disk.img
+ fstype: vfat
+'''
+
+import os
+import platform
+import re
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class Device(object):
+ def __init__(self, module, path):
+ self.module = module
+ self.path = path
+
+ def size(self):
+ """ Return size in bytes of device. Returns int """
+ statinfo = os.stat(self.path)
+ if stat.S_ISBLK(statinfo.st_mode):
+ blockdev_cmd = self.module.get_bin_path("blockdev", required=True)
+ dummy, out, dummy = self.module.run_command([blockdev_cmd, "--getsize64", self.path], check_rc=True)
+ devsize_in_bytes = int(out)
+ elif stat.S_ISCHR(statinfo.st_mode) and platform.system() == 'FreeBSD':
+ diskinfo_cmd = self.module.get_bin_path("diskinfo", required=True)
+ dummy, out, dummy = self.module.run_command([diskinfo_cmd, self.path], check_rc=True)
+ devsize_in_bytes = int(out.split()[2])
+ elif os.path.isfile(self.path):
+ devsize_in_bytes = os.path.getsize(self.path)
+ else:
+ self.module.fail_json(changed=False, msg="Target device not supported: %s" % self)
+
+ return devsize_in_bytes
+
+ def get_mountpoint(self):
+ """Return (first) mountpoint of device. Returns None when not mounted."""
+ cmd_findmnt = self.module.get_bin_path("findmnt", required=True)
+
+ # find mountpoint
+ rc, mountpoint, dummy = self.module.run_command([cmd_findmnt, "--mtab", "--noheadings", "--output",
+ "TARGET", "--source", self.path], check_rc=False)
+ if rc != 0:
+ mountpoint = None
+ else:
+ mountpoint = mountpoint.split('\n')[0]
+
+ return mountpoint
+
+ def __str__(self):
+ return self.path
+
+
+class Filesystem(object):
+
+ MKFS = None
+ MKFS_FORCE_FLAGS = []
+ INFO = None
+ GROW = None
+ GROW_MAX_SPACE_FLAGS = []
+ GROW_MOUNTPOINT_ONLY = False
+
+ LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}
+
+ def __init__(self, module):
+ self.module = module
+
+ @property
+ def fstype(self):
+ return type(self).__name__
+
+ def get_fs_size(self, dev):
+ """Return size in bytes of filesystem on device (integer).
+ Should query the info with a per-fstype command that can access the
+ device whenever it is mounted or not, and parse the command output.
+ Parser must ensure to return an integer, or raise a ValueError.
+ """
+ raise NotImplementedError()
+
+ def create(self, opts, dev):
+ if self.module.check_mode:
+ return
+
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)]
+ self.module.run_command(cmd, check_rc=True)
+
+ def wipefs(self, dev):
+ if self.module.check_mode:
+ return
+
+ # wipefs comes with util-linux package (as 'blockdev' & 'findmnt' above)
+ # that is ported to FreeBSD. The use of dd as a portable fallback is
+ # not doable here if it needs get_mountpoint() (to prevent corruption of
+ # a mounted filesystem), since 'findmnt' is not available on FreeBSD,
+ # even in util-linux port for this OS.
+ wipefs = self.module.get_bin_path('wipefs', required=True)
+ cmd = [wipefs, "--all", str(dev)]
+ self.module.run_command(cmd, check_rc=True)
+
+ def grow_cmd(self, target):
+ """Build and return the resizefs commandline as list."""
+ cmdline = [self.module.get_bin_path(self.GROW, required=True)]
+ cmdline += self.GROW_MAX_SPACE_FLAGS + [target]
+ return cmdline
+
+ def grow(self, dev):
+ """Get dev and fs size and compare. Returns stdout of used command."""
+ devsize_in_bytes = dev.size()
+
+ try:
+ fssize_in_bytes = self.get_fs_size(dev)
+ except NotImplementedError:
+ self.module.fail_json(msg="module does not support resizing %s filesystem yet" % self.fstype)
+ except ValueError as err:
+ self.module.warn("unable to process %s output '%s'" % (self.INFO, to_native(err)))
+ self.module.fail_json(msg="unable to process %s output for %s" % (self.INFO, dev))
+
+ if not fssize_in_bytes < devsize_in_bytes:
+ self.module.exit_json(changed=False, msg="%s filesystem is using the whole device %s" % (self.fstype, dev))
+ elif self.module.check_mode:
+ self.module.exit_json(changed=True, msg="resizing filesystem %s on device %s" % (self.fstype, dev))
+
+ if self.GROW_MOUNTPOINT_ONLY:
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
+ grow_target = mountpoint
+ else:
+ grow_target = str(dev)
+
+ dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True)
+ return out
+
+
+class Ext(Filesystem):
+ MKFS_FORCE_FLAGS = ['-F']
+ INFO = 'tune2fs'
+ GROW = 'resize2fs'
+
+ def get_fs_size(self, dev):
+ """Get Block count and Block size and return their product."""
+ cmd = self.module.get_bin_path(self.INFO, required=True)
+ dummy, out, dummy = self.module.run_command([cmd, '-l', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+
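+        # Typical tune2fs -l lines consumed below (illustrative):
+        #   Block count:              131072
+        #   Block size:               4096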
+ block_count = block_size = None
+ for line in out.splitlines():
+ if 'Block count:' in line:
+ block_count = int(line.split(':')[1].strip())
+ elif 'Block size:' in line:
+ block_size = int(line.split(':')[1].strip())
+ if None not in (block_size, block_count):
+ break
+ else:
+ raise ValueError(repr(out))
+
+ return block_size * block_count
+
+
+class Ext2(Ext):
+ MKFS = 'mkfs.ext2'
+
+
+class Ext3(Ext):
+ MKFS = 'mkfs.ext3'
+
+
+class Ext4(Ext):
+ MKFS = 'mkfs.ext4'
+
+
+class XFS(Filesystem):
+ MKFS = 'mkfs.xfs'
+ MKFS_FORCE_FLAGS = ['-f']
+ INFO = 'xfs_info'
+ GROW = 'xfs_growfs'
+ GROW_MOUNTPOINT_ONLY = True
+
+ def get_fs_size(self, dev):
+ """Get bsize and blocks and return their product."""
+ cmdline = [self.module.get_bin_path(self.INFO, required=True)]
+
+ # Depending on the versions, xfs_info is able to get info from the
+ # device, whenever it is mounted or not, or only if unmounted, or
+ # only if mounted, or not at all. For any version until now, it is
+ # able to query info from the mountpoint. So try it first, and use
+ # device as the last resort: it may or may not work.
+ mountpoint = dev.get_mountpoint()
+ if mountpoint:
+ cmdline += [mountpoint]
+ else:
+ cmdline += [str(dev)]
+ dummy, out, dummy = self.module.run_command(cmdline, check_rc=True, environ_update=self.LANG_ENV)
+
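+        # Typical xfs_info 'data' line consumed below (illustrative):
+        #   data     =                       bsize=4096   blocks=524288, imaxpct=25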
+ block_size = block_count = None
+ for line in out.splitlines():
+ col = line.split('=')
+ if col[0].strip() == 'data':
+ if col[1].strip() == 'bsize':
+ block_size = int(col[2].split()[0])
+ if col[2].split()[1] == 'blocks':
+ block_count = int(col[3].split(',')[0])
+ if None not in (block_size, block_count):
+ break
+ else:
+ raise ValueError(repr(out))
+
+ return block_size * block_count
+
+
+class Reiserfs(Filesystem):
+ MKFS = 'mkfs.reiserfs'
+ MKFS_FORCE_FLAGS = ['-q']
+
+
+class Btrfs(Filesystem):
+ MKFS = 'mkfs.btrfs'
+ INFO = 'btrfs'
+ GROW = 'btrfs'
+ GROW_MAX_SPACE_FLAGS = ['filesystem', 'resize', 'max']
+ GROW_MOUNTPOINT_ONLY = True
+
+ def __init__(self, module):
+ super(Btrfs, self).__init__(module)
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ dummy, stdout, stderr = self.module.run_command([mkfs, '--version'], check_rc=True)
+ match = re.search(r" v([0-9.]+)", stdout)
+ if not match:
+ # v0.20-rc1 use stderr
+ match = re.search(r" v([0-9.]+)", stderr)
+ if match:
+ # v0.20-rc1 doesn't have --force parameter added in following version v3.12
+ if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
+ self.MKFS_FORCE_FLAGS = ['-f']
+ else:
+ # assume version is greater or equal to 3.12
+ self.MKFS_FORCE_FLAGS = ['-f']
+ self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' % (stdout, stderr))
+
+ def get_fs_size(self, dev):
+ """Return size in bytes of filesystem on device (integer)."""
+ mountpoint = dev.get_mountpoint()
+ if not mountpoint:
+ self.module.fail_json(msg="%s needs to be mounted for %s operations" % (dev, self.fstype))
+
+ dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO),
+ 'filesystem', 'usage', '-b', mountpoint], check_rc=True)
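+        # Typical `btrfs filesystem usage -b` line consumed below (illustrative):
+        #   Device size:                  21474836480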
+ for line in stdout.splitlines():
+ if "Device size" in line:
+ return int(line.split()[-1])
+ raise ValueError(repr(stdout))
+
+
+class Ocfs2(Filesystem):
+ MKFS = 'mkfs.ocfs2'
+ MKFS_FORCE_FLAGS = ['-Fx']
+
+
+class F2fs(Filesystem):
+ MKFS = 'mkfs.f2fs'
+ INFO = 'dump.f2fs'
+ GROW = 'resize.f2fs'
+
+ def __init__(self, module):
+ super(F2fs, self).__init__(module)
+ mkfs = self.module.get_bin_path(self.MKFS, required=True)
+ dummy, out, dummy = self.module.run_command([mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV)
+ # Looking for " F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
+ # mkfs.f2fs displays version since v1.2.0
+ match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
+ if match is not None:
+ # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem
+ # before that version -f switch wasn't used
+ if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
+ self.MKFS_FORCE_FLAGS = ['-f']
+
+ def get_fs_size(self, dev):
+ """Get sector size and total FS sectors and return their product."""
+ cmd = self.module.get_bin_path(self.INFO, required=True)
+ dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ sector_size = sector_count = None
+ for line in out.splitlines():
+ if 'Info: sector size = ' in line:
+ # expected: 'Info: sector size = 512'
+ sector_size = int(line.split()[4])
+ elif 'Info: total FS sectors = ' in line:
+ # expected: 'Info: total FS sectors = 102400 (50 MB)'
+ sector_count = int(line.split()[5])
+ if None not in (sector_size, sector_count):
+ break
+ else:
+ raise ValueError(repr(out))
+
+ return sector_size * sector_count
+
+
+class VFAT(Filesystem):
+ INFO = 'fatresize'
+ GROW = 'fatresize'
+ GROW_MAX_SPACE_FLAGS = ['-s', 'max']
+
+ def __init__(self, module):
+ super(VFAT, self).__init__(module)
+ if platform.system() == 'FreeBSD':
+ self.MKFS = 'newfs_msdos'
+ else:
+ self.MKFS = 'mkfs.vfat'
+
+ def get_fs_size(self, dev):
+ """Get and return size of filesystem, in bytes."""
+ cmd = self.module.get_bin_path(self.INFO, required=True)
+ dummy, out, dummy = self.module.run_command([cmd, '--info', str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+ fssize = None
+ for line in out.splitlines()[1:]:
+ parts = line.split(':', 1)
+ if len(parts) < 2:
+ continue
+ param, value = parts
+ if param.strip() in ('Size', 'Cur size'):
+ fssize = int(value.strip())
+ break
+ else:
+ raise ValueError(repr(out))
+
+ return fssize
+
+
+class LVM(Filesystem):
+ MKFS = 'pvcreate'
+ MKFS_FORCE_FLAGS = ['-f']
+ INFO = 'pvs'
+ GROW = 'pvresize'
+
+ def get_fs_size(self, dev):
+ """Get and return PV size, in bytes."""
+ cmd = self.module.get_bin_path(self.INFO, required=True)
+ dummy, size, dummy = self.module.run_command([cmd, '--noheadings', '-o', 'pv_size', '--units', 'b', '--nosuffix', str(dev)], check_rc=True)
+ pv_size = int(size)
+ return pv_size
+
+
+class Swap(Filesystem):
+ MKFS = 'mkswap'
+ MKFS_FORCE_FLAGS = ['-f']
+
+
+class UFS(Filesystem):
+ MKFS = 'newfs'
+ INFO = 'dumpfs'
+ GROW = 'growfs'
+ GROW_MAX_SPACE_FLAGS = ['-y']
+
+ def get_fs_size(self, dev):
+ """Get providersize and fragment size and return their product."""
+ cmd = self.module.get_bin_path(self.INFO, required=True)
+ dummy, out, dummy = self.module.run_command([cmd, str(dev)], check_rc=True, environ_update=self.LANG_ENV)
+
+ fragmentsize = providersize = None
+ for line in out.splitlines():
+ if line.startswith('fsize'):
+ fragmentsize = int(line.split()[1])
+ elif 'providersize' in line:
+ providersize = int(line.split()[-1])
+ if None not in (fragmentsize, providersize):
+ break
+ else:
+ raise ValueError(repr(out))
+
+ return fragmentsize * providersize
+
+
+FILESYSTEMS = {
+ 'ext2': Ext2,
+ 'ext3': Ext3,
+ 'ext4': Ext4,
+ 'ext4dev': Ext4,
+ 'f2fs': F2fs,
+ 'reiserfs': Reiserfs,
+ 'xfs': XFS,
+ 'btrfs': Btrfs,
+ 'vfat': VFAT,
+ 'ocfs2': Ocfs2,
+ 'LVM2_member': LVM,
+ 'swap': Swap,
+ 'ufs': UFS,
+}
+
+
+def main():
+ friendly_names = {
+ 'lvm': 'LVM2_member',
+ }
+
+ fstypes = set(FILESYSTEMS.keys()) - set(friendly_names.values()) | set(friendly_names.keys())
+
+ # There is no "single command" to manipulate filesystems, so we map them all out and their options
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ fstype=dict(type='str', aliases=['type'], choices=list(fstypes)),
+ dev=dict(type='path', required=True, aliases=['device']),
+ opts=dict(type='str'),
+ force=dict(type='bool', default=False),
+ resizefs=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ('state', 'present', ['fstype'])
+ ],
+ supports_check_mode=True,
+ )
+
+ state = module.params['state']
+ dev = module.params['dev']
+ fstype = module.params['fstype']
+ opts = module.params['opts']
+ force = module.params['force']
+ resizefs = module.params['resizefs']
+
+ mkfs_opts = []
+ if opts is not None:
+ mkfs_opts = opts.split()
+
+ changed = False
+
+ if not os.path.exists(dev):
+ msg = "Device %s not found." % dev
+ if state == "present":
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(msg=msg)
+
+ dev = Device(module, dev)
+
+    # In case blkid/fstyp isn't able to identify an existing filesystem, the
+    # device is considered empty, and any existing filesystem would be
+    # overwritten even if force isn't enabled.
+ cmd = module.get_bin_path('blkid', required=True)
+ rc, raw_fs, err = module.run_command([cmd, '-c', os.devnull, '-o', 'value', '-s', 'TYPE', str(dev)])
+ fs = raw_fs.strip()
+ if not fs and platform.system() == 'FreeBSD':
+ cmd = module.get_bin_path('fstyp', required=True)
+ rc, raw_fs, err = module.run_command([cmd, str(dev)])
+ fs = raw_fs.strip()
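+    # Illustrative: `blkid -c /dev/null -o value -s TYPE /dev/sdb1` prints a
+    # bare filesystem type such as "ext4", or nothing for a blank device.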
+
+ if state == "present":
+ if fstype in friendly_names:
+ fstype = friendly_names[fstype]
+
+ try:
+ klass = FILESYSTEMS[fstype]
+ except KeyError:
+ module.fail_json(changed=False, msg="module does not support this filesystem (%s) yet." % fstype)
+
+ filesystem = klass(module)
+
+ same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype]
+ if same_fs and not resizefs and not force:
+ module.exit_json(changed=False)
+ elif same_fs and resizefs:
+ if not filesystem.GROW:
+ module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype)
+
+ out = filesystem.grow(dev)
+
+ module.exit_json(changed=True, msg=out)
+ elif fs and not force:
+ module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err)
+
+ # create fs
+ filesystem.create(mkfs_opts, dev)
+ changed = True
+
+ elif fs:
+ # wipe fs signatures
+ filesystem = Filesystem(module)
+ filesystem.wipefs(dev)
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/flatpak.py b/ansible_collections/community/general/plugins/modules/flatpak.py
new file mode 100644
index 000000000..40a13736f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/flatpak.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak
+short_description: Manage flatpaks
+description:
+ - Allows users to add or remove flatpaks.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+author:
+ - John Kwiatkoski (@JayKayy)
+ - Alexander Bethke (@oolongbrothers)
+requirements:
+ - flatpak
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: path
+ default: flatpak
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The name of the flatpak to manage. To operate on several packages this
+ can accept a list of packages.
+ - When used with I(state=present), I(name) can be specified as a URL to a
+ C(flatpakref) file or the unique reverse DNS name that identifies a flatpak.
+ - Both C(https://) and C(http://) URLs are supported.
+ - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote
+ to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit).
+ - When used with I(state=absent), it is recommended to specify the name in the reverse DNS
+ format.
+ - When supplying a URL with I(state=absent), the module will try to match the
+ installed flatpak based on the name of the flatpakref to remove it. However, there is no
+ guarantee that the names of the flatpakref file and the reverse DNS name of the installed
+ flatpak do match.
+ type: list
+ elements: str
+ required: true
+ no_dependencies:
+ description:
+      - Whether installing runtime dependencies should be omitted or not.
+ - This parameter is primarily implemented for integration testing this module.
+ There might however be some use cases where you would want to have this, like when you are
+ packaging your own flatpaks.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ remote:
+ description:
+ - The flatpak remote (repository) to install the flatpak from.
+ - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before
+ you can use this.
+ - See the M(community.general.flatpak_remote) module for managing flatpak remotes.
+ type: str
+ default: flathub
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present ]
+ type: str
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Install the spotify flatpak
+ community.general.flatpak:
+ name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref
+ state: present
+
+- name: Install the gedit flatpak package without dependencies (not recommended)
+ community.general.flatpak:
+ name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref
+ state: present
+ no_dependencies: true
+
+- name: Install the gedit package from flathub for current user
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: present
+ method: user
+
+- name: Install the Gnome Calendar flatpak from the gnome remote system-wide
+ community.general.flatpak:
+ name: org.gnome.Calendar
+ state: present
+ remote: gnome
+
+- name: Install multiple packages
+ community.general.flatpak:
+ name:
+ - org.gimp.GIMP
+ - org.inkscape.Inkscape
+ - org.mozilla.firefox
+
+- name: Remove the gedit flatpak
+ community.general.flatpak:
+ name: org.gnome.gedit
+ state: absent
+
+- name: Remove multiple packages
+ community.general.flatpak:
+ name:
+ - org.gimp.GIMP
+ - org.inkscape.Inkscape
+ - org.mozilla.firefox
+ state: absent
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+  sample: "/usr/bin/flatpak install --user --noninteractive flathub org.gnome.Calculator"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n"
+'''
+
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE = "Unknown option --columns=application"
+
+
+def install_flat(module, binary, remote, names, method, no_dependencies):
+ """Add new flatpaks."""
+ global result # pylint: disable=global-variable-not-assigned
+ uri_names = []
+ id_names = []
+ for name in names:
+ if name.startswith('http://') or name.startswith('https://'):
+ uri_names.append(name)
+ else:
+ id_names.append(name)
+ base_command = [binary, "install", "--{0}".format(method)]
+ flatpak_version = _flatpak_version(module, binary)
+ if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
+ base_command += ["-y"]
+ else:
+ base_command += ["--noninteractive"]
+ if no_dependencies:
+ base_command += ["--no-deps"]
+ if uri_names:
+ command = base_command + uri_names
+ _flatpak_command(module, module.check_mode, command)
+ if id_names:
+ command = base_command + [remote] + id_names
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def uninstall_flat(module, binary, names, method):
+ """Remove existing flatpaks."""
+ global result # pylint: disable=global-variable-not-assigned
+ installed_flat_names = [
+ _match_installed_flat_name(module, binary, name, method)
+ for name in names
+ ]
+ command = [binary, "uninstall"]
+ flatpak_version = _flatpak_version(module, binary)
+ if LooseVersion(flatpak_version) < LooseVersion('1.1.3'):
+ command += ["-y"]
+ else:
+ command += ["--noninteractive"]
+ command += ["--{0}".format(method)] + installed_flat_names
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def flatpak_exists(module, binary, names, method):
+ """Check if the flatpaks are installed."""
+ command = [binary, "list", "--{0}".format(method)]
+ output = _flatpak_command(module, False, command)
+ installed = []
+ not_installed = []
+ for name in names:
+ parsed_name = _parse_flatpak_name(name).lower()
+ if parsed_name in output.lower():
+ installed.append(name)
+ else:
+ not_installed.append(name)
+ return installed, not_installed
+
+
+def _match_installed_flat_name(module, binary, name, method):
+ # This is a difficult function, since if the user supplies a flatpakref url,
+ # we have to rely on a naming convention:
+ # The flatpakref file name needs to match the flatpak name
+ global result # pylint: disable=global-variable-not-assigned
+ parsed_name = _parse_flatpak_name(name)
+ # Try running flatpak list with columns feature
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ _flatpak_command(module, False, command, ignore_failure=True)
+ if result['rc'] != 0 and OUTDATED_FLATPAK_VERSION_ERROR_MESSAGE in result['stderr']:
+ # Probably flatpak before 1.2
+ matched_flatpak_name = \
+ _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method)
+ else:
+ # Probably flatpak >= 1.2
+ matched_flatpak_name = \
+ _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method)
+
+ if matched_flatpak_name:
+ return matched_flatpak_name
+ else:
+ result['msg'] = "Flatpak removal failed: Could not match any installed flatpaks to " +\
+ "the name `{0}`. ".format(_parse_flatpak_name(name)) +\
+ "If you used a URL, try using the reverse DNS name of the flatpak"
+ module.fail_json(**result)
+
+
+def _match_flat_using_outdated_flatpak_format(module, binary, parsed_name, method):
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "list", "--{0}".format(method), "--app"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() in row.lower():
+ return row.split()[0]
+
+
+def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method):
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "list", "--{0}".format(method), "--app", "--columns=application"]
+ output = _flatpak_command(module, False, command)
+ for row in output.split('\n'):
+ if parsed_name.lower() == row.lower():
+ return row
+
+
+def _parse_flatpak_name(name):
+ if name.startswith('http://') or name.startswith('https://'):
+ file_name = urlparse(name).path.split('/')[-1]
+ file_name_without_extension = file_name.split('.')[0:-1]
+ common_name = ".".join(file_name_without_extension)
+ else:
+ common_name = name
+ return common_name
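+
+# Illustration (hypothetical URL): with the naming convention above,
+# _parse_flatpak_name('https://example.com/repo/org.gnome.gedit.flatpakref')
+# returns 'org.gnome.gedit', while a plain reverse DNS name is returned
+# unchanged.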
+
+
+def _flatpak_version(module, binary):
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "--version"]
+ output = _flatpak_command(module, False, command)
+ version_number = output.split()[1]
+ return version_number
+
+
+def _flatpak_command(module, noop, command, ignore_failure=False):
+ global result # pylint: disable=global-variable-not-assigned
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=not ignore_failure
+ )
+ return result['stdout']
+
+
+def main():
+ # This module supports check mode
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ remote=dict(type='str', default='flathub'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default='present',
+ choices=['absent', 'present']),
+ no_dependencies=dict(type='bool', default=False),
+ executable=dict(type='path', default='flatpak')
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ remote = module.params['remote']
+ no_dependencies = module.params['no_dependencies']
+ method = module.params['method']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ installed, not_installed = flatpak_exists(module, binary, name, method)
+ if state == 'present' and not_installed:
+ install_flat(module, binary, remote, not_installed, method, no_dependencies)
+ elif state == 'absent' and installed:
+ uninstall_flat(module, binary, installed, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/flatpak_remote.py b/ansible_collections/community/general/plugins/modules/flatpak_remote.py
new file mode 100644
index 000000000..9c097c411
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/flatpak_remote.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 John Kwiatkoski (@JayKayy) <jkwiat40@gmail.com>
+# Copyright (c) 2018 Alexander Bethke (@oolongbrothers) <oolongbrothers@gmx.net>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: flatpak_remote
+short_description: Manage flatpak repository remotes
+description:
+ - Allows users to add or remove flatpak remotes.
+ - The flatpak remotes concept is comparable to what is called repositories in other packaging
+ formats.
+ - Currently, remote addition is only supported via I(flatpakrepo) file URLs.
+ - Existing remotes will not be updated.
+ - See the M(community.general.flatpak) module for managing flatpaks.
+author:
+ - John Kwiatkoski (@JayKayy)
+ - Alexander Bethke (@oolongbrothers)
+requirements:
+ - flatpak
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ executable:
+ description:
+ - The path to the C(flatpak) executable to use.
+ - By default, this module looks for the C(flatpak) executable on the path.
+ type: str
+ default: flatpak
+ flatpakrepo_url:
+ description:
+ - The URL to the I(flatpakrepo) file representing the repository remote to add.
+ - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url)
+ is added using the specified installation C(method).
+ - When used with I(state=absent), this is not required.
+ - Required when I(state=present).
+ type: str
+ method:
+ description:
+ - The installation method to use.
+ - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system)
+ or only for the current C(user).
+ type: str
+ choices: [ system, user ]
+ default: system
+ name:
+ description:
+ - The desired name for the flatpak remote to be registered under on the managed host.
+ - When used with I(state=present), the remote will be added to the managed host under
+ the specified I(name).
+ - When used with I(state=absent) the remote with that name will be removed.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates the desired package state.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ enabled:
+ description:
+ - Indicates whether this remote is enabled.
+ type: bool
+ default: true
+ version_added: 6.4.0
+'''
+
+EXAMPLES = r'''
+- name: Add the Gnome flatpak remote to the system installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: present
+ flatpakrepo_url: https://sdk.gnome.org/gnome-apps.flatpakrepo
+
+- name: Add the flathub flatpak repository remote to the user installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ method: user
+
+- name: Remove the Gnome flatpak remote from the user installation
+ community.general.flatpak_remote:
+ name: gnome
+ state: absent
+ method: user
+
+- name: Remove the flathub remote from the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: absent
+
+- name: Disable the flathub remote in the system installation
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ enabled: false
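+
+# A sketch assuming a non-default binary location; the executable path below is
+# hypothetical.
+- name: Add the flathub remote using a specific flatpak executable
+ community.general.flatpak_remote:
+ name: flathub
+ state: present
+ flatpakrepo_url: https://dl.flathub.org/repo/flathub.flatpakrepo
+ executable: /usr/local/bin/flatpak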
+'''
+
+RETURN = r'''
+command:
+ description: The exact flatpak command that was executed
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo"
+msg:
+ description: Module error message
+ returned: failure
+ type: str
+ sample: "Executable '/usr/local/bin/flatpak' was not found on the system."
+rc:
+ description: Return code from flatpak binary
+ returned: When a flatpak command has been executed
+ type: int
+ sample: 0
+stderr:
+ description: Error output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n"
+stdout:
+ description: Output from flatpak binary
+ returned: When a flatpak command has been executed
+ type: str
+ sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+def add_remote(module, binary, name, flatpakrepo_url, method):
+ """Add a new remote."""
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "remote-add", "--{0}".format(method), name, flatpakrepo_url]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remove_remote(module, binary, name, method):
+ """Remove an existing remote."""
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "remote-delete", "--{0}".format(method), "--force", name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_exists(module, binary, name, method):
+ """Check if the remote exists."""
+ command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return True
+ return False
+
+
+def enable_remote(module, binary, name, method):
+ """Enable a remote."""
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "remote-modify", "--enable", "--{0}".format(method), name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def disable_remote(module, binary, name, method):
+ """Disable a remote."""
+ global result # pylint: disable=global-variable-not-assigned
+ command = [binary, "remote-modify", "--disable", "--{0}".format(method), name]
+ _flatpak_command(module, module.check_mode, command)
+ result['changed'] = True
+
+
+def remote_enabled(module, binary, name, method):
+ """Check if the remote is enabled."""
+ command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)]
+ # The query operation for the remote needs to be run even in check mode
+ output = _flatpak_command(module, False, command)
+ for line in output.splitlines():
+ listed_remote = line.split()
+ if len(listed_remote) == 0:
+ continue
+ if listed_remote[0] == to_native(name):
+ return len(listed_remote) == 1 or "disabled" not in listed_remote[1].split(",")
+ return False
+
+
+def _flatpak_command(module, noop, command):
+ global result # pylint: disable=global-variable-not-assigned
+ result['command'] = ' '.join(command)
+ if noop:
+ result['rc'] = 0
+ return ""
+
+ result['rc'], result['stdout'], result['stderr'] = module.run_command(
+ command, check_rc=True
+ )
+ return result['stdout']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ flatpakrepo_url=dict(type='str'),
+ method=dict(type='str', default='system',
+ choices=['user', 'system']),
+ state=dict(type='str', default="present",
+ choices=['absent', 'present']),
+ enabled=dict(type='bool', default=True),
+ executable=dict(type='str', default="flatpak")
+ ),
+ # This module supports check mode
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ flatpakrepo_url = module.params['flatpakrepo_url']
+ method = module.params['method']
+ state = module.params['state']
+ enabled = module.params['enabled']
+ executable = module.params['executable']
+ binary = module.get_bin_path(executable, None)
+
+ if flatpakrepo_url is None:
+ flatpakrepo_url = ''
+
+ global result
+ result = dict(
+ changed=False
+ )
+
+ # If the binary was not found, fail the operation
+ if not binary:
+ module.fail_json(msg="Executable '%s' was not found on the system." % executable, **result)
+
+ remote_already_exists = remote_exists(module, binary, to_bytes(name), method)
+
+ if state == 'present' and not remote_already_exists:
+ add_remote(module, binary, name, flatpakrepo_url, method)
+ elif state == 'absent' and remote_already_exists:
+ remove_remote(module, binary, name, method)
+
+ if state == 'present':
+ remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method)
+
+ if enabled and not remote_already_enabled:
+ enable_remote(module, binary, name, method)
+ if not enabled and remote_already_enabled:
+ disable_remote(module, binary, name, method)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/flowdock.py b/ansible_collections/community/general/plugins/modules/flowdock.py
new file mode 100644
index 000000000..c78716ba4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/flowdock.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: flowdock
+author: "Matt Coddington (@mcodd)"
+short_description: Send a message to a flowdock
+description:
+ - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ type:
+ type: str
+ description:
+ - Whether to post to 'inbox' or 'chat'.
+ required: true
+ choices: [ "inbox", "chat" ]
+ msg:
+ type: str
+ description:
+ - Content of the message.
+ required: true
+ tags:
+ type: str
+ description:
+ - Tags of the message, separated by commas.
+ required: false
+ external_user_name:
+ type: str
+ description:
+ - (chat only - required) Name of the "user" sending the message.
+ required: false
+ from_address:
+ type: str
+ description:
+ - (inbox only - required) Email address of the message sender.
+ required: false
+ source:
+ type: str
+ description:
+ - (inbox only - required) Human readable identifier of the application that uses the Flowdock API.
+ required: false
+ subject:
+ type: str
+ description:
+ - (inbox only - required) Subject line of the message.
+ required: false
+ from_name:
+ type: str
+ description:
+ - (inbox only) Name of the message sender.
+ required: false
+ reply_to:
+ type: str
+ description:
+ - (inbox only) Email address for replies.
+ required: false
+ project:
+ type: str
+ description:
+ - (inbox only) Human readable identifier for more detailed message categorization.
+ required: false
+ link:
+ type: str
+ description:
+ - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
+ required: false
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+ type: bool
+
+requirements: [ ]
+'''
+
+EXAMPLES = '''
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+
+- name: Send a message to a flowdock
+ community.general.flowdock:
+ type: chat
+ token: AAAAAA
+ external_user_name: testuser
+ msg: test from ansible
+ tags: tag1,tag2,tag3
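+
+# A sketch exercising the optional inbox-only parameters documented above;
+# all values are illustrative.
+- name: Send a team inbox message with optional metadata
+ community.general.flowdock:
+ type: inbox
+ token: AAAAAA
+ from_address: user@example.com
+ source: my cool app
+ msg: test from ansible
+ subject: test subject
+ from_name: Jane Doe
+ reply_to: replies@example.com
+ project: my project
+ link: http://www.example.com/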
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ type=dict(required=True, choices=["inbox", "chat"]),
+ external_user_name=dict(required=False),
+ from_address=dict(required=False),
+ source=dict(required=False),
+ subject=dict(required=False),
+ from_name=dict(required=False),
+ reply_to=dict(required=False),
+ project=dict(required=False),
+ tags=dict(required=False),
+ link=dict(required=False),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ type = module.params["type"]
+ token = module.params["token"]
+ if type == 'inbox':
+ url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token)
+ else:
+ url = "https://api.flowdock.com/v1/messages/chat/%s" % (token)
+
+ params = {}
+
+ # required params
+ params['content'] = module.params["msg"]
+
+ # required params for the 'chat' type
+ if module.params['external_user_name']:
+ if type == 'inbox':
+ module.fail_json(msg="external_user_name is not valid for the 'inbox' type")
+ else:
+ params['external_user_name'] = module.params["external_user_name"]
+ elif type == 'chat':
+ module.fail_json(msg="external_user_name is required for the 'chat' type")
+
+ # required params for the 'inbox' type
+ for item in ['from_address', 'source', 'subject']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+ elif type == 'inbox':
+ module.fail_json(msg="%s is required for the 'inbox' type" % item)
+
+ # optional params
+ if module.params["tags"]:
+ params['tags'] = module.params["tags"]
+
+ # optional params for the 'inbox' type
+ for item in ['from_name', 'reply_to', 'project', 'link']:
+ if module.params[item]:
+ if type == 'chat':
+ module.fail_json(msg="%s is not valid for the 'chat' type" % item)
+ else:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ # Send the data to Flowdock
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
+
+ module.exit_json(changed=True, msg=module.params["msg"])
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gandi_livedns.py b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
new file mode 100644
index 000000000..cc9dd630b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gandi_livedns.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gandi_livedns
+author:
+ - Gregory Thiemonge (@gthiemonge)
+version_added: "2.3.0"
+short_description: Manage Gandi LiveDNS records
+description:
+ - "Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/)."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_key:
+ description:
+ - Account API token.
+ type: str
+ required: true
+ record:
+ description:
+ - Record to add.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether the record(s) should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ ttl:
+ description:
+ - The TTL to give the new record.
+ - Required when I(state=present).
+ type: int
+ type:
+ description:
+ - The type of DNS record to create.
+ type: str
+ required: true
+ values:
+ description:
+ - The record values.
+ - Required when I(state=present).
+ type: list
+ elements: str
+ domain:
+ description:
+ - The name of the Domain to work with (for example, "example.com").
+ required: true
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Create a test A record to point to 127.0.0.1 in the my.com domain
+ community.general.gandi_livedns:
+ domain: my.com
+ record: test
+ type: A
+ values:
+ - 127.0.0.1
+ ttl: 7200
+ api_key: dummyapitoken
+ register: record
+
+- name: Create a mail CNAME record to www.my.com domain
+ community.general.gandi_livedns:
+ domain: my.com
+ type: CNAME
+ record: mail
+ values:
+ - www
+ ttl: 7200
+ api_key: dummyapitoken
+ state: present
+
+- name: Change its TTL
+ community.general.gandi_livedns:
+ domain: my.com
+ type: CNAME
+ record: mail
+ values:
+ - www
+ ttl: 10800
+ api_key: dummyapitoken
+ state: present
+
+- name: Delete the record
+ community.general.gandi_livedns:
+ domain: my.com
+ type: CNAME
+ record: mail
+ api_key: dummyapitoken
+ state: absent
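+
+# A sketch of a multi-value A record, mirroring the RETURN sample below;
+# the addresses are illustrative (RFC 5737 documentation range).
+- name: Create a www A record with two addresses
+ community.general.gandi_livedns:
+ domain: my.com
+ record: www
+ type: A
+ values:
+ - 192.0.2.91
+ - 192.0.2.92
+ ttl: 300
+ api_key: dummyapitoken
+ state: present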
+'''
+
+RETURN = r'''
+record:
+ description: A dictionary containing the record data.
+ returned: success, except on record deletion
+ type: dict
+ contains:
+ values:
+ description: The record content (details depend on record type).
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - 192.0.2.91
+ - 192.0.2.92
+ record:
+ description: The record name.
+ returned: success
+ type: str
+ sample: www
+ ttl:
+ description: The time-to-live for the record.
+ returned: success
+ type: int
+ sample: 300
+ type:
+ description: The record type.
+ returned: success
+ type: str
+ sample: A
+ domain:
+ description: The domain associated with the record.
+ returned: success
+ type: str
+ sample: my.com
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.gandi_livedns_api import GandiLiveDNSAPI
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ record=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ttl=dict(type='int'),
+ type=dict(type='str', required=True),
+ values=dict(type='list', elements='str'),
+ domain=dict(type='str', required=True),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ('state', 'present', ['values', 'ttl']),
+ ],
+ )
+
+ gandi_api = GandiLiveDNSAPI(module)
+
+ if module.params['state'] == 'present':
+ ret, changed = gandi_api.ensure_dns_record(module.params['record'],
+ module.params['type'],
+ module.params['ttl'],
+ module.params['values'],
+ module.params['domain'])
+ else:
+ ret, changed = gandi_api.delete_dns_record(module.params['record'],
+ module.params['type'],
+ module.params['values'],
+ module.params['domain'])
+
+ result = dict(
+ changed=changed,
+ )
+ if ret:
+ result['record'] = gandi_api.build_result(ret,
+ module.params['domain'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gconftool2.py b/ansible_collections/community/general/plugins/modules/gconftool2.py
new file mode 100644
index 000000000..949e92b30
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gconftool2.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Kenneth D. Evensen <kevensen@redhat.com>
+# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Edit GNOME Configurations
+description:
+ - This module allows for the manipulation of GNOME 2 Configuration via
+ gconftool-2. Please see the gconftool-2(1) man pages for more details.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ key:
+ type: str
+ description:
+ - A GConf preference key is an element in the GConf repository
+ that corresponds to an application preference. See man gconftool-2(1).
+ required: true
+ value:
+ type: str
+ description:
+ - Preference keys typically have simple values such as strings,
+ integers, or lists of strings and integers. This is ignored if the state
+ is "get". See man gconftool-2(1).
+ value_type:
+ type: str
+ description:
+ - The type of value being set. This is ignored if the state is "get".
+ choices: [ bool, float, int, string ]
+ state:
+ type: str
+ description:
+ - The action to take upon the key/value.
+ - State C(get) is deprecated and will be removed in community.general 8.0.0. Please use the module M(community.general.gconftool2_info) instead.
+ required: true
+ choices: [ absent, get, present ]
+ config_source:
+ type: str
+ description:
+ - Specify a configuration source to use rather than the default path.
+ See man gconftool-2(1).
+ direct:
+ description:
+ - Access the config database directly, bypassing server. If direct is
+ specified then the config_source must be specified as well.
+ See man gconftool-2(1).
+ type: bool
+ default: false
+'''
+
+EXAMPLES = """
+- name: Change the widget font to "Serif 12"
+ community.general.gconftool2:
+ key: "/desktop/gnome/interface/font_name"
+ value_type: "string"
+ value: "Serif 12"
+ state: present
+"""
+
+RETURN = '''
+ key:
+ description: The key specified in the module parameters
+ returned: success
+ type: str
+ sample: /desktop/gnome/interface/font_name
+ value_type:
+ description: The type of the value that was changed
+ returned: success
+ type: str
+ sample: string
+ value:
+ description: The value of the preference key after executing the module
+ returned: success
+ type: str
+ sample: "Serif 12"
+...
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner
+
+
+class GConftool(StateModuleHelper):
+ change_params = ('value', )
+ diff_params = ('value', )
+ output_params = ('key', 'value_type')
+ facts_params = ('key', 'value_type')
+ facts_name = 'gconftool2'
+ module = dict(
+ argument_spec=dict(
+ key=dict(type='str', required=True, no_log=False),
+ value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
+ value=dict(type='str'),
+ state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
+ direct=dict(type='bool', default=False),
+ config_source=dict(type='str'),
+ ),
+ required_if=[
+ ('state', 'present', ['value', 'value_type']),
+ ('state', 'absent', ['value']),
+ ('direct', True, ['config_source']),
+ ],
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = gconftool2_runner(self.module, check_rc=True)
+ if self.vars.state != "get":
+ if not self.vars.direct and self.vars.config_source is not None:
+ self.module.fail_json(msg='If the "config_source" is specified then "direct" must be "true"')
+
+ self.vars.set('previous_value', self._get(), fact=True)
+ self.vars.set('value_type', self.vars.value_type)
+ self.vars.set_meta('value', initial_value=self.vars.previous_value)
+ self.vars.set('playbook_value', self.vars.value, fact=True)
+
+ def _make_process(self, fail_on_err):
+ def process(rc, out, err):
+ if err and fail_on_err:
+ self.module.fail_json(msg='gconftool-2 failed with error: %s' % (str(err)))
+ self.vars.value = out.rstrip()
+ return self.vars.value
+ return process
+
+ def _get(self):
+ return self.runner("state key", output_process=self._make_process(False)).run(state="get")
+
+ def state_get(self):
+ self.deprecate(
+ msg="State 'get' is deprecated. Please use the module community.general.gconftool2_info instead",
+ version="8.0.0", collection_name="community.general"
+ )
+
+ def state_absent(self):
+ with self.runner("state key", output_process=self._make_process(False)) as ctx:
+ ctx.run()
+ self.vars.set('new_value', None, fact=True)
+
+ def state_present(self):
+ with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx:
+ self.vars.set('new_value', ctx.run(), fact=True)
+
+
+def main():
+ GConftool.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gconftool2_info.py b/ansible_collections/community/general/plugins/modules/gconftool2_info.py
new file mode 100644
index 000000000..282065b95
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gconftool2_info.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gconftool2_info
+author:
+ - "Alexei Znamensky (@russoz)"
+short_description: Retrieve GConf configurations
+version_added: 5.1.0
+description:
+ - This module allows retrieving application preferences from the GConf database, with the help of C(gconftool-2).
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ key:
+ description:
+ - The key name for an element in the GConf database.
+ type: str
+ required: true
+notes:
+ - See man gconftool-2(1) for more details.
+seealso:
+ - name: gconf repository (archived)
+ description: Git repository for the project. It is an archived project, so the repository is read-only.
+ link: https://gitlab.gnome.org/Archive/gconf
+'''
+
+EXAMPLES = """
+- name: Get value for a certain key in the database.
+ community.general.gconftool2_info:
+ key: /desktop/gnome/background/picture_filename
+ register: result
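+
+# A sketch of consuming the registered result from the task above; the debug
+# task is illustrative.
+- name: Show the retrieved value
+ ansible.builtin.debug:
+ msg: "Background picture is {{ result.value }}"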
+"""
+
+RETURN = '''
+ value:
+ description:
+ - The value of the property.
+ returned: success
+ type: str
+ sample: Monospace 10
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner
+
+
+class GConftoolInfo(ModuleHelper):
+ output_params = ['key']
+ module = dict(
+ argument_spec=dict(
+ key=dict(type='str', required=True, no_log=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = gconftool2_runner(self.module, check_rc=True)
+
+ def __run__(self):
+ with self.runner.context(args_order=["state", "key"]) as ctx:
+ rc, out, err = ctx.run(state="get")
+ self.vars.value = None if err and not out else out.rstrip()
+
+
+def main():
+ GConftoolInfo.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gem.py b/ansible_collections/community/general/plugins/modules/gem.py
new file mode 100644
index 000000000..4bc99d39e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gem.py
@@ -0,0 +1,353 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gem
+short_description: Manage Ruby gems
+description:
+ - Manage installation and uninstallation of Ruby gems.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - The name of the gem to be managed.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the gem. C(latest) ensures that the latest version is installed.
+ required: false
+ choices: [present, absent, latest]
+ default: present
+ gem_source:
+ type: path
+ description:
+ - The path to a local gem used as installation source.
+ required: false
+ include_dependencies:
+ description:
+ - Whether to include dependencies or not.
+ required: false
+ type: bool
+ default: true
+ repository:
+ type: str
+ description:
+ - The repository from which the gem will be installed.
+ required: false
+ aliases: [source]
+ user_install:
+ description:
+ - Install gem in user's local gems cache or for all users.
+ required: false
+ type: bool
+ default: true
+ executable:
+ type: path
+ description:
+ - Override the path to the gem executable.
+ required: false
+ install_dir:
+ type: path
+ description:
+ - Install the gems into a specific directory.
+ These gems will be independent from the globally installed ones.
+ Specifying this requires user_install to be false.
+ required: false
+ bindir:
+ type: path
+ description:
+ - Install executables into a specific directory.
+ version_added: 3.3.0
+ norc:
+ type: bool
+ default: true
+ description:
+ - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2.
+ - The default changed from C(false) to C(true) in community.general 6.0.0.
+ version_added: 3.3.0
+ env_shebang:
+ description:
+ - Rewrite the shebang line on installed scripts to use /usr/bin/env.
+ required: false
+ default: false
+ type: bool
+ version:
+ type: str
+ description:
+ - Version of the gem to be installed/removed.
+ required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: false
+ type: bool
+ include_doc:
+ description:
+ - Install with or without docs.
+ required: false
+ default: false
+ type: bool
+ build_flags:
+ type: str
+ description:
+ - Allow adding build flags for gem compilation.
+ required: false
+ force:
+ description:
+ - Force gem to (un-)install, bypassing dependency checks.
+ required: false
+ default: false
+ type: bool
+author:
+ - "Ansible Core Team"
+ - "Johan Wiren (@johanwiren)"
+'''
+
+EXAMPLES = '''
+- name: Install version 1.0 of vagrant
+ community.general.gem:
+ name: vagrant
+ version: 1.0
+ state: present
+
+- name: Install latest available version of rake
+ community.general.gem:
+ name: rake
+ state: latest
+
+- name: Install rake version 1.0 from a local gem on disk
+ community.general.gem:
+ name: rake
+ gem_source: /path/to/gems/rake-1.0.gem
+ state: present
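+
+# A sketch combining install_dir with user_install=false, as the install_dir
+# description above requires; the directory is illustrative.
+- name: Install rake into an isolated gem directory
+ community.general.gem:
+ name: rake
+ state: present
+ user_install: false
+ install_dir: /opt/mygems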
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_rubygems_path(module):
+ if module.params['executable']:
+ result = module.params['executable'].split(' ')
+ else:
+ result = [module.get_bin_path('gem', True)]
+ return result
+
+
+def get_rubygems_version(module):
+ if hasattr(get_rubygems_version, "ver"):
+ return get_rubygems_version.ver
+
+ cmd = get_rubygems_path(module) + ['--version']
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+
+ match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
+ if not match:
+ return None
+
+ ver = tuple(int(x) for x in match.groups())
+ get_rubygems_version.ver = ver
+
+ return ver
+
+
+def get_rubygems_environ(module):
+ if module.params['install_dir']:
+ return {'GEM_HOME': module.params['install_dir']}
+ return None
+
+
+def get_installed_versions(module, remote=False):
+
+ cmd = get_rubygems_path(module)
+ cmd.append('query')
+ cmd.extend(common_opts(module))
+ if remote:
+ cmd.append('--remote')
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ cmd.append('-n')
+ cmd.append('^%s$' % module.params['name'])
+
+ environ = get_rubygems_environ(module)
+ (rc, out, err) = module.run_command(cmd, environ_update=environ, check_rc=True)
+ installed_versions = []
+ for line in out.splitlines():
+ match = re.match(r"\S+\s+\((?:default: )?(.+)\)", line)
+ if match:
+ versions = match.group(1)
+ for version in versions.split(', '):
+ installed_versions.append(version.split()[0])
+ return installed_versions
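+
+# Illustration: `gem query -n ^rake$` prints lines such as
+# rake (13.0.6, 12.3.3)
+# or, for bundled gems, "rake (default: 12.3.3)"; the regex above captures the
+# parenthesized version list and the loop flattens it to ['13.0.6', '12.3.3'].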
+
+
+def exists(module):
+ if module.params['state'] == 'latest':
+ remoteversions = get_installed_versions(module, remote=True)
+ if remoteversions:
+ module.params['version'] = remoteversions[0]
+ installed_versions = get_installed_versions(module)
+ if module.params['version']:
+ if module.params['version'] in installed_versions:
+ return True
+ else:
+ if installed_versions:
+ return True
+ return False
+
+
+def common_opts(module):
+ opts = []
+ ver = get_rubygems_version(module)
+ if module.params['norc'] and ver and ver >= (2, 5, 2):
+ opts.append('--norc')
+ return opts
+
+
+def uninstall(module):
+
+ if module.check_mode:
+ return
+ cmd = get_rubygems_path(module)
+ environ = get_rubygems_environ(module)
+ cmd.append('uninstall')
+ cmd.extend(common_opts(module))
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+
+ if module.params['bindir']:
+ cmd.extend(['--bindir', module.params['bindir']])
+
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ else:
+ cmd.append('--all')
+ cmd.append('--executable')
+ if module.params['force']:
+ cmd.append('--force')
+ cmd.append(module.params['name'])
+ module.run_command(cmd, environ_update=environ, check_rc=True)
+
+
+def install(module):
+
+ if module.check_mode:
+ return
+
+ ver = get_rubygems_version(module)
+
+ cmd = get_rubygems_path(module)
+ cmd.append('install')
+ cmd.extend(common_opts(module))
+ if module.params['version']:
+ cmd.extend(['--version', module.params['version']])
+ if module.params['repository']:
+ cmd.extend(['--source', module.params['repository']])
+ if not module.params['include_dependencies']:
+ cmd.append('--ignore-dependencies')
+ else:
+ if ver and ver < (2, 0, 0):
+ cmd.append('--include-dependencies')
+ if module.params['user_install']:
+ cmd.append('--user-install')
+ else:
+ cmd.append('--no-user-install')
+ if module.params['install_dir']:
+ cmd.extend(['--install-dir', module.params['install_dir']])
+ if module.params['bindir']:
+ cmd.extend(['--bindir', module.params['bindir']])
+ if module.params['pre_release']:
+ cmd.append('--pre')
+ if not module.params['include_doc']:
+ if ver and ver < (2, 0, 0):
+ cmd.append('--no-rdoc')
+ cmd.append('--no-ri')
+ else:
+ cmd.append('--no-document')
+ if module.params['env_shebang']:
+ cmd.append('--env-shebang')
+ cmd.append(module.params['gem_source'])
+ if module.params['build_flags']:
+ cmd.extend(['--', module.params['build_flags']])
+ if module.params['force']:
+ cmd.append('--force')
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ executable=dict(required=False, type='path'),
+ gem_source=dict(required=False, type='path'),
+ include_dependencies=dict(required=False, default=True, type='bool'),
+ name=dict(required=True, type='str'),
+ repository=dict(required=False, aliases=['source'], type='str'),
+ state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'),
+ user_install=dict(required=False, default=True, type='bool'),
+ install_dir=dict(required=False, type='path'),
+ bindir=dict(type='path'),
+ norc=dict(type='bool', default=True),
+ pre_release=dict(required=False, default=False, type='bool'),
+ include_doc=dict(required=False, default=False, type='bool'),
+ env_shebang=dict(required=False, default=False, type='bool'),
+ version=dict(required=False, type='str'),
+ build_flags=dict(required=False, type='str'),
+ force=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']],
+ )
+
+ if module.params['version'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot specify version when state=latest")
+ if module.params['gem_source'] and module.params['state'] == 'latest':
+ module.fail_json(msg="Cannot maintain state=latest when installing from local source")
+ if module.params['user_install'] and module.params['install_dir']:
+ module.fail_json(msg="install_dir requires user_install=false")
+
+ if not module.params['gem_source']:
+ module.params['gem_source'] = module.params['name']
+
+ changed = False
+
+ if module.params['state'] in ['present', 'latest']:
+ if not exists(module):
+ install(module)
+ changed = True
+ elif module.params['state'] == 'absent':
+ if exists(module):
+ uninstall(module)
+ changed = True
+
+ result = {}
+ result['name'] = module.params['name']
+ result['state'] = module.params['state']
+ if module.params['version']:
+ result['version'] = module.params['version']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/git_config.py b/ansible_collections/community/general/plugins/modules/git_config.py
new file mode 100644
index 000000000..d67312174
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/git_config.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Marius Gedminas <marius@pov.lt>
+# Copyright (c) 2016, Matthew Gamble <git@matthewgamble.net>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: git_config
+author:
+ - Matthew Gamble (@djmattyg007)
+ - Marius Gedminas (@mgedmin)
+requirements: ['git']
+short_description: Read and write git configuration
+description:
+ - The C(git_config) module changes git configuration by invoking 'git config'.
+ This is needed if you do not want to use M(ansible.builtin.template) for the entire git
+ config file (for example because you need to change just C(user.email) in
+ /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or
+ do not work correctly in check mode.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ list_all:
+ description:
+ - List all settings (optionally limited to a given I(scope)).
+ type: bool
+ default: false
+ name:
+ description:
+ - The name of the setting. If no value is supplied, the value will
+ be read from the config if it has been set.
+ type: str
+ repo:
+ description:
+ - Path to a git repository for reading and writing values from a
+ specific repo.
+ type: path
+ file:
+ description:
+ - Path to an ad hoc git configuration file to be managed using the C(file) scope.
+ type: path
+ version_added: 2.0.0
+ scope:
+ description:
+ - Specify which scope to read/set values from.
+ - This is required when setting config values.
+ - If this is set to C(local), you must also specify the C(repo) parameter.
+ - If this is set to C(file), you must also specify the C(file) parameter.
+ - It defaults to system only when not using I(list_all)=C(true).
+ choices: [ "file", "local", "global", "system" ]
+ type: str
+ state:
+ description:
+ - "Indicates the setting should be set/unset.
+ This parameter has higher precedence than I(value) parameter:
+ when I(state)=absent and I(value) is defined, I(value) is discarded."
+ choices: [ 'present', 'absent' ]
+ default: 'present'
+ type: str
+ value:
+ description:
+ - When specifying the name of a single setting, supply a value to
+ set that setting to the given value.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ value: commit
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: alias.st
+ scope: global
+ value: status
+
+- name: Remove a setting from ~/.gitconfig
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+ state: absent
+
+- name: Add a setting to ~/.gitconfig
+ community.general.git_config:
+ name: core.editor
+ scope: global
+ value: vim
+
+- name: Add a setting system-wide
+ community.general.git_config:
+ name: alias.remotev
+ scope: system
+ value: remote -v
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: alias.diffc
+ value: diff --cached
+
+- name: Add a setting to a system scope (default)
+ community.general.git_config:
+ name: color.ui
+ value: auto
+
+- name: Make etckeeper not complain when it is invoked by cron
+ community.general.git_config:
+ name: user.email
+ repo: /etc
+ scope: local
+ value: 'root@{{ ansible_fqdn }}'
+
+- name: Read individual values from git config
+ community.general.git_config:
+ name: alias.ci
+ scope: global
+
+- name: Scope system is also assumed when reading values, unless list_all=true
+ community.general.git_config:
+ name: alias.diffc
+
+- name: Read all values from git config
+ community.general.git_config:
+ list_all: true
+ scope: global
+
+- name: When list_all is yes and no scope is specified, you get configuration from all scopes
+ community.general.git_config:
+ list_all: true
+
+- name: Specify a repository to include local settings
+ community.general.git_config:
+ list_all: true
+ repo: /path/to/repo.git
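+
+# A sketch of the C(file) scope, which requires the I(file) parameter; the
+# configuration file path below is illustrative.
+- name: Set a value in an ad hoc configuration file
+ community.general.git_config:
+ name: alias.st
+ scope: file
+ file: /tmp/my.gitconfig
+ value: status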
+'''
+
+RETURN = '''
+---
+config_value:
+ description: When I(list_all=false) and value is not set, a string containing the value of the setting in name
+ returned: success
+ type: str
+ sample: "vim"
+
+config_values:
+ description: When I(list_all=true), a dict containing key/value pairs of multiple configuration settings
+ returned: success
+ type: dict
+ sample:
+ core.editor: "vim"
+ color.ui: "auto"
+ alias.diffc: "diff --cached"
+ alias.remotev: "remote -v"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ list_all=dict(required=False, type='bool', default=False),
+ name=dict(type='str'),
+ repo=dict(type='path'),
+ file=dict(type='path'),
+ scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']),
+ state=dict(required=False, type='str', default='present', choices=['present', 'absent']),
+ value=dict(required=False),
+ ),
+ mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']],
+ required_if=[
+ ('scope', 'local', ['repo']),
+ ('scope', 'file', ['file'])
+ ],
+ required_one_of=[['list_all', 'name']],
+ supports_check_mode=True,
+ )
+ git_path = module.get_bin_path('git', True)
+
+ params = module.params
+ # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
+ # Set the locale to C to ensure consistent messages.
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if params['name']:
+ name = params['name']
+ else:
+ name = None
+
+ if params['scope']:
+ scope = params['scope']
+ elif params['list_all']:
+ scope = None
+ else:
+ scope = 'system'
+
+ if params['state'] == 'absent':
+ unset = 'unset'
+ params['value'] = None
+ else:
+ unset = None
+
+ if params['value']:
+ new_value = params['value']
+ else:
+ new_value = None
+
+ args = [git_path, "config", "--includes"]
+ if params['list_all']:
+ args.append('-l')
+ if scope == 'file':
+ args.append('-f')
+ args.append(params['file'])
+ elif scope:
+ args.append("--" + scope)
+ if name:
+ args.append(name)
+
+ if scope == 'local':
+ dir = params['repo']
+ elif params['list_all'] and params['repo']:
+ # Include local settings from a specific repo when listing all available settings
+ dir = params['repo']
+ else:
+ # Run from root directory to avoid accidentally picking up any local config settings
+ dir = "/"
+
+ (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False)
+ if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
+ # This just means nothing has been set at the given scope
+ module.exit_json(changed=False, msg='', config_values={})
+ elif rc >= 2:
+ # If the return code is 1, it just means the option hasn't been set yet, which is fine.
+ module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
+
+ if params['list_all']:
+ values = out.rstrip().splitlines()
+ config_values = {}
+ for value in values:
+ k, v = value.split('=', 1)
+ config_values[k] = v
+ module.exit_json(changed=False, msg='', config_values=config_values)
+ elif not new_value and not unset:
+ module.exit_json(changed=False, msg='', config_value=out.rstrip())
+ elif unset and not out:
+ module.exit_json(changed=False, msg='no setting to unset')
+ else:
+ old_value = out.rstrip()
+ if old_value == new_value:
+ module.exit_json(changed=False, msg="")
+
+ if not module.check_mode:
+ if unset:
+ args.insert(len(args) - 1, "--" + unset)
+ cmd = args
+ else:
+ cmd = args + [new_value]
+ (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False)
+ if err:
+ module.fail_json(rc=rc, msg=err, cmd=cmd)
+
+ module.exit_json(
+ msg='setting changed',
+ diff=dict(
+ before_header=' '.join(args),
+ before=old_value + "\n",
+ after_header=' '.join(args),
+ after=(new_value or '') + "\n"
+ ),
+ changed=True
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_deploy_key.py b/ansible_collections/community/general/plugins/modules/github_deploy_key.py
new file mode 100644
index 000000000..322650bf7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_deploy_key.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_deploy_key
+author: "Ali (@bincyber)"
+short_description: Manages deploy keys for GitHub repositories
+description:
+ - "Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password,
+ username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin
+ rights on the repository are required."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ github_url:
+ description:
+ - The base URL of the GitHub API.
+ required: false
+ type: str
+ version_added: '0.2.0'
+ default: https://api.github.com
+ owner:
+ description:
+ - The name of the individual account or organization that owns the GitHub repository.
+ required: true
+ aliases: [ 'account', 'organization' ]
+ type: str
+ repo:
+ description:
+ - The name of the GitHub repository.
+ required: true
+ aliases: [ 'repository' ]
+ type: str
+ name:
+ description:
+ - The name for the deploy key.
+ required: true
+ aliases: [ 'title', 'label' ]
+ type: str
+ key:
+ description:
+ - The SSH public key to add to the repository as a deploy key.
+ required: true
+ type: str
+ read_only:
+ description:
+ - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write.
+ type: bool
+ default: true
+ state:
+ description:
+ - The state of the deploy key.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ force:
+ description:
+ - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title.
+ type: bool
+ default: false
+ username:
+ description:
+ - The username to authenticate with. Should not be set when using a personal access token.
+ type: str
+ password:
+ description:
+ - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination.
+ type: str
+ token:
+ description:
+ - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password).
+ type: str
+ otp:
+ description:
+ - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password).
+ type: int
+notes:
+ - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/."
+'''
+
+EXAMPLES = '''
+- name: Add a new read-only deploy key to a GitHub repository using basic authentication
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: true
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Remove an existing deploy key from a GitHub repository
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ force: true
+ username: "johndoe"
+ password: "supersecretpassword"
+ state: absent
+
+- name: Add a new deploy key to a GitHub repository, replace an existing key, use an OAuth2 token to authenticate
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "new-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ force: true
+ token: "ABAQDAwXxn7kIMNWzcDfo..."
+
+- name: Re-add a deploy key to a GitHub repository but with a different name
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repository: "example"
+ name: "replace-deploy-key"
+ key: "{{ lookup('file', '~/.ssh/github.pub') }}"
+ username: "johndoe"
+ password: "supersecretpassword"
+
+- name: Add a new deploy key to a GitHub repository using 2FA
+ community.general.github_deploy_key:
+ owner: "johndoe"
+ repo: "example"
+ name: "new-deploy-key-2"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ username: "johndoe"
+ password: "supersecretpassword"
+ otp: 123456
+
+- name: Add a read-only deploy key to a repository hosted on GitHub Enterprise
+ community.general.github_deploy_key:
+ github_url: "https://api.example.com"
+ owner: "janedoe"
+ repo: "example"
+ name: "new-deploy-key"
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAwXxn7kIMNWzcDfou..."
+ read_only: true
+ username: "janedoe"
+ password: "supersecretpassword"
+'''
+
+RETURN = '''
+msg:
+    description: The status message describing what occurred.
+    returned: always
+    type: str
+    sample: "Deploy key added successfully"
+
+http_status_code:
+    description: The HTTP status code returned by the GitHub API.
+    returned: failed
+    type: int
+    sample: 400
+
+error:
+    description: The error message returned by the GitHub API.
+    returned: failed
+    type: str
+    sample: "key is already in use"
+
+id:
+    description: The key identifier assigned by GitHub for the deploy key.
+    returned: changed
+    type: int
+    sample: 24381901
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from re import findall
+
+
+class GithubDeployKey(object):
+ def __init__(self, module):
+ self.module = module
+
+ self.github_url = self.module.params['github_url']
+ self.name = module.params['name']
+ self.key = module.params['key']
+ self.state = module.params['state']
+ self.read_only = module.params.get('read_only', True)
+ self.force = module.params.get('force', False)
+ self.username = module.params.get('username', None)
+ self.password = module.params.get('password', None)
+ self.token = module.params.get('token', None)
+ self.otp = module.params.get('otp', None)
+
+ @property
+ def url(self):
+ owner = self.module.params['owner']
+ repo = self.module.params['repo']
+ return "{0}/repos/{1}/{2}/keys".format(self.github_url, owner, repo)
+
+ @property
+ def headers(self):
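+        # fetch_url() reads url_username/url_password/force_basic_auth from
+        # the module params, so basic auth is configured here as a side
+        # effect rather than through the returned headers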
+ if self.username is not None and self.password is not None:
+ self.module.params['url_username'] = self.username
+ self.module.params['url_password'] = self.password
+ self.module.params['force_basic_auth'] = True
+ if self.otp is not None:
+ return {"X-GitHub-OTP": self.otp}
+ elif self.token is not None:
+ return {"Authorization": "token {0}".format(self.token)}
+ else:
+ return None
+
+ def paginate(self, url):
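+        # GitHub paginates list endpoints and advertises the next page in
+        # the Link response header as <url>; rel="next"; keep following
+        # those links until none is returned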
+ while url:
+ resp, info = fetch_url(self.module, url, headers=self.headers, method="GET")
+
+ if info["status"] == 200:
+ yield self.module.from_json(resp.read())
+
+ links = {}
+ for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]):
+ links[y] = x
+
+ url = links.get('next')
+ else:
+ self.handle_error(method="GET", info=info)
+
+ def get_existing_key(self):
+ for keys in self.paginate(self.url):
+ if keys:
+ for i in keys:
+ existing_key_id = str(i["id"])
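+                    # keys returned by GitHub contain only the type and
+                    # base64 data (no comment), so compare against the
+                    # first two fields of the configured key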
+ if i["key"].split() == self.key.split()[:2]:
+ return existing_key_id
+ elif i['title'] == self.name and self.force:
+ return existing_key_id
+ else:
+ return None
+
+ def add_new_key(self):
+ request_body = {"title": self.name, "key": self.key, "read_only": self.read_only}
+
+ resp, info = fetch_url(self.module, self.url, data=self.module.jsonify(request_body), headers=self.headers, method="POST", timeout=30)
+
+ status_code = info["status"]
+
+ if status_code == 201:
+ response_body = self.module.from_json(resp.read())
+ key_id = response_body["id"]
+ self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id)
+ elif status_code == 422:
+ self.module.exit_json(changed=False, msg="Deploy key already exists")
+ else:
+ self.handle_error(method="POST", info=info)
+
+ def remove_existing_key(self, key_id):
+ resp, info = fetch_url(self.module, "{0}/{1}".format(self.url, key_id), headers=self.headers, method="DELETE")
+
+ status_code = info["status"]
+
+ if status_code == 204:
+ if self.state == 'absent':
+ self.module.exit_json(changed=True, msg="Deploy key successfully deleted", id=key_id)
+ else:
+ self.handle_error(method="DELETE", info=info, key_id=key_id)
+
+ def handle_error(self, method, info, key_id=None):
+ status_code = info['status']
+        body = info.get('body')
+        # the response body may be empty (e.g. on connection failures)
+        err = self.module.from_json(body)['message'] if body else None
+
+ if status_code == 401:
+ self.module.fail_json(msg="Failed to connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err)
+ elif status_code == 404:
+ self.module.fail_json(msg="GitHub repository does not exist", http_status_code=status_code, error=err)
+ else:
+ if method == "GET":
+ self.module.fail_json(msg="Failed to retrieve existing deploy keys", http_status_code=status_code, error=err)
+ elif method == "POST":
+ self.module.fail_json(msg="Failed to add deploy key", http_status_code=status_code, error=err)
+ elif method == "DELETE":
+ self.module.fail_json(msg="Failed to delete existing deploy key", id=key_id, http_status_code=status_code, error=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ github_url=dict(required=False, type='str', default="https://api.github.com"),
+ owner=dict(required=True, type='str', aliases=['account', 'organization']),
+ repo=dict(required=True, type='str', aliases=['repository']),
+ name=dict(required=True, type='str', aliases=['title', 'label']),
+ key=dict(required=True, type='str', no_log=False),
+ read_only=dict(required=False, type='bool', default=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ force=dict(required=False, type='bool', default=False),
+ username=dict(required=False, type='str'),
+ password=dict(required=False, type='str', no_log=True),
+ otp=dict(required=False, type='int', no_log=True),
+ token=dict(required=False, type='str', no_log=True)
+ ),
+ mutually_exclusive=[
+ ['password', 'token']
+ ],
+ required_together=[
+ ['username', 'password'],
+ ['otp', 'username', 'password']
+ ],
+ required_one_of=[
+ ['username', 'token']
+ ],
+ supports_check_mode=True,
+ )
+
+ deploy_key = GithubDeployKey(module)
+
+    if module.check_mode:
+        key_id = deploy_key.get_existing_key()
+        if deploy_key.state == "present" and key_id is None:
+            module.exit_json(changed=True)
+        elif deploy_key.state == "present" and key_id is not None:
+            module.exit_json(changed=False)
+        else:
+            # state == "absent": report what would change without falling
+            # through to the code below, which performs real DELETE requests
+            module.exit_json(changed=key_id is not None)
+
+ # to forcefully modify an existing key, the existing key must be deleted first
+ if deploy_key.state == 'absent' or deploy_key.force:
+ key_id = deploy_key.get_existing_key()
+
+ if key_id is not None:
+ deploy_key.remove_existing_key(key_id)
+ elif deploy_key.state == 'absent':
+ module.exit_json(changed=False, msg="Deploy key does not exist")
+
+ if deploy_key.state == "present":
+ deploy_key.add_new_key()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_issue.py b/ansible_collections/community/general/plugins/modules/github_issue.py
new file mode 100644
index 000000000..4e10e9f92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_issue.py
@@ -0,0 +1,124 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-18, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_issue
+short_description: View GitHub issue
+description:
+ - View GitHub issue for a given repository and organization.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ repo:
+ description:
+ - Name of repository from which issue needs to be retrieved.
+ required: true
+ type: str
+ organization:
+ description:
+ - Name of the GitHub organization in which the repository is hosted.
+ required: true
+ type: str
+ issue:
+ description:
+ - Issue number for which information is required.
+ required: true
+ type: int
+ action:
+ description:
+      - Get various details about the issue depending upon the action specified.
+ default: 'get_status'
+ choices:
+ - 'get_status'
+ type: str
+author:
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+RETURN = '''
+issue_status:
+ description: State of the GitHub issue
+ type: str
+ returned: success
+ sample: open, closed
+'''
+
+EXAMPLES = '''
+- name: Check if GitHub issue is closed or not
+ community.general.github_issue:
+ organization: ansible
+ repo: ansible
+ issue: 23642
+ action: get_status
+ register: r
+
+- name: Take action depending upon issue status
+ ansible.builtin.debug:
+ msg: Do something when issue 23642 is open
+ when: r.issue_status == 'open'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ organization=dict(required=True),
+ repo=dict(required=True),
+ issue=dict(type='int', required=True),
+ action=dict(choices=['get_status'], default='get_status'),
+ ),
+ supports_check_mode=True,
+ )
+
+ organization = module.params['organization']
+ repo = module.params['repo']
+ issue = module.params['issue']
+ action = module.params['action']
+
+ result = dict()
+
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+
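+    # the REST API serves both issues and pull requests from
+    # /repos/<owner>/<repo>/issues/<number>; the returned 'state'
+    # field is either 'open' or 'closed'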
+ url = "https://api.github.com/repos/%s/%s/issues/%s" % (organization, repo, issue)
+
+ response, info = fetch_url(module, url, headers=headers)
+ if not (200 <= info['status'] < 400):
+ if info['status'] == 404:
+ module.fail_json(msg="Failed to find issue %s" % issue)
+ module.fail_json(msg="Failed to send request to %s: %s" % (url, info['msg']))
+
+ gh_obj = json.loads(response.read())
+
+ if action == 'get_status' or action is None:
+ if module.check_mode:
+ result.update(changed=True)
+ else:
+ result.update(changed=True, issue_status=gh_obj['state'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_key.py b/ansible_collections/community/general/plugins/modules/github_key.py
new file mode 100644
index 000000000..683a963a7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_key.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: github_key
+short_description: Manage GitHub access keys
+description:
+ - Creates, removes, or updates GitHub access keys.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ description:
+ - GitHub Access Token with permission to list and create public keys.
+ required: true
+ type: str
+ name:
+ description:
+      - SSH key name.
+ required: true
+ type: str
+ pubkey:
+ description:
+ - SSH public key value. Required when I(state=present).
+ type: str
+ state:
+ description:
+ - Whether to remove a key, ensure that it exists, or update its value.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ force:
+ description:
+      - The default is C(true), which will replace the existing remote key
+        if it is different from C(pubkey). If C(false), the key will only be
+        set if no key with the given I(name) exists.
+ type: bool
+ default: true
+
+author: Robert Estelle (@erydo)
+'''
+
+RETURN = '''
+deleted_keys:
+ description: An array of key objects that were deleted. Only present on state=absent
+ type: list
+ returned: When state=absent
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}]
+matching_keys:
+ description: An array of keys matching the specified name. Only present on state=present
+ type: list
+ returned: When state=present
+ sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}]
+key:
+ description: Metadata about the key just created. Only present on state=present
+ type: dict
+ returned: success
+ sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}
+'''
+
+EXAMPLES = '''
+- name: Read SSH public key to authorize
+ ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub
+ register: ssh_pub_key
+
+- name: Authorize key with GitHub
+ local_action:
+ module: github_key
+ name: Access Key for Some Machine
+ token: '{{ github_access_token }}'
+ pubkey: '{{ ssh_pub_key.stdout }}'
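+
+# A minimal, equivalent sketch using the fully qualified module name and
+# delegate_to instead of the legacy local_action shorthand:
+- name: Authorize key with GitHub (FQCN form)
+  community.general.github_key:
+    name: Access Key for Some Machine
+    token: '{{ github_access_token }}'
+    pubkey: '{{ ssh_pub_key.stdout }}'
+  delegate_to: localhost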
+'''
+
+
+import json
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+API_BASE = 'https://api.github.com'
+
+
+class GitHubResponse(object):
+ def __init__(self, response, info):
+ self.content = response.read()
+ self.info = info
+
+ def json(self):
+ return json.loads(self.content)
+
+ def links(self):
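+        # parse RFC 5988-style Link headers of the form
+        # <https://api.github.com/user/keys?page=2>; rel="next"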
+ links = {}
+ if 'link' in self.info:
+ link_header = self.info['link']
+ matches = re.findall('<([^>]+)>; rel="([^"]+)"', link_header)
+ for url, rel in matches:
+ links[rel] = url
+ return links
+
+
+class GitHubSession(object):
+ def __init__(self, module, token):
+ self.module = module
+ self.token = token
+
+ def request(self, method, url, data=None):
+ headers = {
+ 'Authorization': 'token %s' % self.token,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/vnd.github.v3+json',
+ }
+ response, info = fetch_url(
+ self.module, url, method=method, data=data, headers=headers)
+ if not (200 <= info['status'] < 400):
+ self.module.fail_json(
+                msg=("failed to send request %s to %s: %s"
+ % (method, url, info['msg'])))
+ return GitHubResponse(response, info)
+
+
+def get_all_keys(session):
+ url = API_BASE + '/user/keys'
+ result = []
+ while url:
+ r = session.request('GET', url)
+ result.extend(r.json())
+ url = r.links().get('next')
+ return result
+
+
+def create_key(session, name, pubkey, check_mode):
+ if check_mode:
+ from datetime import datetime
+ now = datetime.utcnow()
+ return {
+ 'id': 0,
+ 'key': pubkey,
+ 'title': name,
+ 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY',
+ 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'),
+ 'read_only': False,
+ 'verified': False
+ }
+ else:
+ return session.request(
+ 'POST',
+ API_BASE + '/user/keys',
+ data=json.dumps({'title': name, 'key': pubkey})).json()
+
+
+def delete_keys(session, to_delete, check_mode):
+ if check_mode:
+ return
+
+ for key in to_delete:
+ session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"])
+
+
+def ensure_key_absent(session, name, check_mode):
+ to_delete = [key for key in get_all_keys(session) if key['title'] == name]
+ delete_keys(session, to_delete, check_mode=check_mode)
+
+ return {'changed': bool(to_delete),
+ 'deleted_keys': to_delete}
+
+
+def ensure_key_present(module, session, name, pubkey, force, check_mode):
+ all_keys = get_all_keys(session)
+ matching_keys = [k for k in all_keys if k['title'] == name]
+ deleted_keys = []
+
+ new_signature = pubkey.split(' ')[1]
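+    # an OpenSSH public key line is "<type> <base64-data> [comment]";
+    # field 1 is the base64 data, which uniquely identifies the key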
+ for key in all_keys:
+ existing_signature = key['key'].split(' ')[1]
+ if new_signature == existing_signature and key['title'] != name:
+ module.fail_json(msg=(
+ "another key with the same content is already registered "
+ "under the name |{0}|").format(key['title']))
+
+ if matching_keys and force and matching_keys[0]['key'].split(' ')[1] != new_signature:
+ delete_keys(session, matching_keys, check_mode=check_mode)
+ (deleted_keys, matching_keys) = (matching_keys, [])
+
+ if not matching_keys:
+ key = create_key(session, name, pubkey, check_mode=check_mode)
+ else:
+ key = matching_keys[0]
+
+ return {
+ 'changed': bool(deleted_keys or not matching_keys),
+ 'deleted_keys': deleted_keys,
+ 'matching_keys': matching_keys,
+ 'key': key
+ }
+
+
+def main():
+ argument_spec = {
+ 'token': {'required': True, 'no_log': True},
+ 'name': {'required': True},
+ 'pubkey': {},
+ 'state': {'choices': ['present', 'absent'], 'default': 'present'},
+ 'force': {'default': True, 'type': 'bool'},
+ }
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ token = module.params['token']
+ name = module.params['name']
+ state = module.params['state']
+ force = module.params['force']
+ pubkey = module.params.get('pubkey')
+
+ if pubkey:
+ pubkey_parts = pubkey.split(' ')
+ # Keys consist of a protocol, the key data, and an optional comment.
+ if len(pubkey_parts) < 2:
+ module.fail_json(msg='"pubkey" parameter has an invalid format')
+ elif state == 'present':
+ module.fail_json(msg='"pubkey" is required when state=present')
+
+ session = GitHubSession(module, token)
+ if state == 'present':
+ result = ensure_key_present(module, session, name, pubkey, force=force,
+ check_mode=module.check_mode)
+ elif state == 'absent':
+ result = ensure_key_absent(session, name, check_mode=module.check_mode)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_release.py b/ansible_collections/community/general/plugins/modules/github_release.py
new file mode 100644
index 000000000..3ddd6c882
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_release.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Team
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: github_release
+short_description: Interact with GitHub Releases
+description:
+  - Fetch metadata about GitHub Releases.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ description:
+ - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password).
+ type: str
+ user:
+ description:
+ - The GitHub account that owns the repository
+ type: str
+ required: true
+ password:
+ description:
+ - The GitHub account password for the user. Mutually exclusive with C(token).
+ type: str
+ repo:
+ description:
+ - Repository name
+ type: str
+ required: true
+ action:
+ description:
+ - Action to perform
+ type: str
+ required: true
+ choices: [ 'latest_release', 'create_release' ]
+ tag:
+ description:
+      - Tag name when creating a release. Required when I(action) is set to C(create_release).
+ type: str
+ target:
+ description:
+ - Target of release when creating a release
+ type: str
+ name:
+ description:
+ - Name of release when creating a release
+ type: str
+ body:
+ description:
+ - Description of the release when creating a release
+ type: str
+ draft:
+ description:
+      - Sets whether the release is a draft.
+ type: bool
+ default: false
+ prerelease:
+ description:
+      - Sets whether the release is a prerelease.
+ type: bool
+ default: false
+
+author:
+ - "Adrian Moisey (@adrianmoisey)"
+requirements:
+ - "github3.py >= 1.0.0a3"
+'''
+
+EXAMPLES = '''
+- name: Get latest release of a public repository
+ community.general.github_release:
+ user: ansible
+ repo: ansible
+ action: latest_release
+
+- name: Get latest release of testuser/testrepo
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: latest_release
+
+- name: Get latest release of test repo using username and password (Ansible 2.4 and later)
+ community.general.github_release:
+ user: testuser
+ password: secret123
+ repo: testrepo
+ action: latest_release
+
+- name: Create a new release
+ community.general.github_release:
+ token: tokenabc1234567890
+ user: testuser
+ repo: testrepo
+ action: create_release
+ tag: test
+ target: master
+ name: My Release
+ body: Some description
+
+'''
+
+RETURN = '''
+tag:
+ description: Version of the created/latest release.
+ type: str
+ returned: success
+ sample: 1.1.0
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github3
+
+ HAS_GITHUB_API = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(required=True),
+ user=dict(required=True),
+ password=dict(no_log=True),
+ token=dict(no_log=True),
+ action=dict(
+ required=True, choices=['latest_release', 'create_release']),
+ tag=dict(type='str'),
+ target=dict(type='str'),
+ name=dict(type='str'),
+ body=dict(type='str'),
+ draft=dict(type='bool', default=False),
+ prerelease=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('password', 'token'),),
+ required_if=[('action', 'create_release', ['tag']),
+ ('action', 'create_release', ['password', 'token'], True)],
+ )
+
+ if not HAS_GITHUB_API:
+ module.fail_json(msg=missing_required_lib('github3.py >= 1.0.0a3'),
+ exception=GITHUB_IMP_ERR)
+
+ repo = module.params['repo']
+ user = module.params['user']
+ password = module.params['password']
+ login_token = module.params['token']
+ action = module.params['action']
+ tag = module.params.get('tag')
+ target = module.params.get('target')
+ name = module.params.get('name')
+ body = module.params.get('body')
+ draft = module.params.get('draft')
+ prerelease = module.params.get('prerelease')
+
+ # login to github
+ try:
+ if password:
+ gh_obj = github3.login(user, password=password)
+ elif login_token:
+ gh_obj = github3.login(token=login_token)
+ else:
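+            # no credentials: anonymous access still works for public
+            # repositories, but with much stricter API rate limits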
+ gh_obj = github3.GitHub()
+
+ # test if we're actually logged in
+ if password or login_token:
+ gh_obj.me()
+ except github3.exceptions.AuthenticationFailed as e:
+ module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e),
+ details="Please check username and password or token "
+ "for repository %s" % repo)
+
+ repository = gh_obj.repository(user, repo)
+
+ if not repository:
+ module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))
+
+ if action == 'latest_release':
+ release = repository.latest_release()
+ if release:
+ module.exit_json(tag=release.tag_name)
+ else:
+ module.exit_json(tag=None)
+
+ if action == 'create_release':
+ release_exists = repository.release_from_tag(tag)
+ if release_exists:
+ module.exit_json(changed=False, msg="Release for tag %s already exists." % tag)
+
+ release = repository.create_release(
+ tag, target, name, body, draft, prerelease)
+ if release:
+ module.exit_json(changed=True, tag=release.tag_name)
+ else:
+ module.exit_json(changed=False, tag=None)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_repo.py b/ansible_collections/community/general/plugins/modules/github_repo.py
new file mode 100644
index 000000000..97076c58a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_repo.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Álvaro Torres Cogollo
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_repo
+short_description: Manage your repositories on GitHub
+version_added: 2.2.0
+description:
+  - Manages GitHub repositories using the PyGithub library.
+ - Authentication can be done with I(access_token) or with I(username) and I(password).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ username:
+ description:
+ - Username used for authentication.
+ - This is only needed when not using I(access_token).
+ type: str
+ required: false
+ password:
+ description:
+ - Password used for authentication.
+ - This is only needed when not using I(access_token).
+ type: str
+ required: false
+ access_token:
+ description:
+ - Token parameter for authentication.
+ - This is only needed when not using I(username) and I(password).
+ type: str
+ required: false
+ name:
+ description:
+ - Repository name.
+ type: str
+ required: true
+ description:
+ description:
+ - Description for the repository.
+ - Defaults to empty if I(force_defaults=true), which is the default in this module.
+ - Defaults to empty if I(force_defaults=false) when creating a new repository.
+ - This is only used when I(state) is C(present).
+ type: str
+ required: false
+ private:
+ description:
+ - Whether the repository should be private or not.
+ - Defaults to C(false) if I(force_defaults=true), which is the default in this module.
+ - Defaults to C(false) if I(force_defaults=false) when creating a new repository.
+ - This is only used when I(state) is C(present).
+ type: bool
+ required: false
+ state:
+ description:
+ - Whether the repository should exist or not.
+ type: str
+ default: present
+ choices: [ absent, present ]
+ required: false
+ organization:
+ description:
+ - Organization for the repository.
+      - If not provided and I(state) is C(present), the repository will be created in the authenticated user's profile.
+ type: str
+ required: false
+ api_url:
+ description:
+      - URL to the GitHub API if not using github.com but your own GitHub instance.
+ type: str
+ default: 'https://api.github.com'
+ version_added: "3.5.0"
+ force_defaults:
+ description:
+ - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default.
+ - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false).
+ type: bool
+ default: true
+ required: false
+ version_added: 4.1.0
+requirements:
+- PyGithub>=1.54
+notes:
+- For Python 3, PyGithub>=1.54 should be used.
+- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)."
+- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)."
+author:
+- Álvaro Torres Cogollo (@atorrescogollo)
+'''
+
+EXAMPLES = '''
+- name: Create a Github repository
+ community.general.github_repo:
+ access_token: mytoken
+ organization: MyOrganization
+ name: myrepo
+ description: "Just for fun"
+ private: true
+ state: present
+ force_defaults: false
+ register: result
+
+- name: Delete the repository
+ community.general.github_repo:
+ username: octocat
+ password: password
+ organization: MyOrganization
+ name: myrepo
+ state: absent
+ register: result
+'''
+
+RETURN = '''
+repo:
+ description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository).
+ returned: success and I(state) is C(present)
+ type: dict
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+GITHUB_IMP_ERR = None
+try:
+ from github import Github, GithubException, GithubObject
+ from github.GithubException import UnknownObjectException
+ HAS_GITHUB_PACKAGE = True
+except Exception:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB_PACKAGE = False
+
+
+def authenticate(username=None, password=None, access_token=None, api_url=None):
+ if not api_url:
+ return None
+
+ if access_token:
+ return Github(base_url=api_url, login_or_token=access_token)
+ else:
+ return Github(base_url=api_url, login_or_token=username, password=password)
+
+
+def create_repo(gh, name, organization=None, private=None, description=None, check_mode=False):
+ result = dict(
+ changed=False,
+ repo=dict())
+ if organization:
+ target = gh.get_organization(organization)
+ else:
+ target = gh.get_user()
+
+ repo = None
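+    # get_repo() raises UnknownObjectException (HTTP 404) when the
+    # repository does not exist yet; in that case it is created below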
+ try:
+ repo = target.get_repo(name=name)
+ result['repo'] = repo.raw_data
+ except UnknownObjectException:
+ if not check_mode:
+ repo = target.create_repo(
+ name=name,
+ private=GithubObject.NotSet if private is None else private,
+ description=GithubObject.NotSet if description is None else description,
+ )
+ result['repo'] = repo.raw_data
+
+ result['changed'] = True
+
+ changes = {}
+ if private is not None:
+ if repo is None or repo.raw_data['private'] != private:
+ changes['private'] = private
+ if description is not None:
+ if repo is None or repo.raw_data['description'] not in (description, description or None):
+ changes['description'] = description
+
+ if changes:
+ if not check_mode:
+ repo.edit(**changes)
+
+ result['repo'].update({
+ 'private': repo._private.value if not check_mode else private,
+ 'description': repo._description.value if not check_mode else description,
+ })
+ result['changed'] = True
+
+ return result
+
+
+def delete_repo(gh, name, organization=None, check_mode=False):
+ result = dict(changed=False)
+ if organization:
+ target = gh.get_organization(organization)
+ else:
+ target = gh.get_user()
+ try:
+ repo = target.get_repo(name=name)
+ if not check_mode:
+ repo.delete()
+ result['changed'] = True
+ except UnknownObjectException:
+ pass
+
+ return result
+
+
+def run_module(params, check_mode=False):
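+    # force_defaults=true keeps the module's historical behaviour: unset
+    # description/private are coerced to '' / False and therefore
+    # overwrite whatever the repository currently has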
+ if params['force_defaults']:
+ params['description'] = params['description'] or ''
+ params['private'] = params['private'] or False
+
+ gh = authenticate(
+ username=params['username'], password=params['password'], access_token=params['access_token'],
+ api_url=params['api_url'])
+ if params['state'] == "absent":
+ return delete_repo(
+ gh=gh,
+ name=params['name'],
+ organization=params['organization'],
+ check_mode=check_mode
+ )
+ else:
+ return create_repo(
+ gh=gh,
+ name=params['name'],
+ organization=params['organization'],
+ private=params['private'],
+ description=params['description'],
+ check_mode=check_mode
+ )
+
+
+def main():
+ module_args = dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ access_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=False, default="present",
+ choices=["present", "absent"]),
+ organization=dict(type='str', required=False, default=None),
+ private=dict(type='bool'),
+ description=dict(type='str'),
+ api_url=dict(type='str', required=False, default='https://api.github.com'),
+ force_defaults=dict(type='bool', default=True),
+ )
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_together=[('username', 'password')],
+ required_one_of=[('username', 'access_token')],
+ mutually_exclusive=[('username', 'access_token')]
+ )
+
+ if not HAS_GITHUB_PACKAGE:
+ module.fail_json(msg=missing_required_lib(
+ "PyGithub"), exception=GITHUB_IMP_ERR)
+
+ try:
+ result = run_module(module.params, module.check_mode)
+ module.exit_json(**result)
+ except GithubException as e:
+ module.fail_json(msg="Github error. {0}".format(repr(e)))
+ except Exception as e:
+ module.fail_json(msg="Unexpected error. {0}".format(repr(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_webhook.py b/ansible_collections/community/general/plugins/modules/github_webhook.py
new file mode 100644
index 000000000..d47b7a82f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_webhook.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook
+short_description: Manage GitHub webhooks
+description:
+ - "Create and delete GitHub webhooks"
+requirements:
+ - "PyGithub >= 1.3.5"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ type: str
+ required: true
+ aliases:
+ - repo
+ url:
+ description:
+ - URL to which payloads will be delivered
+ type: str
+ required: true
+ content_type:
+ description:
+ - The media type used to serialize the payloads
+ type: str
+ required: false
+ choices: [ form, json ]
+ default: form
+ secret:
+ description:
+ - The shared secret between GitHub and the payload URL.
+ type: str
+ required: false
+ insecure_ssl:
+ description:
+ - >
+ Flag to indicate that GitHub should skip SSL verification when calling
+ the hook.
+ required: false
+ type: bool
+ default: false
+ events:
+ description:
+ - >
+ A list of GitHub events the hook is triggered for. Events are listed at
+ U(https://developer.github.com/v3/activity/events/types/). Required
+ unless C(state) is C(absent)
+ required: false
+ type: list
+ elements: str
+ active:
+ description:
+ - Whether or not the hook is active
+ required: false
+ type: bool
+ default: true
+ state:
+ description:
+ - Whether the hook should be present or absent
+ type: str
+ required: false
+ choices: [ absent, present ]
+ default: present
+ user:
+ description:
+ - User to authenticate to GitHub as
+ type: str
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ type: str
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ type: str
+ required: false
+ github_url:
+ description:
+ - Base URL of the GitHub API
+ type: str
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: Create a new webhook that triggers on push (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ events:
+ - push
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+
+- name: Create a new webhook in a github enterprise installation with multiple event triggers (token auth)
+ community.general.github_webhook:
+ repository: myorg/myrepo
+ url: https://jenkins.example.com/ghprbhook/
+ content_type: json
+ secret: "{{ github_shared_secret }}"
+ insecure_ssl: true
+ events:
+ - issue_comment
+ - pull_request
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com
+
+- name: Delete a webhook (password auth)
+ community.general.github_webhook:
+ repository: ansible/ansible
+ url: https://www.example.com/hooks/
+ state: absent
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+'''
+
+RETURN = '''
+---
+hook_id:
+ description: The GitHub ID of the hook created/updated
+ returned: when state is 'present'
+ type: int
+ sample: 6206
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def _create_hook_config(module):
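+    # the GitHub API expects insecure_ssl as the strings "0"/"1" rather
+    # than a boolean, hence the conversion below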
+ hook_config = {
+ "url": module.params["url"],
+ "content_type": module.params["content_type"],
+ "insecure_ssl": "1" if module.params["insecure_ssl"] else "0"
+ }
+
+ secret = module.params.get("secret")
+ if secret:
+ hook_config["secret"] = secret
+
+ return hook_config
+
+
+def create_hook(repo, module):
+ config = _create_hook_config(module)
+ try:
+ hook = repo.create_hook(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to create hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return True, data
+
+
+def update_hook(repo, hook, module):
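+    # PyGithub's update() refetches the hook from the API and returns True
+    # when the locally cached data changed; the second call after edit()
+    # determines the reported "changed" status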
+ config = _create_hook_config(module)
+ try:
+ hook.update()
+ hook.edit(
+ name="web",
+ config=config,
+ events=module.params["events"],
+ active=module.params["active"])
+
+ changed = hook.update()
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to modify hook for repository %s: %s" % (
+ repo.full_name, to_native(err)))
+
+ data = {"hook_id": hook.id}
+ return changed, data
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=['repo']),
+ url=dict(type='str', required=True),
+ content_type=dict(
+ type='str',
+ choices=('json', 'form'),
+ required=False,
+ default='form'),
+ secret=dict(type='str', required=False, no_log=True),
+ insecure_ssl=dict(type='bool', required=False, default=False),
+ events=dict(type='list', elements='str', required=False),
+ active=dict(type='bool', required=False, default=True),
+ state=dict(
+ type='str',
+ required=False,
+ choices=('absent', 'present'),
+ default='present'),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'),),
+ required_one_of=(("password", "token"),),
+ required_if=(("state", "present", ("events",)),),
+ )
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ hook = None
+ try:
+ for hook in repo.get_hooks():
+ if hook.config.get("url") == module.params["url"]:
+ break
+ else:
+ hook = None
+ except github.GithubException as err:
+ module.fail_json(msg="Unable to get hooks from repository %s: %s" % (
+ module.params["repository"], to_native(err)))
+
+ changed = False
+ data = {}
+ if hook is None and module.params["state"] == "present":
+ changed, data = create_hook(repo, module)
+ elif hook is not None and module.params["state"] == "absent":
+ try:
+ hook.delete()
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to delete hook from repository %s: %s" % (
+ repo.full_name, to_native(err)))
+ else:
+ changed = True
+ elif hook is not None and module.params["state"] == "present":
+ changed, data = update_hook(repo, hook, module)
+ # else, there is no hook and we want there to be no hook
+
+ module.exit_json(changed=changed, **data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/github_webhook_info.py b/ansible_collections/community/general/plugins/modules/github_webhook_info.py
new file mode 100644
index 000000000..a6f7c3e52
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/github_webhook_info.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: github_webhook_info
+short_description: Query information about GitHub webhooks
+description:
+ - "Query information about GitHub webhooks"
+ - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "PyGithub >= 1.3.5"
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ repository:
+ description:
+ - Full name of the repository to configure a hook for
+ type: str
+ required: true
+ aliases:
+ - repo
+ user:
+ description:
+ - User to authenticate to GitHub as
+ type: str
+ required: true
+ password:
+ description:
+ - Password to authenticate to GitHub with
+ type: str
+ required: false
+ token:
+ description:
+ - Token to authenticate to GitHub with
+ type: str
+ required: false
+ github_url:
+ description:
+      - Base URL of the GitHub API.
+ type: str
+ required: false
+ default: https://api.github.com
+
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+- name: List hooks for a repository (password auth)
+ community.general.github_webhook_info:
+ repository: ansible/ansible
+ user: "{{ github_user }}"
+ password: "{{ github_password }}"
+ register: ansible_webhooks
+
+- name: List hooks for a repository on GitHub Enterprise (token auth)
+ community.general.github_webhook_info:
+ repository: myorg/myrepo
+ user: "{{ github_user }}"
+ token: "{{ github_user_api_token }}"
+ github_url: https://github.example.com/api/v3/
+ register: myrepo_webhooks
+'''
+
+RETURN = '''
+---
+hooks:
+ description: A list of hooks that exist for the repo
+ returned: always
+ type: list
+ elements: dict
+ sample:
+ - {
+ "has_shared_secret": true,
+ "url": "https://jenkins.example.com/ghprbhook/",
+ "events": ["issue_comment", "pull_request"],
+ "insecure_ssl": "1",
+ "content_type": "json",
+ "active": true,
+ "id": 6206,
+ "last_response": {"status": "active", "message": "OK", "code": 200}
+ }
+'''
+
+import traceback
+
+GITHUB_IMP_ERR = None
+try:
+ import github
+ HAS_GITHUB = True
+except ImportError:
+ GITHUB_IMP_ERR = traceback.format_exc()
+ HAS_GITHUB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def _munge_hook(hook_obj):
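+    # flatten the hook's config into the returned dict, but expose only
+    # whether a shared secret is set, never the secret itself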
+ retval = {
+ "active": hook_obj.active,
+ "events": hook_obj.events,
+ "id": hook_obj.id,
+ "url": hook_obj.url,
+ }
+ retval.update(hook_obj.config)
+ retval["has_shared_secret"] = "secret" in retval
+ if "secret" in retval:
+ del retval["secret"]
+
+ retval["last_response"] = hook_obj.last_response.raw_data
+ return retval
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repository=dict(type='str', required=True, aliases=["repo"]),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ token=dict(type='str', required=False, no_log=True),
+ github_url=dict(
+ type='str', required=False, default="https://api.github.com")),
+ mutually_exclusive=(('password', 'token'), ),
+ required_one_of=(("password", "token"), ),
+ supports_check_mode=True)
+
+ if not HAS_GITHUB:
+ module.fail_json(msg=missing_required_lib('PyGithub'),
+ exception=GITHUB_IMP_ERR)
+
+ try:
+ github_conn = github.Github(
+ module.params["user"],
+ module.params.get("password") or module.params.get("token"),
+ base_url=module.params["github_url"])
+ except github.GithubException as err:
+ module.fail_json(msg="Could not connect to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+
+ try:
+ repo = github_conn.get_repo(module.params["repository"])
+ except github.BadCredentialsException as err:
+ module.fail_json(msg="Could not authenticate to GitHub at %s: %s" % (
+ module.params["github_url"], to_native(err)))
+ except github.UnknownObjectException as err:
+ module.fail_json(
+ msg="Could not find repository %s in GitHub at %s: %s" % (
+ module.params["repository"], module.params["github_url"],
+ to_native(err)))
+ except Exception as err:
+ module.fail_json(
+ msg="Could not fetch repository %s from GitHub at %s: %s" %
+ (module.params["repository"], module.params["github_url"],
+ to_native(err)),
+ exception=traceback.format_exc())
+
+ try:
+ hooks = [_munge_hook(h) for h in repo.get_hooks()]
+ except github.GithubException as err:
+ module.fail_json(
+ msg="Unable to get hooks from repository %s: %s" %
+ (module.params["repository"], to_native(err)),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, hooks=hooks)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_branch.py
new file mode 100644
index 000000000..d7eecb33f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_branch.py
@@ -0,0 +1,183 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_branch
+short_description: Create or delete a branch
+version_added: 4.2.0
+description:
+  - This module allows creating or deleting branches.
+author:
+ - paytroff (@paytroff)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete branch.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ project:
+ description:
+ - The path or name of the project.
+ required: true
+ type: str
+ branch:
+ description:
+ - The name of the branch that needs to be created.
+ required: true
+ type: str
+ ref_branch:
+ description:
+ - Reference branch to create from.
+ - This must be specified if I(state=present).
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create branch branch2 from main
+ community.general.gitlab_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ branch: branch2
+ ref_branch: main
+ state: present
+
+- name: Delete branch branch2
+ community.general.gitlab_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "group1/project1"
+ branch: branch2
+ state: absent
+
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitlabBranch(object):
+
+ def __init__(self, module, project, gitlab_instance):
+ self.repo = gitlab_instance
+ self._module = module
+ self.project = self.get_project(project)
+
+ def get_project(self, project):
+ try:
+ return self.repo.projects.get(project)
+ except Exception as e:
+ return False
+
+ def get_branch(self, branch):
+ try:
+ return self.project.branches.get(branch)
+ except Exception as e:
+ return False
+
+ def create_branch(self, branch, ref_branch):
+ return self.project.branches.create({'branch': branch, 'ref': ref_branch})
+
+ def delete_branch(self, branch):
+ return branch.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=True),
+ branch=dict(type='str', required=True),
+ ref_branch=dict(type='str', required=False),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
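+        # the trailing True in the required_if rule below means only one of
+        # the listed parameters is required rather than all of them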
+ required_if=[
+ ['state', 'present', ['ref_branch'], True],
+ ],
+ supports_check_mode=False
+ )
+ ensure_gitlab_package(module)
+
+ project = module.params['project']
+ branch = module.params['branch']
+ ref_branch = module.params['ref_branch']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+ module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+ " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+ gitlab_instance = gitlab_authentication(module)
+ this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance)
+
+ this_branch = this_gitlab.get_branch(branch)
+
+ if not this_branch and state == "present":
+ r_branch = this_gitlab.get_branch(ref_branch)
+ if not r_branch:
+ module.fail_json(msg="Ref branch {b} not exist.".format(b=ref_branch))
+ this_gitlab.create_branch(branch, ref_branch)
+ module.exit_json(changed=True, msg="Created the branch {b}.".format(b=branch))
+ elif this_branch and state == "present":
+ module.exit_json(changed=False, msg="Branch {b} already exist".format(b=branch))
+ elif this_branch and state == "absent":
+ try:
+ this_gitlab.delete_branch(this_branch)
+ module.exit_json(changed=True, msg="Branch {b} deleted.".format(b=branch))
+ except Exception as e:
+ module.fail_json(msg="Error delete branch.", exception=traceback.format_exc())
+ else:
+ module.exit_json(changed=False, msg="No changes are needed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
new file mode 100644
index 000000000..27cb01f87
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_deploy_key.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_deploy_key
+short_description: Manages GitLab project deploy keys
+description:
+  - Adds, updates and removes project deploy keys.
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+      - ID or full path of the project in the form of group/name.
+ required: true
+ type: str
+ title:
+ description:
+ - Deploy key's title.
+ required: true
+ type: str
+ key:
+ description:
+      - The SSH public key to add as a deploy key.
+ required: true
+ type: str
+ can_push:
+ description:
+ - Whether this key can push to the project.
+ type: bool
+ default: false
+ state:
+ description:
+      - When C(present), the deploy key is added to the project if it does not exist.
+      - When C(absent), it is removed from the project if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+'''
+
+EXAMPLES = '''
+- name: "Adding a project deploy key"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+- name: "Update the above deploy key to add push access"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ title: "Jenkins CI"
+ state: present
+ can_push: true
+
+- name: "Remove the previous deploy key from the project"
+ community.general.gitlab_deploy_key:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ project: "my_group/my_project"
+ state: absent
+ key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..."
+
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+  description: The JSON-parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: key is already in use"
+
+deploy_key:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabDeployKey(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.deploy_key_object = None
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ @param key_key String of the key
+ @param key_can_push Option of the deploy_key
+ @param options Deploy key options
+ '''
+ def create_or_update_deploy_key(self, project, key_title, key_key, options):
+ changed = False
+
+        # note: unfortunately the public key cannot be updated directly via
+        # the GitLab REST API, so in that case the key has to be deleted and
+        # then recreated
+ if self.deploy_key_object and self.deploy_key_object.key != key_key:
+ if not self._module.check_mode:
+ self.deploy_key_object.delete()
+ self.deploy_key_object = None
+
+        # exists_deploy_key() has already been called in main(), so deploy_key_object reflects the current state
+ if self.deploy_key_object is None:
+ deploy_key = self.create_deploy_key(project, {
+ 'title': key_title,
+ 'key': key_key,
+ 'can_push': options['can_push']})
+ changed = True
+ else:
+ changed, deploy_key = self.update_deploy_key(self.deploy_key_object, {
+ 'title': key_title,
+ 'can_push': options['can_push']})
+
+ self.deploy_key_object = deploy_key
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title)
+
+ try:
+ deploy_key.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update deploy key: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the deploy_key
+ '''
+ def create_deploy_key(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ deploy_key = project.keys.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create deploy key: %s " % to_native(e))
+
+ return deploy_key
+
+ '''
+ @param deploy_key Deploy Key Object
+ @param arguments Attributes of the deploy_key
+ '''
+ def update_deploy_key(self, deploy_key, arguments):
+ changed = False
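+        # compare each requested attribute with the current value and only
+        # flag a change when something actually differs; saving is left to
+        # the caller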
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if getattr(deploy_key, arg_key) != arguments[arg_key]:
+ setattr(deploy_key, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, deploy_key)
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def find_deploy_key(self, project, key_title):
+ deploy_keys = project.keys.list(all=True)
+ for deploy_key in deploy_keys:
+ if (deploy_key.title == key_title):
+ return deploy_key
+
+ '''
+ @param project Project object
+ @param key_title Title of the key
+ '''
+ def exists_deploy_key(self, project, key_title):
+ # When the deploy key exists, the object is stored in self.deploy_key_object.
+ deploy_key = self.find_deploy_key(project, key_title)
+ if deploy_key:
+ self.deploy_key_object = deploy_key
+ return True
+ return False
+
+ def delete_deploy_key(self):
+ if self._module.check_mode:
+ return True
+
+ return self.deploy_key_object.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=False),
+ can_push=dict(type='bool', default=False),
+ title=dict(type='str', required=True)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ key_title = module.params['title']
+ key_keyfile = module.params['key']
+ key_can_push = module.params['can_push']
+
+ gitlab_instance = gitlab_authentication(module)
+
+ gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance)
+
+ project = find_project(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create deploy key: project %s doesn't exists" % project_identifier)
+
+ deploy_key_exists = gitlab_deploy_key.exists_deploy_key(project, key_title)
+
+ if state == 'absent':
+ if deploy_key_exists:
+ gitlab_deploy_key.delete_deploy_key()
+ module.exit_json(changed=True, msg="Successfully deleted deploy key %s" % key_title)
+ else:
+ module.exit_json(changed=False, msg="Deploy key deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_deploy_key.create_or_update_deploy_key(project, key_title, key_keyfile, {'can_push': key_can_push}):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the deploy key %s" % key_title,
+ deploy_key=gitlab_deploy_key.deploy_key_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group.py b/ansible_collections/community/general/plugins/modules/gitlab_group.py
new file mode 100644
index 000000000..4de1ffc5f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group.py
@@ -0,0 +1,400 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_group
+short_description: Creates/updates/deletes GitLab Groups
+description:
+ - When the group does not exist in GitLab, it will be created.
+ - When the group does exist and I(state=absent), the group will be deleted.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - Name of the group you want to create.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the group you want to create; this will be api_url/group_path.
+ - If not supplied, the group name will be used.
+ type: str
+ description:
+ description:
+ - A description for the group.
+ type: str
+ state:
+ description:
+ - Create or delete the group.
+ - Possible values are C(present) and C(absent).
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ parent:
+ description:
+ - Allows creating subgroups.
+ - Id or full path of the parent group in the form of group/name.
+ type: str
+ visibility:
+ description:
+ - Default visibility of the group.
+ choices: ["private", "internal", "public"]
+ default: private
+ type: str
+ project_creation_level:
+ description:
+ - Determine if developers can create projects in the group.
+ choices: ["developer", "maintainer", "noone"]
+ type: str
+ version_added: 3.7.0
+ auto_devops_enabled:
+ description:
+ - Default to Auto DevOps pipeline for all projects within this group.
+ type: bool
+ version_added: 3.7.0
+ subgroup_creation_level:
+ description:
+ - Allowed to create subgroups.
+ choices: ["maintainer", "owner"]
+ type: str
+ version_added: 3.7.0
+ require_two_factor_authentication:
+ description:
+ - Require all users in this group to set up two-factor authentication.
+ type: bool
+ version_added: 3.7.0
+ avatar_path:
+ description:
+ - Absolute path to the image file to configure the avatar. File size should not exceed 200 kb.
+ - This option is only used on creation, not for updates.
+ type: path
+ version_added: 4.2.0
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: false
+ name: my_first_group
+ state: absent
+
+- name: "Create GitLab Group"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+
+# The group will be created at https://gitlab.dj-wasabi.local/super_parent/parent/my_first_group
+- name: "Create GitLab SubGroup"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_group
+ path: my_first_group
+ state: present
+ parent: "super_parent/parent"
+
+# Other group which only allows sub-groups - no projects
+- name: "Create GitLab Group for SubGroups only"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_main_group
+ path: my_main_group
+ state: present
+ project_creation_level: noone
+ auto_devops_enabled: false
+ subgroup_creation_level: maintainer
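+
+# Illustrative sketch with placeholder values: avatar_path is only applied on
+# creation, not on updates.
+- name: "Create GitLab Group with avatar and mandatory 2FA"
+ community.general.gitlab_group:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ name: secure_group
+ path: secure_group
+ state: present
+ visibility: internal
+ require_two_factor_authentication: true
+ avatar_path: /path/to/avatar.png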
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+group:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.group_object = None
+
+ '''
+ @param group Group object
+ '''
+ def get_group_id(self, group):
+ if group is not None:
+ return group.id
+ return None
+
+ '''
+ @param name Name of the group
+ @param parent Parent group full path
+ @param options Group options
+ '''
+ def create_or_update_group(self, name, parent, options):
+ changed = False
+
+ # exists_group() was already called in main(), so the object is set when the group exists
+ if self.group_object is None:
+ parent_id = self.get_group_id(parent)
+
+ payload = {
+ 'name': name,
+ 'path': options['path'],
+ 'parent_id': parent_id,
+ 'visibility': options['visibility'],
+ 'project_creation_level': options['project_creation_level'],
+ 'auto_devops_enabled': options['auto_devops_enabled'],
+ 'subgroup_creation_level': options['subgroup_creation_level'],
+ }
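+ # Optional fields are only added to the payload when set, so GitLab
+ # applies its own defaults otherwise.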
+ if options.get('description'):
+ payload['description'] = options['description']
+ if options.get('require_two_factor_authentication'):
+ payload['require_two_factor_authentication'] = options['require_two_factor_authentication']
+ group = self.create_group(payload)
+
+ # add avatar to group
+ if options['avatar_path']:
+ try:
+ group.avatar = open(options['avatar_path'], 'rb')
+ except IOError as e:
+ self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e))
+ changed = True
+ else:
+ changed, group = self.update_group(self.group_object, {
+ 'name': name,
+ 'description': options['description'],
+ 'visibility': options['visibility'],
+ 'project_creation_level': options['project_creation_level'],
+ 'auto_devops_enabled': options['auto_devops_enabled'],
+ 'subgroup_creation_level': options['subgroup_creation_level'],
+ 'require_two_factor_authentication': options['require_two_factor_authentication'],
+ })
+
+ self.group_object = group
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the group %s" % name)
+
+ try:
+ group.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update group: %s " % e)
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the group
+ '''
+ def create_group(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ # Filter out None values
+ filtered = dict((arg_key, arg_value) for arg_key, arg_value in arguments.items() if arg_value is not None)
+
+ group = self._gitlab.groups.create(filtered)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create group: %s " % to_native(e))
+
+ return group
+
+ '''
+ @param group Group Object
+ @param arguments Attributes of the group
+ '''
+ def update_group(self, group, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arg_value is not None:
+ if getattr(group, arg_key) != arg_value:
+ setattr(group, arg_key, arg_value)
+ changed = True
+
+ return (changed, group)
+
+ def delete_group(self):
+ group = self.group_object
+
+ if len(group.projects.list(all=False)) >= 1:
+ self._module.fail_json(
+ msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.")
+ else:
+ if self._module.check_mode:
+ return True
+
+ try:
+ group.delete()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to delete group: %s " % to_native(e))
+
+ '''
+ @param project_identifier Full path of the group, including any parent group path: <parent_path>/<group_path>
+ '''
+ def exists_group(self, project_identifier):
+ # When the group exists, the object is stored in self.group_object.
+ group = find_group(self._gitlab, project_identifier)
+ if group:
+ self.group_object = group
+ return True
+ return False
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ parent=dict(type='str'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"]),
+ project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']),
+ auto_devops_enabled=dict(type='bool'),
+ subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']),
+ require_two_factor_authentication=dict(type='bool'),
+ avatar_path=dict(type='path'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ group_name = module.params['name']
+ group_path = module.params['path']
+ description = module.params['description']
+ state = module.params['state']
+ parent_identifier = module.params['parent']
+ group_visibility = module.params['visibility']
+ project_creation_level = module.params['project_creation_level']
+ auto_devops_enabled = module.params['auto_devops_enabled']
+ subgroup_creation_level = module.params['subgroup_creation_level']
+ require_two_factor_authentication = module.params['require_two_factor_authentication']
+ avatar_path = module.params['avatar_path']
+
+ gitlab_instance = gitlab_authentication(module)
+
+ # Define default group_path based on group_name
+ if group_path is None:
+ group_path = group_name.replace(" ", "_")
+
+ gitlab_group = GitLabGroup(module, gitlab_instance)
+
+ parent_group = None
+ if parent_identifier:
+ parent_group = find_group(gitlab_instance, parent_identifier)
+ if not parent_group:
+ module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists")
+
+ group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path)
+ else:
+ group_exists = gitlab_group.exists_group(group_path)
+
+ if state == 'absent':
+ if group_exists:
+ gitlab_group.delete_group()
+ module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name)
+ else:
+ module.exit_json(changed=False, msg="Group deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_group.create_or_update_group(group_name, parent_group, {
+ "path": group_path,
+ "description": description,
+ "visibility": group_visibility,
+ "project_creation_level": project_creation_level,
+ "auto_devops_enabled": auto_devops_enabled,
+ "subgroup_creation_level": subgroup_creation_level,
+ "require_two_factor_authentication": require_two_factor_authentication,
+ "avatar_path": avatar_path,
+ }):
+ module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the group %s" % group_name, group=gitlab_group.group_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_members.py b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
new file mode 100644
index 000000000..66298e882
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_members.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_group_members
+short_description: Manage group members on GitLab Server
+description:
+ - This module allows adding and removing members to/from a group, and changing a member's access level in a GitLab group.
+version_added: '1.2.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ gitlab_group:
+ description:
+ - The C(full_path) of the GitLab group the member is added to/removed from.
+ - Setting this to C(name) or C(path) has been disallowed since community.general 6.0.0. Use C(full_path) instead.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - A username or a list of usernames to add to/remove from the GitLab group.
+ - Mutually exclusive with I(gitlab_users_access).
+ type: list
+ elements: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ - Mutually exclusive with I(gitlab_users_access).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ gitlab_users_access:
+ description:
+ - Provide a list of user to access level mappings.
+ - Every dictionary in this list specifies a user (by username) and the access level the user should have.
+ - Mutually exclusive with I(gitlab_user) and I(access_level).
+ - Use together with I(purge_users) to remove all users not specified here from the group.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: A username to add to/remove from the GitLab group.
+ type: str
+ required: true
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ required: true
+ version_added: 3.6.0
+ state:
+ description:
+ - State of the member in the group.
+ - On C(present), it adds a user to a GitLab group.
+ - On C(absent), it removes a user from a GitLab group.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ purge_users:
+ description:
+ - Removes users with one of the given access levels when they are not in the given I(gitlab_user)/I(gitlab_users_access) list.
+ If omitted, do not purge orphaned members.
+ - Is only used when I(state=present).
+ type: list
+ elements: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner']
+ version_added: 3.6.0
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ state: absent
+
+- name: Add a list of Users to A GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user:
+ - user1
+ - user2
+ access_level: developer
+ state: present
+
+- name: Add a list of Users with Dedicated Access Levels to A GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_users_access:
+ - name: user1
+ access_level: developer
+ - name: user2
+ access_level: maintainer
+ state: present
+
+- name: Add a user, remove all others which might be on this access level
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_user: username
+ access_level: developer
+ purge_users: developer
+ state: present
+
+- name: Remove a list of Users with Dedicated Access Levels from a GitLab Group
+ community.general.gitlab_group_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ gitlab_group: groupname
+ gitlab_users_access:
+ - name: user1
+ access_level: developer
+ - name: user2
+ access_level: maintainer
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabGroup(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ # get user id if the user exists
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user, all=True)
+ if user_exists:
+ return user_exists[0].id
+
+ # get group id if group exists
+ def get_group_id(self, gitlab_group):
+ groups = self._gitlab.groups.list(search=gitlab_group, all=True)
+ for group in groups:
+ if group.full_path == gitlab_group:
+ return group.id
+
+ # get all members in a group
+ def get_members_in_a_group(self, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ return group.members.list(all=True)
+
+ # get single member in a group by user name
+ def get_member_in_a_group(self, gitlab_group_id, gitlab_user_id):
+ member = None
+ group = self._gitlab.groups.get(gitlab_group_id)
+ try:
+ member = group.members.get(gitlab_user_id)
+ if member:
+ return member
+ except gitlab.exceptions.GitlabGetError:
+ return None
+
+ # check if the user is a member of the group
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a group
+ def add_member_to_group(self, gitlab_user_id, gitlab_group_id, access_level):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ # remove user from a group
+ def remove_user_from_group(self, gitlab_user_id, gitlab_group_id):
+ group = self._gitlab.groups.get(gitlab_group_id)
+ group.members.delete(gitlab_user_id)
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a group
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ member.access_level = access_level
+ member.save()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ gitlab_group=dict(type='str', required=True),
+ gitlab_user=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ purge_users=dict(type='list', elements='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']),
+ gitlab_users_access=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True),
+ )
+ ),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['gitlab_user', 'gitlab_users_access'],
+ ['access_level', 'gitlab_users_access'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ['gitlab_user', 'access_level'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ['gitlab_user', 'gitlab_users_access'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level', 'gitlab_users_access'], True],
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
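+ # Map the human-readable access levels to python-gitlab's numeric constants.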
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS,
+ }
+
+ gitlab_group = module.params['gitlab_group']
+ state = module.params['state']
+ access_level = module.params['access_level']
+ purge_users = module.params['purge_users']
+
+ if purge_users:
+ purge_users = [access_level_int[level] for level in purge_users]
+
+ # connect to gitlab server
+ gl = gitlab_authentication(module)
+
+ group = GitLabGroup(module, gl)
+
+ gitlab_group_id = group.get_group_id(gitlab_group)
+
+ # group doesn't exist
+ if not gitlab_group_id:
+ module.fail_json(msg="group '%s' not found." % gitlab_group)
+
+ members = []
+ if module.params['gitlab_user'] is not None:
+ gitlab_users_access = []
+ gitlab_users = module.params['gitlab_user']
+ for gl_user in gitlab_users:
+ gitlab_users_access.append({'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+ elif module.params['gitlab_users_access'] is not None:
+ gitlab_users_access = module.params['gitlab_users_access']
+ for user_level in gitlab_users_access:
+ user_level['access_level'] = access_level_int[user_level['access_level']]
+
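+ # For a single user without purging, fetching that one membership is enough;
+ # otherwise fetch the full member list so purging can compare against it.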
+ if len(gitlab_users_access) == 1 and not purge_users:
+ # only single user given
+ members = [group.get_member_in_a_group(gitlab_group_id, group.get_user_id(gitlab_users_access[0]['name']))]
+ if members[0] is None:
+ members = []
+ elif len(gitlab_users_access) > 1 or purge_users:
+ # list of users given
+ members = group.get_members_in_a_group(gitlab_group_id)
+ else:
+ module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users.",
+ result_data=[])
+
+ changed = False
+ error = False
+ changed_users = []
+ changed_data = []
+
+ for gitlab_user in gitlab_users_access:
+ gitlab_user_id = group.get_user_id(gitlab_user['name'])
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ changed_users.append("user '%s' not found, and thus also not part of the group" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "user '%s' not found, and thus also not part of the group" % gitlab_user['name']})
+ else:
+ error = True
+ changed_users.append("user '%s' not found." % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "user '%s' not found." % gitlab_user['name']})
+ continue
+
+ is_user_a_member = group.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the group
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the group
+ try:
+ if not module.check_mode:
+ group.add_member_to_group(gitlab_user_id, gitlab_group_id, gitlab_user['access_level'])
+ changed = True
+ changed_users.append("Successfully added user '%s' to group" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully added user '%s' to group" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ error = True
+ changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+ # state as absent
+ else:
+ changed_users.append("User, '%s', is not a member in the group. No change to report" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "User, '%s', is not a member in the group. No change to report" % gitlab_user['name']})
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = group.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == gitlab_user['access_level']:
+ changed_users.append("User, '%s', is already a member in the group. No change to report" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "User, '%s', is already a member in the group. No change to report" % gitlab_user['name']})
+ else:
+ # update the access level for the user
+ try:
+ if not module.check_mode:
+ group.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+ changed = True
+ changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabUpdateError) as e:
+ error = True
+ changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+ else:
+ # remove the user from the group
+ try:
+ if not module.check_mode:
+ group.remove_user_from_group(gitlab_user_id, gitlab_group_id)
+ changed = True
+ changed_users.append("Successfully removed user, '%s', from the group" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully removed user, '%s', from the group" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ error = True
+ changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
+
+ # If state is present and purge_users is set, remove members that have one of the given access levels but are not in the given user list.
+ if state == 'present' and purge_users:
+ uppercase_names_in_gitlab_users_access = []
+ for name in gitlab_users_access:
+ uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+ for member in members:
+ if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+ try:
+ if not module.check_mode:
+ group.remove_user_from_group(member.id, gitlab_group_id)
+ changed = True
+ changed_users.append("Successfully removed user '%s', from group. Was not in given list" % member.username)
+ changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+ 'msg': "Successfully removed user '%s', from group. Was not in given list" % member.username})
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ error = True
+ changed_users.append("Failed to removed user, '%s', from the group" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Failed to remove user, '%s' from the group: %s" % (gitlab_user['name'], e)})
+
+ if len(gitlab_users_access) == 1 and error:
+ # If a single user was given and an error occurred, fail with that error; for lists, errors are reported per user in result_data.
+ module.fail_json(msg="FAILED: '%s '" % changed_users[0], result_data=changed_data)
+ elif error:
+ module.fail_json(msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+ module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
new file mode 100644
index 000000000..c7befe123
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_group_variable.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Florent Madiot (scodeman@scode.io)
+# Based on code:
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: gitlab_group_variable
+short_description: Creates, updates, or deletes GitLab group variables
+version_added: 1.2.0
+description:
+ - Creates a group variable if it does not exist.
+ - When a group variable does exist, its value will be updated when the values are different.
+ - Variables which exist in the GitLab group but are untouched in the playbook
+ either stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+ - Florent Madiot (@scodeman)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete group variable.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ group:
+ description:
+ - The path and name of the group.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), delete all variables which are not mentioned in the task.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control over whether a value should be masked, protected or both.
+ - Support for group variables requires GitLab >= 9.5.
+ - Support for environment_scope requires GitLab Premium >= 13.11.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)).
+ default: {}
+ type: dict
+ variables:
+ version_added: 4.5.0
+ description:
+ - A list of dictionaries that represents CI/CD variables.
+ - This module works internally with this structure, even if the older I(vars) parameter is used.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the variable.
+ type: str
+ required: true
+ value:
+ description:
+ - The variable value.
+ - Required when I(state=present).
+ type: str
+ masked:
+ description:
+ - Whether the variable value is masked or not.
+ type: bool
+ default: false
+ protected:
+ description:
+ - Whether the variable value is protected or not.
+ type: bool
+ default: false
+ variable_type:
+ description:
+ - Whether a variable is an environment variable (C(env_var)) or a file (C(file)).
+ type: str
+ choices: [ "env_var", "file" ]
+ default: env_var
+ environment_scope:
+ description:
+ - The scope for the variable.
+ type: str
+ default: '*'
+'''
+
+
+EXAMPLES = r'''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ purge: false
+ variables:
+ - name: ACCESS_KEY_ID
+ value: abc123
+ - name: SECRET_ACCESS_KEY
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: production
+
+- name: Delete one variable
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
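+
+# Illustrative sketch: with state=absent and purge=true the module removes
+# every existing variable in the group, so no vars/variables are needed.
+- name: Delete all variables
+ community.general.gitlab_group_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ group: scodeman/testgroup/
+ state: absent
+ purge: true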
+'''
+
+RETURN = r'''
+group_variable:
+ description: Four lists of the variable names which were added, updated, removed or left untouched.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables
+)
+
+
+def vars_to_variables(vars, module):
+ # transform old vars to new variables structure
+ variables = list()
+ for item, value in vars.items():
+ if (isinstance(value, string_types) or
+ isinstance(value, (integer_types, float))):
+ variables.append(
+ {
+ "name": item,
+ "value": str(value),
+ "masked": False,
+ "protected": False,
+ "variable_type": "env_var",
+ }
+ )
+
+ elif isinstance(value, dict):
+ new_item = {"name": item, "value": value.get('value')}
+
+ new_item = {
+ "name": item,
+ "value": value.get('value'),
+ "masked": value.get('masked'),
+ "protected": value.get('protected'),
+ "variable_type": value.get('variable_type'),
+ }
+
+ if value.get('environment_scope'):
+ new_item['environment_scope'] = value.get('environment_scope')
+
+ variables.append(new_item)
+
+ else:
+ module.fail_json(msg="value must be of type string, integer, float or dict")
+
+ return variables
+
+
+class GitlabGroupVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.group = self.get_group(module.params['group'])
+ self._module = module
+
+ def get_group(self, group_name):
+ return self.repo.groups.get(group_name)
+
+ def list_all_group_variables(self):
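+ # Walk python-gitlab's paginated results until an empty page is
+ # returned, collecting every variable along the way.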
+ page_nb = 1
+ variables = []
+ vars_page = self.group.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.group.variables.list(page=page_nb)
+ return variables
+
+ def create_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ var = {
+ "key": var_obj.get('key'),
+ "value": var_obj.get('value'),
+ "masked": var_obj.get('masked'),
+ "protected": var_obj.get('protected'),
+ "variable_type": var_obj.get('variable_type'),
+ }
+ if var_obj.get('environment_scope') is not None:
+ var["environment_scope"] = var_obj.get('environment_scope')
+
+ self.group.variables.create(var)
+ return True
+
+ def update_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ self.delete_variable(var_obj)
+ self.create_variable(var_obj)
+ return True
+
+ def delete_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ self.group.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')})
+ return True
+
+
+def compare(requested_variables, existing_variables, state):
+ # This comparison reproduces the behaviour of a previous (somewhat
+ # buggy) version; it is not strictly necessary, but it is required
+ # for and only relevant to check mode.
+ # The logic represents state 'present' without purge; all other cases
+ # can be derived from it:
+ # untouched => equal in both
+ # updated => name and scope are equal, other attributes differ
+ # added => name and scope do not exist yet
+ untouched = list()
+ updated = list()
+ added = list()
+
+ if state == 'present':
+ existing_key_scope_vars = list()
+ for item in existing_variables:
+ existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')})
+
+ for var in requested_variables:
+ if var in existing_variables:
+ untouched.append(var)
+ else:
+ compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')}
+ if compare_item in existing_key_scope_vars:
+ updated.append(var)
+ else:
+ added.append(var)
+
+ return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_variables, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ before = [x.attributes for x in gitlab_keys]
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ for item in requested_variables:
+ item['key'] = item.pop('name')
+ item['value'] = str(item.get('value'))
+ if item.get('protected') is None:
+ item['protected'] = False
+ if item.get('masked') is None:
+ item['masked'] = False
+ if item.get('environment_scope') is None:
+ item['environment_scope'] = '*'
+ if item.get('variable_type') is None:
+ item['variable_type'] = 'env_var'
+
+ if module.check_mode:
+ untouched, updated, added = compare(requested_variables, existing_variables, state)
+
+ if state == 'present':
+ add_or_update = [x for x in requested_variables if x not in existing_variables]
+ for item in add_or_update:
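+ # Creating raises if a variable with the same key and scope already
+ # exists; fall back to updating (delete and recreate) in that case.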
+ try:
+ if this_gitlab.create_variable(item):
+ return_value['added'].append(item)
+
+ except Exception:
+ if this_gitlab.update_variable(item):
+ return_value['updated'].append(item)
+
+ if purge:
+ # refetch and filter
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ remove = [x for x in existing_variables if x not in requested_variables]
+ for item in remove:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ elif state == 'absent':
+ # value does not matter on removing variables.
+ # key and environment scope are sufficient
+ for item in existing_variables:
+ item.pop('value')
+ item.pop('variable_type')
+ for item in requested_variables:
+ item.pop('value')
+ item.pop('variable_type')
+
+ if not purge:
+ remove_requested = [x for x in requested_variables if x in existing_variables]
+ for item in remove_requested:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ else:
+ for item in existing_variables:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ if module.check_mode:
+ return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched)
+
+ if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0:
+ change = True
+
+ gitlab_keys = this_gitlab.list_all_group_variables()
+ after = [x.attributes for x in gitlab_keys]
+
+ return change, return_value, before, after
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ group=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', no_log=True),
+ masked=dict(type='bool', default=False),
+ protected=dict(type='bool', default=False),
+ environment_scope=dict(type='str', default='*'),
+ variable_type=dict(type='str', default='env_var', choices=["env_var", "file"])
+ )),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['vars', 'variables'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+ ensure_gitlab_package(module)
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if var_list:
+ variables = vars_to_variables(var_list, module)
+ else:
+ variables = module.params['variables']
+
+ if state == 'present':
+ if any(x['value'] is None for x in variables):
+ module.fail_json(msg='value parameter is required in state present')
+
+ gitlab_instance = gitlab_authentication(module)
+
+ this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance)
+
+ changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module)
+
+ # postprocessing
+ for item in after:
+ item.pop('group_id')
+ item['name'] = item.pop('key')
+ for item in before:
+ item.pop('group_id')
+ item['name'] = item.pop('key')
+
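+ # In check mode 'untouched' comes from compare() and its items use 'key';
+ # otherwise it is recomputed from before/after, whose items use 'name'.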
+ untouched_key_name = 'key'
+ if not module.check_mode:
+ untouched_key_name = 'name'
+ raw_return_value['untouched'] = [x for x in before if x in after]
+
+ added = [x.get('key') for x in raw_return_value['added']]
+ updated = [x.get('key') for x in raw_return_value['updated']]
+ removed = [x.get('key') for x in raw_return_value['removed']]
+ untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']]
+ return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+ module.exit_json(changed=changed, group_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_hook.py b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
new file mode 100644
index 000000000..adf90eb7b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_hook.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Marcus Watkins <marwatk@marcuswatkins.net>
+# Based on code:
+# Copyright (c) 2013, Phillip Gentry <phillip@cx.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_hook
+short_description: Manages GitLab project hooks
+description:
+ - Adds, updates and removes project hook
+author:
+ - Marcus Watkins (@marwatk)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+ - Id or Full path of the project in the form of group/name.
+ required: true
+ type: str
+ hook_url:
+ description:
+ - The URL that you want GitLab to post to; this is used as the primary key for updates and deletion.
+ required: true
+ type: str
+ state:
+ description:
+ - When C(present), the hook will be updated to match the input or created if it doesn't exist.
+ - When C(absent), the hook will be deleted if it exists.
+ default: present
+ type: str
+ choices: [ "present", "absent" ]
+ push_events:
+ description:
+ - Trigger hook on push events.
+ type: bool
+ default: true
+ push_events_branch_filter:
+ description:
+ - Branch name or wildcard to trigger the hook on push events.
+ type: str
+ version_added: '0.2.0'
+ default: ''
+ issues_events:
+ description:
+ - Trigger hook on issues events.
+ type: bool
+ default: false
+ merge_requests_events:
+ description:
+ - Trigger hook on merge requests events.
+ type: bool
+ default: false
+ tag_push_events:
+ description:
+ - Trigger hook on tag push events.
+ type: bool
+ default: false
+ note_events:
+ description:
+ - Trigger hook on note events or when someone adds a comment.
+ type: bool
+ default: false
+ job_events:
+ description:
+ - Trigger hook on job events.
+ type: bool
+ default: false
+ pipeline_events:
+ description:
+ - Trigger hook on pipeline events.
+ type: bool
+ default: false
+ wiki_page_events:
+ description:
+ - Trigger hook on wiki events.
+ type: bool
+ default: false
+ hook_validate_certs:
+ description:
+ - Whether GitLab will do SSL verification when triggering the hook.
+ type: bool
+ default: false
+ aliases: [ enable_ssl_verification ]
+ token:
+ description:
+ - Secret token to validate hook messages at the receiver.
+ - If this is present, it will always result in a change, as it cannot be retrieved from GitLab.
+ - Will show up in the X-GitLab-Token HTTP request header.
+ required: false
+ type: str
+'''
+
+EXAMPLES = '''
+- name: "Adding a project hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: true
+ tag_push_events: true
+ hook_validate_certs: false
+ token: "my-super-secret-token-that-my-ci-server-will-check"
+
+- name: "Delete the previous hook"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
+
+- name: "Delete a hook by numeric project id"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: 10
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: absent
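+
+# Illustrative sketch: push_events_branch_filter limits push triggers to a
+# branch name or wildcard; "main" is a placeholder.
+- name: "Only trigger the hook on pushes to main"
+ community.general.gitlab_hook:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ project: "my_group/my_project"
+ hook_url: "https://my-ci-server.example.com/gitlab-hook"
+ state: present
+ push_events: true
+ push_events_branch_filter: main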
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+hook:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_project, gitlab_authentication, ensure_gitlab_package
+)
+
+
+class GitLabHook(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.hook_object = None
+
+ '''
+ @param project Project Object
+ @param hook_url Url to call on event
+ @param options Hook options
+ '''
+ def create_or_update_hook(self, project, hook_url, options):
+ changed = False
+
+ # exists_hook() was already called in main(), so the object is set when the hook exists
+ if self.hook_object is None:
+ hook = self.create_hook(project, {
+ 'url': hook_url,
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token'],
+ })
+ changed = True
+ else:
+ changed, hook = self.update_hook(self.hook_object, {
+ 'push_events': options['push_events'],
+ 'push_events_branch_filter': options['push_events_branch_filter'],
+ 'issues_events': options['issues_events'],
+ 'merge_requests_events': options['merge_requests_events'],
+ 'tag_push_events': options['tag_push_events'],
+ 'note_events': options['note_events'],
+ 'job_events': options['job_events'],
+ 'pipeline_events': options['pipeline_events'],
+ 'wiki_page_events': options['wiki_page_events'],
+ 'enable_ssl_verification': options['enable_ssl_verification'],
+ 'token': options['token'],
+ })
+
+ self.hook_object = hook
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url)
+
+ try:
+ hook.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update hook: %s " % e)
+
+ return changed
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the hook
+ '''
+ def create_hook(self, project, arguments):
+ if self._module.check_mode:
+ return True
+
+ hook = project.hooks.create(arguments)
+
+ return hook
+
+ '''
+ @param hook Hook Object
+ @param arguments Attributes of the hook
+ '''
+ def update_hook(self, hook, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arg_value is not None:
+ if getattr(hook, arg_key, None) != arg_value:
+ setattr(hook, arg_key, arg_value)
+ changed = True
+
+ return (changed, hook)
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def find_hook(self, project, hook_url):
+ hooks = project.hooks.list(all=True)
+ for hook in hooks:
+ if (hook.url == hook_url):
+ return hook
+
+ '''
+ @param project Project object
+ @param hook_url Url to call on event
+ '''
+ def exists_hook(self, project, hook_url):
+ # When the hook exists, the object is stored in self.hook_object.
+ hook = self.find_hook(project, hook_url)
+ if hook:
+ self.hook_object = hook
+ return True
+ return False
+
+ def delete_hook(self):
+ if not self._module.check_mode:
+ self.hook_object.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ project=dict(type='str', required=True),
+ hook_url=dict(type='str', required=True),
+ push_events=dict(type='bool', default=True),
+ push_events_branch_filter=dict(type='str', default=''),
+ issues_events=dict(type='bool', default=False),
+ merge_requests_events=dict(type='bool', default=False),
+ tag_push_events=dict(type='bool', default=False),
+ note_events=dict(type='bool', default=False),
+ job_events=dict(type='bool', default=False),
+ pipeline_events=dict(type='bool', default=False),
+ wiki_page_events=dict(type='bool', default=False),
+ hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']),
+ token=dict(type='str', no_log=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password']
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ state = module.params['state']
+ project_identifier = module.params['project']
+ hook_url = module.params['hook_url']
+ push_events = module.params['push_events']
+ push_events_branch_filter = module.params['push_events_branch_filter']
+ issues_events = module.params['issues_events']
+ merge_requests_events = module.params['merge_requests_events']
+ tag_push_events = module.params['tag_push_events']
+ note_events = module.params['note_events']
+ job_events = module.params['job_events']
+ pipeline_events = module.params['pipeline_events']
+ wiki_page_events = module.params['wiki_page_events']
+ enable_ssl_verification = module.params['hook_validate_certs']
+ hook_token = module.params['token']
+
+ gitlab_instance = gitlab_authentication(module)
+
+ gitlab_hook = GitLabHook(module, gitlab_instance)
+
+ project = find_project(gitlab_instance, project_identifier)
+
+ if project is None:
+ module.fail_json(msg="Failed to create hook: project %s doesn't exists" % project_identifier)
+
+ hook_exists = gitlab_hook.exists_hook(project, hook_url)
+
+ if state == 'absent':
+ if hook_exists:
+ gitlab_hook.delete_hook()
+ module.exit_json(changed=True, msg="Successfully deleted hook %s" % hook_url)
+ else:
+ module.exit_json(changed=False, msg="Hook deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_hook.create_or_update_hook(project, hook_url, {
+ "push_events": push_events,
+ "push_events_branch_filter": push_events_branch_filter,
+ "issues_events": issues_events,
+ "merge_requests_events": merge_requests_events,
+ "tag_push_events": tag_push_events,
+ "note_events": note_events,
+ "job_events": job_events,
+ "pipeline_events": pipeline_events,
+ "wiki_page_events": wiki_page_events,
+ "enable_ssl_verification": enable_ssl_verification,
+ "token": hook_token,
+ }):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the hook %s" % hook_url, hook=gitlab_hook.hook_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project.py b/ansible_collections/community/general/plugins/modules/gitlab_project.py
new file mode 100644
index 000000000..db360d578
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project.py
@@ -0,0 +1,678 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project
+short_description: Creates/updates/deletes GitLab Projects
+description:
+ - When the project does not exist in GitLab, it will be created.
+ - When the project does exist and I(state=absent), the project will be deleted.
+ - When changes are made to the project, the project will be updated.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ group:
+ description:
+ - ID or the full path of the group to which this project belongs.
+ type: str
+ name:
+ description:
+ - The name of the project.
+ required: true
+ type: str
+ path:
+ description:
+ - The path of the project you want to create; this will be server_url/<group>/path.
+ - If not supplied, I(name) will be used.
+ type: str
+ description:
+ description:
+ - A description for the project.
+ type: str
+ initialize_with_readme:
+ description:
+ - Will initialize the project with a default C(README.md).
+ - Is only used when the project is created, and ignored otherwise.
+ type: bool
+ default: false
+ version_added: "4.0.0"
+ issues_enabled:
+ description:
+ - Whether you want to create issues or not.
+ type: bool
+ default: true
+ merge_requests_enabled:
+ description:
+ - Whether merge requests can be made or not.
+ type: bool
+ default: true
+ wiki_enabled:
+ description:
+ - Whether a wiki for this project should be available or not.
+ type: bool
+ default: true
+ snippets_enabled:
+ description:
+ - Whether creating snippets should be available or not.
+ type: bool
+ default: true
+ visibility:
+ description:
+ - C(private) Project access must be granted explicitly for each user.
+ - C(internal) The project can be cloned by any logged-in user.
+ - C(public) The project can be cloned without any authentication.
+ default: private
+ type: str
+ choices: ["private", "internal", "public"]
+ aliases:
+ - visibility_level
+ import_url:
+ description:
+ - Git repository which will be imported into GitLab.
+ - GitLab server needs read access to this git repository.
+ required: false
+ type: str
+ state:
+ description:
+ - Create or delete project.
+ - Possible values are C(present) and C(absent).
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ merge_method:
+ description:
+ - What requirements are placed upon merges.
+ - Possible values are C(merge) merge commit, C(rebase_merge) merge commit with semi-linear history, and C(ff) fast-forward merges only.
+ type: str
+ choices: ["ff", "merge", "rebase_merge"]
+ default: merge
+ version_added: "1.0.0"
+ lfs_enabled:
+ description:
+ - Enable Git Large File Storage (LFS) to manage large files such
+ as audio, video, and graphics files.
+ type: bool
+ required: false
+ default: false
+ version_added: "2.0.0"
+ username:
+ description:
+ - Used to create a personal project under a user's name.
+ type: str
+ version_added: "3.3.0"
+ allow_merge_on_skipped_pipeline:
+ description:
+ - Allow merge when skipped pipelines exist.
+ type: bool
+ version_added: "3.4.0"
+ only_allow_merge_if_all_discussions_are_resolved:
+ description:
+ - All discussions on a merge request (MR) have to be resolved.
+ type: bool
+ version_added: "3.4.0"
+ only_allow_merge_if_pipeline_succeeds:
+ description:
+ - Only allow merges if pipeline succeeded.
+ type: bool
+ version_added: "3.4.0"
+ packages_enabled:
+ description:
+ - Enable GitLab package repository.
+ type: bool
+ version_added: "3.4.0"
+ remove_source_branch_after_merge:
+ description:
+ - Remove the source branch after merge.
+ type: bool
+ version_added: "3.4.0"
+ squash_option:
+ description:
+ - Squash commits when merging.
+ type: str
+ choices: ["never", "always", "default_off", "default_on"]
+ version_added: "3.4.0"
+ ci_config_path:
+ description:
+ - Custom path to the CI configuration file for this project.
+ type: str
+ version_added: "3.7.0"
+ shared_runners_enabled:
+ description:
+ - Enable shared runners for this project.
+ type: bool
+ version_added: "3.7.0"
+ avatar_path:
+ description:
+ - Absolute path to the image used as the project avatar. File size should not exceed 200 KB.
+ - This option is only used on creation, not for updates.
+ type: path
+ version_added: "4.2.0"
+ default_branch:
+ description:
+ - Default branch name for a new project.
+ - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true).
+ type: str
+ version_added: "4.2.0"
+ builds_access_level:
+ description:
+ - C(private) means that repository CI/CD is allowed only to project members.
+ - C(disabled) means that repository CI/CD is disabled.
+ - C(enabled) means that repository CI/CD is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.2.0"
+ forking_access_level:
+ description:
+ - C(private) means that forking the repository is allowed only to project members.
+ - C(disabled) means that forking the repository is disabled.
+ - C(enabled) means that forking the repository is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.2.0"
+ container_registry_access_level:
+ description:
+ - C(private) means that container registry is allowed only to project members.
+ - C(disabled) means that container registry is disabled.
+ - C(enabled) means that container registry is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.2.0"
+ releases_access_level:
+ description:
+ - C(private) means that accessing releases is allowed only to project members.
+ - C(disabled) means that accessing releases is disabled.
+ - C(enabled) means that accessing releases is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ environments_access_level:
+ description:
+ - C(private) means that deployment to environments is allowed only to project members.
+ - C(disabled) means that deployment to environments is disabled.
+ - C(enabled) means that deployment to environments is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ feature_flags_access_level:
+ description:
+ - C(private) means that feature rollout is allowed only to project members.
+ - C(disabled) means that feature rollout is disabled.
+ - C(enabled) means that feature rollout is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ infrastructure_access_level:
+ description:
+ - C(private) means that configuring infrastructure is allowed only to project members.
+ - C(disabled) means that configuring infrastructure is disabled.
+ - C(enabled) means that configuring infrastructure is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ monitor_access_level:
+ description:
+ - C(private) means that monitoring health is allowed only to project members.
+ - C(disabled) means that monitoring health is disabled.
+ - C(enabled) means that monitoring health is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ security_and_compliance_access_level:
+ description:
+ - C(private) means that accessing the security and compliance tab is allowed only to project members.
+ - C(disabled) means that accessing the security and compliance tab is disabled.
+ - C(enabled) means that accessing the security and compliance tab is enabled.
+ type: str
+ choices: ["private", "disabled", "enabled"]
+ version_added: "6.4.0"
+ topics:
+ description:
+ - A topic or list of topics to be assigned to a project.
+ - It is compatible with old GitLab server releases (versions before 14, where it corresponds to C(tag_list)).
+ type: list
+ elements: str
+ version_added: "6.6.0"
+'''
+
+EXAMPLES = r'''
+- name: Create GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ api_token }}"
+ name: my_first_project
+ group: "10481470"
+
+- name: Delete GitLab Project
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: false
+ name: my_first_project
+ state: absent
+ delegate_to: localhost
+
+- name: Create GitLab Project in group Ansible
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: my_first_project
+ group: ansible
+ issues_enabled: false
+ merge_method: rebase_merge
+ wiki_enabled: true
+ snippets_enabled: true
+ import_url: http://git.example.com/example/lab.git
+ initialize_with_readme: true
+ state: present
+ delegate_to: localhost
+
+- name: Get the initial root password
+ ansible.builtin.shell: |
+ grep 'Password:' /etc/gitlab/initial_root_password | sed -e 's/Password\: \(.*\)/\1/'
+ register: initial_root_password
+
+- name: Create a GitLab Project using a username/password via oauth_token
+ community.general.gitlab_project:
+ api_url: https://gitlab.example.com/
+ api_username: root
+ api_password: "{{ initial_root_password }}"
+ name: my_second_project
+ group: "10481470"
+'''
+
+RETURN = r'''
+msg:
+ description: Success or failure message.
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: JSON-parsed response from the server.
+ returned: always
+ type: dict
+
+error:
+ description: The error message returned by the GitLab API.
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+project:
+ description: API object.
+ returned: always
+ type: dict
+'''
+
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class GitLabProject(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.project_object = None
+
+ '''
+ @param project_name Name of the project
+ @param namespace Namespace Object (User or Group)
+ @param options Options of the project
+ '''
+ def create_or_update_project(self, project_name, namespace, options):
+ changed = False
+ project_options = {
+ 'name': project_name,
+ 'description': options['description'],
+ 'issues_enabled': options['issues_enabled'],
+ 'merge_requests_enabled': options['merge_requests_enabled'],
+ 'merge_method': options['merge_method'],
+ 'wiki_enabled': options['wiki_enabled'],
+ 'snippets_enabled': options['snippets_enabled'],
+ 'visibility': options['visibility'],
+ 'lfs_enabled': options['lfs_enabled'],
+ 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'],
+ 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'],
+ 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'],
+ 'packages_enabled': options['packages_enabled'],
+ 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'],
+ 'squash_option': options['squash_option'],
+ 'ci_config_path': options['ci_config_path'],
+ 'shared_runners_enabled': options['shared_runners_enabled'],
+ 'builds_access_level': options['builds_access_level'],
+ 'forking_access_level': options['forking_access_level'],
+ 'container_registry_access_level': options['container_registry_access_level'],
+ 'releases_access_level': options['releases_access_level'],
+ 'environments_access_level': options['environments_access_level'],
+ 'feature_flags_access_level': options['feature_flags_access_level'],
+ 'infrastructure_access_level': options['infrastructure_access_level'],
+ 'monitor_access_level': options['monitor_access_level'],
+ 'security_and_compliance_access_level': options['security_and_compliance_access_level'],
+ }
+
+ # topics was introduced in GitLab 14 and replaces tag_list. We get the current
+ # GitLab version and check whether it is less than 14. If so, we use tag_list instead of topics.
+ if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"):
+ project_options['tag_list'] = options['topics']
+ else:
+ project_options['topics'] = options['topics']
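+ # Illustrative sketch (not part of the upstream code): LooseVersion compares
+ # dotted version strings numerically, e.g. LooseVersion("13.12.9") < LooseVersion("14")
+ # is True, so a hypothetical 13.x server gets 'tag_list' while 14.0+ gets 'topics'.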
+
+ # exists_project() was already called in main(); project_object is None when the project does not exist.
+ if self.project_object is None:
+ project_options.update({
+ 'path': options['path'],
+ 'import_url': options['import_url'],
+ })
+ if options['initialize_with_readme']:
+ project_options['initialize_with_readme'] = options['initialize_with_readme']
+ if options['default_branch']:
+ project_options['default_branch'] = options['default_branch']
+
+ project_options = self.get_options_with_value(project_options)
+ project = self.create_project(namespace, project_options)
+
+ # add avatar to project
+ if options['avatar_path']:
+ try:
+ project.avatar = open(options['avatar_path'], 'rb')
+ except IOError as e:
+ self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e))
+
+ changed = True
+ else:
+ changed, project = self.update_project(self.project_object, project_options)
+
+ self.project_object = project
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)
+
+ try:
+ project.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed update project: %s " % e)
+ return True
+ return False
+
+ '''
+ @param namespace Namespace Object (User or Group)
+ @param arguments Attributes of the project
+ '''
+ def create_project(self, namespace, arguments):
+ if self._module.check_mode:
+ return True
+
+ arguments['namespace_id'] = namespace.id
+ try:
+ project = self._gitlab.projects.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create project: %s " % to_native(e))
+
+ return project
+
+ '''
+ @param arguments Attributes of the project
+ '''
+ def get_options_with_value(self, arguments):
+ ret_arguments = dict()
+ for arg_key, arg_value in arguments.items():
+ if arg_value is not None:
+ ret_arguments[arg_key] = arg_value
+
+ return ret_arguments
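+ # Illustrative sketch (hypothetical values): options that were never set are
+ # dropped so they are not sent to the GitLab API, e.g.
+ # >>> self.get_options_with_value({'name': 'demo', 'description': None})
+ # {'name': 'demo'}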
+
+ '''
+ @param project Project Object
+ @param arguments Attributes of the project
+ '''
+ def update_project(self, project, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arg_value is not None:
+ if getattr(project, arg_key) != arg_value:
+ setattr(project, arg_key, arg_value)
+ changed = True
+
+ return (changed, project)
+
+ def delete_project(self):
+ if self._module.check_mode:
+ return True
+
+ project = self.project_object
+
+ return project.delete()
+
+ '''
+ @param namespace User/Group object
+ @param path Path of the project
+ '''
+ def exists_project(self, namespace, path):
+ # When project exists, object will be stored in self.project_object.
+ project = find_project(self._gitlab, namespace.full_path + '/' + path)
+ if project:
+ self.project_object = project
+ return True
+ return False
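+ # Illustrative sketch (hypothetical names): for a group with full_path
+ # 'ansible' and path 'my_first_project', the lookup key passed to
+ # find_project() is 'ansible/my_first_project'.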
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ group=dict(type='str'),
+ name=dict(type='str', required=True),
+ path=dict(type='str'),
+ description=dict(type='str'),
+ initialize_with_readme=dict(type='bool', default=False),
+ default_branch=dict(type='str'),
+ issues_enabled=dict(type='bool', default=True),
+ merge_requests_enabled=dict(type='bool', default=True),
+ merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]),
+ wiki_enabled=dict(type='bool', default=True),
+ snippets_enabled=dict(default=True, type='bool'),
+ visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
+ import_url=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ lfs_enabled=dict(default=False, type='bool'),
+ username=dict(type='str'),
+ allow_merge_on_skipped_pipeline=dict(type='bool'),
+ only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'),
+ only_allow_merge_if_pipeline_succeeds=dict(type='bool'),
+ packages_enabled=dict(type='bool'),
+ remove_source_branch_after_merge=dict(type='bool'),
+ squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']),
+ ci_config_path=dict(type='str'),
+ shared_runners_enabled=dict(type='bool'),
+ avatar_path=dict(type='path'),
+ builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']),
+ topics=dict(type='list', elements='str'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['group', 'username'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ group_identifier = module.params['group']
+ project_name = module.params['name']
+ project_path = module.params['path']
+ project_description = module.params['description']
+ initialize_with_readme = module.params['initialize_with_readme']
+ issues_enabled = module.params['issues_enabled']
+ merge_requests_enabled = module.params['merge_requests_enabled']
+ merge_method = module.params['merge_method']
+ wiki_enabled = module.params['wiki_enabled']
+ snippets_enabled = module.params['snippets_enabled']
+ visibility = module.params['visibility']
+ import_url = module.params['import_url']
+ state = module.params['state']
+ lfs_enabled = module.params['lfs_enabled']
+ username = module.params['username']
+ allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline']
+ only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved']
+ only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds']
+ packages_enabled = module.params['packages_enabled']
+ remove_source_branch_after_merge = module.params['remove_source_branch_after_merge']
+ squash_option = module.params['squash_option']
+ ci_config_path = module.params['ci_config_path']
+ shared_runners_enabled = module.params['shared_runners_enabled']
+ avatar_path = module.params['avatar_path']
+ default_branch = module.params['default_branch']
+ builds_access_level = module.params['builds_access_level']
+ forking_access_level = module.params['forking_access_level']
+ container_registry_access_level = module.params['container_registry_access_level']
+ releases_access_level = module.params['releases_access_level']
+ environments_access_level = module.params['environments_access_level']
+ feature_flags_access_level = module.params['feature_flags_access_level']
+ infrastructure_access_level = module.params['infrastructure_access_level']
+ monitor_access_level = module.params['monitor_access_level']
+ security_and_compliance_access_level = module.params['security_and_compliance_access_level']
+ topics = module.params['topics']
+
+ if default_branch and not initialize_with_readme:
+ module.fail_json(msg="Param default_branch need param initialize_with_readme set to true")
+
+ gitlab_instance = gitlab_authentication(module)
+
+ # Set project_path to project_name if it is empty.
+ if project_path is None:
+ project_path = project_name.replace(" ", "_")
+
+ gitlab_project = GitLabProject(module, gitlab_instance)
+
+ namespace = None
+ namespace_id = None
+ if group_identifier:
+ group = find_group(gitlab_instance, group_identifier)
+ if group is None:
+ module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier)
+
+ namespace_id = group.id
+ else:
+ if username:
+ namespace = gitlab_instance.namespaces.list(search=username, all=False)[0]
+ else:
+ namespace = gitlab_instance.namespaces.list(search=gitlab_instance.user.username, all=False)[0]
+ namespace_id = namespace.id
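+ # Descriptive note: the namespaces.list(search=...)[0] lookups above assume
+ # the search returns at least one match; an empty result would raise IndexError.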
+
+ if not namespace_id:
+ module.fail_json(msg="Failed to find the namespace or group ID which is required to look up the namespace")
+
+ try:
+ namespace = gitlab_instance.namespaces.get(namespace_id)
+ except gitlab.exceptions.GitlabGetError as e:
+ module.fail_json(msg="Failed to find the namespace for the given user: %s" % to_native(e))
+
+ if not namespace:
+ module.fail_json(msg="Failed to find the namespace for the project")
+ project_exists = gitlab_project.exists_project(namespace, project_path)
+
+ if state == 'absent':
+ if project_exists:
+ gitlab_project.delete_project()
+ module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
+ module.exit_json(changed=False, msg="Project deleted or does not exists")
+
+ if state == 'present':
+
+ if gitlab_project.create_or_update_project(project_name, namespace, {
+ "path": project_path,
+ "description": project_description,
+ "initialize_with_readme": initialize_with_readme,
+ "default_branch": default_branch,
+ "issues_enabled": issues_enabled,
+ "merge_requests_enabled": merge_requests_enabled,
+ "merge_method": merge_method,
+ "wiki_enabled": wiki_enabled,
+ "snippets_enabled": snippets_enabled,
+ "visibility": visibility,
+ "import_url": import_url,
+ "lfs_enabled": lfs_enabled,
+ "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline,
+ "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved,
+ "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds,
+ "packages_enabled": packages_enabled,
+ "remove_source_branch_after_merge": remove_source_branch_after_merge,
+ "squash_option": squash_option,
+ "ci_config_path": ci_config_path,
+ "shared_runners_enabled": shared_runners_enabled,
+ "avatar_path": avatar_path,
+ "builds_access_level": builds_access_level,
+ "forking_access_level": forking_access_level,
+ "container_registry_access_level": container_registry_access_level,
+ "releases_access_level": releases_access_level,
+ "environments_access_level": environments_access_level,
+ "feature_flags_access_level": feature_flags_access_level,
+ "infrastructure_access_level": infrastructure_access_level,
+ "monitor_access_level": monitor_access_level,
+ "security_and_compliance_access_level": security_and_compliance_access_level,
+ "topics": topics,
+ }):
+
+ module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs)
+ module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.project_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
new file mode 100644
index 000000000..5b1a8d3f1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_badge.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project_badge
+short_description: Manage project badges on GitLab Server
+version_added: 6.1.0
+description:
+ - This module allows adding badges to and removing badges from a project.
+author: Guillaume MARTINEZ (@Lunik)
+requirements:
+ - C(owner) or C(maintainer) rights to project on the GitLab server
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+ - The name (or full path) of the GitLab project the badge is added to/removed from.
+ required: true
+ type: str
+
+ state:
+ description:
+ - State of the badge in the project.
+ - On C(present), it adds a badge to a GitLab project.
+ - On C(absent), it removes a badge from a GitLab project.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ link_url:
+ description:
+ - The URL associated with the badge.
+ required: true
+ type: str
+
+ image_url:
+ description:
+ - The image URL of the badge.
+ - A badge is identified by this URL.
+ required: true
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Add a badge to a GitLab Project
+ community.general.gitlab_project_badge:
+ api_url: 'https://example.gitlab.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ state: present
+ link_url: 'https://example.gitlab.com/%{project_path}'
+ image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg'
+
+- name: Remove a badge from a GitLab Project
+ community.general.gitlab_project_badge:
+ api_url: 'https://example.gitlab.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ state: absent
+ link_url: 'https://example.gitlab.com/%{project_path}'
+ image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg'
+'''
+
+RETURN = '''
+badge:
+ description: The badge information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ id: 1
+ link_url: 'http://example.com/ci_status.svg?project=%{project_path}&ref=%{default_branch}'
+ image_url: 'https://shields.io/my/badge'
+ rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master'
+ rendered_image_url: 'https://shields.io/my/badge'
+ kind: project
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, find_project, ensure_gitlab_package
+)
+
+
+def present_strategy(module, gl, project, wished_badge):
+ changed = False
+
+ existing_badge = None
+ for badge in project.badges.list(iterator=True):
+ if badge.image_url == wished_badge["image_url"]:
+ existing_badge = badge
+ break
+
+ if not existing_badge:
+ changed = True
+ if module.check_mode:
+ return changed, {"status": "A project badge would be created."}
+
+ badge = project.badges.create(wished_badge)
+ return changed, badge.attributes
+
+ if existing_badge.link_url != wished_badge["link_url"]:
+ changed = True
+ existing_badge.link_url = wished_badge["link_url"]
+
+ if changed:
+ if module.check_mode:
+ return changed, {"status": "Project badge attributes would be changed."}
+
+ existing_badge.save()
+
+ return changed, existing_badge.attributes
+
+
+def absent_strategy(module, gl, project, wished_badge):
+ changed = False
+
+ existing_badge = None
+ for badge in project.badges.list(iterator=True):
+ if badge.image_url == wished_badge["image_url"]:
+ existing_badge = badge
+ break
+
+ if not existing_badge:
+ return changed, None
+
+ changed = True
+ if module.check_mode:
+ return changed, {"status": "Project badge would be destroyed."}
+
+ existing_badge.delete()
+
+ return changed, None
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
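+# Descriptive note: core() below dispatches on the state parameter, e.g.
+# state_strategy['present'](module=module, gl=gl, project=project, wished_badge=wished_badge)
+# returns a (changed, summary) tuple that is passed straight to exit_json().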
+
+
+def core(module):
+ ensure_gitlab_package(module)
+
+ gitlab_project = module.params['project']
+ state = module.params['state']
+
+ gl = gitlab_authentication(module)
+
+ project = find_project(gl, gitlab_project)
+ # project doesn't exist
+ if not project:
+ module.fail_json(msg="project '%s' not found." % gitlab_project)
+
+ wished_badge = {
+ "link_url": module.params["link_url"],
+ "image_url": module.params["image_url"],
+ }
+
+ changed, summary = state_strategy[state](module=module, gl=gl, project=project, wished_badge=wished_badge)
+
+ module.exit_json(changed=changed, badge=summary)
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ project=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ link_url=dict(type='str', required=True),
+ image_url=dict(type='str', required=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ],
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_members.py b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py
new file mode 100644
index 000000000..905358443
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_members.py
@@ -0,0 +1,449 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Sergey Mikhaltsov <metanovii@gmail.com>
+# Copyright (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: gitlab_project_members
+short_description: Manage project members on GitLab Server
+version_added: 2.2.0
+description:
+ - This module allows adding members to and removing members from a project, and changing a member's access level in a project on GitLab.
+author:
+ - Sergey Mikhaltsov (@metanovii)
+ - Zainab Alsaffar (@zanssa)
+requirements:
+ - python-gitlab python module <= 1.15.0
+ - owner or maintainer rights to project on the GitLab server
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project:
+ description:
+ - The name (or full path) of the GitLab project the member is added to/removed from.
+ required: true
+ type: str
+ gitlab_user:
+ description:
+ - A username or a list of usernames to add to/remove from the GitLab project.
+ - Mutually exclusive with I(gitlab_users_access).
+ type: list
+ elements: str
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer']
+ gitlab_users_access:
+ description:
+ - Provide a list of user-to-access-level mappings.
+ - Every dictionary in this list specifies a user (by username) and the access level the user should have.
+ - Mutually exclusive with I(gitlab_user) and I(access_level).
+ - Use together with I(purge_users) to remove all users not specified here from the project.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description: The username of the user to add to/remove from the GitLab project.
+ type: str
+ required: true
+ access_level:
+ description:
+ - The access level for the user.
+ - Required if I(state=present).
+ type: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer']
+ required: true
+ version_added: 3.7.0
+ state:
+ description:
+ - State of the member in the project.
+ - On C(present), it adds a user to a GitLab project.
+ - On C(absent), it removes a user from a GitLab project.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ purge_users:
+ description:
+ - Adds/removes users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list.
+ If omitted, do not purge orphaned members.
+ - Is only used when I(state=present).
+ type: list
+ elements: str
+ choices: ['guest', 'reporter', 'developer', 'maintainer']
+ version_added: 3.7.0
+'''
+
+EXAMPLES = r'''
+- name: Add a user to a GitLab Project
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ validate_certs: true
+ project: projectname
+ gitlab_user: username
+ access_level: developer
+ state: present
+
+- name: Remove a user from a GitLab project
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ validate_certs: false
+ project: projectname
+ gitlab_user: username
+ state: absent
+
+- name: Add a list of users to a GitLab project
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ gitlab_user:
+ - user1
+ - user2
+ access_level: developer
+ state: present
+
+- name: Add a list of users with dedicated access levels to a GitLab project
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ gitlab_users_access:
+ - name: user1
+ access_level: developer
+ - name: user2
+ access_level: maintainer
+ state: present
+
+- name: Add a user, remove all others which might be on this access level
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ gitlab_user: username
+ access_level: developer
+ purge_users: developer
+ state: present
+
+- name: Remove a list of users with dedicated access levels from a GitLab project
+ community.general.gitlab_project_members:
+ api_url: 'https://gitlab.example.com'
+ api_token: 'Your-Private-Token'
+ project: projectname
+ gitlab_users_access:
+ - name: user1
+ access_level: developer
+ - name: user2
+ access_level: maintainer
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabProjectMembers(object):
+ def __init__(self, module, gl):
+ self._module = module
+ self._gitlab = gl
+
+ def get_project(self, project_name):
+ try:
+ project_exists = self._gitlab.projects.get(project_name)
+ return project_exists.id
+ except gitlab.exceptions.GitlabGetError:
+ project_exists = self._gitlab.projects.list(search=project_name, all=False)
+ if project_exists:
+ return project_exists[0].id
+
+ def get_user_id(self, gitlab_user):
+ user_exists = self._gitlab.users.list(username=gitlab_user, all=False)
+ if user_exists:
+ return user_exists[0].id
+
+ # get all members in a project
+ def get_members_in_a_project(self, gitlab_project_id):
+ project = self._gitlab.projects.get(gitlab_project_id)
+ return project.members.list(all=True)
+
+ # get single member in a project by user name
+ def get_member_in_a_project(self, gitlab_project_id, gitlab_user_id):
+ member = None
+ project = self._gitlab.projects.get(gitlab_project_id)
+ try:
+ member = project.members.get(gitlab_user_id)
+ if member:
+ return member
+ except gitlab.exceptions.GitlabGetError:
+ return None
+
+ # check if the user is a member of the project
+ def is_user_a_member(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return True
+ return False
+
+ # add user to a project
+ def add_member_to_project(self, gitlab_user_id, gitlab_project_id, access_level):
+ project = self._gitlab.projects.get(gitlab_project_id)
+ project.members.create(
+ {'user_id': gitlab_user_id, 'access_level': access_level})
+
+ # remove user from a project
+ def remove_user_from_project(self, gitlab_user_id, gitlab_project_id):
+ project = self._gitlab.projects.get(gitlab_project_id)
+ project.members.delete(gitlab_user_id)
+
+ # get user's access level
+ def get_user_access_level(self, members, gitlab_user_id):
+ for member in members:
+ if member.id == gitlab_user_id:
+ return member.access_level
+
+ # update user's access level in a project
+ def update_user_access_level(self, members, gitlab_user_id, access_level):
+ for member in members:
+ if member.id == gitlab_user_id:
+ member.access_level = access_level
+ member.save()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ project=dict(type='str', required=True),
+ gitlab_user=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']),
+ purge_users=dict(type='list', elements='str', choices=[
+ 'guest', 'reporter', 'developer', 'maintainer']),
+ gitlab_users_access=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ access_level=dict(type='str', choices=[
+ 'guest', 'reporter', 'developer', 'maintainer'], required=True),
+ )
+ ),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['gitlab_user', 'gitlab_users_access'],
+ ['access_level', 'gitlab_users_access'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ['gitlab_user', 'access_level'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ['gitlab_user', 'gitlab_users_access'],
+ ],
+ required_if=[
+ ['state', 'present', ['access_level', 'gitlab_users_access'], True],
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ access_level_int = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ }
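+ # Descriptive note: these python-gitlab constants correspond to GitLab's
+ # documented numeric access levels (guest=10, reporter=20, developer=30,
+ # maintainer=40), so a purge_users list like ['developer'] becomes [30] below.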
+
+ gitlab_project = module.params['project']
+ state = module.params['state']
+ access_level = module.params['access_level']
+ purge_users = module.params['purge_users']
+
+ if purge_users:
+ purge_users = [access_level_int[level] for level in purge_users]
+
+ # connect to gitlab server
+ gl = gitlab_authentication(module)
+
+ project = GitLabProjectMembers(module, gl)
+
+ gitlab_project_id = project.get_project(gitlab_project)
+
+ # project doesn't exist
+ if not gitlab_project_id:
+ module.fail_json(msg="project '%s' not found." % gitlab_project)
+
+ members = []
+ if module.params['gitlab_user'] is not None:
+ gitlab_users_access = []
+ gitlab_users = module.params['gitlab_user']
+ for gl_user in gitlab_users:
+ gitlab_users_access.append(
+ {'name': gl_user, 'access_level': access_level_int[access_level] if access_level else None})
+ elif module.params['gitlab_users_access'] is not None:
+ gitlab_users_access = module.params['gitlab_users_access']
+ for user_level in gitlab_users_access:
+ user_level['access_level'] = access_level_int[user_level['access_level']]
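+ # Illustrative sketch (hypothetical users): both input styles end up in the
+ # same internal structure, e.g.
+ # [{'name': 'user1', 'access_level': 30}, {'name': 'user2', 'access_level': 40}]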
+
+ if len(gitlab_users_access) == 1 and not purge_users:
+ # only single user given
+ members = [project.get_member_in_a_project(
+ gitlab_project_id, project.get_user_id(gitlab_users_access[0]['name']))]
+ if members[0] is None:
+ members = []
+ elif len(gitlab_users_access) > 1 or purge_users:
+ # list of users given
+ members = project.get_members_in_a_project(gitlab_project_id)
+ else:
+ module.exit_json(changed=False, result="Nothing to do, please give at least one user or set purge_users true.",
+ result_data=[])
+
+ changed = False
+ error = False
+ changed_users = []
+ changed_data = []
+
+ for gitlab_user in gitlab_users_access:
+ gitlab_user_id = project.get_user_id(gitlab_user['name'])
+
+ # user doesn't exist
+ if not gitlab_user_id:
+ if state == 'absent':
+ changed_users.append("user '%s' not found, and thus also not part of the project" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "user '%s' not found, and thus also not part of the project" % gitlab_user['name']})
+ else:
+ error = True
+ changed_users.append("user '%s' not found." % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "user '%s' not found." % gitlab_user['name']})
+ continue
+
+ is_user_a_member = project.is_user_a_member(members, gitlab_user_id)
+
+ # check if the user is a member in the project
+ if not is_user_a_member:
+ if state == 'present':
+ # add user to the project
+ try:
+ if not module.check_mode:
+ project.add_member_to_project(gitlab_user_id, gitlab_project_id, gitlab_user['access_level'])
+ changed = True
+ changed_users.append("Successfully added user '%s' to project" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully added user '%s' to project" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ error = True
+ changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Not allowed to add the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+ # state as absent
+ else:
+ changed_users.append("User, '%s', is not a member in the project. No change to report" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "User, '%s', is not a member in the project. No change to report" % gitlab_user['name']})
+ # in case that a user is a member
+ else:
+ if state == 'present':
+ # compare the access level
+ user_access_level = project.get_user_access_level(members, gitlab_user_id)
+ if user_access_level == gitlab_user['access_level']:
+ changed_users.append("User, '%s', is already a member in the project. No change to report" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'OK',
+ 'msg': "User, '%s', is already a member in the project. No change to report" % gitlab_user['name']})
+ else:
+ # update the access level for the user
+ try:
+ if not module.check_mode:
+ project.update_user_access_level(members, gitlab_user_id, gitlab_user['access_level'])
+ changed = True
+ changed_users.append("Successfully updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully updated the access level for the user, '%s'" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabUpdateError) as e:
+ error = True
+ changed_users.append("Failed to updated the access level for the user, '%s'" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Not allowed to update the access level for the member, %s: %s" % (gitlab_user['name'], e)})
+ else:
+ # remove the user from the project
+ try:
+ if not module.check_mode:
+ project.remove_user_from_project(gitlab_user_id, gitlab_project_id)
+ changed = True
+ changed_users.append("Successfully removed user, '%s', from the project" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'CHANGED',
+ 'msg': "Successfully removed user, '%s', from the project" % gitlab_user['name']})
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ error = True
+ changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})
+
+ # If state is present and purge_users is set, delete members that have one of the given access levels but are not in gitlab_users_access.
+ if state == 'present' and purge_users:
+ uppercase_names_in_gitlab_users_access = []
+ for name in gitlab_users_access:
+ uppercase_names_in_gitlab_users_access.append(name['name'].upper())
+
+ for member in members:
+ if member.access_level in purge_users and member.username.upper() not in uppercase_names_in_gitlab_users_access:
+ try:
+ if not module.check_mode:
+ project.remove_user_from_project(member.id, gitlab_project_id)
+ changed = True
+ changed_users.append("Successfully removed user '%s', from project. Was not in given list" % member.username)
+ changed_data.append({'gitlab_user': member.username, 'result': 'CHANGED',
+ 'msg': "Successfully removed user '%s', from project. Was not in given list" % member.username})
+ except (gitlab.exceptions.GitlabDeleteError) as e:
+ error = True
+ changed_users.append("Failed to removed user, '%s', from the project" % gitlab_user['name'])
+ changed_data.append({'gitlab_user': gitlab_user['name'], 'result': 'FAILED',
+ 'msg': "Failed to remove user, '%s' from the project: %s" % (gitlab_user['name'], e)})
+
+ if len(gitlab_users_access) == 1 and error:
+ # If a single user was given and an error occurred, fail with that error; for lists, errors are reported per user.
+ module.fail_json(msg="FAILED: '%s'" % changed_users[0], result_data=changed_data)
+ elif error:
+ module.fail_json(
+ msg='FAILED: At least one given user/permission could not be set', result_data=changed_data)
+
+ module.exit_json(changed=changed, msg='Successfully set memberships', result="\n".join(changed_users), result_data=changed_data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
new file mode 100644
index 000000000..63569dd78
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_project_variable.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_project_variable
+short_description: Creates/updates/deletes GitLab Projects Variables
+description:
+ - When a project variable does not exist, it will be created.
+ - When a project variable does exist, its value will be updated when the values are different.
+ - Variables which are not mentioned in the playbook but exist in the GitLab project
+ either stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)).
+author:
+ - "Markus Bergholz (@markuman)"
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete project variable.
+ - Possible values are C(present) and C(absent).
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ purge:
+ description:
+ - When set to C(true), all variables which are not mentioned in the task will be deleted.
+ default: false
+ type: bool
+ vars:
+ description:
+ - When the list element is a simple key-value pair, masked and protected will be set to false.
+ - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can
+ have full control over whether a value should be masked, protected or both.
+ - Support for protected values requires GitLab >= 9.3.
+ - Support for masked values requires GitLab >= 11.10.
+ - Support for environment_scope requires GitLab Premium >= 13.11.
+ - Support for variable_type requires GitLab >= 11.11.
+ - A I(value) must be a string or a number.
+ - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file).
+ - Field I(environment_scope) must be a string defined by scope environment.
+ - When a value is masked, it must be in Base64 and have a length of at least 8 characters.
+ See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables).
+ default: {}
+ type: dict
+ variables:
+ version_added: 4.4.0
+ description:
+ - A list of dictionaries that represents CI/CD variables.
+ - This module works internally with this structure, even if the older I(vars) parameter is used.
+ default: []
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The name of the variable.
+ type: str
+ required: true
+ value:
+ description:
+ - The variable value.
+ - Required when I(state=present).
+ type: str
+ masked:
+ description:
+ - Whether the variable value is masked or not.
+ - Support for masked values requires GitLab >= 11.10.
+ type: bool
+ default: false
+ protected:
+ description:
+ - Whether the variable value is protected or not.
+ - Support for protected values requires GitLab >= 9.3.
+ type: bool
+ default: false
+ variable_type:
+ description:
+ - Whether a variable is an environment variable (C(env_var)) or a file (C(file)).
+ - Support for I(variable_type) requires GitLab >= 11.11.
+ type: str
+ choices: ["env_var", "file"]
+ default: env_var
+ environment_scope:
+ description:
+ - The scope for the variable.
+ - Support for I(environment_scope) requires GitLab Premium >= 13.11.
+ type: str
+ default: '*'
+'''
+
+
+EXAMPLES = '''
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ variables:
+ - name: ACCESS_KEY_ID
+ value: abc123
+ - name: SECRET_ACCESS_KEY
+ value: dassgrfaeui8989
+ masked: true
+ protected: true
+ environment_scope: production
+
+- name: Set or update some CI/CD variables
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ purge: false
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY:
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: '*'
+
+- name: Delete one variable
+ community.general.gitlab_project_variable:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: markuman/dotfiles
+ state: absent
+ vars:
+ ACCESS_KEY_ID: abc123
+'''
+
+RETURN = '''
+project_variable:
+ description: Four lists of the variable names which were added, updated, removed, or already existed.
+ returned: always
+ type: dict
+ contains:
+ added:
+ description: A list of variables which were created.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ untouched:
+ description: A list of variables which exist.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ removed:
+ description: A list of variables which were deleted.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+ updated:
+ description: A list of variables whose values were changed.
+ returned: always
+ type: list
+ sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY']
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six import integer_types
+
+GITLAB_IMP_ERR = None
+try:
+ import gitlab # noqa: F401, pylint: disable=unused-import
+ HAS_GITLAB_PACKAGE = True
+except Exception:
+ GITLAB_IMP_ERR = traceback.format_exc()
+ HAS_GITLAB_PACKAGE = False
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables
+)
+
+
+def vars_to_variables(vars, module):
+ # transform old vars to new variables structure
+ variables = list()
+ for item, value in vars.items():
+ if (isinstance(value, string_types) or
+ isinstance(value, (integer_types, float))):
+ variables.append(
+ {
+ "name": item,
+ "value": str(value),
+ "masked": False,
+ "protected": False,
+ "variable_type": "env_var",
+ }
+ )
+
+ elif isinstance(value, dict):
+
+ new_item = {
+ "name": item,
+ "value": value.get('value'),
+ "masked": value.get('masked'),
+ "protected": value.get('protected'),
+ "variable_type": value.get('variable_type'),
+ }
+
+ if value.get('environment_scope'):
+ new_item['environment_scope'] = value.get('environment_scope')
+
+ variables.append(new_item)
+
+ else:
+ module.fail_json(msg="value must be of type string, integer, float or dict")
+
+ return variables
+
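+# Illustrative sketch (hypothetical input): vars_to_variables() maps the legacy
+# dict form onto the list form, e.g. {'KEY': 'abc', 'SECRET': {'value': 'xyz', 'masked': True}}
+# becomes
+# [{'name': 'KEY', 'value': 'abc', 'masked': False, 'protected': False, 'variable_type': 'env_var'},
+#  {'name': 'SECRET', 'value': 'xyz', 'masked': True, 'protected': None, 'variable_type': None}]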
+
+class GitlabProjectVariables(object):
+
+ def __init__(self, module, gitlab_instance):
+ self.repo = gitlab_instance
+ self.project = self.get_project(module.params['project'])
+ self._module = module
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def list_all_project_variables(self):
+ page_nb = 1
+ variables = []
+ vars_page = self.project.variables.list(page=page_nb)
+ while len(vars_page) > 0:
+ variables += vars_page
+ page_nb += 1
+ vars_page = self.project.variables.list(page=page_nb)
+ return variables
+
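+ # Descriptive note: list_all_project_variables() above pages through the API
+ # manually, requesting page 1, 2, ... until an empty page comes back;
+ # python-gitlab could equivalently return everything with list(all=True).
+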
+ def create_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+
+ var = {
+ "key": var_obj.get('key'),
+ "value": var_obj.get('value'),
+ "masked": var_obj.get('masked'),
+ "protected": var_obj.get('protected'),
+ "variable_type": var_obj.get('variable_type'),
+ }
+
+ if var_obj.get('environment_scope') is not None:
+ var["environment_scope"] = var_obj.get('environment_scope')
+
+ self.project.variables.create(var)
+ return True
+
+ def update_variable(self, var_obj):
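+ # Descriptive note: update is implemented as delete-then-create so that every
+ # attribute, including environment_scope, matches the requested variable.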
+ if self._module.check_mode:
+ return True
+ self.delete_variable(var_obj)
+ self.create_variable(var_obj)
+ return True
+
+ def delete_variable(self, var_obj):
+ if self._module.check_mode:
+ return True
+ self.project.variables.delete(var_obj.get('key'), filter={'environment_scope': var_obj.get('environment_scope')})
+ return True
+
+
+def compare(requested_variables, existing_variables, state):
+ # This comparison mirrors the behaviour of a previous (more or less buggy)
+ # version. Strictly speaking it is not necessary and might even introduce
+ # other bugs, but it is kept because it is only relevant for check mode.
+ # The logic represents state 'present' without purge; all other cases can
+ # be derived from it:
+ # untouched => equal in both
+ # updated => name and scope are equal
+ # added => name and scope do not exist
+ untouched = list()
+ updated = list()
+ added = list()
+
+ if state == 'present':
+ existing_key_scope_vars = list()
+ for item in existing_variables:
+ existing_key_scope_vars.append({'key': item.get('key'), 'environment_scope': item.get('environment_scope')})
+
+ for var in requested_variables:
+ if var in existing_variables:
+ untouched.append(var)
+ else:
+ compare_item = {'key': var.get('name'), 'environment_scope': var.get('environment_scope')}
+ if compare_item in existing_key_scope_vars:
+ updated.append(var)
+ else:
+ added.append(var)
+
+ return untouched, updated, added
+
+
+def native_python_main(this_gitlab, purge, requested_variables, state, module):
+
+ change = False
+ return_value = dict(added=list(), updated=list(), removed=list(), untouched=list())
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ before = [x.attributes for x in gitlab_keys]
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ # filter out and enrich before compare
+ for item in requested_variables:
+ item['key'] = item.pop('name')
+ item['value'] = str(item.get('value'))
+ if item.get('protected') is None:
+ item['protected'] = False
+ if item.get('masked') is None:
+ item['masked'] = False
+ if item.get('environment_scope') is None:
+ item['environment_scope'] = '*'
+ if item.get('variable_type') is None:
+ item['variable_type'] = 'env_var'
+
+ if module.check_mode:
+ untouched, updated, added = compare(requested_variables, existing_variables, state)
+
+ if state == 'present':
+ add_or_update = [x for x in requested_variables if x not in existing_variables]
+ for item in add_or_update:
+ try:
+ if this_gitlab.create_variable(item):
+ return_value['added'].append(item)
+
+ except Exception:
+ if this_gitlab.update_variable(item):
+ return_value['updated'].append(item)
+
+ if purge:
+ # refetch and filter
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ existing_variables = filter_returned_variables(gitlab_keys)
+
+ remove = [x for x in existing_variables if x not in requested_variables]
+ for item in remove:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ elif state == 'absent':
+ # value does not matter on removing variables.
+ # key and environment scope are sufficient
+ for item in existing_variables:
+ item.pop('value')
+ item.pop('variable_type')
+ for item in requested_variables:
+ item.pop('value')
+ item.pop('variable_type')
+
+ if not purge:
+ remove_requested = [x for x in requested_variables if x in existing_variables]
+ for item in remove_requested:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ else:
+ for item in existing_variables:
+ if this_gitlab.delete_variable(item):
+ return_value['removed'].append(item)
+
+ if module.check_mode:
+ return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched)
+
+ if return_value['added'] or return_value['removed'] or return_value['updated']:
+ change = True
+
+ gitlab_keys = this_gitlab.list_all_project_variables()
+ after = [x.attributes for x in gitlab_keys]
+
+ return change, return_value, before, after
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=True),
+ purge=dict(type='bool', required=False, default=False),
+ vars=dict(type='dict', required=False, default=dict(), no_log=True),
+ variables=dict(type='list', elements='dict', required=False, default=list(), options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', no_log=True),
+ masked=dict(type='bool', default=False),
+ protected=dict(type='bool', default=False),
+ environment_scope=dict(type='str', default='*'),
+ variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]),
+ )),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['vars', 'variables'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+ ensure_gitlab_package(module)
+
+ if not HAS_GITLAB_PACKAGE:
+ module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)
+
+ purge = module.params['purge']
+ var_list = module.params['vars']
+ state = module.params['state']
+
+ if var_list:
+ variables = vars_to_variables(var_list, module)
+ else:
+ variables = module.params['variables']
+
+ if state == 'present':
+ if any(x['value'] is None for x in variables):
+ module.fail_json(msg='value parameter is required in state present')
+
+ gitlab_instance = gitlab_authentication(module)
+
+ this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance)
+
+ change, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module)
+
+ # postprocessing
+ for item in after:
+ item.pop('project_id')
+ item['name'] = item.pop('key')
+ for item in before:
+ item.pop('project_id')
+ item['name'] = item.pop('key')
+
+ untouched_key_name = 'key'
+ if not module.check_mode:
+ untouched_key_name = 'name'
+ raw_return_value['untouched'] = [x for x in before if x in after]
+
+ added = [x.get('key') for x in raw_return_value['added']]
+ updated = [x.get('key') for x in raw_return_value['updated']]
+ removed = [x.get('key') for x in raw_return_value['removed']]
+ untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']]
+ return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched)
+
+ module.exit_json(changed=change, project_variable=return_value)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
new file mode 100644
index 000000000..fea374cbf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_protected_branch.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: gitlab_protected_branch
+short_description: Manage protection of existing branches
+version_added: 3.4.0
+description:
+ - (Un)marking existing branches as protected.
+author:
+ - "Werner Dijkerman (@dj-wasabi)"
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 2.3.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Create or delete protected branch.
+ default: present
+ type: str
+ choices: ["present", "absent"]
+ project:
+ description:
+ - The path and name of the project.
+ required: true
+ type: str
+ name:
+ description:
+ - The name of the branch that needs to be protected.
+ - Can use a wildcard character like C(production/*), or a plain branch name such as C(main) or C(develop).
+ required: true
+ type: str
+ merge_access_levels:
+ description:
+ - Access levels allowed to merge.
+ default: maintainer
+ type: str
+ choices: ["maintainer", "developer", "nobody"]
+ push_access_level:
+ description:
+ - Access levels allowed to push.
+ default: maintainer
+ type: str
+ choices: ["maintainer", "developer", "nobody"]
+'''
+
+
+EXAMPLES = '''
+- name: Create protected branch on main
+ community.general.gitlab_protected_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "dj-wasabi/collection.general"
+ name: main
+ merge_access_levels: maintainer
+ push_access_level: nobody
+
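+# Removing the protection again only needs state: absent; the connection
+# values below simply reuse the illustrative ones from the example above.
+- name: Remove protection from the main branch
+ community.general.gitlab_protected_branch:
+ api_url: https://gitlab.com
+ api_token: secret_access_token
+ project: "dj-wasabi/collection.general"
+ name: main
+ state: absent
+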
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.api import basic_auth_argument_spec
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitlabProtectedBranch(object):
+
+ def __init__(self, module, project, gitlab_instance):
+ self.repo = gitlab_instance
+ self._module = module
+ self.project = self.get_project(project)
+ self.ACCESS_LEVEL = {
+ 'nobody': gitlab.NO_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS
+ }
+
+ def get_project(self, project_name):
+ return self.repo.projects.get(project_name)
+
+ def protected_branch_exist(self, name):
+ try:
+ return self.project.protectedbranches.get(name)
+ except Exception:
+ return False
+
+ def create_protected_branch(self, name, merge_access_levels, push_access_level):
+ if self._module.check_mode:
+ return True
+ merge = self.ACCESS_LEVEL[merge_access_levels]
+ push = self.ACCESS_LEVEL[push_access_level]
+ self.project.protectedbranches.create({
+ 'name': name,
+ 'merge_access_level': merge,
+ 'push_access_level': push
+ })
+
+ def compare_protected_branch(self, name, merge_access_levels, push_access_level):
+ configured_merge = self.ACCESS_LEVEL[merge_access_levels]
+ configured_push = self.ACCESS_LEVEL[push_access_level]
+ current = self.protected_branch_exist(name=name)
+ if current:
+ # only read the access levels once we know the branch exists
+ current_merge = current.merge_access_levels[0]['access_level']
+ current_push = current.push_access_levels[0]['access_level']
+ if current.name == name and current_merge == configured_merge and current_push == configured_push:
+ return True
+ return False
+
+ def delete_protected_branch(self, name):
+ if self._module.check_mode:
+ return True
+ return self.project.protectedbranches.delete(name)
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(
+ project=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
+ push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True
+ )
+ ensure_gitlab_package(module)
+
+ project = module.params['project']
+ name = module.params['name']
+ merge_access_levels = module.params['merge_access_levels']
+ push_access_level = module.params['push_access_level']
+ state = module.params['state']
+
+ gitlab_version = gitlab.__version__
+ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
+ module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
+ " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version)
+
+ gitlab_instance = gitlab_authentication(module)
+ this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance)
+
+ p_branch = this_gitlab.protected_branch_exist(name=name)
+ if not p_branch and state == "present":
+ this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
+ module.exit_json(changed=True, msg="Created the proteched branch.")
+ elif p_branch and state == "present":
+ if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level):
+ this_gitlab.delete_protected_branch(name=name)
+ this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level)
+ module.exit_json(changed=True, msg="Recreated the proteched branch.")
+ elif p_branch and state == "absent":
+ this_gitlab.delete_protected_branch(name=name)
+ module.exit_json(changed=True, msg="Deleted the proteched branch.")
+ module.exit_json(changed=False, msg="No changes are needed.")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_runner.py b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
new file mode 100644
index 000000000..a41b135fc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_runner.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Raphaël Droz (raphael.droz@gmail.com)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2018, Samy Coenen <samy.coenen@nubera.be>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_runner
+short_description: Create, modify and delete GitLab Runners
+description:
+ - Register, update and delete runners with the GitLab API.
+ - All operations are performed using the GitLab API v4.
+ - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html).
+ - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at
+ U(https://$GITLAB_URL/profile/personal_access_tokens).
+ - A valid registration token is required for registering a new runner.
+ To create shared runners, you need to ask your administrator to give you this token.
+ It can be found at U(https://$GITLAB_URL/admin/runners/).
+notes:
+ - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required.
+ - Runners need to have unique descriptions.
+author:
+ - Samy Coenen (@SamyCoenen)
+ - Guillaume Martinez (@Lunik)
+requirements:
+ - python >= 2.7
+ - python-gitlab >= 1.5.0
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ group:
+ description:
+ - ID or full path of the group in the form group/subgroup.
+ - Mutually exclusive with I(owned) and I(project).
+ type: str
+ version_added: '6.5.0'
+ project:
+ description:
+ - ID or full path of the project in the form of group/name.
+ - Mutually exclusive with I(owned) since community.general 4.5.0.
+ - Mutually exclusive with I(group).
+ type: str
+ version_added: '3.7.0'
+ description:
+ description:
+ - The unique name of the runner.
+ required: true
+ type: str
+ aliases:
+ - name
+ state:
+ description:
+ - Ensure that a runner with the same name exists with the same configuration, or delete an existing runner with that name.
+ required: false
+ default: present
+ choices: ["present", "absent"]
+ type: str
+ registration_token:
+ description:
+ - The registration token is used to register new runners.
+ - Required if I(state) is C(present).
+ type: str
+ owned:
+ description:
+ - Searches only runners available to the user when looking for existing runners; when C(false), an admin token is required.
+ - Mutually exclusive with I(project) since community.general 4.5.0.
+ - Mutually exclusive with I(group).
+ default: false
+ type: bool
+ version_added: 2.0.0
+ active:
+ description:
+ - Define if the runner is immediately active after creation.
+ required: false
+ default: true
+ type: bool
+ locked:
+ description:
+ - Determines if the runner is locked or not.
+ required: false
+ default: false
+ type: bool
+ access_level:
+ description:
+ - Determines if a runner can pick up jobs only from protected branches.
+ - If I(access_level_on_creation) is not explicitly set to C(true), this option is ignored on registration and
+ is only applied on updates.
+ - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches.
+ - If set to C(ref_protected), runner can pick up jobs only from protected branches.
+ - The current default is C(ref_protected). This will change to no default in community.general 8.0.0.
+ From that version on, if this option is not specified explicitly, GitLab will use C(not_protected)
+ on creation, and the value set will not be changed on any updates.
+ required: false
+ choices: ["not_protected", "ref_protected"]
+ type: str
+ access_level_on_creation:
+ description:
+ - Whether the runner should be registered with an access level or not.
+ - If set to C(true), the value of I(access_level) is used for runner registration.
+ - If set to C(false), GitLab registers the runner with the default access level.
+ - The current default of this option is C(false). This default is deprecated and will change to C(true) in community.general 7.0.0.
+ required: false
+ type: bool
+ version_added: 6.3.0
+ maximum_timeout:
+ description:
+ - The maximum time that a runner has to complete a specific job.
+ required: false
+ default: 3600
+ type: int
+ run_untagged:
+ description:
+ - Run untagged jobs or not.
+ required: false
+ default: true
+ type: bool
+ tag_list:
+ description: The tags that apply to the runner.
+ required: false
+ default: []
+ type: list
+ elements: str
+'''
+
+EXAMPLES = '''
+- name: "Register runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: Docker Machine t1
+ state: present
+ active: true
+ tag_list: ['docker']
+ run_untagged: false
+ locked: false
+
+- name: "Delete runner"
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ state: absent
+
+- name: Delete an owned runner as a non-admin
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ description: Docker Machine t1
+ owned: true
+ state: absent
+
+- name: Register runner for a specific project
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: MyProject runner
+ state: present
+ project: mygroup/mysubgroup/myproject
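+
+# The I(group) option (community.general 6.5.0) works analogously to
+# I(project); the group path below is a placeholder.
+- name: Register runner for a specific group
+ community.general.gitlab_runner:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ registration_token: 4gfdsg345
+ description: MyGroup runner
+ state: present
+ group: mygroup/mysubgroup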
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+runner:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+try:
+ cmp # pylint: disable=used-before-assignment
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+
+class GitLabRunner(object):
+ def __init__(self, module, gitlab_instance, group=None, project=None):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.runner_object = None
+
+ # Whether to operate on GitLab-instance-wide or project-wide runners
+ # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774
+ # for group runner token access
+ if project:
+ self._runners_endpoint = project.runners.list
+ elif group:
+ self._runners_endpoint = group.runners.list
+ elif module.params['owned']:
+ self._runners_endpoint = gitlab_instance.runners.list
+ else:
+ self._runners_endpoint = gitlab_instance.runners.all
+
+ def create_or_update_runner(self, description, options):
+ changed = False
+
+ arguments = {
+ 'active': options['active'],
+ 'locked': options['locked'],
+ 'run_untagged': options['run_untagged'],
+ 'maximum_timeout': options['maximum_timeout'],
+ 'tag_list': options['tag_list'],
+ }
+ if options.get('access_level') is not None:
+ arguments['access_level'] = options['access_level']
+ # Because exists_runner() has already been called in main(), self.runner_object is set if the runner exists
+ if self.runner_object is None:
+ arguments['description'] = description
+ arguments['token'] = options['registration_token']
+
+ access_level_on_creation = self._module.params['access_level_on_creation']
+ if access_level_on_creation is None:
+ message = "The option 'access_level_on_creation' is unspecified, so 'false' is assumed. "\
+ "That means any value of 'access_level' is ignored and GitLab registers the runner with its default value. "\
+ "The option 'access_level_on_creation' will switch to 'true' in community.general 7.0.0"
+ self._module.deprecate(message, version='7.0.0', collection_name='community.general')
+ access_level_on_creation = False
+
+ if not access_level_on_creation:
+ arguments.pop('access_level', None)
+
+ runner = self.create_runner(arguments)
+ changed = True
+ else:
+ changed, runner = self.update_runner(self.runner_object, arguments)
+
+ self.runner_object = runner
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description)
+
+ try:
+ runner.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update runner: %s " % to_native(e))
+ return True
+ else:
+ return False
+
+ '''
+ @param arguments Attributes of the runner
+ '''
+ def create_runner(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ try:
+ runner = self._gitlab.runners.create(arguments)
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create runner: %s " % to_native(e))
+
+ return runner
+
+ '''
+ @param runner Runner object
+ @param arguments Attributes of the runner
+ '''
+ def update_runner(self, runner, arguments):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ if arguments[arg_key] is not None:
+ if isinstance(arguments[arg_key], list):
+ list1 = getattr(runner, arg_key)
+ list1.sort()
+ list2 = arguments[arg_key]
+ list2.sort()
+ if cmp(list1, list2):
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+ else:
+ if getattr(runner, arg_key) != arguments[arg_key]:
+ setattr(runner, arg_key, arguments[arg_key])
+ changed = True
+
+ return (changed, runner)
+
+ '''
+ @param description Description of the runner
+ '''
+ def find_runner(self, description):
+ runners = self._runners_endpoint(as_list=False)
+
+ for runner in runners:
+ # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner
+ # object, so we need to handle both
+ if hasattr(runner, "description"):
+ if (runner.description == description):
+ return self._gitlab.runners.get(runner.id)
+ else:
+ if (runner['description'] == description):
+ return self._gitlab.runners.get(runner['id'])
+
+ '''
+ @param description Description of the runner
+ '''
+ def exists_runner(self, description):
+ # When runner exists, object will be stored in self.runner_object.
+ runner = self.find_runner(description)
+
+ if runner:
+ self.runner_object = runner
+ return True
+ return False
+
+ def delete_runner(self):
+ if self._module.check_mode:
+ return True
+
+ runner = self.runner_object
+
+ return runner.delete()
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ description=dict(type='str', required=True, aliases=["name"]),
+ active=dict(type='bool', default=True),
+ owned=dict(type='bool', default=False),
+ tag_list=dict(type='list', elements='str', default=[]),
+ run_untagged=dict(type='bool', default=True),
+ locked=dict(type='bool', default=False),
+ access_level=dict(type='str', choices=["not_protected", "ref_protected"]),
+ access_level_on_creation=dict(type='bool'),
+ maximum_timeout=dict(type='int', default=3600),
+ registration_token=dict(type='str', no_log=True),
+ project=dict(type='str'),
+ group=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present"]),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ['project', 'owned'],
+ ['group', 'owned'],
+ ['project', 'group'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'],
+ ],
+ required_if=[
+ ('state', 'present', ['registration_token']),
+ ],
+ supports_check_mode=True,
+ )
+ ensure_gitlab_package(module)
+
+ state = module.params['state']
+ runner_description = module.params['description']
+ runner_active = module.params['active']
+ tag_list = module.params['tag_list']
+ run_untagged = module.params['run_untagged']
+ runner_locked = module.params['locked']
+ access_level = module.params['access_level']
+ maximum_timeout = module.params['maximum_timeout']
+ registration_token = module.params['registration_token']
+ project = module.params['project']
+ group = module.params['group']
+
+ if access_level is None:
+ message = "The option 'access_level' is unspecified, so 'ref_protected' is assumed. "\
+ "In order to align the module with GitLab's runner API, this option will lose "\
+ "its default value in community.general 8.0.0. From that version on, you must set "\
+ "this option to 'ref_protected' explicitly, if you want to have a protected runner, "\
+ "otherwise GitLab's default access level gets applied, which is 'not_protected'"
+ module.deprecate(message, version='8.0.0', collection_name='community.general')
+ access_level = 'ref_protected'
+
+ gitlab_instance = gitlab_authentication(module)
+ gitlab_project = None
+ gitlab_group = None
+
+ if project:
+ try:
+ gitlab_project = gitlab_instance.projects.get(project)
+ except gitlab.exceptions.GitlabGetError as e:
+ module.fail_json(msg='No such project %s' % project, exception=to_native(e))
+ elif group:
+ try:
+ gitlab_group = gitlab_instance.groups.get(group)
+ except gitlab.exceptions.GitlabGetError as e:
+ module.fail_json(msg='No such group %s' % group, exception=to_native(e))
+
+ gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project)
+ runner_exists = gitlab_runner.exists_runner(runner_description)
+
+ if state == 'absent':
+ if runner_exists:
+ gitlab_runner.delete_runner()
+ module.exit_json(changed=True, msg="Successfully deleted runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, msg="Runner deleted or does not exists")
+
+ if state == 'present':
+ if gitlab_runner.create_or_update_runner(runner_description, {
+ "active": runner_active,
+ "tag_list": tag_list,
+ "run_untagged": run_untagged,
+ "locked": runner_locked,
+ "access_level": access_level,
+ "maximum_timeout": maximum_timeout,
+ "registration_token": registration_token,
+ }):
+ module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs,
+ msg="Successfully created or updated the runner %s" % runner_description)
+ else:
+ module.exit_json(changed=False, runner=gitlab_runner.runner_object._attrs,
+ msg="No need to update the runner %s" % runner_description)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gitlab_user.py b/ansible_collections/community/general/plugins/modules/gitlab_user.py
new file mode 100644
index 000000000..94f371316
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gitlab_user.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be)
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: gitlab_user
+short_description: Creates/updates/deletes/blocks/unblocks GitLab Users
+description:
+ - When the user does not exist in GitLab, it will be created.
+ - When the user exists and I(state=absent), the user will be deleted.
+ - When the user exists and I(state=blocked), the user will be blocked.
+ - When changes are made to the user, the user will be updated.
+notes:
+ - From community.general 0.2.0 onwards, name, email and password are optional when deleting the user.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+ - Guillaume Martinez (@Lunik)
+ - Lennert Mertens (@LennertMertens)
+ - Stef Graces (@stgrace)
+requirements:
+ - python >= 2.7
+ - python-gitlab python module
+ - administrator rights on the GitLab server
+extends_documentation_fragment:
+ - community.general.auth_basic
+ - community.general.gitlab
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - Name of the user you want to create.
+ - Required only if C(state) is set to C(present).
+ type: str
+ username:
+ description:
+ - The username of the user.
+ required: true
+ type: str
+ password:
+ description:
+ - The password of the user.
+ - The GitLab server enforces a minimum password length of 8 characters, so set this value to 8 or more characters.
+ type: str
+ reset_password:
+ description:
+ - Whether the user can change its password or not.
+ default: false
+ type: bool
+ version_added: 3.3.0
+ email:
+ description:
+ - The email that belongs to the user.
+ - Required only if C(state) is set to C(present).
+ type: str
+ sshkey_name:
+ description:
+ - The name of the SSH public key.
+ type: str
+ sshkey_file:
+ description:
+ - The SSH public key itself.
+ type: str
+ sshkey_expires_at:
+ description:
+ - The expiration date of the SSH public key in ISO 8601 format C(YYYY-MM-DDTHH:MM:SSZ).
+ - This is only used when adding new SSH public keys.
+ type: str
+ version_added: 3.1.0
+ group:
+ description:
+ - ID or full path of the parent group in the form group/name.
+ - Add user as a member to this group.
+ type: str
+ access_level:
+ description:
+ - The access level to the group. One of the following can be used.
+ - guest
+ - reporter
+ - developer
+ - master (alias for maintainer)
+ - maintainer
+ - owner
+ default: guest
+ type: str
+ choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"]
+ state:
+ description:
+ - Create, delete or block a user.
+ default: present
+ type: str
+ choices: ["present", "absent", "blocked", "unblocked"]
+ confirm:
+ description:
+ - Require confirmation.
+ type: bool
+ default: true
+ isadmin:
+ description:
+ - Grant admin privileges to the user.
+ type: bool
+ default: false
+ external:
+ description:
+ - Define external parameter for this user.
+ type: bool
+ default: false
+ identities:
+ description:
+ - List of identities to be added/updated for this user.
+ - To remove all other identities from this user, set I(overwrite_identities=true).
+ type: list
+ elements: dict
+ suboptions:
+ provider:
+ description:
+ - The name of the external identity provider.
+ type: str
+ extern_uid:
+ description:
+ - User ID for external identity.
+ type: str
+ version_added: 3.3.0
+ overwrite_identities:
+ description:
+ - Overwrite identities with identities added in this module.
+ - This means that all identities that the user has and that are not listed in I(identities) are removed from the user.
+ - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list.
+ type: bool
+ default: false
+ version_added: 3.3.0
+'''
+
+EXAMPLES = '''
+- name: "Delete GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: false
+ username: myusername
+ state: absent
+
+- name: "Create GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_username: dj-wasabi
+ api_password: "MySecretPassword"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Create GitLab User using external identity provider"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ validate_certs: true
+ api_token: "{{ access_token }}"
+ name: My Name
+ username: myusername
+ password: mysecretpassword
+ email: me@example.com
+ identities:
+ - provider: Keycloak
+ extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc
+ state: present
+ group: super_group/mon_group
+ access_level: owner
+
+- name: "Block GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: false
+ username: myusername
+ state: blocked
+
+- name: "Unblock GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ validate_certs: false
+ username: myusername
+ state: unblocked
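+
+# A sketch of adding an SSH key with an expiry date via I(sshkey_expires_at)
+# (community.general 3.1.0); key material and date are placeholders.
+- name: "Add an expiring SSH key to a GitLab User"
+ community.general.gitlab_user:
+ api_url: https://gitlab.example.com/
+ api_token: "{{ access_token }}"
+ name: My Name
+ username: myusername
+ email: me@example.com
+ sshkey_name: MySSH
+ sshkey_file: ssh-rsa AAAAB3NzaC1yc...
+ sshkey_expires_at: "2027-01-01T00:00:00Z"
+ state: present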
+'''
+
+RETURN = '''
+msg:
+ description: Success or failure message
+ returned: always
+ type: str
+ sample: "Success"
+
+result:
+ description: json parsed response from the server
+ returned: always
+ type: dict
+
+error:
+ description: the error message returned by the GitLab API
+ returned: failed
+ type: str
+ sample: "400: path is already in use"
+
+user:
+ description: API object
+ returned: always
+ type: dict
+'''
+
+
+from ansible.module_utils.api import basic_auth_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.gitlab import (
+ auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package
+)
+
+
+class GitLabUser(object):
+ def __init__(self, module, gitlab_instance):
+ self._module = module
+ self._gitlab = gitlab_instance
+ self.user_object = None
+ self.ACCESS_LEVEL = {
+ 'guest': gitlab.GUEST_ACCESS,
+ 'reporter': gitlab.REPORTER_ACCESS,
+ 'developer': gitlab.DEVELOPER_ACCESS,
+ 'master': gitlab.MAINTAINER_ACCESS,
+ 'maintainer': gitlab.MAINTAINER_ACCESS,
+ 'owner': gitlab.OWNER_ACCESS,
+ }
+
+ '''
+ @param username Username of the user
+ @param options User options
+ '''
+ def create_or_update_user(self, username, options):
+ changed = False
+ potentially_changed = False
+
+ # exists_user() has already been called in main(), so self.user_object is set if the user exists
+ if self.user_object is None:
+ user = self.create_user({
+ 'name': options['name'],
+ 'username': username,
+ 'password': options['password'],
+ 'reset_password': options['reset_password'],
+ 'email': options['email'],
+ 'skip_confirmation': not options['confirm'],
+ 'admin': options['isadmin'],
+ 'external': options['external'],
+ 'identities': options['identities'],
+ })
+ changed = True
+ else:
+ changed, user = self.update_user(
+ self.user_object, {
+ # add "normal" parameters here, put uncheckable
+ # params in the dict below
+ 'name': {'value': options['name']},
+ 'email': {'value': options['email']},
+
+ # note: for some attributes like this one the key
+ # from reading back from server is unfortunately
+ # different to the one needed for pushing/writing,
+ # in that case use the optional setter key
+ 'is_admin': {
+ 'value': options['isadmin'], 'setter': 'admin'
+ },
+ 'external': {'value': options['external']},
+ 'identities': {'value': options['identities']},
+ },
+ {
+ # put "uncheckable" params here, this means params
+ # which the gitlab does accept for setting but does
+ # not return any information about it
+ 'skip_reconfirmation': {'value': not options['confirm']},
+ 'password': {'value': options['password']},
+ 'reset_password': {'value': options['reset_password']},
+ 'overwrite_identities': {'value': options['overwrite_identities']},
+ }
+ )
+
+ # note: as we unfortunately have some uncheckable parameters
+ # where it is not possible to determine if the update
+ # changed something or not, we must assume here that a
+ # change happened and that a user object update is needed
+ potentially_changed = True
+
+ # Assign ssh keys
+ if options['sshkey_name'] and options['sshkey_file']:
+ key_changed = self.add_ssh_key_to_user(user, {
+ 'name': options['sshkey_name'],
+ 'file': options['sshkey_file'],
+ 'expires_at': options['sshkey_expires_at']})
+ changed = changed or key_changed
+
+ # Assign group
+ if options['group_path']:
+ group_changed = self.assign_user_to_group(user, options['group_path'], options['access_level'])
+ changed = changed or group_changed
+
+ self.user_object = user
+ if (changed or potentially_changed) and not self._module.check_mode:
+ try:
+ user.save()
+ except Exception as e:
+ self._module.fail_json(msg="Failed to update user: %s " % to_native(e))
+
+ if changed:
+ if self._module.check_mode:
+ self._module.exit_json(changed=True, msg="Successfully created or updated the user %s" % username)
+ return True
+ else:
+ return False
+
+ '''
+ @param group User object
+ '''
+ def get_user_id(self, user):
+ if user is not None:
+ return user.id
+ return None
+
+ '''
+ @param user User object
+ @param sshkey_name Name of the ssh key
+ '''
+ def ssh_key_exists(self, user, sshkey_name):
+ key_list = [k.title for k in user.keys.list(all=True)]
+
+ return sshkey_name in key_list
+
+ '''
+ @param user User object
+ @param sshkey Dict containing sshkey infos {"name": "", "file": "", "expires_at": ""}
+ '''
+ def add_ssh_key_to_user(self, user, sshkey):
+ if not self.ssh_key_exists(user, sshkey['name']):
+ if self._module.check_mode:
+ return True
+
+ try:
+ parameter = {
+ 'title': sshkey['name'],
+ 'key': sshkey['file'],
+ }
+ if sshkey['expires_at'] is not None:
+ parameter['expires_at'] = sshkey['expires_at']
+ user.keys.create(parameter)
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign sshkey to user: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to find
+ '''
+ def find_member(self, group, user_id):
+ try:
+ member = group.members.get(user_id)
+ except gitlab.exceptions.GitlabGetError:
+ return None
+ return member
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ '''
+ def member_exists(self, group, user_id):
+ member = self.find_member(group, user_id)
+
+ return member is not None
+
+ '''
+ @param group Group object
+ @param user_id Id of the user to check
+ @param access_level GitLab access_level to check
+ '''
+ def member_as_good_access_level(self, group, user_id, access_level):
+ member = self.find_member(group, user_id)
+
+ return member.access_level == access_level
+
+ '''
+ @param user User object
+ @param group_path Complete path of the Group including parent group path. <parent_path>/<group_path>
+ @param access_level GitLab access_level to assign
+ '''
+ def assign_user_to_group(self, user, group_identifier, access_level):
+ group = find_group(self._gitlab, group_identifier)
+
+ if self._module.check_mode:
+ return True
+
+ if group is None:
+ return False
+
+ if self.member_exists(group, self.get_user_id(user)):
+ member = self.find_member(group, self.get_user_id(user))
+ if not self.member_as_good_access_level(group, member.id, self.ACCESS_LEVEL[access_level]):
+ member.access_level = self.ACCESS_LEVEL[access_level]
+ member.save()
+ return True
+ else:
+ try:
+ group.members.create({
+ 'user_id': self.get_user_id(user),
+ 'access_level': self.ACCESS_LEVEL[access_level]})
+ except gitlab.exceptions.GitlabCreateError as e:
+ self._module.fail_json(msg="Failed to assign user to group: %s" % to_native(e))
+ return True
+ return False
+
+ '''
+ @param user User object
+ @param arguments User attributes
+ '''
+ def update_user(self, user, arguments, uncheckable_args):
+ changed = False
+
+ for arg_key, arg_value in arguments.items():
+ av = arg_value['value']
+
+ if av is not None:
+ if arg_key == "identities":
+ changed = self.add_identities(user, av, uncheckable_args['overwrite_identities']['value'])
+
+ elif getattr(user, arg_key) != av:
+ setattr(user, arg_value.get('setter', arg_key), av)
+ changed = True
+
+ for arg_key, arg_value in uncheckable_args.items():
+ av = arg_value['value']
+
+ if av is not None:
+ setattr(user, arg_value.get('setter', arg_key), av)
+
+ return (changed, user)
+
+ '''
+ @param arguments User attributes
+ '''
+ def create_user(self, arguments):
+ if self._module.check_mode:
+ return True
+
+ identities = None
+ if 'identities' in arguments:
+ identities = arguments['identities']
+ del arguments['identities']
+
+ try:
+ user = self._gitlab.users.create(arguments)
+ if identities:
+ self.add_identities(user, identities)
+
+ except (gitlab.exceptions.GitlabCreateError) as e:
+ self._module.fail_json(msg="Failed to create user: %s " % to_native(e))
+
+ return user
+
+ '''
+ @param user User object
+ @param identities List of identities to be added/updated
+ @param overwrite_identities Overwrite user identities with identities passed to this module
+ '''
+ def add_identities(self, user, identities, overwrite_identities=False):
+ changed = False
+ if overwrite_identities:
+ changed = self.delete_identities(user, identities)
+
+ for identity in identities:
+ if identity not in user.identities:
+ setattr(user, 'provider', identity['provider'])
+ setattr(user, 'extern_uid', identity['extern_uid'])
+ if not self._module.check_mode:
+ user.save()
+ changed = True
+ return changed
+
+ '''
+ @param user User object
+ @param identities List of identities to be added/updated
+ '''
+ def delete_identities(self, user, identities):
+ changed = False
+ for identity in user.identities:
+ if identity not in identities:
+ if not self._module.check_mode:
+ user.identityproviders.delete(identity['provider'])
+ changed = True
+ return changed
+
+ '''
+ @param username Username of the user
+ '''
+ def find_user(self, username):
+ users = self._gitlab.users.list(search=username, all=True)
+ for user in users:
+ if (user.username == username):
+ return user
+
+ '''
+ @param username Username of the user
+ '''
+ def exists_user(self, username):
+ # When user exists, object will be stored in self.user_object.
+ user = self.find_user(username)
+ if user:
+ self.user_object = user
+ return True
+ return False
+
+ '''
+ @param username Username of the user
+ '''
+ def is_active(self, username):
+ user = self.find_user(username)
+ return user.attributes['state'] == 'active'
+
+ def delete_user(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.user_object
+
+ return user.delete()
+
+ def block_user(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.user_object
+
+ return user.block()
+
+ def unblock_user(self):
+ if self._module.check_mode:
+ return True
+
+ user = self.user_object
+
+ return user.unblock()
+
+
+def sanitize_arguments(arguments):
+ for key, value in list(arguments.items()):
+ if value is None:
+ del arguments[key]
+ return arguments
+
+
+def main():
+ argument_spec = basic_auth_argument_spec()
+ argument_spec.update(auth_argument_spec())
+ argument_spec.update(dict(
+ name=dict(type='str'),
+ state=dict(type='str', default="present", choices=["absent", "present", "blocked", "unblocked"]),
+ username=dict(type='str', required=True),
+ password=dict(type='str', no_log=True),
+ reset_password=dict(type='bool', default=False, no_log=False),
+ email=dict(type='str'),
+ sshkey_name=dict(type='str'),
+ sshkey_file=dict(type='str', no_log=False),
+ sshkey_expires_at=dict(type='str', no_log=False),
+ group=dict(type='str'),
+ access_level=dict(type='str', default="guest", choices=["developer", "guest", "maintainer", "master", "owner", "reporter"]),
+ confirm=dict(type='bool', default=True),
+ isadmin=dict(type='bool', default=False),
+ external=dict(type='bool', default=False),
+ identities=dict(type='list', elements='dict'),
+ overwrite_identities=dict(type='bool', default=False),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[
+ ['api_username', 'api_token'],
+ ['api_username', 'api_oauth_token'],
+ ['api_username', 'api_job_token'],
+ ['api_token', 'api_oauth_token'],
+ ['api_token', 'api_job_token'],
+ ],
+ required_together=[
+ ['api_username', 'api_password'],
+ ],
+ required_one_of=[
+ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token']
+ ],
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['name', 'email']),
+ )
+ )
+ ensure_gitlab_package(module)
+
+ user_name = module.params['name']
+ state = module.params['state']
+ user_username = module.params['username'].lower()
+ user_password = module.params['password']
+ user_reset_password = module.params['reset_password']
+ user_email = module.params['email']
+ user_sshkey_name = module.params['sshkey_name']
+ user_sshkey_file = module.params['sshkey_file']
+ user_sshkey_expires_at = module.params['sshkey_expires_at']
+ group_path = module.params['group']
+ access_level = module.params['access_level']
+ confirm = module.params['confirm']
+ user_isadmin = module.params['isadmin']
+ user_external = module.params['external']
+ user_identities = module.params['identities']
+ overwrite_identities = module.params['overwrite_identities']
+
+ gitlab_instance = gitlab_authentication(module)
+
+ gitlab_user = GitLabUser(module, gitlab_instance)
+ user_exists = gitlab_user.exists_user(user_username)
+ if user_exists:
+ user_is_active = gitlab_user.is_active(user_username)
+ else:
+ user_is_active = False
+
+ if state == 'absent':
+ if user_exists:
+ gitlab_user.delete_user()
+ module.exit_json(changed=True, msg="Successfully deleted user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User deleted or does not exists")
+
+ if state == 'blocked':
+ if user_exists and user_is_active:
+ gitlab_user.block_user()
+ module.exit_json(changed=True, msg="Successfully blocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User already blocked or does not exists")
+
+ if state == 'unblocked':
+ if user_exists and not user_is_active:
+ gitlab_user.unblock_user()
+ module.exit_json(changed=True, msg="Successfully unblocked user %s" % user_username)
+ else:
+ module.exit_json(changed=False, msg="User is not blocked or does not exists")
+
+ if state == 'present':
+ if gitlab_user.create_or_update_user(user_username, {
+ "name": user_name,
+ "password": user_password,
+ "reset_password": user_reset_password,
+ "email": user_email,
+ "sshkey_name": user_sshkey_name,
+ "sshkey_file": user_sshkey_file,
+ "sshkey_expires_at": user_sshkey_expires_at,
+ "group_path": group_path,
+ "access_level": access_level,
+ "confirm": confirm,
+ "isadmin": user_isadmin,
+ "external": user_external,
+ "identities": user_identities,
+ "overwrite_identities": overwrite_identities,
+ }):
+ module.exit_json(changed=True, msg="Successfully created or updated the user %s" % user_username, user=gitlab_user.user_object._attrs)
+ else:
+ module.exit_json(changed=False, msg="No need to update the user %s" % user_username, user=gitlab_user.user_object._attrs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/grove.py b/ansible_collections/community/general/plugins/modules/grove.py
new file mode 100644
index 000000000..b3e0508ff
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/grove.py
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: grove
+short_description: Sends a notification to a grove.io channel
+description:
+ - The C(grove) module sends a message for a service to a Grove.io
+ channel.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ channel_token:
+ type: str
+ description:
+ - Token of the channel to post to.
+ required: true
+ service:
+ type: str
+ description:
+ - Name of the service (displayed as the "user" in the message).
+ required: false
+ default: ansible
+ message_content:
+ type: str
+ description:
+ - Message content.
+ - The alias I(message) has been removed in community.general 4.0.0.
+ required: true
+ url:
+ type: str
+ description:
+ - Service URL for the web client.
+ required: false
+ icon_url:
+ type: str
+ description:
+ - Icon for the service.
+ required: false
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author: "Jonas Pfenniger (@zimbatm)"
+'''
+
+EXAMPLES = '''
+- name: Sends a notification to a grove.io channel
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message_content: 'deployed {{ target }}'
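+
+# Optional fields are passed through to Grove; the url and icon_url values
+# below are placeholders.
+- name: Sends a notification with a link and icon
+ community.general.grove:
+ channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg
+ service: my-app
+ message_content: 'deployed {{ target }}'
+ url: https://my-app.example.com
+ icon_url: https://my-app.example.com/icon.png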
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+BASE_URL = 'https://grove.io/api/notice/%s/'
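+# e.g. BASE_URL % ('abc123',) -> 'https://grove.io/api/notice/abc123/'
+# (token value purely illustrative)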
+
+# ==============================================================
+# do_notify_grove
+
+
+def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None):
+ my_url = BASE_URL % (channel_token,)
+
+ my_data = dict(service=service, message=message)
+ if url is not None:
+ my_data['url'] = url
+ if icon_url is not None:
+ my_data['icon_url'] = icon_url
+
+ data = urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
+
+# ==============================================================
+# main
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ channel_token=dict(type='str', required=True, no_log=True),
+ message_content=dict(type='str', required=True),
+ service=dict(type='str', default='ansible'),
+ url=dict(type='str', default=None),
+ icon_url=dict(type='str', default=None),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ channel_token = module.params['channel_token']
+ service = module.params['service']
+ message = module.params['message_content']
+ url = module.params['url']
+ icon_url = module.params['icon_url']
+
+ do_notify_grove(module, channel_token, service, message, url, icon_url)
+
+ # Mission complete
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/gunicorn.py b/ansible_collections/community/general/plugins/modules/gunicorn.py
new file mode 100644
index 000000000..2b2abcf8e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/gunicorn.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Alejandro Gomez <alexgomez2202@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: gunicorn
+short_description: Run gunicorn with various settings
+description:
+ - Starts gunicorn with the parameters specified. Common settings for gunicorn
+ configuration are supported. For additional configuration use a config file.
+ See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more
+ options. It is recommended to always use the I(chdir) option to avoid problems
+ with the location of the app.
+requirements: [gunicorn]
+author:
+ - "Alejandro Gomez (@agmezr)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ app:
+ type: str
+ required: true
+ aliases: ['name']
+ description:
+ - The app module. A name refers to a WSGI callable that should be found in the specified module.
+ venv:
+ type: path
+ aliases: ['virtualenv']
+ description:
+ - 'Path to the virtualenv directory.'
+ config:
+ type: path
+ description:
+ - 'Path to the gunicorn configuration file.'
+ aliases: ['conf']
+ chdir:
+ type: path
+ description:
+ - 'Change to the specified directory before loading apps.'
+ pid:
+ type: path
+ description:
+ - 'A filename to use for the PID file. If not set and not found in the configuration file, a temporary
+ PID file will be created to check for a successful run of gunicorn.'
+ worker:
+ type: str
+ choices: ['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']
+ description:
+ - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.'
+ user:
+ type: str
+ description:
+ - 'Switch worker processes to run as this user.'
+notes:
+ - If not specified in the config file, a temporary error log will be created in the /tmp directory.
+ Please make sure you have write access to /tmp. This is not required, but it will help you
+ identify any problem with the configuration.
+'''
+
+EXAMPLES = '''
+- name: Simple gunicorn run example
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+
+- name: Run gunicorn on a virtualenv
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ venv: '/workspace/example/venv'
+
+- name: Run gunicorn with a config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+
+- name: Run gunicorn as ansible user with specified pid and config file
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ conf: '/workspace/example/gunicorn.cfg'
+ venv: '/workspace/example/venv'
+ pid: '/workspace/example/gunicorn.pid'
+ user: 'ansible'
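+
+# The worker class maps to gunicorn's -k option; gevent is chosen here
+# purely as an illustration.
+- name: Run gunicorn with a gevent worker
+ community.general.gunicorn:
+ app: 'wsgi'
+ chdir: '/workspace/example'
+ worker: 'gevent'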
+'''
+
+RETURN = '''
+pid:
+ description: Process ID of the gunicorn daemon, read back from the PID file.
+ returned: changed
+ type: str
+ sample: "1234"
+'''
+
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def search_existing_config(config, option):
+ ''' search in config file for specified option '''
+ if config and os.path.isfile(config):
+ with open(config, 'r') as f:
+ for line in f:
+ if option in line:
+ return line
+ return None
+
+
+def remove_tmp_file(file_path):
+ ''' remove temporary files '''
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+
+
+def main():
+
+ # available gunicorn options on module
+ gunicorn_options = {
+ 'config': '-c',
+ 'chdir': '--chdir',
+ 'worker': '-k',
+ 'user': '-u',
+ }
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ app=dict(required=True, type='str', aliases=['name']),
+ venv=dict(type='path', aliases=['virtualenv']),
+ config=dict(type='path', aliases=['conf']),
+ chdir=dict(type='path'),
+ pid=dict(type='path'),
+ user=dict(type='str'),
+ worker=dict(type='str', choices=['sync', 'eventlet', 'gevent', 'tornado', 'gthread', 'gaiohttp']),
+ )
+ )
+
+ # temporary files in case no option provided
+ tmp_error_log = os.path.join(module.tmpdir, 'gunicorn.temp.error.log')
+ tmp_pid_file = os.path.join(module.tmpdir, 'gunicorn.temp.pid')
+
+ # remove temp file if exists
+ remove_tmp_file(tmp_pid_file)
+ remove_tmp_file(tmp_error_log)
+
+ # obtain app name and venv
+ params = module.params
+ app = params['app']
+ venv = params['venv']
+ pid = params['pid']
+
+ # use venv path if exists
+ if venv:
+ gunicorn_command = "/".join((venv, 'bin', 'gunicorn'))
+ else:
+ gunicorn_command = module.get_bin_path('gunicorn', required=True)
+
+ # to daemonize the process
+ options = ["-D"]
+
+ # fill options
+ for option in gunicorn_options:
+ param = params[option]
+ if param:
+ options.append(gunicorn_options[option])
+ options.append(param)
+
+ error_log = search_existing_config(params['config'], 'errorlog')
+ if not error_log:
+ # place the error log somewhere in case of failure
+ options.append("--error-logfile")
+ options.append(tmp_error_log)
+
+ pid_file = search_existing_config(params['config'], 'pid')
+ if not params['pid'] and not pid_file:
+ pid = tmp_pid_file
+
+ # add option for pid file if not found on config file
+ if not pid_file:
+ options.append('--pid')
+ options.append(pid)
+
+ # put args together
+ args = [gunicorn_command] + options + [app]
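+ # For illustration (hypothetical paths; option order may vary), with
+ # app='wsgi', chdir='/workspace/example' and no config file, args resembles:
+ # ['/usr/bin/gunicorn', '-D', '--chdir', '/workspace/example',
+ # '--error-logfile', '<tmpdir>/gunicorn.temp.error.log',
+ # '--pid', '<tmpdir>/gunicorn.temp.pid', 'wsgi']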
+ rc, out, err = module.run_command(args, use_unsafe_shell=False, encoding=None)
+
+ if not err:
+ # wait for gunicorn to write the pid file and logs
+ time.sleep(0.5)
+ result = None
+ if os.path.isfile(pid):
+ with open(pid, 'r') as f:
+ result = f.readline().strip()
+
+ if not params['pid']:
+ os.remove(pid)
+
+ module.exit_json(changed=True, pid=result, debug=" ".join(args))
+ else:
+ # if user defined own error log, check that
+ if error_log:
+ error = 'Please check your {0}'.format(error_log.strip())
+ else:
+ if os.path.isfile(tmp_error_log):
+ with open(tmp_error_log, 'r') as f:
+ error = f.read()
+ # delete tmp log
+ os.remove(tmp_error_log)
+ else:
+ error = "Log not found"
+
+ module.fail_json(msg='Failed to start gunicorn. {0}'.format(error), error=err)
+
+ else:
+ module.fail_json(msg='Failed to start gunicorn {0}'.format(err), error=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hana_query.py b/ansible_collections/community/general/plugins/modules/hana_query.py
new file mode 100644
index 000000000..0b12e9935
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hana_query.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: hana_query
+short_description: Execute SQL on HANA
+version_added: 3.2.0
+description: This module executes SQL statements on HANA with hdbsql.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ sid:
+ description: The system ID.
+ type: str
+ required: true
+ instance:
+ description: The instance number.
+ type: str
+ required: true
+ user:
+ description: A dedicated username. The user could also be stored in hdbuserstore. Defaults to C(SYSTEM).
+ type: str
+ default: SYSTEM
+ userstore:
+ description: If C(true), the user must be in hdbuserstore.
+ type: bool
+ default: false
+ version_added: 3.5.0
+ password:
+ description:
+ - The password to connect to the database.
+ - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should
+ be used whenever possible, as command line arguments can be seen by other users
+ on the same machine."
+ type: str
+ autocommit:
+ description: Autocommit the statement.
+ type: bool
+ default: true
+ host:
+ description: The Host IP address. The port can be defined as well.
+ type: str
+ database:
+ description: Define the database on which to connect.
+ type: str
+ encrypted:
+ description: Use encrypted connection. Defaults to C(false).
+ type: bool
+ default: false
+ filepath:
+ description:
+ - One or more files each containing one SQL query to run.
+ - Must be a string or list containing strings.
+ type: list
+ elements: path
+ query:
+ description:
+ - SQL query to run.
+ - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list.
+ It is better to supply a one-element list instead to avoid mangled input.
+ type: list
+ elements: str
+author:
+ - Rainer Leber (@rainerleber)
+'''
+
+EXAMPLES = r'''
+- name: Simple select query
+ community.general.hana_query:
+ sid: "hdb"
+ instance: "01"
+ password: "Test123"
+ query: "select user_name from users"
+
+- name: Run several queries
+ community.general.hana_query:
+ sid: "hdb"
+ instance: "01"
+ password: "Test123"
+ query:
+ - "select user_name from users;"
+ - select * from SYSTEM;
+ host: "localhost"
+ autocommit: false
+
+- name: Run several queries from file
+ community.general.hana_query:
+ sid: "hdb"
+ instance: "01"
+ password: "Test123"
+ filepath:
+ - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt
+ - /tmp/HANA.txt
+ host: "localhost"
+
+- name: Run several queries from user store
+ community.general.hana_query:
+ sid: "hdb"
+ instance: "01"
+ user: hdbstoreuser
+ userstore: true
+ query:
+ - "select user_name from users;"
+ - select * from users;
+ autocommit: false
+'''
+
+RETURN = r'''
+query_result:
+ description: List containing results of all queries executed (one sublist for every query).
+ returned: on success
+ type: list
+ elements: list
+ sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]]
+'''
+
+import csv
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.common.text.converters import to_native
+
+
+def csv_to_list(rawcsv):
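+ # Illustrative only: hdbsql with -x emits CSV with a header row, so raw
+ # output like 'USER_NAME\n"SYSTEM"\n' becomes [{'USER_NAME': 'SYSTEM'}].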
+ reader_raw = csv.DictReader(StringIO(rawcsv))
+ reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw]
+ return list(reader)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ sid=dict(type='str', required=True),
+ instance=dict(type='str', required=True),
+ encrypted=dict(type='bool', default=False),
+ host=dict(type='str', required=False),
+ user=dict(type='str', default="SYSTEM"),
+ userstore=dict(type='bool', default=False),
+ password=dict(type='str', no_log=True),
+ database=dict(type='str', required=False),
+ query=dict(type='list', elements='str', required=False),
+ filepath=dict(type='list', elements='path', required=False),
+ autocommit=dict(type='bool', default=True),
+ ),
+ required_one_of=[('query', 'filepath')],
+ required_if=[('userstore', False, ['password'])],
+ supports_check_mode=False,
+ )
+ rc, out, err, out_raw = [0, [], "", ""]
+
+ params = module.params
+
+ sid = (params['sid']).upper()
+ instance = params['instance']
+ user = params['user']
+ userstore = params['userstore']
+ password = params['password']
+ autocommit = params['autocommit']
+ host = params['host']
+ database = params['database']
+ encrypted = params['encrypted']
+
+ filepath = params['filepath']
+ query = params['query']
+
+ bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance)
+
+ try:
+ command = [module.get_bin_path(bin_path, required=True)]
+ except Exception as e:
+ module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". Please check SID and instance number: "{1}"'.format(bin_path, to_native(e)))
+
+ if encrypted is True:
+ command.extend(['-attemptencrypt'])
+ if autocommit is False:
+ command.extend(['-z'])
+ if host is not None:
+ command.extend(['-n', host])
+ if database is not None:
+ command.extend(['-d', database])
+ # -x Suppresses additional output, such as the number of selected rows in a result set.
+ if userstore:
+ command.extend(['-x', '-U', user])
+ else:
+ command.extend(['-x', '-i', instance, '-u', user, '-p', password])
+
+ if filepath is not None:
+ command.extend(['-I'])
+ for p in filepath:
+ # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt,
+ # iterates through the files and appends the output to the var out.
+ query_command = command + [p]
+ (rc, out_raw, err) = module.run_command(query_command)
+ out.append(csv_to_list(out_raw))
+ if query is not None:
+ for q in query:
+ # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users",
+ # iterates through multiple commands and append the output to var out.
+ query_command = command + [q]
+ (rc, out_raw, err) = module.run_command(query_command)
+ out.append(csv_to_list(out_raw))
+ changed = True
+
+ module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/haproxy.py b/ansible_collections/community/general/plugins/modules/haproxy.py
new file mode 100644
index 000000000..56f987d80
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/haproxy.py
@@ -0,0 +1,488 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Ravi Bhure <ravibhure@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: haproxy
+short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands
+author:
+ - Ravi Bhure (@ravibhure)
+description:
+ - Enable, disable, drain and set weights for HAProxy backend servers using socket commands.
+notes:
+ - Enable, disable and drain commands are restricted and can only be issued on
+ sockets configured for level 'admin'. For example, you can add the line
+ 'stats socket /var/run/haproxy.sock level admin' to the general section of
+ haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt).
+ - Depends on netcat (C(nc)) being available; you need to install the appropriate
+ package for your operating system before this module can be used.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ backend:
+ description:
+ - Name of the HAProxy backend pool.
+ - If this parameter is unset, it will be auto-detected.
+ type: str
+ drain:
+ description:
+ - Wait until the server has no active connections or until the timeout
+ determined by I(wait_interval) and I(wait_retries) is reached.
+ - Continue only after the status changes to C(MAINT).
+ - This overrides the I(shutdown_sessions) option.
+ type: bool
+ default: false
+ host:
+ description:
+ - Name of the backend host to change.
+ type: str
+ required: true
+ shutdown_sessions:
+ description:
+ - When disabling a server, immediately terminate all the sessions attached
+ to the specified server.
+ - This can be used to terminate long-running sessions after a server is put
+ into maintenance mode. Overridden by the I(drain) option.
+ type: bool
+ default: false
+ socket:
+ description:
+ - Path to the HAProxy socket file.
+ type: path
+ default: /var/run/haproxy.sock
+ state:
+ description:
+ - Desired state of the provided backend host.
+ - Note that C(drain) state was added in version 2.4.
+ - It is supported only by HAProxy version 1.5 or later.
+ When used on versions earlier than 1.5, it will be ignored.
+ type: str
+ required: true
+ choices: [ disabled, drain, enabled ]
+ agent:
+ description:
+ - Disable/enable agent checks (depending on I(state) value).
+ type: bool
+ default: false
+ version_added: 1.0.0
+ health:
+ description:
+ - Disable/enable health checks (depending on I(state) value).
+ type: bool
+ default: false
+ version_added: "1.0.0"
+ fail_on_not_found:
+ description:
+ - Fail whenever trying to enable/disable a backend host that does not exist.
+ type: bool
+ default: false
+ wait:
+ description:
+ - Wait until the server reports a status of C(UP) when I(state=enabled),
+ status of C(MAINT) when I(state=disabled) or status of C(DRAIN) when I(state=drain).
+ type: bool
+ default: false
+ wait_interval:
+ description:
+ - Number of seconds to wait between retries.
+ type: int
+ default: 5
+ wait_retries:
+ description:
+ - Number of times to check for status after changing the state.
+ type: int
+ default: 25
+ weight:
+ description:
+ - The value passed in argument.
+ - If the value ends with the C(%) sign, then the new weight will be
+ relative to the initially configured weight.
+ - Relative weights are only permitted between 0 and 100% and absolute
+ weights are permitted between 0 and 256.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Disable server in 'www' backend pool
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Disable server in 'www' backend pool, also stop health/agent checks
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ health: true
+ agent: true
+
+- name: Disable server without backend pool name (apply to all available backend pool)
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+
+- name: Disable server, provide socket file
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+
+- name: Disable server, provide socket file, wait until status reports in maintenance
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: true
+
+# Place server in drain mode, providing a socket file. Then check the server's
+# status every minute to see if it changes to maintenance mode, continuing if
+# it does within an hour and failing otherwise.
+- community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+ wait: true
+ drain: true
+ wait_interval: 60
+ wait_retries: 60
+
+- name: Disable backend server in 'www' backend pool and drop open sessions to it
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ socket: /var/run/haproxy.sock
+ shutdown_sessions: true
+
+- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found
+ community.general.haproxy:
+ state: disabled
+ host: '{{ inventory_hostname }}'
+ fail_on_not_found: true
+
+- name: Enable server in 'www' backend pool
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+
+- name: Enable server in 'www' backend pool wait until healthy
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: true
+
+- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ backend: www
+ wait: true
+ wait_retries: 10
+ wait_interval: 5
+
+- name: Enable server in 'www' backend pool with change server(s) weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 10
+ backend: www
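+
+# A relative weight (ending in %) is also accepted, permitted between 0 and 100%:
+- name: Enable server in 'www' backend pool with a relative weight
+ community.general.haproxy:
+ state: enabled
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ weight: 50%
+ backend: www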
+
+- name: Set the server in 'www' backend pool to drain mode
+ community.general.haproxy:
+ state: drain
+ host: '{{ inventory_hostname }}'
+ socket: /var/run/haproxy.sock
+ backend: www
+'''
+
+import csv
+import socket
+import time
+from string import Template
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+DEFAULT_SOCKET_LOCATION = "/var/run/haproxy.sock"
+RECV_SIZE = 1024
+ACTION_CHOICES = ['enabled', 'disabled', 'drain']
+WAIT_RETRIES = 25
+WAIT_INTERVAL = 5
+
+
+######################################################################
+class TimeoutException(Exception):
+ pass
+
+
+class HAProxy(object):
+ """
+ Used for communicating with HAProxy through its local UNIX socket interface.
+ Performs common HAProxy tasks such as enabling and
+ disabling servers.
+
+ The complete set of external commands HAProxy handles is documented
+ on their website:
+
+ http://haproxy.1wt.eu/download/1.5/doc/configuration.txt#Unix Socket commands
+ """
+
+ def __init__(self, module):
+ self.module = module
+
+ self.state = self.module.params['state']
+ self.host = self.module.params['host']
+ self.backend = self.module.params['backend']
+ self.weight = self.module.params['weight']
+ self.socket = self.module.params['socket']
+ self.shutdown_sessions = self.module.params['shutdown_sessions']
+ self.fail_on_not_found = self.module.params['fail_on_not_found']
+ self.agent = self.module.params['agent']
+ self.health = self.module.params['health']
+ self.wait = self.module.params['wait']
+ self.wait_retries = self.module.params['wait_retries']
+ self.wait_interval = self.module.params['wait_interval']
+ self._drain = self.module.params['drain']
+ self.command_results = {}
+
+ def execute(self, cmd, timeout=200, capture_output=True):
+ """
+ Executes a HAProxy command by sending a message to a HAProxy's local
+ UNIX socket and waiting up to 'timeout' milliseconds for the response.
+ """
+ self.client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.client.connect(self.socket)
+ self.client.sendall(to_bytes('%s\n' % cmd))
+
+ result = b''
+ buf = self.client.recv(RECV_SIZE)
+ while buf:
+ result += buf
+ buf = self.client.recv(RECV_SIZE)
+ result = to_text(result, errors='surrogate_or_strict')
+
+ if capture_output:
+ self.capture_command_output(cmd, result.strip())
+ self.client.close()
+ return result
+
+ def capture_command_output(self, cmd, output):
+ """
+ Capture the output for a command
+ """
+ if 'command' not in self.command_results:
+ self.command_results['command'] = []
+ self.command_results['command'].append(cmd)
+ if 'output' not in self.command_results:
+ self.command_results['output'] = []
+ self.command_results['output'].append(output)
+
+ def discover_all_backends(self):
+ """
+ Discover all entries with svname = 'BACKEND' and return a list of their corresponding
+ pxnames
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r)))
+
+ def discover_version(self):
+ """
+ Attempt to extract the haproxy version.
+ Return a tuple containing major and minor version.
+ """
+ data = self.execute('show info', 200, False)
+ lines = data.splitlines()
+ line = [x for x in lines if 'Version:' in x]
+ try:
+ version_values = line[0].partition(':')[2].strip().split('.', 3)
+ version = (int(version_values[0]), int(version_values[1]))
+ except (ValueError, TypeError, IndexError):
+ version = None
+
+ return version
+
+ def execute_for_backends(self, cmd, pxname, svname, wait_for_status=None):
+ """
+ Run some command on the specified backends. If no backends are provided they will
+ be discovered automatically (all backends)
+ """
+ # Discover backends if none are given
+ if pxname is None:
+ backends = self.discover_all_backends()
+ else:
+ backends = [pxname]
+
+ # Run the command for each requested backend
+ for backend in backends:
+ # Fail when backends were not found
+ state = self.get_state_for(backend, svname)
+ if (self.fail_on_not_found) and state is None:
+ self.module.fail_json(
+ msg="The specified backend '%s/%s' was not found!" % (backend, svname))
+
+ if state is not None:
+ self.execute(Template(cmd).substitute(pxname=backend, svname=svname))
+ if self.wait:
+ self.wait_until_status(backend, svname, wait_for_status)
+
+ def get_state_for(self, pxname, svname):
+ """
+ Find the state of specific services. When pxname is not set, get all backends for a specific host.
+ Returns a tuple of dictionaries containing the status, weight and current session count ('scur')
+ for those services, or None when nothing matched.
+ """
+ data = self.execute('show stat', 200, False).lstrip('# ')
+ r = csv.DictReader(data.splitlines())
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']},
+ filter(lambda d: (pxname is None or d['pxname']
+ == pxname) and d['svname'] == svname, r)
+ )
+ )
+ return state or None
+
+ def wait_until_status(self, pxname, svname, status):
+ """
+ Wait for a service to reach the specified status. Try wait_retries times
+ with wait_interval seconds of sleep in between. If the service has not reached
+ the expected status in that time, the module will fail. If the service was
+ not found, the module will fail.
+ """
+ for dummy in range(self.wait_retries):
+ state = self.get_state_for(pxname, svname)
+
+ # We can assume there will only be 1 element in state because both svname and pxname are always set when we get here
+ # When using track we get a status like this: MAINT (via pxname/svname) so we need to do substring matching
+ if status in state[0]['status']:
+ if not self._drain or state[0]['scur'] == '0':
+ return True
+ time.sleep(self.wait_interval)
+
+ self.module.fail_json(msg="server %s/%s not status '%s' after %d retries. Aborting." %
+ (pxname, svname, status, self.wait_retries))
+
+ def enabled(self, host, backend, weight):
+ """
+ Enabled action: marks the server as UP and re-enables checks. Also
+ reports the current weight for the server (default) and sets the
+ weight for the HAProxy backend server when one is provided.
+ """
+ cmd = "get weight $pxname/$svname; enable server $pxname/$svname"
+ if self.agent:
+ cmd += "; enable agent $pxname/$svname"
+ if self.health:
+ cmd += "; enable health $pxname/$svname"
+ if weight:
+ cmd += "; set weight $pxname/$svname %s" % weight
+ self.execute_for_backends(cmd, backend, host, 'UP')
+
+ def disabled(self, host, backend, shutdown_sessions):
+ """
+ Disabled action: marks the server as DOWN for maintenance. In this mode, no more checks will be
+ performed on the server until it leaves maintenance.
+ Optionally shuts down sessions while disabling the backend host server.
+ """
+ cmd = "get weight $pxname/$svname"
+ if self.agent:
+ cmd += "; disable agent $pxname/$svname"
+ if self.health:
+ cmd += "; disable health $pxname/$svname"
+ cmd += "; disable server $pxname/$svname"
+ if shutdown_sessions:
+ cmd += "; shutdown sessions server $pxname/$svname"
+ self.execute_for_backends(cmd, backend, host, 'MAINT')
+
+ def drain(self, host, backend, status='DRAIN'):
+ """
+ Drain action, sets the server to DRAIN mode.
+ In this mode, the server will not accept any new connections
+ other than those that are accepted via persistence.
+ """
+ haproxy_version = self.discover_version()
+
+ # check if haproxy version supports DRAIN state (starting with 1.5)
+ if haproxy_version and (1, 5) <= haproxy_version:
+ cmd = "set server $pxname/$svname state drain"
+ self.execute_for_backends(cmd, backend, host, "DRAIN")
+ if status == "MAINT":
+ self.disabled(host, backend, self.shutdown_sessions)
+
+ def act(self):
+ """
+ Determine the requested action from the module parameters and execute it.
+ """
+ # Get the state before the run
+ self.command_results['state_before'] = self.get_state_for(self.backend, self.host)
+
+ # toggle enable/disable server
+ if self.state == 'enabled':
+ self.enabled(self.host, self.backend, self.weight)
+ elif self.state == 'disabled' and self._drain:
+ self.drain(self.host, self.backend, status='MAINT')
+ elif self.state == 'disabled':
+ self.disabled(self.host, self.backend, self.shutdown_sessions)
+ elif self.state == 'drain':
+ self.drain(self.host, self.backend)
+ else:
+ self.module.fail_json(msg="unknown state specified: '%s'" % self.state)
+
+ # Get the state after the run
+ self.command_results['state_after'] = self.get_state_for(self.backend, self.host)
+
+ # Report change status
+ self.command_results['changed'] = (self.command_results['state_before'] != self.command_results['state_after'])
+
+ self.module.exit_json(**self.command_results)
+
+
+def main():
+
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=ACTION_CHOICES),
+ host=dict(type='str', required=True),
+ backend=dict(type='str'),
+ weight=dict(type='str'),
+ socket=dict(type='path', default=DEFAULT_SOCKET_LOCATION),
+ shutdown_sessions=dict(type='bool', default=False),
+ fail_on_not_found=dict(type='bool', default=False),
+ health=dict(type='bool', default=False),
+ agent=dict(type='bool', default=False),
+ wait=dict(type='bool', default=False),
+ wait_retries=dict(type='int', default=WAIT_RETRIES),
+ wait_interval=dict(type='int', default=WAIT_INTERVAL),
+ drain=dict(type='bool', default=False),
+ ),
+ )
+
+ if not module.params['socket']:
+ module.fail_json(msg="unable to locate haproxy socket")
+
+ ansible_haproxy = HAProxy(module)
+ ansible_haproxy.act()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/heroku_collaborator.py b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
new file mode 100644
index 000000000..e7b0de3f9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/heroku_collaborator.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: heroku_collaborator
+short_description: Add or delete app collaborators on Heroku
+description:
+ - Manages collaborators for Heroku apps.
+ - If set to C(present) and the Heroku user is already a collaborator, then do nothing.
+ - If set to C(present) and the Heroku user is not a collaborator, then add the user to the app.
+ - If set to C(absent) and the Heroku user is a collaborator, then delete the user from the app.
+author:
+ - Marcel Arns (@marns93)
+requirements:
+ - heroku3
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_key:
+ type: str
+ description:
+ - Heroku API key.
+ apps:
+ type: list
+ elements: str
+ description:
+ - List of Heroku app names.
+ required: true
+ suppress_invitation:
+ description:
+ - Suppress email invitation when creating the collaborator.
+ type: bool
+ default: false
+ user:
+ type: str
+ description:
+ - User ID or e-mail address.
+ required: true
+ state:
+ type: str
+ description:
+ - Create or remove the Heroku collaborator.
+ choices: ["present", "absent"]
+ default: "present"
+notes:
+ - The C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
+ - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
+'''
+
+EXAMPLES = '''
+- name: Create a heroku collaborator
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: max.mustermann@example.com
+ apps: heroku-example-app
+ state: present
+
+- name: An example of using the module in a loop
+ community.general.heroku_collaborator:
+ api_key: YOUR_API_KEY
+ user: '{{ item.user }}'
+ apps: '{{ item.apps | default(apps) }}'
+ suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
+ state: '{{ item.state | default("present") }}'
+ with_items:
+ - { user: 'a.b@example.com' }
+ - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
+ - { user: 'x.y@example.com', apps: ["heroku-example-app"] }
+'''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper
+
+
+def add_or_delete_heroku_collaborator(module, client):
+ user = module.params['user']
+ state = module.params['state']
+ affected_apps = []
+ result_state = False
+
+ for app in module.params['apps']:
+ if app not in client.apps():
+ module.fail_json(msg='App {0} does not exist'.format(app))
+
+ heroku_app = client.apps()[app]
+
+ heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
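+ # Membership of the user in this e-mail list decides whether an add or
+ # a remove call is needed below.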
+
+ if state == 'absent' and user in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.remove_collaborator(user)
+ affected_apps += [app]
+ result_state = True
+ elif state == 'present' and user not in heroku_collaborator_list:
+ if not module.check_mode:
+ heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
+ affected_apps += [app]
+ result_state = True
+
+ return result_state, affected_apps
+
+
+def main():
+ argument_spec = HerokuHelper.heroku_argument_spec()
+ argument_spec.update(
+ user=dict(required=True, type='str'),
+ apps=dict(required=True, type='list', elements='str'),
+ suppress_invitation=dict(default=False, type='bool'),
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HerokuHelper(module).get_heroku_client()
+
+ has_changed, msg = add_or_delete_heroku_collaborator(module, client)
+ module.exit_json(changed=has_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hg.py b/ansible_collections/community/general/plugins/modules/hg.py
new file mode 100644
index 000000000..dbbd504b4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hg.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Yeukhon Wong <yeukhon@acm.org>
+# Copyright (c) 2014, Nate Coraor <nate@bx.psu.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: hg
+short_description: Manages Mercurial (hg) repositories
+description:
+ - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local addresses.
+author: "Yeukhon Wong (@yeukhon)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ repo:
+ description:
+ - The repository address.
+ required: true
+ aliases: [ name ]
+ type: str
+ dest:
+ description:
+ - Absolute path of where the repository should be cloned to.
+ This parameter is required, unless I(clone) and I(update) are set to C(false).
+ type: path
+ revision:
+ description:
+ - Equivalent to the C(-r) option of the hg command, which can be a changeset, revision number,
+ branch name or even a tag.
+ aliases: [ version ]
+ type: str
+ force:
+ description:
+ - Discards uncommitted changes. Runs C(hg update -C). Prior to
+ 1.9, the default was C(true).
+ type: bool
+ default: false
+ purge:
+ description:
+ - Deletes untracked files. Runs C(hg purge).
+ type: bool
+ default: false
+ update:
+ description:
+ - If C(false), do not retrieve new revisions from the origin repository.
+ type: bool
+ default: true
+ clone:
+ description:
+ - If C(false), do not clone the repository if it does not exist locally.
+ type: bool
+ default: true
+ executable:
+ description:
+ - Path to hg executable to use. If not supplied,
+ the normal mechanism for resolving binary paths will be used.
+ type: str
+notes:
+ - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156).
+ - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
+ SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
+ one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
+ the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+ - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such,
+ if the underlying system still uses a Python version below 2.7.9, you will have issues checking out
+ bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01).
+'''
+
+EXAMPLES = '''
+- name: Ensure the current working copy is inside the stable branch and delete untracked files if any.
+ community.general.hg:
+ repo: https://bitbucket.org/user/repo1
+ dest: /home/user/repo1
+ revision: stable
+ purge: true
+
+- name: Get information about the repository, whether or not it has already been cloned locally.
+ community.general.hg:
+ repo: git://bitbucket.org/user/repo
+ dest: /srv/checkout
+ clone: false
+ update: false
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Hg(object):
+ def __init__(self, module, dest, repo, revision, hg_path):
+ self.module = module
+ self.dest = dest
+ self.repo = repo
+ self.revision = revision
+ self.hg_path = hg_path
+
+ def _command(self, args_list):
+ (rc, out, err) = self.module.run_command([self.hg_path] + args_list)
+ return (rc, out, err)
+
+ def _list_untracked(self):
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print']
+ return self._command(args)
+
+ def get_revision(self):
+ """
+ hg id -b -i -t returns a string in the format:
+ "<changeset>[+] <branch_name> <tag>"
+ This format lists the state of the current working copy,
+ and indicates whether there are uncommitted changes by the
+ plus sign. Otherwise, the sign is omitted.
+
+ Read the full description via hg id --help
+ """
+ (rc, out, err) = self._command(['id', '-b', '-i', '-t', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def get_remote_revision(self):
+ (rc, out, err) = self._command(['id', self.repo])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ else:
+ return to_native(out).strip('\n')
+
+ def has_local_mods(self):
+ now = self.get_revision()
+ if '+' in now:
+ return True
+ else:
+ return False
+
+ def discard(self):
+ before = self.has_local_mods()
+ if not before:
+ return False
+
+ args = ['update', '-C', '-R', self.dest, '-r', '.']
+ (rc, out, err) = self._command(args)
+ if rc != 0:
+ self.module.fail_json(msg=err)
+
+ after = self.has_local_mods()
+ if before != after and not after: # no more local modifications
+ return True
+
+ def purge(self):
+ # before purge, find out if there are any untracked files
+ (rc1, out1, err1) = self._list_untracked()
+ if rc1 != 0:
+ self.module.fail_json(msg=err1)
+
+ # there are some untracked files
+ if out1 != '':
+ args = ['purge', '--config', 'extensions.purge=', '-R', self.dest]
+ (rc2, out2, err2) = self._command(args)
+ if rc2 != 0:
+ self.module.fail_json(msg=err2)
+ return True
+ else:
+ return False
+
+ def cleanup(self, force, purge):
+ discarded = False
+ purged = False
+
+ if force:
+ discarded = self.discard()
+ if purge:
+ purged = self.purge()
+ if discarded or purged:
+ return True
+ else:
+ return False
+
+ def pull(self):
+ return self._command(
+ ['pull', '-R', self.dest, self.repo])
+
+ def update(self):
+ if self.revision is not None:
+ return self._command(['update', '-r', self.revision, '-R', self.dest])
+ return self._command(['update', '-R', self.dest])
+
+ def clone(self):
+ if self.revision is not None:
+ return self._command(['clone', self.repo, self.dest, '-r', self.revision])
+ return self._command(['clone', self.repo, self.dest])
+
+ @property
+ def at_revision(self):
+ """
+ There is no point in pulling from a potentially down/slow remote site
+ if the desired changeset is already the current changeset.
+ """
+ if self.revision is None or len(self.revision) < 7:
+ # Assume it's a rev number, tag, or branch
+ return False
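+ # 'hg --debug id -i' prints the full 40-character changeset hash, so a
+ # sufficiently long hash prefix can be matched with startswith() below.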
+ (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
+ if rc != 0:
+ self.module.fail_json(msg=err)
+ if out.startswith(self.revision):
+ return True
+ return False
+
+
+# ===========================================
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ repo=dict(type='str', required=True, aliases=['name']),
+ dest=dict(type='path'),
+ revision=dict(type='str', default=None, aliases=['version']),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ update=dict(type='bool', default=True),
+ clone=dict(type='bool', default=True),
+ executable=dict(type='str', default=None),
+ ),
+ )
+ repo = module.params['repo']
+ dest = module.params['dest']
+ revision = module.params['revision']
+ force = module.params['force']
+ purge = module.params['purge']
+ update = module.params['update']
+ clone = module.params['clone']
+ hg_path = module.params['executable'] or module.get_bin_path('hg', True)
+ if dest is not None:
+ hgrc = os.path.join(dest, '.hg/hgrc')
+
+ # initial states
+ before = ''
+ changed = False
+ cleaned = False
+
+ if not dest and (clone or update):
+ module.fail_json(msg="the destination directory must be specified unless clone=false and update=false")
+
+ hg = Hg(module, dest, repo, revision, hg_path)
+
+ # If there is no hgrc file, then assume repo is absent
+ # and perform clone. Otherwise, perform pull and update.
+ if not clone and not update:
+ out = hg.get_remote_revision()
+ module.exit_json(after=out, changed=False)
+ if not os.path.exists(hgrc):
+ if clone:
+ (rc, out, err) = hg.clone()
+ if rc != 0:
+ module.fail_json(msg=err)
+ else:
+ module.exit_json(changed=False)
+ elif not update:
+ # Just return having found a repo already in the dest path
+ before = hg.get_revision()
+ elif hg.at_revision:
+ # no update needed, don't pull
+ before = hg.get_revision()
+
+ # but force and purge if desired
+ cleaned = hg.cleanup(force, purge)
+ else:
+ # get the current state before doing pulling
+ before = hg.get_revision()
+
+ # can perform force and purge
+ cleaned = hg.cleanup(force, purge)
+
+ (rc, out, err) = hg.pull()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ (rc, out, err) = hg.update()
+ if rc != 0:
+ module.fail_json(msg=err)
+
+ after = hg.get_revision()
+ if before != after or cleaned:
+ changed = True
+
+ module.exit_json(before=before, after=after, changed=changed, cleaned=cleaned)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hipchat.py b/ansible_collections/community/general/plugins/modules/hipchat.py
new file mode 100644
index 000000000..11b5fb735
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hipchat.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hipchat
+short_description: Send a message to Hipchat
+description:
+ - Send a message to a Hipchat room, with options to control the formatting.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ room:
+ type: str
+ description:
+ - ID or name of the room.
+ required: true
+ msg_from:
+ type: str
+ description:
+ - Name the message will appear to be sent from. Maximum length is 15
+ characters; above this it will be truncated.
+ default: Ansible
+ aliases: [from]
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ color:
+ type: str
+ description:
+ - Background color for the message.
+ default: yellow
+ choices: [ "yellow", "red", "green", "purple", "gray", "random" ]
+ msg_format:
+ type: str
+ description:
+ - Message format.
+ default: text
+ choices: [ "text", "html" ]
+ notify:
+ description:
+ - If C(true), a notification will be triggered for users in the room.
+ type: bool
+ default: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ api:
+ type: str
+ description:
+ - API URL if using a self-hosted Hipchat server. For Hipchat API version
+ 2, use the default URI with C(/v2) instead of C(/v1).
+ default: 'https://api.hipchat.com/v1'
+
+author:
+- Shirou Wakayama (@shirou)
+- Paul Bourdel (@pb8226)
+'''
+
+EXAMPLES = '''
+- name: Send a message to a Hipchat room
+ community.general.hipchat:
+ room: notif
+ msg: Ansible task finished
+
+- name: Send a message to a Hipchat room using Hipchat API version 2
+ community.general.hipchat:
+ api: https://api.hipchat.com/v2/
+ token: OAUTH2_TOKEN
+ room: notify
+ msg: Ansible task finished
+'''
+
+# ===========================================
+# HipChat module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+DEFAULT_URI = "https://api.hipchat.com/v1"
+
+MSG_URI_V1 = "/rooms/message"
+
+NOTIFY_URI_V2 = "/room/{id_or_name}/notification"
+
+
+def send_msg_v1(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI_V1):
+ '''Send a message to a Hipchat v1 server.'''
+
+ params = {}
+ params['room_id'] = room
+ params['from'] = msg_from[:15] # max length is 15
+ params['message'] = msg
+ params['message_format'] = msg_format
+ params['color'] = color
+ params['api'] = api
+ params['notify'] = int(notify)
+
+ url = api + MSG_URI_V1 + "?auth_token=%s" % (token)
+ data = urlencode(params)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def send_msg_v2(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=NOTIFY_URI_V2):
+ '''Send a message to a Hipchat v2 server.'''
+
+ headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'}
+
+ body = dict()
+ body['message'] = msg
+ body['color'] = color
+ body['message_format'] = msg_format
+ body['notify'] = notify
+
+ POST_URL = api + NOTIFY_URI_V2
+
+ url = POST_URL.replace('{id_or_name}', pathname2url(room))
+ data = json.dumps(body)
+
+ if module.check_mode:
+ # In check mode, exit before actually sending the message
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+
+ # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows
+ # 204 to be the expected result code.
+ if info['status'] in [200, 204]:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ room=dict(required=True),
+ msg=dict(required=True),
+ msg_from=dict(default="Ansible", aliases=['from']),
+ color=dict(default="yellow", choices=["yellow", "red", "green",
+ "purple", "gray", "random"]),
+ msg_format=dict(default="text", choices=["text", "html"]),
+ notify=dict(default=True, type='bool'),
+ validate_certs=dict(default=True, type='bool'),
+ api=dict(default=DEFAULT_URI),
+ ),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ room = str(module.params["room"])
+ msg = module.params["msg"]
+ msg_from = module.params["msg_from"]
+ color = module.params["color"]
+ msg_format = module.params["msg_format"]
+ notify = module.params["notify"]
+ api = module.params["api"]
+
+ try:
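+ # Endpoints containing '/v2' use the v2 room-notification API;
+ # anything else falls back to the legacy v1 rooms/message API.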
+ if api.find('/v2') != -1:
+ send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ else:
+ send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api)
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/homebrew.py b/ansible_collections/community/general/plugins/modules/homebrew.py
new file mode 100644
index 000000000..7592f95a4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/homebrew.py
@@ -0,0 +1,981 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Andrew Dunham <andrew@du.nham.ca>
+# Copyright (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on macports (Jimmy Tang <jcftang@gmail.com>)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Andrew Dunham (@andrew-d)"
+requirements:
+ - homebrew must already be installed on the target system
+short_description: Package manager for Homebrew
+description:
+ - Manages Homebrew packages.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - A list of names of packages to install/remove.
+ aliases: [ 'formula', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "A C(:) separated list of paths to search for C(brew) executable.
+ Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command,
+ providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin'
+ type: path
+ state:
+ description:
+ - State of the package.
+ choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ]
+ default: present
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ type: bool
+ default: false
+ upgrade_all:
+ description:
+ - Upgrade all homebrew packages.
+ type: bool
+ default: false
+ aliases: ['upgrade']
+ install_options:
+ description:
+ - Option flags to install a package.
+ aliases: ['options']
+ type: list
+ elements: str
+ upgrade_options:
+ description:
+ - Option flags to upgrade.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+notes:
+ - When used with a C(loop:), each package will be processed individually;
+ it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+# Install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- community.general.homebrew:
+ name: foo
+ path: /my/other/location/bin
+ state: present
+
+# Update homebrew first and install formula foo with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: present
+ update_homebrew: true
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
+- community.general.homebrew:
+ name: foo
+ state: latest
+ update_homebrew: true
+
+# Update homebrew and upgrade all packages
+- community.general.homebrew:
+ update_homebrew: true
+ upgrade_all: true
+
+# Miscellaneous other examples
+- community.general.homebrew:
+ name: foo
+ state: head
+
+- community.general.homebrew:
+ name: foo
+ state: linked
+
+- community.general.homebrew:
+ name: foo
+ state: absent
+
+- community.general.homebrew:
+ name: foo,bar
+ state: absent
+
+- community.general.homebrew:
+ name: foo
+ state: present
+ install_options: with-baz,enable-debug
+
+- name: Install formula foo with 'brew' from cask
+ community.general.homebrew:
+ name: homebrew/cask/foo
+ state: present
+
+- name: Use ignore-pinned option while upgrading all
+ community.general.homebrew:
+ upgrade_all: true
+ upgrade_options: ignore-pinned
+'''
+
+RETURN = '''
+msg:
+ description: Status message describing the result of the module run.
+ returned: always
+ type: str
+ sample: "Changed: 0, Unchanged: 2"
+unchanged_pkgs:
+ description:
+ - List of package names which are unchanged after the module run.
+ returned: success
+ type: list
+ sample: ["awscli", "ag"]
+ version_added: '0.2.0'
+changed_pkgs:
+ description:
+ - List of package names which are changed after the module run.
+ returned: success
+ type: list
+ sample: ['git', 'git-cola']
+ version_added: '0.2.0'
+'''
+
+import os.path
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
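+ # Strips per-line '#' comments from the whitelist string, then compiles a
+ # complement character class; e.g. VALID_PACKAGE_CHARS below becomes
+ # (roughly, for illustration) r'[^\w./\+\-:@]'.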
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \+ # plus signs
+ \- # dashes
+ : # colons (for URLs)
+ @ # at-sign
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, string_types):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_package(cls, package):
+ '''A valid package is either None or a string of allowed package characters.'''
+
+ if package is None:
+ return True
+
+ return (
+ isinstance(package, string_types)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path, packages=None, state=None,
+ update_homebrew=False, upgrade_all=False,
+ install_options=None, upgrade_options=None):
+ if not install_options:
+ install_options = list()
+ if not upgrade_options:
+ upgrade_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all,
+ install_options=install_options,
+ upgrade_options=upgrade_options,)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.changed_pkgs = []
+ self.unchanged_pkgs = []
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
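+        # The check relies on 'brew info <package>' printing 'Built from
+        # source' or 'Poured from bottle' for each installed keg; either
+        # line means the package is installed.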
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
+
+ return False
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ self.current_package,
+ ])
+
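+        # 'brew outdated <package>' is assumed (per brew's documented
+        # behaviour) to exit non-zero when a newer version is available,
+        # hence a non-zero rc means outdated.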
+ return rc != 0
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+ if self.upgrade_all:
+ self._upgrade_all()
+
+ if self.packages:
+ if self.state == 'installed':
+ return self._install_packages()
+ elif self.state == 'upgraded':
+ return self._upgrade_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew would be updated.'
+ raise HomebrewException(self.message)
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Homebrew packages would be upgraded.'
+ raise HomebrewException(self.message)
+ cmd = [self.brew_path, 'upgrade'] + self.upgrade_options
+
+ rc, out, err = self.module.run_command(cmd)
+ if rc == 0:
+ if not out:
+ self.message = 'Homebrew packages already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
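+        # e.g. ['brew', 'install', 'foo', '--HEAD'] when state=head; the
+        # filter above drops the None placeholder otherwise.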
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.unchanged_pkgs.append(self.current_package)
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall', '--force']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ self.changed_count += 1
+ self.changed_pkgs.append(self.current_package)
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
+
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
+
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "formula"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ upgrade_options=dict(
+ default=None,
+ type='list',
+ elements='str',
+ )
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ packages = p['name']
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('head', ):
+ state = 'head'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state == 'linked':
+ state = 'linked'
+ if state == 'unlinked':
+ state = 'unlinked'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ update_homebrew = p['update_homebrew']
+ if not update_homebrew:
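+        # HOMEBREW_NO_AUTO_UPDATE stops brew from implicitly running
+        # 'brew update' before install and upgrade commands.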
+ module.run_command_environ_update.update(
+ dict(HOMEBREW_NO_AUTO_UPDATE="True")
+ )
+ upgrade_all = p['upgrade_all']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ p['upgrade_options'] = p['upgrade_options'] or []
+ upgrade_options = ['--{0}'.format(upgrade_option)
+ for upgrade_option in p['upgrade_options']]
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ upgrade_all=upgrade_all, install_options=install_options,
+ upgrade_options=upgrade_options)
+ (failed, changed, message) = brew.run()
+ changed_pkgs = brew.changed_pkgs
+ unchanged_pkgs = brew.unchanged_pkgs
+
+ if failed:
+ module.fail_json(msg=message)
+ module.exit_json(
+ changed=changed,
+ msg=message,
+ unchanged_pkgs=unchanged_pkgs,
+ changed_pkgs=changed_pkgs
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/homebrew_cask.py b/ansible_collections/community/general/plugins/modules/homebrew_cask.py
new file mode 100644
index 000000000..c992693b6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/homebrew_cask.py
@@ -0,0 +1,895 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+ - "Enric Lluelles (@enriclluelles)"
+short_description: Install and uninstall homebrew casks
+description:
+ - Manages Homebrew casks.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of cask to install or remove.
+ aliases: [ 'cask', 'package', 'pkg' ]
+ type: list
+ elements: str
+ path:
+ description:
+ - "':' separated list of paths to search for 'brew' executable."
+ default: '/usr/local/bin:/opt/homebrew/bin'
+ type: path
+ state:
+ description:
+ - State of the cask.
+ choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ]
+ default: present
+ type: str
+ sudo_password:
+ description:
+ - The sudo password to be passed to SUDO_ASKPASS.
+ required: false
+ type: str
+ update_homebrew:
+ description:
+ - Update homebrew itself first.
+ - Note that C(brew cask update) is a synonym for C(brew update).
+ type: bool
+ default: false
+ install_options:
+ description:
+ - Options flags to install a package.
+ aliases: [ 'options' ]
+ type: list
+ elements: str
+ accept_external_apps:
+ description:
+ - Allow external apps.
+ type: bool
+ default: false
+ upgrade_all:
+ description:
+ - Upgrade all casks.
+ - Mutually exclusive with C(upgraded) state.
+ type: bool
+ default: false
+ aliases: [ 'upgrade' ]
+ greedy:
+ description:
+ - Upgrade casks that auto update.
+ - Passes C(--greedy) to C(brew outdated --cask) when checking
+ if an installed cask has a newer version available,
+ or to C(brew upgrade --cask) when upgrading all casks.
+ type: bool
+ default: false
+'''
+EXAMPLES = '''
+- name: Install cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+
+- name: Remove cask
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'appdir=/Applications'
+
+- name: Install cask with install options
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: 'debug,appdir=/Applications'
+
+- name: Install cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ install_options: force
+
+- name: Allow external app
+ community.general.homebrew_cask:
+ name: alfred
+ state: present
+ accept_external_apps: true
+
+- name: Remove cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: absent
+ install_options: force
+
+- name: Upgrade all casks
+ community.general.homebrew_cask:
+ upgrade_all: true
+
+- name: Upgrade all casks with greedy option
+ community.general.homebrew_cask:
+ upgrade_all: true
+ greedy: true
+
+- name: Upgrade given cask with force option
+ community.general.homebrew_cask:
+ name: alfred
+ state: upgraded
+ install_options: force
+
+- name: Upgrade cask with greedy option
+ community.general.homebrew_cask:
+ name: 1password
+ state: upgraded
+ greedy: true
+
+- name: Using sudo password for installing cask
+ community.general.homebrew_cask:
+ name: wireshark
+ state: present
+ sudo_password: "{{ ansible_become_pass }}"
+'''
+
+import os
+import re
+import tempfile
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems, string_types
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group_complement(s):
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
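+# Illustrative example: for VALID_CASK_CHARS below, the collected characters
+# are '\w', '.', '/', '\-' and '@', so the compiled pattern is equivalent to
+# r'[^\w./\-@]' and .search() finds any character that is not allowed.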
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ . # dots
+ \- # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ . # dots
+ / # slash (for taps)
+ \- # dashes
+ @ # at symbol
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, (string_types)):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - dots
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, string_types)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or a string of alphanumerics, dots, slashes, dashes, and @ symbols.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, string_types)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, string_types)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, string_types):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+
+ @property
+ def brew_version(self):
+ try:
+ return self._brew_version
+ except AttributeError:
+ return None
+
+ @brew_version.setter
+ def brew_version(self, brew_version):
+ self._brew_version = brew_version
+
+ # /class properties -------------------------------------------- }}}
+
+    def __init__(self, module, path=None, casks=None, state=None,
+ sudo_password=None, update_homebrew=False,
+ install_options=None, accept_external_apps=False,
+ upgrade_all=False, greedy=False):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in iteritems(kwargs):
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_brew_path()
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_outdated(self):
+ if not self.valid_cask(self.current_cask):
+ return False
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'outdated', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'outdated']
+
+ cask_is_outdated_command = base_opts + (['--greedy'] if self.greedy else []) + [self.current_cask]
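+        # With greedy=True, casks that auto-update are reported as well; any
+        # output from the command below means a newer version is available.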
+
+ rc, out, err = self.module.run_command(cask_is_outdated_command)
+
+ return out != ""
+
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, "list", "--cask"]
+ else:
+ base_opts = [self.brew_path, "cask", "list"]
+
+ cmd = base_opts + [self.current_cask]
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def _get_brew_version(self):
+ if self.brew_version:
+ return self.brew_version
+
+ cmd = [self.brew_path, '--version']
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
+ # get version string from first line of "brew --version" output
+ version = out.split('\n')[0].split(' ')[1]
+ self.brew_version = version
+ return self.brew_version
+
+ def _brew_cask_command_is_deprecated(self):
+ # The `brew cask` replacements were fully available in 2.6.0 (https://brew.sh/2020/12/01/homebrew-2.6.0/)
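+        # From that version on this module uses 'brew <verb> --cask <name>'
+        # instead of the legacy 'brew cask <verb> <name>'.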
+ return LooseVersion(self._get_brew_version()) >= LooseVersion('2.6.0')
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.upgrade_all:
+ return self._upgrade_all()
+
+ if self.casks:
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'upgraded':
+ return self._upgrade_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ self.failed = True
+ self.message = "You must select a cask to install."
+ raise HomebrewCaskException(self.message)
+
+ # sudo_password fix ---------------------- {{{
+ def _run_command_with_sudo_password(self, cmd):
+ rc, out, err = '', '', ''
+
+ with tempfile.NamedTemporaryFile() as sudo_askpass_file:
+ sudo_askpass_file.write(b"#!/bin/sh\n\necho '%s'\n" % to_bytes(self.sudo_password))
+ os.chmod(sudo_askpass_file.name, 0o700)
+ sudo_askpass_file.file.close()
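+            # sudo, when invoked with -A/--askpass, runs the SUDO_ASKPASS
+            # helper written above and reads the password from its stdout
+            # instead of prompting on a TTY.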
+
+ rc, out, err = self.module.run_command(
+ cmd,
+ environ_update={'SUDO_ASKPASS': sudo_askpass_file.name}
+ )
+
+ self.module.add_cleanup_file(sudo_askpass_file.name)
+
+ return (rc, out, err)
+ # /sudo_password fix --------------------- }}}
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, string_types):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # _upgrade_all --------------------------- {{{
+ def _upgrade_all(self):
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Casks would be upgraded.'
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ cmd = [self.brew_path, 'upgrade', '--cask']
+ else:
+ cmd = [self.brew_path, 'cask', 'upgrade']
+
+ if self.greedy:
+ cmd = cmd + ['--greedy']
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if rc == 0:
+ if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE):
+ self.message = 'Homebrew casks already upgraded.'
+
+ else:
+ self.changed = True
+ self.message = 'Homebrew casks upgraded.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /_upgrade_all -------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if '--force' not in self.install_options and self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'install', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'install']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ elif self.accept_external_apps and re.search(r"Error: It seems there is already an App at", err):
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_cask(self):
+ command = 'upgrade'
+
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ command = 'install'
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.message = 'Cask is already upgraded: {0}'.format(
+ self.current_cask,
+ )
+ self.unchanged_count += 1
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be upgraded: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, command, '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', command]
+
+ opts = base_opts + self.install_options + [self.current_cask]
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed() and not self._current_cask_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask upgraded: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _upgrade_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._upgrade_current_cask()
+
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ if self._brew_cask_command_is_deprecated():
+ base_opts = [self.brew_path, 'uninstall', '--cask']
+ else:
+ base_opts = [self.brew_path, 'cask', 'uninstall']
+
+ opts = base_opts + [self.current_cask] + self.install_options
+
+ cmd = [opt for opt in opts if opt]
+
+ rc, out, err = '', '', ''
+
+ if self.sudo_password:
+ rc, out, err = self._run_command_with_sudo_password(cmd)
+ else:
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled --------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ aliases=["pkg", "package", "cask"],
+ required=False,
+ type='list',
+ elements='str',
+ ),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin",
+ required=False,
+ type='path',
+ ),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ sudo_password=dict(
+ type="str",
+ required=False,
+ no_log=True,
+ ),
+ update_homebrew=dict(
+ default=False,
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ elements='str',
+ ),
+ accept_external_apps=dict(
+ default=False,
+ type='bool',
+ ),
+ upgrade_all=dict(
+ default=False,
+ aliases=["upgrade"],
+ type='bool',
+ ),
+ greedy=dict(
+ default=False,
+ type='bool',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p['name']:
+ casks = p['name']
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ sudo_password = p['sudo_password']
+
+ update_homebrew = p['update_homebrew']
+ upgrade_all = p['upgrade_all']
+ greedy = p['greedy']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ accept_external_apps = p['accept_external_apps']
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state, sudo_password=sudo_password,
+ update_homebrew=update_homebrew,
+ install_options=install_options,
+ accept_external_apps=accept_external_apps,
+ upgrade_all=upgrade_all,
+ greedy=greedy,
+ )
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/homebrew_tap.py b/ansible_collections/community/general/plugins/modules/homebrew_tap.py
new file mode 100644
index 000000000..b230dbb34
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/homebrew_tap.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Daniel Jaouen <dcj24@cornell.edu>
+# Copyright (c) 2016, Indrajit Raychaudhuri <irc+code@indrajit.com>
+#
+# Based on homebrew (Andrew Dunham <andrew@du.nham.ca>)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: homebrew_tap
+author:
+ - "Indrajit Raychaudhuri (@indrajitr)"
+ - "Daniel Jaouen (@danieljaouen)"
+short_description: Tap a Homebrew repository
+description:
+ - Tap external Homebrew repositories.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The GitHub user/organization repository to tap.
+ required: true
+ aliases: ['tap']
+ type: list
+ elements: str
+ url:
+ description:
+ - The optional git URL of the repository to tap. The URL is not
+ assumed to be on GitHub, and the protocol doesn't have to be HTTP.
+ Any location and protocol that git can handle is fine.
+      - When this option is provided, I(name) must be a single tap
+        rather than a list of multiple taps.
+ required: false
+ type: str
+ state:
+ description:
+      - State of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+ type: str
+ path:
+ description:
+ - "A C(:) separated list of paths to search for C(brew) executable."
+ default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin'
+ type: path
+ version_added: '2.1.0'
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = r'''
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+
+- name: Tap a Homebrew repository, state absent
+ community.general.homebrew_tap:
+ name: homebrew/dupes
+ state: absent
+
+- name: Tap a Homebrew repository, state present
+ community.general.homebrew_tap:
+ name: homebrew/dupes,homebrew/science
+ state: present
+
+- name: Tap a Homebrew repository using url, state present
+ community.general.homebrew_tap:
+ name: telemachus/brew
+ url: 'https://bitbucket.org/telemachus/brew'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
+ regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
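+    # e.g. 'homebrew/dupes' and 'telemachus/brew' match; the optional
+    # 'homebrew-' prefix also allows the 'user/homebrew-repo' form.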
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
+
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ tap_name = re.sub('homebrew-', '', tap.lower())
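+    # 'brew tap' lists taps in short form without the 'homebrew-' repository
+    # prefix, so the prefix is stripped before comparing.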
+
+ return tap_name in taps
+
+
+def add_tap(module, brew_path, tap, url=None):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+        cmd = [brew_path, 'tap', tap]
+        if url:
+            # Only pass the URL through when one was given; a None element
+            # would otherwise end up in the command line.
+            cmd.append(url)
+        rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s due to %s' % (tap, err)
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, changed, unchanged, added, msg = False, False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s due to %s' % (tap, err)
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, changed, unchanged, removed, msg = False, False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], type='list', required=True, elements='str'),
+ url=dict(default=None, required=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ path=dict(
+ default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin",
+ required=False,
+ type='path',
+ ),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ if path:
+ path = path.split(':')
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=path,
+ )
+
+ taps = module.params['name']
+ url = module.params['url']
+
+ if module.params['state'] == 'present':
+ if url is None:
+ # No tap URL provided explicitly, continue with bulk addition
+ # of all the taps.
+ failed, changed, msg = add_taps(module, brew_path, taps)
+ else:
+            # When a tap URL is provided explicitly, we allow adding
+            # a *single* tap only. Validate and proceed to add the single tap.
+ if len(taps) > 1:
+ msg = "List of multiple taps may not be provided with 'url' option."
+ module.fail_json(msg=msg)
+ else:
+ failed, changed, msg = add_tap(module, brew_path, taps[0], url)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/homectl.py b/ansible_collections/community/general/plugins/modules/homectl.py
new file mode 100644
index 000000000..301e388d3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/homectl.py
@@ -0,0 +1,658 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, James Livulpi
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: homectl
+author:
+ - "James Livulpi (@jameslivulpi)"
+short_description: Manage user accounts with systemd-homed
+version_added: 4.4.0
+description:
+ - Manages a user's home directory managed by systemd-homed.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The user name to create, remove, or update.
+ required: true
+ aliases: [ 'user', 'username' ]
+ type: str
+ password:
+ description:
+ - Set the user's password to this.
+ - Homed requires this value to be in cleartext on user creation and updating a user.
+      - The module takes the password and generates a SHA-512 password hash with 10000 rounds using crypt.
+ - See U(https://systemd.io/USER_RECORD/).
+ - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed.
+ type: str
+ state:
+ description:
+ - The operation to take on the user.
+ choices: [ 'absent', 'present' ]
+ default: present
+ type: str
+ storage:
+ description:
+ - Indicates the storage mechanism for the user's home directory.
+      - If the storage type is not specified, C(homed.conf(5)) defines which default storage to use.
+ - Only used when a user is first created.
+ choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ]
+ type: str
+ disksize:
+ description:
+ - The intended home directory disk space.
+ - Human readable value such as C(10G), C(10M), or C(10B).
+ type: str
+ resize:
+ description:
+ - When used with I(disksize) this will attempt to resize the home directory immediately.
+ default: false
+ type: bool
+ realname:
+ description:
+ - The user's real ('human') name.
+ - This can also be used to add a comment to maintain compatibility with C(useradd).
+ aliases: [ 'comment' ]
+ type: str
+ realm:
+ description:
+ - The 'realm' a user is defined in.
+ type: str
+ email:
+ description:
+ - The email address of the user.
+ type: str
+ location:
+ description:
+ - A free-form location string describing the location of the user.
+ type: str
+ iconname:
+ description:
+ - The name of an icon picked by the user, for example for the purpose of an avatar.
+ - Should follow the semantics defined in the Icon Naming Specification.
+ - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics.
+ type: str
+ homedir:
+ description:
+ - Path to use as home directory for the user.
+ - This is the directory the user's home directory is mounted to while the user is logged in.
+ - This is not where the user's data is actually stored, see I(imagepath) for that.
+ - Only used when a user is first created.
+ type: path
+ imagepath:
+ description:
+ - Path to place the user's home directory.
+ - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information.
+ - Only used when a user is first created.
+ type: path
+ uid:
+ description:
+ - Sets the UID of the user.
+ - If using I(gid) homed requires the value to be the same.
+ - Only used when a user is first created.
+ type: int
+ gid:
+ description:
+ - Sets the gid of the user.
+ - If using I(uid) homed requires the value to be the same.
+ - Only used when a user is first created.
+ type: int
+ mountopts:
+ description:
+      - String separated by comma, each indicating a mount option for a user's home directory.
+ - Valid options are C(nosuid), C(nodev) or C(noexec).
+ - Homed by default uses C(nodev) and C(nosuid) while C(noexec) is off.
+ type: str
+ umask:
+ description:
+      - Sets the umask for the user's login sessions.
+ - Value from C(0000) to C(0777).
+ type: int
+ memberof:
+ description:
+ - String separated by comma each indicating a UNIX group this user shall be a member of.
+ - Groups the user should be a member of should be supplied as comma separated list.
+ aliases: [ 'groups' ]
+ type: str
+ skeleton:
+ description:
+ - The absolute path to the skeleton directory to populate a new home directory from.
+ - This is only used when a home directory is first created.
+ - If not specified homed by default uses C(/etc/skel).
+ aliases: [ 'skel' ]
+ type: path
+ shell:
+ description:
+ - Shell binary to use for terminal logins of given user.
+ - If not specified homed by default uses C(/bin/bash).
+ type: str
+ environment:
+ description:
+ - String separated by comma each containing an environment variable and its value to
+        set for the user's login session, in a format compatible with C(putenv()).
+ - Any environment variable listed here is automatically set by pam_systemd for all
+ login sessions of the user.
+ aliases: [ 'setenv' ]
+ type: str
+ timezone:
+ description:
+ - Preferred timezone to use for the user.
+ - Should be a tzdata compatible location string such as C(America/New_York).
+ type: str
+ locked:
+ description:
+ - Whether the user account should be locked or not.
+ type: bool
+ language:
+ description:
+ - The preferred language/locale for the user.
+ - This should be in a format compatible with the C($LANG) environment variable.
+ type: str
+ passwordhint:
+ description:
+ - Password hint for the given user.
+ type: str
+ sshkeys:
+ description:
+      - String separated by comma, each listing an SSH public key that is authorized to access the account.
+ - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file.
+ type: str
+ notbefore:
+ description:
+ - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in.
+ type: int
+ notafter:
+ description:
+ - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in.
+ type: int
+'''
+
+EXAMPLES = '''
+- name: Add the user 'johnd'
+ community.general.homectl:
+ name: johnd
+ password: myreallysecurepassword1!
+ state: present
+
+- name: Add the user 'alice' with a zsh shell, uid of 1000, and gid of 1000
+ community.general.homectl:
+ name: alice
+ password: myreallysecurepassword1!
+ state: present
+ shell: /bin/zsh
+ uid: 1000
+ gid: 1000
+
+- name: Modify an existing user 'frank' to have 10G of diskspace and resize usage now
+ community.general.homectl:
+ name: frank
+ password: myreallysecurepassword1!
+ state: present
+ disksize: 10G
+ resize: true
+
+- name: Remove an existing user 'janet'
+ community.general.homectl:
+ name: janet
+ state: absent
+'''
+
+RETURN = '''
+data:
+ description: A json dictionary returned from C(homectl inspect -j).
+ returned: success
+ type: dict
+ sample: {
+ "data": {
+ "binding": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "fileSystemType": "btrfs",
+ "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b",
+ "gid": 60268,
+ "imagePath": "/home/james.home",
+ "luksCipher": "aes",
+ "luksCipherMode": "xts-plain64",
+ "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81",
+ "luksVolumeKeySize": 32,
+ "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f",
+ "storage": "luks",
+ "uid": 60268
+ }
+ },
+ "diskSize": 3221225472,
+ "disposition": "regular",
+ "lastChangeUSec": 1641941238208691,
+ "lastPasswordChangeUSec": 1641941238208691,
+ "privileged": {
+ "hashedPassword": [
+ "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV."
+ ]
+ },
+ "signature": [
+ {
+ "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==",
+ "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n"
+ }
+ ],
+ "status": {
+ "e9ed2a5b0033427286b228e97c1e8343": {
+ "diskCeiling": 21845405696,
+ "diskFloor": 268435456,
+ "diskSize": 3221225472,
+ "service": "io.systemd.Home",
+ "signedLocally": true,
+ "state": "inactive"
+ }
+ },
+ "userName": "james",
+ }
+ }
+'''
+
+import crypt
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import jsonify
+from ansible.module_utils.common.text.formatters import human_to_bytes
+
+
+class Homectl(object):
+    '''Manage user accounts and home directories through systemd-homed's homectl.'''
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.name = module.params['name']
+ self.password = module.params['password']
+ self.storage = module.params['storage']
+ self.disksize = module.params['disksize']
+ self.resize = module.params['resize']
+ self.realname = module.params['realname']
+ self.realm = module.params['realm']
+ self.email = module.params['email']
+ self.location = module.params['location']
+ self.iconname = module.params['iconname']
+ self.homedir = module.params['homedir']
+ self.imagepath = module.params['imagepath']
+ self.uid = module.params['uid']
+ self.gid = module.params['gid']
+ self.umask = module.params['umask']
+ self.memberof = module.params['memberof']
+ self.skeleton = module.params['skeleton']
+ self.shell = module.params['shell']
+ self.environment = module.params['environment']
+ self.timezone = module.params['timezone']
+ self.locked = module.params['locked']
+ self.passwordhint = module.params['passwordhint']
+ self.sshkeys = module.params['sshkeys']
+ self.language = module.params['language']
+ self.notbefore = module.params['notbefore']
+ self.notafter = module.params['notafter']
+ self.mountopts = module.params['mountopts']
+
+ self.result = {}
+
+ # Cannot run homectl commands if service is not active
+ def homed_service_active(self):
+ is_active = True
+ cmd = ['systemctl', 'show', 'systemd-homed.service', '-p', 'ActiveState']
+ rc, show_service_stdout, stderr = self.module.run_command(cmd)
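+        # 'systemctl show -p ActiveState' prints a single 'ActiveState=<state>'
+        # line; anything other than 'active' means homed is unavailable.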
+ if rc == 0:
+ state = show_service_stdout.rsplit('=')[1]
+ if state.strip() != 'active':
+ is_active = False
+ return is_active
+
+ def user_exists(self):
+ exists = False
+ valid_pw = False
+ # Get user properties if they exist in json
+ rc, stdout, stderr = self.get_user_metadata()
+ if rc == 0:
+ exists = True
+ # User exists now compare password given with current hashed password stored in the user metadata.
+ if self.state != 'absent': # Don't need checking on remove user
+ stored_pwhash = json.loads(stdout)['privileged']['hashedPassword'][0]
+ if self._check_password(stored_pwhash):
+ valid_pw = True
+ return exists, valid_pw
+
+ def create_user(self):
+ record = self.create_json_record(create=True)
+ cmd = [self.module.get_bin_path('homectl', True)]
+ cmd.append('create')
+ cmd.append('--identity=-') # Read the user record from standard input.
+ return self.module.run_command(cmd, data=record)
+
+ def _hash_password(self, password):
+ method = crypt.METHOD_SHA512
+ salt = crypt.mksalt(method, rounds=10000)
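+        # Illustrative result: a crypt(3) string of the form
+        # '$6$rounds=10000$<salt>$<digest>', stored by homed under
+        # privileged.hashedPassword.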
+ pw_hash = crypt.crypt(password, salt)
+ return pw_hash
+
+ def _check_password(self, pwhash):
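+        # Hashing the candidate password with the full stored hash as the
+        # salt re-uses its embedded parameters, so matching passwords
+        # reproduce the stored hash exactly.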
+ hash = crypt.crypt(self.password, pwhash)
+ return pwhash == hash
+
+ def remove_user(self):
+ cmd = [self.module.get_bin_path('homectl', True)]
+ cmd.append('remove')
+ cmd.append(self.name)
+ return self.module.run_command(cmd)
+
+ def prepare_modify_user_command(self):
+ record = self.create_json_record()
+ cmd = [self.module.get_bin_path('homectl', True)]
+ cmd.append('update')
+ cmd.append(self.name)
+ cmd.append('--identity=-') # Read the user record from standard input.
+ # Resize disksize now resize = true
+ # This is not valid in user record (json) and requires it to be passed on command.
+ if self.disksize and self.resize:
+ cmd.append('--and-resize')
+ cmd.append('true')
+ self.result['changed'] = True
+ return cmd, record
+
+ def get_user_metadata(self):
+ cmd = [self.module.get_bin_path('homectl', True)]
+ cmd.append('inspect')
+ cmd.append(self.name)
+ cmd.append('-j')
+ cmd.append('--no-pager')
+ rc, stdout, stderr = self.module.run_command(cmd)
+ return rc, stdout, stderr
+
+ # Build up dictionary to jsonify for homectl commands.
+ def create_json_record(self, create=False):
+ record = {}
+ user_metadata = {}
+ self.result['changed'] = False
+ # Get the current user record if not creating a new user record.
+ if not create:
+ rc, user_metadata, stderr = self.get_user_metadata()
+ user_metadata = json.loads(user_metadata)
+ # Remove elements that are not meant to be updated from record.
+ # These are always part of the record when a user exists.
+ user_metadata.pop('signature', None)
+ user_metadata.pop('binding', None)
+ user_metadata.pop('status', None)
+ # Let last change Usec be updated by homed when command runs.
+ user_metadata.pop('lastChangeUSec', None)
+            # Only update the fields that were requested, leaving what is currently in the record intact.
+ record = user_metadata
+
+ record['userName'] = self.name
+ record['secret'] = {'password': [self.password]}
+
+ if create:
+ password_hash = self._hash_password(self.password)
+ record['privileged'] = {'hashedPassword': [password_hash]}
+ self.result['changed'] = True
+
+ if self.uid and self.gid and create:
+ record['uid'] = self.uid
+ record['gid'] = self.gid
+ self.result['changed'] = True
+
+ if self.memberof:
+ member_list = list(self.memberof.split(','))
+ if member_list != record.get('memberOf', [None]):
+ record['memberOf'] = member_list
+ self.result['changed'] = True
+
+ if self.realname:
+ if self.realname != record.get('realName'):
+ record['realName'] = self.realname
+ self.result['changed'] = True
+
+ # Cannot update storage unless we're creating a new user.
+ # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+ if self.storage and create:
+ record['storage'] = self.storage
+ self.result['changed'] = True
+
+ # Cannot update homedir unless we're creating a new user.
+ # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+ if self.homedir and create:
+ record['homeDirectory'] = self.homedir
+ self.result['changed'] = True
+
+ # Cannot update imagepath unless we're creating a new user.
+ # See 'Fields in the binding section' at https://systemd.io/USER_RECORD/
+ if self.imagepath and create:
+ record['imagePath'] = self.imagepath
+ self.result['changed'] = True
+
+ if self.disksize:
+ # Convert the human-readable size to bytes before comparing,
+ # so an unchanged size is not reported as a modification.
+ disksize_bytes = human_to_bytes(self.disksize)
+ if disksize_bytes != record.get('diskSize'):
+ record['diskSize'] = disksize_bytes
+ self.result['changed'] = True
+
+ if self.realm:
+ if self.realm != record.get('realm'):
+ record['realm'] = self.realm
+ self.result['changed'] = True
+
+ if self.email:
+ if self.email != record.get('emailAddress'):
+ record['emailAddress'] = self.email
+ self.result['changed'] = True
+
+ if self.location:
+ if self.location != record.get('location'):
+ record['location'] = self.location
+ self.result['changed'] = True
+
+ if self.iconname:
+ if self.iconname != record.get('iconName'):
+ record['iconName'] = self.iconname
+ self.result['changed'] = True
+
+ if self.skeleton:
+ if self.skeleton != record.get('skeletonDirectory'):
+ record['skeletonDirectory'] = self.skeleton
+ self.result['changed'] = True
+
+ if self.shell:
+ if self.shell != record.get('shell'):
+ record['shell'] = self.shell
+ self.result['changed'] = True
+
+ if self.umask:
+ if self.umask != record.get('umask'):
+ record['umask'] = self.umask
+ self.result['changed'] = True
+
+ if self.environment:
+ if self.environment != record.get('environment', [None]):
+ record['environment'] = list(self.environment.split(','))
+ self.result['changed'] = True
+
+ if self.timezone:
+ if self.timezone != record.get('timeZone'):
+ record['timeZone'] = self.timezone
+ self.result['changed'] = True
+
+ if self.locked:
+ if self.locked != record.get('locked'):
+ record['locked'] = self.locked
+ self.result['changed'] = True
+
+ if self.passwordhint:
+ if self.passwordhint != record.get('privileged', {}).get('passwordHint'):
+ record['privileged']['passwordHint'] = self.passwordhint
+ self.result['changed'] = True
+
+ if self.sshkeys:
+ if self.sshkeys != record.get('privileged', {}).get('sshAuthorizedKeys'):
+ record['privileged']['sshAuthorizedKeys'] = list(self.sshkeys.split(','))
+ self.result['changed'] = True
+
+ if self.language:
+ if self.language != record.get('preferredLanguage'):
+ record['preferredLanguage'] = self.language
+ self.result['changed'] = True
+
+ if self.notbefore:
+ if self.notbefore != record.get('notBeforeUSec'):
+ record['notBeforeUSec'] = self.notbefore
+ self.result['changed'] = True
+
+ if self.notafter:
+ if self.notafter != record.get('notAfterUSec'):
+ record['notAfterUSec'] = self.notafter
+ self.result['changed'] = True
+
+ if self.mountopts:
+ opts = list(self.mountopts.split(','))
+ if 'nosuid' in opts:
+ if record.get('mountNoSuid') is not True:
+ record['mountNoSuid'] = True
+ self.result['changed'] = True
+ else:
+ if record.get('mountNoSuid') is not False:
+ record['mountNoSuid'] = False
+ self.result['changed'] = True
+
+ if 'nodev' in opts:
+ if record.get('mountNoDevices') is not True:
+ record['mountNoDevices'] = True
+ self.result['changed'] = True
+ else:
+ if record.get('mountNoDevices') is not False:
+ record['mountNoDevices'] = False
+ self.result['changed'] = True
+
+ if 'noexec' in opts:
+ if record.get('mountNoExecute') is not True:
+ record['mountNoExecute'] = True
+ self.result['changed'] = True
+ else:
+ if record.get('mountNoExecute') is not False:
+ record['mountNoExecute'] = False
+ self.result['changed'] = True
+
+ return jsonify(record)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True, aliases=['user', 'username']),
+ password=dict(type='str', no_log=True),
+ storage=dict(type='str', choices=['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs']),
+ disksize=dict(type='str'),
+ resize=dict(type='bool', default=False),
+ realname=dict(type='str', aliases=['comment']),
+ realm=dict(type='str'),
+ email=dict(type='str'),
+ location=dict(type='str'),
+ iconname=dict(type='str'),
+ homedir=dict(type='path'),
+ imagepath=dict(type='path'),
+ uid=dict(type='int'),
+ gid=dict(type='int'),
+ umask=dict(type='int'),
+ environment=dict(type='str', aliases=['setenv']),
+ timezone=dict(type='str'),
+ memberof=dict(type='str', aliases=['groups']),
+ skeleton=dict(type='path', aliases=['skel']),
+ shell=dict(type='str'),
+ locked=dict(type='bool'),
+ passwordhint=dict(type='str', no_log=True),
+ sshkeys=dict(type='str', no_log=True),
+ language=dict(type='str'),
+ notbefore=dict(type='int'),
+ notafter=dict(type='int'),
+ mountopts=dict(type='str'),
+ ),
+ supports_check_mode=True,
+
+ required_if=[
+ ('state', 'present', ['password']),
+ ('resize', True, ['disksize']),
+ ]
+ )
+
+ homectl = Homectl(module)
+ homectl.result['state'] = homectl.state
+
+ # First make sure the systemd-homed service is active.
+ if not homectl.homed_service_active():
+ module.fail_json(msg='systemd-homed.service is not active')
+
+ # Handle removing a user
+ if homectl.state == 'absent':
+ user_exists, valid_pwhash = homectl.user_exists()
+ if user_exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, stdout, stderr = homectl.remove_user()
+ if rc != 0:
+ module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+ homectl.result['changed'] = True
+ homectl.result['rc'] = rc
+ homectl.result['msg'] = 'User %s removed!' % homectl.name
+ else:
+ homectl.result['changed'] = False
+ homectl.result['msg'] = 'User does not exist!'
+
+ # Handle adding a user
+ if homectl.state == 'present':
+ user_exists, valid_pwhash = homectl.user_exists()
+ if not user_exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, stdout, stderr = homectl.create_user()
+ if rc != 0:
+ module.fail_json(name=homectl.name, msg=stderr, rc=rc)
+ rc, user_metadata, stderr = homectl.get_user_metadata()
+ homectl.result['data'] = json.loads(user_metadata)
+ homectl.result['rc'] = rc
+ homectl.result['msg'] = 'User %s created!' % homectl.name
+ else:
+ if valid_pwhash:
+ # Build the update command first so result['changed'] gets computed; this also serves check mode.
+ cmd, record = homectl.prepare_modify_user_command()
+ else:
+ # The supplied password is wrong; fail with a message.
+ homectl.result['changed'] = False
+ homectl.result['msg'] = 'User exists but password is incorrect!'
+ module.fail_json(**homectl.result)
+
+ if module.check_mode:
+ module.exit_json(**homectl.result)
+
+ # Now actually modify the user if changed was set to true at any point.
+ if homectl.result['changed']:
+ rc, stdout, stderr = module.run_command(cmd, data=record)
+ if rc != 0:
+ module.fail_json(name=homectl.name, msg=stderr, rc=rc, changed=False)
+ rc, user_metadata, stderr = homectl.get_user_metadata()
+ homectl.result['data'] = json.loads(user_metadata)
+ homectl.result['rc'] = rc
+ homectl.result['msg'] = 'User %s modified' % homectl.name
+
+ module.exit_json(**homectl.result)
+
+
+if __name__ == '__main__':
+ main()
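
The password handling above is a classic crypt(3) round trip: _hash_password() hashes with a freshly generated salt, and _check_password() re-hashes the candidate password using the stored hash as the salt, which reproduces the stored hash only when the password matches. A minimal standalone sketch of that technique, not part of the module (the password is a placeholder; note the stdlib crypt module is Unix-only and was removed in Python 3.13):

    import crypt

    password = 'hunter2'  # placeholder secret for illustration

    # Hash with a fresh SHA-512 salt, as _hash_password() does.
    salt = crypt.mksalt(crypt.METHOD_SHA512, rounds=10000)
    stored_hash = crypt.crypt(password, salt)

    # Verify by re-hashing with the stored hash as the salt, as
    # _check_password() does; equality means the password matches.
    assert crypt.crypt(password, stored_hash) == stored_hash
    assert crypt.crypt('wrong-password', stored_hash) != stored_hash
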
diff --git a/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
new file mode 100644
index 000000000..820e4538e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/honeybadger_deployment.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Benjamin Curtis <benjamin.curtis@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: honeybadger_deployment
+author: "Benjamin Curtis (@stympy)"
+short_description: Notify Honeybadger.io about app deployments
+description:
+ - Notify Honeybadger.io about app deployments (see U(http://docs.honeybadger.io/article/188-deployment-tracking)).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - API token.
+ required: true
+ environment:
+ type: str
+ description:
+ - The environment name, typically 'production', 'staging', etc.
+ required: true
+ user:
+ type: str
+ description:
+ - The username of the person doing the deployment.
+ repo:
+ type: str
+ description:
+ - URL of the project repository.
+ revision:
+ type: str
+ description:
+ - A hash, number, tag, or other identifier showing what revision was deployed.
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ default: "https://api.honeybadger.io/v1/deploys"
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target URL will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+
+'''
+
+EXAMPLES = '''
+- name: Notify Honeybadger.io about an app deployment
+ community.general.honeybadger_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: b6826b8
+ repo: 'git@github.com:user/repo.git'
+'''
+
+RETURN = '''# '''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ user=dict(required=False),
+ repo=dict(required=False),
+ revision=dict(required=False),
+ url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ params = {}
+
+ if module.params["environment"]:
+ params["deploy[environment]"] = module.params["environment"]
+
+ if module.params["user"]:
+ params["deploy[local_username]"] = module.params["user"]
+
+ if module.params["repo"]:
+ params["deploy[repository]"] = module.params["repo"]
+
+ if module.params["revision"]:
+ params["deploy[revision]"] = module.params["revision"]
+
+ params["api_key"] = module.params["token"]
+
+ url = module.params.get('url')
+
+ # In check mode, exit reporting a change without contacting the API.
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 201:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
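
Stripped of the Ansible plumbing, the honeybadger_deployment module reduces to a single form-encoded POST that is considered successful on HTTP 201. A rough standard-library-only sketch of that request (the token and deploy values are placeholders):

    from urllib.parse import urlencode
    from urllib.request import Request, urlopen

    params = {
        'api_key': 'AAAAAA',  # placeholder token
        'deploy[environment]': 'staging',
        'deploy[local_username]': 'ansible',
        'deploy[revision]': 'b6826b8',
    }

    req = Request('https://api.honeybadger.io/v1/deploys',
                  data=urlencode(params).encode('utf-8'))
    response = urlopen(req)
    # fetch_url() in the module checks for this same 201 status.
    print(response.getcode() == 201)
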
diff --git a/ansible_collections/community/general/plugins/modules/hpilo_boot.py b/ansible_collections/community/general/plugins/modules/hpilo_boot.py
new file mode 100644
index 000000000..ace79a493
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hpilo_boot.py
@@ -0,0 +1,218 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: hpilo_boot
+author: Dag Wieers (@dagwieers)
+short_description: Boot system using specific media through HP iLO interface
+description:
+ - "This module boots a system through its HP iLO interface. The boot media
+ can be one of: cdrom, floppy, hdd, network or usb."
+ - This module requires the hpilo python module.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ type: str
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ default: Administrator
+ type: str
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ default: admin
+ type: str
+ media:
+ description:
+ - The boot media to boot the system from.
+ choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ]
+ type: str
+ image:
+ description:
+ - The URL of a cdrom, floppy or usb boot media image.
+ protocol://username:password@hostname:port/filename
+ - protocol is either 'http' or 'https'.
+ - username:password is optional.
+ - port is optional.
+ type: str
+ state:
+ description:
+ - The state of the boot media.
+ - "no_boot: Do not boot from the device"
+ - "boot_once: Boot from the device once and then notthereafter"
+ - "boot_always: Boot from the device each time the server is rebooted"
+ - "connect: Connect the virtual media device and set to boot_always"
+ - "disconnect: Disconnects the virtual media device and set to no_boot"
+ - "poweroff: Power off the server"
+ default: boot_once
+ type: str
+ choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ]
+ force:
+ description:
+ - Whether to force a reboot (even when the system is already booted).
+ - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running.
+ default: false
+ type: bool
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ type: str
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- python-hpilo
+notes:
+- To use a USB key image you need to specify floppy as boot media.
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ media: cdrom
+ image: http://some-web-server/iso/boot.iso
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+
+- name: Power off a server
+ community.general.hpilo_boot:
+ host: YOUR_ILO_HOST
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ state: poweroff
+ delegate_to: localhost
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+import time
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ media=dict(type='str', choices=['cdrom', 'floppy', 'rbsu', 'hdd', 'network', 'normal', 'usb']),
+ image=dict(type='str'),
+ state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']),
+ force=dict(type='bool', default=False),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ )
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ media = module.params['media']
+ image = module.params['image']
+ state = module.params['state']
+ force = module.params['force']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+ changed = False
+ status = {}
+ power_status = 'UNKNOWN'
+
+ if media and state in ('boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot'):
+
+ # Workaround for: Error communicating with iLO: Problem manipulating EV
+ try:
+ ilo.set_one_time_boot(media)
+ except hpilo.IloError:
+ time.sleep(60)
+ ilo.set_one_time_boot(media)
+
+ # TODO: Verify if image URL exists/works
+ if image:
+ ilo.insert_virtual_media(media, image)
+ changed = True
+
+ if media == 'cdrom':
+ ilo.set_vm_status('cdrom', state, True)
+ status = ilo.get_vm_status()
+ changed = True
+ elif media in ('floppy', 'usb'):
+ ilo.set_vf_status(state, True)
+ status = ilo.get_vf_status()
+ changed = True
+
+ # Only perform a boot when state is boot_once or boot_always, or in case we want to force a reboot
+ if state in ('boot_once', 'boot_always') or force:
+
+ power_status = ilo.get_host_power_status()
+
+ if not force and power_status == 'ON':
+ module.fail_json(msg='HP iLO (%s) reports that the server is already powered on!' % host)
+
+ if power_status == 'ON':
+ ilo.warm_boot_server()
+# ilo.cold_boot_server()
+ changed = True
+ else:
+ ilo.press_pwr_btn()
+# ilo.reset_server()
+# ilo.set_host_power(host_power=True)
+ changed = True
+
+ elif state == 'poweroff':
+
+ power_status = ilo.get_host_power_status()
+
+ if power_status != 'OFF':
+ ilo.hold_pwr_btn()
+# ilo.set_host_power(host_power=False)
+ changed = True
+
+ module.exit_json(changed=changed, power=power_status, **status)
+
+
+if __name__ == '__main__':
+ main()
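
The one non-obvious line in main() above is the ssl_version lookup: the option value is transformed into the name of an ssl PROTOCOL_* constant and resolved via getattr on hpilo.ssl (which appears to be the stdlib ssl module imported inside python-hpilo). A small sketch of just that string mapping, with no python-hpilo dependency:

    def protocol_name(ssl_version):
        # 'TLSv1_1' -> upper() -> 'TLSV1_1' -> replace('V', 'v') -> 'TLSv1_1'
        return 'PROTOCOL_' + ssl_version.upper().replace('V', 'v')

    for choice in ('SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2'):
        print(choice, '->', protocol_name(choice))
    # e.g. getattr(ssl, protocol_name('TLSv1_2')) resolves ssl.PROTOCOL_TLSv1_2
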
diff --git a/ansible_collections/community/general/plugins/modules/hpilo_info.py b/ansible_collections/community/general/plugins/modules/hpilo_info.py
new file mode 100644
index 000000000..cef6597e4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hpilo_info.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2012 Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hpilo_info
+author: Dag Wieers (@dagwieers)
+short_description: Gather information through an HP iLO interface
+description:
+- This module gathers information on a specific system using its HP iLO interface.
+ This information includes hardware and network related details useful
+ for provisioning (for example, MAC address and UUID).
+- This module requires the C(hpilo) python module.
+- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+- community.general.attributes
+- community.general.attributes.info_module
+options:
+ host:
+ description:
+ - The HP iLO hostname/address that is linked to the physical system.
+ type: str
+ required: true
+ login:
+ description:
+ - The login name to authenticate to the HP iLO interface.
+ type: str
+ default: Administrator
+ password:
+ description:
+ - The password to authenticate to the HP iLO interface.
+ type: str
+ default: admin
+ ssl_version:
+ description:
+ - Change the ssl_version used.
+ default: TLSv1
+ type: str
+ choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ]
+requirements:
+- hpilo
+notes:
+- This module ought to be run from a system that can access the HP iLO
+ interface directly, either by using C(local_action) or using C(delegate_to).
+'''
+
+EXAMPLES = r'''
+- name: Gather facts from an HP iLO interface only if the system is an HP server
+ community.general.hpilo_info:
+ host: YOUR_ILO_ADDRESS
+ login: YOUR_ILO_LOGIN
+ password: YOUR_ILO_PASSWORD
+ when: cmdb_hwmodel.startswith('HP ')
+ delegate_to: localhost
+ register: results
+
+- ansible.builtin.fail:
+ msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !'
+ when: cmdb_serialno != results.hw_system_serial
+'''
+
+RETURN = r'''
+# Typical output of hpilo_info for a physical system
+hw_bios_date:
+ description: BIOS date
+ returned: always
+ type: str
+ sample: 05/05/2011
+
+hw_bios_version:
+ description: BIOS version
+ returned: always
+ type: str
+ sample: P68
+
+hw_ethX:
+ description: Interface information (for each interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:55
+ macaddress_dash: 00-11-22-33-44-55
+
+hw_eth_ilo:
+ description: Interface information (for the iLO network interface)
+ returned: always
+ type: dict
+ sample:
+ - macaddress: 00:11:22:33:44:BA
+ macaddress_dash: 00-11-22-33-44-BA
+
+hw_product_name:
+ description: Product name
+ returned: always
+ type: str
+ sample: ProLiant DL360 G7
+
+hw_product_uuid:
+ description: Product UUID
+ returned: always
+ type: str
+ sample: ef50bac8-2845-40ff-81d9-675315501dac
+
+hw_system_serial:
+ description: System serial number
+ returned: always
+ type: str
+ sample: ABC12345D6
+
+hw_uuid:
+ description: Hardware UUID
+ returned: always
+ type: str
+ sample: 123456ABC78901D2
+
+host_power_status:
+ description:
+ - Power status of host.
+ - Will be one of C(ON), C(OFF) and C(UNKNOWN).
+ returned: always
+ type: str
+ sample: "ON"
+ version_added: 3.5.0
+'''
+
+import re
+import traceback
+import warnings
+
+HPILO_IMP_ERR = None
+try:
+ import hpilo
+ HAS_HPILO = True
+except ImportError:
+ HPILO_IMP_ERR = traceback.format_exc()
+ HAS_HPILO = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+# Suppress warnings from hpilo
+warnings.simplefilter('ignore')
+
+
+def parse_flat_interface(entry, non_numeric='hw_eth_ilo'):
+ try:
+ infoname = 'hw_eth' + str(int(entry['Port']) - 1)
+ except Exception:
+ infoname = non_numeric
+
+ info = {
+ 'macaddress': entry['MAC'].replace('-', ':'),
+ 'macaddress_dash': entry['MAC']
+ }
+ return (infoname, info)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ login=dict(type='str', default='Administrator'),
+ password=dict(type='str', default='admin', no_log=True),
+ ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_HPILO:
+ module.fail_json(msg=missing_required_lib('python-hpilo'), exception=HPILO_IMP_ERR)
+
+ host = module.params['host']
+ login = module.params['login']
+ password = module.params['password']
+ ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
+
+ ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
+
+ info = {
+ 'module_hw': True,
+ }
+
+ # TODO: Count number of CPUs, DIMMs and total memory
+ try:
+ data = ilo.get_host_data()
+ power_state = ilo.get_host_power_status()
+ except hpilo.IloCommunicationError as e:
+ module.fail_json(msg=to_native(e))
+
+ for entry in data:
+ if 'type' not in entry:
+ continue
+ elif entry['type'] == 0: # BIOS Information
+ info['hw_bios_version'] = entry['Family']
+ info['hw_bios_date'] = entry['Date']
+ elif entry['type'] == 1: # System Information
+ info['hw_uuid'] = entry['UUID']
+ info['hw_system_serial'] = entry['Serial Number'].rstrip()
+ info['hw_product_name'] = entry['Product Name']
+ info['hw_product_uuid'] = entry['cUUID']
+ elif entry['type'] == 209: # Embedded NIC MAC Assignment
+ if 'fields' in entry:
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_eth' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_eth_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ else:
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+ elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info (unreachable: shadowed by the type 209 branch above)
+ for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
+ if name.startswith('Port'):
+ try:
+ infoname = 'hw_iscsi' + str(int(value) - 1)
+ except Exception:
+ infoname = 'hw_iscsi_ilo'
+ elif name.startswith('MAC'):
+ info[infoname] = {
+ 'macaddress': value.replace('-', ':'),
+ 'macaddress_dash': value
+ }
+ elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
+ (infoname, entry_info) = parse_flat_interface(entry, 'hw_eth_ilo')
+ info[infoname] = entry_info
+
+ # Collect health (RAM/CPU data)
+ health = ilo.get_embedded_health()
+ info['hw_health'] = health
+
+ memory_details_summary = health.get('memory', {}).get('memory_details_summary')
+ # RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
+ if memory_details_summary:
+ info['hw_memory_details_summary'] = memory_details_summary
+ info['hw_memory_total'] = 0
+ for cpu, details in memory_details_summary.items():
+ cpu_total_memory_size = details.get('total_memory_size')
+ if cpu_total_memory_size:
+ ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
+ if ram:
+ if ram.group(2) == 'GB':
+ info['hw_memory_total'] = info['hw_memory_total'] + int(ram.group(1))
+
+ # Reformat into a text-friendly format
+ info['hw_memory_total'] = "{0} GB".format(info['hw_memory_total'])
+
+ # Report host state
+ info['host_power_status'] = power_state or 'UNKNOWN'
+
+ module.exit_json(**info)
+
+
+if __name__ == '__main__':
+ main()
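
The memory totaling at the end of main() parses each per-CPU 'total_memory_size' string (such as '32 GB') with a regex and sums the GB values. A standalone sketch of that loop with made-up health data:

    import re

    # Hypothetical shape of health['memory']['memory_details_summary'].
    memory_details_summary = {
        'cpu_1': {'total_memory_size': '32 GB'},
        'cpu_2': {'total_memory_size': '32 GB'},
    }

    total = 0
    for cpu, details in memory_details_summary.items():
        ram = re.search(r'(\d+)\s+(\w+)', details.get('total_memory_size', ''))
        if ram and ram.group(2) == 'GB':
            total += int(ram.group(1))

    print('{0} GB'.format(total))  # -> 64 GB
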
diff --git a/ansible_collections/community/general/plugins/modules/hponcfg.py b/ansible_collections/community/general/plugins/modules/hponcfg.py
new file mode 100644
index 000000000..612a20d92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hponcfg.py
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: hponcfg
+author: Dag Wieers (@dagwieers)
+short_description: Configure HP iLO interface using hponcfg
+description:
+ - This module configures the HP iLO interface using hponcfg.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - The XML file as accepted by hponcfg.
+ required: true
+ aliases: ['src']
+ type: path
+ minfw:
+ description:
+ - The minimum firmware level needed.
+ required: false
+ type: str
+ executable:
+ description:
+ - Path to the hponcfg executable (the default C(hponcfg) is looked up via $PATH).
+ default: hponcfg
+ type: str
+ verbose:
+ description:
+ - Run hponcfg in verbose mode (-v).
+ default: false
+ type: bool
+requirements:
+ - hponcfg tool
+notes:
+ - You need a working hponcfg on the target system.
+'''
+
+EXAMPLES = r'''
+- name: Example hponcfg configuration XML
+ ansible.builtin.copy:
+ content: |
+ <ribcl VERSION="2.0">
+ <login USER_LOGIN="user" PASSWORD="password">
+ <rib_info MODE="WRITE">
+ <mod_global_settings>
+ <session_timeout value="0"/>
+ <ssh_status value="Y"/>
+ <ssh_port value="22"/>
+ <serial_cli_status value="3"/>
+ <serial_cli_speed value="5"/>
+ </mod_global_settings>
+ </rib_info>
+ </login>
+ </ribcl>
+ dest: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO using enable-ssh.xml
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+
+- name: Configure HP iLO on VMware ESXi hypervisor
+ community.general.hponcfg:
+ src: /tmp/enable-ssh.xml
+ executable: /opt/hp/tools/hponcfg
+'''
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+class HPOnCfg(ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ src=dict(type='path', required=True, aliases=['path']),
+ minfw=dict(type='str'),
+ executable=dict(default='hponcfg', type='str'),
+ verbose=dict(default=False, type='bool'),
+ )
+ )
+ command_args_formats = dict(
+ src=cmd_runner_fmt.as_opt_val("-f"),
+ verbose=cmd_runner_fmt.as_bool("-v"),
+ minfw=cmd_runner_fmt.as_opt_val("-m"),
+ )
+
+ def __run__(self):
+ runner = CmdRunner(
+ self.module,
+ self.vars.executable,
+ self.command_args_formats,
+ check_rc=True,
+ )
+ runner(['src', 'verbose', 'minfw']).run()
+
+ # Consider every action a change (not idempotent yet!)
+ self.changed = True
+
+
+def main():
+ HPOnCfg.execute()
+
+
+if __name__ == '__main__':
+ main()
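
The CmdRunner declaration above maps each option to command-line arguments: as_opt_val('-f') renders '-f <path>', as_bool('-v') renders a bare '-v' when true, and as_opt_val('-m') renders '-m <version>'. Roughly the equivalent invocation with plain subprocess, shown for illustration only (the XML path and firmware level are placeholders):

    import subprocess

    executable = 'hponcfg'
    src = '/tmp/enable-ssh.xml'  # placeholder XML path
    verbose = True
    minfw = '2.30'               # hypothetical minimum firmware level

    cmd = [executable, '-f', src]      # src=cmd_runner_fmt.as_opt_val('-f')
    if verbose:
        cmd.append('-v')               # verbose=cmd_runner_fmt.as_bool('-v')
    if minfw:
        cmd.extend(['-m', minfw])      # minfw=cmd_runner_fmt.as_opt_val('-m')

    subprocess.run(cmd, check=True)    # check=True mirrors check_rc=True
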
diff --git a/ansible_collections/community/general/plugins/modules/htpasswd.py b/ansible_collections/community/general/plugins/modules/htpasswd.py
new file mode 100644
index 000000000..180b02073
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/htpasswd.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Nimbis Services, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: htpasswd
+short_description: Manage user files for basic authentication
+description:
+ - Add and remove username/password entries in a password file using htpasswd.
+ - This is used by web servers such as Apache and Nginx for basic authentication.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ type: path
+ required: true
+ aliases: [ dest, destfile ]
+ description:
+ - Path to the file that contains the usernames and passwords.
+ name:
+ type: str
+ required: true
+ aliases: [ username ]
+ description:
+ - User name to add or remove.
+ password:
+ type: str
+ required: false
+ description:
+ - Password associated with user.
+ - Must be specified if user does not exist yet.
+ crypt_scheme:
+ type: str
+ required: false
+ default: "apr_md5_crypt"
+ description:
+ - Encryption scheme to be used. As well as the four choices listed
+ here, you can also use any other hash supported by passlib, such as
+ C(portable_apache22) and C(host_apache24); or C(md5_crypt) and C(sha256_crypt),
+ which are Linux passwd hashes. Only some schemes in addition to
+ the four choices below will be compatible with Apache or Nginx, and
+ supported schemes depend on passlib version and its dependencies.
+ - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme).
+ - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext).'
+ state:
+ type: str
+ required: false
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the user entry should be present or not.
+ create:
+ required: false
+ type: bool
+ default: true
+ description:
+ - Used with I(state=present). If set to C(true), the file will be created
+ if it does not already exist. If set to C(false), the module will fail if
+ the file does not exist.
+notes:
+ - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
+ - "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
+ - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
+requirements: [ passlib>=1.6 ]
+author: "Ansible Core Team"
+extends_documentation_fragment:
+ - files
+ - community.general.attributes
+'''
+
+EXAMPLES = """
+- name: Add a user to a password file and ensure permissions are set
+ community.general.htpasswd:
+ path: /etc/nginx/passwdfile
+ name: janedoe
+ password: '9s36?;fyNp'
+ owner: root
+ group: www-data
+ mode: '0640'
+
+- name: Remove a user from a password file
+ community.general.htpasswd:
+ path: /etc/apache2/passwdfile
+ name: foobar
+ state: absent
+
+- name: Add a user to a password file suitable for use by libpam-pwdfile
+ community.general.htpasswd:
+ path: /etc/mail/passwords
+ name: alex
+ password: oedu2eGh
+ crypt_scheme: md5_crypt
+"""
+
+
+import os
+import tempfile
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+PASSLIB_IMP_ERR = None
+try:
+ from passlib.apache import HtpasswdFile, htpasswd_context
+ from passlib.context import CryptContext
+ import passlib
+except ImportError:
+ PASSLIB_IMP_ERR = traceback.format_exc()
+ passlib_installed = False
+else:
+ passlib_installed = True
+
+apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
+
+
+def create_missing_directories(dest):
+ destpath = os.path.dirname(dest)
+ if not os.path.exists(destpath):
+ os.makedirs(destpath)
+
+
+def present(dest, username, password, crypt_scheme, create, check_mode):
+ """ Ensures user is present
+
+ Returns (msg, changed) """
+ if crypt_scheme in apache_hashes:
+ context = htpasswd_context
+ else:
+ context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
+ if not os.path.exists(dest):
+ if not create:
+ raise ValueError('Destination %s does not exist' % dest)
+ if check_mode:
+ return ("Create %s" % dest, True)
+ create_missing_directories(dest)
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Created %s and added %s" % (dest, username), True)
+ else:
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
+ else:
+ ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
+
+ found = None
+ if getattr(ht, 'check_password', None):
+ found = ht.check_password(username, password)
+ else:
+ found = ht.verify(username, password)
+
+ if found:
+ return ("%s already present" % username, False)
+ else:
+ if not check_mode:
+ if getattr(ht, 'set_password', None):
+ ht.set_password(username, password)
+ else:
+ ht.update(username, password)
+ ht.save()
+ return ("Add/update %s" % username, True)
+
+
+def absent(dest, username, check_mode):
+ """ Ensures user is absent
+
+ Returns (msg, changed) """
+ if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
+ ht = HtpasswdFile(dest, new=False)
+ else:
+ ht = HtpasswdFile(dest)
+
+ if username not in ht.users():
+ return ("%s not present" % username, False)
+ else:
+ if not check_mode:
+ ht.delete(username)
+ ht.save()
+ return ("Remove %s" % username, True)
+
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_fs_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+
+def main():
+ arg_spec = dict(
+ path=dict(type='path', required=True, aliases=["dest", "destfile"]),
+ name=dict(type='str', required=True, aliases=["username"]),
+ password=dict(type='str', required=False, default=None, no_log=True),
+ crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"),
+ state=dict(type='str', required=False, default="present", choices=["present", "absent"]),
+ create=dict(type='bool', default=True),
+
+ )
+ module = AnsibleModule(argument_spec=arg_spec,
+ add_file_common_args=True,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ username = module.params['name']
+ password = module.params['password']
+ crypt_scheme = module.params['crypt_scheme']
+ state = module.params['state']
+ create = module.params['create']
+ check_mode = module.check_mode
+
+ if not passlib_installed:
+ module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR)
+
+ # Check the file for blank lines in an effort to avoid a "need more than 1 value to unpack" error.
+ try:
+ f = open(path, "r")
+ except IOError:
+ # No preexisting file to remove blank lines from
+ f = None
+ else:
+ try:
+ lines = f.readlines()
+ finally:
+ f.close()
+
+ # Rewriting the file reports a change, so only rewrite it if it actually contains blank lines
+ strip = False
+ for line in lines:
+ if not line.strip():
+ strip = True
+ break
+
+ if strip:
+ # In check mode, write to a temporary file instead of the real one
+ if check_mode:
+ temp = tempfile.NamedTemporaryFile()
+ path = temp.name
+ f = open(path, "w")
+ try:
+ for line in lines:
+ if line.strip():
+ f.write(line)
+ finally:
+ f.close()
+
+ try:
+ if state == 'present':
+ (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
+ elif state == 'absent':
+ if not os.path.exists(path):
+ module.exit_json(msg="%s not present" % username,
+ warnings="%s does not exist" % path, changed=False)
+ (msg, changed) = absent(path, username, check_mode)
+ else:
+ module.fail_json(msg="Invalid state: %s" % state)
+
+ check_file_attrs(module, changed, msg)
+ module.exit_json(msg=msg, changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
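
Underneath the passlib version juggling, the htpasswd module wraps a small passlib (>= 1.6) surface: HtpasswdFile plus set_password(), check_password(), delete() and save(). A minimal sketch of those calls (the file path, username and password are placeholders):

    from passlib.apache import HtpasswdFile

    path = '/tmp/passwdfile'  # placeholder destination

    ht = HtpasswdFile(path, new=True, default_scheme='apr_md5_crypt')
    ht.set_password('janedoe', 's3cret')  # add or update an entry
    ht.save()

    ht = HtpasswdFile(path, new=False)
    print(ht.check_password('janedoe', 's3cret'))  # True -> "already present"
    ht.delete('janedoe')                           # the state=absent path
    ht.save()
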
diff --git a/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
new file mode 100644
index 000000000..434db242f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_ecs_instance.py
@@ -0,0 +1,2142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_ecs_instance
+description:
+ - Instance management.
+short_description: Creates a resource of Ecs/Instance in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operations.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+ - The timeouts for create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+ - The timeouts for update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+ - The timeouts for delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ required: true
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ required: true
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the ECS name. Value requirements: consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), and periods (.).
+ type: str
+ required: true
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. Constraints: the
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ elements: dict
+ required: true
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ required: true
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ required: true
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ required: true
+ suboptions:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1 and
+ uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ required: true
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements: the password
+ consists of 8 to 26 characters and must contain at least three of the
+ following character types 'uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ required: false
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ required: true
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ required: false
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be
+ assigned.
+ type: str
+ required: false
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this
+ parameter is left blank, the default security group is bound to
+ the ECS.
+ type: list
+ elements: str
+ required: false
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ required: false
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ required: false
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ required: false
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with
+ base64. The maximum size of the content to be injected (before
+ encoding) is 32 KB. For Linux ECSs, this parameter does not take
+ effect when adminPass is used.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create an ecs instance
+- name: Create a VPC
+ community.general.hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create a subnet
+ community.general.hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create an EIP
+ community.general.hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ register: eip
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ register: disk
+- name: Create an instance
+ community.general.hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the name of the AZ where the ECS is located.
+ type: str
+ returned: success
+ flavor_name:
+ description:
+ - Specifies the name of the system flavor.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the ID of the system image.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the ECS name. Value requirements "Consists of 1 to 64
+ characters, including letters, digits, underscores C(_), hyphens
+ (-), periods (.)".
+ type: str
+ returned: success
+ nics:
+ description:
+ - Specifies the NIC information of the ECS. The
+ network of the NIC must belong to the VPC specified by vpc_id. A
+ maximum of 12 NICs can be attached to an ECS.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address of the NIC. The value is an IPv4
+ address. Its value must be an unused IP
+ address in the network segment of the subnet.
+ type: str
+ returned: success
+ subnet_id:
+ description:
+ - Specifies the ID of subnet.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID corresponding to the IP address.
+ type: str
+ returned: success
+ root_volume:
+ description:
+ - Specifies the configuration of the ECS's system disks.
+ type: dict
+ returned: success
+ contains:
+ volume_type:
+ description:
+ - Specifies the ECS system disk type.
+ - SATA is common I/O disk type.
+ - SAS is high I/O disk type.
+ - SSD is ultra-high I/O disk type.
+ - co-p1 is high I/O (performance-optimized I) disk type.
+ - uh-l1 is ultra-high I/O (latency-optimized) disk type.
+ - Note that for HANA, HL1, and HL2 ECSs, you should use co-p1 and
+ uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks.
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the system disk size, in GB. The value range is
+ 1 to 1024. The system disk size must be
+ greater than or equal to the minimum system disk size
+ supported by the image (min_disk attribute of the image).
+ If this parameter is not specified or is set to 0, the
+ default system disk size is the minimum value of the
+ system disk in the image (min_disk attribute of the
+ image).
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID or ID of the original data disk
+ contained in the full-ECS image.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the ECS belongs.
+ type: str
+ returned: success
+ admin_pass:
+ description:
+ - Specifies the initial login password of the administrator account
+ for logging in to an ECS using password authentication. The Linux
+ administrator is root, and the Windows administrator is
+ Administrator. Password complexity requirements: the password
+ consists of 8 to 26 characters and must contain at least three of the
+ following character types "uppercase letters, lowercase letters,
+ digits, and special characters (!@$%^-_=+[{}]:,./?)". The password
+ cannot contain the username or the username in reverse. The
+ Windows ECS password cannot contain the username, the username in
+ reverse, or more than two consecutive characters in the username.
+ type: str
+ returned: success
+ data_volumes:
+ description:
+ - Specifies the data disks of ECS instance.
+ type: list
+ returned: success
+ contains:
+ volume_id:
+ description:
+ - Specifies the disk ID.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the disk device name.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the description of an ECS, which is a null string by
+ default. Can contain a maximum of 85 characters. Cannot contain
+ special characters, such as < and >.
+ type: str
+ returned: success
+ eip_id:
+ description:
+ - Specifies the ID of the elastic IP address assigned to the ECS.
+ Only elastic IP addresses in the DOWN state can be assigned.
+ type: str
+ returned: success
+ enable_auto_recovery:
+ description:
+ - Specifies whether automatic recovery is enabled on the ECS.
+ type: bool
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the ID of the enterprise project to which the ECS
+ belongs.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the security groups of the ECS. If this parameter is left
+ blank, the default security group is bound to the ECS.
+ type: list
+ returned: success
+ server_metadata:
+ description:
+ - Specifies the metadata of ECS to be created.
+ type: dict
+ returned: success
+ server_tags:
+ description:
+ - Specifies the tags of an ECS. When you create ECSs, one ECS
+ supports up to 10 tags.
+ type: dict
+ returned: success
+ ssh_key_name:
+ description:
+ - Specifies the name of the SSH key used for logging in to the ECS.
+ type: str
+ returned: success
+ user_data:
+ description:
+ - Specifies the user data to be injected during the ECS creation
+ process. Text, text files, and gzip files can be injected.
+ The content to be injected must be encoded with base64. The maximum
+ size of the content to be injected (before encoding) is 32 KB. For
+ Linux ECSs, this parameter does not take effect when adminPass is
+ used.
+ type: str
+ returned: success
+ config_drive:
+ description:
+ - Specifies the configuration driver.
+ type: str
+ returned: success
+ created:
+ description:
+ - Specifies the time when an ECS was created.
+ type: str
+ returned: success
+ disk_config_type:
+ description:
+ - Specifies the disk configuration type. MANUAL means the image
+ space is not expanded. AUTO means the image space of the system disk
+ is expanded to match the flavor.
+ type: str
+ returned: success
+ host_name:
+ description:
+ - Specifies the host name of the ECS.
+ type: str
+ returned: success
+ image_name:
+ description:
+ - Specifies the image name of the ECS.
+ type: str
+ returned: success
+ power_state:
+ description:
+ - Specifies the power status of the ECS.
+ type: int
+ returned: success
+ server_alias:
+ description:
+ - Specifies the ECS alias.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT,
+ REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR,
+ and DELETED.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ flavor_name=dict(type='str', required=True),
+ image_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ nics=dict(
+ type='list', required=True, elements='dict',
+ options=dict(
+ ip_address=dict(type='str', required=True),
+ subnet_id=dict(type='str', required=True)
+ ),
+ ),
+ root_volume=dict(type='dict', required=True, options=dict(
+ volume_type=dict(type='str', required=True),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ )),
+ vpc_id=dict(type='str', required=True),
+ admin_pass=dict(type='str', no_log=True),
+ data_volumes=dict(type='list', elements='dict', options=dict(
+ volume_id=dict(type='str', required=True),
+ device=dict(type='str')
+ )),
+ description=dict(type='str'),
+ eip_id=dict(type='str'),
+ enable_auto_recovery=dict(type='bool'),
+ enterprise_project_id=dict(type='str'),
+ security_groups=dict(type='list', elements='str'),
+ server_metadata=dict(type='dict'),
+ server_tags=dict(type='dict'),
+ ssh_key_name=dict(type='str'),
+ user_data=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "ecs")
+
+ try:
+ _init(config)
+ is_exist = module.params['id']
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params['id']:
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "admin_pass": module.params.get("admin_pass"),
+ "availability_zone": module.params.get("availability_zone"),
+ "data_volumes": module.params.get("data_volumes"),
+ "description": module.params.get("description"),
+ "eip_id": module.params.get("eip_id"),
+ "enable_auto_recovery": module.params.get("enable_auto_recovery"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "flavor_name": module.params.get("flavor_name"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "nics": module.params.get("nics"),
+ "root_volume": module.params.get("root_volume"),
+ "security_groups": module.params.get("security_groups"),
+ "server_metadata": module.params.get("server_metadata"),
+ "server_tags": module.params.get("server_tags"),
+ "ssh_key_name": module.params.get("ssh_key_name"),
+ "user_data": module.params.get("user_data"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait(config, r, client, timeout)
+
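+    # Pick out the sub job that created the server: the inner for/else
+    # falls through to "obj = item" only when every sub_job_identity key
+    # matches, and the outer for/else raises when no sub job matches.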
+ sub_job_identity = {
+ "job_type": "createSingleServer",
+ }
+ for item in navigate_value(obj, ["entities", "sub_jobs"]):
+ for k, v in sub_job_identity.items():
+ if item[k] != v:
+ break
+ else:
+ obj = item
+ break
+ else:
+ raise Exception("Can't find the sub job")
+ module.params['id'] = navigate_value(obj, ["entities", "server_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ client = config.client(get_region(module), "ecs", "project")
+
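+    # Converge in sequence: drop NICs that are no longer wanted, set auto
+    # recovery, attach new NICs, then detach and attach data volumes,
+    # waiting on each job-based request.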
+ params = build_delete_nics_parameters(expect_state)
+ params1 = build_delete_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_delete_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ params = build_set_auto_recovery_parameters(expect_state)
+ params1 = build_set_auto_recovery_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_set_auto_recovery_request(module, params, client)
+
+ params = build_attach_nics_parameters(expect_state)
+ params1 = build_attach_nics_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ r = send_attach_nics_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+ multi_invoke_delete_volume(config, expect_state, client, timeout)
+
+ multi_invoke_attach_data_disk(config, expect_state, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_delete_parameters(opts)
+ if params:
+ r = send_delete_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ preprocess_read_response(r)
+ res["read"] = fill_read_resp_body(r)
+
+ r = send_read_auto_recovery_request(module, client)
+ res["read_auto_recovery"] = fill_read_auto_recovery_resp_body(r)
+
+ return res, None
+
+
+def preprocess_read_response(resp):
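+    # Split the boot disk (bootIndex == "0") out of the attached-volumes
+    # list into resp["root_volume"], removing it from the list with a
+    # swap-with-last and pop.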
+ v = resp.get("os-extended-volumes:volumes_attached")
+ if v and isinstance(v, list):
+ for i in range(len(v)):
+ if v[i].get("bootIndex") == "0":
+ root_volume = v[i]
+
+ if (i + 1) != len(v):
+ v[i] = v[-1]
+
+ v.pop()
+
+ resp["root_volume"] = root_volume
+ break
+
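+    # Regroup the per-network address lists by NIC: fixed IPs are keyed
+    # by OS-EXT-IPS:port_id and a floating IP on the same port is folded
+    # in as eip_address.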
+ v = resp.get("addresses")
+ if v:
+ rv = {}
+ eips = []
+ for val in v.values():
+ for item in val:
+ if item["OS-EXT-IPS:type"] == "floating":
+ eips.append(item)
+ else:
+ rv[item["OS-EXT-IPS:port_id"]] = item
+
+ for item in eips:
+ k = item["OS-EXT-IPS:port_id"]
+ if k in rv:
+ rv[k]["eip_address"] = item.get("addr", "")
+ else:
+ rv[k] = item
+ item["eip_address"] = item.get("addr", "")
+ item["addr"] = ""
+
+ resp["address"] = rv.values()
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ adjust_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
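+    # The "v or v in [False, 0]" test keeps meaningful falsy values
+    # (False and 0) in the query string while dropping None and empty
+    # strings.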
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "enterprise_project_id=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={offset}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "ecs", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "cloudservers/detail" + query_link
+
+ result = []
+ p = {'offset': 1}
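+    # Page through the list API, stopping early once more than one match
+    # is found: the caller only needs to know whether the lookup is
+    # ambiguous.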
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ adjust_list_resp(identity_obj, item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['offset'] += 1
+
+ return result
+
+
+def build_delete_nics_parameters(opts):
+ params = dict()
+
+ v = expand_delete_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_delete_nics_nics(d, array_index):
+ cv = d["current_state"].get("nics")
+ if not cv:
+ return None
+
+ val = cv
+
+ ev = d.get("nics")
+ if ev:
+ m = [item.get("ip_address") for item in ev]
+ val = [item for item in cv if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("port_id")
+ if not is_empty_value(v):
+ transformed["id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_delete_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics/delete")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_set_auto_recovery_parameters(opts):
+ params = dict()
+
+ v = expand_set_auto_recovery_support_auto_recovery(opts, None)
+ if v is not None:
+ params["support_auto_recovery"] = v
+
+ return params
+
+
+def expand_set_auto_recovery_support_auto_recovery(d, array_index):
+ v = navigate_value(d, ["enable_auto_recovery"], None)
+ return None if v is None else str(v).lower()
+
+
+def send_set_auto_recovery_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(set_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_pass"], None)
+ if not is_empty_value(v):
+ params["adminPass"] = v
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = expand_create_extendparam(opts, None)
+ if not is_empty_value(v):
+ params["extendparam"] = v
+
+ v = navigate_value(opts, ["flavor_name"], None)
+ if not is_empty_value(v):
+ params["flavorRef"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = navigate_value(opts, ["ssh_key_name"], None)
+ if not is_empty_value(v):
+ params["key_name"] = v
+
+ v = navigate_value(opts, ["server_metadata"], None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ v = expand_create_root_volume(opts, None)
+ if not is_empty_value(v):
+ params["root_volume"] = v
+
+ v = expand_create_security_groups(opts, None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ v = expand_create_server_tags(opts, None)
+ if not is_empty_value(v):
+ params["server_tags"] = v
+
+ v = navigate_value(opts, ["user_data"], None)
+ if not is_empty_value(v):
+ params["user_data"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpcid"] = v
+
+ if not params:
+ return params
+
+ params = {"server": params}
+
+ return params
+
+
+def expand_create_extendparam(d, array_index):
+ r = dict()
+
+ r["chargingMode"] = 0
+
+ v = navigate_value(d, ["enterprise_project_id"], array_index)
+ if not is_empty_value(v):
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(d, ["enable_auto_recovery"], array_index)
+ if not is_empty_value(v):
+ r["support_auto_recovery"] = v
+
+ return r
+
+
+def expand_create_nics(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+ v = navigate_value(
+ d, ["nics"], new_ai)
+
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_ai["nics"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["nics", "ip_address"], new_ai)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["nics", "subnet_id"], new_ai)
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["eip_id"], array_index)
+ if not is_empty_value(v):
+ r["id"] = v
+
+ return r
+
+
+def expand_create_root_volume(d, array_index):
+ r = dict()
+
+ v = expand_create_root_volume_extendparam(d, array_index)
+ if not is_empty_value(v):
+ r["extendparam"] = v
+
+ v = navigate_value(d, ["root_volume", "size"], array_index)
+ if not is_empty_value(v):
+ r["size"] = v
+
+ v = navigate_value(d, ["root_volume", "volume_type"], array_index)
+ if not is_empty_value(v):
+ r["volumetype"] = v
+
+ return r
+
+
+def expand_create_root_volume_extendparam(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["root_volume", "snapshot_id"], array_index)
+ if not is_empty_value(v):
+ r["snapshotId"] = v
+
+ return r
+
+
+def expand_create_security_groups(d, array_index):
+ v = d.get("security_groups")
+ if not v:
+ return None
+
+ return [{"id": i} for i in v]
+
+
+def expand_create_server_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [{"key": k, "value": v1} for k, v1 in v.items()]
+
+
+def send_create_request(module, params, client):
+ url = "cloudservers"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_nics_parameters(opts):
+ params = dict()
+
+ v = expand_attach_nics_nics(opts, None)
+ if not is_empty_value(v):
+ params["nics"] = v
+
+ return params
+
+
+def expand_attach_nics_nics(d, array_index):
+ ev = d.get("nics")
+ if not ev:
+ return None
+
+ val = ev
+
+ cv = d["current_state"].get("nics")
+ if cv:
+ m = [item.get("ip_address") for item in cv]
+ val = [item for item in ev if item.get("ip_address") not in m]
+
+ r = []
+ for item in val:
+ transformed = dict()
+
+ v = item.get("ip_address")
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = item.get("subnet_id")
+ if not is_empty_value(v):
+ transformed["subnet_id"] = v
+
+ if transformed:
+ r.append(transformed)
+
+ return r
+
+
+def send_attach_nics_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/nics")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_nics), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_volume_request(module, params, client, info):
+ path_parameters = {
+ "volume_id": ["volume_id"],
+ }
+ data = dict((key, navigate_value(info, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data)
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete_volume), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_attach_data_disk_parameters(opts, array_index):
+ params = dict()
+
+ v = expand_attach_data_disk_volume_attachment(opts, array_index)
+ if not is_empty_value(v):
+ params["volumeAttachment"] = v
+
+ return params
+
+
+def expand_attach_data_disk_volume_attachment(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["data_volumes", "device"], array_index)
+ if not is_empty_value(v):
+ r["device"] = v
+
+ v = navigate_value(d, ["data_volumes", "volume_id"], array_index)
+ if not is_empty_value(v):
+ r["volumeId"] = v
+
+ return r
+
+
+def send_attach_data_disk_request(module, params, client):
+ url = build_path(module, "cloudservers/{id}/attachvolume")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(attach_data_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_delete_parameters(opts):
+ params = dict()
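+    # Keep the bound EIP and attached volumes when the server is deleted;
+    # only the server entry itself goes into the delete request.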
+
+ params["delete_publicip"] = False
+
+ params["delete_volume"] = False
+
+ v = expand_delete_servers(opts, None)
+ if not is_empty_value(v):
+ params["servers"] = v
+
+ return params
+
+
+def expand_delete_servers(d, array_index):
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ req = []
+
+    transformed = dict()
+
+    v = expand_delete_servers_id(d, new_ai)
+    if not is_empty_value(v):
+        transformed["id"] = v
+
+    if transformed:
+        req.append(transformed)
+
+ return req
+
+
+def expand_delete_servers_id(d, array_index):
+ return d["ansible_module"].params.get("id")
+
+
+def send_delete_request(module, params, client):
+ url = "cloudservers/delete"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "jobs/{job_id}", result)
+
+ def _query_status():
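+        # One poll of the job endpoint; request or parse failures are
+        # treated as a transient miss so that wait_to_finish keeps
+        # retrying until the timeout.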
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_ecs_instance): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def multi_invoke_delete_volume(config, opts, client, timeout):
+ module = config.module
+
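+    # When both a desired and a current data_volumes list exist, detach
+    # every currently attached volume whose volume_id is no longer
+    # desired, waiting on each detach job.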
+ opts1 = None
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in expect]
+ opts1 = {
+ "data_volumes": [
+ i for i in current if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ r = send_delete_volume_request(module, None, client, loop_val[i])
+ async_wait(config, r, client, timeout)
+
+
+def multi_invoke_attach_data_disk(config, opts, client, timeout):
+ module = config.module
+
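+    # Conversely, attach every desired volume that is not attached yet;
+    # the {"data_volumes": i} array index selects one volume per request.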
+ opts1 = opts
+ expect = opts["data_volumes"]
+ current = opts["current_state"]["data_volumes"]
+ if expect and current:
+ v = [i["volume_id"] for i in current]
+ opts1 = {
+ "data_volumes": [
+ i for i in expect if i["volume_id"] not in v
+ ]
+ }
+
+ loop_val = navigate_value(opts1, ["data_volumes"])
+ if not loop_val:
+ return
+
+ for i in range(len(loop_val)):
+ params = build_attach_data_disk_parameters(opts1, {"data_volumes": i})
+ r = send_attach_data_disk_request(module, params, client)
+ async_wait(config, r, client, timeout)
+
+
+def send_read_request(module, client):
+ url = build_path(module, "cloudservers/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["server"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ v = fill_read_resp_address(body.get("address"))
+ result["address"] = v
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_read_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_os_extended_volumes_volumes_attached(
+ body.get("os-extended-volumes:volumes_attached"))
+ result["os-extended-volumes:volumes_attached"] = v
+
+ v = fill_read_resp_root_volume(body.get("root_volume"))
+ result["root_volume"] = v
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_read_resp_address(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["OS-EXT-IPS:port_id"] = item.get("OS-EXT-IPS:port_id")
+
+ val["OS-EXT-IPS:type"] = item.get("OS-EXT-IPS:type")
+
+ val["addr"] = item.get("addr")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["image_name"] = value.get("image_name")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_os_extended_volumes_volumes_attached(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["bootIndex"] = item.get("bootIndex")
+
+ val["device"] = item.get("device")
+
+ val["id"] = item.get("id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_root_volume(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["device"] = value.get("device")
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def send_read_auto_recovery_request(module, client):
+ url = build_path(module, "cloudservers/{id}/autorecovery")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(read_auto_recovery), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def fill_read_auto_recovery_resp_body(body):
+ result = dict()
+
+ result["support_auto_recovery"] = body.get("support_auto_recovery")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-AZ:availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "config_drive"], array_index)
+ r["config_drive"] = v
+
+ v = navigate_value(response, ["read", "created"], array_index)
+ r["created"] = v
+
+ v = flatten_data_volumes(response, array_index)
+ r["data_volumes"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "OS-DCF:diskConfig"], array_index)
+ r["disk_config_type"] = v
+
+ v = flatten_enable_auto_recovery(response, array_index)
+ r["enable_auto_recovery"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "flavor", "id"], array_index)
+ r["flavor_name"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:hostname"], array_index)
+ r["host_name"] = v
+
+ v = navigate_value(response, ["read", "image", "id"], array_index)
+ r["image_id"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "image_name"], array_index)
+ r["image_name"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = flatten_nics(response, array_index)
+ r["nics"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-STS:power_state"], array_index)
+ r["power_state"] = v
+
+ v = flatten_root_volume(response, array_index)
+ r["root_volume"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:instance_name"], array_index)
+ r["server_alias"] = v
+
+ v = flatten_server_tags(response, array_index)
+ r["server_tags"] = v
+
+ v = navigate_value(response, ["read", "key_name"], array_index)
+ r["ssh_key_name"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(
+ response, ["read", "OS-EXT-SRV-ATTR:user_data"], array_index)
+ r["user_data"] = v
+
+ v = navigate_value(response, ["read", "metadata", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def flatten_data_volumes(d, array_index):
+ v = navigate_value(d, ["read", "os-extended-volumes:volumes_attached"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.os-extended-volumes:volumes_attached"] = i
+
+ val = dict()
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(
+ d, ["read", "os-extended-volumes:volumes_attached", "id"], new_ai)
+ val["volume_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_auto_recovery(d, array_index):
+ v = navigate_value(d, ["read_auto_recovery", "support_auto_recovery"],
+ array_index)
+ return v == "true"
+
+
+def flatten_nics(d, array_index):
+ v = navigate_value(d, ["read", "address"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.address"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "address", "addr"], new_ai)
+ val["ip_address"] = v
+
+ v = navigate_value(
+ d, ["read", "address", "OS-EXT-IPS:port_id"], new_ai)
+ val["port_id"] = v
+
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_root_volume(d, array_index):
+ result = dict()
+
+ v = navigate_value(d, ["read", "root_volume", "device"], array_index)
+ result["device"] = v
+
+ v = navigate_value(d, ["read", "root_volume", "id"], array_index)
+ result["volume_id"] = v
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return None
+
+
+def flatten_server_tags(d, array_index):
+ v = navigate_value(d, ["read", "tags"], array_index)
+ if not v:
+ return None
+
+ r = dict()
+ for item in v:
+ v1 = item.split("=")
+ if v1:
+ r[v1[0]] = v1[1]
+ return r
+
+
+def adjust_options(opts, states):
+ adjust_data_volumes(opts, states)
+
+ adjust_nics(opts, states)
+
+
+def adjust_data_volumes(parent_input, parent_cur):
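+    # Reorder the current data_volumes to pair up with the user input by
+    # volume_id, so that a mere ordering difference does not show up as a
+    # change when the two states are compared.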
+ iv = parent_input.get("data_volumes")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("data_volumes")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["volume_id"] != icv["volume_id"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(data_volumes) failed, "
+ "the array number is not equal")
+
+ parent_cur["data_volumes"] = result
+
+
+def adjust_nics(parent_input, parent_cur):
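+    # Same pairing logic as adjust_data_volumes, keyed on ip_address.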
+ iv = parent_input.get("nics")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("nics")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ lcv = len(cv)
+ result = []
+ q = []
+ for iiv in iv:
+ if len(q) == lcv:
+ break
+
+ icv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ icv = cv[j]
+
+ if iiv["ip_address"] != icv["ip_address"]:
+ continue
+
+ result.append(icv)
+ q.append(j)
+ break
+ else:
+ break
+
+ if len(q) != lcv:
+ for i in range(lcv):
+ if i not in q:
+ result.append(cv[i])
+
+ if len(result) != lcv:
+ raise Exception("adjust property(nics) failed, "
+ "the array number is not equal")
+
+ parent_cur["nics"] = result
+
+
+def set_unreadable_options(opts, states):
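+    # These options are write-only: the read API does not return them, so
+    # the user input is copied into the observed state before comparison.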
+ states["admin_pass"] = opts.get("admin_pass")
+
+ states["eip_id"] = opts.get("eip_id")
+
+ set_unread_nics(
+ opts.get("nics"), states.get("nics"))
+
+ set_unread_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ states["security_groups"] = opts.get("security_groups")
+
+ states["server_metadata"] = opts.get("server_metadata")
+
+
+def set_unread_nics(inputv, curv):
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ if not (curv and isinstance(curv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ cv["subnet_id"] = iv.get("subnet_id")
+
+
+def set_unread_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ curv["size"] = inputv.get("size")
+
+ curv["snapshot_id"] = inputv.get("snapshot_id")
+
+ curv["volume_type"] = inputv.get("volume_type")
+
+
+def set_readonly_options(opts, states):
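+    # These attributes are read-only (reported by the service, never set
+    # by the user), so the observed values are copied into the input side
+    # before comparison.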
+ opts["config_drive"] = states.get("config_drive")
+
+ opts["created"] = states.get("created")
+
+ opts["disk_config_type"] = states.get("disk_config_type")
+
+ opts["host_name"] = states.get("host_name")
+
+ opts["image_name"] = states.get("image_name")
+
+ set_readonly_nics(
+ opts.get("nics"), states.get("nics"))
+
+ opts["power_state"] = states.get("power_state")
+
+ set_readonly_root_volume(
+ opts.get("root_volume"), states.get("root_volume"))
+
+ opts["server_alias"] = states.get("server_alias")
+
+ opts["status"] = states.get("status")
+
+
+def set_readonly_nics(inputv, curv):
+ if not (curv and isinstance(curv, list)):
+ return
+
+ if not (inputv and isinstance(inputv, list)):
+ return
+
+ lcv = len(curv)
+ q = []
+ for iv in inputv:
+ if len(q) == lcv:
+ break
+
+ cv = None
+ for j in range(lcv):
+ if j in q:
+ continue
+
+ cv = curv[j]
+
+ if iv["ip_address"] != cv["ip_address"]:
+ continue
+
+ q.append(j)
+ break
+ else:
+ continue
+
+ iv["port_id"] = cv.get("port_id")
+
+
+def set_readonly_root_volume(inputv, curv):
+ if not (inputv and isinstance(inputv, dict)):
+ return
+
+ if not (curv and isinstance(curv, dict)):
+ return
+
+ inputv["device"] = curv.get("device")
+
+ inputv["volume_id"] = curv.get("volume_id")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_ecs_instance): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["servers"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = None
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["OS-EXT-AZ:availability_zone"] = v
+
+ result["OS-EXT-SRV-ATTR:hostname"] = None
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = None
+
+ v = navigate_value(all_opts, ["user_data"], None)
+ result["OS-EXT-SRV-ATTR:user_data"] = v
+
+ result["OS-EXT-STS:power_state"] = None
+
+ result["config_drive"] = None
+
+ result["created"] = None
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ v = expand_list_flavor(all_opts, None)
+ result["flavor"] = v
+
+ result["id"] = None
+
+ v = expand_list_image(all_opts, None)
+ result["image"] = v
+
+ v = navigate_value(all_opts, ["ssh_key_name"], None)
+ result["key_name"] = v
+
+ v = expand_list_metadata(all_opts, None)
+ result["metadata"] = v
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["status"] = None
+
+ v = expand_list_tags(all_opts, None)
+ result["tags"] = v
+
+ return result
+
+
+def expand_list_flavor(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["flavor_name"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_image(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_tags(d, array_index):
+ v = d.get("server_tags")
+ if not v:
+ return None
+
+ return [k + "=" + v1 for k, v1 in v.items()]
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["OS-DCF:diskConfig"] = body.get("OS-DCF:diskConfig")
+
+ result["OS-EXT-AZ:availability_zone"] = body.get(
+ "OS-EXT-AZ:availability_zone")
+
+ result["OS-EXT-SRV-ATTR:hostname"] = body.get("OS-EXT-SRV-ATTR:hostname")
+
+ result["OS-EXT-SRV-ATTR:instance_name"] = body.get(
+ "OS-EXT-SRV-ATTR:instance_name")
+
+ result["OS-EXT-SRV-ATTR:user_data"] = body.get("OS-EXT-SRV-ATTR:user_data")
+
+ result["OS-EXT-STS:power_state"] = body.get("OS-EXT-STS:power_state")
+
+ result["config_drive"] = body.get("config_drive")
+
+ result["created"] = body.get("created")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ v = fill_list_resp_flavor(body.get("flavor"))
+ result["flavor"] = v
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_image(body.get("image"))
+ result["image"] = v
+
+ result["key_name"] = body.get("key_name")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["name"] = body.get("name")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ return result
+
+
+def fill_list_resp_flavor(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_image(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def adjust_list_resp(opts, resp):
+ adjust_list_api_tags(opts, resp)
+
+
+def adjust_list_api_tags(parent_input, parent_cur):
+ iv = parent_input.get("tags")
+ if not (iv and isinstance(iv, list)):
+ return
+
+ cv = parent_cur.get("tags")
+ if not (cv and isinstance(cv, list)):
+ return
+
+ result = []
+ for iiv in iv:
+ if iiv not in cv:
+ break
+
+ result.append(iiv)
+
+ j = cv.index(iiv)
+ cv[j] = cv[-1]
+ cv.pop()
+
+ if cv:
+ result.extend(cv)
+ parent_cur["tags"] = result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py b/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
new file mode 100644
index 000000000..7d445ddd2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_evs_disk.py
@@ -0,0 +1,1217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_evs_disk
+description:
+    - Block storage management.
+short_description: Creates a resource of Evs/Disk in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '30m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '30m'
+ delete:
+ description:
+                    - The timeout for the delete operation.
+ type: str
+ default: '30m'
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ required: true
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, the
+                disk creation will fail. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ required: true
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ required: false
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ required: false
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ required: false
+ enable_scsi:
+ description:
+            - If this parameter is set to True, the disk device type will be
+                SCSI, which allows ECS OSs to directly access the underlying
+                storage media and supports SCSI reservation commands. If this
+                parameter is set to False, the disk device type will be VBD,
+                which supports only simple SCSI read/write commands.
+            - If the parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ required: false
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ required: false
+ encryption_id:
+ description:
+            - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ required: false
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ required: false
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ required: false
+ size:
+ description:
+            - Specifies the disk size, in GB. Its value ranges are as follows:
+                system disk, 1 GB to 1024 GB; data disk, 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ required: false
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# test create disk
+- name: Create a disk
+ community.general.hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
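+
+# An illustrative variant (my_snapshot_id is a placeholder variable):
+# create the disk from an existing snapshot. volume_type must match the
+# snapshot's source disk and size must be at least the snapshot size.
+- name: Create a disk from a snapshot
+  community.general.hwc_evs_disk:
+    availability_zone: "cn-north-1a"
+    name: "ansible_evs_disk_from_snapshot"
+    volume_type: "SATA"
+    size: 10
+    snapshot_id: "{{ my_snapshot_id }}"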
+'''
+
+RETURN = '''
+ availability_zone:
+ description:
+ - Specifies the AZ where you want to create the disk.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the disk name. The value can contain a maximum of 255
+ bytes.
+ type: str
+ returned: success
+ volume_type:
+ description:
+ - Specifies the disk type. Currently, the value can be SSD, SAS, or
+ SATA.
+ - SSD specifies the ultra-high I/O disk type.
+ - SAS specifies the high I/O disk type.
+ - SATA specifies the common I/O disk type.
+ - If the specified disk type is not available in the AZ, the
+            disk creation will fail. If the EVS disk is created from a
+ snapshot, the volume_type field must be the same as that of the
+ snapshot's source disk.
+ type: str
+ returned: success
+ backup_id:
+ description:
+ - Specifies the ID of the backup that can be used to create a disk.
+ This parameter is mandatory when you use a backup to create the
+ disk.
+ type: str
+ returned: success
+ description:
+ description:
+ - Specifies the disk description. The value can contain a maximum
+ of 255 bytes.
+ type: str
+ returned: success
+ enable_full_clone:
+ description:
+ - If the disk is created from a snapshot and linked cloning needs
+ to be used, set this parameter to True.
+ type: bool
+ returned: success
+ enable_scsi:
+ description:
+        - If this parameter is set to True, the disk device type will be
+            SCSI, which allows ECS OSs to directly access the underlying
+            storage media and supports SCSI reservation commands. If this
+            parameter is set to False, the disk device type will be VBD,
+            which supports only simple SCSI read/write commands.
+        - If the parameter enable_share is set to True and this parameter
+ is not specified, shared SCSI disks are created. SCSI EVS disks
+ cannot be created from backups, which means that this parameter
+ cannot be True if backup_id has been specified.
+ type: bool
+ returned: success
+ enable_share:
+ description:
+ - Specifies whether the disk is shareable. The default value is
+ False.
+ type: bool
+ returned: success
+ encryption_id:
+ description:
+        - Specifies the encryption ID. Its length is fixed at 36 bytes.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. This ID is associated with
+ the disk during the disk creation. If it is not specified, the
+ disk is bound to the default enterprise project.
+ type: str
+ returned: success
+ image_id:
+ description:
+ - Specifies the image ID. If this parameter is specified, the disk
+ is created from an image. BMS system disks cannot be
+ created from BMS images.
+ type: str
+ returned: success
+ size:
+ description:
+        - Specifies the disk size, in GB. Its value ranges are as follows:
+            system disk, 1 GB to 1024 GB; data disk, 10 GB to 32768 GB. This
+ parameter is mandatory when you create an empty disk or use an
+ image or a snapshot to create a disk. If you use an image or a
+ snapshot to create a disk, the disk size must be greater than or
+ equal to the image or snapshot size. This parameter is optional
+ when you use a backup to create a disk. If this parameter is not
+ specified, the disk size is equal to the backup size.
+ type: int
+ returned: success
+ snapshot_id:
+ description:
+ - Specifies the snapshot ID. If this parameter is specified, the
+ disk is created from a snapshot.
+ type: str
+ returned: success
+ attachments:
+ description:
+ - Specifies the disk attachment information.
+ type: complex
+ returned: success
+ contains:
+ attached_at:
+ description:
+ - Specifies the time when the disk was attached. Time
+ format is 'UTC YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ attachment_id:
+ description:
+ - Specifies the ID of the attachment information.
+ type: str
+ returned: success
+ device:
+ description:
+ - Specifies the device name.
+ type: str
+ returned: success
+ server_id:
+ description:
+ - Specifies the ID of the server to which the disk is
+ attached.
+ type: str
+ returned: success
+ backup_policy_id:
+ description:
+ - Specifies the backup policy ID.
+ type: str
+ returned: success
+ created_at:
+ description:
+ - Specifies the time when the disk was created. Time format is 'UTC
+ YYYY-MM-DDTHH:MM:SS'.
+ type: str
+ returned: success
+ is_bootable:
+ description:
+ - Specifies whether the disk is bootable.
+ type: bool
+ returned: success
+ is_readonly:
+ description:
+ - Specifies whether the disk is read-only or read/write. True
+ indicates that the disk is read-only. False indicates that the
+ disk is read/write.
+ type: bool
+ returned: success
+ source_volume_id:
+ description:
+ - Specifies the source disk ID. This parameter has a value if the
+ disk is created from a source disk.
+ type: str
+ returned: success
+ status:
+ description:
+ - Specifies the disk status.
+ type: str
+ returned: success
+ tags:
+ description:
+ - Specifies the disk tags.
+ type: dict
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='30m', type='str'),
+ update=dict(default='30m', type='str'),
+ delete=dict(default='30m', type='str'),
+ ), default=dict()),
+ availability_zone=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ volume_type=dict(type='str', required=True),
+ backup_id=dict(type='str'),
+ description=dict(type='str'),
+ enable_full_clone=dict(type='bool'),
+ enable_scsi=dict(type='bool'),
+ enable_share=dict(type='bool'),
+ encryption_id=dict(type='str'),
+ enterprise_project_id=dict(type='str'),
+ image_id=dict(type='str'),
+ size=dict(type='int'),
+ snapshot_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "evs")
+
+ try:
+ _init(config)
+ is_exist = module.params.get('id')
+
+ result = None
+ changed = False
+ if module.params['state'] == 'present':
+ if not is_exist:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ if not module.check_mode:
+ update(config, inputv, result)
+
+ inputv = user_input_parameters(module)
+ resp, array_index = read_resource(config)
+ result = build_state(inputv, resp, array_index)
+ set_readonly_options(inputv, result)
+ if are_different_dicts(inputv, result):
+ raise Exception("Update resource failed, "
+ "some attributes are not updated")
+
+ changed = True
+
+ result['id'] = module.params.get('id')
+ else:
+ result = dict()
+ if is_exist:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def _init(config):
+ module = config.module
+ if module.params.get('id'):
+ return
+
+ v = search_resource(config)
+ n = len(v)
+ if n > 1:
+ raise Exception("find more than one resources(%s)" % ", ".join([
+ navigate_value(i, ["id"])
+ for i in v
+ ]))
+
+ if n == 1:
+ module.params['id'] = navigate_value(v[0], ["id"])
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "backup_id": module.params.get("backup_id"),
+ "description": module.params.get("description"),
+ "enable_full_clone": module.params.get("enable_full_clone"),
+ "enable_scsi": module.params.get("enable_scsi"),
+ "enable_share": module.params.get("enable_share"),
+ "encryption_id": module.params.get("encryption_id"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "image_id": module.params.get("image_id"),
+ "name": module.params.get("name"),
+ "size": module.params.get("size"),
+ "snapshot_id": module.params.get("snapshot_id"),
+ "volume_type": module.params.get("volume_type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+ opts["ansible_module"] = module
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+
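+    # The job created above is polled on the v1 volume endpoint, hence
+    # the /v2/ -> /v1/ rewrite of the catalog endpoint below.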
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ obj = async_wait(config, r, client1, timeout)
+ module.params['id'] = navigate_value(obj, ["entities", "volume_id"])
+
+
+def update(config, expect_state, current_state):
+ module = config.module
+ expect_state["current_state"] = current_state
+ current_state["current_state"] = current_state
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+
+ params = build_update_parameters(expect_state)
+ params1 = build_update_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ send_update_request(module, params, client)
+
+ params = build_extend_disk_parameters(expect_state)
+ params1 = build_extend_disk_parameters(current_state)
+ if params and are_different_dicts(params, params1):
+ client1 = config.client(get_region(module), "evsv2.1", "project")
+ r = send_extend_disk_request(module, params, client1)
+
+ client1 = config.client(get_region(module), "volume", "project")
+ client1.endpoint = client1.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client1, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "evs", "project")
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+
+ r = send_delete_request(module, None, client)
+
+ client = config.client(get_region(module), "volume", "project")
+ client.endpoint = client.endpoint.replace("/v2/", "/v1/")
+ async_wait(config, r, client, timeout)
+
+
+def read_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return res, None
+
+
+def build_state(opts, response, array_index):
+ states = flatten_options(response, array_index)
+ set_unreadable_options(opts, states)
+ return states
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enable_share"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "multiattach=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["name"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "name=" + (str(v) if v else str(v).lower()))
+
+ v = navigate_value(opts, ["availability_zone"])
+ if v or v in [False, 0]:
+ query_params.append(
+ "availability_zone=" + (str(v) if v else str(v).lower()))
+
+ query_link = "?limit=10&offset={start}"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "volumev3", "project")
+ opts = user_input_parameters(module)
+ name = module.params.get("name")
+ query_link = _build_query_link(opts)
+ link = "os-vendor-volumes/detail" + query_link
+
+ result = []
+ p = {'start': 0}
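+    # This list API pages by item offset: start is advanced by the number
+    # of items returned on each page, and the scan stops early once the
+    # name match is ambiguous.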
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ if name == item.get("name"):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['start'] += len(r)
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["backup_id"], None)
+ if not is_empty_value(v):
+ params["backup_id"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["image_id"], None)
+ if not is_empty_value(v):
+ params["imageRef"] = v
+
+ v = expand_create_metadata(opts, None)
+ if not is_empty_value(v):
+ params["metadata"] = v
+
+ v = navigate_value(opts, ["enable_share"], None)
+ if not is_empty_value(v):
+ params["multiattach"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["size"], None)
+ if not is_empty_value(v):
+ params["size"] = v
+
+ v = navigate_value(opts, ["snapshot_id"], None)
+ if not is_empty_value(v):
+ params["snapshot_id"] = v
+
+ v = navigate_value(opts, ["volume_type"], None)
+ if not is_empty_value(v):
+ params["volume_type"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def expand_create_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ if not is_empty_value(v):
+ r["__system__cmkid"] = v
+
+ v = expand_create_metadata_system_encrypted(d, array_index)
+ if not is_empty_value(v):
+ r["__system__encrypted"] = v
+
+ v = expand_create_metadata_full_clone(d, array_index)
+ if not is_empty_value(v):
+ r["full_clone"] = v
+
+ v = expand_create_metadata_hw_passthrough(d, array_index)
+ if not is_empty_value(v):
+ r["hw:passthrough"] = v
+
+ return r
+
+
+def expand_create_metadata_system_encrypted(d, array_index):
+ v = navigate_value(d, ["encryption_id"], array_index)
+ return "1" if v else ""
+
+
+def expand_create_metadata_full_clone(d, array_index):
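+    # Note the inversion: the metadata flag is "0" when enable_full_clone
+    # is True (flatten_enable_full_clone maps "0" back to True).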
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ return "0" if v else ""
+
+
+def expand_create_metadata_hw_passthrough(d, array_index):
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ if v is None:
+ return v
+ return "true" if v else "false"
+
+
+def send_create_request(module, params, client):
+ url = "cloudvolumes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if v is not None:
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"volume": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def build_extend_disk_parameters(opts):
+ params = dict()
+
+ v = expand_extend_disk_os_extend(opts, None)
+ if not is_empty_value(v):
+ params["os-extend"] = v
+
+ return params
+
+
+def expand_extend_disk_os_extend(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["size"], array_index)
+ if not is_empty_value(v):
+ r["new_size"] = v
+
+ return r
+
+
+def send_extend_disk_request(module, params, client):
+ url = build_path(module, "cloudvolumes/{id}/action")
+
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(extend_disk), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "job_id": ["job_id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "jobs/{job_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["SUCCESS"],
+ ["RUNNING", "INIT"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_evs_disk): error "
+ "waiting to be done, error= %s" % str(ex))
+
+
+def send_read_request(module, client):
+ url = build_path(module, "os-vendor-volumes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volume"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_read_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_read_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_read_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_read_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+def flatten_options(response, array_index):
+ r = dict()
+
+ v = flatten_attachments(response, array_index)
+ r["attachments"] = v
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "metadata", "policy"], array_index)
+ r["backup_policy_id"] = v
+
+ v = navigate_value(response, ["read", "created_at"], array_index)
+ r["created_at"] = v
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = flatten_enable_full_clone(response, array_index)
+ r["enable_full_clone"] = v
+
+ v = flatten_enable_scsi(response, array_index)
+ r["enable_scsi"] = v
+
+ v = navigate_value(response, ["read", "multiattach"], array_index)
+ r["enable_share"] = v
+
+ v = navigate_value(
+ response, ["read", "metadata", "__system__cmkid"], array_index)
+ r["encryption_id"] = v
+
+ v = navigate_value(
+ response, ["read", "enterprise_project_id"], array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(
+ response, ["read", "volume_image_metadata", "id"], array_index)
+ r["image_id"] = v
+
+ v = flatten_is_bootable(response, array_index)
+ r["is_bootable"] = v
+
+ v = flatten_is_readonly(response, array_index)
+ r["is_readonly"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "size"], array_index)
+ r["size"] = v
+
+ v = navigate_value(response, ["read", "snapshot_id"], array_index)
+ r["snapshot_id"] = v
+
+ v = navigate_value(response, ["read", "source_volid"], array_index)
+ r["source_volume_id"] = v
+
+ v = navigate_value(response, ["read", "status"], array_index)
+ r["status"] = v
+
+ v = navigate_value(response, ["read", "tags"], array_index)
+ r["tags"] = v
+
+ v = navigate_value(response, ["read", "volume_type"], array_index)
+ r["volume_type"] = v
+
+ return r
+
+
+def flatten_attachments(d, array_index):
+ v = navigate_value(d, ["read", "attachments"],
+ array_index)
+ if not v:
+ return None
+ n = len(v)
+ result = []
+
+ new_ai = dict()
+ if array_index:
+ new_ai.update(array_index)
+
+ for i in range(n):
+ new_ai["read.attachments"] = i
+
+ val = dict()
+
+ v = navigate_value(d, ["read", "attachments", "attached_at"], new_ai)
+ val["attached_at"] = v
+
+ v = navigate_value(d, ["read", "attachments", "attachment_id"], new_ai)
+ val["attachment_id"] = v
+
+ v = navigate_value(d, ["read", "attachments", "device"], new_ai)
+ val["device"] = v
+
+ v = navigate_value(d, ["read", "attachments", "server_id"], new_ai)
+ val["server_id"] = v
+
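+        # keep this attachment only if at least one of its fields is set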
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if result else None
+
+
+def flatten_enable_full_clone(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "full_clone"],
+ array_index)
+ if v is None:
+ return v
+ return True if v == "0" else False
+
+
+def flatten_enable_scsi(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "hw:passthrough"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_bootable(d, array_index):
+ v = navigate_value(d, ["read", "bootable"], array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def flatten_is_readonly(d, array_index):
+ v = navigate_value(d, ["read", "metadata", "readonly"],
+ array_index)
+ if v is None:
+ return v
+ return True if v in ["true", "True"] else False
+
+
+def set_unreadable_options(opts, states):
+ states["backup_id"] = opts.get("backup_id")
+
+
+def set_readonly_options(opts, states):
+ opts["attachments"] = states.get("attachments")
+
+ opts["backup_policy_id"] = states.get("backup_policy_id")
+
+ opts["created_at"] = states.get("created_at")
+
+ opts["is_bootable"] = states.get("is_bootable")
+
+ opts["is_readonly"] = states.get("is_readonly")
+
+ opts["source_volume_id"] = states.get("source_volume_id")
+
+ opts["status"] = states.get("status")
+
+ opts["tags"] = states.get("tags")
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_evs_disk): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["volumes"], None)
+
+
+def expand_list_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["encryption_id"], array_index)
+ r["__system__cmkid"] = v
+
+ r["attached_mode"] = None
+
+ v = navigate_value(d, ["enable_full_clone"], array_index)
+ r["full_clone"] = v
+
+ v = navigate_value(d, ["enable_scsi"], array_index)
+ r["hw:passthrough"] = v
+
+ r["policy"] = None
+
+ r["readonly"] = None
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_volume_image_metadata(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["image_id"], array_index)
+ r["id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_attachments(body.get("attachments"))
+ result["attachments"] = v
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["bootable"] = body.get("bootable")
+
+ result["created_at"] = body.get("created_at")
+
+ result["description"] = body.get("description")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ v = fill_list_resp_metadata(body.get("metadata"))
+ result["metadata"] = v
+
+ result["multiattach"] = body.get("multiattach")
+
+ result["name"] = body.get("name")
+
+ result["size"] = body.get("size")
+
+ result["snapshot_id"] = body.get("snapshot_id")
+
+ result["source_volid"] = body.get("source_volid")
+
+ result["status"] = body.get("status")
+
+ result["tags"] = body.get("tags")
+
+ v = fill_list_resp_volume_image_metadata(body.get("volume_image_metadata"))
+ result["volume_image_metadata"] = v
+
+ result["volume_type"] = body.get("volume_type")
+
+ return result
+
+
+def fill_list_resp_attachments(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["attached_at"] = item.get("attached_at")
+
+ val["attachment_id"] = item.get("attachment_id")
+
+ val["device"] = item.get("device")
+
+ val["server_id"] = item.get("server_id")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["__system__cmkid"] = value.get("__system__cmkid")
+
+ result["attached_mode"] = value.get("attached_mode")
+
+ result["full_clone"] = value.get("full_clone")
+
+ result["hw:passthrough"] = value.get("hw:passthrough")
+
+ result["policy"] = value.get("policy")
+
+ result["readonly"] = value.get("readonly")
+
+ return result
+
+
+def fill_list_resp_volume_image_metadata(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["id"] = value.get("id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py b/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
new file mode 100644
index 000000000..357fd5520
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_network_vpc.py
@@ -0,0 +1,500 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_network_vpc
+description:
+    - Represents a VPC resource.
+short_description: Creates a Huawei Cloud VPC
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '15m'
+ delete:
+ description:
+                    - The timeout for the delete operation.
+ type: str
+ default: '15m'
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ required: true
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create a vpc
+ community.general.hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+'''
+
+RETURN = '''
+ id:
+ description:
+            - The ID of the VPC.
+ type: str
+ returned: success
+ name:
+ description:
+            - The name of the VPC.
+ type: str
+ returned: success
+ cidr:
+ description:
+            - The range of available subnets in the VPC.
+ type: str
+ returned: success
+ status:
+ description:
+            - The status of the VPC.
+ type: str
+ returned: success
+ routes:
+ description:
+            - The route information.
+ type: complex
+ returned: success
+ contains:
+ destination:
+ description:
+                    - The destination network segment of a route.
+ type: str
+ returned: success
+ next_hop:
+ description:
+                    - The next hop of a route. If the route type is peering,
+                      it provides the VPC peering connection ID.
+ type: str
+ returned: success
+ enable_shared_snat:
+ description:
+            - Shows whether shared SNAT is enabled.
+ type: bool
+ returned: success
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcClientException404, HwcModule,
+ are_different_dicts, is_empty_value,
+ wait_to_finish, get_region,
+ build_path, navigate_value)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(
+ default='present', choices=['present', 'absent'], type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ delete=dict(default='15m', type='str'),
+ ), default=dict()),
+ name=dict(required=True, type='str'),
+ cidr=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+ config = Config(module, 'vpc')
+
+ state = module.params['state']
+
+ if (not module.params.get("id")) and module.params.get("name"):
+ module.params['id'] = get_id_by_name(config)
+
+ fetch = None
+ link = self_link(module)
+    # the link will include Nones if required format parameters are missing
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "vpc", "project")
+ fetch = fetch_resource(module, client, link)
+ if fetch:
+ fetch = fetch.get('vpc')
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {"cidr": current_state["cidr"]}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config, self_link(module))
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config, self_link(module))
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config, "vpcs")
+ fetch = response_to_hash(module, fetch.get('vpc'))
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.post(link, resource_to_create(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_done = wait_for_operation(config, 'create', r)
+ v = ""
+ try:
+ v = navigate_value(wait_done, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, 'vpcs/{op_id}', {'op_id': v})
+ return fetch_resource(module, client, url)
+
+
+def update(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ r = None
+ try:
+ r = client.put(link, resource_to_update(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_operation(config, 'update', r)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config, link):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ wait_for_delete(module, client, link)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_network_vpc): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_id_by_name(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ name = module.params.get("name")
+ link = "vpcs"
+ query_link = "?marker={marker}&limit=10"
+ link += query_link
+ not_format_keys = re.findall("={marker}", link)
+ none_values = re.findall("=None", link)
+
+ if not (not_format_keys or none_values):
+ r = None
+ try:
+ r = client.get(link)
+ except Exception:
+ pass
+ if r is None:
+ return None
+ r = r.get('vpcs', [])
+ ids = [
+ i.get('id') for i in r if i.get('name', '') == name
+ ]
+ if not ids:
+ return None
+ elif len(ids) == 1:
+ return ids[0]
+ else:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+ elif none_values:
+ module.fail_json(
+ msg="Can not find id by name because url includes None.")
+ else:
+ p = {'marker': ''}
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('vpcs', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == name:
+ ids.add(i.get('id'))
+ if len(ids) >= 2:
+ module.fail_json(
+ msg="Multiple resources with same name are found.")
+
+ p['marker'] = r[-1].get('id')
+
+ return ids.pop() if ids else None
+
+
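+# An illustrative (hypothetical) generator showing the marker-based paging
+# scheme that get_id_by_name() above walks by hand: each request passes the
+# id of the last item of the previous page as the marker and stops on an
+# empty page.
+def _example_iter_vpc_pages(client, link):
+    marker = ''
+    while True:
+        page = client.get(link.format(marker=marker)).get('vpcs', [])
+        if not page:
+            return
+        for item in page:
+            yield item
+        marker = page[-1].get('id')
+
+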
+def self_link(module):
+ return build_path(module, "vpcs/{id}")
+
+
+def resource_to_create(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def resource_to_update(module):
+ params = dict()
+
+ v = module.params.get('cidr')
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ if not params:
+ return params
+
+ params = {"vpc": params}
+
+ return params
+
+
+def _get_editable_properties(module):
+ return {
+ "cidr": module.params.get("cidr"),
+ }
+
+
+def response_to_hash(module, response):
+ """ Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'id': response.get(u'id'),
+ u'name': response.get(u'name'),
+ u'cidr': response.get(u'cidr'),
+ u'status': response.get(u'status'),
+ u'routes': VpcRoutesArray(
+ response.get(u'routes', []), module).from_response(),
+ u'enable_shared_snat': response.get(u'enable_shared_snat')
+ }
+
+
+def wait_for_operation(config, op_type, op_result):
+ module = config.module
+ op_id = ""
+ try:
+ op_id = navigate_value(op_result, ['vpc', 'id'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ url = build_path(module, "vpcs/{op_id}", {'op_id': op_id})
+ timeout = 60 * int(module.params['timeouts'][op_type].rstrip('m'))
+ states = {
+ 'create': {
+            'allowed': ['CREATING', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ },
+ 'update': {
+            'allowed': ['PENDING_UPDATE', 'DOWN', 'OK'],
+ 'complete': ['OK'],
+ }
+ }
+
+ return wait_for_completion(url, timeout, states[op_type]['allowed'],
+ states[op_type]['complete'], config)
+
+
+def wait_for_completion(op_uri, timeout, allowed_states,
+ complete_states, config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ def _refresh_status():
+ r = None
+ try:
+ r = fetch_resource(module, client, op_uri)
+ except Exception:
+ return None, ""
+
+ status = ""
+ try:
+ status = navigate_value(r, ['vpc', 'status'])
+ except Exception:
+ return None, ""
+
+ return r, status
+
+ try:
+ return wait_to_finish(complete_states, allowed_states,
+ _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
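+# The timeout strings above are assumed to be minutes-only values like '15m';
+# a hypothetical helper making the parsing used by wait_for_operation()
+# explicit:
+def _example_timeout_seconds(value):
+    """Convert a '15m' style timeout string to seconds ('15m' -> 900)."""
+    return 60 * int(value.rstrip('m'))
+
+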
+def wait_for_delete(module, client, link):
+
+ def _refresh_status():
+ try:
+ client.get(link)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['delete'].rstrip('m'))
+ try:
+ return wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+class VpcRoutesArray(object):
+ def __init__(self, request, module):
+ self.module = module
+ if request:
+ self.request = request
+ else:
+ self.request = []
+
+ def to_request(self):
+ items = []
+ for item in self.request:
+ items.append(self._request_for_item(item))
+ return items
+
+ def from_response(self):
+ items = []
+ for item in self.request:
+ items.append(self._response_from_item(item))
+ return items
+
+ def _request_for_item(self, item):
+ return {
+ u'destination': item.get('destination'),
+ u'nexthop': item.get('next_hop')
+ }
+
+ def _response_from_item(self, item):
+ return {
+ u'destination': item.get(u'destination'),
+ u'next_hop': item.get(u'nexthop')
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
new file mode 100644
index 000000000..88207d3f9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_smn_topic.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_smn_topic
+description:
+    - Represents an SMN notification topic resource.
+short_description: Creates a resource of SMNTopic in Huawei Cloud
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - requests >= 2.18.4
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+            - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ type: str
+ required: false
+ name:
+ description:
+            - Name of the topic to be created. The topic name is a string of 1
+              to 256 characters. It can contain only upper- or lower-case
+              letters, digits, hyphens (-), and underscores C(_), and must
+              start with a letter or digit.
+ type: str
+ required: true
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create a smn topic
+ community.general.hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user_name: "{{ user_name }}"
+ password: "{{ password }}"
+ domain_name: "{{ domain_name }}"
+ project_name: "{{ project_name }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+'''
+
+RETURN = '''
+create_time:
+ description:
+ - Time when the topic was created.
+ returned: success
+ type: str
+display_name:
+ description:
+ - Topic display name, which is presented as the name of the email
+ sender in an email message. The topic display name contains a
+ maximum of 192 bytes.
+ returned: success
+ type: str
+name:
+ description:
+        - Name of the topic to be created. The topic name is a string of 1
+          to 256 characters. It can contain only upper- or lower-case
+          letters, digits, hyphens (-), and underscores C(_), and must
+          start with a letter or digit.
+ returned: success
+ type: str
+push_policy:
+ description:
+ - Message pushing policy. 0 indicates that the message sending
+ fails and the message is cached in the queue. 1 indicates that
+ the failed message is discarded.
+ returned: success
+ type: int
+topic_urn:
+ description:
+ - Resource identifier of a topic, which is unique.
+ returned: success
+ type: str
+update_time:
+ description:
+ - Time when the topic was updated.
+ returned: success
+ type: str
+'''
+
+###############################################################################
+# Imports
+###############################################################################
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (Config, HwcClientException,
+ HwcModule, navigate_value,
+ are_different_dicts, is_empty_value,
+ build_path, get_region)
+import re
+
+###############################################################################
+# Main
+###############################################################################
+
+
+def main():
+ """Main function"""
+
+ module = HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ display_name=dict(type='str'),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ config = Config(module, "smn")
+
+ state = module.params['state']
+
+ if not module.params.get("id"):
+ module.params['id'] = get_resource_id(config)
+
+ fetch = None
+ link = self_link(module)
+ # the link will include Nones if required format parameters are missed
+ if not re.search('/None/|/None$', link):
+ client = config.client(get_region(module), "smn", "project")
+ fetch = fetch_resource(module, client, link)
+ changed = False
+
+ if fetch:
+ if state == 'present':
+ expect = _get_resource_editable_properties(module)
+ current_state = response_to_hash(module, fetch)
+ current = {'display_name': current_state['display_name']}
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ fetch = update(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = current_state
+ else:
+ if not module.check_mode:
+ delete(config)
+ fetch = {}
+ changed = True
+ else:
+ if state == 'present':
+ if not module.check_mode:
+ fetch = create(config)
+ fetch = response_to_hash(module, fetch)
+ changed = True
+ else:
+ fetch = {}
+
+ fetch.update({'changed': changed})
+
+ module.exit_json(**fetch)
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ r = None
+ try:
+ r = client.post(link, create_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error creating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return get_resource(config, r)
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.put(link, update_resource_opts(module))
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error updating "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return fetch_resource(module, client, link)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = self_link(module)
+ try:
+ client.delete(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error deleting "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def fetch_resource(module, client, link):
+ try:
+ return client.get(link)
+ except HwcClientException as ex:
+ msg = ("module(hwc_smn_topic): error fetching "
+ "resource, error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+
+def get_resource(config, result):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ v = ""
+ try:
+ v = navigate_value(result, ['topic_urn'])
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ d = {'topic_urn': v}
+ url = build_path(module, 'notifications/topics/{topic_urn}', d)
+
+ return fetch_resource(module, client, url)
+
+
+def get_resource_id(config):
+ module = config.module
+ client = config.client(get_region(module), "smn", "project")
+
+ link = "notifications/topics"
+ query_link = "?offset={offset}&limit=10"
+ link += query_link
+
+ p = {'offset': 0}
+ v = module.params.get('name')
+ ids = set()
+ while True:
+ r = None
+ try:
+ r = client.get(link.format(**p))
+ except Exception:
+ pass
+ if r is None:
+ break
+ r = r.get('topics', [])
+ if r == []:
+ break
+ for i in r:
+ if i.get('name') == v:
+ ids.add(i.get('topic_urn'))
+ if len(ids) >= 2:
+ module.fail_json(msg="Multiple resources are found")
+
+ p['offset'] += 1
+
+ return ids.pop() if ids else None
+
+
+def self_link(module):
+ return build_path(module, "notifications/topics/{id}")
+
+
+def create_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ v = module.params.get('name')
+ if not is_empty_value(v):
+ params["name"] = v
+
+ return params
+
+
+def update_resource_opts(module):
+ params = dict()
+
+ v = module.params.get('display_name')
+ if not is_empty_value(v):
+ params["display_name"] = v
+
+ return params
+
+
+def _get_resource_editable_properties(module):
+ return {
+ "display_name": module.params.get("display_name"),
+ }
+
+
+def response_to_hash(module, response):
+ """Remove unnecessary properties from the response.
+ This is for doing comparisons with Ansible's current parameters.
+ """
+ return {
+ u'create_time': response.get(u'create_time'),
+ u'display_name': response.get(u'display_name'),
+ u'name': response.get(u'name'),
+ u'push_policy': _push_policy_convert_from_response(
+ response.get('push_policy')),
+ u'topic_urn': response.get(u'topic_urn'),
+ u'update_time': response.get(u'update_time')
+ }
+
+
+def _push_policy_convert_from_response(value):
+ return {
+ 0: "the message sending fails and is cached in the queue",
+ 1: "the failed message is discarded",
+ }.get(int(value))
+
+
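+# For illustration: _push_policy_convert_from_response(0) returns "the
+# message sending fails and is cached in the queue" and
+# _push_policy_convert_from_response(1) returns "the failed message is
+# discarded"; any other numeric value maps to None.
+
+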
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
new file mode 100644
index 000000000..9fc0361b3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_eip.py
@@ -0,0 +1,884 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_eip
+description:
+    - Elastic IP management.
+short_description: Creates a resource of Vpc/EIP in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '5m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '5m'
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ required: true
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ required: false
+ suboptions:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is an empty string,
+                      the default value bandwidth is used. For IPv6 addresses,
+                      the default value is bandwidth outside China and traffic
+                      in China.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ required: true
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+ details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                      size ranges from 300 Mbit/s to 1000 Mbit/s (with
+                      1000 Mbit/s included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ required: true
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ required: false
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ required: false
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ required: false
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ required: false
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create an EIP and bind it to a port
+- name: Create vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ register: subnet
+- name: Create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ register: port
+- name: Create an EIP and bind it to a port
+ community.general.hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+'''
+
+RETURN = '''
+ type:
+ description:
+ - Specifies the EIP type.
+ type: str
+ returned: success
+ dedicated_bandwidth:
+ description:
+ - Specifies the dedicated bandwidth object.
+ type: dict
+ returned: success
+ contains:
+ charge_mode:
+ description:
+ - Specifies whether the bandwidth is billed by traffic or
+ by bandwidth size. The value can be bandwidth or traffic.
+                      If this parameter is left blank or is an empty string,
+                      the default value bandwidth is used. For IPv6 addresses,
+                      the default value is bandwidth outside China and traffic
+                      in China.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the bandwidth name. The value is a string of 1
+ to 64 characters that can contain letters, digits,
+ underscores C(_), hyphens (-), and periods (.).
+ type: str
+ returned: success
+ size:
+ description:
+ - Specifies the bandwidth size. The value ranges from 1
+ Mbit/s to 2000 Mbit/s by default. (The specific range may
+ vary depending on the configuration in each region. You
+ can see the bandwidth range of each region on the
+ management console.) The minimum unit for bandwidth
+ adjustment varies depending on the bandwidth range. The
+                      details are as follows.
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth
+ size ranges from 0 to 300 Mbit/s (with 300 Mbit/s
+ included).
+ - The minimum unit is 50 Mbit/s if the allowed bandwidth
+                      size ranges from 300 Mbit/s to 1000 Mbit/s (with
+                      1000 Mbit/s included).
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth
+ size is greater than 1000 Mbit/s.
+ type: int
+ returned: success
+ id:
+ description:
+ - Specifies the ID of dedicated bandwidth.
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID.
+ type: str
+ returned: success
+ ip_version:
+ description:
+ - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this
+ parameter is left blank, an IPv4 address will be assigned.
+ type: int
+ returned: success
+ ipv4_address:
+ description:
+ - Specifies the obtained IPv4 EIP. The system automatically assigns
+ an EIP if you do not specify it.
+ type: str
+ returned: success
+ port_id:
+ description:
+ - Specifies the port ID. This parameter is returned only when a
+ private IP address is bound with the EIP.
+ type: str
+ returned: success
+ shared_bandwidth_id:
+ description:
+ - Specifies the ID of shared bandwidth.
+ type: str
+ returned: success
+ create_time:
+ description:
+ - Specifies the time (UTC time) when the EIP was assigned.
+ type: str
+ returned: success
+ ipv6_address:
+ description:
+ - Specifies the obtained IPv6 EIP.
+ type: str
+ returned: success
+ private_ip_address:
+ description:
+ - Specifies the private IP address bound with the EIP. This
+ parameter is returned only when a private IP address is bound
+ with the EIP.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='5m', type='str'),
+ update=dict(default='5m', type='str'),
+ ), default=dict()),
+ type=dict(type='str', required=True),
+ dedicated_bandwidth=dict(type='dict', options=dict(
+ charge_mode=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ size=dict(type='int', required=True)
+ )),
+ enterprise_project_id=dict(type='str'),
+ ip_version=dict(type='int'),
+ ipv4_address=dict(type='str'),
+ port_id=dict(type='str'),
+ shared_bandwidth_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
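+# Note on the flow above: the module first resolves the resource id (taken
+# from the input or discovered via search_resource()), then reconciles
+# towards the requested state: create when the resource is absent, update
+# when the read-back properties differ from the user input, and delete when
+# state is absent. In check mode the mutating calls are skipped while
+# 'changed' is still reported.
+
+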
+def user_input_parameters(module):
+ return {
+ "dedicated_bandwidth": module.params.get("dedicated_bandwidth"),
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "ip_version": module.params.get("ip_version"),
+ "ipv4_address": module.params.get("ipv4_address"),
+ "port_id": module.params.get("port_id"),
+ "shared_bandwidth_id": module.params.get("shared_bandwidth_id"),
+ "type": module.params.get("type"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["publicip", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ if module.params["port_id"]:
+ module.params["port_id"] = ""
+ update(config)
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "publicips/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["ip_version"])
+ if v:
+ query_params.append("ip_version=" + str(v))
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "publicips" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
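+# search_resource() above matches candidates structurally:
+# _build_identity_object() renders the user input in the shape of a list item
+# (with fields the user cannot know, such as id and status, set to None), and
+# are_different_dicts() from hwc_utils compares it against each normalized
+# item; it appears to treat None in the expected object as "don't care",
+# since listed items always carry a real id while the identity object sets
+# id to None.
+
+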
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_bandwidth(opts, None)
+ if not is_empty_value(v):
+ params["bandwidth"] = v
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = expand_create_publicip(opts, None)
+ if not is_empty_value(v):
+ params["publicip"] = v
+
+ return params
+
+
+def expand_create_bandwidth(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ if not (v or sbwid):
+ raise Exception("must input shared_bandwidth_id or "
+ "dedicated_bandwidth")
+
+ if sbwid:
+ return {
+ "id": sbwid,
+ "share_type": "WHOLE"}
+
+ return {
+ "charge_mode": v["charge_mode"],
+ "name": v["name"],
+ "share_type": "PER",
+ "size": v["size"]}
+
+
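+# expand_create_bandwidth() above enforces that exactly one of
+# shared_bandwidth_id and dedicated_bandwidth is supplied. Illustrative
+# (hypothetical) inputs and outputs:
+#
+#     {"shared_bandwidth_id": "bw-1", "dedicated_bandwidth": None}
+#     # -> {"id": "bw-1", "share_type": "WHOLE"}
+#
+#     {"shared_bandwidth_id": None,
+#      "dedicated_bandwidth": {"charge_mode": "traffic",
+#                              "name": "bw", "size": 1}}
+#     # -> {"charge_mode": "traffic", "name": "bw",
+#     #     "share_type": "PER", "size": 1}
+
+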
+def expand_create_publicip(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["ipv4_address"], array_index)
+ if not is_empty_value(v):
+ r["ip_address"] = v
+
+ v = navigate_value(d, ["ip_version"], array_index)
+ if not is_empty_value(v):
+ r["ip_version"] = v
+
+ v = navigate_value(d, ["type"], array_index)
+ if not is_empty_value(v):
+ r["type"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "publicips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "publicip_id": ["publicip", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "publicips/{publicip_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_version"], None)
+ if not is_empty_value(v):
+ params["ip_version"] = v
+
+ v = navigate_value(opts, ["port_id"], None)
+ if v is not None:
+ params["port_id"] = v
+
+ if not params:
+ return params
+
+ params = {"publicip": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ url = build_path(module, "publicips/{id}")
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["publicip", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ None,
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_eip): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "publicips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "publicips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "create_time"], array_index)
+ r["create_time"] = v
+
+ v = r.get("dedicated_bandwidth")
+ v = flatten_dedicated_bandwidth(response, array_index, v, exclude_output)
+ r["dedicated_bandwidth"] = v
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "ip_version"], array_index)
+ r["ip_version"] = v
+
+ v = navigate_value(response, ["read", "public_ip_address"], array_index)
+ r["ipv4_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "public_ipv6_address"],
+ array_index)
+ r["ipv6_address"] = v
+
+ v = navigate_value(response, ["read", "port_id"], array_index)
+ r["port_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "private_ip_address"],
+ array_index)
+ r["private_ip_address"] = v
+
+ v = r.get("shared_bandwidth_id")
+ v = flatten_shared_bandwidth_id(response, array_index, v, exclude_output)
+ r["shared_bandwidth_id"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ return r
+
+
+def flatten_dedicated_bandwidth(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+ if not (v and v == "PER"):
+ return current_value
+
+ result = current_value
+ if not result:
+ result = dict()
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+ if v is not None:
+ result["id"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_name"], array_index)
+ if v is not None:
+ result["name"] = v
+
+ v = navigate_value(d, ["read", "bandwidth_size"], array_index)
+ if v is not None:
+ result["size"] = v
+
+ return result if result else current_value
+
+
+def flatten_shared_bandwidth_id(d, array_index, current_value, exclude_output):
+ v = navigate_value(d, ["read", "bandwidth_id"], array_index)
+
+ v1 = navigate_value(d, ["read", "bandwidth_share_type"], array_index)
+
+    return v if v1 == "WHOLE" else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_eip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["publicips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_bandwidth_id(all_opts, None)
+ result["bandwidth_id"] = v
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "name"], None)
+ result["bandwidth_name"] = v
+
+ result["bandwidth_share_type"] = None
+
+ v = navigate_value(all_opts, ["dedicated_bandwidth", "size"], None)
+ result["bandwidth_size"] = v
+
+ result["create_time"] = None
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_version"], None)
+ result["ip_version"] = v
+
+ v = navigate_value(all_opts, ["port_id"], None)
+ result["port_id"] = v
+
+ result["private_ip_address"] = None
+
+ v = navigate_value(all_opts, ["ipv4_address"], None)
+ result["public_ip_address"] = v
+
+ result["public_ipv6_address"] = None
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ return result
+
+
+def expand_list_bandwidth_id(d, array_index):
+ v = navigate_value(d, ["dedicated_bandwidth"], array_index)
+ sbwid = navigate_value(d, ["shared_bandwidth_id"], array_index)
+ if v and sbwid:
+ raise Exception("don't input shared_bandwidth_id and "
+ "dedicated_bandwidth at same time")
+
+ return sbwid
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["bandwidth_id"] = body.get("bandwidth_id")
+
+ result["bandwidth_name"] = body.get("bandwidth_name")
+
+ result["bandwidth_share_type"] = body.get("bandwidth_share_type")
+
+ result["bandwidth_size"] = body.get("bandwidth_size")
+
+ result["create_time"] = body.get("create_time")
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["ip_version"] = body.get("ip_version")
+
+ result["port_id"] = body.get("port_id")
+
+ result["private_ip_address"] = body.get("private_ip_address")
+
+ result["public_ip_address"] = body.get("public_ip_address")
+
+ result["public_ipv6_address"] = body.get("public_ipv6_address")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ result["type"] = body.get("type")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
new file mode 100644
index 000000000..2d6832ce5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_peering_connect.py
@@ -0,0 +1,698 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_peering_connect
+description:
+    - VPC peering management.
+short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+            - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ required: true
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ required: true
+ suboptions:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ required: true
+ project_id:
+ description:
+                    - Specifies the ID of the project that the peering VPC
+                      belongs to.
+ type: str
+ required: false
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a peering connection
+- name: Create a local vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ register: vpc1
+- name: Create a peering vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ register: vpc2
+- name: Create a peering connect
+ community.general.hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+'''
+
+RETURN = '''
+ local_vpc_id:
+ description:
+            - Specifies the ID of the local VPC.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the name of the VPC peering connection. The value can
+ contain 1 to 64 characters.
+ type: str
+ returned: success
+ peering_vpc:
+ description:
+ - Specifies information about the peering VPC.
+ type: dict
+ returned: success
+ contains:
+ vpc_id:
+ description:
+                    - Specifies the ID of the peering VPC.
+ type: str
+ returned: success
+ project_id:
+ description:
+                    - Specifies the ID of the project that the peering VPC
+                      belongs to.
+ type: str
+ returned: success
+ description:
+ description:
+            - The description of the VPC peering connection.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ local_vpc_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ peering_vpc=dict(type='dict', required=True, options=dict(
+ vpc_id=dict(type='str', required=True),
+ project_id=dict(type='str')
+ )),
+ description=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "local_vpc_id": module.params.get("local_vpc_id"),
+ "name": module.params.get("name"),
+ "peering_vpc": module.params.get("peering_vpc"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["peering", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
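+ # Poll the peering URL until the API returns 404, which means the
+ # deletion finished; other errors are retried until the timeout expires.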
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["local_vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/peerings" + query_link
+
+ result = []
+ p = {'marker': ''}
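+ # The list API pages results via 'marker' (the last seen ID), 10 items
+ # at a time; stop early once more than one match is found so main() can
+ # abort with an error.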
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = expand_create_accept_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["accept_vpc_info"] = v
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_request_vpc_info(opts, None)
+ if not is_empty_value(v):
+ params["request_vpc_info"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def expand_create_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ if not is_empty_value(v):
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def expand_create_request_vpc_info(d, array_index):
+ r = dict()
+
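+ # The tenant ID of the requesting side is intentionally left blank;
+ # only the local VPC ID is sent.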
+ r["tenant_id"] = ""
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ if not is_empty_value(v):
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/peerings"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "peering_id": ["peering", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["peering", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["PENDING_ACCEPTANCE"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_peering_connect): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ if not params:
+ return params
+
+ params = {"peering": params}
+
+ return params
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/peerings/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peering"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ v = fill_read_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_read_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "request_vpc_info", "vpc_id"],
+ array_index)
+ r["local_vpc_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = r.get("peering_vpc")
+ v = flatten_peering_vpc(response, array_index, v, exclude_output)
+ r["peering_vpc"] = v
+
+ return r
+
+
+def flatten_peering_vpc(d, array_index, current_value, exclude_output):
+ result = current_value
+ has_init_value = True
+ if not result:
+ result = dict()
+ has_init_value = False
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "tenant_id"],
+ array_index)
+ result["project_id"] = v
+
+ v = navigate_value(d, ["read", "accept_vpc_info", "vpc_id"], array_index)
+ result["vpc_id"] = v
+
+ if has_init_value:
+ return result
+
+ for v in result.values():
+ if v is not None:
+ return result
+ return current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_peering_connect): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["peerings"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = expand_list_accept_vpc_info(all_opts, None)
+ result["accept_vpc_info"] = v
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = expand_list_request_vpc_info(all_opts, None)
+ result["request_vpc_info"] = v
+
+ result["status"] = None
+
+ return result
+
+
+def expand_list_accept_vpc_info(d, array_index):
+ r = dict()
+
+ v = navigate_value(d, ["peering_vpc", "project_id"], array_index)
+ r["tenant_id"] = v
+
+ v = navigate_value(d, ["peering_vpc", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def expand_list_request_vpc_info(d, array_index):
+ r = dict()
+
+ r["tenant_id"] = None
+
+ v = navigate_value(d, ["local_vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ for v in r.values():
+ if v is not None:
+ return r
+ return None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ v = fill_list_resp_accept_vpc_info(body.get("accept_vpc_info"))
+ result["accept_vpc_info"] = v
+
+ result["description"] = body.get("description")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_request_vpc_info(body.get("request_vpc_info"))
+ result["request_vpc_info"] = v
+
+ result["status"] = body.get("status")
+
+ return result
+
+
+def fill_list_resp_accept_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_request_vpc_info(value):
+ if not value:
+ return None
+
+ result = dict()
+
+ result["tenant_id"] = value.get("tenant_id")
+
+ result["vpc_id"] = value.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
new file mode 100644
index 000000000..2d830493d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_port.py
@@ -0,0 +1,1167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_port
+description:
+ - VPC port management.
+short_description: Creates a resource of Vpc/Port in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+ - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+ - The timeout for the create operation.
+ type: str
+ default: '15m'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ required: true
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ required: false
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ required: false
+ type: list
+ elements: dict
+ suboptions:
+ ip_address:
+ description:
+ - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ required: false
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ required: false
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended DHCP options.
+ type: list
+ elements: dict
+ required: false
+ suboptions:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ required: false
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ required: false
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ required: false
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ required: false
+ security_groups:
+ description:
+ - Specifies the IDs of the security groups.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a port
+- name: Create vpc
+  hwc_network_vpc:
+    cidr: "192.168.100.0/24"
+    name: "ansible_network_vpc_test"
+  register: vpc
+- name: Create subnet
+  hwc_vpc_subnet:
+    gateway_ip: "192.168.100.32"
+    name: "ansible_network_subnet_test"
+    dhcp_enable: true
+    vpc_id: "{{ vpc.id }}"
+    cidr: "192.168.100.0/26"
+  register: subnet
+- name: Create a port
+  community.general.hwc_vpc_port:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.33"
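+
+# An illustrative variant (the security group ID variable is hypothetical)
+# that also pins an allowed address pair:
+- name: Create a port with extra settings
+  community.general.hwc_vpc_port:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.34"
+    allowed_address_pairs:
+      - ip_address: "192.168.100.35"
+    security_groups:
+      - "{{ my_security_group_id }}"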
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet to which the port belongs.
+ type: str
+ returned: success
+ admin_state_up:
+ description:
+ - Specifies the administrative state of the port.
+ type: bool
+ returned: success
+ allowed_address_pairs:
+ description:
+ - Specifies a set of zero or more allowed address pairs.
+ type: list
+ returned: success
+ contains:
+ ip_address:
+ description:
+ - Specifies the IP address. It cannot be set to 0.0.0.0.
+ Configure an independent security group for the port if a
+ large CIDR block (subnet mask less than 24) is configured
+ for parameter allowed_address_pairs.
+ type: str
+ returned: success
+ mac_address:
+ description:
+ - Specifies the MAC address.
+ type: str
+ returned: success
+ extra_dhcp_opts:
+ description:
+ - Specifies the extended DHCP options.
+ type: list
+ returned: success
+ contains:
+ name:
+ description:
+ - Specifies the option name.
+ type: str
+ returned: success
+ value:
+ description:
+ - Specifies the option value.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the port IP address.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the port name. The value can contain no more than 255
+ characters.
+ type: str
+ returned: success
+ security_groups:
+ description:
+ - Specifies the IDs of the security groups.
+ type: list
+ returned: success
+ mac_address:
+ description:
+ - Specifies the port MAC address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ ), default=dict()),
+ subnet_id=dict(type='str', required=True),
+ admin_state_up=dict(type='bool'),
+ allowed_address_pairs=dict(
+ type='list', elements='dict',
+ options=dict(
+ ip_address=dict(type='str'),
+ mac_address=dict(type='str')
+ ),
+ ),
+ extra_dhcp_opts=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str'),
+ value=dict(type='str')
+ )),
+ ip_address=dict(type='str'),
+ name=dict(type='str'),
+ security_groups=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "admin_state_up": module.params.get("admin_state_up"),
+ "allowed_address_pairs": module.params.get("allowed_address_pairs"),
+ "extra_dhcp_opts": module.params.get("extra_dhcp_opts"),
+ "ip_address": module.params.get("ip_address"),
+ "name": module.params.get("name"),
+ "security_groups": module.params.get("security_groups"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["port", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ send_update_request(module, params, client)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "ports/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
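+ # A port may carry several fixed IPs; only the first entry is mapped
+ # back to the module's ip_address option.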
+ array_index = {
+ "read.fixed_ips": 0,
+ }
+
+ return update_properties(module, res, array_index, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["subnet_id"])
+ if v:
+ query_params.append("network_id=" + str(v))
+
+ v = navigate_value(opts, ["name"])
+ if v:
+ query_params.append("name=" + str(v))
+
+ v = navigate_value(opts, ["admin_state_up"])
+ if v:
+ query_params.append("admin_state_up=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "ports" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["admin_state_up"], None)
+ if not is_empty_value(v):
+ params["admin_state_up"] = v
+
+ v = expand_create_allowed_address_pairs(opts, None)
+ if not is_empty_value(v):
+ params["allowed_address_pairs"] = v
+
+ v = expand_create_extra_dhcp_opts(opts, None)
+ if not is_empty_value(v):
+ params["extra_dhcp_opts"] = v
+
+ v = expand_create_fixed_ips(opts, None)
+ if not is_empty_value(v):
+ params["fixed_ips"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["network_id"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_create_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_create_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
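+ # The module exposes a single ip_address option, so at most one
+ # fixed_ips entry is ever built.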
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_create_request(module, params, client):
+ url = "ports"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "port_id": ["port", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "ports/{port_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["port", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE", "DOWN"],
+ ["BUILD"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_port): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = expand_update_allowed_address_pairs(opts, None)
+ if v is not None:
+ params["allowed_address_pairs"] = v
+
+ v = expand_update_extra_dhcp_opts(opts, None)
+ if v is not None:
+ params["extra_dhcp_opts"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["security_groups"], None)
+ if not is_empty_value(v):
+ params["security_groups"] = v
+
+ if not params:
+ return params
+
+ params = {"port": params}
+
+ return params
+
+
+def expand_update_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ if not is_empty_value(v):
+ transformed["mac_address"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def expand_update_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+ if not v:
+ return req
+ n = len(v)
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ if not is_empty_value(v):
+ transformed["opt_value"] = v
+
+ if transformed:
+ req.append(transformed)
+
+ return req
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "ports/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "ports/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["port"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_read_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_read_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_read_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_read_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_read_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "admin_state_up"], array_index)
+ r["admin_state_up"] = v
+
+ v = r.get("allowed_address_pairs")
+ v = flatten_allowed_address_pairs(response, array_index, v, exclude_output)
+ r["allowed_address_pairs"] = v
+
+ v = r.get("extra_dhcp_opts")
+ v = flatten_extra_dhcp_opts(response, array_index, v, exclude_output)
+ r["extra_dhcp_opts"] = v
+
+ v = navigate_value(response, ["read", "fixed_ips", "ip_address"],
+ array_index)
+ r["ip_address"] = v
+
+ if not exclude_output:
+ v = navigate_value(response, ["read", "mac_address"], array_index)
+ r["mac_address"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "security_groups"], array_index)
+ r["security_groups"] = v
+
+ v = navigate_value(response, ["read", "network_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def flatten_allowed_address_pairs(d, array_index,
+ current_value, exclude_output):
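+ # Merge the pairs reported by the API into the user-supplied list:
+ # existing entries are updated in place, and a new entry is appended
+ # only when at least one of its fields is set.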
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "allowed_address_pairs"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.allowed_address_pairs"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "ip_address"],
+ new_array_index)
+ val["ip_address"] = v
+
+ v = navigate_value(d, ["read", "allowed_address_pairs", "mac_address"],
+ new_array_index)
+ val["mac_address"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def flatten_extra_dhcp_opts(d, array_index, current_value, exclude_output):
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "extra_dhcp_opts"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.extra_dhcp_opts"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_name"],
+ new_array_index)
+ val["name"] = v
+
+ v = navigate_value(d, ["read", "extra_dhcp_opts", "opt_value"],
+ new_array_index)
+ val["value"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_port): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["ports"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["admin_state_up"], None)
+ result["admin_state_up"] = v
+
+ v = expand_list_allowed_address_pairs(all_opts, None)
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = None
+
+ result["binding_vnic_type"] = None
+
+ result["device_id"] = None
+
+ result["device_owner"] = None
+
+ result["dns_name"] = None
+
+ v = expand_list_extra_dhcp_opts(all_opts, None)
+ result["extra_dhcp_opts"] = v
+
+ v = expand_list_fixed_ips(all_opts, None)
+ result["fixed_ips"] = v
+
+ result["id"] = None
+
+ result["mac_address"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["network_id"] = v
+
+ v = navigate_value(all_opts, ["security_groups"], None)
+ result["security_groups"] = v
+
+ result["status"] = None
+
+ result["tenant_id"] = None
+
+ return result
+
+
+def expand_list_allowed_address_pairs(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["allowed_address_pairs"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["allowed_address_pairs"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["allowed_address_pairs", "ip_address"],
+ new_array_index)
+ transformed["ip_address"] = v
+
+ v = navigate_value(d, ["allowed_address_pairs", "mac_address"],
+ new_array_index)
+ transformed["mac_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_extra_dhcp_opts(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ v = navigate_value(d, ["extra_dhcp_opts"],
+ new_array_index)
+
+ n = len(v) if v else 1
+ for i in range(n):
+ new_array_index["extra_dhcp_opts"] = i
+ transformed = dict()
+
+ v = navigate_value(d, ["extra_dhcp_opts", "name"], new_array_index)
+ transformed["opt_name"] = v
+
+ v = navigate_value(d, ["extra_dhcp_opts", "value"], new_array_index)
+ transformed["opt_value"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def expand_list_fixed_ips(d, array_index):
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ req = []
+
+ n = 1
+ for i in range(n):
+ transformed = dict()
+
+ v = navigate_value(d, ["ip_address"], new_array_index)
+ transformed["ip_address"] = v
+
+ for v in transformed.values():
+ if v is not None:
+ req.append(transformed)
+ break
+
+ return req if req else None
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["admin_state_up"] = body.get("admin_state_up")
+
+ v = fill_list_resp_allowed_address_pairs(body.get("allowed_address_pairs"))
+ result["allowed_address_pairs"] = v
+
+ result["binding_host_id"] = body.get("binding_host_id")
+
+ result["binding_vnic_type"] = body.get("binding_vnic_type")
+
+ result["device_id"] = body.get("device_id")
+
+ result["device_owner"] = body.get("device_owner")
+
+ result["dns_name"] = body.get("dns_name")
+
+ v = fill_list_resp_extra_dhcp_opts(body.get("extra_dhcp_opts"))
+ result["extra_dhcp_opts"] = v
+
+ v = fill_list_resp_fixed_ips(body.get("fixed_ips"))
+ result["fixed_ips"] = v
+
+ result["id"] = body.get("id")
+
+ result["mac_address"] = body.get("mac_address")
+
+ result["name"] = body.get("name")
+
+ result["network_id"] = body.get("network_id")
+
+ result["security_groups"] = body.get("security_groups")
+
+ result["status"] = body.get("status")
+
+ result["tenant_id"] = body.get("tenant_id")
+
+ return result
+
+
+def fill_list_resp_allowed_address_pairs(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ val["mac_address"] = item.get("mac_address")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_extra_dhcp_opts(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["opt_name"] = item.get("opt_name")
+
+ val["opt_value"] = item.get("opt_value")
+
+ result.append(val)
+
+ return result
+
+
+def fill_list_resp_fixed_ips(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["ip_address"] = item.get("ip_address")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
new file mode 100644
index 000000000..c57ddc670
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_private_ip.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_private_ip
+description:
+ - VPC private IP management.
+short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over I(subnet_id) and I(ip_address) for private IP selection.
+ - I(subnet_id) and I(ip_address) are used for private IP selection. If more than one private IP with these options exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned. Cannot be changed after creating the private IP.
+ type: str
+ required: true
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address. Cannot be changed after
+ creating the private IP.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a private ip
+- name: Create vpc
+  hwc_network_vpc:
+    cidr: "192.168.100.0/24"
+    name: "ansible_network_vpc_test"
+  register: vpc
+- name: Create subnet
+  hwc_vpc_subnet:
+    gateway_ip: "192.168.100.32"
+    name: "ansible_network_subnet_test"
+    dhcp_enable: true
+    vpc_id: "{{ vpc.id }}"
+    cidr: "192.168.100.0/26"
+  register: subnet
+- name: Create a private ip
+  community.general.hwc_vpc_private_ip:
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.33"
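+
+# Removing the private IP again (an illustrative sketch; the same
+# subnet_id/ip_address pair selects the resource):
+- name: Delete a private ip
+  community.general.hwc_vpc_private_ip:
+    state: absent
+    subnet_id: "{{ subnet.id }}"
+    ip_address: "192.168.100.33"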
+'''
+
+RETURN = '''
+ subnet_id:
+ description:
+ - Specifies the ID of the subnet from which IP addresses are
+ assigned.
+ type: str
+ returned: success
+ ip_address:
+ description:
+ - Specifies the target IP address. The value can be an available IP
+ address in the subnet. If it is not specified, the system
+ automatically assigns an IP address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ subnet_id=dict(type='str', required=True),
+ ip_address=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) of an"
+ " existing resource (%s)." % (current, expect, module.params.get('id')))
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "ip_address": module.params.get("ip_address"),
+ "subnet_id": module.params.get("subnet_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
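+ # The create API returns a list of private IPs; record the ID of the
+ # first (and only) one created.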
+ module.params['id'] = navigate_value(r, ["privateips", "id"],
+ {"privateips": 0})
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = build_path(module, "subnets/{subnet_id}/privateips") + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["ip_address"], None)
+ if not is_empty_value(v):
+ params["ip_address"] = v
+
+ v = navigate_value(opts, ["subnet_id"], None)
+ if not is_empty_value(v):
+ params["subnet_id"] = v
+
+ if not params:
+ return params
+
+ params = {"privateips": [params]}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "privateips"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "privateips/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "privateips/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateip"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "ip_address"], array_index)
+ r["ip_address"] = v
+
+ v = navigate_value(response, ["read", "subnet_id"], array_index)
+ r["subnet_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_private_ip): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["privateips"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["ip_address"], None)
+ result["ip_address"] = v
+
+ v = navigate_value(all_opts, ["subnet_id"], None)
+ result["subnet_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["id"] = body.get("id")
+
+ result["ip_address"] = body.get("ip_address")
+
+ result["subnet_id"] = body.get("subnet_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
new file mode 100644
index 000000000..1612cac50
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_route.py
@@ -0,0 +1,443 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_route
+description:
+ - VPC route management.
+short_description: Creates a resource of Vpc/Route in Huawei Cloud
+notes:
+ - If the I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection.
+ - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with these options exists, execution is aborted.
+ - No parameter supports updating. If any option is changed, the module will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ required: true
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ required: true
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ required: false
+ default: 'peering'
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a route
+- name: Create a local vpc
+  hwc_network_vpc:
+    cidr: "192.168.0.0/16"
+    name: "ansible_network_vpc_test_local"
+  register: vpc1
+- name: Create a peering vpc
+  hwc_network_vpc:
+    cidr: "192.168.0.0/16"
+    name: "ansible_network_vpc_test_peering"
+  register: vpc2
+- name: Create a peering connection
+  community.general.hwc_vpc_peering_connect:
+    local_vpc_id: "{{ vpc1.id }}"
+    name: "ansible_network_peering_test"
+    peering_vpc:
+      vpc_id: "{{ vpc2.id }}"
+  register: connect
+- name: Create a route
+  community.general.hwc_vpc_route:
+    vpc_id: "{{ vpc1.id }}"
+    destination: "192.168.0.0/16"
+    next_hop: "{{ connect.id }}"
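+
+# Routes cannot be updated in place; to change one, delete and recreate it
+# (an illustrative sketch):
+- name: Delete a route
+  community.general.hwc_vpc_route:
+    state: absent
+    vpc_id: "{{ vpc1.id }}"
+    destination: "192.168.0.0/16"
+    next_hop: "{{ connect.id }}"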
+'''
+
+RETURN = '''
+ id:
+ description:
+ - UUID of the route.
+ type: str
+ returned: success
+ destination:
+ description:
+ - Specifies the destination IP address or CIDR block.
+ type: str
+ returned: success
+ next_hop:
+ description:
+ - Specifies the next hop. The value is a VPC peering connection ID.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the VPC ID to which the route is added.
+ type: str
+ returned: success
+ type:
+ description:
+ - Specifies the type of route.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ destination=dict(type='str', required=True),
+ next_hop=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ type=dict(type='str', default='peering'),
+ id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
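+ # A given ID takes precedence: read the route directly and refuse any
+ # option drift; otherwise search by destination/vpc_id/type/next_hop.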
+ if module.params.get("id"):
+ resource = get_resource_by_id(config)
+ if module.params['state'] == 'present':
+ opts = user_input_parameters(module)
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing route.(%s)" % (resource, opts,
+ config.module.params.get(
+ 'id')))
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+ # resource stays None when check mode skipped the create call
+ result = resource or {}
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "destination": module.params.get("destination"),
+ "next_hop": module.params.get("next_hop"),
+ "type": module.params.get("type"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["route", "id"])
+
+ result = update_properties(module, {"read": fill_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ send_delete_request(module, None, client)
+
+
+def get_resource_by_id(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_resp_body(r)
+
+ result = update_properties(module, res, None, exclude_output)
+ return result
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["type"])
+ if v:
+ query_params.append("type=" + str(v))
+
+ v = navigate_value(opts, ["destination"])
+ if v:
+ query_params.append("destination=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "network", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "v2.0/vpc/routes" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["destination"], None)
+ if not is_empty_value(v):
+ params["destination"] = v
+
+ v = navigate_value(opts, ["next_hop"], None)
+ if not is_empty_value(v):
+ params["nexthop"] = v
+
+ v = navigate_value(opts, ["type"], None)
+ if not is_empty_value(v):
+ params["type"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"route": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "v2.0/vpc/routes"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "v2.0/vpc/routes/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["route"], None)
+
+
+def fill_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "destination"], array_index)
+ r["destination"] = v
+
+ v = navigate_value(response, ["read", "nexthop"], array_index)
+ r["next_hop"] = v
+
+ v = navigate_value(response, ["read", "type"], array_index)
+ r["type"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_route): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["routes"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["destination"], None)
+ result["destination"] = v
+
+ v = navigate_value(all_opts, ["id"], None)
+ result["id"] = v
+
+ v = navigate_value(all_opts, ["next_hop"], None)
+ result["nexthop"] = v
+
+ v = navigate_value(all_opts, ["type"], None)
+ result["type"] = v
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["destination"] = body.get("destination")
+
+ result["id"] = body.get("id")
+
+ result["nexthop"] = body.get("nexthop")
+
+ result["type"] = body.get("type")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
new file mode 100644
index 000000000..c210b912d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group.py
@@ -0,0 +1,650 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group
+description:
+ - vpc security group management.
+short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud
+notes:
+  - If the I(id) option is provided, it takes precedence over I(name),
+      I(enterprise_project_id) and I(vpc_id) for security group selection.
+  - I(name), I(enterprise_project_id) and I(vpc_id) are used for security
+      group selection. If more than one security group matching these options
+      exists, execution is aborted.
+  - No parameter supports updating. If any option is changed, the module
+      will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ enterprise_project_id:
+ description:
+          - Specifies the enterprise project ID. When creating a security
+            group, associate the enterprise project ID with the security
+            group.
+ type: str
+ required: false
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a security group
+- name: Create a security group
+ community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+'''
+
+RETURN = '''
+ name:
+ description:
+ - Specifies the security group name. The value is a string of 1 to
+ 64 characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ enterprise_project_id:
+ description:
+ - Specifies the enterprise project ID. When creating a security
+ group, associate the enterprise project ID with the security
+ group.
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the resource ID of the VPC to which the security group
+ belongs.
+ type: str
+ returned: success
+ rules:
+ description:
+ - Specifies the security group rule, which ensures that resources
+ in the security group can communicate with one another.
+ type: complex
+ returned: success
+ contains:
+ description:
+ description:
+ - Provides supplementary information about the security
+ group rule.
+ type: str
+ returned: success
+ direction:
+ description:
+ - Specifies the direction of access control. The value can
+ be egress or ingress.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4
+ or IPv6.
+ type: str
+ returned: success
+ id:
+ description:
+ - Specifies the security group rule ID.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to
+ 65535. If the protocol is not icmp, the value cannot be
+ smaller than the port_range_min value. An empty value
+ indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1
+ to 65535. The value cannot be greater than the
+ port_range_max value. An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp,
+ udp, or others. If the parameter is left blank, the
+ security group supports all protocols.
+ type: str
+ returned: success
+ remote_address_group_id:
+ description:
+ - Specifies the ID of remote IP address group.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control
+ direction is set to egress, the parameter specifies the
+ source IP address. If the access control direction is set
+ to ingress, the parameter specifies the destination IP
+ address.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ name=dict(type='str', required=True),
+ enterprise_project_id=dict(type='str'),
+ vpc_id=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get("id"):
+ resource = read_resource(config)
+ if module.params['state'] == 'present':
+ check_resource_option(resource, module)
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = update_properties(module, {"read": v[0]}, None)
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ resource = create(config)
+ changed = True
+
+            # resource stays None when running in check mode; fall back to an
+            # empty dict so exit_json(**result) below does not fail
+            result = resource or dict()
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "enterprise_project_id": module.params.get("enterprise_project_id"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ "id": module.params.get("id"),
+ }
+
+
+def check_resource_option(resource, module):
+ opts = user_input_parameters(module)
+
+ resource = {
+ "enterprise_project_id": resource.get("enterprise_project_id"),
+ "name": resource.get("name"),
+ "vpc_id": resource.get("vpc_id"),
+ "id": resource.get("id"),
+ }
+
+ if are_different_dicts(resource, opts):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (resource, opts,
+ module.params.get('id')))
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group", "id"])
+
+ result = update_properties(module, {"read": fill_read_resp_body(r)}, None)
+ return result
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_params = []
+
+ v = navigate_value(opts, ["enterprise_project_id"])
+ if v:
+ query_params.append("enterprise_project_id=" + str(v))
+
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_params.append("vpc_id=" + str(v))
+
+ query_link = "?marker={marker}&limit=10"
+ if query_params:
+ query_link += "&" + "&".join(query_params)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-groups" + query_link
+
+ result = []
+ p = {'marker': ''}
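+    # page through the list API using marker-based pagination; stop early once
+    # more than one match has been found, since the caller treats multiple
+    # matches as an error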
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["enterprise_project_id"], None)
+ if not is_empty_value(v):
+ params["enterprise_project_id"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-groups"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-groups/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-groups/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_read_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_read_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "enterprise_project_id"],
+ array_index)
+ r["enterprise_project_id"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ if not exclude_output:
+ v = r.get("rules")
+ v = flatten_rules(response, array_index, v, exclude_output)
+ r["rules"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ v = navigate_value(response, ["read", "id"], array_index)
+ r["id"] = v
+
+ return r
+
+
+def flatten_rules(d, array_index, current_value, exclude_output):
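+    # flatten the nested security_group_rules of the read response into the
+    # module's "rules" output, merging item by item with any current value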
+ n = 0
+ result = current_value
+ has_init_value = True
+ if result:
+ n = len(result)
+ else:
+ has_init_value = False
+ result = []
+ v = navigate_value(d, ["read", "security_group_rules"],
+ array_index)
+ if not v:
+ return current_value
+ n = len(v)
+
+ new_array_index = dict()
+ if array_index:
+ new_array_index.update(array_index)
+
+ for i in range(n):
+ new_array_index["read.security_group_rules"] = i
+
+ val = dict()
+ if len(result) >= (i + 1) and result[i]:
+ val = result[i]
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "description"],
+ new_array_index)
+ val["description"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "direction"],
+ new_array_index)
+ val["direction"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "ethertype"],
+ new_array_index)
+ val["ethertype"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "id"],
+ new_array_index)
+ val["id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_max"],
+ new_array_index)
+ val["port_range_max"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "port_range_min"],
+ new_array_index)
+ val["port_range_min"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "protocol"],
+ new_array_index)
+ val["protocol"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_address_group_id"],
+ new_array_index)
+ val["remote_address_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_group_id"],
+ new_array_index)
+ val["remote_group_id"] = v
+
+ if not exclude_output:
+ v = navigate_value(d, ["read", "security_group_rules", "remote_ip_prefix"],
+ new_array_index)
+ val["remote_ip_prefix"] = v
+
+ if len(result) >= (i + 1):
+ result[i] = val
+ else:
+ for v in val.values():
+ if v is not None:
+ result.append(val)
+ break
+
+ return result if (has_init_value or result) else current_value
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_groups"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["enterprise_project_id"], None)
+ result["enterprise_project_id"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["security_group_rules"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["enterprise_project_id"] = body.get("enterprise_project_id")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ v = fill_list_resp_security_group_rules(body.get("security_group_rules"))
+ result["security_group_rules"] = v
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def fill_list_resp_security_group_rules(value):
+ if not value:
+ return None
+
+ result = []
+ for item in value:
+ val = dict()
+
+ val["description"] = item.get("description")
+
+ val["direction"] = item.get("direction")
+
+ val["ethertype"] = item.get("ethertype")
+
+ val["id"] = item.get("id")
+
+ val["port_range_max"] = item.get("port_range_max")
+
+ val["port_range_min"] = item.get("port_range_min")
+
+ val["protocol"] = item.get("protocol")
+
+ val["remote_address_group_id"] = item.get("remote_address_group_id")
+
+ val["remote_group_id"] = item.get("remote_group_id")
+
+ val["remote_ip_prefix"] = item.get("remote_ip_prefix")
+
+ val["security_group_id"] = item.get("security_group_id")
+
+ result.append(val)
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
new file mode 100644
index 000000000..bfb5d6a61
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_security_group_rule.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_security_group_rule
+description:
+    - vpc security group rule management.
+short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud
+notes:
+  - If the I(id) option is provided, it takes precedence over
+      I(security_group_id) for security group rule selection.
+  - I(security_group_id) is used for security group rule selection. If more
+      than one security group rule matching this option exists, execution is
+      aborted.
+  - No parameter supports updating. If any option is changed, the module
+      will create a new resource.
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ required: true
+ security_group_id:
+ description:
+            - Specifies the ID of the security group to which the rule
+              belongs.
+ type: str
+ required: true
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ required: false
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ required: false
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ required: false
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ required: false
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ required: false
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ required: false
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create a security group rule
+- name: Create a security group
+  community.general.hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ register: sg
+- name: Create a security group rule
+ community.general.hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 22
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
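+
+# a sketch of removing the rule again; the same selection options are
+# repeated so the module can find the rule before deleting it
+- name: Delete the security group rule
+  community.general.hwc_vpc_security_group_rule:
+    direction: "ingress"
+    protocol: "tcp"
+    ethertype: "IPv4"
+    port_range_max: 22
+    security_group_id: "{{ sg.id }}"
+    port_range_min: 22
+    remote_ip_prefix: "0.0.0.0/0"
+    state: absent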
+'''
+
+RETURN = '''
+ direction:
+ description:
+ - Specifies the direction of access control. The value can be
+ egress or ingress.
+ type: str
+ returned: success
+ security_group_id:
+ description:
+            - Specifies the ID of the security group to which the rule
+              belongs.
+ type: str
+ returned: success
+ description:
+ description:
+ - Provides supplementary information about the security group rule.
+ The value is a string of no more than 255 characters that can
+ contain letters and digits.
+ type: str
+ returned: success
+ ethertype:
+ description:
+ - Specifies the IP protocol version. The value can be IPv4 or IPv6.
+ If you do not set this parameter, IPv4 is used by default.
+ type: str
+ returned: success
+ port_range_max:
+ description:
+ - Specifies the end port number. The value ranges from 1 to 65535.
+ If the protocol is not icmp, the value cannot be smaller than the
+ port_range_min value. An empty value indicates all ports.
+ type: int
+ returned: success
+ port_range_min:
+ description:
+ - Specifies the start port number. The value ranges from 1 to
+ 65535. The value cannot be greater than the port_range_max value.
+ An empty value indicates all ports.
+ type: int
+ returned: success
+ protocol:
+ description:
+ - Specifies the protocol type. The value can be icmp, tcp, or udp.
+ If the parameter is left blank, the security group supports all
+ protocols.
+ type: str
+ returned: success
+ remote_group_id:
+ description:
+ - Specifies the ID of the peer security group. The value is
+ exclusive with parameter remote_ip_prefix.
+ type: str
+ returned: success
+ remote_ip_prefix:
+ description:
+ - Specifies the remote IP address. If the access control direction
+ is set to egress, the parameter specifies the source IP address.
+ If the access control direction is set to ingress, the parameter
+ specifies the destination IP address. The value can be in the
+ CIDR format or IP addresses. The parameter is exclusive with
+ parameter remote_group_id.
+ type: str
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcModule, are_different_dicts, build_path,
+ get_region, is_empty_value, navigate_value)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ direction=dict(type='str', required=True),
+ security_group_id=dict(type='str', required=True),
+ description=dict(type='str'),
+ ethertype=dict(type='str'),
+ port_range_max=dict(type='int'),
+ port_range_min=dict(type='int'),
+ protocol=dict(type='str'),
+ remote_group_id=dict(type='str'),
+ remote_ip_prefix=dict(type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params['id']:
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ raise Exception(
+ "Cannot change option from (%s) to (%s) for an"
+ " existing security group(%s)." % (current, expect, module.params.get('id')))
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "description": module.params.get("description"),
+ "direction": module.params.get("direction"),
+ "ethertype": module.params.get("ethertype"),
+ "port_range_max": module.params.get("port_range_max"),
+ "port_range_min": module.params.get("port_range_min"),
+ "protocol": module.params.get("protocol"),
+ "remote_group_id": module.params.get("remote_group_id"),
+ "remote_ip_prefix": module.params.get("remote_ip_prefix"),
+ "security_group_id": module.params.get("security_group_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ module.params['id'] = navigate_value(r, ["security_group_rule", "id"])
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["security_group_id"])
+ if v:
+ query_link += "&security_group_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "security-group-rules" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["description"], None)
+ if not is_empty_value(v):
+ params["description"] = v
+
+ v = navigate_value(opts, ["direction"], None)
+ if not is_empty_value(v):
+ params["direction"] = v
+
+ v = navigate_value(opts, ["ethertype"], None)
+ if not is_empty_value(v):
+ params["ethertype"] = v
+
+ v = navigate_value(opts, ["port_range_max"], None)
+ if not is_empty_value(v):
+ params["port_range_max"] = v
+
+ v = navigate_value(opts, ["port_range_min"], None)
+ if not is_empty_value(v):
+ params["port_range_min"] = v
+
+ v = navigate_value(opts, ["protocol"], None)
+ if not is_empty_value(v):
+ params["protocol"] = v
+
+ v = navigate_value(opts, ["remote_group_id"], None)
+ if not is_empty_value(v):
+ params["remote_group_id"] = v
+
+ v = navigate_value(opts, ["remote_ip_prefix"], None)
+ if not is_empty_value(v):
+ params["remote_ip_prefix"] = v
+
+ v = navigate_value(opts, ["security_group_id"], None)
+ if not is_empty_value(v):
+ params["security_group_id"] = v
+
+ if not params:
+ return params
+
+ params = {"security_group_rule": params}
+
+ return params
+
+
+def send_create_request(module, params, client):
+ url = "security-group-rules"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "security-group-rules/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rule"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "description"], array_index)
+ r["description"] = v
+
+ v = navigate_value(response, ["read", "direction"], array_index)
+ r["direction"] = v
+
+ v = navigate_value(response, ["read", "ethertype"], array_index)
+ r["ethertype"] = v
+
+ v = navigate_value(response, ["read", "port_range_max"], array_index)
+ r["port_range_max"] = v
+
+ v = navigate_value(response, ["read", "port_range_min"], array_index)
+ r["port_range_min"] = v
+
+ v = navigate_value(response, ["read", "protocol"], array_index)
+ r["protocol"] = v
+
+ v = navigate_value(response, ["read", "remote_group_id"], array_index)
+ r["remote_group_id"] = v
+
+ v = navigate_value(response, ["read", "remote_ip_prefix"], array_index)
+ r["remote_ip_prefix"] = v
+
+ v = navigate_value(response, ["read", "security_group_id"], array_index)
+ r["security_group_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_security_group_rule): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["security_group_rules"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["description"], None)
+ result["description"] = v
+
+ v = navigate_value(all_opts, ["direction"], None)
+ result["direction"] = v
+
+ v = navigate_value(all_opts, ["ethertype"], None)
+ result["ethertype"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["port_range_max"], None)
+ result["port_range_max"] = v
+
+ v = navigate_value(all_opts, ["port_range_min"], None)
+ result["port_range_min"] = v
+
+ v = navigate_value(all_opts, ["protocol"], None)
+ result["protocol"] = v
+
+ result["remote_address_group_id"] = None
+
+ v = navigate_value(all_opts, ["remote_group_id"], None)
+ result["remote_group_id"] = v
+
+ v = navigate_value(all_opts, ["remote_ip_prefix"], None)
+ result["remote_ip_prefix"] = v
+
+ v = navigate_value(all_opts, ["security_group_id"], None)
+ result["security_group_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["description"] = body.get("description")
+
+ result["direction"] = body.get("direction")
+
+ result["ethertype"] = body.get("ethertype")
+
+ result["id"] = body.get("id")
+
+ result["port_range_max"] = body.get("port_range_max")
+
+ result["port_range_min"] = body.get("port_range_min")
+
+ result["protocol"] = body.get("protocol")
+
+ result["remote_address_group_id"] = body.get("remote_address_group_id")
+
+ result["remote_group_id"] = body.get("remote_group_id")
+
+ result["remote_ip_prefix"] = body.get("remote_ip_prefix")
+
+ result["security_group_id"] = body.get("security_group_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
new file mode 100644
index 000000000..7fb107f53
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/hwc_vpc_subnet.py
@@ -0,0 +1,741 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Huawei
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+###############################################################################
+# Documentation
+###############################################################################
+
+DOCUMENTATION = '''
+---
+module: hwc_vpc_subnet
+description:
+ - subnet management.
+short_description: Creates a resource of Vpc/Subnet in Huawei Cloud
+version_added: '0.2.0'
+author: Huawei Inc. (@huaweicloud)
+requirements:
+ - keystoneauth1 >= 3.6.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether the given object should exist in Huawei Cloud.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ timeouts:
+ description:
+          - The timeouts for each operation.
+ type: dict
+ default: {}
+ suboptions:
+ create:
+ description:
+                    - The timeout for the create operation.
+ type: str
+ default: '15m'
+ update:
+ description:
+                    - The timeout for the update operation.
+ type: str
+ default: '15m'
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet. Cannot be changed after creating the subnet.
+ type: str
+ required: true
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ required: true
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs. Cannot
+ be changed after creating the subnet.
+ type: str
+ required: true
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs. Cannot be changed
+ after creating the subnet.
+ type: str
+ required: false
+ dhcp_enable:
+ description:
+          - Specifies whether DHCP is enabled for the subnet. The value can
+            be true (enabled) or false (disabled), and the default value is
+            true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ required: false
+ dns_address:
+ description:
+          - Specifies the DNS server addresses for the subnet. The first
+            address is used as the primary DNS server, the second as the
+            secondary one.
+ type: list
+ elements: str
+ required: false
+extends_documentation_fragment:
+ - community.general.hwc
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# create subnet
+- name: Create vpc
+  community.general.hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ register: vpc
+- name: Create subnet
+ community.general.hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
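+
+# a sketch of the same subnet with DNS servers; the addresses below are
+# placeholders, the first entry becomes the primary DNS server
+- name: Create subnet with DNS servers
+  community.general.hwc_vpc_subnet:
+    vpc_id: "{{ vpc.id }}"
+    cidr: "192.168.100.0/26"
+    gateway_ip: "192.168.100.32"
+    name: "ansible_network_subnet_test"
+    dns_address:
+      - "100.125.1.250"
+      - "8.8.8.8"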
+'''
+
+RETURN = '''
+ cidr:
+ description:
+ - Specifies the subnet CIDR block. The value must be within the VPC
+ CIDR block and be in CIDR format. The subnet mask cannot be
+ greater than 28.
+ type: str
+ returned: success
+ gateway_ip:
+ description:
+ - Specifies the gateway of the subnet. The value must be an IP
+ address in the subnet.
+ type: str
+ returned: success
+ name:
+ description:
+ - Specifies the subnet name. The value is a string of 1 to 64
+ characters that can contain letters, digits, underscores C(_),
+ hyphens (-), and periods (.).
+ type: str
+ returned: success
+ vpc_id:
+ description:
+ - Specifies the ID of the VPC to which the subnet belongs.
+ type: str
+ returned: success
+ availability_zone:
+ description:
+ - Specifies the AZ to which the subnet belongs.
+ type: str
+ returned: success
+ dhcp_enable:
+ description:
+            - Specifies whether DHCP is enabled for the subnet. The value can
+              be true (enabled) or false (disabled), and the default value is
+              true.
+ If this parameter is set to false, newly created ECSs cannot
+ obtain IP addresses, and usernames and passwords cannot be
+ injected using Cloud-init.
+ type: bool
+ returned: success
+ dns_address:
+ description:
+            - Specifies the DNS server addresses for the subnet. The first
+              address is used as the primary DNS server, the second as the
+              secondary one.
+ type: list
+ returned: success
+'''
+
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (
+ Config, HwcClientException, HwcClientException404, HwcModule,
+ are_different_dicts, build_path, get_region, is_empty_value,
+ navigate_value, wait_to_finish)
+
+
+def build_module():
+ return HwcModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'],
+ type='str'),
+ timeouts=dict(type='dict', options=dict(
+ create=dict(default='15m', type='str'),
+ update=dict(default='15m', type='str'),
+ ), default=dict()),
+ cidr=dict(type='str', required=True),
+ gateway_ip=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ vpc_id=dict(type='str', required=True),
+ availability_zone=dict(type='str'),
+ dhcp_enable=dict(type='bool'),
+ dns_address=dict(type='list', elements='str')
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ """Main function"""
+
+ module = build_module()
+ config = Config(module, "vpc")
+
+ try:
+ resource = None
+ if module.params.get('id'):
+ resource = True
+ else:
+ v = search_resource(config)
+ if len(v) > 1:
+ raise Exception("Found more than one resource(%s)" % ", ".join([
+ navigate_value(i, ["id"]) for i in v]))
+
+ if len(v) == 1:
+ resource = v[0]
+ module.params['id'] = navigate_value(resource, ["id"])
+
+ result = {}
+ changed = False
+ if module.params['state'] == 'present':
+ if resource is None:
+ if not module.check_mode:
+ create(config)
+ changed = True
+
+ current = read_resource(config, exclude_output=True)
+ expect = user_input_parameters(module)
+ if are_different_dicts(expect, current):
+ if not module.check_mode:
+ update(config)
+ changed = True
+
+ result = read_resource(config)
+ result['id'] = module.params.get('id')
+ else:
+ if resource:
+ if not module.check_mode:
+ delete(config)
+ changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ else:
+ result['changed'] = changed
+ module.exit_json(**result)
+
+
+def user_input_parameters(module):
+ return {
+ "availability_zone": module.params.get("availability_zone"),
+ "cidr": module.params.get("cidr"),
+ "dhcp_enable": module.params.get("dhcp_enable"),
+ "dns_address": module.params.get("dns_address"),
+ "gateway_ip": module.params.get("gateway_ip"),
+ "name": module.params.get("name"),
+ "vpc_id": module.params.get("vpc_id"),
+ }
+
+
+def create(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
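+    # timeouts are strings such as '15m'; strip the unit and convert to seconds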
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_create_parameters(opts)
+ r = send_create_request(module, params, client)
+ obj = async_wait_create(config, r, client, timeout)
+ module.params['id'] = navigate_value(obj, ["subnet", "id"])
+
+
+def update(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ timeout = 60 * int(module.params['timeouts']['update'].rstrip('m'))
+ opts = user_input_parameters(module)
+
+ params = build_update_parameters(opts)
+ if params:
+ r = send_update_request(module, params, client)
+ async_wait_update(config, r, client, timeout)
+
+
+def delete(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ send_delete_request(module, None, client)
+
+ url = build_path(module, "subnets/{id}")
+
+ def _refresh_status():
+ try:
+ client.get(url)
+ except HwcClientException404:
+ return True, "Done"
+
+ except Exception:
+ return None, ""
+
+ return True, "Pending"
+
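+    # no separate delete timeout option exists, so the create timeout is reused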
+ timeout = 60 * int(module.params['timeouts']['create'].rstrip('m'))
+ try:
+ wait_to_finish(["Done"], ["Pending"], _refresh_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(delete) to "
+ "be done, error= %s" % str(ex))
+
+
+def read_resource(config, exclude_output=False):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+
+ res = {}
+
+ r = send_read_request(module, client)
+ res["read"] = fill_read_resp_body(r)
+
+ return update_properties(module, res, None, exclude_output)
+
+
+def _build_query_link(opts):
+ query_link = "?marker={marker}&limit=10"
+ v = navigate_value(opts, ["vpc_id"])
+ if v:
+ query_link += "&vpc_id=" + str(v)
+
+ return query_link
+
+
+def search_resource(config):
+ module = config.module
+ client = config.client(get_region(module), "vpc", "project")
+ opts = user_input_parameters(module)
+ identity_obj = _build_identity_object(opts)
+ query_link = _build_query_link(opts)
+ link = "subnets" + query_link
+
+ result = []
+ p = {'marker': ''}
+ while True:
+ url = link.format(**p)
+ r = send_list_request(module, client, url)
+ if not r:
+ break
+
+ for item in r:
+ item = fill_list_resp_body(item)
+ if not are_different_dicts(identity_obj, item):
+ result.append(item)
+
+ if len(result) > 1:
+ break
+
+ p['marker'] = r[-1].get('id')
+
+ return result
+
+
+def build_create_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["availability_zone"], None)
+ if not is_empty_value(v):
+ params["availability_zone"] = v
+
+ v = navigate_value(opts, ["cidr"], None)
+ if not is_empty_value(v):
+ params["cidr"] = v
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_create_dns_list(opts, None)
+ if not is_empty_value(v):
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["gateway_ip"], None)
+ if not is_empty_value(v):
+ params["gateway_ip"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_create_primary_dns(opts, None)
+ if not is_empty_value(v):
+ params["primary_dns"] = v
+
+ v = expand_create_secondary_dns(opts, None)
+ if not is_empty_value(v):
+ params["secondary_dns"] = v
+
+ v = navigate_value(opts, ["vpc_id"], None)
+ if not is_empty_value(v):
+ params["vpc_id"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_create_dns_list(d, array_index):
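+    # the dnsList field is only sent when more than two addresses are given;
+    # the first two are passed separately as primary_dns and secondary_dns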
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v if (v and len(v) > 2) else []
+
+
+def expand_create_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_create_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_create_request(module, params, client):
+ url = "subnets"
+ try:
+ r = client.post(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(create), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_create(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
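+    # poll the new subnet until its status becomes ACTIVE; errors while
+    # querying are treated as transient and retried until the timeout expires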
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(create) to "
+ "be done, error= %s" % str(ex))
+
+
+def build_update_parameters(opts):
+ params = dict()
+
+ v = navigate_value(opts, ["dhcp_enable"], None)
+ if v is not None:
+ params["dhcp_enable"] = v
+
+ v = expand_update_dns_list(opts, None)
+ if v is not None:
+ params["dnsList"] = v
+
+ v = navigate_value(opts, ["name"], None)
+ if not is_empty_value(v):
+ params["name"] = v
+
+ v = expand_update_primary_dns(opts, None)
+ if v is not None:
+ params["primary_dns"] = v
+
+ v = expand_update_secondary_dns(opts, None)
+ if v is not None:
+ params["secondary_dns"] = v
+
+ if not params:
+ return params
+
+ params = {"subnet": params}
+
+ return params
+
+
+def expand_update_dns_list(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ if v:
+ if len(v) > 2:
+ return v
+ return None
+ return []
+
+
+def expand_update_primary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[0] if v else ""
+
+
+def expand_update_secondary_dns(d, array_index):
+ v = navigate_value(d, ["dns_address"], array_index)
+ return v[1] if (v and len(v) > 1) else ""
+
+
+def send_update_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.put(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(update), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def async_wait_update(config, result, client, timeout):
+ module = config.module
+
+ path_parameters = {
+ "subnet_id": ["subnet", "id"],
+ }
+ data = dict((key, navigate_value(result, path))
+ for key, path in path_parameters.items())
+
+ url = build_path(module, "subnets/{subnet_id}", data)
+
+ def _query_status():
+ r = None
+ try:
+ r = client.get(url, timeout=timeout)
+ except HwcClientException:
+ return None, ""
+
+ try:
+ s = navigate_value(r, ["subnet", "status"])
+ return r, s
+ except Exception:
+ return None, ""
+
+ try:
+ return wait_to_finish(
+ ["ACTIVE"],
+ ["UNKNOWN"],
+ _query_status, timeout)
+ except Exception as ex:
+ module.fail_json(msg="module(hwc_vpc_subnet): error "
+ "waiting for api(update) to "
+ "be done, error= %s" % str(ex))
+
+
+def send_delete_request(module, params, client):
+ url = build_path(module, "vpcs/{vpc_id}/subnets/{id}")
+
+ try:
+ r = client.delete(url, params)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(delete), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return r
+
+
+def send_read_request(module, client):
+ url = build_path(module, "subnets/{id}")
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(read), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnet"], None)
+
+
+def fill_read_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+def update_properties(module, response, array_index, exclude_output=False):
+ r = user_input_parameters(module)
+
+ v = navigate_value(response, ["read", "availability_zone"], array_index)
+ r["availability_zone"] = v
+
+ v = navigate_value(response, ["read", "cidr"], array_index)
+ r["cidr"] = v
+
+ v = navigate_value(response, ["read", "dhcp_enable"], array_index)
+ r["dhcp_enable"] = v
+
+ v = navigate_value(response, ["read", "dnsList"], array_index)
+ r["dns_address"] = v
+
+ v = navigate_value(response, ["read", "gateway_ip"], array_index)
+ r["gateway_ip"] = v
+
+ v = navigate_value(response, ["read", "name"], array_index)
+ r["name"] = v
+
+ v = navigate_value(response, ["read", "vpc_id"], array_index)
+ r["vpc_id"] = v
+
+ return r
+
+
+def send_list_request(module, client, url):
+
+ r = None
+ try:
+ r = client.get(url)
+ except HwcClientException as ex:
+ msg = ("module(hwc_vpc_subnet): error running "
+ "api(list), error: %s" % str(ex))
+ module.fail_json(msg=msg)
+
+ return navigate_value(r, ["subnets"], None)
+
+
+def _build_identity_object(all_opts):
+ result = dict()
+
+ v = navigate_value(all_opts, ["availability_zone"], None)
+ result["availability_zone"] = v
+
+ v = navigate_value(all_opts, ["cidr"], None)
+ result["cidr"] = v
+
+ v = navigate_value(all_opts, ["dhcp_enable"], None)
+ result["dhcp_enable"] = v
+
+ v = navigate_value(all_opts, ["dns_address"], None)
+ result["dnsList"] = v
+
+ v = navigate_value(all_opts, ["gateway_ip"], None)
+ result["gateway_ip"] = v
+
+ result["id"] = None
+
+ v = navigate_value(all_opts, ["name"], None)
+ result["name"] = v
+
+ result["neutron_network_id"] = None
+
+ result["neutron_subnet_id"] = None
+
+ result["primary_dns"] = None
+
+ result["secondary_dns"] = None
+
+ result["status"] = None
+
+ v = navigate_value(all_opts, ["vpc_id"], None)
+ result["vpc_id"] = v
+
+ return result
+
+
+def fill_list_resp_body(body):
+ result = dict()
+
+ result["availability_zone"] = body.get("availability_zone")
+
+ result["cidr"] = body.get("cidr")
+
+ result["dhcp_enable"] = body.get("dhcp_enable")
+
+ result["dnsList"] = body.get("dnsList")
+
+ result["gateway_ip"] = body.get("gateway_ip")
+
+ result["id"] = body.get("id")
+
+ result["name"] = body.get("name")
+
+ result["neutron_network_id"] = body.get("neutron_network_id")
+
+ result["neutron_subnet_id"] = body.get("neutron_subnet_id")
+
+ result["primary_dns"] = body.get("primary_dns")
+
+ result["secondary_dns"] = body.get("secondary_dns")
+
+ result["status"] = body.get("status")
+
+ result["vpc_id"] = body.get("vpc_id")
+
+ return result
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py b/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
new file mode 100644
index 000000000..774f29134
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_domain.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_domain
+short_description: Manages domains on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ domain:
+ description:
+ - Name of the domain to be managed.
+ required: true
+ type: str
+ state:
+ description:
+ - The desired state of the domain.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ ldap_id:
+ description:
+            - LDAP ID to add to the domain.
+ required: false
+ type: str
+ size:
+ description:
+ - Size of the domain.
+ required: false
+ type: str
+ hard_capacity:
+ description:
+ - Hard capacity of the domain.
+ required: false
+ type: str
+ soft_capacity:
+ description:
+ - Soft capacity of the domain.
+ required: false
+ type: str
+ max_cgs:
+ description:
+            - Maximum number of cgs.
+ required: false
+ type: str
+ max_dms:
+ description:
+            - Maximum number of dms.
+ required: false
+ type: str
+ max_mirrors:
+ description:
+            - Maximum number of mirrors.
+ required: false
+ type: str
+ max_pools:
+ description:
+            - Maximum number of pools.
+ required: false
+ type: str
+ max_volumes:
+ description:
+            - Maximum number of volumes.
+ required: false
+ type: str
+ perf_class:
+ description:
+ - Add the domain to a performance class.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ size: domain_size
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete domain.
+ community.general.ibm_sa_domain:
+ domain: domain_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+msg:
+    description: Module return status.
+ returned: as needed
+ type: str
+ sample: "domain 'domain_name' created successfully."
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ domain=dict(required=True),
+ size=dict(),
+ max_dms=dict(),
+ max_cgs=dict(),
+ ldap_id=dict(),
+ max_mirrors=dict(),
+ max_pools=dict(),
+ max_volumes=dict(),
+ perf_class=dict(),
+ hard_capacity=dict(),
+ soft_capacity=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
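+    # look up the domain first so create/delete only runs when actually needed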
+ domain = xcli_client.cmd.domain_list(
+ domain=module.params['domain']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ msg = 'Domain \'{0}\''.format(module.params['domain'])
+ if state == 'present' and not domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_create', xcli_client)
+ msg += " created successfully."
+ elif state == 'absent' and domain:
+ state_changed = execute_pyxcli_command(
+ module, 'domain_delete', xcli_client)
+ msg += " deleted successfully."
+ else:
+ msg += " state unchanged."
+
+ module.exit_json(changed=state_changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
new file mode 100644
index 000000000..614865ae0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_host.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host
+short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ type: str
+ state:
+ description:
+ - Host state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ cluster:
+ description:
+ - The name of the cluster to include the host.
+ required: false
+ type: str
+ domain:
+ description:
+            - The domains the host will be attached to.
+ To include more than one domain,
+ separate domain names with commas.
+ To include all existing domains, use an asterisk ("*").
+ required: false
+ type: str
+ iscsi_chap_name:
+ description:
+            - The host's CHAP name identifier.
+ required: false
+ type: str
+ iscsi_chap_secret:
+ description:
+ - The password of the initiator used to
+ authenticate to the system when CHAP is enabled.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Define new host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete host.
+ community.general.ibm_sa_host:
+ host: host_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
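+
+# Illustrative only: the cluster, domain and CHAP names are placeholders
+# showing the optional parameters used together.
+- name: Define new host in a cluster with CHAP authentication.
+ community.general.ibm_sa_host:
+ host: host_name
+ cluster: cluster_name
+ domain: domain_name
+ iscsi_chap_name: chap_name
+ iscsi_chap_secret: chap_secret
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system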
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ cluster=dict(),
+ domain=dict(),
+ iscsi_chap_name=dict(),
+ iscsi_chap_secret=dict(no_log=True),
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ host = xcli_client.cmd.host_list(
+ host=module.params['host']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_define', xcli_client)
+ elif state == 'absent' and host:
+ state_changed = execute_pyxcli_command(
+ module, 'host_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py b/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
new file mode 100644
index 000000000..fdb27f85a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_host_ports.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_host_ports
+short_description: Add or remove host ports on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module adds ports to or removes them from the hosts
+ on IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ host:
+ description:
+ - Host name.
+ required: true
+ type: str
+ state:
+ description:
+ - Host ports state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ iscsi_name:
+ description:
+ - iSCSI initiator name.
+ required: false
+ type: str
+ fcaddress:
+ description:
+ - Fibre Channel address.
+ required: false
+ type: str
+ num_of_visible_targets:
+ description:
+ - Number of visible targets.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Add ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Remove ports for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ iscsi_name: iqn.1994-05.com***
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
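+
+# Illustrative only: the fcaddress value is a placeholder WWPN showing how
+# a Fibre Channel port is added instead of an iSCSI one.
+- name: Add Fibre Channel port for host.
+ community.general.ibm_sa_host_ports:
+ host: test_host
+ fcaddress: 10000000C9934D9C
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present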
+
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl,
+ spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ host=dict(required=True),
+ iscsi_name=dict(),
+ fcaddress=dict(),
+ num_of_visible_targets=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # Fetch the ports currently defined for the host; an XCLI error
+ # (for example a missing host) is treated as "no ports defined".
+ ports = []
+ try:
+ ports = xcli_client.cmd.host_list_ports(
+ host=module.params.get('host')).as_list
+ except Exception:
+ pass
+ state = module.params['state']
+ port_exists = False
+ ports = [port.get('port_name') for port in ports]
+
+ # Wrap the single-valued parameters in lists so the membership tests
+ # below compare whole port names instead of substrings.
+ fc_ports = ([module.params.get('fcaddress')]
+ if module.params.get('fcaddress') else [])
+ iscsi_ports = ([module.params.get('iscsi_name')]
+ if module.params.get('iscsi_name') else [])
+ for port in ports:
+ if port in iscsi_ports or port in fc_ports:
+ port_exists = True
+ break
+ state_changed = False
+ if state == 'present' and not port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_add_port', xcli_client)
+ if state == 'absent' and port_exists:
+ state_changed = execute_pyxcli_command(
+ module, 'host_remove_port', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py b/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
new file mode 100644
index 000000000..88065aa4e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_pool.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_pool
+short_description: Handles pools on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems"
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ pool:
+ description:
+ - Pool name.
+ required: true
+ type: str
+ state:
+ description:
+ - Pool state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ size:
+ description:
+ - Pool size in GB.
+ required: false
+ type: str
+ snapshot_size:
+ description:
+ - Pool snapshot size in GB.
+ required: false
+ type: str
+ domain:
+ description:
+ - Adds the pool to the specified domain.
+ required: false
+ type: str
+ perf_class:
+ description:
+ - Assigns a perf_class to the pool.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create new pool.
+ community.general.ibm_sa_pool:
+ pool: pool_name
+ size: 300
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete pool.
+ community.general.ibm_sa_pool:
+ pool: pool_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
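+
+# Illustrative only: the domain and perf_class names are placeholders
+# showing the optional placement parameters.
+- name: Create pool inside a domain with a performance class.
+ community.general.ibm_sa_pool:
+ pool: pool_name
+ size: 300
+ snapshot_size: 30
+ domain: domain_name
+ perf_class: perf_class_name
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system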
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ pool=dict(required=True),
+ size=dict(),
+ snapshot_size=dict(),
+ domain=dict(),
+ perf_class=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ pool = xcli_client.cmd.pool_list(
+ pool=module.params['pool']).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_create', xcli_client)
+ if state == 'absent' and pool:
+ state_changed = execute_pyxcli_command(
+ module, 'pool_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
new file mode 100644
index 000000000..bc5f81b32
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_vol.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol
+short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ pool:
+ description:
+ - Volume pool.
+ required: false
+ type: str
+ state:
+ description:
+ - Volume state.
+ default: "present"
+ choices: [ "present", "absent" ]
+ type: str
+ size:
+ description:
+ - Volume size.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Create a new volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ pool: pool_name
+ size: 17
+ state: present
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+
+- name: Delete an existing volume.
+ community.general.ibm_sa_vol:
+ vol: volume_name
+ state: absent
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ pool=dict(),
+ size=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # Look up the existing volume; None means it does not exist yet.
+ volume = xcli_client.cmd.vol_list(
+ vol=module.params.get('vol')).as_single_element
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_create', xcli_client)
+ elif state == 'absent' and volume:
+ state_changed = execute_pyxcli_command(
+ module, 'vol_delete', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py b/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
new file mode 100644
index 000000000..ea8b485ef
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ibm_sa_vol_map.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2018 IBM CORPORATION
+# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ibm_sa_vol_map
+short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems
+
+description:
+ - "This module maps volumes to or unmaps them from the hosts on
+ IBM Spectrum Accelerate Family storage systems."
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ vol:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ default: "present"
+ choices: [ "present", "absent" ]
+ description:
+ - When the state is present, the volume is mapped.
+ - When the state is absent, the volume is unmapped.
+ type: str
+
+ cluster:
+ description:
+ - Maps the volume to a cluster.
+ required: false
+ type: str
+ host:
+ description:
+ - Maps the volume to a host.
+ required: false
+ type: str
+ lun:
+ description:
+ - The LUN identifier.
+ required: false
+ type: str
+ override:
+ description:
+ - Overrides the existing volume mapping.
+ required: false
+ type: str
+
+extends_documentation_fragment:
+ - community.general.ibm_storage
+ - community.general.attributes
+
+author:
+ - Tzur Eliyahu (@tzure)
+'''
+
+EXAMPLES = '''
+- name: Map volume to host.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Map volume to cluster.
+ community.general.ibm_sa_vol_map:
+ vol: volume_name
+ lun: 1
+ cluster: cluster_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: present
+
+- name: Unmap volume.
+ community.general.ibm_sa_vol_map:
+ host: host_name
+ username: admin
+ password: secret
+ endpoints: hostdev-system
+ state: absent
+'''
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command,
+ connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed)
+
+
+def main():
+ argument_spec = spectrum_accelerate_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ vol=dict(required=True),
+ lun=dict(),
+ cluster=dict(),
+ host=dict(),
+ override=dict()
+ )
+ )
+
+ module = AnsibleModule(argument_spec)
+ is_pyxcli_installed(module)
+
+ xcli_client = connect_ssl(module)
+ # Determine whether the volume is already mapped to the given host.
+ mapping = False
+ try:
+ mapped_hosts = xcli_client.cmd.vol_mapping_list(
+ vol=module.params.get('vol')).as_list
+ for host in mapped_hosts:
+ if host['host'] == module.params.get("host", ""):
+ mapping = True
+ except Exception:
+ pass
+ state = module.params['state']
+
+ state_changed = False
+ if state == 'present' and not mapping:
+ state_changed = execute_pyxcli_command(module, 'map_vol', xcli_client)
+ if state == 'absent' and mapping:
+ state_changed = execute_pyxcli_command(
+ module, 'unmap_vol', xcli_client)
+
+ module.exit_json(changed=state_changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/icinga2_feature.py b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
new file mode 100644
index 000000000..6e6bc5416
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/icinga2_feature.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Copyright (c) 2018, Ansible Project
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_feature
+
+short_description: Manage Icinga2 feature
+description:
+ - This module can be used to enable or disable an Icinga2 feature.
+author: "Loic Blot (@nerzhul)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - This is the feature name to enable or disable.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to C(present) and feature is disabled, then feature is enabled.
+ - If set to C(present) and feature is already enabled, then nothing is changed.
+ - If set to C(absent) and feature is enabled, then feature is disabled.
+ - If set to C(absent) and feature is already disabled, then nothing is changed.
+ choices: [ "present", "absent" ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Enable ido-pgsql feature
+ community.general.icinga2_feature:
+ name: ido-pgsql
+ state: present
+
+- name: Disable api feature
+ community.general.icinga2_feature:
+ name: api
+ state: absent
+'''
+
+RETURN = '''
+#
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Icinga2FeatureHelper:
+ def __init__(self, module):
+ self.module = module
+ self._icinga2 = module.get_bin_path('icinga2', True)
+ self.feature_name = self.module.params['name']
+ self.state = self.module.params['state']
+
+ def _exec(self, args):
+ cmd = [self._icinga2, 'feature']
+ # check_rc is False so manage() can interpret non-zero return codes
+ # itself (disabling an already disabled feature exits non-zero).
+ rc, out, err = self.module.run_command(cmd + args, check_rc=False)
+ return rc, out
+
+ def manage(self):
+ rc, out = self._exec(["list"])
+ if rc != 0:
+ self.module.fail_json(msg="Unable to list icinga2 features. "
+ "Ensure icinga2 is installed and present in binary path.")
+
+ # If feature is already in good state, just exit
+ if (re.search("Disabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "absent") or \
+ (re.search("Enabled features:.* %s[ \n]" % self.feature_name, out) and self.state == "present"):
+ self.module.exit_json(changed=False)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ feature_enable_str = "enable" if self.state == "present" else "disable"
+
+ rc, out = self._exec([feature_enable_str, self.feature_name])
+
+ change_applied = False
+ if self.state == "present":
+ if rc != 0:
+ self.module.fail_json(msg="Failed to %s feature %s."
+ " icinga2 command returned %s" % (feature_enable_str,
+ self.feature_name,
+ out))
+
+ if re.search("already enabled", out) is None:
+ change_applied = True
+ else:
+ if rc == 0:
+ change_applied = True
+ # RC is not 0 for this already disabled feature, handle it as no change applied
+ elif re.search("Cannot disable feature '%s'. Target file .* does not exist" % self.feature_name, out):
+ change_applied = False
+ else:
+ self.module.fail_json(msg="Failed to disable feature. Command returns %s" % out)
+
+ self.module.exit_json(changed=change_applied)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=["present", "absent"], default="present")
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+ Icinga2FeatureHelper(module).manage()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/icinga2_host.py b/ansible_collections/community/general/plugins/modules/icinga2_host.py
new file mode 100644
index 000000000..7f25c55d9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/icinga2_host.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This module is proudly sponsored by CGI (www.cgi.com) and
+# KPN (www.kpn.com).
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: icinga2_host
+short_description: Manage a host in Icinga2
+description:
+ - "Add or remove a host to Icinga2 through the API."
+ - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)"
+author: "Jurgen Brand (@t794104)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ url:
+ type: str
+ description:
+ - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
+ use_proxy:
+ description:
+ - If C(false), it will not use a proxy, even if one is defined in
+ an environment variable on the target hosts.
+ type: bool
+ default: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ url_username:
+ type: str
+ description:
+ - The username for use in HTTP basic authentication.
+ - This parameter can be used without C(url_password) for sites that allow empty passwords.
+ url_password:
+ type: str
+ description:
+ - The password for use in HTTP basic authentication.
+ - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used.
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the uri module only sends authentication information when a webservice
+ responds to an initial request with a 401 status. Since some basic auth services do not properly
+ send a 401, logins will fail. This option forces the sending of the Basic authentication header
+ upon initial request.
+ type: bool
+ default: false
+ client_cert:
+ type: path
+ description:
+ - PEM formatted certificate chain file to be used for SSL client
+ authentication. This file can also include the key as well, and if
+ the key is included, C(client_key) is not required.
+ client_key:
+ type: path
+ description:
+ - PEM formatted file that contains your private key to be used for SSL
+ client authentication. If C(client_cert) contains both the certificate
+ and key, this option is not required.
+ state:
+ type: str
+ description:
+ - Apply host state.
+ choices: [ "present", "absent" ]
+ default: present
+ name:
+ type: str
+ description:
+ - Name used to create / delete the host. This does not need to be the FQDN, but does need to be unique.
+ required: true
+ aliases: [host]
+ zone:
+ type: str
+ description:
+ - The zone from where this host should be polled.
+ template:
+ type: str
+ description:
+ - The template used to define the host.
+ - Template cannot be modified after object creation.
+ check_command:
+ type: str
+ description:
+ - The command used to check if the host is alive.
+ default: "hostalive"
+ display_name:
+ type: str
+ description:
+ - The name used to display the host.
+ - If not specified, it defaults to the value of the I(name) parameter.
+ ip:
+ type: str
+ description:
+ - The IP address of the host.
+ required: true
+ variables:
+ type: dict
+ description:
+ - Dictionary of variables.
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Add host to icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: present
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ variables:
+ foo: "bar"
+ delegate_to: 127.0.0.1
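+
+# Illustrative removal example: ip is required by the module's argument
+# spec even though it is not used when deleting.
+- name: Remove host from icinga
+ community.general.icinga2_host:
+ url: "https://icinga2.example.com"
+ url_username: "ansible"
+ url_password: "a_secret"
+ state: absent
+ name: "{{ ansible_fqdn }}"
+ ip: "{{ ansible_default_ipv4.address }}"
+ delegate_to: 127.0.0.1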
+'''
+
+RETURN = '''
+name:
+ description: The name used to create, modify or delete the host.
+ type: str
+ returned: always
+data:
+ description: The data structure used to create, modify or delete the host.
+ type: dict
+ returned: always
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+# ===========================================
+# Icinga2 API class
+#
+class icinga2_api:
+ module = None
+
+ def __init__(self, module):
+ self.module = module
+
+ def call_url(self, path, data='', method='GET'):
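+ # The Icinga2 API honours the X-HTTP-Method-Override header, which lets
+ # a request carry a body (for example a filter) with any verb.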
+ headers = {
+ 'Accept': 'application/json',
+ 'X-HTTP-Method-Override': method,
+ }
+ url = self.module.params.get("url") + "/" + path
+ rsp, info = fetch_url(module=self.module, url=url, data=data, headers=headers, method=method, use_proxy=self.module.params['use_proxy'])
+ body = ''
+ if rsp:
+ body = json.loads(rsp.read())
+ if info['status'] >= 400:
+ body = info['body']
+ return {'code': info['status'], 'data': body}
+
+ def check_connection(self):
+ ret = self.call_url('v1/status')
+ if ret['code'] == 200:
+ return True
+ return False
+
+ def exists(self, hostname):
+ data = {
+ "filter": "match(\"" + hostname + "\", host.name)",
+ }
+ ret = self.call_url(
+ path="v1/objects/hosts",
+ data=self.module.jsonify(data)
+ )
+ if ret['code'] == 200:
+ if len(ret['data']['results']) == 1:
+ return True
+ return False
+
+ def create(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="PUT"
+ )
+ return ret
+
+ def delete(self, hostname):
+ data = {"cascade": 1}
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="DELETE"
+ )
+ return ret
+
+ def modify(self, hostname, data):
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ data=self.module.jsonify(data),
+ method="POST"
+ )
+ return ret
+
+ def diff(self, hostname, data):
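+ # Shallow comparison: flags a change when any requested attribute is
+ # missing from, or differs from, the live object; attributes that exist
+ # only on the server are ignored.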
+ ret = self.call_url(
+ path="v1/objects/hosts/" + hostname,
+ method="GET"
+ )
+ changed = False
+ ic_data = ret['data']['results'][0]
+ for key in data['attrs']:
+ if key not in ic_data['attrs'].keys():
+ changed = True
+ elif data['attrs'][key] != ic_data['attrs'][key]:
+ changed = True
+ return changed
+
+
+# ===========================================
+# Module execution.
+#
+def main():
+ # use the predefined argument spec for url
+ argument_spec = url_argument_spec()
+ # add our own arguments
+ argument_spec.update(
+ state=dict(default="present", choices=["absent", "present"]),
+ name=dict(required=True, aliases=['host']),
+ zone=dict(),
+ template=dict(default=None),
+ check_command=dict(default="hostalive"),
+ display_name=dict(default=None),
+ ip=dict(required=True),
+ variables=dict(type='dict', default=None),
+ )
+
+ # Define the main module
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params["state"]
+ name = module.params["name"]
+ zone = module.params["zone"]
+ template = []
+ if module.params["template"]:
+ template = [module.params["template"]]
+ check_command = module.params["check_command"]
+ ip = module.params["ip"]
+ display_name = module.params["display_name"]
+ if not display_name:
+ display_name = name
+ variables = module.params["variables"]
+
+ try:
+ icinga = icinga2_api(module=module)
+ icinga.check_connection()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to Icinga. Exception message: %s" % (e))
+
+ data = {
+ 'templates': template,
+ 'attrs': {
+ 'address': ip,
+ 'display_name': display_name,
+ 'check_command': check_command,
+ 'zone': zone,
+ 'vars.made_by': "ansible"
+ }
+ }
+
+ # 'variables' defaults to None; guard before iterating to avoid an
+ # AttributeError when no custom variables are supplied.
+ if variables:
+ for key, value in variables.items():
+ data['attrs']['vars.' + key] = value
+
+ changed = False
+ if icinga.exists(name):
+ if state == "absent":
+ if module.check_mode:
+ module.exit_json(changed=True, name=name, data=data)
+ else:
+ try:
+ ret = icinga.delete(name)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) deleting host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception deleting host: " + str(e))
+
+ elif icinga.diff(name, data):
+ if module.check_mode:
+ # A difference was detected, so check mode must report a pending change.
+ module.exit_json(changed=True, name=name, data=data)
+
+ # Template attribute is not allowed in modification
+ del data['templates']
+
+ ret = icinga.modify(name, data)
+
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) modifying host: '%s'" % (ret['code'], ret['data']))
+
+ else:
+ if state == "present":
+ if module.check_mode:
+ changed = True
+ else:
+ try:
+ ret = icinga.create(name, data)
+ if ret['code'] == 200:
+ changed = True
+ else:
+ module.fail_json(msg="bad return code (%s) creating host: '%s'" % (ret['code'], ret['data']))
+ except Exception as e:
+ module.fail_json(msg="exception creating host: " + str(e))
+
+ module.exit_json(changed=changed, name=name, data=data)
+
+
+# import module snippets
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
new file mode 100644
index 000000000..d760a2c3a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_command.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_command
+short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - For use with Dell iDRAC operations that require Redfish OEM extensions.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC.
+ type: str
+ username:
+ description:
+ - Username for authenticating to iDRAC.
+ type: str
+ password:
+ description:
+ - Password for authenticating to iDRAC.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to iDRAC.
+ type: str
+ version_added: 2.3.0
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to iDRAC.
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - ID of the System, Manager or Chassis to modify.
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Create BIOS configuration job (schedule BIOS setting update)
+ community.general.idrac_redfish_command:
+ category: Systems
+ command: CreateBiosConfigJob
+ resource_id: System.Embedded.1
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+return_values:
+ description: Dictionary containing command-specific response data from the action.
+ returned: on success
+ type: dict
+ version_added: 6.6.0
+ sample: {
+ "job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"
+ }
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils.common.text.converters import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def create_bios_config_job(self):
+ result = {}
+ key = "Bios"
+ jobs = "Jobs"
+
+ # Search for 'key' entry and extract URI from it
+ response = self.get_request(self.root_uri + self.systems_uris[0])
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found" % key}
+
+ bios_uri = data[key]["@odata.id"]
+
+ # Extract proper URI
+ response = self.get_request(self.root_uri + bios_uri)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+ set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
+ "@odata.id"]
+
+ payload = {"TargetSettingsURI": set_bios_attr_uri}
+ response = self.post_request(
+ self.root_uri + self.manager_uri + "/" + jobs, payload)
+ if response['ret'] is False:
+ return response
+
+ response_output = response['resp'].__dict__
+ job_id_full = response_output["headers"]["Location"]
+ job_id = re.search("JID_.+", job_id_full).group()
+ return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["CreateBiosConfigJob"],
+ "Accounts": [],
+ "Manager": []
+}
+
+
+def main():
+ result = {}
+ return_values = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Systems":
+ # execute only if we find a System resource
+ # NOTE: Currently overriding the usage of 'data_modification' due to
+ # how 'resource_id' is processed. In the case of CreateBiosConfigJob,
+ # we interact with BOTH systems and managers, so you currently cannot
+ # specify a single 'resource_id' to make both '_find_systems_resource'
+ # and '_find_managers_resource' return success. Since
+ # CreateBiosConfigJob doesn't use the matched 'resource_id' for a
+ # system regardless of what's specified, disabling the 'resource_id'
+ # inspection for the next call allows a specific manager to be
+ # specified with 'resource_id'. If we ever need to expand the input
+ # to inspect a specific system and manager in parallel, this will need
+ # updates.
+ rf_utils.data_modification = False
+ result = rf_utils._find_systems_resource()
+ rf_utils.data_modification = True
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "CreateBiosConfigJob":
+ # execute only if we find a Managers resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ result = rf_utils.create_bios_config_job()
+ if 'job_id' in result:
+ return_values['job_id'] = result['job_id']
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(changed=True, msg='Action was successful', return_values=return_values)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
new file mode 100644
index 000000000..cc47e62d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_config.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_config
+short_description: Manages servers through iDRAC using Dell Redfish APIs
+description:
+ - For use with Dell iDRAC operations that require Redfish OEM extensions.
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ set or update a configuration attribute.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Category to execute on iDRAC.
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC.
+ - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and
+ I(SetSystemAttributes) are mutually exclusive commands when C(category)
+ is I(Manager).
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC.
+ type: str
+ username:
+ description:
+ - Username for authenticating to iDRAC.
+ type: str
+ password:
+ description:
+ - Password for authenticating to iDRAC.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to iDRAC.
+ type: str
+ version_added: 2.3.0
+ manager_attributes:
+ required: false
+ description:
+ - Dictionary of iDRAC attribute name and value pairs to update.
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to iDRAC.
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - ID of the System, Manager or Chassis to modify.
+ type: str
+ version_added: '0.2.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Enable NTP and set NTP server and Time zone attributes in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ NTPConfigGroup.1.NTPEnable: "Enabled"
+ NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}"
+ Time.1.Timezone: "{{ timezone }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable Syslog and set Syslog servers in iDRAC
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SysLog.1.SysLogEnable: "Enabled"
+ SysLog.1.Server1: "{{ syslog_server1 }}"
+ SysLog.1.Server2: "{{ syslog_server2 }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Configure SNMP community string, port, protocol and trap format
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetManagerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ SNMP.1.AgentEnable: "Enabled"
+ SNMP.1.AgentCommunity: "public_community_string"
+ SNMP.1.TrapFormat: "SNMPv1"
+ SNMP.1.SNMPProtocol: "All"
+ SNMP.1.DiscoveryPort: 161
+ SNMP.1.AlertPort: 162
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Enable CSIOR
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetLifecycleControllerAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+
+ - name: Set Power Supply Redundancy Policy to A/B Grid Redundant
+ community.general.idrac_redfish_config:
+ category: Manager
+ command: SetSystemAttributes
+ resource_id: iDRAC.Embedded.1
+ manager_attributes:
+ ServerPwr.1.PSRedPolicy: "A/B Grid Redundant"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username}}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.validation import (
+ check_mutually_exclusive,
+ check_required_arguments
+)
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils.common.text.converters import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def set_manager_attributes(self, command):
+
+ result = {}
+ required_arg_spec = {'manager_attributes': {'required': True}}
+
+ try:
+ check_required_arguments(required_arg_spec, self.module.params)
+
+ except TypeError as e:
+ msg = to_native(e)
+ self.module.fail_json(msg=msg)
+
+ key = "Attributes"
+ command_manager_attributes_uri_map = {
+ "SetManagerAttributes": self.manager_uri,
+ "SetLifecycleControllerAttributes": "/redfish/v1/Managers/LifecycleController.Embedded.1",
+ "SetSystemAttributes": "/redfish/v1/Managers/System.Embedded.1"
+ }
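+ # Commands without a dedicated OEM endpoint fall back to the manager URI
+ # discovered during resource lookup.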
+ manager_uri = command_manager_attributes_uri_map.get(command, self.manager_uri)
+
+ attributes = self.module.params['manager_attributes']
+
+ attrs_to_patch = {}
+ attrs_skipped = {}
+ attrs_bad = {} # Store attrs which were not found in the system
+
+ # Search for key entry and extract URI from it
+ response = self.get_request(self.root_uri + manager_uri + "/" + key)
+ if response['ret'] is False:
+ return response
+ result['ret'] = True
+ data = response['data']
+
+ if key not in data:
+ return {'ret': False,
+ 'msg': "%s: Key %s not found" % (command, key),
+ 'warning': ""}
+
+ for attr_name, attr_value in attributes.items():
+ # Check if attribute exists
+ if attr_name not in data[u'Attributes']:
+ # Skip and proceed to next attribute if this isn't valid
+ attrs_bad.update({attr_name: attr_value})
+ continue
+
+ # Find out if value is already set to what we want. If yes, exclude
+ # those attributes
+ if data[u'Attributes'][attr_name] == attr_value:
+ attrs_skipped.update({attr_name: attr_value})
+ else:
+ attrs_to_patch.update({attr_name: attr_value})
+
+ warning = ""
+ if attrs_bad:
+ warning = "Incorrect attributes %s" % (attrs_bad)
+
+ if not attrs_to_patch:
+ return {'ret': True, 'changed': False,
+ 'msg': "No changes made. Manager attributes already set.",
+ 'warning': warning}
+
+ payload = {"Attributes": attrs_to_patch}
+ response = self.patch_request(self.root_uri + manager_uri + "/" + key, payload)
+ if response['ret'] is False:
+ return response
+
+ return {'ret': True, 'changed': True,
+ 'msg': "%s: Modified Manager attributes %s" % (command, attrs_to_patch),
+ 'warning': warning}
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]
+}
+
+
+# list of mutually exclusive commands for a category
+CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = {
+ "Manager": [["SetManagerAttributes", "SetLifecycleControllerAttributes",
+ "SetSystemAttributes"]]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ manager_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ resource_id=dict()
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # check for mutually exclusive commands
+ try:
+ # check_mutually_exclusive accepts a single list or list of lists that
+ # are groups of terms that should be mutually exclusive with one another
+ # and checks that against a dictionary
+ check_mutually_exclusive(CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE[category],
+ dict.fromkeys(command_list, True))
+
+ except TypeError as e:
+ module.fail_json(msg=to_native(e))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command in ["SetManagerAttributes", "SetLifecycleControllerAttributes", "SetSystemAttributes"]:
+ result = rf_utils.set_manager_attributes(command)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ if result.get('warning'):
+ module.warn(to_native(result['warning']))
+
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
new file mode 100644
index 000000000..aece61664
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/idrac_redfish_info.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: idrac_redfish_info
+short_description: Gather PowerEdge server information through iDRAC using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote iDRAC controllers to
+ get information back.
+ - For use with Dell EMC iDRAC operations that require Redfish OEM extensions.
+ - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on iDRAC.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iDRAC.
+ - C(GetManagerAttributes) returns the list of dicts containing iDRAC,
+ LifecycleController and System attributes.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of iDRAC.
+ type: str
+ username:
+ description:
+ - Username for authenticating to iDRAC.
+ type: str
+ password:
+ description:
+ - Password for authenticating to iDRAC.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to iDRAC.
+ type: str
+ version_added: 2.3.0
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to iDRAC.
+ default: 10
+ type: int
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get Manager attributes with a timeout of 20 seconds
+ community.general.idrac_redfish_info:
+ category: Manager
+ command: GetManagerAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ # Examples to display the value of all or a single iDRAC attribute
+ - name: Store iDRAC attributes as a fact variable
+ ansible.builtin.set_fact:
+ idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}"
+
+ - name: Display all iDRAC attributes
+ ansible.builtin.debug:
+ var: idrac_attributes
+
+ - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute
+ ansible.builtin.debug:
+ var: idrac_attributes['Syslog.1.SysLogEnable']
+
+ # Examples to display the value of all or a single LifecycleController attribute
+ - name: Store LifecycleController attributes as a fact variable
+ ansible.builtin.set_fact:
+ lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}"
+
+ - name: Display LifecycleController attributes
+ ansible.builtin.debug:
+ var: lc_attributes
+
+ - name: Display the value of 'CollectSystemInventoryOnRestart' attribute
+ ansible.builtin.debug:
+ var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart']
+
+ # Examples to display the value of all or a single System attribute
+ - name: Store System attributes as a fact variable
+ ansible.builtin.set_fact:
+ system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}"
+
+ - name: Display System attributes
+ ansible.builtin.debug:
+ var: system_attributes
+
+ - name: Display the value of 'PSRedPolicy'
+ ansible.builtin.debug:
+ var: system_attributes['ServerPwr.1.PSRedPolicy']
+
+'''
+
+RETURN = '''
+msg:
+ description: Different results depending on task.
+ returned: always
+ type: dict
+ sample: List of Manager attributes
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils.common.text.converters import to_native
+
+
+class IdracRedfishUtils(RedfishUtils):
+
+ def get_manager_attributes(self):
+ result = {}
+ manager_attributes = []
+ properties = ['Attributes', 'Id']
+
+ response = self.get_request(self.root_uri + self.manager_uri)
+
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ # Manager attributes are supported as part of iDRAC OEM extension
+ # Attributes are supported only on iDRAC9
+ try:
+ for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']:
+ attributes_uri = members[u'@odata.id']
+
+ response = self.get_request(self.root_uri + attributes_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+
+ attributes = {}
+ for prop in properties:
+ if prop in data:
+ attributes[prop] = data.get(prop)
+
+ if attributes:
+ manager_attributes.append(attributes)
+
+ result['ret'] = True
+
+ except (AttributeError, KeyError) as e:
+ result['ret'] = False
+ result['msg'] = "Failed to find attribute/key: " + str(e)
+
+ result["entries"] = manager_attributes
+ return result
+
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["GetManagerAttributes"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=True,
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+
+ if category == "Manager":
+ # execute only if we find a Manager resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "GetManagerAttributes":
+ result = rf_utils.get_manager_attributes()
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ module.exit_json(redfish_facts=result)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
new file mode 100644
index 000000000..0ec385e73
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_command.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ilo_redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+version_added: 6.6.0
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+extends_documentation_fragment:
+ - community.general.attributes
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ choices: ['Systems']
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ username:
+ required: false
+ description:
+ - Username for authenticating to iLO.
+ type: str
+ password:
+ required: false
+ description:
+ - Password for authenticating to iLO.
+ type: str
+ auth_token:
+ required: false
+ description:
+ - Security token for authenticating to iLO.
+ type: str
+ timeout:
+ required: false
+ description:
+ - Timeout in seconds for HTTP requests to iLO.
+ default: 60
+ type: int
+author:
+ - Varni H P (@varini-hp)
+'''
+
+EXAMPLES = '''
+ - name: Wait for iLO Reboot Completion
+ community.general.ilo_redfish_command:
+ category: Systems
+ command: WaitforiLORebootCompletion
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+ilo_redfish_command:
+ description: Returns the status of the operation performed on the iLO.
+ type: dict
+ contains:
+ WaitforiLORebootCompletion:
+ description: Returns the output msg and whether the function executed successfully.
+ type: dict
+ contains:
+ ret:
+ description: Return True/False based on whether the operation was performed successfully.
+ type: bool
+ msg:
+ description: Status of the operation performed on the iLO.
+ type: str
+ returned: always
+'''
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["WaitforiLORebootCompletion"]
+}
+
+from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ timeout=dict(type="int", default=60),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ timeout = module.params['timeout']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native(
+ "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(
+ msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ if category == "Systems":
+ # execute only if we find a System resource
+
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "WaitforiLORebootCompletion":
+ result[command] = rf_utils.wait_for_ilo_reboot_completion()
+
+ # Return data back or fail with proper message
+ if not result[command]['ret']:
+            module.fail_json(msg=to_native(result[command]['msg']))
+
+ changed = result[command].get('changed', False)
+ module.exit_json(ilo_redfish_command=result, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py
new file mode 100644
index 000000000..1f021895d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_config.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ilo_redfish_config
+short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions
+version_added: 4.2.0
+description:
+ - Builds Redfish URIs locally and sends them to iLO to
+ set or update a configuration attribute.
+ - For use with HPE iLO operations that require Redfish OEM extensions.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ type: str
+ description:
+ - Command category to execute on iLO.
+ choices: ['Manager']
+ command:
+ required: true
+ description:
+ - List of commands to execute on iLO.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of iLO.
+ type: str
+ username:
+ description:
+ - Username for authenticating to iLO.
+ type: str
+ password:
+ description:
+ - Password for authenticating to iLO.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to iLO.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to iLO.
+ default: 10
+ type: int
+ attribute_name:
+ required: true
+ description:
+ - Name of the attribute to be configured.
+ type: str
+ attribute_value:
+ required: false
+ description:
+ - Value of the attribute to be configured.
+ type: str
+author:
+ - "Bhavya B (@bhavya06)"
+'''
+
+EXAMPLES = '''
+ - name: Disable WINS Registration
+ community.general.ilo_redfish_config:
+ category: Manager
+ command: SetWINSReg
+ baseuri: 15.X.X.X
+ username: Admin
+ password: Testpass123
+ attribute_name: WINSRegistration
+
+ - name: Set Time Zone
+ community.general.ilo_redfish_config:
+ category: Manager
+ command: SetTimeZone
+ baseuri: 15.X.X.X
+ username: Admin
+ password: Testpass123
+ attribute_name: TimeZone
+ attribute_value: Chennai
+
+ - name: Set NTP Servers
+ community.general.ilo_redfish_config:
+ category: Manager
+ command: SetNTPServers
+ baseuri: 15.X.X.X
+ username: Admin
+ password: Testpass123
+ attribute_name: StaticNTPServers
+ attribute_value: X.X.X.X
+
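+  # A sketch of token-based authentication (assumes an auth token obtained
+  # from a prior iLO login; auth_token is mutually exclusive with username):
+  - name: Set Time Zone using an auth token
+    community.general.ilo_redfish_config:
+      category: Manager
+      command: SetTimeZone
+      baseuri: 15.X.X.X
+      auth_token: "{{ auth_token }}"
+      attribute_name: TimeZone
+      attribute_value: Chennai
+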
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"]
+}
+
+from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True, choices=list(
+ CATEGORY_COMMANDS_ALL.keys())),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ attribute_name=dict(required=True),
+ attribute_value=dict(type='str'),
+ timeout=dict(type='int', default=10)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ creds = {"user": module.params['username'],
+ "pswd": module.params['password'],
+ "token": module.params['auth_token']}
+
+ timeout = module.params['timeout']
+
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)
+ mgr_attributes = {'mgr_attr_name': module.params['attribute_name'],
+ 'mgr_attr_value': module.params['attribute_value']}
+ changed = False
+
+ offending = [
+ cmd for cmd in command_list if cmd not in CATEGORY_COMMANDS_ALL[category]]
+
+ if offending:
+ module.fail_json(msg=to_native("Invalid Command(s): '%s'. Allowed Commands = %s" % (
+ offending, CATEGORY_COMMANDS_ALL[category])))
+
+ if category == "Manager":
+ resource = rf_utils._find_managers_resource()
+ if not resource['ret']:
+ module.fail_json(msg=to_native(resource['msg']))
+
+ dispatch = dict(
+ SetTimeZone=rf_utils.set_time_zone,
+ SetDNSserver=rf_utils.set_dns_server,
+ SetDomainName=rf_utils.set_domain_name,
+ SetNTPServers=rf_utils.set_ntp_server,
+ SetWINSReg=rf_utils.set_wins_registration
+ )
+
+ for command in command_list:
+ result[command] = dispatch[command](mgr_attributes)
+ if 'changed' in result[command]:
+ changed |= result[command]['changed']
+
+ module.exit_json(ilo_redfish_config=result, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py b/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py
new file mode 100644
index 000000000..90cafb8ec
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ilo_redfish_info.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ilo_redfish_info
+short_description: Gathers server information through iLO using Redfish APIs
+version_added: 4.2.0
+description:
+ - Builds Redfish URIs locally and sends them to iLO to
+ get information back.
+ - For use with HPE iLO operations that require Redfish OEM extensions.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ category:
+ required: true
+ description:
+ - List of categories to execute on iLO.
+ type: list
+ elements: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on iLO.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of iLO.
+ type: str
+ username:
+ description:
+ - Username for authenticating to iLO.
+ type: str
+ password:
+ description:
+ - Password for authenticating to iLO.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to iLO.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to iLO.
+ default: 10
+ type: int
+author:
+ - "Bhavya B (@bhavya06)"
+'''
+
+EXAMPLES = '''
+ - name: Get iLO Sessions
+ community.general.ilo_redfish_info:
+ category: Sessions
+ command: GetiLOSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result_sessions
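+
+  # A sketch relying on the wildcard handling in main() below; the literal
+  # value "all" expands to every known category and command:
+  - name: Get all information from all supported categories
+    community.general.ilo_redfish_info:
+      category: all
+      command: all
+      baseuri: "{{ baseuri }}"
+      username: "{{ username }}"
+      password: "{{ password }}"
+    register: result_all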
+'''
+
+RETURN = '''
+ilo_redfish_info:
+ description: Returns iLO sessions.
+ type: dict
+ contains:
+ GetiLOSessions:
+ description: Returns the iLO session msg and whether the function executed successfully.
+ type: dict
+ contains:
+ ret:
+ description: Check variable to see if the information was successfully retrieved.
+ type: bool
+ msg:
+ description: Information of all active iLO sessions.
+ type: list
+ elements: dict
+ contains:
+ Description:
+ description: Provides a description of the resource.
+ type: str
+ Id:
+ description: The sessionId.
+ type: str
+ Name:
+ description: The name of the resource.
+ type: str
+ UserName:
+ description: Name to use to log in to the management processor.
+ type: str
+ returned: always
+'''
+
+CATEGORY_COMMANDS_ALL = {
+ "Sessions": ["GetiLOSessions"]
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Sessions": "GetiLOSessions"
+}
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True, type='list', elements='str'),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=True
+ )
+
+ creds = {"user": module.params['username'],
+ "pswd": module.params['password'],
+ "token": module.params['auth_token']}
+
+ timeout = module.params['timeout']
+
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = iLORedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+ for entry in CATEGORY_COMMANDS_ALL:
+ category_list.append(entry)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+ for entry in CATEGORY_COMMANDS_ALL[category]:
+ command_list.append(entry)
+ # one or more commands
+ else:
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Sessions":
+ for command in command_list:
+ if command == "GetiLOSessions":
+ result[command] = rf_utils.get_ilo_sessions()
+
+ module.exit_json(ilo_redfish_info=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/imc_rest.py b/ansible_collections/community/general/plugins/modules/imc_rest.py
new file mode 100644
index 000000000..4bbaad23a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/imc_rest.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: imc_rest
+short_description: Manage Cisco IMC hardware through its REST API
+description:
+ - Provides direct access to the Cisco IMC REST API.
+ - Perform any configuration changes and actions that the Cisco IMC supports.
+ - More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html).
+author:
+ - Dag Wieers (@dagwieers)
+requirements:
+ - lxml
+ - xmljson >= 0.1.8
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ hostname:
+ description:
+ - IP Address or hostname of Cisco IMC, resolvable by Ansible control host.
+ required: true
+ aliases: [ host, ip ]
+ type: str
+ username:
+ description:
+ - Username used to login to the switch.
+ default: admin
+ aliases: [ user ]
+ type: str
+ password:
+ description:
+ - The password to use for authentication.
+ default: password
+ type: str
+ path:
+ description:
+    - Absolute path of the file that contains the body
+      of the HTTP request being sent to the Cisco IMC REST API.
+    - Parameter C(path) is mutually exclusive with parameter C(content).
+ aliases: [ 'src', 'config_file' ]
+ type: path
+ content:
+ description:
+ - When used instead of C(path), sets the content of the API requests directly.
+ - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module.
+    - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream;
+      the Cisco IMC output is subsequently merged.
+    - Parameter C(content) is mutually exclusive with parameter C(path).
+ type: str
+ protocol:
+ description:
+ - Connection protocol to use.
+ default: https
+ choices: [ http, https ]
+ type: str
+ timeout:
+ description:
+ - The socket level timeout in seconds.
+ - This is the time that every single connection (every fragment) can spend.
+ If this C(timeout) is reached, the module will fail with a
+ C(Connection failure) indicating that C(The read operation timed out).
+ default: 60
+ type: int
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated.
+    - This should only be set to C(false) when used on personally controlled sites with self-signed certificates.
+ type: bool
+ default: true
+notes:
+- The XML fragments don't need an authentication cookie, this is injected by the module automatically.
+- The Cisco IMC XML output is being translated to JSON using the Cobra convention.
+- Any configConfMo change requested has a return status of 'modified', even if there was no actual change
+ from the previous configuration. As a result, this module will always report a change on subsequent runs.
+ In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt.
+- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout)
+ parameter. Some XML fragments can take longer than the default timeout.
+- More information about the IMC REST API is available from
+ U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html)
+'''
+
+EXAMPLES = r'''
+- name: Power down server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="down"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Configure IMC using multiple XML fragments
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ timeout: 120
+ content: |
+ <!-- Configure Serial-on-LAN -->
+ <configConfMo><inConfig>
+      <solIf dn="sys/rack-unit-1/sol-if" adminState="enable" speed="115200" comport="com0"/>
+ </inConfig></configConfMo>
+
+ <!-- Configure Console Redirection -->
+ <configConfMo><inConfig>
+ <biosVfConsoleRedirection dn="sys/rack-unit-1/bios/bios-settings/Console-redirection"
+ vpBaudRate="115200"
+ vpConsoleRedirection="com-0"
+ vpFlowControl="none"
+ vpTerminalType="vt100"
+ vpPuttyKeyPad="LINUX"
+ vpRedirectionAfterPOST="Always Enable"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Enable PXE boot and power-cycle server
+ community.general.imc_rest:
+ hostname: '{{ imc_hostname }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ content: |
+ <!-- Configure PXE boot -->
+ <configConfMo><inConfig>
+ <lsbootLan dn="sys/rack-unit-1/boot-policy/lan-read-only" access="read-only" order="1" prot="pxe" type="lan"/>
+ </inConfig></configConfMo>
+
+ <!-- Power cycle server -->
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" adminPower="cycle-immediate"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Reconfigure IMC to boot from storage
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ content: |
+ <configConfMo><inConfig>
+ <lsbootStorage dn="sys/rack-unit-1/boot-policy/storage-read-write" access="read-write" order="1" type="storage"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Add customer description to server
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ content: |
+ <configConfMo><inConfig>
+ <computeRackUnit dn="sys/rack-unit-1" usrLbl="Customer Lab - POD{{ pod_id }} - {{ inventory_hostname_short }}"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
+
+- name: Disable HTTP and increase session timeout to max value 10800 secs
+ community.general.imc_rest:
+ hostname: '{{ imc_host }}'
+ username: '{{ imc_username }}'
+ password: '{{ imc_password }}'
+ validate_certs: false
+ timeout: 120
+ content: |
+ <configConfMo><inConfig>
+ <commHttp dn="sys/svc-ext/http-svc" adminState="disabled"/>
+ </inConfig></configConfMo>
+
+ <configConfMo><inConfig>
+ <commHttps dn="sys/svc-ext/https-svc" adminState="enabled" sessionTimeout="10800"/>
+ </inConfig></configConfMo>
+ delegate_to: localhost
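+
+# A sketch using path instead of content (the two are mutually exclusive);
+# /tmp/imc_config.xml is an assumed file holding the XML request body:
+- name: Apply an XML request body stored in a file
+  community.general.imc_rest:
+    hostname: '{{ imc_host }}'
+    username: '{{ imc_username }}'
+    password: '{{ imc_password }}'
+    validate_certs: false
+    path: /tmp/imc_config.xml
+  delegate_to: localhost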
+'''
+
+RETURN = r'''
+aaLogin:
+ description: Cisco IMC XML output for the login, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "outCookie": "1498902428/9de6dc36-417c-157c-106c-139efe2dc02a",
+ "outPriv": "admin",
+ "outRefreshPeriod": "600",
+ "outSessionId": "114",
+ "outVersion": "2.0(13e)",
+ "response": "yes"
+ }
+configConfMo:
+ description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention
+ returned: success
+ type: dict
+elapsed:
+ description: Elapsed time in seconds
+ returned: always
+ type: int
+ sample: 31
+response:
+ description: HTTP response message, including content length
+ returned: always
+ type: str
+ sample: OK (729 bytes)
+status:
+ description: The HTTP response status code
+ returned: always
+  type: int
+ sample: 200
+error:
+ description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention
+ returned: failed
+ type: dict
+ sample: |
+ "attributes": {
+ "cookie": "",
+ "errorCode": "ERR-xml-parse-error",
+ "errorDescr": "XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. ",
+ "invocationResult": "594",
+ "response": "yes"
+ }
+error_code:
+ description: Cisco IMC error code
+ returned: failed
+ type: str
+ sample: ERR-xml-parse-error
+error_text:
+ description: Cisco IMC error message
+ returned: failed
+ type: str
+ sample: |
+ XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.
+input:
+ description: RAW XML input sent to the Cisco IMC, causing the error
+ returned: failed
+ type: str
+ sample: |
+ <configConfMo><inConfig><computeRackUnit dn="sys/rack-unit-1" admin_Power="down"/></inConfig></configConfMo>
+output:
+ description: RAW XML output received from the Cisco IMC, with error details
+ returned: failed
+ type: str
+ sample: >
+ <error cookie=""
+ response="yes"
+ errorCode="ERR-xml-parse-error"
+ invocationResult="594"
+ errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/>
+'''
+
+import datetime
+import os
+import traceback
+
+LXML_ETREE_IMP_ERR = None
+try:
+ import lxml.etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+XMLJSON_COBRA_IMP_ERR = None
+try:
+ from xmljson import cobra
+ HAS_XMLJSON_COBRA = True
+except ImportError:
+ XMLJSON_COBRA_IMP_ERR = traceback.format_exc()
+ HAS_XMLJSON_COBRA = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves import zip_longest
+from ansible.module_utils.urls import fetch_url
+
+
+def imc_response(module, rawoutput, rawinput=''):
+ ''' Handle IMC returned data '''
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ result = cobra.data(xmloutput)
+
+ # Handle errors
+ if xmloutput.get('errorCode') and xmloutput.get('errorDescr'):
+ if rawinput:
+ result['input'] = rawinput
+ result['output'] = rawoutput
+ result['error_code'] = xmloutput.get('errorCode')
+ result['error_text'] = xmloutput.get('errorDescr')
+ module.fail_json(msg='Request failed: %(error_text)s' % result, **result)
+
+ return result
+
+
+def logout(module, url, cookie, timeout):
+ ''' Perform a logout, if needed '''
+ data = '<aaaLogout cookie="%s" inCookie="%s"/>' % (cookie, cookie)
+ resp, auth = fetch_url(module, url, data=data, method="POST", timeout=timeout)
+
+
+def merge(one, two):
+    ''' Merge two complex nested data structures into one '''
+ if isinstance(one, dict) and isinstance(two, dict):
+ copy = dict(one)
+ # copy.update({key: merge(one.get(key, None), two[key]) for key in two})
+ copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two))
+ return copy
+
+ elif isinstance(one, list) and isinstance(two, list):
+ return [merge(alpha, beta) for (alpha, beta) in zip_longest(one, two)]
+
+ return one if two is None else two
+
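+# A sketch of merge() semantics on hypothetical inputs (not from the module):
+#   merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
+#     -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}
+#   merge([1, None], [None, 2]) -> [1, 2]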
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hostname=dict(type='str', required=True, aliases=['host', 'ip']),
+ username=dict(type='str', default='admin', aliases=['user']),
+ password=dict(type='str', default='password', no_log=True),
+ content=dict(type='str'),
+ path=dict(type='path', aliases=['config_file', 'src']),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ timeout=dict(type='int', default=60),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['content', 'path']],
+ )
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if not HAS_XMLJSON_COBRA:
+ module.fail_json(msg=missing_required_lib('xmljson >= 0.1.8'), exception=XMLJSON_COBRA_IMP_ERR)
+
+ hostname = module.params['hostname']
+ username = module.params['username']
+ password = module.params['password']
+
+ content = module.params['content']
+ path = module.params['path']
+
+ protocol = module.params['protocol']
+ timeout = module.params['timeout']
+
+ result = dict(
+ failed=False,
+ changed=False,
+ )
+
+ # Report missing file
+ file_exists = False
+ if path:
+ if os.path.isfile(path):
+ file_exists = True
+ else:
+ module.fail_json(msg='Cannot find/access path:\n%s' % path)
+
+ start = datetime.datetime.utcnow()
+
+ # Perform login first
+ url = '%s://%s/nuova' % (protocol, hostname)
+ data = '<aaaLogin inName="%s" inPassword="%s"/>' % (username, password)
+ resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or auth['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result)
+ result.update(imc_response(module, resp.read()))
+
+ # Store cookie for future requests
+ cookie = ''
+ try:
+ cookie = result['aaaLogin']['attributes']['outCookie']
+ except Exception:
+ module.fail_json(msg='Could not find cookie in output', **result)
+
+ try:
+ # Prepare request data
+ if content:
+ rawdata = content
+ elif file_exists:
+ with open(path, 'r') as config_object:
+ rawdata = config_object.read()
+
+ # Wrap the XML documents in a <root> element
+ xmldata = lxml.etree.fromstring('<root>%s</root>' % rawdata.replace('\n', ''))
+
+ # Handle each XML document separately in the same session
+ for xmldoc in list(xmldata):
+ if xmldoc.tag is lxml.etree.Comment:
+ continue
+ # Add cookie to XML
+ xmldoc.set('cookie', cookie)
+ data = lxml.etree.tostring(xmldoc)
+
+ # Perform actual request
+ resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout)
+ if resp is None or info['status'] != 200:
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result)
+
+ # Merge results with previous results
+ rawoutput = resp.read()
+ result = merge(result, imc_response(module, rawoutput, rawinput=data))
+ result['response'] = info['msg']
+ result['status'] = info['status']
+
+ # Check for any changes
+ # NOTE: Unfortunately IMC API always report status as 'modified'
+ xmloutput = lxml.etree.fromstring(rawoutput)
+ results = xmloutput.xpath('/configConfMo/outConfig/*/@status')
+ result['changed'] = ('modified' in results)
+
+ # Report success
+ result['elapsed'] = (datetime.datetime.utcnow() - start).seconds
+ module.exit_json(**result)
+ finally:
+ logout(module, url, cookie, timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/imgadm.py b/ansible_collections/community/general/plugins/modules/imgadm.py
new file mode 100644
index 000000000..6e4b81098
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/imgadm.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, 2017 Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: imgadm
+short_description: Manage SmartOS images
+description:
+  - Manage SmartOS virtual machine images through imgadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ force:
+ required: false
+ type: bool
+ description:
+ - Force a given operation (where supported by imgadm(1M)).
+ pool:
+ required: false
+ default: zones
+ description:
+ - zpool to import to or delete images from.
+ type: str
+ source:
+ required: false
+ description:
+ - URI for the image source.
+ type: str
+ state:
+ required: true
+ choices: [ present, absent, deleted, imported, updated, vacuumed ]
+ description:
+    - State the object operated on should be in. C(imported) is an alias
+      for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
+      and C(uuid) to C(*), it will remove all unused images.
+ type: str
+
+ type:
+ required: false
+ choices: [ imgapi, docker, dsapi ]
+ default: imgapi
+ description:
+ - Type for image sources.
+ type: str
+
+ uuid:
+ required: false
+ description:
+ - Image UUID. Can either be a full UUID or C(*) for all images.
+ type: str
+
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Import an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: imported
+
+- name: Delete an image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: deleted
+
+- name: Update all images
+ community.general.imgadm:
+ uuid: '*'
+ state: updated
+
+- name: Update a single image
+ community.general.imgadm:
+ uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
+ state: updated
+
+- name: Add a source
+ community.general.imgadm:
+ source: 'https://datasets.project-fifo.net'
+ state: present
+
+- name: Add a Docker source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ type: docker
+ state: present
+
+- name: Remove a source
+ community.general.imgadm:
+ source: 'https://docker.io'
+ state: absent
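+
+# A sketch of the vacuum operation described in the state option above;
+# removes all images not used by any VM (the module passes -f to imgadm):
+- name: Remove all unused images
+  community.general.imgadm:
+    uuid: '*'
+    state: vacuumed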
+'''
+
+RETURN = '''
+source:
+ description: Source that is managed.
+ returned: When not managing an image.
+ type: str
+ sample: https://datasets.project-fifo.net
+uuid:
+ description: UUID for an image operated on.
+ returned: When not managing an image source.
+ type: str
+ sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'present'
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
+# -E option to return any errors in JSON, the generated JSON does not play well
+# with the JSON parsers of Python. The returned message contains '\n' as part of
+# the stacktrace, which breaks the parsers.
+
+
+class Imgadm(object):
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.cmd = module.get_bin_path('imgadm', required=True)
+ self.changed = False
+ self.uuid = module.params['uuid']
+
+        # Since there are a number of (natural) aliases, prevent having to look
+        # them up every time we operate on `state`.
+ if self.params['state'] in ['present', 'imported', 'updated']:
+ self.present = True
+ else:
+ self.present = False
+
+ # Perform basic UUID validation upfront.
+ if self.uuid and self.uuid != '*':
+ if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
+ module.fail_json(msg='Provided value for uuid option is not a valid UUID.')
+
+ # Helper method to massage stderr
+ def errmsg(self, stderr):
+ match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
+ if match:
+ return match.groups()[0]
+ else:
+ return 'Unexpected failure'
+
+ def update_images(self):
+ if self.uuid == '*':
+ cmd = '{0} update'.format(self.cmd)
+ else:
+ cmd = '{0} update {1}'.format(self.cmd, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
+
+ # There is no feedback from imgadm(1M) to determine if anything
+ # was actually changed. So treat this as an 'always-changes' operation.
+ # Note that 'imgadm -v' produces unparseable JSON...
+ self.changed = True
+
+ def manage_sources(self):
+ force = self.params['force']
+ source = self.params['source']
+ imgtype = self.params['type']
+
+ cmd = '{0} sources'.format(self.cmd)
+
+ if force:
+ cmd += ' -f'
+
+ if self.present:
+ cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
+
+ # Check the various responses.
+ # Note that trying to add a source with the wrong type is handled
+ # above as it results in a non-zero status.
+
+ regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Added "%s" image source "%s"' % (imgtype, source)
+ if re.match(regex, stdout):
+ self.changed = True
+ else:
+ # Type is ignored by imgadm(1M) here
+ cmd += ' -d %s' % source
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
+
+ regex = 'Do not have image source "%s", no change' % source
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = 'Deleted ".*" image source "%s"' % source
+ if re.match(regex, stdout):
+ self.changed = True
+
+ def manage_images(self):
+ pool = self.params['pool']
+ state = self.params['state']
+
+ if state == 'vacuumed':
+ # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
+ cmd = '{0} vacuum -f'.format(self.cmd)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
+ else:
+ if stdout == '':
+ self.changed = False
+ else:
+ self.changed = True
+ if self.present:
+ cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
+
+ regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = False
+
+ regex = '.*ActiveImageNotFound.*'
+ if re.match(regex, stderr):
+ self.changed = False
+
+ regex = 'Imported image {0}.*'.format(self.uuid)
+ if re.match(regex, stdout.splitlines()[-1]):
+ self.changed = True
+ else:
+ cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ regex = '.*ImageNotInstalled.*'
+ if re.match(regex, stderr):
+ # Even if the 'rc' was non-zero (3), we handled the situation
+ # in order to determine if there was a change.
+ self.changed = False
+
+ regex = 'Deleted image {0}'.format(self.uuid)
+ if re.match(regex, stdout):
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ force=dict(type='bool'),
+ pool=dict(default='zones'),
+ source=dict(),
+ state=dict(required=True, choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
+ type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
+ uuid=dict()
+ ),
+ # This module relies largely on imgadm(1M) to enforce idempotency, which does not
+ # provide a "noop" (or equivalent) mode to do a dry-run.
+ supports_check_mode=False,
+ )
+
+ imgadm = Imgadm(module)
+
+ uuid = module.params['uuid']
+ source = module.params['source']
+ state = module.params['state']
+
+ result = {'state': state}
+
+ # Either manage sources or images.
+ if source:
+ result['source'] = source
+ imgadm.manage_sources()
+ else:
+ result['uuid'] = uuid
+
+ if state == 'updated':
+ imgadm.update_images()
+ else:
+            # Make sure we operate on a single image for the following actions
+ if (uuid == '*') and (state != 'vacuumed'):
+                module.fail_json(msg='Can only specify uuid as "*" when updating or vacuuming image(s)')
+ imgadm.manage_images()
+
+ result['changed'] = imgadm.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/infinity.py b/ansible_collections/community/general/plugins/modules/infinity.py
new file mode 100644
index 000000000..65aa591f4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/infinity.py
@@ -0,0 +1,575 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, <meiliu@fusionlayer.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: infinity
+short_description: Manage Infinity IPAM using REST API
+description:
+ - Manage Infinity IPAM using REST API.
+author:
+ - Meirong Liu (@MeganLiu)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ server_ip:
+ description:
+      - IP address of the Infinity server.
+ type: str
+ required: true
+ username:
+ description:
+ - Username to access Infinity.
+ - The user must have REST API privileges.
+ type: str
+ required: true
+ password:
+ description:
+ - Infinity password.
+ type: str
+ required: true
+ action:
+ description:
+      - Action to perform.
+ type: str
+ required: true
+ choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ]
+ network_id:
+ description:
+ - Network ID.
+ type: str
+ ip_address:
+ description:
+ - IP Address for a reservation or a release.
+ type: str
+ network_address:
+ description:
+      - Network address with CIDR format (e.g., 192.168.10.0).
+ type: str
+ network_size:
+ description:
+      - Network bitmask (e.g. 255.255.255.192) or CIDR format (e.g., /26).
+ type: str
+ network_name:
+ description:
+ - The name of a network.
+ type: str
+ network_location:
+ description:
+ - The parent network id for a given network.
+ type: int
+ default: -1
+ network_type:
+ description:
+      - Network type defined by Infinity.
+ type: str
+ choices: [ lan, shared_lan, supernet ]
+ default: lan
+ network_family:
+ description:
+      - Network family defined by Infinity, e.g. IPv4, IPv6 or dual stack.
+ type: str
+ choices: [ '4', '6', dual ]
+ default: '4'
+'''
+
+EXAMPLES = r'''
+---
+- hosts: localhost
+ connection: local
+ strategy: debug
+ tasks:
+ - name: Reserve network into Infinity IPAM
+ community.general.infinity:
+ server_ip: 80.75.107.12
+ username: username
+ password: password
+ action: reserve_network
+ network_name: reserve_new_ansible_network
+ network_family: 4
+ network_type: lan
+ network_id: 1201
+ network_size: /28
+ register: infinity
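+
+    # A sketch of reserving the next free IP address from an existing network
+    # (assumes network 1201 exists; see reserve_next_available_ip() below):
+    - name: Reserve next available IP from a network
+      community.general.infinity:
+        server_ip: 80.75.107.12
+        username: username
+        password: password
+        action: reserve_next_available_ip
+        network_id: 1201
+      register: infinity_ip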
+'''
+
+RETURN = r'''
+network_id:
+  description: ID for a given network.
+ returned: success
+ type: str
+ sample: '1501'
+ip_info:
+  description: When reserving the next available IP address from a network, information about the reserved IP address is returned.
+ returned: success
+ type: str
+ sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}'
+network_info:
+  description: When reserving a LAN network from an Infinity supernet by providing network_size, information about the reserved network is returned.
+ returned: success
+ type: str
+ sample: {
+ "network_address": "192.168.10.32/28",
+ "network_family": "4",
+ "network_id": 3102,
+ "network_size": null,
+ "description": null,
+ "network_location": "3085",
+ "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null},
+ "network_type": "lan",
+ "network_name": "'reserve_new_ansible_network'"
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule, json
+from ansible.module_utils.urls import open_url
+
+
+class Infinity(object):
+ """
+    Class to manage REST API calls to Infinity.
+ """
+
+ def __init__(self, module, server_ip, username, password):
+ self.module = module
+ self.auth_user = username
+ self.auth_pass = password
+ self.base_url = "https://%s/rest/v1/" % (str(server_ip))
+
+ def _get_api_call_ansible_handler(
+ self,
+ method='get',
+ resource_url='',
+ stat_codes=None,
+ params=None,
+ payload_data=None):
+ """
+        Perform the HTTPS request using the given HTTP method (get/post/delete).
+ """
+ stat_codes = [200] if stat_codes is None else stat_codes
+ request_url = str(self.base_url) + str(resource_url)
+ response = None
+ headers = {'Content-Type': 'application/json'}
+ if not request_url:
+ self.module.exit_json(
+ msg="When sending Rest api call , the resource URL is empty, please check.")
+ if payload_data and not isinstance(payload_data, str):
+ payload_data = json.dumps(payload_data)
+ response_raw = open_url(
+ str(request_url),
+ method=method,
+ timeout=20,
+ headers=headers,
+ url_username=self.auth_user,
+ url_password=self.auth_pass,
+ validate_certs=False,
+ force_basic_auth=True,
+ data=payload_data)
+
+ response = response_raw.read()
+ payload = ''
+ if response_raw.code not in stat_codes:
+ self.module.exit_json(
+ changed=False,
+ meta=" openurl response_raw.code show error and error code is %r" %
+ (response_raw.code))
+ else:
+ if isinstance(response, str) and len(response) > 0:
+ payload = response
+ elif method.lower() == 'delete' and response_raw.code == 204:
+ payload = 'Delete is done.'
+        if isinstance(payload, dict) and "text" in payload:
+            self.module.exit_json(
+                changed=False,
+                meta="When calling the REST API, the returned data is not JSON.")
+ return payload
+
+ # ---------------------------------------------------------------------------
+ # get_network()
+ # ---------------------------------------------------------------------------
+ def get_network(self, network_id, network_name, limit=-1):
+ """
+        Search for a network in Infinity using the REST API.
+        Either network_id or network_name needs to be provided.
+        Returns the details of the network with the given network_id or name.
+ """
+ if network_name is None and network_id is None:
+ self.module.exit_json(
+ msg="You must specify one of the options 'network_name' or 'network_id'.")
+ method = "get"
+ resource_url = ''
+ params = {}
+ response = None
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if network_id is None and network_name:
+ method = "get"
+ resource_url = "search"
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list) and len(
+ response) > 1 and limit == 1:
+ response = response[0]
+ response = json.dumps(response)
+ return response
+
+ # ---------------------------------------------------------------------------
+ # get_network_id()
+ # ---------------------------------------------------------------------------
+ def get_network_id(self, network_name="", network_type='lan'):
+ """
+        Query network_id from Infinity via the REST API based on the given network_name.
+ """
+ method = 'get'
+ resource_url = 'search'
+ response = None
+ if network_name is None:
+ self.module.exit_json(
+ msg="You must specify the option 'network_name'")
+ params = {"query": json.dumps(
+ {"name": network_name, "type": "network"})}
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, payload_data=json.dumps(params))
+ network_id = ""
+ if response and isinstance(response, str):
+ response = json.loads(response)
+ if response and isinstance(response, list):
+ response = response[0]
+ network_id = response['id']
+ return network_id
+
+ # ---------------------------------------------------------------------------
+ # reserve_next_available_ip()
+ # ---------------------------------------------------------------------------
+ def reserve_next_available_ip(self, network_id=""):
+ """
+        Reserve an IP address via Infinity using the REST API.
+        network_id: the ID of the network to reserve an IP address from.
+        Returns the next available IP address from that network.
+ """
+ method = "post"
+ resource_url = ''
+ response = None
+ ip_info = ''
+ if not network_id:
+ self.module.exit_json(
+ msg="You must specify the option 'network_id'.")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_ip"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if response and response.find(
+ "[") >= 0 and response.find("]") >= 0:
+ start_pos = response.find("{")
+ end_pos = response.find("}")
+ ip_info = response[start_pos: (end_pos + 1)]
+ return ip_info
+
+ # -------------------------
+ # release_ip()
+ # -------------------------
+ def release_ip(self, network_id="", ip_address=""):
+ """
+        Release an IP address via Infinity using the REST API.
+ """
+ method = "get"
+ resource_url = ''
+ response = None
+ if ip_address is None or network_id is None:
+ self.module.exit_json(
+ msg="You must specify those two options: 'network_id' and 'ip_address'.")
+
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg="There is an error in release ip %s from network %s." %
+ (ip_address, network_id))
+
+ ip_list = json.loads(response)
+ ip_idlist = []
+ for ip_item in ip_list:
+ ip_id = ip_item['id']
+ ip_idlist.append(ip_id)
+ deleted_ip_id = ''
+ for ip_id in ip_idlist:
+ ip_response = ''
+ resource_url = "ip_addresses/" + str(ip_id)
+ ip_response = self._get_api_call_ansible_handler(
+ method,
+ resource_url,
+ stat_codes=[200])
+ if ip_response and json.loads(
+ ip_response)['address'] == str(ip_address):
+ deleted_ip_id = ip_id
+ break
+ if deleted_ip_id:
+ method = 'delete'
+ resource_url = "ip_addresses/" + str(deleted_ip_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release ip, could not find the ip address %r from the given network %r' ." %
+ (ip_address, network_id))
+
+ return response
+
+ # -------------------
+ # delete_network()
+ # -------------------
+ def delete_network(self, network_id="", network_name=""):
+ """
+        Delete a network from Infinity using the REST API.
+ """
+ method = 'delete'
+ resource_url = ''
+ response = None
+ if network_id is None and network_name is None:
+ self.module.exit_json(
+ msg="You must specify one of those options: 'network_id','network_name' .")
+ if network_id is None and network_name:
+ network_id = self.get_network_id(network_name=network_name)
+ if network_id:
+ resource_url = "networks/" + str(network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ return response
+
+    # ---------------------------------------------------------------------------
+    # reserve_network()
+ # ---------------------------------------------------------------------------
+ def reserve_network(self, network_id="",
+ reserved_network_name="", reserved_network_description="",
+ reserved_network_size="", reserved_network_family='4',
+ reserved_network_type='lan', reserved_network_address="",):
+ """
+        Reserves the first available network of the specified size from a given supernet.
+          network_name (required): Name of the network.
+          description (optional): Free description.
+          network_family (required): Address family of the network. One of '4', '6', 'IPv4', 'IPv6', 'dual'.
+          network_address (optional): Address of the new network. If not given, the first network available will be created.
+          network_size (required): Size of the new network in /<prefix> notation.
+          network_type (required): Type of network. One of 'supernet', 'lan', 'shared_lan'.
+
+ """
+ method = 'post'
+ resource_url = ''
+ network_info = None
+ if network_id is None or reserved_network_name is None or reserved_network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options: 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ if network_id:
+ resource_url = "networks/" + str(network_id) + "/reserve_network"
+ if not reserved_network_family:
+ reserved_network_family = '4'
+ if not reserved_network_type:
+ reserved_network_type = 'lan'
+ payload_data = {
+ "network_name": reserved_network_name,
+ 'description': reserved_network_description,
+ 'network_size': reserved_network_size,
+ 'network_family': reserved_network_family,
+ 'network_type': reserved_network_type,
+ 'network_location': int(network_id)}
+ if reserved_network_address:
+ payload_data.update({'network_address': reserved_network_address})
+
+ network_info = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[200, 201], payload_data=payload_data)
+
+ return network_info
+
+ # ---------------------------------------------------------------------------
+ # release_network()
+ # ---------------------------------------------------------------------------
+ def release_network(
+ self,
+ network_id="",
+ released_network_name="",
+ released_network_type='lan'):
+ """
+        Release the network named 'released_network_name' from the given supernet network_id.
+ """
+ method = 'get'
+ response = None
+ if network_id is None or released_network_name is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_id', 'reserved_network_name' and 'reserved_network_size'")
+ matched_network_id = ""
+ resource_url = "networks/" + str(network_id) + "/children"
+ response = self._get_api_call_ansible_handler(method, resource_url)
+ if not response:
+ self.module.exit_json(
+ msg=" there is an error in releasing network %r from network %s." %
+ (network_id, released_network_name))
+ if response:
+ response = json.loads(response)
+ for child_net in response:
+ if child_net['network'] and child_net['network']['network_name'] == released_network_name:
+ matched_network_id = child_net['network']['network_id']
+ break
+ response = None
+ if matched_network_id:
+ method = 'delete'
+ resource_url = "networks/" + str(matched_network_id)
+ response = self._get_api_call_ansible_handler(
+ method, resource_url, stat_codes=[204])
+ else:
+ self.module.exit_json(
+ msg=" When release network , could not find the network %r from the given superent %r' " %
+ (released_network_name, network_id))
+
+ return response
+
+ # ---------------------------------------------------------------------------
+ # add_network()
+ # ---------------------------------------------------------------------------
+ def add_network(
+ self, network_name="", network_address="",
+ network_size="", network_family='4',
+ network_type='lan', network_location=-1):
+ """
+        Add a new LAN network into a given supernet (or the default supernet) in FusionLayer Infinity via the REST API.
+        required fields = ['network_name', 'network_family', 'network_type', 'network_address', 'network_size']
+ """
+ method = 'post'
+ resource_url = 'networks'
+ response = None
+ if network_name is None or network_address is None or network_size is None:
+ self.module.exit_json(
+ msg="You must specify those options 'network_name', 'network_address' and 'network_size'")
+
+ if not network_family:
+ network_family = '4'
+ if not network_type:
+ network_type = 'lan'
+ if not network_location:
+ network_location = -1
+ payload_data = {
+ "network_name": network_name,
+ 'network_address': network_address,
+ 'network_size': network_size,
+ 'network_family': network_family,
+ 'network_type': network_type,
+ 'network_location': network_location}
+ response = self._get_api_call_ansible_handler(
+ method='post', resource_url=resource_url,
+ stat_codes=[200], payload_data=payload_data)
+ return response
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server_ip=dict(type='str', required=True),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True),
+ network_id=dict(type='str'),
+ ip_address=dict(type='str'),
+ network_name=dict(type='str'),
+ network_location=dict(type='int', default=-1),
+ network_family=dict(type='str', default='4', choices=['4', '6', 'dual']),
+ network_type=dict(type='str', default='lan', choices=['lan', 'shared_lan', 'supernet']),
+ network_address=dict(type='str'),
+ network_size=dict(type='str'),
+ action=dict(type='str', required=True, choices=[
+ 'add_network',
+ 'delete_network',
+ 'get_network',
+ 'get_network_id',
+ 'release_ip',
+ 'release_network',
+ 'reserve_network',
+ 'reserve_next_available_ip',
+ ],),
+ ),
+ required_together=(
+ ['username', 'password'],
+ ),
+ )
+ server_ip = module.params["server_ip"]
+ username = module.params["username"]
+ password = module.params["password"]
+ action = module.params["action"]
+ network_id = module.params["network_id"]
+ released_ip = module.params["ip_address"]
+ network_name = module.params["network_name"]
+ network_family = module.params["network_family"]
+ network_type = module.params["network_type"]
+ network_address = module.params["network_address"]
+ network_size = module.params["network_size"]
+ network_location = module.params["network_location"]
+ my_infinity = Infinity(module, server_ip, username, password)
+ result = ''
+ if action == "reserve_next_available_ip":
+ if network_id:
+ result = my_infinity.reserve_next_available_ip(network_id)
+ if not result:
+                result = 'There was an error calling reserve_next_available_ip'
+ module.exit_json(changed=False, meta=result)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_ip":
+ if network_id and released_ip:
+ result = my_infinity.release_ip(
+ network_id=network_id, ip_address=released_ip)
+ module.exit_json(changed=True, meta=result)
+ elif action == "delete_network":
+ result = my_infinity.delete_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "get_network_id":
+ result = my_infinity.get_network_id(
+ network_name=network_name, network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+ elif action == "get_network":
+ result = my_infinity.get_network(
+ network_id=network_id, network_name=network_name)
+ module.exit_json(changed=True, meta=result)
+ elif action == "reserve_network":
+ result = my_infinity.reserve_network(
+ network_id=network_id,
+ reserved_network_name=network_name,
+ reserved_network_size=network_size,
+ reserved_network_family=network_family,
+ reserved_network_type=network_type,
+ reserved_network_address=network_address)
+ module.exit_json(changed=True, meta=result)
+ elif action == "release_network":
+ result = my_infinity.release_network(
+ network_id=network_id,
+ released_network_name=network_name,
+ released_network_type=network_type)
+ module.exit_json(changed=True, meta=result)
+
+ elif action == "add_network":
+ result = my_infinity.add_network(
+ network_name=network_name,
+ network_location=network_location,
+ network_address=network_address,
+ network_size=network_size,
+ network_family=network_family,
+ network_type=network_type)
+
+ module.exit_json(changed=True, meta=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_database.py b/ansible_collections/community/general/plugins/modules/influxdb_database.py
new file mode 100644
index 000000000..046b16e18
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_database.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_database
+short_description: Manage InfluxDB databases
+description:
+ - Manage InfluxDB databases.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ state:
+ description:
+ - Determines if the database should be created or destroyed.
+ choices: [ absent, present ]
+ default: present
+ type: str
+extends_documentation_fragment:
+ - community.general.influxdb
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_database command from Ansible Playbooks
+- name: Create database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+
+- name: Destroy database
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ state: absent
+
+- name: Create database using custom credentials
+ community.general.influxdb_database:
+ hostname: "{{influxdb_ip_address}}"
+ username: "{{influxdb_username}}"
+ password: "{{influxdb_password}}"
+ database_name: "{{influxdb_database_name}}"
+ ssl: true
+ validate_certs: true
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+def find_database(module, client, database_name):
+ database = None
+
+ try:
+ databases = client.get_list_database()
+ for db in databases:
+ if db['name'] == database_name:
+ database = db
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+ return database
+
+
+def create_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.create_database(database_name)
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=True)
+
+
+def drop_database(module, client, database_name):
+ if not module.check_mode:
+ try:
+ client.drop_database(database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ database_name=dict(required=True, type='str'),
+ state=dict(default='present', type='str', choices=['present', 'absent'])
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+ database_name = influxdb.database_name
+ database = find_database(module, client, database_name)
+
+ if state == 'present':
+ if database:
+ module.exit_json(changed=False)
+ else:
+ create_database(module, client, database_name)
+
+ if state == 'absent':
+ if database:
+ drop_database(module, client, database_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_query.py b/ansible_collections/community/general/plugins/modules/influxdb_query.py
new file mode 100644
index 000000000..c2e3d8acc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_query.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_query
+short_description: Query data points from InfluxDB
+description:
+ - Query data points from InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ query:
+ description:
+ - Query to be executed.
+ required: true
+ type: str
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - community.general.influxdb
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Query connections
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections"
+ register: connection
+
+- name: Query connections with tags filters
+ community.general.influxdb_query:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ query: "select mean(value) from connections where region='zue01' and host='server01'"
+ register: connection
+
+- name: Print results from the query
+ ansible.builtin.debug:
+ var: connection.query_results
+'''
+
+RETURN = r'''
+query_results:
+ description: Result from the query
+ returned: success
+ type: list
+ sample:
+ - mean: 1245.5333333333333
+ time: "1970-01-01T00:00:00Z"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBRead(InfluxDb):
+
+ def read_by_query(self, query):
+ client = self.connect_to_influxdb()
+ try:
+ rs = client.query(query)
+ if rs:
+ return list(rs.get_points())
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ query=dict(type='str', required=True),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ influx = AnsibleInfluxDBRead(module)
+ query = module.params.get('query')
+ results = influx.read_by_query(query)
+ module.exit_json(changed=True, query_results=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
new file mode 100644
index 000000000..28d5450ff
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_retention_policy.py
@@ -0,0 +1,350 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Kamil Szczygiel <kamil.szczygiel () intel.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_retention_policy
+short_description: Manage InfluxDB retention policies
+description:
+ - Manage InfluxDB retention policies.
+author: "Kamil Szczygiel (@kamsz)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+ - requests
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+ policy_name:
+ description:
+ - Name of the retention policy.
+ required: true
+ type: str
+ state:
+ description:
+ - State of the retention policy.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ version_added: 3.1.0
+ duration:
+ description:
+ - Determines how long InfluxDB should keep the data. If specified, it
+ should be C(INF) or at least one hour. If not specified, C(INF) is
+ assumed. Supports complex duration expressions with multiple units.
+ - Required only if I(state) is set to C(present).
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ - Required only if I(state) is set to C(present).
+ type: int
+ default:
+ description:
+ - Sets the retention policy as default retention policy.
+ type: bool
+ default: false
+ shard_group_duration:
+ description:
+      - Determines the time range covered by a shard group. If specified, it
+        must be at least one hour. If not specified, it is determined by InfluxDB
+        based on the retention policy's duration. Supports complex duration
+        expressions with multiple units.
+ type: str
+ version_added: '2.0.0'
+extends_documentation_fragment:
+ - community.general.influxdb
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+# Example influxdb_retention_policy command from Ansible Playbooks
+- name: Create 1 hour retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: true
+ validate_certs: true
+ state: present
+
+- name: Create 1 day retention policy with 1 hour shard group duration
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+ shard_group_duration: 1h
+ state: present
+
+- name: Create 1 week retention policy with 1 day shard group duration
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 1w
+ replication: 1
+ shard_group_duration: 1d
+ state: present
+
+- name: Create infinite retention policy with 1 week of shard group duration
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: INF
+ replication: 1
+ ssl: false
+ validate_certs: false
+ shard_group_duration: 1w
+ state: present
+
+- name: Create retention policy with complex durations
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 5d1h30m
+ replication: 1
+ ssl: false
+ validate_certs: false
+ shard_group_duration: 1d10h30m
+ state: present
+
+- name: Drop retention policy
+ community.general.influxdb_retention_policy:
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ state: absent
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import re
+
+try:
+ import requests.exceptions
+ from influxdb import exceptions
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+from ansible.module_utils.common.text.converters import to_native
+
+
+VALID_DURATION_REGEX = re.compile(r'^(INF|(\d+(ns|u|µ|ms|s|m|h|d|w)))+$')
+
+DURATION_REGEX = re.compile(r'(\d+)(ns|u|µ|ms|s|m|h|d|w)')
+EXTENDED_DURATION_REGEX = re.compile(r'(?:(\d+)(ns|u|µ|ms|m|h|d|w)|(\d+(?:\.\d+)?)(s))')
+
+DURATION_UNIT_NANOSECS = {
+ 'ns': 1,
+ 'u': 1000,
+ 'µ': 1000,
+ 'ms': 1000 * 1000,
+ 's': 1000 * 1000 * 1000,
+ 'm': 1000 * 1000 * 1000 * 60,
+ 'h': 1000 * 1000 * 1000 * 60 * 60,
+ 'd': 1000 * 1000 * 1000 * 60 * 60 * 24,
+ 'w': 1000 * 1000 * 1000 * 60 * 60 * 24 * 7,
+}
+
+MINIMUM_VALID_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
+MINIMUM_VALID_SHARD_GROUP_DURATION = 1 * DURATION_UNIT_NANOSECS['h']
+
+
+def check_duration_literal(value):
+ return VALID_DURATION_REGEX.search(value) is not None
+
+
+def parse_duration_literal(value, extended=False):
+ duration = 0.0
+
+ if value == "INF":
+ return duration
+
+ lookup = (EXTENDED_DURATION_REGEX if extended else DURATION_REGEX).findall(value)
+
+ for duration_literal in lookup:
+ filtered_literal = list(filter(None, duration_literal))
+ duration_val = float(filtered_literal[0])
+ duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]]
+
+ return duration
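+
+# Editor's sketch of the two helpers above, with worked values:
+#   check_duration_literal("5d1h30m") -> True (repeated <int><unit> groups)
+#   check_duration_literal("90x") -> False
+#   parse_duration_literal("INF") -> 0.0
+#   parse_duration_literal("1h") -> 3600000000000.0 (one hour in nanoseconds)
+#   parse_duration_literal("5d1h30m") -> 437400000000000.0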
+
+
+def find_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ hostname = module.params['hostname']
+ retention_policy = None
+
+ try:
+ retention_policies = client.get_list_retention_policies(database=database_name)
+ for policy in retention_policies:
+ if policy['name'] == policy_name:
+ retention_policy = policy
+ break
+ except requests.exceptions.ConnectionError as e:
+ module.fail_json(msg="Cannot connect to database %s on %s : %s" % (database_name, hostname, to_native(e)))
+
+ if retention_policy is not None:
+ retention_policy["duration"] = parse_duration_literal(retention_policy["duration"], extended=True)
+ retention_policy["shardGroupDuration"] = parse_duration_literal(retention_policy["shardGroupDuration"], extended=True)
+
+ return retention_policy
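+
+# Editor's note: InfluxDB reports durations as composite strings such as
+# "1h0m0s" (shard group durations like "168h0m0s"), so converting both sides
+# to nanoseconds makes the later comparison against the user-supplied duration
+# (for example "60m" against "1h0m0s") independent of the textual format.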
+
+
+def create_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ shard_group_duration = module.params['shard_group_duration']
+
+ if not check_duration_literal(duration):
+ module.fail_json(msg="Failed to parse value of duration")
+
+ influxdb_duration_format = parse_duration_literal(duration)
+ if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
+ module.fail_json(msg="duration value must be at least 1h")
+
+ if shard_group_duration is not None:
+ if not check_duration_literal(shard_group_duration):
+ module.fail_json(msg="Failed to parse value of shard_group_duration")
+
+ influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
+ if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
+ module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
+
+ if not module.check_mode:
+ try:
+ if shard_group_duration:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default,
+ shard_group_duration)
+ else:
+ client.create_retention_policy(policy_name, duration, replication, database_name, default)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def alter_retention_policy(module, client, retention_policy):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+ duration = module.params['duration']
+ replication = module.params['replication']
+ default = module.params['default']
+ shard_group_duration = module.params['shard_group_duration']
+
+ changed = False
+
+ if not check_duration_literal(duration):
+ module.fail_json(msg="Failed to parse value of duration")
+
+ influxdb_duration_format = parse_duration_literal(duration)
+ if influxdb_duration_format != 0 and influxdb_duration_format < MINIMUM_VALID_DURATION:
+ module.fail_json(msg="duration value must be at least 1h")
+
+ if shard_group_duration is None:
+ influxdb_shard_group_duration_format = retention_policy["shardGroupDuration"]
+ else:
+ if not check_duration_literal(shard_group_duration):
+ module.fail_json(msg="Failed to parse value of shard_group_duration")
+
+ influxdb_shard_group_duration_format = parse_duration_literal(shard_group_duration)
+ if influxdb_shard_group_duration_format < MINIMUM_VALID_SHARD_GROUP_DURATION:
+ module.fail_json(msg="shard_group_duration value must be finite and at least 1h")
+
+ if (retention_policy['duration'] != influxdb_duration_format or
+ retention_policy['shardGroupDuration'] != influxdb_shard_group_duration_format or
+ retention_policy['replicaN'] != int(replication) or
+ retention_policy['default'] != default):
+ if not module.check_mode:
+ try:
+ client.alter_retention_policy(policy_name, database_name, duration, replication, default,
+ shard_group_duration)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ changed = True
+ module.exit_json(changed=changed)
+
+
+def drop_retention_policy(module, client):
+ database_name = module.params['database_name']
+ policy_name = module.params['policy_name']
+
+ if not module.check_mode:
+ try:
+ client.drop_retention_policy(policy_name, database_name)
+ except exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+ module.exit_json(changed=True)
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ database_name=dict(required=True, type='str'),
+ policy_name=dict(required=True, type='str'),
+ duration=dict(type='str'),
+ replication=dict(type='int'),
+ default=dict(default=False, type='bool'),
+ shard_group_duration=dict(type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['duration', 'replication']),
+ ),
+ )
+
+ state = module.params['state']
+
+ influxdb = InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ retention_policy = find_retention_policy(module, client)
+
+ if state == 'present':
+ if retention_policy:
+ alter_retention_policy(module, client, retention_policy)
+ else:
+ create_retention_policy(module, client)
+
+ if state == 'absent':
+ if retention_policy:
+ drop_retention_policy(module, client)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_user.py b/ansible_collections/community/general/plugins/modules/influxdb_user.py
new file mode 100644
index 000000000..bbd0f8f5a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_user.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Vitaliy Zhhuta <zhhuta () gmail.com>
+# inspired by Kamil Szczygiel <kamil.szczygiel () intel.com> influxdb_database module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: influxdb_user
+short_description: Manage InfluxDB users
+description:
+ - Manage InfluxDB users.
+author: "Vitaliy Zhhuta (@zhhuta)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ user_name:
+ description:
+ - Name of the user.
+ required: true
+ type: str
+ user_password:
+ description:
+ - Password to be set for the user.
+ required: false
+ type: str
+ admin:
+ description:
+ - Whether the user should be in the admin role or not.
+ - Since version 2.8, the role will also be updated.
+ default: false
+ type: bool
+ state:
+ description:
+ - State of the user.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ grants:
+ description:
+ - Privileges to grant to this user.
+ - Takes a list of dicts containing the "database" and "privilege" keys.
+ - If this argument is not provided, the current grants will be left alone.
+ - If an empty list is provided, all grants for the user will be removed.
+ type: list
+ elements: dict
+extends_documentation_fragment:
+ - community.general.influxdb
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create a user on localhost using default login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+
+- name: Create a user on localhost using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create an admin user on a remote host using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ admin: true
+ hostname: "{{ influxdb_hostname }}"
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+
+- name: Create a user on localhost with privileges
+ community.general.influxdb_user:
+ user_name: john
+ user_password: s3cr3t
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ grants:
+ - database: 'collectd'
+ privilege: 'WRITE'
+ - database: 'graphite'
+ privilege: 'READ'
+
+- name: Destroy a user using custom login credentials
+ community.general.influxdb_user:
+ user_name: john
+ login_username: "{{ influxdb_username }}"
+ login_password: "{{ influxdb_password }}"
+ state: absent
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+import json
+
+from ansible.module_utils.urls import ConnectionError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+import ansible_collections.community.general.plugins.module_utils.influxdb as influx
+
+
+def find_user(module, client, user_name):
+ user_result = None
+
+ try:
+ users = client.get_list_users()
+ for user in users:
+ if user['user'] == user_name:
+ user_result = user
+ break
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ return user_result
+
+
+def check_user_password(module, client, user_name, user_password):
+ try:
+ client.switch_user(user_name, user_password)
+ client.get_list_users()
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 401:
+ return False
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+ finally:
+ # restore previous user
+ client.switch_user(module.params['username'], module.params['password'])
+ return True
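+
+# Editor's note on check_user_password(): it temporarily switches the client
+# to the managed user and issues a list-users call; a 401 response means the
+# stored password differs from user_password (False is returned), and the
+# finally block always switches back to the login credentials.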
+
+
+def set_user_password(module, client, user_name, user_password):
+ if not module.check_mode:
+ try:
+ client.set_user_password(user_name, user_password)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def create_user(module, client, user_name, user_password, admin):
+ if not module.check_mode:
+ try:
+ client.create_user(user_name, user_password, admin)
+ except ConnectionError as e:
+ module.fail_json(msg=to_native(e))
+
+
+def drop_user(module, client, user_name):
+ if not module.check_mode:
+ try:
+ client.drop_user(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ module.exit_json(changed=True)
+
+
+def set_user_grants(module, client, user_name, grants):
+ changed = False
+
+ current_grants = []
+ try:
+ current_grants = client.get_list_privileges(user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ if not module.check_mode or 'user not found' not in e.content:
+ module.fail_json(msg=e.content)
+
+ try:
+ parsed_grants = []
+ # Fix privileges wording
+ for i, v in enumerate(current_grants):
+ if v['privilege'] != 'NO PRIVILEGES':
+ if v['privilege'] == 'ALL PRIVILEGES':
+ v['privilege'] = 'ALL'
+ parsed_grants.append(v)
+
+ # check if the current grants are included in the desired ones
+ for current_grant in parsed_grants:
+ if current_grant not in grants:
+ if not module.check_mode:
+ client.revoke_privilege(current_grant['privilege'],
+ current_grant['database'],
+ user_name)
+ changed = True
+
+ # check if the desired grants are included in the current ones
+ for grant in grants:
+ if grant not in parsed_grants:
+ if not module.check_mode:
+ client.grant_privilege(grant['privilege'],
+ grant['database'],
+ user_name)
+ changed = True
+
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=e.content)
+
+ return changed
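+
+# Editor's note: client.get_list_privileges() returns entries of the form
+# {'database': 'collectd', 'privilege': 'ALL PRIVILEGES'}; after the
+# normalization above, a desired grant like
+# {'database': 'collectd', 'privilege': 'ALL'} compares equal and triggers
+# no revoke_privilege()/grant_privilege() calls.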
+
+
+INFLUX_AUTH_FIRST_USER_REQUIRED = "error authorizing query: create admin user first or disable authentication"
+
+
+def main():
+ argument_spec = influx.InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ state=dict(default='present', type='str', choices=['present', 'absent']),
+ user_name=dict(required=True, type='str'),
+ user_password=dict(required=False, type='str', no_log=True),
+        admin=dict(default=False, type='bool'),
+ grants=dict(type='list', elements='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ user_name = module.params['user_name']
+ user_password = module.params['user_password']
+ admin = module.params['admin']
+ grants = module.params['grants']
+ influxdb = influx.InfluxDb(module)
+ client = influxdb.connect_to_influxdb()
+
+ user = None
+ try:
+ user = find_user(module, client, user_name)
+ except influx.exceptions.InfluxDBClientError as e:
+ if e.code == 403:
+ reason = None
+ try:
+ msg = json.loads(e.content)
+ reason = msg["error"]
+ except (KeyError, ValueError):
+ module.fail_json(msg=to_native(e))
+
+ if reason != INFLUX_AUTH_FIRST_USER_REQUIRED:
+ module.fail_json(msg=to_native(e))
+ else:
+ module.fail_json(msg=to_native(e))
+
+ changed = False
+
+ if state == 'present':
+ if user:
+            if user_password is not None and not check_user_password(module, client, user_name, user_password):
+ set_user_password(module, client, user_name, user_password)
+ changed = True
+
+ try:
+ if admin and not user['admin']:
+ if not module.check_mode:
+ client.grant_admin_privileges(user_name)
+ changed = True
+ elif not admin and user['admin']:
+ if not module.check_mode:
+ client.revoke_admin_privileges(user_name)
+ changed = True
+ except influx.exceptions.InfluxDBClientError as e:
+ module.fail_json(msg=to_native(e))
+
+ else:
+ user_password = user_password or ''
+ create_user(module, client, user_name, user_password, admin)
+ changed = True
+
+ if grants is not None:
+ if set_user_grants(module, client, user_name, grants):
+ changed = True
+
+ module.exit_json(changed=changed)
+
+ if state == 'absent':
+ if user:
+ drop_user(module, client, user_name)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/influxdb_write.py b/ansible_collections/community/general/plugins/modules/influxdb_write.py
new file mode 100644
index 000000000..f95b6dae8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/influxdb_write.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: influxdb_write
+short_description: Write data points into InfluxDB
+description:
+ - Write data points into InfluxDB.
+author: "René Moser (@resmo)"
+requirements:
+ - "python >= 2.6"
+ - "influxdb >= 0.9"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ data_points:
+ description:
+ - Data points as dict to write into the database.
+ required: true
+ type: list
+ elements: dict
+ database_name:
+ description:
+ - Name of the database.
+ required: true
+ type: str
+extends_documentation_fragment:
+ - community.general.influxdb
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Write points into database
+ community.general.influxdb_write:
+ hostname: "{{influxdb_ip_address}}"
+ database_name: "{{influxdb_database_name}}"
+ data_points:
+ - measurement: connections
+ tags:
+ host: server01
+ region: us-west
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 2000
+ - measurement: connections
+ tags:
+ host: server02
+ region: us-east
+ time: "{{ ansible_date_time.iso8601 }}"
+ fields:
+ value: 3000
+'''
+
+RETURN = r'''
+# only defaults
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.influxdb import InfluxDb
+
+
+class AnsibleInfluxDBWrite(InfluxDb):
+
+ def write_data_point(self, data_points):
+ client = self.connect_to_influxdb()
+
+ try:
+ client.write_points(data_points)
+ except Exception as e:
+ self.module.fail_json(msg=to_native(e))
+
+
+def main():
+ argument_spec = InfluxDb.influxdb_argument_spec()
+ argument_spec.update(
+ data_points=dict(required=True, type='list', elements='dict'),
+ database_name=dict(required=True, type='str'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ influx = AnsibleInfluxDBWrite(module)
+ data_points = module.params.get('data_points')
+ influx.write_data_point(data_points)
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ini_file.py b/ansible_collections/community/general/plugins/modules/ini_file.py
new file mode 100644
index 000000000..874f10ae0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ini_file.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
+# Copyright (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ini_file
+short_description: Tweak settings in INI files
+extends_documentation_fragment:
+ - files
+ - community.general.attributes
+description:
+ - Manage (add, remove, change) individual settings in an INI-style file without having
+ to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble).
+ - Adds missing sections if they don't exist.
+ - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
+ - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when
+ no other modifications need to be applied.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ path:
+ description:
+ - Path to the INI-style file; this file is created if required.
+ - Before Ansible 2.3 this option was only usable as I(dest).
+ type: path
+ required: true
+ aliases: [ dest ]
+ section:
+ description:
+ - Section name in INI file. This is added if I(state=present) automatically when
+ a single value is being set.
+      - If left empty, omitted, or set to C(null), the I(option) will be placed before the first I(section).
+ - Using C(null) is also required if the config format does not support sections.
+ type: str
+ option:
+ description:
+ - If set (required for changing a I(value)), this is the name of the option.
+ - May be omitted if adding/removing a whole I(section).
+ type: str
+ value:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ - Mutually exclusive with I(values).
+ - I(value=v) is equivalent to I(values=[v]).
+ type: str
+ values:
+ description:
+ - The string value to be associated with an I(option).
+ - May be omitted when removing an I(option).
+ - Mutually exclusive with I(value).
+ - I(value=v) is equivalent to I(values=[v]).
+ type: list
+ elements: str
+ version_added: 3.6.0
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: false
+ state:
+ description:
+ - If set to C(absent) and I(exclusive) set to C(true) all matching I(option) lines are removed.
+ - If set to C(absent) and I(exclusive) set to C(false) the specified I(option=value) lines are removed,
+ but the other I(option)s with the same name are not touched.
+ - If set to C(present) and I(exclusive) set to C(false) the specified I(option=values) lines are added,
+ but the other I(option)s with the same name are not touched.
+ - If set to C(present) and I(exclusive) set to C(true) all given I(option=values) lines will be
+ added and the other I(option)s with the same name are removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ exclusive:
+ description:
+ - If set to C(true) (default), all matching I(option) lines are removed when I(state=absent),
+ or replaced when I(state=present).
+ - If set to C(false), only the specified I(value(s)) are added when I(state=present),
+ or removed when I(state=absent), and existing ones are not modified.
+ type: bool
+ default: true
+ version_added: 3.6.0
+ no_extra_spaces:
+ description:
+ - Do not insert spaces before and after '=' symbol.
+ type: bool
+ default: false
+ create:
+ description:
+ - If set to C(false), the module will fail if the file does not already exist.
+ - By default it will create the file if it is missing.
+ type: bool
+ default: true
+ allow_no_value:
+ description:
+ - Allow option without value and without '=' symbol.
+ type: bool
+ default: false
+notes:
+ - While it is possible to add an I(option) without specifying a I(value), this makes no sense.
+ - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
+ - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files.
+author:
+ - Jan-Piet Mens (@jpmens)
+ - Ales Nosek (@noseka1)
+'''
+
+EXAMPLES = r'''
+# Before Ansible 2.3, option 'dest' was used instead of 'path'
+- name: Ensure "fav=lemonade is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: fav
+ value: lemonade
+ mode: '0600'
+ backup: true
+
+- name: Ensure "temperature=cold is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/anotherconf
+ section: drinks
+ option: temperature
+ value: cold
+ backup: true
+
+- name: Add "beverage=lemon juice" is in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: beverage
+ value: lemon juice
+ mode: '0600'
+ state: present
+ exclusive: false
+
+- name: Ensure multiple values "beverage=coke" and "beverage=pepsi" are in section "[drinks]" in specified file
+ community.general.ini_file:
+ path: /etc/conf
+ section: drinks
+ option: beverage
+ values:
+ - coke
+ - pepsi
+ mode: '0600'
+ state: present
+'''
+
+import io
+import os
+import re
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+def match_opt(option, line):
+ option = re.escape(option)
+ return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
+
+
+def match_active_opt(option, line):
+ option = re.escape(option)
+ return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line)
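+
+# Editor's sketch of the two matchers above:
+#   match_opt("fav", "fav = lemonade\n").group(6) -> "lemonade"
+#   match_opt("fav", "; fav = lemonade\n") matches, so commented-out options
+#   can be rewritten, while match_active_opt("fav", "; fav = lemonade\n")
+#   returns None. The (=|$) alternative also lets a bare "fav\n" line match,
+#   which is what allow_no_value relies on.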
+
+
+def update_section_line(changed, section_lines, index, changed_lines, newline, msg):
+ option_changed = section_lines[index] != newline
+ changed = changed or option_changed
+ if option_changed:
+ msg = 'option changed'
+ section_lines[index] = newline
+ changed_lines[index] = 1
+ return (changed, msg)
+
+
+def do_ini(module, filename, section=None, option=None, values=None,
+ state='present', exclusive=True, backup=False, no_extra_spaces=False,
+ create=True, allow_no_value=False):
+
+ if section is not None:
+ section = to_text(section)
+ if option is not None:
+ option = to_text(option)
+
+    # deduplicate entries in values
+    values_unique = []
+    for value in values:
+        if value not in values_unique and value is not None:
+            values_unique.append(to_text(value))
+ values = values_unique
+
+ diff = dict(
+ before='',
+ after='',
+ before_header='%s (content)' % filename,
+ after_header='%s (content)' % filename,
+ )
+
+ if not os.path.exists(filename):
+ if not create:
+ module.fail_json(rc=257, msg='Destination %s does not exist!' % filename)
+ destpath = os.path.dirname(filename)
+ if not os.path.exists(destpath) and not module.check_mode:
+ os.makedirs(destpath)
+ ini_lines = []
+ else:
+ with io.open(filename, 'r', encoding="utf-8-sig") as ini_file:
+ ini_lines = [to_text(line) for line in ini_file.readlines()]
+
+ if module._diff:
+ diff['before'] = u''.join(ini_lines)
+
+ changed = False
+
+ # ini file could be empty
+ if not ini_lines:
+ ini_lines.append(u'\n')
+
+ # last line of file may not contain a trailing newline
+ if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n':
+ ini_lines[-1] += u'\n'
+ changed = True
+
+ # append fake section lines to simplify the logic
+    # At top: a fake section whose name (a commit hash) cannot match
+    # any real section in the file
+ fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5"
+
+ # Insert it at the beginning
+ ini_lines.insert(0, u'[%s]' % fake_section_name)
+
+ # At bottom:
+ ini_lines.append(u'[')
+
+ # If no section is defined, fake section is used
+ if not section:
+ section = fake_section_name
+
+ within_section = not section
+ section_start = section_end = 0
+ msg = 'OK'
+ if no_extra_spaces:
+ assignment_format = u'%s=%s\n'
+ else:
+ assignment_format = u'%s = %s\n'
+
+ option_no_value_present = False
+
+ non_blank_non_comment_pattern = re.compile(to_text(r'^[ \t]*([#;].*)?$'))
+
+ before = after = []
+ section_lines = []
+
+ for index, line in enumerate(ini_lines):
+ # find start and end of section
+ if line.startswith(u'[%s]' % section):
+ within_section = True
+ section_start = index
+ elif line.startswith(u'['):
+ if within_section:
+ section_end = index
+ break
+
+ before = ini_lines[0:section_start]
+ section_lines = ini_lines[section_start:section_end]
+ after = ini_lines[section_end:len(ini_lines)]
+
+ # Keep track of changed section_lines
+ changed_lines = [0] * len(section_lines)
+
+ # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex
+ #
+    # 1. edit all lines where we have an option=value pair with a matching value in values[]
+ # 2. edit all the remaining lines where we have a matching option
+ # 3. delete remaining lines where we have a matching option
+ # 4. insert missing option line(s) at the end of the section
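+    #
+    # A worked example (editor's sketch): with
+    #   section_lines = ['[drinks]\n', 'beverage = coke\n', 'beverage = water\n'],
+    #   option='beverage', values=['coke', 'pepsi'] and exclusive=True,
+    # step 1 keeps 'beverage = coke' and removes 'coke' from values, step 2
+    # rewrites 'beverage = water' to 'beverage = pepsi', and steps 3 and 4
+    # then have nothing left to do.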
+
+ if state == 'present' and option:
+ for index, line in enumerate(section_lines):
+ if match_opt(option, line):
+ match = match_opt(option, line)
+ if values and match.group(6) in values:
+ matched_value = match.group(6)
+ if not matched_value and allow_no_value:
+ # replace existing option with no value line(s)
+ newline = u'%s\n' % option
+ option_no_value_present = True
+ else:
+ # replace existing option=value line(s)
+ newline = assignment_format % (option, matched_value)
+ (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ values.remove(matched_value)
+ elif not values and allow_no_value:
+ # replace existing option with no value line(s)
+ newline = u'%s\n' % option
+ (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ option_no_value_present = True
+ break
+
+ if state == 'present' and exclusive and not allow_no_value:
+ # override option with no value to option with value if not allow_no_value
+ if len(values) > 0:
+ for index, line in enumerate(section_lines):
+ if not changed_lines[index] and match_opt(option, line):
+ newline = assignment_format % (option, values.pop(0))
+ (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg)
+ if len(values) == 0:
+ break
+ # remove all remaining option occurrences from the rest of the section
+ for index in range(len(section_lines) - 1, 0, -1):
+ if not changed_lines[index] and match_opt(option, section_lines[index]):
+ del section_lines[index]
+ del changed_lines[index]
+ changed = True
+ msg = 'option changed'
+
+ if state == 'present':
+ # insert missing option line(s) at the end of the section
+ for index in range(len(section_lines), 0, -1):
+ # search backwards for previous non-blank or non-comment line
+ if not non_blank_non_comment_pattern.match(section_lines[index - 1]):
+ if option and values:
+ # insert option line(s)
+ for element in values[::-1]:
+ # items are added backwards, so traverse the list backwards to not confuse the user
+ # otherwise some of their options might appear in reverse order for whatever fancy reason ¯\_(ツ)_/¯
+ if element is not None:
+ # insert option=value line
+ section_lines.insert(index, assignment_format % (option, element))
+ msg = 'option added'
+ changed = True
+ elif element is None and allow_no_value:
+ # insert option with no value line
+ section_lines.insert(index, u'%s\n' % option)
+ msg = 'option added'
+ changed = True
+ elif option and not values and allow_no_value and not option_no_value_present:
+ # insert option with no value line(s)
+ section_lines.insert(index, u'%s\n' % option)
+ msg = 'option added'
+ changed = True
+ break
+
+ if state == 'absent':
+ if option:
+ if exclusive:
+ # delete all option line(s) with given option and ignore value
+ new_section_lines = [line for line in section_lines if not (match_active_opt(option, line))]
+ if section_lines != new_section_lines:
+ changed = True
+ msg = 'option changed'
+ section_lines = new_section_lines
+ elif not exclusive and len(values) > 0:
+ # delete specified option=value line(s)
+ new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)]
+ if section_lines != new_section_lines:
+ changed = True
+ msg = 'option changed'
+ section_lines = new_section_lines
+ else:
+ # drop the entire section
+ if section_lines:
+ section_lines = []
+ msg = 'section removed'
+ changed = True
+
+ # reassemble the ini_lines after manipulation
+ ini_lines = before + section_lines + after
+
+ # remove the fake section line
+ del ini_lines[0]
+ del ini_lines[-1:]
+
+ if not within_section and state == 'present':
+ ini_lines.append(u'[%s]\n' % section)
+ msg = 'section and option added'
+ if option and values:
+ for value in values:
+ ini_lines.append(assignment_format % (option, value))
+ elif option and not values and allow_no_value:
+ ini_lines.append(u'%s\n' % option)
+ else:
+ msg = 'only section added'
+ changed = True
+
+ if module._diff:
+ diff['after'] = u''.join(ini_lines)
+
+ backup_file = None
+ if changed and not module.check_mode:
+ if backup:
+ backup_file = module.backup_local(filename)
+
+ encoded_ini_lines = [to_bytes(line) for line in ini_lines]
+ try:
+ tmpfd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
+ f = os.fdopen(tmpfd, 'wb')
+ f.writelines(encoded_ini_lines)
+ f.close()
+ except IOError:
+ module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc())
+
+ try:
+ module.atomic_move(tmpfile, filename)
+ except IOError:
+        module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, filename),
+                         traceback=traceback.format_exc())
+
+ return (changed, backup_file, diff, msg)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['dest']),
+ section=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ values=dict(type='list', elements='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ exclusive=dict(type='bool', default=True),
+ no_extra_spaces=dict(type='bool', default=False),
+ allow_no_value=dict(type='bool', default=False),
+ create=dict(type='bool', default=True)
+ ),
+ mutually_exclusive=[
+ ['value', 'values']
+ ],
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ section = module.params['section']
+ option = module.params['option']
+ value = module.params['value']
+ values = module.params['values']
+ state = module.params['state']
+ exclusive = module.params['exclusive']
+ backup = module.params['backup']
+ no_extra_spaces = module.params['no_extra_spaces']
+ allow_no_value = module.params['allow_no_value']
+ create = module.params['create']
+
+ if state == 'present' and not allow_no_value and value is None and not values:
+ module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.")
+
+ if value is not None:
+ values = [value]
+ elif values is None:
+ values = []
+
+ (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value)
+
+ if not module.check_mode and os.path.exists(path):
+ file_args = module.load_file_common_arguments(module.params)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
+ results = dict(
+ changed=changed,
+ diff=diff,
+ msg=msg,
+ path=path,
+ )
+ if backup_file is not None:
+ results['backup_file'] = backup_file
+
+ # Mission complete
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/installp.py b/ansible_collections/community/general/plugins/modules/installp.py
new file mode 100644
index 000000000..41064363d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/installp.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: installp
+author:
+ - Kairo Araujo (@kairoaraujo)
+short_description: Manage packages on AIX
+description:
+  - Manage packages using 'installp' on AIX.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ accept_license:
+ description:
+ - Whether to accept the license for the package(s).
+ type: bool
+ default: false
+ name:
+ description:
+ - One or more packages to install or remove.
+ - Use C(all) to install all packages available on informed C(repository_path).
+ type: list
+ elements: str
+ required: true
+ aliases: [ pkg ]
+ repository_path:
+ description:
+ - Path with AIX packages (required to install).
+ type: path
+ state:
+ description:
+ - Whether the package needs to be present on or absent from the system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+notes:
+- If the package is already installed, the module will not install it again, even if the package/fileset is newer.
+'''
+
+EXAMPLES = r'''
+- name: Install package foo
+ community.general.installp:
+ name: foo
+ repository_path: /repository/AIX71/installp/base
+ accept_license: true
+ state: present
+
+- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt
+ repository_path: /repository/AIX71/installp/base
+ accept_license: true
+ state: present
+
+- name: Install bos.sysmgt.nim.master only
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ repository_path: /repository/AIX71/installp/base
+ accept_license: true
+ state: present
+
+- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
+ community.general.installp:
+ name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
+ repository_path: /repository/AIX71/installp/base
+ accept_license: true
+ state: present
+
+- name: Remove packages bos.sysmgt.nim.master
+ community.general.installp:
+ name: bos.sysmgt.nim.master
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _check_new_pkg(module, package, repository_path):
+ """
+    Check whether the package or fileset name and the repository path are valid.
+
+ :param module: Ansible module arguments spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package information.
+ """
+
+ if os.path.isdir(repository_path):
+ installp_cmd = module.get_bin_path('installp', True)
+ rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+
+ if package == 'all':
+ pkg_info = "All packages on dir"
+ return True, pkg_info
+
+ else:
+ pkg_info = {}
+ for line in package_result.splitlines():
+ if re.findall(package, line):
+ pkg_name = line.split()[0].strip()
+ pkg_version = line.split()[1].strip()
+ pkg_info[pkg_name] = pkg_version
+
+ return True, pkg_info
+
+ else:
+ module.fail_json(msg="Repository path %s is not valid." % repository_path)
+
+
+def _check_installed_pkg(module, package, repository_path):
+ """
+ Check the package on AIX.
+    It verifies whether the package is installed and collects its information.
+
+ :param module: Ansible module parameters spec.
+ :param package: Package/fileset name.
+ :param repository_path: Repository package path.
+ :return: Bool, package data.
+ """
+
+ lslpp_cmd = module.get_bin_path('lslpp', True)
+ rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))
+
+ if rc == 1:
+ package_state = ' '.join(err.split()[-2:])
+ if package_state == 'not installed.':
+ return False, None
+ else:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ if rc != 0:
+ module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
+
+ pkg_data = {}
+ full_pkg_data = lslpp_result.splitlines()
+ for line in full_pkg_data:
+ pkg_name, fileset, level = line.split(':')[0:3]
+ pkg_data[pkg_name] = fileset, level
+
+ return True, pkg_data
+
+
+def remove(module, installp_cmd, packages):
+ repository_path = None
+ remove_count = 0
+ removed_pkgs = []
+ not_found_pkg = []
+ for package in packages:
+ pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
+
+ if pkg_check:
+ if not module.check_mode:
+ rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
+ remove_count += 1
+ removed_pkgs.append(package)
+
+ else:
+ not_found_pkg.append(package)
+
+ if remove_count > 0:
+        if len(not_found_pkg) > 0:
+ not_found_pkg.insert(0, "Package(s) not found: ")
+
+ changed = True
+ msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
+
+ else:
+ changed = False
+ msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
+
+ return changed, msg
+
+
+def install(module, installp_cmd, packages, repository_path, accept_license):
+ installed_pkgs = []
+ not_found_pkgs = []
+ already_installed_pkgs = {}
+
+ accept_license_param = {
+ True: '-Y',
+ False: '',
+ }
+
+ # Validate if package exists on repository path.
+ for package in packages:
+ pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
+
+ # If package exists on repository path, check if package is installed.
+ if pkg_check:
+ pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
+
+ # If package is already installed.
+ if pkg_check_current:
+ # Check if package is a package and not a fileset, get version
+ # and add the package into already installed list
+ if package in pkg_info.keys():
+ already_installed_pkgs[package] = pkg_info[package][1]
+
+ else:
+ # If the package is not a package but a fileset, confirm
+ # and add the fileset/package into already installed list
+ for key in pkg_info.keys():
+ if package in pkg_info[key]:
+ already_installed_pkgs[package] = pkg_info[key][1]
+
+ else:
+ if not module.check_mode:
+ rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
+ if rc != 0:
+ module.fail_json(msg="Failed to run installp", rc=rc, err=err)
+ installed_pkgs.append(package)
+
+ else:
+ not_found_pkgs.append(package)
+
+ if len(installed_pkgs) > 0:
+ installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
+ else:
+ installed_msg = ''
+
+ if len(not_found_pkgs) > 0:
+ not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
+ else:
+ not_found_msg = ''
+
+ if len(already_installed_pkgs) > 0:
+ already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
+ else:
+ already_installed_msg = ''
+
+ if len(installed_pkgs) > 0:
+ changed = True
+ msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+ else:
+ changed = False
+ msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
+
+ return changed, msg
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ repository_path=dict(type='path'),
+ accept_license=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ repository_path = module.params['repository_path']
+ accept_license = module.params['accept_license']
+ state = module.params['state']
+
+ installp_cmd = module.get_bin_path('installp', True)
+
+ if state == 'present':
+ if repository_path is None:
+ module.fail_json(msg="repository_path is required to install package")
+
+ changed, msg = install(module, installp_cmd, name, repository_path, accept_license)
+
+ elif state == 'absent':
+ changed, msg = remove(module, installp_cmd, name)
+
+ else:
+ module.fail_json(changed=False, msg="Unexpected state.")
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/interfaces_file.py b/ansible_collections/community/general/plugins/modules/interfaces_file.py
new file mode 100644
index 000000000..f19c019f4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/interfaces_file.py
@@ -0,0 +1,416 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: interfaces_file
+short_description: Tweak settings in /etc/network/interfaces files
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+description:
+ - Manage (add, remove, change) individual interface options in an interfaces-style file without having
+    to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). The interface has to be already present in the file.
+  - Read information about interfaces from interfaces-styled files.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ dest:
+ type: path
+ description:
+      - Path to the interfaces file.
+ default: /etc/network/interfaces
+ iface:
+ type: str
+ description:
+      - Name of the interface, required for value changes or option removal.
+ address_family:
+ type: str
+ description:
+      - Address family of the interface, useful if the same interface name is used for both C(inet) and C(inet6).
+ option:
+ type: str
+ description:
+      - Name of the option, required for value changes or option removal.
+ value:
+ type: str
+ description:
+      - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
+        If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
+        C(pre-up), C(up), C(post-up) and C(down) options cannot be updated; only adding new options, removing existing
+        ones, or clearing the whole option set is supported.
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: false
+ state:
+ type: str
+ description:
+      - If set to C(absent), the option or section will be removed if present instead of created.
+ default: "present"
+ choices: [ "present", "absent" ]
+
+notes:
+  - If an option is defined multiple times, the last one will be updated, but all of them will be deleted when I(state) is C(absent).
+requirements: []
+author: "Roman Belyakovsky (@hryamzik)"
+'''
+
+RETURN = '''
+dest:
+ description: destination file/path
+ returned: success
+ type: str
+ sample: "/etc/network/interfaces"
+ifaces:
+ description: interfaces dictionary
+ returned: success
+ type: complex
+ contains:
+ ifaces:
+ description: interface dictionary
+ returned: success
+ type: dict
+ contains:
+ eth0:
+ description: Name of the interface
+ returned: success
+ type: dict
+ contains:
+ address_family:
+ description: interface address family
+ returned: success
+ type: str
+ sample: "inet"
+ method:
+ description: interface method
+ returned: success
+ type: str
+ sample: "manual"
+ mtu:
+ description: other options, all values returned as strings
+ returned: success
+ type: str
+ sample: "1500"
+ pre-up:
+ description: list of C(pre-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ up:
+ description: list of C(up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ post-up:
+ description: list of C(post-up) scripts
+ returned: success
+ type: list
+ sample:
+ - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+ down:
+ description: list of C(down) scripts
+ returned: success
+ type: list
+ sample:
+ - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
+ - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
+...
+'''
+
+EXAMPLES = '''
+- name: Set eth1 mtu configuration value to 8000
+ community.general.interfaces_file:
+ dest: /etc/network/interfaces.d/eth1.cfg
+ iface: eth1
+ option: mtu
+ value: 8000
+ backup: true
+ state: present
+ register: eth1_cfg
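+
+# A hedged sketch (values illustrative, not taken from the module docs):
+# with list-type options such as post-up, state=absent plus a value removes
+# only the matching entries.
+- name: Remove a specific post-up route from eth1
+  community.general.interfaces_file:
+    dest: /etc/network/interfaces.d/eth1.cfg
+    iface: eth1
+    option: post-up
+    value: 'route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1'
+    state: absent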
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+def lineDict(line):
+ return {'line': line, 'line_type': 'unknown'}
+
+
+def optionDict(line, iface, option, value, address_family):
+ return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family}
+
+
+def getValueFromLine(s):
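+    # Everything after the first whitespace-delimited word is taken as the
+    # value; for example, "    mtu 1500\n" yields "1500".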
+ option = s.split()[0]
+ optionStart = s.find(option)
+ optionLen = len(option)
+ return s[optionLen + optionStart:].strip()
+
+
+def read_interfaces_file(module, filename):
+ with open(filename, 'r') as f:
+ return read_interfaces_lines(module, f)
+
+
+def read_interfaces_lines(module, line_strings):
+ lines = []
+ ifaces = {}
+ currently_processing = None
+ i = 0
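+    # Simple line-oriented state machine: stanza keywords (mapping, source*,
+    # iface, auto, allow-*, no-auto-down, no-scripts) switch the state, and
+    # any other first word is treated as an option of the current stanza.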
+ for line in line_strings:
+ i += 1
+ words = line.split()
+ if len(words) < 1:
+ lines.append(lineDict(line))
+ continue
+ if words[0][0] == "#":
+ lines.append(lineDict(line))
+ continue
+ if words[0] == "mapping":
+ # currmap = calloc(1, sizeof *currmap);
+ lines.append(lineDict(line))
+ currently_processing = "MAPPING"
+ elif words[0] == "source":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-dir":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "source-directory":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "iface":
+ currif = {
+ "pre-up": [],
+ "up": [],
+ "down": [],
+ "post-up": []
+ }
+ iface_name = words[1]
+ try:
+ currif['address_family'] = words[2]
+ except IndexError:
+ currif['address_family'] = None
+ address_family = currif['address_family']
+ try:
+ currif['method'] = words[3]
+ except IndexError:
+ currif['method'] = None
+
+ ifaces[iface_name] = currif
+ lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif, 'address_family': address_family})
+ currently_processing = "IFACE"
+ elif words[0] == "auto":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0].startswith("allow-"):
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-auto-down":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ elif words[0] == "no-scripts":
+ lines.append(lineDict(line))
+ currently_processing = "NONE"
+ else:
+ if currently_processing == "IFACE":
+ option_name = words[0]
+ value = getValueFromLine(line)
+ lines.append(optionDict(line, iface_name, option_name, value, address_family))
+ if option_name in ["pre-up", "up", "down", "post-up"]:
+ currif[option_name].append(value)
+ else:
+ currif[option_name] = value
+ elif currently_processing == "MAPPING":
+ lines.append(lineDict(line))
+ elif currently_processing == "NONE":
+ lines.append(lineDict(line))
+ else:
+ module.fail_json(msg="misplaced option %s in line %d" % (line, i))
+ return None, None
+ return lines, ifaces
+
+
+def get_interface_options(iface_lines):
+ return [i for i in iface_lines if i['line_type'] == 'option']
+
+
+def get_target_options(iface_options, option):
+ return [i for i in iface_options if i['option'] == option]
+
+
+def update_existing_option_line(target_option, value):
+ old_line = target_option['line']
+ old_value = target_option['value']
+ prefix_start = old_line.find(target_option["option"])
+ optionLen = len(target_option["option"])
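+    # Locate the old value after the option name; joining its tokens with
+    # r"\s+" lets the match tolerate re-spaced whitespace inside the value.
+    # Illustrative example: the line "    mtu 1500" with new value "9000"
+    # becomes "    mtu 9000", keeping the prefix and any trailing text intact.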
+ old_value_position = re.search(r"\s+".join(map(re.escape, old_value.split())), old_line[prefix_start + optionLen:])
+ start = old_value_position.start() + prefix_start + optionLen
+ end = old_value_position.end() + prefix_start + optionLen
+ line = old_line[:start] + value + old_line[end:]
+ return line
+
+
+def set_interface_option(module, lines, iface, option, raw_value, state, address_family=None):
+ value = str(raw_value)
+ changed = False
+
+ iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
+ if address_family is not None:
+ iface_lines = [item for item in iface_lines
+ if "address_family" in item and item["address_family"] == address_family]
+
+ if len(iface_lines) < 1:
+ # interface not found
+ module.fail_json(msg="Error: interface %s not found" % iface)
+ return changed, None
+
+ iface_options = get_interface_options(iface_lines)
+ target_options = get_target_options(iface_options, option)
+
+ if state == "present":
+ if len(target_options) < 1:
+            # add the new option after the last line of the interface stanza
+            last_line_dict = iface_lines[-1]
+ changed, lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family)
+ else:
+ if option in ["pre-up", "up", "down", "post-up"]:
+ if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
+ changed, lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options, address_family)
+ else:
+ # if more than one option found edit the last one
+ if target_options[-1]['value'] != value:
+ changed = True
+ target_option = target_options[-1]
+ line = update_existing_option_line(target_option, value)
+ address_family = target_option['address_family']
+ index = len(lines) - lines[::-1].index(target_option) - 1
+ lines[index] = optionDict(line, iface, option, value, address_family)
+ elif state == "absent":
+ if len(target_options) >= 1:
+ if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
+ for target_option in [ito for ito in target_options if ito['value'] == value]:
+ changed = True
+ lines = [ln for ln in lines if ln != target_option]
+ else:
+ changed = True
+ for target_option in target_options:
+ lines = [ln for ln in lines if ln != target_option]
+ else:
+ module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
+
+ return changed, lines
+
+
+def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options, address_family):
+ # Changing method of interface is not an addition
+ if option == 'method':
+ changed = False
+ for ln in lines:
+ if ln.get('line_type', '') == 'iface' and ln.get('iface', '') == iface and value != ln.get('params', {}).get('method', ''):
+ changed = True
+ ln['line'] = re.sub(ln.get('params', {}).get('method', '') + '$', value, ln.get('line'))
+ ln['params']['method'] = value
+ return changed, lines
+
+ last_line = last_line_dict['line']
+ prefix_start = last_line.find(last_line.split()[0])
+ suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
+ prefix = last_line[:prefix_start]
+
+ if len(iface_options) < 1:
+        # interface has no options yet, indent the new one
+ prefix += " "
+
+ line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
+ option_dict = optionDict(line, iface, option, value, address_family)
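+    # Insert just after the last occurrence of last_line_dict; indexing the
+    # reversed list finds that occurrence from the end.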
+ index = len(lines) - lines[::-1].index(last_line_dict)
+ lines.insert(index, option_dict)
+ return True, lines
+
+
+def write_changes(module, lines, dest):
+
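+    # Write to a temporary file first and atomically move it into place, so
+    # readers never observe a partially written interfaces file.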
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'wb') as f:
+ f.write(to_bytes(''.join(lines), errors='surrogate_or_strict'))
+ module.atomic_move(tmpfile, os.path.realpath(dest))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(type='path', default='/etc/network/interfaces'),
+ iface=dict(type='str'),
+ address_family=dict(type='str'),
+ option=dict(type='str'),
+ value=dict(type='str'),
+ backup=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ required_by=dict(
+ option=('iface',),
+ ),
+ )
+
+ dest = module.params['dest']
+ iface = module.params['iface']
+ address_family = module.params['address_family']
+ option = module.params['option']
+ value = module.params['value']
+ backup = module.params['backup']
+ state = module.params['state']
+
+ if option is not None and state == "present" and value is None:
+ module.fail_json(msg="Value must be set if option is defined and state is 'present'")
+
+ lines, ifaces = read_interfaces_file(module, dest)
+
+ changed = False
+
+ if option is not None:
+ changed, lines = set_interface_option(module, lines, iface, option, value, state, address_family)
+
+ if changed:
+ dummy, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(dest)
+ write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
+
+ module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ip_netns.py b/ansible_collections/community/general/plugins/modules/ip_netns.py
new file mode 100644
index 000000000..69534c810
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ip_netns.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Arie Bregman <abregman@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ip_netns
+author: "Arie Bregman (@bregman-arie)"
+short_description: Manage network namespaces
+requirements: [ ip ]
+description:
+ - Create or delete network namespaces using the ip command.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ required: false
+ description:
+ - Name of the namespace
+ type: str
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the namespace should exist
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a namespace named mario
+ community.general.ip_netns:
+ name: mario
+ state: present
+
+- name: Delete a namespace named luigi
+ community.general.ip_netns:
+ name: luigi
+ state: absent
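+
+# A hedged sketch: previewing the change with Ansible's per-task check mode
+# (the namespace name is illustrative).
+- name: Preview creation of a namespace named peach
+  community.general.ip_netns:
+    name: peach
+    state: present
+  check_mode: true
+  register: netns_preview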
+'''
+
+RETURN = '''
+# Default return values
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+
+class Namespace(object):
+ """Interface to network namespaces. """
+
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params['name']
+ self.state = module.params['state']
+
+ def _netns(self, command):
+        '''Run an ip netns command'''
+ return self.module.run_command(['ip', 'netns'] + command)
+
+ def exists(self):
+ '''Check if the namespace already exists'''
+ rc, out, err = self.module.run_command(['ip', 'netns', 'list'])
+ if rc != 0:
+ self.module.fail_json(msg=to_text(err))
+        # match whole namespace names ('ip netns list' prints one per line,
+        # optionally followed by '(id: N)'), not substrings
+        return self.name in (line.split()[0] for line in out.splitlines() if line.strip())
+
+ def add(self):
+ '''Create network namespace'''
+ rtc, out, err = self._netns(['add', self.name])
+
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def delete(self):
+ '''Delete network namespace'''
+ rtc, out, err = self._netns(['del', self.name])
+ if rtc != 0:
+ self.module.fail_json(msg=err)
+
+ def check(self):
+ '''Run check mode'''
+ changed = False
+
+        exists = self.exists()
+        if self.state == 'present' and not exists:
+            changed = True
+        elif self.state == 'absent' and exists:
+            changed = True
+
+ self.module.exit_json(changed=changed)
+
+ def run(self):
+ '''Make the necessary changes'''
+ changed = False
+
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ changed = True
+ elif self.state == 'present':
+ if not self.exists():
+ self.add()
+ changed = True
+
+ self.module.exit_json(changed=changed)
+
+
+def main():
+ """Entry point."""
+ module = AnsibleModule(
+ argument_spec={
+ 'name': {'default': None},
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ },
+ supports_check_mode=True,
+ )
+
+ network_namespace = Namespace(module)
+ if module.check_mode:
+ network_namespace.check()
+ else:
+ network_namespace.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_config.py b/ansible_collections/community/general/plugins/modules/ipa_config.py
new file mode 100644
index 000000000..ec94b58d4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_config.py
@@ -0,0 +1,369 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_config
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage Global FreeIPA Configuration Settings
+description:
+ - Modify global configuration settings of a FreeIPA Server.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ ipaconfigstring:
+ description: Extra hashes to generate in password plug-in.
+ aliases: ["configstring"]
+ type: list
+ elements: str
+ choices: ["AllowNThash", "KDC:Disable Last Success", "KDC:Disable Lockout", "KDC:Disable Default Preauth for SPNs"]
+ version_added: '2.5.0'
+ ipadefaultloginshell:
+ description: Default shell for new users.
+ aliases: ["loginshell"]
+ type: str
+ ipadefaultemaildomain:
+ description: Default e-mail domain for new users.
+ aliases: ["emaildomain"]
+ type: str
+ ipadefaultprimarygroup:
+ description: Default group for new users.
+ aliases: ["primarygroup"]
+ type: str
+ version_added: '2.5.0'
+ ipagroupsearchfields:
+ description: A list of fields to search in when searching for groups.
+ aliases: ["groupsearchfields"]
+ type: list
+ elements: str
+ version_added: '2.5.0'
+ ipahomesrootdir:
+ description: Default location of home directories.
+ aliases: ["homesrootdir"]
+ type: str
+ version_added: '2.5.0'
+ ipakrbauthzdata:
+ description: Default types of PAC supported for services.
+ aliases: ["krbauthzdata"]
+ type: list
+ elements: str
+ choices: ["MS-PAC", "PAD", "nfs:NONE"]
+ version_added: '2.5.0'
+ ipamaxusernamelength:
+ description: Maximum length of usernames.
+ aliases: ["maxusernamelength"]
+ type: int
+ version_added: '2.5.0'
+ ipapwdexpadvnotify:
+ description: Notice of impending password expiration, in days.
+ aliases: ["pwdexpadvnotify"]
+ type: int
+ version_added: '2.5.0'
+ ipasearchrecordslimit:
+ description: Maximum number of records to search (-1 or 0 is unlimited).
+ aliases: ["searchrecordslimit"]
+ type: int
+ version_added: '2.5.0'
+ ipasearchtimelimit:
+ description: Maximum amount of time (seconds) for a search (-1 or 0 is unlimited).
+ aliases: ["searchtimelimit"]
+ type: int
+ version_added: '2.5.0'
+ ipaselinuxusermaporder:
+ description: The SELinux user map order (order in increasing priority of SELinux users).
+ aliases: ["selinuxusermaporder"]
+ type: list
+ elements: str
+ version_added: '3.7.0'
+ ipauserauthtype:
+ description: The authentication type to use by default.
+ aliases: ["userauthtype"]
+ choices: ["password", "radius", "otp", "pkinit", "hardened", "disabled"]
+ type: list
+ elements: str
+ version_added: '2.5.0'
+ ipausersearchfields:
+ description: A list of fields to search in when searching for users.
+ aliases: ["usersearchfields"]
+ type: list
+ elements: str
+ version_added: '2.5.0'
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure password plugin features KDC:Disable Last Success and KDC:Disable Lockout are enabled
+ community.general.ipa_config:
+ ipaconfigstring: ["KDC:Disable Last Success", "KDC:Disable Lockout"]
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default login shell is bash
+ community.general.ipa_config:
+ ipadefaultloginshell: /bin/bash
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default e-mail domain is ansible.com
+ community.general.ipa_config:
+ ipadefaultemaildomain: ansible.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default primary group is set to ipausers
+ community.general.ipa_config:
+ ipadefaultprimarygroup: ipausers
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the group search fields are set to 'cn,description'
+ community.general.ipa_config:
+ ipagroupsearchfields: ['cn', 'description']
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the home directory location is set to /home
+ community.general.ipa_config:
+ ipahomesrootdir: /home
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default types of PAC supported for services is set to MS-PAC and PAD
+ community.general.ipa_config:
+ ipakrbauthzdata: ["MS-PAC", "PAD"]
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the maximum user name length is set to 32
+ community.general.ipa_config:
+ ipamaxusernamelength: 32
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the password expiration notice is set to 4 days
+ community.general.ipa_config:
+ ipapwdexpadvnotify: 4
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the search record limit is set to 100
+ community.general.ipa_config:
+ ipasearchrecordslimit: 100
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the search time limit is set to 2 seconds
+ community.general.ipa_config:
+ ipasearchtimelimit: 2
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the default user auth type is password
+ community.general.ipa_config:
+ ipauserauthtype: ['password']
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the user search fields are set to 'uid,givenname,sn,ou,title'
+ community.general.ipa_config:
+ ipausersearchfields: ['uid', 'givenname', 'sn', 'ou', 'title']
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the SELinux user map order is set
+ community.general.ipa_config:
+ ipaselinuxusermaporder:
+ - "guest_u:s0"
+ - "xguest_u:s0"
+ - "user_u:s0"
+ - "staff_u:s0-s0:c0.c1023"
+ - "unconfined_u:s0-s0:c0.c1023"
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+'''
+
+RETURN = r'''
+config:
+ description: Configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def config_show(self):
+ return self._post_json(method='config_show', name=None)
+
+ def config_mod(self, name, item):
+ return self._post_json(method='config_mod', name=name, item=item)
+
+
+def get_config_dict(ipaconfigstring=None, ipadefaultloginshell=None,
+ ipadefaultemaildomain=None, ipadefaultprimarygroup=None,
+ ipagroupsearchfields=None, ipahomesrootdir=None,
+ ipakrbauthzdata=None, ipamaxusernamelength=None,
+ ipapwdexpadvnotify=None, ipasearchrecordslimit=None,
+ ipasearchtimelimit=None, ipaselinuxusermaporder=None,
+ ipauserauthtype=None, ipausersearchfields=None):
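+    # Note (illustrative): FreeIPA expects some multi-valued settings as one
+    # joined string - search fields are comma-separated and the SELinux user
+    # map order is '$'-separated - hence the join() calls below.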
+ config = {}
+ if ipaconfigstring is not None:
+ config['ipaconfigstring'] = ipaconfigstring
+ if ipadefaultloginshell is not None:
+ config['ipadefaultloginshell'] = ipadefaultloginshell
+ if ipadefaultemaildomain is not None:
+ config['ipadefaultemaildomain'] = ipadefaultemaildomain
+ if ipadefaultprimarygroup is not None:
+ config['ipadefaultprimarygroup'] = ipadefaultprimarygroup
+ if ipagroupsearchfields is not None:
+ config['ipagroupsearchfields'] = ','.join(ipagroupsearchfields)
+ if ipahomesrootdir is not None:
+ config['ipahomesrootdir'] = ipahomesrootdir
+ if ipakrbauthzdata is not None:
+ config['ipakrbauthzdata'] = ipakrbauthzdata
+ if ipamaxusernamelength is not None:
+ config['ipamaxusernamelength'] = str(ipamaxusernamelength)
+ if ipapwdexpadvnotify is not None:
+ config['ipapwdexpadvnotify'] = str(ipapwdexpadvnotify)
+ if ipasearchrecordslimit is not None:
+ config['ipasearchrecordslimit'] = str(ipasearchrecordslimit)
+ if ipasearchtimelimit is not None:
+ config['ipasearchtimelimit'] = str(ipasearchtimelimit)
+ if ipaselinuxusermaporder is not None:
+ config['ipaselinuxusermaporder'] = '$'.join(ipaselinuxusermaporder)
+ if ipauserauthtype is not None:
+ config['ipauserauthtype'] = ipauserauthtype
+ if ipausersearchfields is not None:
+ config['ipausersearchfields'] = ','.join(ipausersearchfields)
+
+ return config
+
+
+def get_config_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_config = get_config_dict(
+ ipaconfigstring=module.params.get('ipaconfigstring'),
+ ipadefaultloginshell=module.params.get('ipadefaultloginshell'),
+ ipadefaultemaildomain=module.params.get('ipadefaultemaildomain'),
+ ipadefaultprimarygroup=module.params.get('ipadefaultprimarygroup'),
+ ipagroupsearchfields=module.params.get('ipagroupsearchfields'),
+ ipahomesrootdir=module.params.get('ipahomesrootdir'),
+ ipakrbauthzdata=module.params.get('ipakrbauthzdata'),
+ ipamaxusernamelength=module.params.get('ipamaxusernamelength'),
+ ipapwdexpadvnotify=module.params.get('ipapwdexpadvnotify'),
+ ipasearchrecordslimit=module.params.get('ipasearchrecordslimit'),
+ ipasearchtimelimit=module.params.get('ipasearchtimelimit'),
+ ipaselinuxusermaporder=module.params.get('ipaselinuxusermaporder'),
+ ipauserauthtype=module.params.get('ipauserauthtype'),
+ ipausersearchfields=module.params.get('ipausersearchfields'),
+ )
+ ipa_config = client.config_show()
+ diff = get_config_diff(client, ipa_config, module_config)
+
+ changed = False
+ new_config = {}
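+    # Only send keys whose desired value differs from what IPA reports, so
+    # config_mod receives a minimal change set.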
+ for module_key in diff:
+ if module_config.get(module_key) != ipa_config.get(module_key, None):
+ changed = True
+ new_config.update({module_key: module_config.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.config_mod(name=None, item=new_config)
+
+ return changed, client.config_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipaconfigstring=dict(type='list', elements='str',
+ choices=['AllowNThash',
+ 'KDC:Disable Last Success',
+ 'KDC:Disable Lockout',
+ 'KDC:Disable Default Preauth for SPNs'],
+ aliases=['configstring']),
+ ipadefaultloginshell=dict(type='str', aliases=['loginshell']),
+ ipadefaultemaildomain=dict(type='str', aliases=['emaildomain']),
+ ipadefaultprimarygroup=dict(type='str', aliases=['primarygroup']),
+ ipagroupsearchfields=dict(type='list', elements='str',
+ aliases=['groupsearchfields']),
+ ipahomesrootdir=dict(type='str', aliases=['homesrootdir']),
+ ipakrbauthzdata=dict(type='list', elements='str',
+ choices=['MS-PAC', 'PAD', 'nfs:NONE'],
+ aliases=['krbauthzdata']),
+ ipamaxusernamelength=dict(type='int', aliases=['maxusernamelength']),
+ ipapwdexpadvnotify=dict(type='int', aliases=['pwdexpadvnotify']),
+ ipasearchrecordslimit=dict(type='int', aliases=['searchrecordslimit']),
+ ipasearchtimelimit=dict(type='int', aliases=['searchtimelimit']),
+ ipaselinuxusermaporder=dict(type='list', elements='str',
+ aliases=['selinuxusermaporder']),
+ ipauserauthtype=dict(type='list', elements='str',
+ aliases=['userauthtype'],
+ choices=["password", "radius", "otp", "pkinit",
+ "hardened", "disabled"]),
+ ipausersearchfields=dict(type='list', elements='str',
+ aliases=['usersearchfields']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = ConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+        changed, config = ensure(module, client)
+        module.exit_json(changed=changed, config=config)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
new file mode 100644
index 000000000..b1a90141b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_dnsrecord.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnsrecord
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA DNS records
+description:
+ - Add, modify and delete an IPA DNS Record using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ zone_name:
+ description:
+    - The name of the DNS zone in which the record is managed.
+ required: true
+ type: str
+ record_name:
+ description:
+ - The DNS record name to manage.
+ required: true
+ aliases: ["name"]
+ type: str
+ record_type:
+ description:
+ - The type of DNS record name.
+ - Currently, 'A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV' and 'MX' are supported.
+ - "'A6', 'CNAME', 'DNAME' and 'TXT' are added in version 2.5."
+ - "'SRV' and 'MX' are added in version 2.8."
+ required: false
+ default: 'A'
+ choices: ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'MX', 'PTR', 'SRV', 'TXT']
+ type: str
+ record_value:
+ description:
+ - Manage DNS record name with this value.
+ - Mutually exclusive with I(record_values), and exactly one of I(record_value) and I(record_values) has to be specified.
+ - Use I(record_values) if you need to specify multiple values.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ type: str
+ record_values:
+ description:
+ - Manage DNS record name with this value.
+ - Mutually exclusive with I(record_value), and exactly one of I(record_value) and I(record_values) has to be specified.
+ - In the case of 'A' or 'AAAA' record types, this will be the IP address.
+ - In the case of 'A6' record type, this will be the A6 Record data.
+ - In the case of 'CNAME' record type, this will be the hostname.
+ - In the case of 'DNAME' record type, this will be the DNAME target.
+ - In the case of 'PTR' record type, this will be the hostname.
+ - In the case of 'TXT' record type, this will be a text.
+ - In the case of 'SRV' record type, this will be a service record.
+ - In the case of 'MX' record type, this will be a mail exchanger record.
+ type: list
+ elements: str
+ record_ttl:
+ description:
+ - Set the TTL for the record.
+ - Applies only when adding a new or changing the value of I(record_value) or I(record_values).
+ required: false
+ type: int
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: vm-001
+ record_type: 'AAAA'
+ record_value: '::1'
+
+- name: Ensure that dns records exist with a TTL
+ community.general.ipa_dnsrecord:
+ name: host02
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_values: '::1,fe80::1'
+ record_ttl: 300
+ ipa_host: ipa.example.com
+ ipa_pass: topsecret
+ state: present
+
+- name: Ensure a PTR record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: 2.168.192.in-addr.arpa
+ record_name: 5
+ record_type: 'PTR'
+ record_value: 'internal.ipa.example.com'
+
+- name: Ensure a TXT record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos
+ record_type: 'TXT'
+ record_value: 'EXAMPLE.COM'
+
+- name: Ensure an SRV record is present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: _kerberos._udp.example.com
+ record_type: 'SRV'
+ record_value: '10 50 88 ipa.example.com'
+
+- name: Ensure MX records are present
+ community.general.ipa_dnsrecord:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ record_name: '@'
+ record_type: 'MX'
+ record_values:
+ - '1 mailserver-01.example.com'
+ - '2 mailserver-02.example.com'
+
+- name: Ensure that dns record is removed
+ community.general.ipa_dnsrecord:
+ name: host01
+ zone_name: example.com
+ record_type: 'AAAA'
+ record_value: '::1'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+'''
+
+RETURN = r'''
+dnsrecord:
+ description: DNS record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class DNSRecordIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSRecordIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnsrecord_find(self, zone_name, record_name):
+ if record_name == '@':
+ return self._post_json(method='dnsrecord_show', name=zone_name, item={'idnsname': record_name, 'all': True})
+ else:
+ return self._post_json(method='dnsrecord_find', name=zone_name, item={'idnsname': record_name, 'all': True})
+
+ def dnsrecord_add(self, zone_name=None, record_name=None, details=None):
+ item = dict(idnsname=record_name)
+
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+
+ for value in details['record_values']:
+ if details['record_type'] == 'A':
+ item.update(a_part_ip_address=value)
+ elif details['record_type'] == 'AAAA':
+ item.update(aaaa_part_ip_address=value)
+ elif details['record_type'] == 'A6':
+ item.update(a6_part_data=value)
+ elif details['record_type'] == 'CNAME':
+ item.update(cname_part_hostname=value)
+ elif details['record_type'] == 'DNAME':
+ item.update(dname_part_target=value)
+ elif details['record_type'] == 'PTR':
+ item.update(ptr_part_hostname=value)
+ elif details['record_type'] == 'TXT':
+ item.update(txtrecord=value)
+ elif details['record_type'] == 'SRV':
+ item.update(srvrecord=value)
+ elif details['record_type'] == 'MX':
+ item.update(mxrecord=value)
+
+ self._post_json(method='dnsrecord_add', name=zone_name, item=item)
+
+ def dnsrecord_mod(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ if details.get('record_ttl'):
+ item.update(dnsttl=details['record_ttl'])
+ return self._post_json(method='dnsrecord_mod', name=zone_name, item=item)
+
+ def dnsrecord_del(self, zone_name=None, record_name=None, details=None):
+ item = get_dnsrecord_dict(details)
+ item.update(idnsname=record_name)
+ return self._post_json(method='dnsrecord_del', name=zone_name, item=item)
+
+
+def get_dnsrecord_dict(details=None):
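+    # Map the module's record_type to the multi-valued attribute name the IPA
+    # API expects, for example 'A' -> 'arecord' and 'MX' -> 'mxrecord'.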
+ module_dnsrecord = dict()
+ if details['record_type'] == 'A' and details['record_values']:
+ module_dnsrecord.update(arecord=details['record_values'])
+ elif details['record_type'] == 'AAAA' and details['record_values']:
+ module_dnsrecord.update(aaaarecord=details['record_values'])
+ elif details['record_type'] == 'A6' and details['record_values']:
+ module_dnsrecord.update(a6record=details['record_values'])
+ elif details['record_type'] == 'CNAME' and details['record_values']:
+ module_dnsrecord.update(cnamerecord=details['record_values'])
+ elif details['record_type'] == 'DNAME' and details['record_values']:
+ module_dnsrecord.update(dnamerecord=details['record_values'])
+ elif details['record_type'] == 'PTR' and details['record_values']:
+ module_dnsrecord.update(ptrrecord=details['record_values'])
+ elif details['record_type'] == 'TXT' and details['record_values']:
+ module_dnsrecord.update(txtrecord=details['record_values'])
+ elif details['record_type'] == 'SRV' and details['record_values']:
+ module_dnsrecord.update(srvrecord=details['record_values'])
+ elif details['record_type'] == 'MX' and details['record_values']:
+ module_dnsrecord.update(mxrecord=details['record_values'])
+
+ if details.get('record_ttl'):
+ module_dnsrecord.update(dnsttl=details['record_ttl'])
+
+ return module_dnsrecord
+
+
+def get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord):
+ details = get_dnsrecord_dict(module_dnsrecord)
+ return client.get_diff(ipa_data=ipa_dnsrecord, module_data=details)
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ record_name = module.params['record_name']
+ record_ttl = module.params.get('record_ttl')
+ state = module.params['state']
+
+ ipa_dnsrecord = client.dnsrecord_find(zone_name, record_name)
+
+ record_values = module.params['record_values']
+ if module.params['record_value'] is not None:
+ record_values = [module.params['record_value']]
+
+ module_dnsrecord = dict(
+ record_type=module.params['record_type'],
+ record_values=record_values,
+ record_ttl=to_native(record_ttl, nonstring='passthru'),
+ )
+
+ # ttl is not required to change records
+ if module_dnsrecord['record_ttl'] is None:
+ module_dnsrecord.pop('record_ttl')
+
+ changed = False
+ if state == 'present':
+ if not ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_add(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ diff = get_dnsrecord_diff(client, ipa_dnsrecord, module_dnsrecord)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_mod(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+ else:
+ if ipa_dnsrecord:
+ changed = True
+ if not module.check_mode:
+ client.dnsrecord_del(zone_name=zone_name,
+ record_name=record_name,
+ details=module_dnsrecord)
+
+ return changed, client.dnsrecord_find(zone_name, record_name)
+
+
+def main():
+ record_types = ['A', 'AAAA', 'A6', 'CNAME', 'DNAME', 'PTR', 'TXT', 'SRV', 'MX']
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ zone_name=dict(type='str', required=True),
+ record_name=dict(type='str', aliases=['name'], required=True),
+ record_type=dict(type='str', default='A', choices=record_types),
+ record_value=dict(type='str'),
+ record_values=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ record_ttl=dict(type='int', required=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['record_value', 'record_values']],
+ required_one_of=[['record_value', 'record_values']],
+ supports_check_mode=True
+ )
+
+ client = DNSRecordIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_dnszone.py b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
new file mode 100644
index 000000000..06c93841e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_dnszone.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Fran Fitzpatrick (francis.x.fitzpatrick@gmail.com)
+# Borrowed heavily from other work by Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_dnszone
+author: Fran Fitzpatrick (@fxfitz)
+short_description: Manage FreeIPA DNS Zones
+description:
+  - Add and delete IPA DNS zones using the IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ zone_name:
+ description:
+      - The DNS zone name that needs to be managed.
+ required: true
+ type: str
+ state:
+ description: State to ensure
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+ dynamicupdate:
+ description: Apply dynamic update to zone.
+ default: false
+ type: bool
+ allowsyncptr:
+ description: Allow synchronization of forward and reverse records in the zone.
+ default: false
+ type: bool
+ version_added: 4.3.0
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure dns zone is present
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+
+- name: Ensure dns zone is present and allows dynamic updates
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ dynamicupdate: true
+
+- name: Ensure that dns zone is removed
+ community.general.ipa_dnszone:
+ zone_name: example.com
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: topsecret
+ state: absent
+
+- name: Ensure dns zone is present and allows PTR record synchronization
+ community.general.ipa_dnszone:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ zone_name: example.com
+ allowsyncptr: true
+'''
+
+RETURN = r'''
+zone:
+ description: DNS zone as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class DNSZoneIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(DNSZoneIPAClient, self).__init__(module, host, port, protocol)
+
+ def dnszone_find(self, zone_name, details=None):
+ items = {'all': 'true',
+ 'idnsname': zone_name, }
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_find',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_add(self, zone_name=None, details=None):
+ items = {}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_add',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_mod(self, zone_name=None, details=None):
+ items = {}
+ if details is not None:
+ items.update(details)
+
+ return self._post_json(
+ method='dnszone_mod',
+ name=zone_name,
+ item=items
+ )
+
+ def dnszone_del(self, zone_name=None, record_name=None, details=None):
+ return self._post_json(
+ method='dnszone_del', name=zone_name, item={})
+
+
+def ensure(module, client):
+ zone_name = module.params['zone_name']
+ state = module.params['state']
+ dynamicupdate = module.params['dynamicupdate']
+ allowsyncptr = module.params['allowsyncptr']
+
+ changed = False
+
+ # does zone exist
+ ipa_dnszone = client.dnszone_find(zone_name)
+
+ if state == 'present':
+        if not ipa_dnszone:
+            changed = True
+ if not module.check_mode:
+ client.dnszone_add(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
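+        # IPA returns booleans as the strings 'TRUE'/'FALSE', so compare the
+        # upper-cased Python bools against the first returned value.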
+ elif ipa_dnszone['idnsallowdynupdate'][0] != str(dynamicupdate).upper() or ipa_dnszone['idnsallowsyncptr'][0] != str(allowsyncptr).upper():
+ changed = True
+ if not module.check_mode:
+ client.dnszone_mod(zone_name=zone_name, details={'idnsallowdynupdate': dynamicupdate, 'idnsallowsyncptr': allowsyncptr})
+
+ # state is absent
+ else:
+ # check for generic zone existence
+ if ipa_dnszone:
+ changed = True
+ if not module.check_mode:
+ client.dnszone_del(zone_name=zone_name)
+
+ return changed, client.dnszone_find(zone_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(zone_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ dynamicupdate=dict(type='bool', required=False, default=False),
+ allowsyncptr=dict(type='bool', required=False, default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = DNSZoneIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, zone = ensure(module, client)
+ module.exit_json(changed=changed, zone=zone)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_group.py b/ansible_collections/community/general/plugins/modules/ipa_group.py
new file mode 100644
index 000000000..87e7f0e66
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_group.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_group
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA group
+description:
+ - Add, modify and delete group within IPA server
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ append:
+ description:
+ - If C(true), add the listed I(user) and I(group) to the group members.
+ - If C(false), only the listed I(user) and I(group) will be group members, removing any other members.
+ default: false
+ type: bool
+ version_added: 4.0.0
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - Description of the group.
+ type: str
+ external:
+ description:
+ - Allow adding external non-IPA members from trusted domains.
+ type: bool
+ gidnumber:
+ description:
+ - GID (use this option to set it manually).
+ aliases: ['gid']
+ type: str
+ group:
+ description:
+ - List of group names assigned to this group.
+ - If I(append=false) and an empty list is passed all groups will be removed from this group.
+ - Groups that are already assigned but not passed will be removed.
+ - If I(append=true) the listed groups will be assigned without removing other groups.
+ - If option is omitted assigned groups will not be checked or changed.
+ type: list
+ elements: str
+ nonposix:
+ description:
+ - Create as a non-POSIX group.
+ type: bool
+ user:
+ description:
+ - List of user names assigned to this group.
+ - If I(append=false) and an empty list is passed all users will be removed from this group.
+ - Users that are already assigned but not passed will be removed.
+ - If I(append=true) the listed users will be assigned without removing other users.
+ - If option is omitted assigned users will not be checked or changed.
+ type: list
+ elements: str
+ external_user:
+ description:
+ - List of external users assigned to this group.
+ - Behaves identically to I(user) with respect to I(append) attribute.
+ - List entries can be in C(DOMAIN\\username) or SID format.
+ - Unless SIDs are provided, the module will always attempt to make changes even if the group already has all the users.
+ This is because only SIDs are returned by IPA query.
+ - I(external=true) is needed for this option to work.
+ type: list
+ elements: str
+ version_added: 6.3.0
+ state:
+ description:
+ - State to ensure
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure group is present
+ community.general.ipa_group:
+ name: oinstall
+ gidnumber: '54321'
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that groups sysops and appops are assigned to ops but no other group
+ community.general.ipa_group:
+ name: ops
+ group:
+ - sysops
+ - appops
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that users linus and larry are assigned to the group, but no other user
+ community.general.ipa_group:
+ name: sysops
+ user:
+ - linus
+ - larry
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that new starter named john is member of the group, without removing other members
+ community.general.ipa_group:
+ name: developers
+ user:
+ - john
+ append: true
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Add external user to a group
+ community.general.ipa_group:
+ name: developers
+ external: true
+ append: true
+ external_user:
+ - S-1-5-21-123-1234-12345-63421
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Add a user from MYDOMAIN
+ community.general.ipa_group:
+ name: developers
+ external: true
+ append: true
+ external_user:
+ - MYDOMAIN\\john
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure group is absent
+ community.general.ipa_group:
+ name: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+group:
+ description: Group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class GroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(GroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def group_find(self, name):
+ return self._post_json(method='group_find', name=None, item={'all': True, 'cn': name})
+
+ def group_add(self, name, item):
+ return self._post_json(method='group_add', name=name, item=item)
+
+ def group_mod(self, name, item):
+ return self._post_json(method='group_mod', name=name, item=item)
+
+ def group_del(self, name):
+ return self._post_json(method='group_del', name=name)
+
+ def group_add_member(self, name, item):
+ return self._post_json(method='group_add_member', name=name, item=item)
+
+ def group_add_member_group(self, name, item):
+ return self.group_add_member(name=name, item={'group': item})
+
+ def group_add_member_user(self, name, item):
+ return self.group_add_member(name=name, item={'user': item})
+
+ def group_add_member_externaluser(self, name, item):
+ return self.group_add_member(name=name, item={'ipaexternalmember': item})
+
+ def group_remove_member(self, name, item):
+ return self._post_json(method='group_remove_member', name=name, item=item)
+
+ def group_remove_member_group(self, name, item):
+ return self.group_remove_member(name=name, item={'group': item})
+
+ def group_remove_member_user(self, name, item):
+ return self.group_remove_member(name=name, item={'user': item})
+
+ def group_remove_member_externaluser(self, name, item):
+ return self.group_remove_member(name=name, item={'ipaexternalmember': item})
+
+
+def get_group_dict(description=None, external=None, gid=None, nonposix=None):
+ group = {}
+ if description is not None:
+ group['description'] = description
+ if external is not None:
+ group['external'] = external
+ if gid is not None:
+ group['gidnumber'] = gid
+ if nonposix is not None:
+ group['nonposix'] = nonposix
+ return group
+
+
+def get_group_diff(client, ipa_group, module_group):
+ data = []
+ # With group_add attribute nonposix is passed, whereas with group_mod only posix can be passed.
+ if 'nonposix' in module_group:
+ # Only non-posix groups can be changed to posix
+ if not module_group['nonposix'] and ipa_group.get('nonposix'):
+ module_group['posix'] = True
+ del module_group['nonposix']
+
+ if 'external' in module_group:
+ if module_group['external'] and 'ipaexternalgroup' in ipa_group.get('objectclass'):
+ del module_group['external']
+
+ return client.get_diff(ipa_data=ipa_group, module_data=module_group)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ user = module.params['user']
+ external = module.params['external']
+ external_user = module.params['external_user']
+ append = module.params['append']
+
+ module_group = get_group_dict(description=module.params['description'],
+ external=external,
+ gid=module.params['gidnumber'],
+ nonposix=module.params['nonposix'])
+ ipa_group = client.group_find(name=name)
+
+    if external_user is not None and not external:
+        module.fail_json(msg="external_user can only be set if external = True")
+
+ changed = False
+ if state == 'present':
+ if not ipa_group:
+ changed = True
+ if not module.check_mode:
+ ipa_group = client.group_add(name, item=module_group)
+ else:
+ diff = get_group_diff(client, ipa_group, module_group)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_group.get(key)
+ client.group_mod(name=name, item=data)
+
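+        # modify_if_diff (from module_utils.ipa) computes the member delta and
+        # calls the add/remove methods as needed; with append=True, members
+        # that are not listed are presumably left in place rather than removed.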
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_group', []), group,
+ client.group_add_member_group,
+ client.group_remove_member_group,
+ append=append) or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('member_user', []), user,
+ client.group_add_member_user,
+ client.group_remove_member_user,
+ append=append) or changed
+
+ if external_user is not None:
+ changed = client.modify_if_diff(name, ipa_group.get('ipaexternalmember', []), external_user,
+ client.group_add_member_externaluser,
+ client.group_remove_member_externaluser,
+ append=append) or changed
+ else:
+ if ipa_group:
+ changed = True
+ if not module.check_mode:
+ client.group_del(name)
+
+ return changed, client.group_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ external=dict(type='bool'),
+ external_user=dict(type='list', elements='str'),
+ gidnumber=dict(type='str', aliases=['gid']),
+ group=dict(type='list', elements='str'),
+ nonposix=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'),
+ append=dict(type='bool', default=False))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ client = GroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, group = ensure(module, client)
+ module.exit_json(changed=changed, group=group)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
new file mode 100644
index 000000000..b7633262b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_hbacrule.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hbacrule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA HBAC rule
+description:
+ - Add, modify or delete an IPA HBAC rule using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description: Description
+ type: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ required: false
+ type: list
+ elements: str
+ hostcategory:
+ description: Host category
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of hostgroup names to assign.
+      - If an empty list is passed all hostgroups will be removed from the rule.
+ - If option is omitted hostgroups will not be checked or changed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all services will be removed from the rule.
+ - If option is omitted services will not be checked or changed.
+ type: list
+ elements: str
+ servicecategory:
+ description: Service category
+ choices: ['all']
+ type: str
+ servicegroup:
+ description:
+ - List of service group names to assign.
+ - If an empty list is passed all assigned service groups will be removed from the rule.
+ - If option is omitted service groups will not be checked or changed.
+ type: list
+ elements: str
+ sourcehost:
+ description:
+ - List of source host names to assign.
+      - If an empty list is passed all assigned source hosts will be removed from the rule.
+ - If option is omitted source hosts will not be checked or changed.
+ type: list
+ elements: str
+ sourcehostcategory:
+ description: Source host category
+ choices: ['all']
+ type: str
+ sourcehostgroup:
+ description:
+ - List of source host group names to assign.
+      - If an empty list is passed all assigned source host groups will be removed from the rule.
+ - If option is omitted source host groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure
+ default: "present"
+ choices: ["absent", "disabled", "enabled","present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+      - If an empty list is passed all assigned users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description: User category
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user group names to assign.
+      - If an empty list is passed all assigned user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure rule to allow all users to access any host from any host
+ community.general.ipa_hbacrule:
+ name: allow_all
+ description: Allow all users to access any host from any host
+ hostcategory: all
+ servicecategory: all
+ usercategory: all
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule with certain limitations
+ community.general.ipa_hbacrule:
+ name: allow_all_developers_access_to_db
+ description: Allow all developers to access any database from any host
+ hostgroup:
+ - db-server
+ usergroup:
+ - developers
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure rule is absent
+ community.general.ipa_hbacrule:
+ name: rule_to_be_deleted
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
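+
+# An illustrative sketch: disable the allow_all rule from the first example
+# without deleting it.
+- name: Ensure rule is disabled
+  community.general.ipa_hbacrule:
+    name: allow_all
+    state: disabled
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret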
+'''
+
+RETURN = r'''
+hbacrule:
+ description: HBAC rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class HBACRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def hbacrule_find(self, name):
+ return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
+
+ def hbacrule_add(self, name, item):
+ return self._post_json(method='hbacrule_add', name=name, item=item)
+
+ def hbacrule_mod(self, name, item):
+ return self._post_json(method='hbacrule_mod', name=name, item=item)
+
+ def hbacrule_del(self, name):
+ return self._post_json(method='hbacrule_del', name=name)
+
+ def hbacrule_add_host(self, name, item):
+ return self._post_json(method='hbacrule_add_host', name=name, item=item)
+
+ def hbacrule_remove_host(self, name, item):
+ return self._post_json(method='hbacrule_remove_host', name=name, item=item)
+
+ def hbacrule_add_service(self, name, item):
+ return self._post_json(method='hbacrule_add_service', name=name, item=item)
+
+ def hbacrule_remove_service(self, name, item):
+ return self._post_json(method='hbacrule_remove_service', name=name, item=item)
+
+ def hbacrule_add_user(self, name, item):
+ return self._post_json(method='hbacrule_add_user', name=name, item=item)
+
+ def hbacrule_remove_user(self, name, item):
+ return self._post_json(method='hbacrule_remove_user', name=name, item=item)
+
+ def hbacrule_add_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
+
+ def hbacrule_remove_sourcehost(self, name, item):
+ return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
+
+
+def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
+ sourcehostcategory=None,
+ usercategory=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if servicecategory is not None:
+ data['servicecategory'] = servicecategory
+ if sourcehostcategory is not None:
+ data['sourcehostcategory'] = sourcehostcategory
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ return data
+
+
+def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule):
+ return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+
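+    # FreeIPA stores the rule status in 'ipaenabledflag': 'present' and
+    # 'enabled' map to TRUE, everything else (including 'disabled') to FALSE.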
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
+
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ service = module.params['service']
+ servicecategory = module.params['servicecategory']
+ servicegroup = module.params['servicegroup']
+ sourcehost = module.params['sourcehost']
+ sourcehostcategory = module.params['sourcehostcategory']
+ sourcehostgroup = module.params['sourcehostgroup']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_hbacrule = get_hbacrule_dict(description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ servicecategory=servicecategory,
+ sourcehostcategory=sourcehostcategory,
+ usercategory=usercategory)
+ ipa_hbacrule = client.hbacrule_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
+ else:
+ diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hbacrule.get(key)
+ client.hbacrule_mod(name=name, item=data)
+
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'host') or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
+ client.hbacrule_add_host,
+ client.hbacrule_remove_host, 'hostgroup') or changed
+
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvc') or changed
+
+ if servicegroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
+ servicegroup,
+ client.hbacrule_add_service,
+ client.hbacrule_remove_service, 'hbacsvcgroup') or changed
+
+ if sourcehost is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'host') or changed
+
+ if sourcehostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
+ client.hbacrule_add_sourcehost,
+ client.hbacrule_remove_sourcehost, 'hostgroup') or changed
+
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'user') or changed
+
+ if usergroup is not None:
+ changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
+ client.hbacrule_add_user,
+ client.hbacrule_remove_user, 'group') or changed
+ else:
+ if ipa_hbacrule:
+ changed = True
+ if not module.check_mode:
+ client.hbacrule_del(name=name)
+
+ return changed, client.hbacrule_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ servicecategory=dict(type='str', choices=['all']),
+ servicegroup=dict(type='list', elements='str'),
+ sourcehost=dict(type='list', elements='str'),
+ sourcehostcategory=dict(type='str', choices=['all']),
+ sourcehostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = HBACRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hbacrule = ensure(module, client)
+ module.exit_json(changed=changed, hbacrule=hbacrule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_host.py b/ansible_collections/community/general/plugins/modules/ipa_host.py
new file mode 100644
index 000000000..d561401d4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_host.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_host
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host
+description:
+ - Add, modify and delete an IPA host using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ fqdn:
+ description:
+      - Fully qualified domain name.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - A description of this host.
+ type: str
+ force:
+ description:
+ - Force host name even if not in DNS.
+ required: false
+ type: bool
+ ip_address:
+ description:
+ - Add the host to DNS with this IP address.
+ type: str
+ mac_address:
+ description:
+      - List of hardware MAC address(es) of this host.
+ - If option is omitted MAC addresses will not be checked or changed.
+ - If an empty list is passed all assigned MAC addresses will be removed.
+ - MAC addresses that are already assigned but not passed will be removed.
+ aliases: ["macaddress"]
+ type: list
+ elements: str
+ ns_host_location:
+ description:
+      - Host location (e.g. "Lab 2").
+ aliases: ["nshostlocation"]
+ type: str
+ ns_hardware_platform:
+ description:
+      - Host hardware platform (e.g. "Lenovo T61").
+ aliases: ["nshardwareplatform"]
+ type: str
+ ns_os_version:
+ description:
+      - Host operating system and version (e.g. "Fedora 9").
+ aliases: ["nsosversion"]
+ type: str
+ user_certificate:
+ description:
+ - List of Base-64 encoded server certificates.
+ - If option is omitted certificates will not be checked or changed.
+ - If an empty list is passed all assigned certificates will be removed.
+ - Certificates already assigned but not passed will be removed.
+ aliases: ["usercertificate"]
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ update_dns:
+ description:
+      - If set to C(true) together with I(state=absent), the DNS records of the host managed by FreeIPA DNS are removed as well.
+      - This option has no effect for states other than C(absent).
+ type: bool
+ random_password:
+ description: Generate a random password to be used in bulk enrollment.
+ type: bool
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host is present
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ ns_host_location: Lab
+ ns_os_version: CentOS 7
+ ns_hardware_platform: Lenovo T61
+ mac_address:
+ - "08:00:27:E3:B1:2D"
+ - "52:54:00:BD:97:1E"
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Generate a random password for bulk enrolment
+ community.general.ipa_host:
+ name: host01.example.com
+ description: Example host
+ ip_address: 192.168.0.123
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+ random_password: true
+
+- name: Ensure host is disabled
+ community.general.ipa_host:
+ name: host01.example.com
+ state: disabled
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that all user certificates are removed
+ community.general.ipa_host:
+ name: host01.example.com
+ user_certificate: []
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host and its DNS record is absent
+ community.general.ipa_host:
+ name: host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_dns: true
+'''
+
+RETURN = r'''
+host:
+ description: Host as returned by IPA API.
+ returned: always
+ type: dict
+host_diff:
+  description: List of options that differ and would be changed.
+ returned: if check mode and a difference is found
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class HostIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostIPAClient, self).__init__(module, host, port, protocol)
+
+ def host_show(self, name):
+ return self._post_json(method='host_show', name=name)
+
+ def host_find(self, name):
+ return self._post_json(method='host_find', name=None, item={'all': True, 'fqdn': name})
+
+ def host_add(self, name, host):
+ return self._post_json(method='host_add', name=name, item=host)
+
+ def host_mod(self, name, host):
+ return self._post_json(method='host_mod', name=name, item=host)
+
+ def host_del(self, name, update_dns):
+ return self._post_json(method='host_del', name=name, item={'updatedns': update_dns})
+
+ def host_disable(self, name):
+ return self._post_json(method='host_disable', name=name)
+
+
+def get_host_dict(description=None, force=None, ip_address=None, ns_host_location=None, ns_hardware_platform=None,
+ ns_os_version=None, user_certificate=None, mac_address=None, random_password=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ if force is not None:
+ data['force'] = force
+ if ip_address is not None:
+ data['ip_address'] = ip_address
+ if ns_host_location is not None:
+ data['nshostlocation'] = ns_host_location
+ if ns_hardware_platform is not None:
+ data['nshardwareplatform'] = ns_hardware_platform
+ if ns_os_version is not None:
+ data['nsosversion'] = ns_os_version
+ if user_certificate is not None:
+ data['usercertificate'] = [{"__base64__": item} for item in user_certificate]
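+        # The {'__base64__': ...} wrapper is the IPA JSON-RPC convention for
+        # passing base64-encoded binary values such as certificates.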
+ if mac_address is not None:
+ data['macaddress'] = mac_address
+ if random_password is not None:
+ data['random'] = random_password
+ return data
+
+
+def get_host_diff(client, ipa_host, module_host):
+ non_updateable_keys = ['force', 'ip_address']
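+    # 'force' and 'ip_address' are only meaningful when the host is created,
+    # so they must not be sent to host_mod; 'random' is kept only when a new
+    # random password was actually requested.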
+ if not module_host.get('random'):
+ non_updateable_keys.append('random')
+ for key in non_updateable_keys:
+ if key in module_host:
+ del module_host[key]
+
+ return client.get_diff(ipa_data=ipa_host, module_data=module_host)
+
+
+def ensure(module, client):
+ name = module.params['fqdn']
+ state = module.params['state']
+
+ ipa_host = client.host_find(name=name)
+ module_host = get_host_dict(description=module.params['description'],
+ force=module.params['force'], ip_address=module.params['ip_address'],
+ ns_host_location=module.params['ns_host_location'],
+ ns_hardware_platform=module.params['ns_hardware_platform'],
+ ns_os_version=module.params['ns_os_version'],
+ user_certificate=module.params['user_certificate'],
+ mac_address=module.params['mac_address'],
+ random_password=module.params.get('random_password'),
+ )
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_host:
+ changed = True
+ if not module.check_mode:
+ # OTP password generated by FreeIPA is visible only for host_add command
+ # so, return directly from here.
+ return changed, client.host_add(name=name, host=module_host)
+ else:
+ diff = get_host_diff(client, ipa_host, module_host)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_host.get(key)
+ ipa_host_show = client.host_show(name=name)
+ if ipa_host_show.get('has_keytab', False) and module.params.get('random_password'):
+ client.host_disable(name=name)
+ return changed, client.host_mod(name=name, host=data)
+
+ else:
+ if ipa_host:
+ changed = True
+ update_dns = module.params.get('update_dns', False)
+ if not module.check_mode:
+ client.host_del(name=name, update_dns=update_dns)
+
+ return changed, client.host_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ fqdn=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool'),
+ ip_address=dict(type='str'),
+ ns_host_location=dict(type='str', aliases=['nshostlocation']),
+ ns_hardware_platform=dict(type='str', aliases=['nshardwareplatform']),
+ ns_os_version=dict(type='str', aliases=['nsosversion']),
+ user_certificate=dict(type='list', aliases=['usercertificate'], elements='str'),
+ mac_address=dict(type='list', aliases=['macaddress'], elements='str'),
+ update_dns=dict(type='bool'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ random_password=dict(type='bool', no_log=False),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, host = ensure(module, client)
+ module.exit_json(changed=changed, host=host)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
new file mode 100644
index 000000000..12232de89
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_hostgroup.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_hostgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA host-group
+description:
+ - Add, modify and delete an IPA host-group using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ append:
+ description:
+ - If C(true), add the listed I(host) to the I(hostgroup).
+ - If C(false), only the listed I(host) will be in I(hostgroup), removing any other hosts.
+ default: false
+ type: bool
+ version_added: 6.6.0
+ cn:
+ description:
+ - Name of host-group.
+ - Can not be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ host:
+ description:
+ - List of hosts that belong to the host-group.
+ - If an empty list is passed all hosts will be removed from the group.
+ - If option is omitted hosts will not be checked or changed.
+      - If option is passed, all assigned hosts not passed will be unassigned from the group unless I(append=true).
+ type: list
+ elements: str
+ hostgroup:
+ description:
+      - List of host-groups that belong to that host-group.
+ - If an empty list is passed all host-groups will be removed from the group.
+ - If option is omitted host-groups will not be checked or changed.
+      - If option is passed, all assigned hostgroups not passed will be unassigned from the group unless I(append=true).
+ type: list
+ elements: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure host-group databases is present
+ community.general.ipa_hostgroup:
+ name: databases
+ state: present
+ host:
+ - db.example.com
+ hostgroup:
+ - mysql-server
+ - oracle-server
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure host-group databases is absent
+ community.general.ipa_hostgroup:
+ name: databases
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
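+
+# An illustrative sketch (db2.example.com is a hypothetical host): with
+# append=true the listed hosts are added without removing existing members.
+- name: Ensure db2.example.com is in the databases group, keeping existing members
+  community.general.ipa_hostgroup:
+    name: databases
+    host:
+      - db2.example.com
+    append: true
+    ipa_host: ipa.example.com
+    ipa_user: admin
+    ipa_pass: topsecret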
+'''
+
+RETURN = r'''
+hostgroup:
+ description: Hostgroup as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class HostGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(HostGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def hostgroup_find(self, name):
+ return self._post_json(method='hostgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def hostgroup_add(self, name, item):
+ return self._post_json(method='hostgroup_add', name=name, item=item)
+
+ def hostgroup_mod(self, name, item):
+ return self._post_json(method='hostgroup_mod', name=name, item=item)
+
+ def hostgroup_del(self, name):
+ return self._post_json(method='hostgroup_del', name=name)
+
+ def hostgroup_add_member(self, name, item):
+ return self._post_json(method='hostgroup_add_member', name=name, item=item)
+
+ def hostgroup_add_host(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'host': item})
+
+ def hostgroup_add_hostgroup(self, name, item):
+ return self.hostgroup_add_member(name=name, item={'hostgroup': item})
+
+ def hostgroup_remove_member(self, name, item):
+ return self._post_json(method='hostgroup_remove_member', name=name, item=item)
+
+ def hostgroup_remove_host(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'host': item})
+
+ def hostgroup_remove_hostgroup(self, name, item):
+ return self.hostgroup_remove_member(name=name, item={'hostgroup': item})
+
+
+def get_hostgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup):
+ return client.get_diff(ipa_data=ipa_hostgroup, module_data=module_hostgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ append = module.params['append']
+
+ ipa_hostgroup = client.hostgroup_find(name=name)
+ module_hostgroup = get_hostgroup_dict(description=module.params['description'])
+
+ changed = False
+ if state == 'present':
+ if not ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_hostgroup = client.hostgroup_add(name=name, item=module_hostgroup)
+ else:
+ diff = get_hostgroup_diff(client, ipa_hostgroup, module_hostgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_hostgroup.get(key)
+ client.hostgroup_mod(name=name, item=data)
+
+ if host is not None:
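+            # IPA stores member names in lowercase, so normalize the requested
+            # names before diffing them against the current members.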
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_host', []),
+ [item.lower() for item in host],
+ client.hostgroup_add_host,
+ client.hostgroup_remove_host,
+ append=append) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_hostgroup.get('member_hostgroup', []),
+ [item.lower() for item in hostgroup],
+ client.hostgroup_add_hostgroup,
+ client.hostgroup_remove_hostgroup,
+ append=append) or changed
+
+ else:
+ if ipa_hostgroup:
+ changed = True
+ if not module.check_mode:
+ client.hostgroup_del(name=name)
+
+ return changed, client.hostgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ append=dict(type='bool', default=False))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = HostGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, hostgroup = ensure(module, client)
+ module.exit_json(changed=changed, hostgroup=hostgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py b/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py
new file mode 100644
index 000000000..e2d8f0cd5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_otpconfig.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Ansible Project
+# Heavily influenced by the ipa_config module by Fran Fitzpatrick <francis.x.fitzpatrick@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_otpconfig
+author: justchris1 (@justchris1)
+short_description: Manage FreeIPA OTP configuration settings
+version_added: 2.5.0
+description:
+ - Modify global configuration settings of a FreeIPA Server with respect to OTP (One Time Passwords).
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ ipatokentotpauthwindow:
+ description: TOTP authentication window in seconds.
+ aliases: ["totpauthwindow"]
+ type: int
+ ipatokentotpsyncwindow:
+ description: TOTP synchronization window in seconds.
+ aliases: ["totpsyncwindow"]
+ type: int
+ ipatokenhotpauthwindow:
+ description: HOTP authentication window in number of hops.
+ aliases: ["hotpauthwindow"]
+ type: int
+ ipatokenhotpsyncwindow:
+ description: HOTP synchronization window in hops.
+ aliases: ["hotpsyncwindow"]
+ type: int
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure the TOTP authentication window is set to 300 seconds
+ community.general.ipa_otpconfig:
+ ipatokentotpauthwindow: '300'
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the TOTP synchronization window is set to 86400 seconds
+ community.general.ipa_otpconfig:
+ ipatokentotpsyncwindow: '86400'
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the HOTP authentication window is set to 10 hops
+ community.general.ipa_otpconfig:
+ ipatokenhotpauthwindow: '10'
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+
+- name: Ensure the HOTP synchronization window is set to 100 hops
+ community.general.ipa_otpconfig:
+ ipatokenhotpsyncwindow: '100'
+ ipa_host: localhost
+ ipa_user: admin
+ ipa_pass: supersecret
+'''
+
+RETURN = r'''
+otpconfig:
+ description: OTP configuration as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class OTPConfigIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(OTPConfigIPAClient, self).__init__(module, host, port, protocol)
+
+ def otpconfig_show(self):
+ return self._post_json(method='otpconfig_show', name=None)
+
+ def otpconfig_mod(self, name, item):
+ return self._post_json(method='otpconfig_mod', name=name, item=item)
+
+
+def get_otpconfig_dict(ipatokentotpauthwindow=None, ipatokentotpsyncwindow=None,
+ ipatokenhotpauthwindow=None, ipatokenhotpsyncwindow=None):
+
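+    # The IPA API expects these values as strings, so the integer module
+    # parameters are converted with str() before being sent.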
+ config = {}
+ if ipatokentotpauthwindow is not None:
+ config['ipatokentotpauthwindow'] = str(ipatokentotpauthwindow)
+ if ipatokentotpsyncwindow is not None:
+ config['ipatokentotpsyncwindow'] = str(ipatokentotpsyncwindow)
+ if ipatokenhotpauthwindow is not None:
+ config['ipatokenhotpauthwindow'] = str(ipatokenhotpauthwindow)
+ if ipatokenhotpsyncwindow is not None:
+ config['ipatokenhotpsyncwindow'] = str(ipatokenhotpsyncwindow)
+
+ return config
+
+
+def get_otpconfig_diff(client, ipa_config, module_config):
+ return client.get_diff(ipa_data=ipa_config, module_data=module_config)
+
+
+def ensure(module, client):
+ module_otpconfig = get_otpconfig_dict(
+ ipatokentotpauthwindow=module.params.get('ipatokentotpauthwindow'),
+ ipatokentotpsyncwindow=module.params.get('ipatokentotpsyncwindow'),
+ ipatokenhotpauthwindow=module.params.get('ipatokenhotpauthwindow'),
+ ipatokenhotpsyncwindow=module.params.get('ipatokenhotpsyncwindow'),
+ )
+ ipa_otpconfig = client.otpconfig_show()
+ diff = get_otpconfig_diff(client, ipa_otpconfig, module_otpconfig)
+
+ changed = False
+ new_otpconfig = {}
+ for module_key in diff:
+ if module_otpconfig.get(module_key) != ipa_otpconfig.get(module_key, None):
+ changed = True
+ new_otpconfig.update({module_key: module_otpconfig.get(module_key)})
+
+ if changed and not module.check_mode:
+ client.otpconfig_mod(name=None, item=new_otpconfig)
+
+ return changed, client.otpconfig_show()
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ ipatokentotpauthwindow=dict(type='int', aliases=['totpauthwindow'], no_log=False),
+ ipatokentotpsyncwindow=dict(type='int', aliases=['totpsyncwindow'], no_log=False),
+ ipatokenhotpauthwindow=dict(type='int', aliases=['hotpauthwindow'], no_log=False),
+ ipatokenhotpsyncwindow=dict(type='int', aliases=['hotpsyncwindow'], no_log=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ client = OTPConfigIPAClient(
+ module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot']
+ )
+
+ try:
+ client.login(
+ username=module.params['ipa_user'],
+ password=module.params['ipa_pass']
+ )
+ changed, otpconfig = ensure(module, client)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, otpconfig=otpconfig)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_otptoken.py b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py
new file mode 100644
index 000000000..f25ab6023
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_otptoken.py
@@ -0,0 +1,534 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_otptoken
+author: justchris1 (@justchris1)
+short_description: Manage FreeIPA OTPs
+version_added: 2.5.0
+description:
+ - Add, modify, and delete One Time Passwords in IPA.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ uniqueid:
+ description: Unique ID of the token in IPA.
+ required: true
+ aliases: ["name"]
+ type: str
+ newuniqueid:
+    description: If specified, the unique ID will be changed to this value.
+ type: str
+ otptype:
+ description:
+ - Type of OTP.
+ - "B(Note:) Cannot be modified after OTP is created."
+ type: str
+ choices: [ totp, hotp ]
+ secretkey:
+ description:
+ - Token secret (Base64).
+ - If OTP is created and this is not specified, a random secret will be generated by IPA.
+ - "B(Note:) Cannot be modified after OTP is created."
+ type: str
+ description:
+ description: Description of the token (informational only).
+ type: str
+ owner:
+ description: Assigned user of the token.
+ type: str
+ enabled:
+ description: Mark the token as enabled (default C(true)).
+ default: true
+ type: bool
+ notbefore:
+ description:
+ - First date/time the token can be used.
+ - In the format C(YYYYMMddHHmmss).
+ - For example, C(20180121182022) will allow the token to be used starting on 21 January 2018 at 18:20:22.
+ type: str
+ notafter:
+ description:
+ - Last date/time the token can be used.
+ - In the format C(YYYYMMddHHmmss).
+ - For example, C(20200121182022) will allow the token to be used until 21 January 2020 at 18:20:22.
+ type: str
+ vendor:
+ description: Token vendor name (informational only).
+ type: str
+ model:
+ description: Token model (informational only).
+ type: str
+ serial:
+ description: Token serial (informational only).
+ type: str
+ state:
+ description: State to ensure.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ algorithm:
+ description:
+ - Token hash algorithm.
+ - "B(Note:) Cannot be modified after OTP is created."
+ choices: ['sha1', 'sha256', 'sha384', 'sha512']
+ type: str
+ digits:
+ description:
+ - Number of digits each token code will have.
+ - "B(Note:) Cannot be modified after OTP is created."
+ choices: [ 6, 8 ]
+ type: int
+ offset:
+ description:
+ - TOTP token / IPA server time difference.
+ - "B(Note:) Cannot be modified after OTP is created."
+ type: int
+ interval:
+ description:
+ - Length of TOTP token code validity in seconds.
+ - "B(Note:) Cannot be modified after OTP is created."
+ type: int
+ counter:
+ description:
+ - Initial counter for the HOTP token.
+ - "B(Note:) Cannot be modified after OTP is created."
+ type: int
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = r'''
+- name: Create a TOTP for pinky, allowing the IPA server to generate the secret using defaults
+ community.general.ipa_otptoken:
+ uniqueid: Token123
+ otptype: totp
+ owner: pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Create an 8-digit HOTP for pinky with SHA256 and specified validity times
+ community.general.ipa_otptoken:
+ uniqueid: Token123
+ enabled: true
+ otptype: hotp
+ digits: 8
+ secretkey: UMKSIER00zT2T2tWMUlTRmNlekRCbFQvWFBVZUh2dElHWGR6T3VUR3IzK2xjaFk9
+ algorithm: sha256
+ notbefore: 20180121182123
+ notafter: 20220121182123
+ owner: pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Update Token123 to indicate a vendor, model, serial number (info only), and description
+ community.general.ipa_otptoken:
+ uniqueid: Token123
+ vendor: Acme
+ model: acme101
+ serial: SerialNumber1
+ description: Acme OTP device
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Disable Token123
+ community.general.ipa_otptoken:
+ uniqueid: Token123
+ enabled: false
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Rename Token123 to TokenABC and enable it
+ community.general.ipa_otptoken:
+ uniqueid: Token123
+ newuniqueid: TokenABC
+ enabled: true
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+otptoken:
+  description: OTP token as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import base64
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, sanitize_keys
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class OTPTokenIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(OTPTokenIPAClient, self).__init__(module, host, port, protocol)
+
+ def otptoken_find(self, name):
+ return self._post_json(method='otptoken_find', name=None, item={'all': True,
+ 'ipatokenuniqueid': name,
+ 'timelimit': '0',
+ 'sizelimit': '0'})
+
+ def otptoken_add(self, name, item):
+ return self._post_json(method='otptoken_add', name=name, item=item)
+
+ def otptoken_mod(self, name, item):
+ return self._post_json(method='otptoken_mod', name=name, item=item)
+
+ def otptoken_del(self, name):
+ return self._post_json(method='otptoken_del', name=name)
+
+
+def base64_to_base32(base64_string):
+ """Converts base64 string to base32 string"""
+ b32_string = base64.b32encode(base64.b64decode(base64_string)).decode('ascii')
+ return b32_string
+
+
+def base32_to_base64(base32_string):
+ """Converts base32 string to base64 string"""
+ b64_string = base64.b64encode(base64.b32decode(base32_string)).decode('ascii')
+ return b64_string
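+
+# Illustrative round trip of the two helpers above (hypothetical values):
+#   base64_to_base32('SGVsbG8=')  ->  'JBSWY3DP'   (both encode b'Hello')
+#   base32_to_base64('JBSWY3DP')  ->  'SGVsbG8='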
+
+
+def get_otptoken_dict(ansible_to_ipa, uniqueid=None, newuniqueid=None, otptype=None, secretkey=None, description=None, owner=None,
+ enabled=None, notbefore=None, notafter=None, vendor=None,
+ model=None, serial=None, algorithm=None, digits=None, offset=None,
+ interval=None, counter=None):
+ """Create the dictionary of settings passed in"""
+
+ otptoken = {}
+ if uniqueid is not None:
+ otptoken[ansible_to_ipa['uniqueid']] = uniqueid
+ if newuniqueid is not None:
+ otptoken[ansible_to_ipa['newuniqueid']] = newuniqueid
+ if otptype is not None:
+ otptoken[ansible_to_ipa['otptype']] = otptype.upper()
+ if secretkey is not None:
+ # For some unknown reason, while IPA returns the secret in base64,
+        # it wants the secret passed in as base32. This makes comparison
+        # (does 'current' equal 'new'?) more difficult. Moreover, it may
+        # cause subtle issues in a playbook, as the output would be encoded
+        # differently than the value passed in as a parameter. For these
+        # reasons, the module standardizes on base64 for both input (as a
+        # parameter) and output (from IPA).
+ otptoken[ansible_to_ipa['secretkey']] = base64_to_base32(secretkey)
+ if description is not None:
+ otptoken[ansible_to_ipa['description']] = description
+ if owner is not None:
+ otptoken[ansible_to_ipa['owner']] = owner
+ if enabled is not None:
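+        # Note the inversion: the IPA attribute is 'ipatokendisabled', so
+        # enabled=True maps to 'FALSE' and enabled=False maps to 'TRUE'.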
+ otptoken[ansible_to_ipa['enabled']] = 'FALSE' if enabled else 'TRUE'
+ if notbefore is not None:
+ otptoken[ansible_to_ipa['notbefore']] = notbefore + 'Z'
+ if notafter is not None:
+ otptoken[ansible_to_ipa['notafter']] = notafter + 'Z'
+ if vendor is not None:
+ otptoken[ansible_to_ipa['vendor']] = vendor
+ if model is not None:
+ otptoken[ansible_to_ipa['model']] = model
+ if serial is not None:
+ otptoken[ansible_to_ipa['serial']] = serial
+ if algorithm is not None:
+ otptoken[ansible_to_ipa['algorithm']] = algorithm
+ if digits is not None:
+ otptoken[ansible_to_ipa['digits']] = str(digits)
+ if offset is not None:
+ otptoken[ansible_to_ipa['offset']] = str(offset)
+ if interval is not None:
+ otptoken[ansible_to_ipa['interval']] = str(interval)
+ if counter is not None:
+ otptoken[ansible_to_ipa['counter']] = str(counter)
+
+ return otptoken
+
+
+def transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible):
+ """Transform the output received by IPA to a format more friendly
+ before it is returned to the user. IPA returns even simple
+ strings as a list of strings. It also returns bools and
+ int as string. This function cleans that up before return.
+ """
+ updated_otptoken = ipa_otptoken
+
+ # Used to hold values that will be sanitized from output as no_log.
+    # This covers the case where secretkey is not specified to the module but
+    # is passed back from IPA.
+ sanitize_strings = set()
+
+ # Rename the IPA parameters to the more friendly ansible module names for them
+ for ipa_parameter in ipa_to_ansible:
+ if ipa_parameter in ipa_otptoken:
+ updated_otptoken[ipa_to_ansible[ipa_parameter]] = ipa_otptoken[ipa_parameter]
+ updated_otptoken.pop(ipa_parameter)
+
+ # Change the type from IPA's list of string to the appropriate return value type
+ # based on field. By default, assume they should be strings.
+ for ansible_parameter in ansible_to_ipa:
+ if ansible_parameter in updated_otptoken:
+ if isinstance(updated_otptoken[ansible_parameter], list) and len(updated_otptoken[ansible_parameter]) == 1:
+ if ansible_parameter in ['digits', 'offset', 'interval', 'counter']:
+ updated_otptoken[ansible_parameter] = int(updated_otptoken[ansible_parameter][0])
+ elif ansible_parameter == 'enabled':
+ updated_otptoken[ansible_parameter] = bool(updated_otptoken[ansible_parameter][0])
+ else:
+ updated_otptoken[ansible_parameter] = updated_otptoken[ansible_parameter][0]
+
+ if 'secretkey' in updated_otptoken:
+ if isinstance(updated_otptoken['secretkey'], dict):
+ if '__base64__' in updated_otptoken['secretkey']:
+ sanitize_strings.add(updated_otptoken['secretkey']['__base64__'])
+ b64key = updated_otptoken['secretkey']['__base64__']
+ updated_otptoken.pop('secretkey')
+ updated_otptoken['secretkey'] = b64key
+ sanitize_strings.add(b64key)
+ elif '__base32__' in updated_otptoken['secretkey']:
+ sanitize_strings.add(updated_otptoken['secretkey']['__base32__'])
+ b32key = updated_otptoken['secretkey']['__base32__']
+ b64key = base32_to_base64(b32key)
+ updated_otptoken.pop('secretkey')
+ updated_otptoken['secretkey'] = b64key
+ sanitize_strings.add(b32key)
+ sanitize_strings.add(b64key)
+
+ return updated_otptoken, sanitize_strings
+
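+# Illustrative example (hypothetical values): an IPA response such as
+#   {'ipatokenuniqueid': ['Token123'], 'ipatokenvendor': ['Acme']}
+# comes back from transform_output as
+#   ({'uniqueid': 'Token123', 'vendor': 'Acme'}, set())
+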
+
+def validate_modifications(ansible_to_ipa, module, ipa_otptoken,
+ module_otptoken, unmodifiable_after_creation):
+ """Checks to see if the requested modifications are valid. Some elements
+ cannot be modified after initial creation. However, we still want to
+ validate arguments that are specified, but are not different than what
+ is currently set on the server.
+ """
+
+ modifications_valid = True
+
+ for parameter in unmodifiable_after_creation:
+ if ansible_to_ipa[parameter] in module_otptoken and ansible_to_ipa[parameter] in ipa_otptoken:
+ mod_value = module_otptoken[ansible_to_ipa[parameter]]
+
+            # For some unknown reason, IPA returns almost all values wrapped
+            # in a list, even though passing them in as a list (even of
+            # length 1) will be rejected. This happens for every element
+            # except type (totp or hotp).
+ if parameter == 'otptype':
+ ipa_value = ipa_otptoken[ansible_to_ipa[parameter]]
+ else:
+ if len(ipa_otptoken[ansible_to_ipa[parameter]]) != 1:
+ module.fail_json(msg=("Invariant fail: Return value from IPA is not a list " +
+ "of length 1. Please open a bug report for the module."))
+ if parameter == 'secretkey':
+ # We stored the secret key in base32 since we had assumed that would need to
+ # be the format if we were contacting IPA to create it. However, we are
+ # now comparing it against what is already set in the IPA server, so convert
+ # back to base64 for comparison.
+ mod_value = base32_to_base64(mod_value)
+
+                    # For the secret key, it is even more specific in that the key is returned
+                    # in a dict, in the list, as the __base64__ entry of the IPA response.
+                    if '__base64__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                        ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base64__']
+                    elif '__base32__' in ipa_otptoken[ansible_to_ipa[parameter]][0]:
+                        b32key = ipa_otptoken[ansible_to_ipa[parameter]][0]['__base32__']
+                        b64key = base32_to_base64(b32key)
+                        ipa_value = b64key
+                    else:
+                        ipa_value = None
+                else:
+                    ipa_value = ipa_otptoken[ansible_to_ipa[parameter]][0]
+
+ if mod_value != ipa_value:
+ modifications_valid = False
+ fail_message = ("Parameter '" + parameter + "' cannot be changed once " +
+ "the OTP is created and the requested value specified here (" +
+ str(mod_value) +
+ ") differs from what is set in the IPA server ("
+ + str(ipa_value) + ")")
+ module.fail_json(msg=fail_message)
+
+ return modifications_valid
+
+
+def ensure(module, client):
+ # dict to map from ansible parameter names to attribute names
+ # used by IPA (which are not so friendly).
+ ansible_to_ipa = {'uniqueid': 'ipatokenuniqueid',
+ 'newuniqueid': 'rename',
+ 'otptype': 'type',
+ 'secretkey': 'ipatokenotpkey',
+ 'description': 'description',
+ 'owner': 'ipatokenowner',
+ 'enabled': 'ipatokendisabled',
+ 'notbefore': 'ipatokennotbefore',
+ 'notafter': 'ipatokennotafter',
+ 'vendor': 'ipatokenvendor',
+ 'model': 'ipatokenmodel',
+ 'serial': 'ipatokenserial',
+ 'algorithm': 'ipatokenotpalgorithm',
+ 'digits': 'ipatokenotpdigits',
+ 'offset': 'ipatokentotpclockoffset',
+ 'interval': 'ipatokentotptimestep',
+ 'counter': 'ipatokenhotpcounter'}
+
+ # Create inverse dictionary for mapping return values
+ ipa_to_ansible = {}
+ for (k, v) in ansible_to_ipa.items():
+ ipa_to_ansible[v] = k
+
+ unmodifiable_after_creation = ['otptype', 'secretkey', 'algorithm',
+ 'digits', 'offset', 'interval', 'counter']
+ state = module.params['state']
+ uniqueid = module.params['uniqueid']
+
+ module_otptoken = get_otptoken_dict(ansible_to_ipa=ansible_to_ipa,
+ uniqueid=module.params.get('uniqueid'),
+ newuniqueid=module.params.get('newuniqueid'),
+ otptype=module.params.get('otptype'),
+ secretkey=module.params.get('secretkey'),
+ description=module.params.get('description'),
+ owner=module.params.get('owner'),
+ enabled=module.params.get('enabled'),
+ notbefore=module.params.get('notbefore'),
+ notafter=module.params.get('notafter'),
+ vendor=module.params.get('vendor'),
+ model=module.params.get('model'),
+ serial=module.params.get('serial'),
+ algorithm=module.params.get('algorithm'),
+ digits=module.params.get('digits'),
+ offset=module.params.get('offset'),
+ interval=module.params.get('interval'),
+ counter=module.params.get('counter'))
+
+ ipa_otptoken = client.otptoken_find(name=uniqueid)
+
+ if ansible_to_ipa['newuniqueid'] in module_otptoken:
+        # Check to see if the new unique id is already in use
+ ipa_otptoken_new = client.otptoken_find(name=module_otptoken[ansible_to_ipa['newuniqueid']])
+ if ipa_otptoken_new:
+ module.fail_json(msg=("Requested rename through newuniqueid to " +
+ module_otptoken[ansible_to_ipa['newuniqueid']] +
+ " failed because the new unique id is already in use"))
+
+ changed = False
+ if state == 'present':
+ if not ipa_otptoken:
+ changed = True
+ if not module.check_mode:
+ # It would not make sense to have a rename after creation, so if the user
+ # specified a newuniqueid, just replace the uniqueid with the updated one
+ # before creation
+ if ansible_to_ipa['newuniqueid'] in module_otptoken:
+ module_otptoken[ansible_to_ipa['uniqueid']] = module_otptoken[ansible_to_ipa['newuniqueid']]
+ uniqueid = module_otptoken[ansible_to_ipa['newuniqueid']]
+ module_otptoken.pop(ansible_to_ipa['newuniqueid'])
+
+ # IPA wants the unique id in the first position and not as a key/value pair.
+ # Get rid of it from the otptoken dict and just specify it in the name field
+ # for otptoken_add.
+ if ansible_to_ipa['uniqueid'] in module_otptoken:
+ module_otptoken.pop(ansible_to_ipa['uniqueid'])
+
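+                # 'all': True requests that the IPA API return the full set
+                # of token attributes in its reply.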
+ module_otptoken['all'] = True
+ ipa_otptoken = client.otptoken_add(name=uniqueid, item=module_otptoken)
+ else:
+ if not validate_modifications(ansible_to_ipa, module, ipa_otptoken,
+ module_otptoken, unmodifiable_after_creation):
+ module.fail_json(msg="Modifications requested in module are not valid")
+
+            # IPA will reject 'modifications' that do not actually modify anything
+            # if any of the unmodifiable elements are specified. Explicitly
+            # remove them here; they did not differ, or we would already have
+            # failed in validate_modifications.
+ for x in unmodifiable_after_creation:
+ if ansible_to_ipa[x] in module_otptoken:
+ module_otptoken.pop(ansible_to_ipa[x])
+
+ diff = client.get_diff(ipa_data=ipa_otptoken, module_data=module_otptoken)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+
+ # IPA wants the unique id in the first position and not as a key/value pair.
+ # Get rid of it from the otptoken dict and just specify it in the name field
+ # for otptoken_mod.
+ if ansible_to_ipa['uniqueid'] in module_otptoken:
+ module_otptoken.pop(ansible_to_ipa['uniqueid'])
+
+ module_otptoken['all'] = True
+ ipa_otptoken = client.otptoken_mod(name=uniqueid, item=module_otptoken)
+ else:
+ if ipa_otptoken:
+ changed = True
+ if not module.check_mode:
+ client.otptoken_del(name=uniqueid)
+
+ # Transform the output to use ansible keywords (not the IPA keywords) and
+ # sanitize any key values in the output.
+ ipa_otptoken, sanitize_strings = transform_output(ipa_otptoken, ansible_to_ipa, ipa_to_ansible)
+ module.no_log_values = module.no_log_values.union(sanitize_strings)
+ sanitized_otptoken = sanitize_keys(obj=ipa_otptoken, no_log_strings=module.no_log_values)
+ return changed, sanitized_otptoken
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(uniqueid=dict(type='str', aliases=['name'], required=True),
+ newuniqueid=dict(type='str'),
+ otptype=dict(type='str', choices=['totp', 'hotp']),
+ secretkey=dict(type='str', no_log=True),
+ description=dict(type='str'),
+ owner=dict(type='str'),
+ enabled=dict(type='bool', default=True),
+ notbefore=dict(type='str'),
+ notafter=dict(type='str'),
+ vendor=dict(type='str'),
+ model=dict(type='str'),
+ serial=dict(type='str'),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ algorithm=dict(type='str', choices=['sha1', 'sha256', 'sha384', 'sha512']),
+ digits=dict(type='int', choices=[6, 8]),
+ offset=dict(type='int'),
+ interval=dict(type='int'),
+ counter=dict(type='int'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = OTPTokenIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, otptoken = ensure(module, client)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, otptoken=otptoken)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
new file mode 100644
index 000000000..6a6c4318b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_pwpolicy.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_pwpolicy
+author: Adralioh (@adralioh)
+short_description: Manage FreeIPA password policies
+description:
+- Add, modify, or delete a password policy using the IPA API.
+version_added: 2.0.0
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ group:
+ description:
+ - Name of the group that the policy applies to.
+ - If omitted, the global policy is used.
+ aliases: ["name"]
+ type: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ maxpwdlife:
+ description: Maximum password lifetime (in days).
+ type: str
+ minpwdlife:
+ description: Minimum password lifetime (in hours).
+ type: str
+ historylength:
+ description:
+ - Number of previous passwords that are remembered.
+ - Users cannot reuse remembered passwords.
+ type: str
+ minclasses:
+ description: Minimum number of character classes.
+ type: str
+ minlength:
+ description: Minimum password length.
+ type: str
+ priority:
+ description:
+ - Priority of the policy.
+ - High number means lower priority.
+    - Required when I(group) is not the global policy.
+ type: str
+ maxfailcount:
+ description: Maximum number of consecutive failures before lockout.
+ type: str
+ failinterval:
+ description: Period (in seconds) after which the number of failed login attempts is reset.
+ type: str
+ lockouttime:
+ description: Period (in seconds) for which users are locked out.
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = r'''
+- name: Modify the global password policy
+ community.general.ipa_pwpolicy:
+ maxpwdlife: '90'
+ minpwdlife: '1'
+ historylength: '8'
+ minclasses: '3'
+ minlength: '16'
+ maxfailcount: '6'
+ failinterval: '60'
+ lockouttime: '600'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure the password policy for the group admins is present
+ community.general.ipa_pwpolicy:
+ group: admins
+ state: present
+ maxpwdlife: '60'
+ minpwdlife: '24'
+ historylength: '16'
+ minclasses: '4'
+ priority: '10'
+ maxfailcount: '4'
+ failinterval: '600'
+ lockouttime: '1200'
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure that the group sysops does not have a unique password policy
+ community.general.ipa_pwpolicy:
+ group: sysops
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+pwpolicy:
+ description: Password policy as returned by IPA API.
+ returned: always
+ type: dict
+ sample:
+ cn: ['admins']
+ cospriority: ['10']
+ dn: 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com'
+ krbmaxpwdlife: ['60']
+ krbminpwdlife: ['24']
+ krbpwdfailurecountinterval: ['600']
+ krbpwdhistorylength: ['16']
+ krbpwdlockoutduration: ['1200']
+ krbpwdmaxfailure: ['4']
+ krbpwdmindiffchars: ['4']
+ objectclass: ['top', 'nscontainer', 'krbpwdpolicy']
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class PwPolicyIPAClient(IPAClient):
+ '''The global policy will be selected when `name` is `None`'''
+ def __init__(self, module, host, port, protocol):
+ super(PwPolicyIPAClient, self).__init__(module, host, port, protocol)
+
+ def pwpolicy_find(self, name):
+ if name is None:
+            # Manually set the cn to the global policy, because pwpolicy_find
+            # returns an arbitrary policy if cn is `None`
+ name = 'global_policy'
+ return self._post_json(method='pwpolicy_find', name=None, item={'all': True, 'cn': name})
+
+ def pwpolicy_add(self, name, item):
+ return self._post_json(method='pwpolicy_add', name=name, item=item)
+
+ def pwpolicy_mod(self, name, item):
+ return self._post_json(method='pwpolicy_mod', name=name, item=item)
+
+ def pwpolicy_del(self, name):
+ return self._post_json(method='pwpolicy_del', name=name)
+
+
+def get_pwpolicy_dict(maxpwdlife=None, minpwdlife=None, historylength=None, minclasses=None,
+ minlength=None, priority=None, maxfailcount=None, failinterval=None,
+ lockouttime=None):
+ pwpolicy = {}
+ if maxpwdlife is not None:
+ pwpolicy['krbmaxpwdlife'] = maxpwdlife
+ if minpwdlife is not None:
+ pwpolicy['krbminpwdlife'] = minpwdlife
+ if historylength is not None:
+ pwpolicy['krbpwdhistorylength'] = historylength
+ if minclasses is not None:
+ pwpolicy['krbpwdmindiffchars'] = minclasses
+ if minlength is not None:
+ pwpolicy['krbpwdminlength'] = minlength
+ if priority is not None:
+ pwpolicy['cospriority'] = priority
+ if maxfailcount is not None:
+ pwpolicy['krbpwdmaxfailure'] = maxfailcount
+ if failinterval is not None:
+ pwpolicy['krbpwdfailurecountinterval'] = failinterval
+ if lockouttime is not None:
+ pwpolicy['krbpwdlockoutduration'] = lockouttime
+
+ return pwpolicy
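+
+# Illustrative mapping (not part of the module): calling
+#   get_pwpolicy_dict(maxpwdlife='90', minclasses='3')
+# would return {'krbmaxpwdlife': '90', 'krbpwdmindiffchars': '3'}, i.e. the
+# module option names are translated into the krb* attribute names the IPA API expects.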
+
+
+def get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy):
+ return client.get_diff(ipa_data=ipa_pwpolicy, module_data=module_pwpolicy)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['group']
+
+ module_pwpolicy = get_pwpolicy_dict(maxpwdlife=module.params.get('maxpwdlife'),
+ minpwdlife=module.params.get('minpwdlife'),
+ historylength=module.params.get('historylength'),
+ minclasses=module.params.get('minclasses'),
+ minlength=module.params.get('minlength'),
+ priority=module.params.get('priority'),
+ maxfailcount=module.params.get('maxfailcount'),
+ failinterval=module.params.get('failinterval'),
+ lockouttime=module.params.get('lockouttime'))
+
+ ipa_pwpolicy = client.pwpolicy_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_pwpolicy:
+ changed = True
+ if not module.check_mode:
+ ipa_pwpolicy = client.pwpolicy_add(name=name, item=module_pwpolicy)
+ else:
+ diff = get_pwpolicy_diff(client, ipa_pwpolicy, module_pwpolicy)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_pwpolicy = client.pwpolicy_mod(name=name, item=module_pwpolicy)
+ else:
+ if ipa_pwpolicy:
+ changed = True
+ if not module.check_mode:
+ client.pwpolicy_del(name=name)
+
+ return changed, ipa_pwpolicy
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(group=dict(type='str', aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ maxpwdlife=dict(type='str'),
+ minpwdlife=dict(type='str'),
+ historylength=dict(type='str'),
+ minclasses=dict(type='str'),
+ minlength=dict(type='str'),
+ priority=dict(type='str'),
+ maxfailcount=dict(type='str'),
+ failinterval=dict(type='str'),
+ lockouttime=dict(type='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = PwPolicyIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, pwpolicy = ensure(module, client)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, pwpolicy=pwpolicy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_role.py b/ansible_collections/community/general/plugins/modules/ipa_role.py
new file mode 100644
index 000000000..fce315b66
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_role.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_role
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA role
+description:
+- Add, modify and delete a role within FreeIPA server using FreeIPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ cn:
+ description:
+ - Role name.
+ - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ description:
+ description:
+ - A description of this role-group.
+ type: str
+ group:
+ description:
+ - List of group names to assign to this role.
+ - If an empty list is passed all assigned groups will be unassigned from the role.
+ - If option is omitted groups will not be checked or changed.
+ - If option is passed all assigned groups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ host:
+ description:
+ - List of host names to assign.
+ - If an empty list is passed all assigned hosts will be unassigned from the role.
+ - If option is omitted hosts will not be checked or changed.
+ - If option is passed all assigned hosts that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ hostgroup:
+ description:
+ - List of host group names to assign.
+ - If an empty list is passed all assigned host groups will be removed from the role.
+ - If option is omitted host groups will not be checked or changed.
+ - If option is passed all assigned hostgroups that are not passed will be unassigned from the role.
+ type: list
+ elements: str
+ privilege:
+ description:
+ - List of privileges granted to the role.
+ - If an empty list is passed all assigned privileges will be removed.
+ - If option is omitted privileges will not be checked or changed.
+ - If option is passed all assigned privileges that are not passed will be removed.
+ type: list
+ elements: str
+ service:
+ description:
+ - List of service names to assign.
+ - If an empty list is passed all assigned services will be removed from the role.
+ - If option is omitted services will not be checked or changed.
+ - If option is passed all assigned services that are not passed will be removed from the role.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ user:
+ description:
+ - List of user names to assign.
+ - If an empty list is passed all assigned users will be removed from the role.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure role is present
+ community.general.ipa_role:
+ name: dba
+ description: Database Administrators
+ state: present
+ user:
+ - pinky
+ - brain
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role with certain details
+ community.general.ipa_role:
+ name: another-role
+ description: Just another role
+ group:
+ - editors
+ host:
+ - host01.example.com
+ hostgroup:
+ - hostgroup01
+ privilege:
+ - Group Administrators
+ - User Administrators
+ service:
+ - service01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure role is absent
+ community.general.ipa_role:
+ name: dba
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+role:
+ description: Role as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class RoleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(RoleIPAClient, self).__init__(module, host, port, protocol)
+
+ def role_find(self, name):
+ return self._post_json(method='role_find', name=None, item={'all': True, 'cn': name})
+
+ def role_add(self, name, item):
+ return self._post_json(method='role_add', name=name, item=item)
+
+ def role_mod(self, name, item):
+ return self._post_json(method='role_mod', name=name, item=item)
+
+ def role_del(self, name):
+ return self._post_json(method='role_del', name=name)
+
+ def role_add_member(self, name, item):
+ return self._post_json(method='role_add_member', name=name, item=item)
+
+ def role_add_group(self, name, item):
+ return self.role_add_member(name=name, item={'group': item})
+
+ def role_add_host(self, name, item):
+ return self.role_add_member(name=name, item={'host': item})
+
+ def role_add_hostgroup(self, name, item):
+ return self.role_add_member(name=name, item={'hostgroup': item})
+
+ def role_add_service(self, name, item):
+ return self.role_add_member(name=name, item={'service': item})
+
+ def role_add_user(self, name, item):
+ return self.role_add_member(name=name, item={'user': item})
+
+ def role_remove_member(self, name, item):
+ return self._post_json(method='role_remove_member', name=name, item=item)
+
+ def role_remove_group(self, name, item):
+ return self.role_remove_member(name=name, item={'group': item})
+
+ def role_remove_host(self, name, item):
+ return self.role_remove_member(name=name, item={'host': item})
+
+ def role_remove_hostgroup(self, name, item):
+ return self.role_remove_member(name=name, item={'hostgroup': item})
+
+ def role_remove_service(self, name, item):
+ return self.role_remove_member(name=name, item={'service': item})
+
+ def role_remove_user(self, name, item):
+ return self.role_remove_member(name=name, item={'user': item})
+
+ def role_add_privilege(self, name, item):
+ return self._post_json(method='role_add_privilege', name=name, item={'privilege': item})
+
+ def role_remove_privilege(self, name, item):
+ return self._post_json(method='role_remove_privilege', name=name, item={'privilege': item})
+
+
+def get_role_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_role_diff(client, ipa_role, module_role):
+ return client.get_diff(ipa_data=ipa_role, module_data=module_role)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ group = module.params['group']
+ host = module.params['host']
+ hostgroup = module.params['hostgroup']
+ privilege = module.params['privilege']
+ service = module.params['service']
+ user = module.params['user']
+
+ module_role = get_role_dict(description=module.params['description'])
+ ipa_role = client.role_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_role:
+ changed = True
+ if not module.check_mode:
+ ipa_role = client.role_add(name=name, item=module_role)
+ else:
+ diff = get_role_diff(client, ipa_role, module_role)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_role.get(key)
+ client.role_mod(name=name, item=data)
+
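+ # For each membership option that was supplied, modify_if_diff (provided by the
+ # shared IPAClient base class) compares the current members with the requested
+ # list and calls the given add/remove callbacks for the difference, returning
+ # True when anything was changed.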
+ if group is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_group', []), group,
+ client.role_add_group,
+ client.role_remove_group) or changed
+ if host is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_host', []), host,
+ client.role_add_host,
+ client.role_remove_host) or changed
+
+ if hostgroup is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_hostgroup', []), hostgroup,
+ client.role_add_hostgroup,
+ client.role_remove_hostgroup) or changed
+
+ if privilege is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('memberof_privilege', []), privilege,
+ client.role_add_privilege,
+ client.role_remove_privilege) or changed
+ if service is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_service', []), service,
+ client.role_add_service,
+ client.role_remove_service) or changed
+ if user is not None:
+ changed = client.modify_if_diff(name, ipa_role.get('member_user', []), user,
+ client.role_add_user,
+ client.role_remove_user) or changed
+
+ else:
+ if ipa_role:
+ changed = True
+ if not module.check_mode:
+ client.role_del(name)
+
+ return changed, client.role_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ group=dict(type='list', elements='str'),
+ host=dict(type='list', elements='str'),
+ hostgroup=dict(type='list', elements='str'),
+ privilege=dict(type='list', elements='str'),
+ service=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ user=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = RoleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, role = ensure(module, client)
+ module.exit_json(changed=changed, role=role)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_service.py b/ansible_collections/community/general/plugins/modules/ipa_service.py
new file mode 100644
index 000000000..d9541674f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_service.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_service
+author: Cédric Parent (@cprh)
+short_description: Manage FreeIPA service
+description:
+- Add and delete an IPA service using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ krbcanonicalname:
+ description:
+ - Principal of the service.
+ - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ hosts:
+ description:
+ - Defines the list of 'ManagedBy' hosts.
+ required: false
+ type: list
+ elements: str
+ force:
+ description:
+ - Force principal name even if host is not in DNS.
+ required: false
+ type: bool
+ skip_host_check:
+ description:
+ - Force the service to be created even when the host object that is to manage it does not exist.
+ - This is only used on creation, not for updating existing services.
+ required: false
+ type: bool
+ default: false
+ version_added: 4.7.0
+ state:
+ description: State to ensure.
+ required: false
+ default: present
+ choices: ["absent", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure service is present
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: present
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure service is absent
+ community.general.ipa_service:
+ name: http/host01.example.com
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Change the list of managing hosts
+ community.general.ipa_service:
+ name: http/host01.example.com
+ hosts:
+ - host01.example.com
+ - host02.example.com
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+service:
+ description: Service as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ServiceIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(ServiceIPAClient, self).__init__(module, host, port, protocol)
+
+ def service_find(self, name):
+ return self._post_json(method='service_find', name=None, item={'all': True, 'krbcanonicalname': name})
+
+ def service_add(self, name, service):
+ return self._post_json(method='service_add', name=name, item=service)
+
+ def service_mod(self, name, service):
+ return self._post_json(method='service_mod', name=name, item=service)
+
+ def service_del(self, name):
+ return self._post_json(method='service_del', name=name)
+
+ def service_disable(self, name):
+ return self._post_json(method='service_disable', name=name)
+
+ def service_add_host(self, name, item):
+ return self._post_json(method='service_add_host', name=name, item={'host': item})
+
+ def service_remove_host(self, name, item):
+ return self._post_json(method='service_remove_host', name=name, item={'host': item})
+
+
+def get_service_dict(force=None, krbcanonicalname=None, skip_host_check=None):
+ data = {}
+ if force is not None:
+ data['force'] = force
+ if krbcanonicalname is not None:
+ data['krbcanonicalname'] = krbcanonicalname
+ if skip_host_check is not None:
+ data['skip_host_check'] = skip_host_check
+ return data
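+
+# Illustrative example (not part of the module): get_service_dict(force=True,
+# skip_host_check=True) returns {'force': True, 'skip_host_check': True}. These
+# creation-only flags are stripped again in get_service_diff() below before
+# comparing against the existing service.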
+
+
+def get_service_diff(client, ipa_service, module_service):
+ # These keys are only meaningful at creation time and cannot be updated,
+ # so they are excluded from the comparison.
+ non_updateable_keys = ['force', 'krbcanonicalname', 'skip_host_check']
+ for key in non_updateable_keys:
+ if key in module_service:
+ del module_service[key]
+
+ return client.get_diff(ipa_data=ipa_service, module_data=module_service)
+
+
+def ensure(module, client):
+ name = module.params['krbcanonicalname']
+ state = module.params['state']
+ hosts = module.params['hosts']
+
+ ipa_service = client.service_find(name=name)
+ module_service = get_service_dict(force=module.params['force'], skip_host_check=module.params['skip_host_check'])
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_add(name=name, service=module_service)
+ else:
+ diff = get_service_diff(client, ipa_service, module_service)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_service.get(key)
+ client.service_mod(name=name, service=data)
+ if hosts is not None:
+ if 'managedby_host' in ipa_service:
+ for host in ipa_service['managedby_host']:
+ if host not in hosts:
+ if not module.check_mode:
+ client.service_remove_host(name=name, item=host)
+ changed = True
+ for host in hosts:
+ if host not in ipa_service['managedby_host']:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+ else:
+ for host in hosts:
+ if not module.check_mode:
+ client.service_add_host(name=name, item=host)
+ changed = True
+
+ else:
+ if ipa_service:
+ changed = True
+ if not module.check_mode:
+ client.service_del(name=name)
+
+ return changed, client.service_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(
+ krbcanonicalname=dict(type='str', required=True, aliases=['name']),
+ force=dict(type='bool', required=False),
+ skip_host_check=dict(type='bool', default=False, required=False),
+ hosts=dict(type='list', required=False, elements='str'),
+ state=dict(type='str', required=False, default='present',
+ choices=['present', 'absent']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = ServiceIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, service = ensure(module, client)
+ module.exit_json(changed=changed, service=service)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_subca.py b/ansible_collections/community/general/plugins/modules/ipa_subca.py
new file mode 100644
index 000000000..882b1ac39
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_subca.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_subca
+author: Abhijeet Kasurde (@Akasurde)
+short_description: Manage FreeIPA Lightweight Sub Certificate Authorities
+description:
+- Add, modify, enable, disable and delete an IPA Lightweight Sub Certificate Authority using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ subca_name:
+ description:
+ - The Sub Certificate Authority name to be managed.
+ required: true
+ aliases: ["name"]
+ type: str
+ subca_subject:
+ description:
+ - The Sub Certificate Authority's Subject, for example C(CN=SampleSubCA1,O=testrelm.test).
+ required: true
+ type: str
+ subca_desc:
+ description:
+ - The Sub Certificate Authority's description.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ - States 'disabled' and 'enabled' are available for FreeIPA version 4.4.2 and onwards.
+ required: false
+ default: present
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Ensure IPA Sub CA is present
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: present
+ subca_name: AnsibleSubCA1
+ subca_subject: 'CN=AnsibleSubCA1,O=example.com'
+ subca_desc: Ansible Sub CA
+
+- name: Ensure that IPA Sub CA is removed
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: absent
+ subca_name: AnsibleSubCA1
+
+- name: Ensure that IPA Sub CA is disabled
+ community.general.ipa_subca:
+ ipa_host: spider.example.com
+ ipa_pass: Passw0rd!
+ state: disabled
+ subca_name: AnsibleSubCA1
+'''
+
+RETURN = r'''
+subca:
+ description: IPA Sub CA record as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class SubCAIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SubCAIPAClient, self).__init__(module, host, port, protocol)
+
+ def subca_find(self, subca_name):
+ return self._post_json(method='ca_find', name=subca_name, item=None)
+
+ def subca_add(self, subca_name=None, subject_dn=None, details=None):
+ item = dict(ipacasubjectdn=subject_dn)
+ subca_desc = details.get('description', None)
+ if subca_desc is not None:
+ item.update(description=subca_desc)
+ return self._post_json(method='ca_add', name=subca_name, item=item)
+
+ def subca_mod(self, subca_name=None, diff=None, details=None):
+ item = get_subca_dict(details)
+ for change in diff:
+ update_detail = dict()
+ if item[change] is not None:
+ update_detail.update(setattr="{0}={1}".format(change, item[change]))
+ self._post_json(method='ca_mod', name=subca_name, item=update_detail)
+
+ def subca_del(self, subca_name=None):
+ return self._post_json(method='ca_del', name=subca_name)
+
+ def subca_disable(self, subca_name=None):
+ return self._post_json(method='ca_disable', name=subca_name)
+
+ def subca_enable(self, subca_name=None):
+ return self._post_json(method='ca_enable', name=subca_name)
+
+
+def get_subca_dict(details=None):
+ module_subca = dict()
+ if details['description'] is not None:
+ module_subca['description'] = details['description']
+ if details['subca_subject'] is not None:
+ module_subca['ipacasubjectdn'] = details['subca_subject']
+ return module_subca
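+
+# Illustrative example (not part of the module): get_subca_dict({'description': 'Ansible Sub CA',
+# 'subca_subject': 'CN=AnsibleSubCA1,O=example.com'}) returns
+# {'description': 'Ansible Sub CA', 'ipacasubjectdn': 'CN=AnsibleSubCA1,O=example.com'}.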
+
+
+def get_subca_diff(client, ipa_subca, module_subca):
+ details = get_subca_dict(module_subca)
+ return client.get_diff(ipa_data=ipa_subca, module_data=details)
+
+
+def ensure(module, client):
+ subca_name = module.params['subca_name']
+ subca_subject_dn = module.params['subca_subject']
+ subca_desc = module.params['subca_desc']
+
+ state = module.params['state']
+
+ ipa_subca = client.subca_find(subca_name)
+ module_subca = dict(description=subca_desc,
+ subca_subject=subca_subject_dn)
+
+ changed = False
+ if state == 'present':
+ if not ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_add(subca_name=subca_name, subject_dn=subca_subject_dn, details=module_subca)
+ else:
+ diff = get_subca_diff(client, ipa_subca, module_subca)
+ # IPA does not allow modifying a Sub CA's subject DN,
+ # so skip it for now.
+ if 'ipacasubjectdn' in diff:
+ diff.remove('ipacasubjectdn')
+ del module_subca['subca_subject']
+
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ client.subca_mod(subca_name=subca_name, diff=diff, details=module_subca)
+ elif state == 'absent':
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_del(subca_name=subca_name)
+ elif state == 'disabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA disable' option. Please upgrade to "
+ "version greater than 4.4.2")
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_disable(subca_name=subca_name)
+ elif state == 'enabled':
+ ipa_version = client.get_ipa_version()
+ if LooseVersion(ipa_version) < LooseVersion('4.4.2'):
+ module.fail_json(msg="Current version of IPA server [%s] does not support 'CA enable' option. Please upgrade to "
+ "version greater than 4.4.2")
+ if ipa_subca:
+ changed = True
+ if not module.check_mode:
+ client.subca_enable(subca_name=subca_name)
+
+ return changed, client.subca_find(subca_name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(subca_name=dict(type='str', required=True, aliases=['name']),
+ subca_subject=dict(type='str', required=True),
+ subca_desc=dict(type='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,)
+
+ client = SubCAIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, record = ensure(module, client)
+ module.exit_json(changed=changed, record=record)
+ except Exception as exc:
+ module.fail_json(msg=to_native(exc))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py b/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
new file mode 100644
index 000000000..d3139ba1c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_sudocmd.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmd
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command
+description:
+- Add, modify or delete a sudo command within FreeIPA server using FreeIPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ sudocmd:
+ description:
+ - Sudo command.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - A description of this command.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command exists
+ community.general.ipa_sudocmd:
+ name: su
+ description: Allow to run su via sudo
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command does not exist
+ community.general.ipa_sudocmd:
+ name: su
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmd:
+ description: Sudo command as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class SudoCmdIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmd_find(self, name):
+ return self._post_json(method='sudocmd_find', name=None, item={'all': True, 'sudocmd': name})
+
+ def sudocmd_add(self, name, item):
+ return self._post_json(method='sudocmd_add', name=name, item=item)
+
+ def sudocmd_mod(self, name, item):
+ return self._post_json(method='sudocmd_mod', name=name, item=item)
+
+ def sudocmd_del(self, name):
+ return self._post_json(method='sudocmd_del', name=name)
+
+
+def get_sudocmd_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd):
+ return client.get_diff(ipa_data=ipa_sudocmd, module_data=module_sudocmd)
+
+
+def ensure(module, client):
+ name = module.params['sudocmd']
+ state = module.params['state']
+
+ module_sudocmd = get_sudocmd_dict(description=module.params['description'])
+ ipa_sudocmd = client.sudocmd_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_add(name=name, item=module_sudocmd)
+ else:
+ diff = get_sudocmd_diff(client, ipa_sudocmd, module_sudocmd)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmd.get(key)
+ client.sudocmd_mod(name=name, item=data)
+ else:
+ if ipa_sudocmd:
+ changed = True
+ if not module.check_mode:
+ client.sudocmd_del(name=name)
+
+ return changed, client.sudocmd_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='str', required=True, aliases=['name']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmd = ensure(module, client)
+ module.exit_json(changed=changed, sudocmd=sudocmd)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py b/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
new file mode 100644
index 000000000..a768e74a1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_sudocmdgroup.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudocmdgroup
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo command group
+description:
+- Add, modify or delete a sudo command group within IPA server using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ cn:
+ description:
+ - Sudo Command Group.
+ aliases: ['name']
+ required: true
+ type: str
+ description:
+ description:
+ - Group description.
+ type: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+ sudocmd:
+ description:
+ - List of sudo commands to assign to the group.
+ - If an empty list is passed all assigned commands will be removed from the group.
+ - If option is omitted sudo commands will not be checked or changed.
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo command group exists
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ description: Group of important commands
+ sudocmd:
+ - su
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure sudo command group does not exist
+ community.general.ipa_sudocmdgroup:
+ name: group01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudocmdgroup:
+ description: Sudo command group as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class SudoCmdGroupIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoCmdGroupIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudocmdgroup_find(self, name):
+ return self._post_json(method='sudocmdgroup_find', name=None, item={'all': True, 'cn': name})
+
+ def sudocmdgroup_add(self, name, item):
+ return self._post_json(method='sudocmdgroup_add', name=name, item=item)
+
+ def sudocmdgroup_mod(self, name, item):
+ return self._post_json(method='sudocmdgroup_mod', name=name, item=item)
+
+ def sudocmdgroup_del(self, name):
+ return self._post_json(method='sudocmdgroup_del', name=name)
+
+ def sudocmdgroup_add_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_add_member', name=name, item=item)
+
+ def sudocmdgroup_add_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_add_member(name=name, item={'sudocmd': item})
+
+ def sudocmdgroup_remove_member(self, name, item):
+ return self._post_json(method='sudocmdgroup_remove_member', name=name, item=item)
+
+ def sudocmdgroup_remove_member_sudocmd(self, name, item):
+ return self.sudocmdgroup_remove_member(name=name, item={'sudocmd': item})
+
+
+def get_sudocmdgroup_dict(description=None):
+ data = {}
+ if description is not None:
+ data['description'] = description
+ return data
+
+
+def get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup):
+ return client.get_diff(ipa_data=ipa_sudocmdgroup, module_data=module_sudocmdgroup)
+
+
+def ensure(module, client):
+ name = module.params['cn']
+ state = module.params['state']
+ sudocmd = module.params['sudocmd']
+
+ module_sudocmdgroup = get_sudocmdgroup_dict(description=module.params['description'])
+ ipa_sudocmdgroup = client.sudocmdgroup_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ ipa_sudocmdgroup = client.sudocmdgroup_add(name=name, item=module_sudocmdgroup)
+ else:
+ diff = get_sudocmdgroup_diff(client, ipa_sudocmdgroup, module_sudocmdgroup)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ data = {}
+ for key in diff:
+ data[key] = module_sudocmdgroup.get(key)
+ client.sudocmdgroup_mod(name=name, item=data)
+
+ if sudocmd is not None:
+ changed = client.modify_if_diff(name, ipa_sudocmdgroup.get('member_sudocmd', []), sudocmd,
+ client.sudocmdgroup_add_member_sudocmd,
+ client.sudocmdgroup_remove_member_sudocmd) or changed
+ else:
+ if ipa_sudocmdgroup:
+ changed = True
+ if not module.check_mode:
+ client.sudocmdgroup_del(name=name)
+
+ return changed, client.sudocmdgroup_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ sudocmd=dict(type='list', elements='str'))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = SudoCmdGroupIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudocmdgroup = ensure(module, client)
+ module.exit_json(changed=changed, sudocmdgroup=sudocmdgroup)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_sudorule.py b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
new file mode 100644
index 000000000..59b4eb19e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_sudorule.py
@@ -0,0 +1,471 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_sudorule
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA sudo rule
+description:
+- Add, modify or delete sudo rule within IPA server using IPA API.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ cn:
+ description:
+ - Canonical name.
+ - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ['name']
+ type: str
+ cmdcategory:
+ description:
+ - Command category the rule applies to.
+ choices: ['all']
+ type: str
+ cmd:
+ description:
+ - List of commands assigned to the rule.
+ - If an empty list is passed all commands will be removed from the rule.
+ - If option is omitted commands will not be checked or changed.
+ type: list
+ elements: str
+ cmdgroup:
+ description:
+ - List of command groups assigned to the rule.
+ - If an empty list is passed all command groups will be removed from the rule.
+ - If option is omitted command groups will not be checked or changed.
+ type: list
+ elements: str
+ version_added: 2.0.0
+ description:
+ description:
+ - Description of the sudo rule.
+ type: str
+ host:
+ description:
+ - List of hosts assigned to the rule.
+ - If an empty list is passed all hosts will be removed from the rule.
+ - If option is omitted hosts will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign hosts.
+ type: list
+ elements: str
+ hostcategory:
+ description:
+ - Host category the rule applies to.
+ - If 'all' is passed one must omit C(host) and C(hostgroup).
+ - Option C(host) and C(hostgroup) must be omitted to assign 'all'.
+ choices: ['all']
+ type: str
+ hostgroup:
+ description:
+ - List of host groups assigned to the rule.
+ - If an empty list is passed all host groups will be removed from the rule.
+ - If option is omitted host groups will not be checked or changed.
+ - Option C(hostcategory) must be omitted to assign host groups.
+ type: list
+ elements: str
+ runasextusers:
+ description:
+ - List of external RunAs users.
+ type: list
+ elements: str
+ version_added: 2.3.0
+ runasusercategory:
+ description:
+ - RunAs User category the rule applies to.
+ choices: ['all']
+ type: str
+ runasgroupcategory:
+ description:
+ - RunAs Group category the rule applies to.
+ choices: ['all']
+ type: str
+ sudoopt:
+ description:
+ - List of options to add to the sudo rule.
+ type: list
+ elements: str
+ user:
+ description:
+ - List of users assigned to the rule.
+ - If an empty list is passed all users will be removed from the rule.
+ - If option is omitted users will not be checked or changed.
+ type: list
+ elements: str
+ usercategory:
+ description:
+ - User category the rule applies to.
+ choices: ['all']
+ type: str
+ usergroup:
+ description:
+ - List of user groups assigned to the rule.
+ - If an empty list is passed all user groups will be removed from the rule.
+ - If option is omitted user groups will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: present
+ choices: ['absent', 'disabled', 'enabled', 'present']
+ type: str
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
+ community.general.ipa_sudorule:
+ name: sudo_all_nopasswd
+ cmdcategory: all
+ description: Allow running every command with sudo without a password
+ hostcategory: all
+ sudoopt:
+ - '!authenticate'
+ usercategory: all
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
+ community.general.ipa_sudorule:
+ name: sudo_dev_dbserver
+ description: Allow developers to run every command with sudo on all database servers
+ cmdcategory: all
+ host:
+ - db01.example.com
+ hostgroup:
+ - db-server
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - developers
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure user group operations can run any command that is part of operations-cmdgroup on any host as user root.
+ community.general.ipa_sudorule:
+ name: sudo_operations_all
+ description: Allow operators to run any command that is part of operations-cmdgroup on any host as user root.
+ cmdgroup:
+ - operations-cmdgroup
+ hostcategory: all
+ runasextusers:
+ - root
+ sudoopt:
+ - '!authenticate'
+ usergroup:
+ - operators
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+sudorule:
+ description: Sudo rule as returned by IPA API.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class SudoRuleIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
+
+ def sudorule_find(self, name):
+ return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
+
+ def sudorule_add(self, name, item):
+ return self._post_json(method='sudorule_add', name=name, item=item)
+
+ def sudorule_add_runasuser(self, name, item):
+ return self._post_json(method='sudorule_add_runasuser', name=name, item={'user': item})
+
+ def sudorule_remove_runasuser(self, name, item):
+ return self._post_json(method='sudorule_remove_runasuser', name=name, item={'user': item})
+
+ def sudorule_mod(self, name, item):
+ return self._post_json(method='sudorule_mod', name=name, item=item)
+
+ def sudorule_del(self, name):
+ return self._post_json(method='sudorule_del', name=name)
+
+ def sudorule_add_option(self, name, item):
+ return self._post_json(method='sudorule_add_option', name=name, item=item)
+
+ def sudorule_add_option_ipasudoopt(self, name, item):
+ return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_remove_option(self, name, item):
+ return self._post_json(method='sudorule_remove_option', name=name, item=item)
+
+ def sudorule_remove_option_ipasudoopt(self, name, item):
+ return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
+
+ def sudorule_add_host(self, name, item):
+ return self._post_json(method='sudorule_add_host', name=name, item=item)
+
+ def sudorule_add_host_host(self, name, item):
+ return self.sudorule_add_host(name=name, item={'host': item})
+
+ def sudorule_add_host_hostgroup(self, name, item):
+ return self.sudorule_add_host(name=name, item={'hostgroup': item})
+
+ def sudorule_remove_host(self, name, item):
+ return self._post_json(method='sudorule_remove_host', name=name, item=item)
+
+ def sudorule_remove_host_host(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'host': item})
+
+ def sudorule_remove_host_hostgroup(self, name, item):
+ return self.sudorule_remove_host(name=name, item={'hostgroup': item})
+
+ def sudorule_add_allow_command(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmd': item})
+
+ def sudorule_add_allow_command_group(self, name, item):
+ return self._post_json(method='sudorule_add_allow_command', name=name, item={'sudocmdgroup': item})
+
+ def sudorule_remove_allow_command(self, name, item):
+ return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
+
+ def sudorule_add_user(self, name, item):
+ return self._post_json(method='sudorule_add_user', name=name, item=item)
+
+ def sudorule_add_user_user(self, name, item):
+ return self.sudorule_add_user(name=name, item={'user': item})
+
+ def sudorule_add_user_group(self, name, item):
+ return self.sudorule_add_user(name=name, item={'group': item})
+
+ def sudorule_remove_user(self, name, item):
+ return self._post_json(method='sudorule_remove_user', name=name, item=item)
+
+ def sudorule_remove_user_user(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'user': item})
+
+ def sudorule_remove_user_group(self, name, item):
+ return self.sudorule_remove_user(name=name, item={'group': item})
+
+
+def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None,
+ runasgroupcategory=None, runasusercategory=None):
+ data = {}
+ if cmdcategory is not None:
+ data['cmdcategory'] = cmdcategory
+ if description is not None:
+ data['description'] = description
+ if hostcategory is not None:
+ data['hostcategory'] = hostcategory
+ if ipaenabledflag is not None:
+ data['ipaenabledflag'] = ipaenabledflag
+ if usercategory is not None:
+ data['usercategory'] = usercategory
+ if runasusercategory is not None:
+ data['ipasudorunasusercategory'] = runasusercategory
+ if runasgroupcategory is not None:
+ data['ipasudorunasgroupcategory'] = runasgroupcategory
+ return data
+
+
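+# Helper used below: when a *category option was previously set to 'all' and specific
+# members are now requested, the category must first be cleared via sudorule_mod,
+# since IPA does not allow individual members while the category is 'all'.
+# Returns True when the category had to be reset.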
+def category_changed(module, client, category_name, ipa_sudorule):
+ if ipa_sudorule.get(category_name, None) == ['all']:
+ if not module.check_mode:
+ # cn is returned as list even with only a single value.
+ client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
+ return True
+ return False
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ cmd = module.params['cmd']
+ cmdgroup = module.params['cmdgroup']
+ cmdcategory = module.params['cmdcategory']
+ host = module.params['host']
+ hostcategory = module.params['hostcategory']
+ hostgroup = module.params['hostgroup']
+ runasusercategory = module.params['runasusercategory']
+ runasgroupcategory = module.params['runasgroupcategory']
+ runasextusers = module.params['runasextusers']
+
+ if state in ['present', 'enabled']:
+ ipaenabledflag = 'TRUE'
+ else:
+ ipaenabledflag = 'FALSE'
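+ # The flag is derived from the requested state: 'present' and 'enabled' enable
+ # the rule, while 'disabled' keeps the rule but switches it off ('absent' also
+ # ends up with 'FALSE', but the rule is deleted further below).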
+
+ sudoopt = module.params['sudoopt']
+ user = module.params['user']
+ usercategory = module.params['usercategory']
+ usergroup = module.params['usergroup']
+
+ module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
+ description=module.params['description'],
+ hostcategory=hostcategory,
+ ipaenabledflag=ipaenabledflag,
+ usercategory=usercategory,
+ runasusercategory=runasusercategory,
+ runasgroupcategory=runasgroupcategory)
+ ipa_sudorule = client.sudorule_find(name=name)
+
+ changed = False
+ if state in ['present', 'disabled', 'enabled']:
+ if not ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
+ else:
+ diff = client.get_diff(ipa_sudorule, module_sudorule)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ if 'hostcategory' in diff:
+ if ipa_sudorule.get('memberhost_host', None) is not None:
+ client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
+ if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
+ client.sudorule_remove_host_hostgroup(name=name,
+ item=ipa_sudorule.get('memberhost_hostgroup'))
+
+ client.sudorule_mod(name=name, item=module_sudorule)
+
+ if cmd is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command(name=name, item=cmd)
+
+ if cmdgroup is not None:
+ changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
+ if not module.check_mode:
+ client.sudorule_add_allow_command_group(name=name, item=cmdgroup)
+
+ if runasusercategory is not None:
+ changed = category_changed(module, client, 'ipasudorunasusercategory', ipa_sudorule) or changed
+
+ if runasgroupcategory is not None:
+ changed = category_changed(module, client, 'ipasudorunasgroupcategory', ipa_sudorule) or changed
+
+ if host is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
+ client.sudorule_add_host_host,
+ client.sudorule_remove_host_host) or changed
+
+ if hostgroup is not None:
+ changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
+ client.sudorule_add_host_hostgroup,
+ client.sudorule_remove_host_hostgroup) or changed
+ if sudoopt is not None:
+ # client.modify_if_diff does not work as each option must be removed/added by its own
+ ipa_list = ipa_sudorule.get('ipasudoopt', [])
+ module_list = sudoopt
+ diff = list(set(ipa_list) - set(module_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_option_ipasudoopt(name, item)
+ diff = list(set(module_list) - set(ipa_list))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_option_ipasudoopt(name, item)
+
+ if runasextusers is not None:
+ ipa_sudorule_run_as_user = ipa_sudorule.get('ipasudorunasextuser', [])
+ diff = list(set(ipa_sudorule_run_as_user) - set(runasextusers))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_remove_runasuser(name=name, item=item)
+ diff = list(set(runasextusers) - set(ipa_sudorule_run_as_user))
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ for item in diff:
+ client.sudorule_add_runasuser(name=name, item=item)
+
+ if user is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
+ client.sudorule_add_user_user,
+ client.sudorule_remove_user_user) or changed
+ if usergroup is not None:
+ changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
+ changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
+ client.sudorule_add_user_group,
+ client.sudorule_remove_user_group) or changed
+ else:
+ if ipa_sudorule:
+ changed = True
+ if not module.check_mode:
+ client.sudorule_del(name)
+
+ return changed, client.sudorule_find(name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cmd=dict(type='list', elements='str'),
+ cmdgroup=dict(type='list', elements='str'),
+ cmdcategory=dict(type='str', choices=['all']),
+ cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ host=dict(type='list', elements='str'),
+ hostcategory=dict(type='str', choices=['all']),
+ hostgroup=dict(type='list', elements='str'),
+ runasusercategory=dict(type='str', choices=['all']),
+ runasgroupcategory=dict(type='str', choices=['all']),
+ sudoopt=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
+ user=dict(type='list', elements='str'),
+ usercategory=dict(type='str', choices=['all']),
+ usergroup=dict(type='list', elements='str'),
+ runasextusers=dict(type='list', elements='str'))
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[['cmdcategory', 'cmd'],
+ ['cmdcategory', 'cmdgroup'],
+ ['hostcategory', 'host'],
+ ['hostcategory', 'hostgroup'],
+ ['usercategory', 'user'],
+ ['usercategory', 'usergroup']],
+ supports_check_mode=True)
+
+ client = SudoRuleIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, sudorule = ensure(module, client)
+ module.exit_json(changed=changed, sudorule=sudorule)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_user.py b/ansible_collections/community/general/plugins/modules/ipa_user.py
new file mode 100644
index 000000000..17b72176e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_user.py
@@ -0,0 +1,404 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_user
+author: Thomas Krahn (@Nosmoht)
+short_description: Manage FreeIPA users
+description:
+- Add, modify and delete a user within IPA server.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ displayname:
+ description: Display name.
+ type: str
+ update_password:
+ description:
+ - Whether to always set the user's password, or only on user creation.
+ type: str
+ default: 'always'
+ choices: [ always, on_create ]
+ givenname:
+ description: First name.
+ type: str
+ krbpasswordexpiration:
+ description:
+ - Date at which the user password will expire.
+ - In the format YYYYMMddHHmmss.
+ - For example, 20180121182022 expires on 21 January 2018 at 18:20:22.
+ type: str
+ loginshell:
+ description: Login shell.
+ type: str
+ mail:
+ description:
+ - List of mail addresses assigned to the user.
+ - If an empty list is passed all assigned email addresses will be deleted.
+ - If None is passed email addresses will not be checked or changed.
+ type: list
+ elements: str
+ password:
+ description:
+ - Password for a user.
+ - Will not be set for an existing user unless I(update_password=always), which is the default.
+ type: str
+ sn:
+ description: Surname.
+ type: str
+ sshpubkey:
+ description:
+ - List of public SSH keys.
+ - If an empty list is passed all assigned public keys will be deleted.
+ - If None is passed SSH public keys will not be checked or changed.
+ type: list
+ elements: str
+ state:
+ description: State to ensure.
+ default: "present"
+ choices: ["absent", "disabled", "enabled", "present"]
+ type: str
+ telephonenumber:
+ description:
+ - List of telephone numbers assigned to the user.
+ - If an empty list is passed all assigned telephone numbers will be deleted.
+ - If None is passed telephone numbers will not be checked or changed.
+ type: list
+ elements: str
+ title:
+ description: Title.
+ type: str
+ uid:
+ description: uid of the user.
+ required: true
+ aliases: ["name"]
+ type: str
+ uidnumber:
+ description:
+ - Account Settings UID/Posix User ID number.
+ type: str
+ gidnumber:
+ description:
+ - Posix Group ID.
+ type: str
+ homedirectory:
+ description:
+ - Default home directory of the user.
+ type: str
+ version_added: '0.2.0'
+ userauthtype:
+ description:
+ - The authentication type to use for the user.
+ choices: ["password", "radius", "otp", "pkinit", "hardened"]
+ type: list
+ elements: str
+ version_added: '1.2.0'
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+requirements:
+- base64
+- hashlib
+'''
+
+EXAMPLES = r'''
+- name: Ensure pinky is present and always reset password
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ krbpasswordexpiration: 20200119235959
+ givenname: Pinky
+ sn: Acme
+ mail:
+ - pinky@acme.com
+ telephonenumber:
+ - '+555123456'
+ sshpubkey:
+ - ssh-rsa ....
+ - ssh-dsa ....
+ uidnumber: '1001'
+ gidnumber: '100'
+ homedirectory: /home/pinky
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure brain is absent
+ community.general.ipa_user:
+ name: brain
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure pinky is present but don't reset password if already exists
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ givenname: Pinky
+ sn: Acme
+ password: zounds
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ update_password: on_create
+
+- name: Ensure pinky is present and using one time password and RADIUS authentication
+ community.general.ipa_user:
+ name: pinky
+ state: present
+ userauthtype:
+ - otp
+ - radius
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+user:
+ description: User as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import base64
+import hashlib
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class UserIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(UserIPAClient, self).__init__(module, host, port, protocol)
+
+ def user_find(self, name):
+ return self._post_json(method='user_find', name=None, item={'all': True, 'uid': name})
+
+ def user_add(self, name, item):
+ return self._post_json(method='user_add', name=name, item=item)
+
+ def user_mod(self, name, item):
+ return self._post_json(method='user_mod', name=name, item=item)
+
+ def user_del(self, name):
+ return self._post_json(method='user_del', name=name)
+
+ def user_disable(self, name):
+ return self._post_json(method='user_disable', name=name)
+
+ def user_enable(self, name):
+ return self._post_json(method='user_enable', name=name)
+
+
+def get_user_dict(displayname=None, givenname=None, krbpasswordexpiration=None, loginshell=None,
+ mail=None, nsaccountlock=False, sn=None, sshpubkey=None, telephonenumber=None,
+ title=None, userpassword=None, gidnumber=None, uidnumber=None, homedirectory=None,
+ userauthtype=None):
+ user = {}
+ if displayname is not None:
+ user['displayname'] = displayname
+ if krbpasswordexpiration is not None:
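+        # FreeIPA expects LDAP generalized time (YYYYMMDDHHMMSSZ), so the UTC
+        # designator 'Z' is appended to the user-supplied timestamp.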
+ user['krbpasswordexpiration'] = krbpasswordexpiration + "Z"
+ if givenname is not None:
+ user['givenname'] = givenname
+ if loginshell is not None:
+ user['loginshell'] = loginshell
+ if mail is not None:
+ user['mail'] = mail
+ user['nsaccountlock'] = nsaccountlock
+ if sn is not None:
+ user['sn'] = sn
+ if sshpubkey is not None:
+ user['ipasshpubkey'] = sshpubkey
+ if telephonenumber is not None:
+ user['telephonenumber'] = telephonenumber
+ if title is not None:
+ user['title'] = title
+ if userpassword is not None:
+ user['userpassword'] = userpassword
+ if gidnumber is not None:
+ user['gidnumber'] = gidnumber
+ if uidnumber is not None:
+ user['uidnumber'] = uidnumber
+ if homedirectory is not None:
+ user['homedirectory'] = homedirectory
+ if userauthtype is not None:
+ user['ipauserauthtype'] = userauthtype
+
+ return user
+
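+# For example (hypothetical values), get_user_dict(givenname='Pinky', sn='Acme',
+# mail=['pinky@acme.com']) returns:
+#     {'givenname': 'Pinky', 'mail': ['pinky@acme.com'],
+#      'nsaccountlock': False, 'sn': 'Acme'}
+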
+
+def get_user_diff(client, ipa_user, module_user):
+ """
+ Return the keys of each dict whereas values are different. Unfortunately the IPA
+ API returns everything as a list even if only a single value is possible.
+ Therefore some more complexity is needed.
+ The method will check if the value type of module_user.attr is not a list and
+ create a list with that element if the same attribute in ipa_user is list. In this way I hope that the method
+ must not be changed if the returned API dict is changed.
+ :param ipa_user:
+ :param module_user:
+ :return:
+ """
+    # sshpubkeyfp is the list of ssh key fingerprints. IPA doesn't return the keys themselves, only their
+    # fingerprints, which are used for comparison.
+ sshpubkey = None
+ if 'ipasshpubkey' in module_user:
+ hash_algo = 'md5'
+ if 'sshpubkeyfp' in ipa_user and ipa_user['sshpubkeyfp'][0][:7].upper() == 'SHA256:':
+ hash_algo = 'sha256'
+ module_user['sshpubkeyfp'] = [get_ssh_key_fingerprint(pubkey, hash_algo) for pubkey in module_user['ipasshpubkey']]
+    # Remove the ipasshpubkey element as it is not returned from IPA, but save its value for later use
+ sshpubkey = module_user['ipasshpubkey']
+ del module_user['ipasshpubkey']
+
+ result = client.get_diff(ipa_data=ipa_user, module_data=module_user)
+
+ # If there are public keys, remove the fingerprints and add them back to the dict
+ if sshpubkey is not None:
+ del module_user['sshpubkeyfp']
+ module_user['ipasshpubkey'] = sshpubkey
+ return result
+
+
+def get_ssh_key_fingerprint(ssh_key, hash_algo='sha256'):
+ """
+ Return the public key fingerprint of a given public SSH key
+ in format "[fp] [comment] (ssh-rsa)" where fp is of the format:
+ FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7
+ for md5 or
+ SHA256:[base64]
+ for sha256
+ Comments are assumed to be all characters past the second
+ whitespace character in the sshpubkey string.
+ :param ssh_key:
+ :param hash_algo:
+ :return:
+ """
+ parts = ssh_key.strip().split(None, 2)
+    if len(parts) < 2:  # need at least the key type and the key data
+ return None
+ key_type = parts[0]
+ key = base64.b64decode(parts[1].encode('ascii'))
+
+ if hash_algo == 'md5':
+ fp_plain = hashlib.md5(key).hexdigest()
+ key_fp = ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2])).upper()
+ elif hash_algo == 'sha256':
+ fp_plain = base64.b64encode(hashlib.sha256(key).digest()).decode('ascii').rstrip('=')
+ key_fp = 'SHA256:{fp}'.format(fp=fp_plain)
+ if len(parts) < 3:
+ return "%s (%s)" % (key_fp, key_type)
+ else:
+ comment = parts[2]
+ return "%s %s (%s)" % (key_fp, comment, key_type)
+
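+# A rough sketch of the two formats produced by get_ssh_key_fingerprint(),
+# using hypothetical (abbreviated) key material:
+#     get_ssh_key_fingerprint('ssh-rsa AAAAB3... pinky@acme', 'md5')
+#         -> 'FB:0C:AC:0A:07:94:5B:CE:75:6E:63:32:13:AD:AD:D7 pinky@acme (ssh-rsa)'
+#     get_ssh_key_fingerprint('ssh-rsa AAAAB3... pinky@acme', 'sha256')
+#         -> 'SHA256:<base64-digest> pinky@acme (ssh-rsa)'
+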
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['uid']
+ nsaccountlock = state == 'disabled'
+
+ module_user = get_user_dict(displayname=module.params.get('displayname'),
+ krbpasswordexpiration=module.params.get('krbpasswordexpiration'),
+ givenname=module.params.get('givenname'),
+ loginshell=module.params['loginshell'],
+ mail=module.params['mail'], sn=module.params['sn'],
+ sshpubkey=module.params['sshpubkey'], nsaccountlock=nsaccountlock,
+ telephonenumber=module.params['telephonenumber'], title=module.params['title'],
+ userpassword=module.params['password'],
+ gidnumber=module.params.get('gidnumber'), uidnumber=module.params.get('uidnumber'),
+ homedirectory=module.params.get('homedirectory'),
+ userauthtype=module.params.get('userauthtype'))
+
+ update_password = module.params.get('update_password')
+ ipa_user = client.user_find(name=name)
+
+ changed = False
+ if state in ['present', 'enabled', 'disabled']:
+ if not ipa_user:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_add(name=name, item=module_user)
+ else:
+ if update_password == 'on_create':
+ module_user.pop('userpassword', None)
+ diff = get_user_diff(client, ipa_user, module_user)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
+ ipa_user = client.user_mod(name=name, item=module_user)
+ else:
+ if ipa_user:
+ changed = True
+ if not module.check_mode:
+ client.user_del(name)
+
+ return changed, ipa_user
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(displayname=dict(type='str'),
+ givenname=dict(type='str'),
+ update_password=dict(type='str', default="always",
+ choices=['always', 'on_create'],
+ no_log=False),
+ krbpasswordexpiration=dict(type='str', no_log=False),
+ loginshell=dict(type='str'),
+ mail=dict(type='list', elements='str'),
+ sn=dict(type='str'),
+ uid=dict(type='str', required=True, aliases=['name']),
+ gidnumber=dict(type='str'),
+ uidnumber=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ sshpubkey=dict(type='list', elements='str'),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'enabled', 'disabled']),
+ telephonenumber=dict(type='list', elements='str'),
+ title=dict(type='str'),
+ homedirectory=dict(type='str'),
+ userauthtype=dict(type='list', elements='str',
+ choices=['password', 'radius', 'otp', 'pkinit', 'hardened']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ client = UserIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+
+    # If sshpubkey is passed as an empty string, module.params['sshpubkey'] is ['']. IPA itself returns None
+    # (not a list). Therefore a small check here replaces [''] by None; otherwise get_user_diff() would report
+    # sshpubkey as different, which should be avoided.
+ if module.params['sshpubkey'] is not None:
+ if len(module.params['sshpubkey']) == 1 and module.params['sshpubkey'][0] == "":
+ module.params['sshpubkey'] = None
+
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, user = ensure(module, client)
+ module.exit_json(changed=changed, user=user)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipa_vault.py b/ansible_collections/community/general/plugins/modules/ipa_vault.py
new file mode 100644
index 000000000..84b72c1ab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipa_vault.py
@@ -0,0 +1,256 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Juan Manuel Parrilla <jparrill@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ipa_vault
+author: Juan Manuel Parrilla (@jparrill)
+short_description: Manage FreeIPA vaults
+description:
+- Add, modify and delete vaults and secret vaults.
+- KRA service should be enabled to use this module.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ cn:
+ description:
+ - Vault name.
+    - Cannot be changed as it is the unique identifier.
+ required: true
+ aliases: ["name"]
+ type: str
+ description:
+ description:
+ - Description.
+ type: str
+ ipavaulttype:
+ description:
+ - Vault types are based on security level.
+ default: "symmetric"
+ choices: ["asymmetric", "standard", "symmetric"]
+ aliases: ["vault_type"]
+ type: str
+ ipavaultpublickey:
+ description:
+ - Public key.
+ aliases: ["vault_public_key"]
+ type: str
+ ipavaultsalt:
+ description:
+ - Vault Salt.
+ aliases: ["vault_salt"]
+ type: str
+ username:
+ description:
+ - Any user can own one or more user vaults.
+ - Mutually exclusive with service.
+ aliases: ["user"]
+ type: list
+ elements: str
+ service:
+ description:
+ - Any service can own one or more service vaults.
+ - Mutually exclusive with user.
+ type: str
+ state:
+ description:
+ - State to ensure.
+ default: "present"
+ choices: ["absent", "present"]
+ type: str
+ replace:
+ description:
+    - Force replacement of the existing vault on the IPA server.
+    type: bool
+    default: false
+ validate_certs:
+ description:
+ - Validate IPA server certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+ - community.general.ipa.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Ensure vault is present
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ validate_certs: false
+
+- name: Ensure vault is present for Admin user
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Ensure vault is absent
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ user: user01
+ state: absent
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+
+- name: Modify vault if already exists
+ community.general.ipa_vault:
+ name: vault01
+ vault_type: standard
+ description: "Vault for test"
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+ replace: true
+
+- name: Get vault info if already exists
+ community.general.ipa_vault:
+ name: vault01
+ ipa_host: ipa.example.com
+ ipa_user: admin
+ ipa_pass: topsecret
+'''
+
+RETURN = r'''
+vault:
+ description: Vault as returned by IPA API
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ipa import IPAClient, ipa_argument_spec
+from ansible.module_utils.common.text.converters import to_native
+
+
+class VaultIPAClient(IPAClient):
+ def __init__(self, module, host, port, protocol):
+ super(VaultIPAClient, self).__init__(module, host, port, protocol)
+
+ def vault_find(self, name):
+ return self._post_json(method='vault_find', name=None, item={'all': True, 'cn': name})
+
+ def vault_add_internal(self, name, item):
+ return self._post_json(method='vault_add_internal', name=name, item=item)
+
+ def vault_mod_internal(self, name, item):
+ return self._post_json(method='vault_mod_internal', name=name, item=item)
+
+ def vault_del(self, name):
+ return self._post_json(method='vault_del', name=name)
+
+
+def get_vault_dict(description=None, vault_type=None, vault_salt=None, vault_public_key=None, service=None):
+ vault = {}
+
+ if description is not None:
+ vault['description'] = description
+ if vault_type is not None:
+ vault['ipavaulttype'] = vault_type
+ if vault_salt is not None:
+ vault['ipavaultsalt'] = vault_salt
+ if vault_public_key is not None:
+ vault['ipavaultpublickey'] = vault_public_key
+ if service is not None:
+ vault['service'] = service
+ return vault
+
+
+def get_vault_diff(client, ipa_vault, module_vault, module):
+ return client.get_diff(ipa_data=ipa_vault, module_data=module_vault)
+
+
+def ensure(module, client):
+ state = module.params['state']
+ name = module.params['cn']
+ user = module.params['username']
+ replace = module.params['replace']
+
+ module_vault = get_vault_dict(description=module.params['description'], vault_type=module.params['ipavaulttype'],
+ vault_salt=module.params['ipavaultsalt'],
+ vault_public_key=module.params['ipavaultpublickey'],
+ service=module.params['service'])
+ ipa_vault = client.vault_find(name=name)
+
+ changed = False
+ if state == 'present':
+ if not ipa_vault:
+ # New vault
+ changed = True
+ if not module.check_mode:
+ ipa_vault = client.vault_add_internal(name, item=module_vault)
+ else:
+ # Already exists
+ if replace:
+ diff = get_vault_diff(client, ipa_vault, module_vault, module)
+ if len(diff) > 0:
+ changed = True
+ if not module.check_mode:
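+                        # Send only the attributes that differ, e.g. just
+                        # {'description': 'Vault for test'} when only the
+                        # description was changed.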
+ data = {}
+ for key in diff:
+ data[key] = module_vault.get(key)
+ client.vault_mod_internal(name=name, item=data)
+
+ else:
+ if ipa_vault:
+ changed = True
+ if not module.check_mode:
+ client.vault_del(name)
+
+ return changed, client.vault_find(name=name)
+
+
+def main():
+ argument_spec = ipa_argument_spec()
+ argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
+ description=dict(type='str'),
+ ipavaulttype=dict(type='str', default='symmetric',
+ choices=['standard', 'symmetric', 'asymmetric'], aliases=['vault_type']),
+ ipavaultsalt=dict(type='str', aliases=['vault_salt']),
+ ipavaultpublickey=dict(type='str', aliases=['vault_public_key']),
+ service=dict(type='str'),
+                         replace=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ username=dict(type='list', elements='str', aliases=['user']))
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[['username', 'service']])
+
+ client = VaultIPAClient(module=module,
+ host=module.params['ipa_host'],
+ port=module.params['ipa_port'],
+ protocol=module.params['ipa_prot'])
+ try:
+ client.login(username=module.params['ipa_user'],
+ password=module.params['ipa_pass'])
+ changed, vault = ensure(module, client)
+ module.exit_json(changed=changed, vault=vault)
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipify_facts.py b/ansible_collections/community/general/plugins/modules/ipify_facts.py
new file mode 100644
index 000000000..ab96d7e94
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipify_facts.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ipify_facts
+short_description: Retrieve the public IP of your internet gateway
+description:
+  - Retrieves the public IP of your internet gateway; useful if you are behind NAT.
+author:
+- René Moser (@resmo)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ api_url:
+ description:
+ - URL of the ipify.org API service.
+    - C(?format=json) will be appended by default.
+ type: str
+ default: https://api.ipify.org/
+ timeout:
+ description:
+ - HTTP connection timeout in seconds.
+ type: int
+ default: 10
+ validate_certs:
+ description:
+    - When set to C(false), SSL certificates will not be validated.
+ type: bool
+ default: true
+notes:
+ - Visit https://www.ipify.org to get more information.
+'''
+
+EXAMPLES = r'''
+# Gather IP facts from ipify.org
+- name: Get my public IP
+ community.general.ipify_facts:
+
+# Gather IP facts from your own ipify service endpoint with a custom timeout
+- name: Get my public IP
+ community.general.ipify_facts:
+ api_url: http://api.example.com/ipify
+ timeout: 20
+'''
+
+RETURN = r'''
+---
+ipify_public_ip:
+ description: Public IP of the internet gateway.
+ returned: success
+ type: str
+ sample: 1.2.3.4
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+
+
+class IpifyFacts(object):
+
+ def __init__(self):
+ self.api_url = module.params.get('api_url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'ipify_public_ip': None
+ }
+ (response, info) = fetch_url(module=module, url=self.api_url + "?format=json", force=True, timeout=self.timeout)
+
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.api_url, self.timeout))
+
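+        # The ipify JSON endpoint returns a payload such as {"ip": "1.2.3.4"};
+        # only the "ip" field is used here.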
+ data = json.loads(to_text(response.read()))
+ result['ipify_public_ip'] = data.get('ip')
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_url=dict(type='str', default='https://api.ipify.org/'),
+ timeout=dict(type='int', default=10),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipify_facts = IpifyFacts().run()
+ ipify_facts_result = dict(changed=False, ansible_facts=ipify_facts)
+ module.exit_json(**ipify_facts_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py b/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
new file mode 100644
index 000000000..f29b3cbf4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipinfoio_facts.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Aleksei Kostiuk <unitoff@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipinfoio_facts
+short_description: Retrieve IP geolocation facts of a host's IP address
+description:
+ - "Gather IP geolocation facts of a host's IP address using ipinfo.io API"
+author: "Aleksei Kostiuk (@akostyuk)"
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ timeout:
+ description:
+    - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+ type: int
+ http_agent:
+ description:
+    - Set the HTTP user agent.
+ required: false
+ default: "ansible-ipinfoio-module/0.0.1"
+ type: str
+notes:
+ - "Check http://ipinfo.io/ for more information"
+'''
+
+EXAMPLES = '''
+# Retrieve geolocation data of a host's IP address
+- name: Get IP geolocation data
+ community.general.ipinfoio_facts:
+'''
+
+RETURN = '''
+ansible_facts:
+ description: "Dictionary of ip geolocation facts for a host's IP address"
+ returned: changed
+ type: complex
+ contains:
+ ip:
+ description: "Public IP address of a host"
+ type: str
+ sample: "8.8.8.8"
+ hostname:
+ description: Domain name
+ type: str
+ sample: "google-public-dns-a.google.com"
+ country:
+ description: ISO 3166-1 alpha-2 country code
+ type: str
+ sample: "US"
+ region:
+ description: State or province name
+ type: str
+ sample: "California"
+ city:
+ description: City name
+ type: str
+ sample: "Mountain View"
+ loc:
+ description: Latitude and Longitude of the location
+ type: str
+ sample: "37.3860,-122.0838"
+ org:
+ description: "organization's name"
+ type: str
+ sample: "AS3356 Level 3 Communications, Inc."
+ postal:
+ description: Postal code
+ type: str
+ sample: "94035"
+'''
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible.module_utils.urls import fetch_url
+
+
+USER_AGENT = 'ansible-ipinfoio-module/0.0.1'
+
+
+class IpinfoioFacts(object):
+
+ def __init__(self, module):
+ self.url = 'https://ipinfo.io/json'
+ self.timeout = module.params.get('timeout')
+ self.module = module
+
+ def get_geo_data(self):
+ response, info = fetch_url(self.module, self.url, force=True, # NOQA
+ timeout=self.timeout)
+ try:
+            assert info['status'] == 200
+ except AssertionError:
+ self.module.fail_json(msg='Could not get {0} page, '
+ 'check for connectivity!'.format(self.url))
+ else:
+ try:
+ content = response.read()
+ result = self.module.from_json(content.decode('utf8'))
+ except ValueError:
+ self.module.fail_json(
+ msg='Failed to parse the ipinfo.io response: '
+ '{0} {1}'.format(self.url, content))
+ else:
+ return result
+
+
+def main():
+ module = AnsibleModule( # NOQA
+ argument_spec=dict(
+ http_agent=dict(default=USER_AGENT),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ ipinfoio = IpinfoioFacts(module)
+ ipinfoio_result = dict(
+ changed=False, ansible_facts=ipinfoio.get_geo_data())
+ module.exit_json(**ipinfoio_result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipmi_boot.py b/ansible_collections/community/general/plugins/modules/ipmi_boot.py
new file mode 100644
index 000000000..7a4d2b6ec
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipmi_boot.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_boot
+short_description: Management of the order of boot devices
+description:
+  - Use this module to manage the order of boot devices.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ type: str
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ type: int
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ type: str
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ type: str
+ key:
+ description:
+ - Encryption key to connect to the BMC in hex format.
+ required: false
+ type: str
+ version_added: 4.1.0
+ bootdev:
+ description:
+      - Set boot device to use on next reboot.
+ - "The choices for the device are:
+ - network -- Request network boot
+ - floppy -- Boot from floppy
+ - hd -- Boot from hard drive
+ - safe -- Boot from hard drive, requesting 'safe mode'
+ - optical -- boot from CD/DVD/BD drive
+ - setup -- Boot into setup utility
+ - default -- remove any IPMI directed boot device request"
+ required: true
+ choices:
+ - network
+ - floppy
+ - hd
+ - safe
+ - optical
+ - setup
+ - default
+ type: str
+ state:
+ description:
+      - Whether to ensure that the given boot device is requested.
+      - "The choices for the state are:
+            - present -- Ensure the given boot device is requested
+            - absent -- Ensure the given boot device is not requested"
+ default: present
+ choices: [ present, absent ]
+ type: str
+ persistent:
+ description:
+ - If set, ask that system firmware uses this device beyond next boot.
+ Be aware many systems do not honor this.
+ type: bool
+ default: false
+ uefiboot:
+ description:
+ - If set, request UEFI boot explicitly.
+ Strictly speaking, the spec suggests that if not set, the system should BIOS boot and offers no "don't care" option.
+ In practice, this flag not being set does not preclude UEFI boot on any system I've encountered.
+ type: bool
+ default: false
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+bootdev:
+ description: The boot device name which will be used beyond next boot.
+ returned: success
+ type: str
+ sample: default
+persistent:
+ description: If True, system firmware will use this device beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+uefimode:
+ description: If True, system firmware will use UEFI boot explicitly beyond next boot.
+ returned: success
+ type: bool
+ sample: false
+'''
+
+EXAMPLES = '''
+- name: Ensure bootdevice is HD
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ bootdev: hd
+
+- name: Ensure bootdevice is not Network
+ community.general.ipmi_boot:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ key: 1234567890AABBCCDEFF000000EEEE12
+ bootdev: network
+ state: absent
+'''
+
+import traceback
+import binascii
+
+PYGHMI_IMP_ERR = None
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ key=dict(type='str', no_log=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ bootdev=dict(required=True, choices=['network', 'hd', 'floppy', 'safe', 'optical', 'setup', 'default']),
+ persistent=dict(default=False, type='bool'),
+ uefiboot=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True,
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ bootdev = module.params['bootdev']
+ persistent = module.params['persistent']
+ uefiboot = module.params['uefiboot']
+ request = dict()
+
+ if state == 'absent' and bootdev == 'default':
+ module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")
+
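+    # binascii.unhexlify converts the hex string from the 'key' option into the
+    # raw bytes pyghmi expects for kg, e.g. '0A0B0C' -> b'\n\x0b\x0c'.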
+ try:
+ if module.params['key']:
+ key = binascii.unhexlify(module.params['key'])
+ else:
+ key = None
+ except Exception as e:
+ module.fail_json(msg="Unable to convert 'key' from hex string.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port, kg=key
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+ current = ipmi_cmd.get_bootdev()
+        # uefimode may not be supported by the BMC, so use the desired value as default
+ current.setdefault('uefimode', uefiboot)
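+        # For example, with state=present, bootdev=hd and a BMC currently booting
+        # from 'network', the request below becomes (with default persistent and
+        # uefiboot values) {'bootdev': 'hd', 'uefiboot': False, 'persist': False}.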
+ if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
+ request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
+ elif state == 'absent' and current['bootdev'] == bootdev:
+ request = dict(bootdev='default')
+ else:
+ module.exit_json(changed=False, **current)
+
+ if module.check_mode:
+ response = dict(bootdev=request['bootdev'])
+ else:
+ response = ipmi_cmd.set_bootdev(**request)
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ if 'persist' in request:
+ response['persistent'] = request['persist']
+ if 'uefiboot' in request:
+ response['uefimode'] = request['uefiboot']
+
+ module.exit_json(changed=True, **response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipmi_power.py b/ansible_collections/community/general/plugins/modules/ipmi_power.py
new file mode 100644
index 000000000..e152f35eb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipmi_power.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ipmi_power
+short_description: Power management for machine
+description:
+ - Use this module for power management
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Hostname or ip address of the BMC.
+ required: true
+ type: str
+ port:
+ description:
+ - Remote RMCP port.
+ default: 623
+ type: int
+ user:
+ description:
+ - Username to use to connect to the BMC.
+ required: true
+ type: str
+ password:
+ description:
+ - Password to connect to the BMC.
+ required: true
+ type: str
+ key:
+ description:
+ - Encryption key to connect to the BMC in hex format.
+ required: false
+ type: str
+ version_added: 4.1.0
+ state:
+ description:
+      - Whether to ensure that the machine is in the desired state.
+ - "The choices for state are:
+ - on -- Request system turn on
+ - off -- Request system turn off without waiting for OS to shutdown
+ - shutdown -- Have system request OS proper shutdown
+ - reset -- Request system reset without waiting for OS
+ - boot -- If system is off, then 'on', else 'reset'"
+ - Either this option or I(machine) is required.
+ choices: ['on', 'off', shutdown, reset, boot]
+ type: str
+ timeout:
+ description:
+ - Maximum number of seconds before interrupt request.
+ default: 300
+ type: int
+ machine:
+ description:
+      - Provide a list of remote target addresses for bridged IPMI requests,
+        optionally together with the desired power state for each.
+ - Either this option or I(state) is required.
+ required: false
+ type: list
+ elements: dict
+ version_added: 4.3.0
+ suboptions:
+ targetAddress:
+ description:
+ - Remote target address for the bridge IPMI request.
+ type: int
+ required: true
+ state:
+ description:
+          - Whether to ensure that the machine specified by I(targetAddress) is in the desired state.
+ - If this option is not set, the power state is set by I(state).
+ - If both this option and I(state) are set, this option takes precedence over I(state).
+ choices: ['on', 'off', shutdown, reset, boot]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - pyghmi
+author: "Bulat Gaifullin (@bgaifullin) <gaifullinbf@gmail.com>"
+'''
+
+RETURN = '''
+powerstate:
+ description: The current power state of the machine.
+ returned: success and I(machine) is not provided
+ type: str
+ sample: 'on'
+status:
+ description: The current power state of the machine when the machine option is set.
+ returned: success and I(machine) is provided
+ type: list
+ elements: dict
+ version_added: 4.3.0
+ contains:
+ powerstate:
+ description: The current power state of the machine specified by I(targetAddress).
+ type: str
+ targetAddress:
+ description: The remote target address.
+ type: int
+ sample: [
+ {
+ "powerstate": "on",
+ "targetAddress": 48,
+ },
+ {
+ "powerstate": "on",
+ "targetAddress": 50,
+ },
+ ]
+'''
+
+EXAMPLES = '''
+- name: Ensure machine is powered on
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: 'on'
+
+- name: Ensure machines of which remote target address is 48 and 50 are powered off
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ state: 'off'
+ machine:
+ - targetAddress: 48
+ - targetAddress: 50
+
+- name: Ensure machine of which remote target address is 48 is powered on, and 50 is powered off
+ community.general.ipmi_power:
+ name: test.testdomain.com
+ user: admin
+ password: password
+ machine:
+ - targetAddress: 48
+ state: 'on'
+ - targetAddress: 50
+ state: 'off'
+'''
+
+import traceback
+import binascii
+
+PYGHMI_IMP_ERR = None
+INVALID_TARGET_ADDRESS = 0x100
+try:
+ from pyghmi.ipmi import command
+except ImportError:
+ PYGHMI_IMP_ERR = traceback.format_exc()
+ command = None
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ port=dict(default=623, type='int'),
+ state=dict(choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ user=dict(required=True, no_log=True),
+ password=dict(required=True, no_log=True),
+ key=dict(type='str', no_log=True),
+ timeout=dict(default=300, type='int'),
+ machine=dict(
+ type='list', elements='dict',
+ options=dict(
+ targetAddress=dict(required=True, type='int'),
+ state=dict(type='str', choices=['on', 'off', 'shutdown', 'reset', 'boot']),
+ ),
+ ),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['state', 'machine'],
+ ),
+ )
+
+ if command is None:
+ module.fail_json(msg=missing_required_lib('pyghmi'), exception=PYGHMI_IMP_ERR)
+
+ name = module.params['name']
+ port = module.params['port']
+ user = module.params['user']
+ password = module.params['password']
+ state = module.params['state']
+ timeout = module.params['timeout']
+ machine = module.params['machine']
+
+ try:
+ if module.params['key']:
+ key = binascii.unhexlify(module.params['key'])
+ else:
+ key = None
+ except Exception:
+ module.fail_json(msg="Unable to convert 'key' from hex string.")
+
+ # --- run command ---
+ try:
+ ipmi_cmd = command.Command(
+ bmc=name, userid=user, password=password, port=port, kg=key
+ )
+ module.debug('ipmi instantiated - name: "%s"' % name)
+
+ changed = False
+ if machine is None:
+ current = ipmi_cmd.get_power()
+ if current['powerstate'] != state:
+ response = {'powerstate': state} if module.check_mode \
+ else ipmi_cmd.set_power(state, wait=timeout)
+ changed = True
+ else:
+ response = current
+
+ if 'error' in response:
+ module.fail_json(msg=response['error'])
+
+ module.exit_json(changed=changed, **response)
+ else:
+ response = []
+ for entry in machine:
+ taddr = entry['targetAddress']
+ if taddr >= INVALID_TARGET_ADDRESS:
+ module.fail_json(msg="targetAddress should be set between 0 to 255.")
+
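+                # For example, entry == {'targetAddress': 48, 'state': 'off'}
+                # leads to ipmi_cmd.get_power(bridge_request={'addr': 48}) below,
+                # and to a matching set_power() call later if a change is needed.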
+ try:
+ # bridge_request is supported on pyghmi 1.5.30 and later
+ current = ipmi_cmd.get_power(bridge_request={"addr": taddr})
+ except TypeError:
+ module.fail_json(
+ msg="targetAddress isn't supported on the installed pyghmi.")
+
+ if entry['state']:
+ tstate = entry['state']
+ elif state:
+ tstate = state
+ else:
+ module.fail_json(msg="Either state or suboption of machine state should be set.")
+
+ if current['powerstate'] != tstate:
+ changed = True
+ if not module.check_mode:
+ new = ipmi_cmd.set_power(tstate, wait=timeout, bridge_request={"addr": taddr})
+ if 'error' in new:
+ module.fail_json(msg=new['error'])
+
+ response.append(
+                        {'targetAddress': taddr, 'powerstate': new['powerstate']})
+
+ if current['powerstate'] == tstate or module.check_mode:
+                response.append({'targetAddress': taddr, 'powerstate': tstate})
+
+ module.exit_json(changed=changed, status=response)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/iptables_state.py b/ansible_collections/community/general/plugins/modules/iptables_state.py
new file mode 100644
index 000000000..d0ea7ad79
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/iptables_state.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, quidame <quidame@poivron.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: iptables_state
+short_description: Save iptables state into a file or restore it from a file
+version_added: '1.1.0'
+author: quidame (@quidame)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.flow
+description:
+ - C(iptables) is used to set up, maintain, and inspect the tables of IP
+ packet filter rules in the Linux kernel.
+ - This module handles the saving and/or loading of rules. This is the same
+ as the behaviour of the C(iptables-save) and C(iptables-restore) (or
+ C(ip6tables-save) and C(ip6tables-restore) for IPv6) commands which this
+ module uses internally.
+  - Modifying the state of the firewall remotely may lead to losing access to
+    the host in case of a mistake in the new ruleset. This module embeds a rollback
+ feature to avoid this, by telling the host to restore previous rules if a
+ cookie is still there after a given delay, and all this time telling the
+ controller to try to remove this cookie on the host through a new
+ connection.
+notes:
+ - The rollback feature is not a module option and depends on task's
+ attributes. To enable it, the module must be played asynchronously, i.e.
+    by setting task attributes I(poll) to C(0), and I(async) to a value less
+    than or equal to C(ANSIBLE_TIMEOUT). If I(async) is greater, the rollback
+    will still happen when it has to, but you will experience a connection
+    timeout instead of the more relevant information returned by the module
+    after its failure.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ action:
+ support: full
+ async:
+ support: full
+options:
+ counters:
+ description:
+ - Save or restore the values of all packet and byte counters.
+ - When C(true), the module is not idempotent.
+ type: bool
+ default: false
+ ip_version:
+ description:
+ - Which version of the IP protocol this module should apply to.
+ type: str
+ choices: [ ipv4, ipv6 ]
+ default: ipv4
+ modprobe:
+ description:
+ - Specify the path to the C(modprobe) program internally used by iptables
+ related commands to load kernel modules.
+ - By default, C(/proc/sys/kernel/modprobe) is inspected to determine the
+ executable's path.
+ type: path
+ noflush:
+ description:
+      - Only used when I(state=restored); ignored otherwise.
+ - If C(false), restoring iptables rules from a file flushes (deletes)
+ all previous contents of the respective table(s). If C(true), the
+ previous rules are left untouched (but policies are updated anyway,
+ for all built-in chains).
+ type: bool
+ default: false
+ path:
+ description:
+ - The file the iptables state should be saved to.
+ - The file the iptables state should be restored from.
+ type: path
+ required: true
+ state:
+ description:
+ - Whether the firewall state should be saved (into a file) or restored
+ (from a file).
+ type: str
+ choices: [ saved, restored ]
+ required: true
+ table:
+ description:
+ - When I(state=restored), restore only the named table even if the input
+ file contains other tables. Fail if the named table is not declared in
+ the file.
+ - When I(state=saved), restrict output to the specified table. If not
+ specified, output includes all active tables.
+ type: str
+ choices: [ filter, nat, mangle, raw, security ]
+ wait:
+ description:
+ - Wait N seconds for the xtables lock to prevent instant failure in case
+ multiple instances of the program are running concurrently.
+ type: int
+requirements: [iptables, ip6tables]
+'''
+
+EXAMPLES = r'''
+# This will apply to all loaded/active IPv4 tables.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ state: saved
+ path: /etc/sysconfig/iptables
+
+# This will apply only to IPv6 filter table.
+- name: Save current state of the firewall in system file
+ community.general.iptables_state:
+ ip_version: ipv6
+ table: filter
+ state: saved
+ path: /etc/iptables/rules.v6
+
+# This will load a state from a file, with a rollback in case of access loss
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will load new rules by appending them to the current ones
+- name: Restore firewall state from a file
+ community.general.iptables_state:
+ state: restored
+ path: /run/iptables.apply
+ noflush: true
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+# This will only retrieve information
+- name: Get current state of the firewall
+ community.general.iptables_state:
+ state: saved
+ path: /tmp/iptables
+ check_mode: true
+ changed_when: false
+ register: iptables_state
+
+- name: Show current state of the firewall
+ ansible.builtin.debug:
+ var: iptables_state.initial_state
+'''
+
+RETURN = r'''
+applied:
+ description: Whether or not the wanted state has been successfully restored.
+ type: bool
+ returned: always
+ sample: true
+initial_state:
+ description: The current state of the firewall when module starts.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD ACCEPT [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+restored:
+  description: The state the module restored, whether or not it was finally applied.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT DROP [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT",
+ "-A INPUT -m conntrack --ctstate INVALID -j DROP",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "COMMIT",
+ "# Completed"
+ ]
+saved:
+ description: The iptables state the module saved.
+ type: list
+ elements: str
+ returned: always
+ sample: [
+ "# Generated by xtables-save v1.8.2",
+ "*filter",
+ ":INPUT ACCEPT [0:0]",
+ ":FORWARD DROP [0:0]",
+ ":OUTPUT ACCEPT [0:0]",
+ "COMMIT",
+ "# Completed"
+ ]
+tables:
+  description: The iptables tables of interest when the module starts.
+ type: dict
+ contains:
+ table:
+ description: Policies and rules for all chains of the named table.
+ type: list
+ elements: str
+ sample: |-
+ {
+ "filter": [
+ ":INPUT ACCEPT",
+ ":FORWARD ACCEPT",
+ ":OUTPUT ACCEPT",
+ "-A INPUT -i lo -j ACCEPT",
+ "-A INPUT -p icmp -j ACCEPT",
+ "-A INPUT -p tcp -m tcp --dport 22 -j ACCEPT",
+ "-A INPUT -j REJECT --reject-with icmp-host-prohibited"
+ ],
+ "nat": [
+ ":PREROUTING ACCEPT",
+ ":INPUT ACCEPT",
+ ":OUTPUT ACCEPT",
+ ":POSTROUTING ACCEPT"
+ ]
+ }
+ returned: always
+'''
+
+
+import re
+import os
+import time
+import tempfile
+import filecmp
+import shutil
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+IPTABLES = dict(
+ ipv4='iptables',
+ ipv6='ip6tables',
+)
+
+SAVE = dict(
+ ipv4='iptables-save',
+ ipv6='ip6tables-save',
+)
+
+RESTORE = dict(
+ ipv4='iptables-restore',
+ ipv6='ip6tables-restore',
+)
+
+TABLES = ['filter', 'mangle', 'nat', 'raw', 'security']
+
+
+def read_state(b_path):
+ '''
+ Read a file and store its content in a variable as a list.
+ '''
+ with open(b_path, 'r') as f:
+ text = f.read()
+ return [t for t in text.splitlines() if t != '']
+
+
+def write_state(b_path, lines, changed):
+ '''
+ Write given contents to the given path, and return changed status.
+ '''
+ # Populate a temporary file
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write("{0}\n".format("\n".join(lines)))
+
+ # Prepare to copy temporary file to the final destination
+ if not os.path.exists(b_path):
+ b_destdir = os.path.dirname(b_path)
+ destdir = to_native(b_destdir, errors='surrogate_or_strict')
+ if b_destdir and not os.path.exists(b_destdir) and not module.check_mode:
+ try:
+ os.makedirs(b_destdir)
+ except Exception as err:
+ module.fail_json(
+ msg='Error creating %s: %s' % (destdir, to_native(err)),
+ initial_state=lines)
+ changed = True
+
+ elif not filecmp.cmp(tmpfile, b_path):
+ changed = True
+
+ # Do it
+ if changed and not module.check_mode:
+ try:
+ shutil.copyfile(tmpfile, b_path)
+ except Exception as err:
+ path = to_native(b_path, errors='surrogate_or_strict')
+ module.fail_json(
+ msg='Error saving state into %s: %s' % (path, to_native(err)),
+ initial_state=lines)
+
+ return changed
+
+
+def initialize_from_null_state(initializer, initcommand, fallbackcmd, table):
+ '''
+    This ensures the iptables-save output is suitable for iptables-restore to
+    roll back to, i.e. that it is not empty. This also works for the
+    iptables-nft-save alternative.
+ '''
+ if table is None:
+ table = 'filter'
+
+ commandline = list(initializer)
+ commandline += ['-t', table]
+ dummy = module.run_command(commandline, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+ if '*%s' % table not in out.splitlines():
+ # The last resort.
+ iptables_input = '*%s\n:OUTPUT ACCEPT\nCOMMIT\n' % table
+ dummy = module.run_command(fallbackcmd, data=iptables_input, check_rc=True)
+ (rc, out, err) = module.run_command(initcommand, check_rc=True)
+
+ return rc, out, err
+
+
+def filter_and_format_state(string):
+ '''
+    Remove timestamps to ensure idempotence between runs, and remove counters
+    unless they were requested. Return the result as a list.
+ '''
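+    # For example, "# Generated by xtables-save v1.8.2 on Mon Jan  6 11:22:33 2020"
+    # becomes "# Generated by xtables-save v1.8.2", and counters such as
+    # "[1234:56789]" are reset to "[0:0]" unless 'counters' is enabled.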
+ string = re.sub(r'((^|\n)# (Generated|Completed)[^\n]*) on [^\n]*', r'\1', string)
+ if not module.params['counters']:
+ string = re.sub(r'\[[0-9]+:[0-9]+\]', r'[0:0]', string)
+ lines = [line for line in string.splitlines() if line != '']
+ return lines
+
+
+def per_table_state(command, state):
+ '''
+    Convert raw iptables-save output into a usable data structure, for reliable
+ comparisons between initial and final states.
+ '''
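+    # The result maps each active table to its policies and rules, e.g.
+    # {'filter': [':INPUT ACCEPT', '-A INPUT -i lo -j ACCEPT'], 'nat': [...]},
+    # matching the 'tables' sample in the RETURN documentation above.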
+ tables = dict()
+ for t in TABLES:
+ COMMAND = list(command)
+ if '*%s' % t in state.splitlines():
+ COMMAND.extend(['--table', t])
+ dummy, out, dummy = module.run_command(COMMAND, check_rc=True)
+ out = re.sub(r'(^|\n)(# Generated|# Completed|[*]%s|COMMIT)[^\n]*' % t, r'', out)
+ out = re.sub(r' *\[[0-9]+:[0-9]+\] *', r'', out)
+ tables[t] = [tt for tt in out.splitlines() if tt != '']
+ return tables
+
+
+def main():
+
+ global module
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ state=dict(type='str', choices=['saved', 'restored'], required=True),
+ table=dict(type='str', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
+ noflush=dict(type='bool', default=False),
+ counters=dict(type='bool', default=False),
+ modprobe=dict(type='path'),
+ ip_version=dict(type='str', choices=['ipv4', 'ipv6'], default='ipv4'),
+ wait=dict(type='int'),
+ _timeout=dict(type='int'),
+ _back=dict(type='path'),
+ ),
+ required_together=[
+ ['_timeout', '_back'],
+ ],
+ supports_check_mode=True,
+ )
+
+ # We'll parse iptables-restore stderr
+ module.run_command_environ_update = dict(LANG='C', LC_MESSAGES='C')
+
+ path = module.params['path']
+ state = module.params['state']
+ table = module.params['table']
+ noflush = module.params['noflush']
+ counters = module.params['counters']
+ modprobe = module.params['modprobe']
+ ip_version = module.params['ip_version']
+ wait = module.params['wait']
+ _timeout = module.params['_timeout']
+ _back = module.params['_back']
+
+ bin_iptables = module.get_bin_path(IPTABLES[ip_version], True)
+ bin_iptables_save = module.get_bin_path(SAVE[ip_version], True)
+ bin_iptables_restore = module.get_bin_path(RESTORE[ip_version], True)
+
+ os.umask(0o077)
+ changed = False
+ COMMANDARGS = []
+ INITCOMMAND = [bin_iptables_save]
+ INITIALIZER = [bin_iptables, '-L', '-n']
+ TESTCOMMAND = [bin_iptables_restore, '--test']
+ FALLBACKCMD = [bin_iptables_restore]
+
+ if counters:
+ COMMANDARGS.append('--counters')
+
+ if table is not None:
+ COMMANDARGS.extend(['--table', table])
+
+ if wait is not None:
+ TESTCOMMAND.extend(['--wait', '%s' % wait])
+
+ if modprobe is not None:
+ b_modprobe = to_bytes(modprobe, errors='surrogate_or_strict')
+ if not os.path.exists(b_modprobe):
+ module.fail_json(msg="modprobe %s not found" % modprobe)
+ if not os.path.isfile(b_modprobe):
+ module.fail_json(msg="modprobe %s not a file" % modprobe)
+ if not os.access(b_modprobe, os.R_OK):
+ module.fail_json(msg="modprobe %s not readable" % modprobe)
+ if not os.access(b_modprobe, os.X_OK):
+ module.fail_json(msg="modprobe %s not executable" % modprobe)
+ COMMANDARGS.extend(['--modprobe', modprobe])
+ INITIALIZER.extend(['--modprobe', modprobe])
+ INITCOMMAND.extend(['--modprobe', modprobe])
+ TESTCOMMAND.extend(['--modprobe', modprobe])
+ FALLBACKCMD.extend(['--modprobe', modprobe])
+
+ SAVECOMMAND = list(COMMANDARGS)
+ SAVECOMMAND.insert(0, bin_iptables_save)
+
+ b_path = to_bytes(path, errors='surrogate_or_strict')
+
+ if state == 'restored':
+ if not os.path.exists(b_path):
+ module.fail_json(msg="Source %s not found" % path)
+ if not os.path.isfile(b_path):
+ module.fail_json(msg="Source %s not a file" % path)
+ if not os.access(b_path, os.R_OK):
+ module.fail_json(msg="Source %s not readable" % path)
+ state_to_restore = read_state(b_path)
+ else:
+ cmd = ' '.join(SAVECOMMAND)
+
+ (rc, stdout, stderr) = module.run_command(INITCOMMAND, check_rc=True)
+
+    # The issue arises when restoring state from an empty iptables-save
+    # output... what happens when, say:
+    # - no table is specified, and iptables-save's output contains only the nat table;
+    # - we feed the filter ruleset to iptables-restore, and it locks us out
+    #   of the host;
+    # then trying to roll the iptables state back to the previous (working) setup
+    # doesn't override the current filter table, because no filter table is stored
+    # in the backup! So we have to ensure that the tables to be restored have a
+    # backup in case of rollback.
+ if table is None:
+ if state == 'restored':
+ for t in TABLES:
+ if '*%s' % t in state_to_restore:
+ if len(stdout) == 0 or '*%s' % t not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, t)
+ elif len(stdout) == 0:
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, 'filter')
+
+ elif state == 'restored' and '*%s' % table not in state_to_restore:
+ module.fail_json(msg="Table %s to restore not defined in %s" % (table, path))
+
+ elif len(stdout) == 0 or '*%s' % table not in stdout.splitlines():
+ (rc, stdout, stderr) = initialize_from_null_state(INITIALIZER, INITCOMMAND, FALLBACKCMD, table)
+
+ initial_state = filter_and_format_state(stdout)
+ if initial_state is None:
+ module.fail_json(msg="Unable to initialize firewall from NULL state.")
+
+ # Depending on the value of 'table', initref_state may differ from
+ # initial_state.
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_before = per_table_state(SAVECOMMAND, stdout)
+ initref_state = filter_and_format_state(stdout)
+
+ if state == 'saved':
+ changed = write_state(b_path, initref_state, changed)
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ saved=initref_state)
+
+ #
+ # All remaining code is for state=restored
+ #
+
+ MAINCOMMAND = list(COMMANDARGS)
+ MAINCOMMAND.insert(0, bin_iptables_restore)
+
+ if wait is not None:
+ MAINCOMMAND.extend(['--wait', '%s' % wait])
+
+ if _back is not None:
+ b_back = to_bytes(_back, errors='surrogate_or_strict')
+ dummy = write_state(b_back, initref_state, changed)
+ BACKCOMMAND = list(MAINCOMMAND)
+ BACKCOMMAND.append(_back)
+
+ if noflush:
+ MAINCOMMAND.append('--noflush')
+
+ MAINCOMMAND.append(path)
+ cmd = ' '.join(MAINCOMMAND)
+
+ TESTCOMMAND = list(MAINCOMMAND)
+ TESTCOMMAND.insert(1, '--test')
+ error_msg = "Source %s is not suitable for input to %s" % (path, os.path.basename(bin_iptables_restore))
+
+ # Due to a bug in iptables-nft-restore --test, we have to validate tables
+ # one by one (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=960003).
+ for t in tables_before:
+ testcommand = list(TESTCOMMAND)
+ testcommand.extend(['--table', t])
+ (rc, stdout, stderr) = module.run_command(testcommand)
+
+ if 'Another app is currently holding the xtables lock' in stderr:
+ error_msg = stderr
+
+ if rc != 0:
+ cmd = ' '.join(testcommand)
+ module.fail_json(
+ msg=error_msg,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ if module.check_mode:
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write("{0}\n".format("\n".join(initial_state)))
+
+ if filecmp.cmp(tmpfile, b_path):
+ restored_state = initial_state
+ else:
+ restored_state = state_to_restore
+
+ else:
+        # Give the action plugin enough time to retrieve the async status of the
+        # module in case of a bad option type/value and the like.
+ if _back is not None:
+ b_starter = to_bytes('%s.starter' % _back, errors='surrogate_or_strict')
+ while True:
+ if os.path.exists(b_starter):
+ os.remove(b_starter)
+ break
+ time.sleep(0.01)
+
+ (rc, stdout, stderr) = module.run_command(MAINCOMMAND)
+ if 'Another app is currently holding the xtables lock' in stderr:
+ module.fail_json(
+ msg=stderr,
+ cmd=cmd,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=state_to_restore,
+ applied=False)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ restored_state = filter_and_format_state(stdout)
+
+ if restored_state not in (initref_state, initial_state):
+ if module.check_mode:
+ changed = True
+ else:
+ tables_after = per_table_state(SAVECOMMAND, stdout)
+ if tables_after != tables_before:
+ changed = True
+
+ if _back is None or module.check_mode:
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # The rollback implementation currently needs:
+ # Here:
+ # * test existence of the backup file, exit with success if it doesn't exist
+ # * otherwise, restore iptables from this file and return failure
+ # Action plugin:
+ # * try to remove the backup file
+    # * wait until the async task is finished and retrieve its final status
+ # * modify it and return the result
+ # Task:
+    # * task attribute 'async' set to the same value as (or lower than) the
+    #   ansible timeout
+ # * task attribute 'poll' equals 0
+ #
+ for dummy in range(_timeout):
+ if os.path.exists(b_back):
+ time.sleep(1)
+ continue
+ module.exit_json(
+ changed=changed,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=True)
+
+ # Here we are: for whatever reason, but probably due to the current ruleset,
+ # the action plugin (i.e. on the controller) was unable to remove the backup
+ # cookie, so we restore initial state from it.
+ (rc, stdout, stderr) = module.run_command(BACKCOMMAND, check_rc=True)
+ os.remove(b_back)
+
+ (rc, stdout, stderr) = module.run_command(SAVECOMMAND, check_rc=True)
+ tables_rollback = per_table_state(SAVECOMMAND, stdout)
+
+ msg = (
+ "Failed to confirm state restored from %s after %ss. "
+ "Firewall has been rolled back to its initial state." % (path, _timeout)
+ )
+
+ module.fail_json(
+ changed=(tables_before != tables_rollback),
+ msg=msg,
+ cmd=cmd,
+ tables=tables_before,
+ initial_state=initial_state,
+ restored=restored_state,
+ applied=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ipwcli_dns.py b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
new file mode 100644
index 000000000..7b05aefb7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ipwcli_dns.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Christian Wollinger <cwollinger@web.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ipwcli_dns
+
+short_description: Manage DNS Records for Ericsson IPWorks via ipwcli
+
+version_added: '0.2.0'
+
+description:
+ - "Manage DNS records for the Ericsson IPWorks DNS server. The module will use the ipwcli to deploy the DNS records."
+
+requirements:
+ - ipwcli (installed on Ericsson IPWorks)
+
+notes:
+ - To make the DNS record changes effective, you need to run C(update dnsserver) on the ipwcli.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ dnsname:
+ description:
+ - Name of the record.
+ required: true
+ type: str
+ type:
+ description:
+ - Type of the record.
+ required: true
+ type: str
+ choices: [ NAPTR, SRV, A, AAAA ]
+ container:
+ description:
+ - Sets the container zone for the record.
+ required: true
+ type: str
+ address:
+ description:
+ - The IP address for the A or AAAA record.
+ - Required for I(type=A) or I(type=AAAA).
+ type: str
+ ttl:
+ description:
+ - Sets the TTL of the record.
+ type: int
+ default: 3600
+ state:
+ description:
+ - Whether the record should exist or not.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ priority:
+ description:
+ - Sets the priority of the SRV record.
+ type: int
+ default: 10
+ weight:
+ description:
+ - Sets the weight of the SRV record.
+ type: int
+ default: 10
+ port:
+ description:
+ - Sets the port of the SRV record.
+ - Required for I(type=SRV).
+ type: int
+ target:
+ description:
+ - Sets the target of the SRV record.
+ - Required for I(type=SRV).
+ type: str
+ order:
+ description:
+ - Sets the order of the NAPTR record.
+ - Required for I(type=NAPTR).
+ type: int
+ preference:
+ description:
+ - Sets the preference of the NAPTR record.
+ - Required for I(type=NAPTR).
+ type: int
+ flags:
+ description:
+ - Sets one of the possible flags of NAPTR record.
+ - Required for I(type=NAPTR).
+ type: str
+ choices: ['S', 'A', 'U', 'P']
+ service:
+ description:
+ - Sets the service of the NAPTR record.
+ - Required for I(type=NAPTR).
+ type: str
+ replacement:
+ description:
+ - Sets the replacement of the NAPTR record.
+ - Required for I(type=NAPTR).
+ type: str
+ username:
+ description:
+ - Username to login on ipwcli.
+ type: str
+ required: true
+ password:
+ description:
+ - Password to login on ipwcli.
+ type: str
+ required: true
+
+author:
+ - Christian Wollinger (@cwollinger)
+'''
+
+EXAMPLES = '''
+- name: Create A record
+ community.general.ipwcli_dns:
+ dnsname: example.com
+ type: A
+ container: ZoneOne
+ address: 127.0.0.1
+
+- name: Remove SRV record if exists
+ community.general.ipwcli_dns:
+ dnsname: _sip._tcp.test.example.com
+ type: SRV
+ container: ZoneOne
+ ttl: 100
+ state: absent
+ target: example.com
+ port: 5060
+
+- name: Create NAPTR record
+ community.general.ipwcli_dns:
+ dnsname: test.example.com
+ type: NAPTR
+ preference: 10
+ container: ZoneOne
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.com.'
+ flags: S
+'''
+
+RETURN = '''
+record:
+  description: The created record from the input params.
+ type: str
+ returned: always
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ResourceRecord(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.dnsname = module.params['dnsname']
+ self.dnstype = module.params['type']
+ self.container = module.params['container']
+ self.address = module.params['address']
+ self.ttl = module.params['ttl']
+ self.state = module.params['state']
+ self.priority = module.params['priority']
+ self.weight = module.params['weight']
+ self.port = module.params['port']
+ self.target = module.params['target']
+ self.order = module.params['order']
+ self.preference = module.params['preference']
+ self.flags = module.params['flags']
+ self.service = module.params['service']
+ self.replacement = module.params['replacement']
+ self.user = module.params['username']
+ self.password = module.params['password']
+
+ def create_naptrrecord(self):
+ # create NAPTR record with the given params
+ record = ('naptrrecord %s -set ttl=%s;container=%s;order=%s;preference=%s;flags="%s";service="%s";replacement="%s"'
+ % (self.dnsname, self.ttl, self.container, self.order, self.preference, self.flags, self.service, self.replacement))
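+        # For illustration, with the values from the EXAMPLES section this yields:
+        # naptrrecord test.example.com -set ttl=100;container=ZoneOne;order=10;preference=10;flags="S";service="SIP+D2T";replacement="_sip._tcp.test.example.com."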
+ return record
+
+ def create_srvrecord(self):
+ # create SRV record with the given params
+ record = ('srvrecord %s -set ttl=%s;container=%s;priority=%s;weight=%s;port=%s;target=%s'
+ % (self.dnsname, self.ttl, self.container, self.priority, self.weight, self.port, self.target))
+ return record
+
+ def create_arecord(self):
+ # create A record with the given params
+ if self.dnstype == 'AAAA':
+ record = 'aaaarecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+ else:
+ record = 'arecord %s %s -set ttl=%s;container=%s' % (self.dnsname, self.address, self.ttl, self.container)
+
+ return record
+
+ def list_record(self, record):
+ # check if the record exists via list on ipwcli
+ search = 'list %s' % (record.replace(';', '&&').replace('set', 'where'))
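+        # For illustration, the A record from the EXAMPLES section becomes:
+        # list arecord example.com 127.0.0.1 -where ttl=3600&&container=ZoneOne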
+ cmd = [
+ self.module.get_bin_path('ipwcli', True),
+ '-user=%s' % self.user,
+ '-password=%s' % self.password,
+ ]
+ rc, out, err = self.module.run_command(cmd, data=search)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if (('ARecord %s' % self.dnsname in out and rc == 0) or ('SRVRecord %s' % self.dnsname in out and rc == 0) or
+ ('NAPTRRecord %s' % self.dnsname in out and rc == 0)):
+ return True, rc, out, err
+
+ return False, rc, out, err
+
+ def deploy_record(self, record):
+        # create the record on the ipworks server via ipwcli
+ stdin = 'create %s' % (record)
+ cmd = [
+ self.module.get_bin_path('ipwcli', True),
+ '-user=%s' % self.user,
+ '-password=%s' % self.password,
+ ]
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) created.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record creation failed', stderr=out)
+
+ def delete_record(self, record):
+        # delete the record on the ipworks server via ipwcli
+ stdin = 'delete %s' % (record.replace(';', '&&').replace('set', 'where'))
+ cmd = [
+ self.module.get_bin_path('ipwcli', True),
+ '-user=%s' % self.user,
+ '-password=%s' % self.password,
+ ]
+ rc, out, err = self.module.run_command(cmd, data=stdin)
+
+ if 'Invalid username or password' in out:
+ self.module.fail_json(msg='access denied at ipwcli login: Invalid username or password')
+
+ if '1 object(s) were updated.' in out:
+ return rc, out, err
+ else:
+ self.module.fail_json(msg='record deletion failed', stderr=out)
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ dnsname=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=['A', 'AAAA', 'SRV', 'NAPTR']),
+ container=dict(type='str', required=True),
+ address=dict(type='str', required=False),
+ ttl=dict(type='int', required=False, default=3600),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ priority=dict(type='int', required=False, default=10),
+ weight=dict(type='int', required=False, default=10),
+ port=dict(type='int', required=False),
+ target=dict(type='str', required=False),
+ order=dict(type='int', required=False),
+ preference=dict(type='int', required=False),
+ flags=dict(type='str', required=False, choices=['S', 'A', 'U', 'P']),
+ service=dict(type='str', required=False),
+ replacement=dict(type='str', required=False),
+ username=dict(type='str', required=True),
+ password=dict(type='str', required=True, no_log=True)
+ )
+
+ # define result
+ result = dict(
+ changed=False,
+ stdout='',
+ stderr='',
+ rc=0,
+ record=''
+ )
+
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ['type', 'A', ['address']],
+ ['type', 'AAAA', ['address']],
+ ['type', 'SRV', ['port', 'target']],
+ ['type', 'NAPTR', ['preference', 'order', 'service', 'replacement']],
+ ],
+ supports_check_mode=True
+ )
+
+ user = ResourceRecord(module)
+
+ if user.dnstype == 'NAPTR':
+ record = user.create_naptrrecord()
+ elif user.dnstype == 'SRV':
+ record = user.create_srvrecord()
+ elif user.dnstype == 'A' or user.dnstype == 'AAAA':
+ record = user.create_arecord()
+
+ found, rc, out, err = user.list_record(record)
+
+ if found and user.state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.delete_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ elif not found and user.state == 'present':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = user.deploy_record(record)
+ result['changed'] = True
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+ else:
+ result['changed'] = False
+ result['record'] = record
+ result['rc'] = rc
+ result['stdout'] = out
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/irc.py b/ansible_collections/community/general/plugins/modules/irc.py
new file mode 100644
index 000000000..6cd7bc120
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/irc.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: irc
+short_description: Send a message to an IRC channel or a nick
+description:
+ - Send a message to an IRC channel or a nick. This is a very simplistic implementation.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ server:
+ type: str
+ description:
+      - IRC server name/address.
+ default: localhost
+ port:
+ type: int
+ description:
+      - IRC server port number.
+ default: 6667
+ nick:
+ type: str
+ description:
+ - Nickname to send the message from. May be shortened, depending on server's NICKLEN setting.
+ default: ansible
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ topic:
+ type: str
+ description:
+      - Set the channel topic.
+ color:
+ type: str
+ description:
+      - Text color for the message. ("none" is a valid option in 1.6 or later; in 1.6 and prior, the default color is black, not "none".)
+        Added 11 more colors in version 2.0.
+ default: "none"
+ choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray", "light_gray"]
+ aliases: [colour]
+ channel:
+ type: str
+ description:
+ - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
+ nick_to:
+ type: list
+ elements: str
+ description:
+ - A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
+ key:
+ type: str
+ description:
+      - Channel key.
+ passwd:
+ type: str
+ description:
+      - Server password.
+ timeout:
+ type: int
+ description:
+      - Timeout to use while waiting for successful registration and join
+        messages; this is to prevent an endless loop.
+ default: 30
+ use_ssl:
+ description:
+      - Designates whether TLS/SSL should be used when connecting to the IRC server.
+ type: bool
+ default: false
+ part:
+ description:
+ - Designates whether user should part from channel after sending message or not.
+ Useful for when using a faux bot and not wanting join/parts between messages.
+ type: bool
+ default: true
+ style:
+ type: str
+ description:
+      - Text style for the message. Note that italic does not work on some clients.
+ choices: [ "bold", "underline", "reverse", "italic", "none" ]
+ default: none
+
+# informational: requirements for nodes
+requirements: [ socket ]
+author:
+ - "Jan-Piet Mens (@jpmens)"
+ - "Matt Martz (@sivel)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to an IRC channel from nick ansible
+ community.general.irc:
+ server: irc.example.net
+    channel: '#t1'
+ msg: Hello world
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+
+- name: Send a message to an IRC channel
+ local_action:
+ module: irc
+ port: 6669
+ server: irc.example.net
+    channel: '#t1'
+ nick_to:
+ - nick1
+ - nick2
+ msg: 'All finished at {{ ansible_date_time.iso8601 }}'
+ color: red
+ nick: ansibleIRC
+'''
+
+# ===========================================
+# IRC module support methods.
+#
+
+import re
+import socket
+import ssl
+import time
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=None, key=None, topic=None,
+ nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
+ '''send message to IRC'''
+ nick_to = [] if nick_to is None else nick_to
+
+ colornumbers = {
+ 'white': "00",
+ 'black': "01",
+ 'blue': "02",
+ 'green': "03",
+ 'red': "04",
+ 'brown': "05",
+ 'purple': "06",
+ 'orange': "07",
+ 'yellow': "08",
+ 'light_green': "09",
+ 'teal': "10",
+ 'light_cyan': "11",
+ 'light_blue': "12",
+ 'pink': "13",
+ 'gray': "14",
+ 'light_gray': "15",
+ }
+
+ stylechoices = {
+ 'bold': "\x02",
+ 'underline': "\x1F",
+ 'reverse': "\x16",
+ 'italic': "\x1D",
+ }
+
+ try:
+ styletext = stylechoices[style]
+ except Exception:
+ styletext = ""
+
+ try:
+ colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
+ except Exception:
+ colortext = ""
+
+ message = styletext + colortext + msg
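+    # For illustration: style='bold' and color='red' produce
+    # '\x02' + '\x0304' + msg, i.e. the mIRC bold and color-04 control codes.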
+
+ irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if use_ssl:
+ irc = ssl.wrap_socket(irc)
+ irc.connect((server, int(port)))
+
+ if passwd:
+ irc.send(to_bytes('PASS %s\r\n' % passwd))
+ irc.send(to_bytes('NICK %s\r\n' % nick))
+ irc.send(to_bytes('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)))
+ motd = ''
+ start = time.time()
+ while 1:
+ motd += to_native(irc.recv(1024))
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
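+        # For illustration (hypothetical welcome line): ':irc.example.net 001 ansible1 :Welcome'
+        # would match here and yield nick='ansible1'.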
+ match = re.search(r'^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC server welcome response')
+ time.sleep(0.5)
+
+ if channel:
+ if key:
+ irc.send(to_bytes('JOIN %s %s\r\n' % (channel, key)))
+ else:
+ irc.send(to_bytes('JOIN %s\r\n' % channel))
+
+ join = ''
+ start = time.time()
+ while 1:
+ join += to_native(irc.recv(1024))
+ if re.search(r'^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M | re.I):
+ break
+ elif time.time() - start > timeout:
+ raise Exception('Timeout waiting for IRC JOIN response')
+ time.sleep(0.5)
+
+ if topic is not None:
+ irc.send(to_bytes('TOPIC %s :%s\r\n' % (channel, topic)))
+ time.sleep(1)
+
+ if nick_to:
+ for nick in nick_to:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (nick, message)))
+ if channel:
+ irc.send(to_bytes('PRIVMSG %s :%s\r\n' % (channel, message)))
+ time.sleep(1)
+ if part:
+ if channel:
+ irc.send(to_bytes('PART %s\r\n' % channel))
+ irc.send(to_bytes('QUIT\r\n'))
+ time.sleep(1)
+ irc.close()
+
+# ===========================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(type='int', default=6667),
+ nick=dict(default='ansible'),
+ nick_to=dict(required=False, type='list', elements='str'),
+ msg=dict(required=True),
+ color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+ "green", "red", "brown",
+ "purple", "orange", "yellow",
+ "light_green", "teal", "light_cyan",
+ "light_blue", "pink", "gray",
+ "light_gray", "none"]),
+ style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
+ channel=dict(required=False),
+ key=dict(no_log=True),
+ topic=dict(),
+ passwd=dict(no_log=True),
+ timeout=dict(type='int', default=30),
+ part=dict(type='bool', default=True),
+ use_ssl=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ required_one_of=[['channel', 'nick_to']]
+ )
+
+ server = module.params["server"]
+ port = module.params["port"]
+ nick = module.params["nick"]
+ nick_to = module.params["nick_to"]
+ msg = module.params["msg"]
+ color = module.params["color"]
+ channel = module.params["channel"]
+ topic = module.params["topic"]
+ if topic and not channel:
+ module.fail_json(msg="When topic is specified, a channel is required.")
+ key = module.params["key"]
+ passwd = module.params["passwd"]
+ timeout = module.params["timeout"]
+ use_ssl = module.params["use_ssl"]
+ part = module.params["part"]
+ style = module.params["style"]
+
+ try:
+ send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
+ except Exception as e:
+ module.fail_json(msg="unable to send to IRC: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, channel=channel, nick=nick,
+ msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/iso_create.py b/ansible_collections/community/general/plugins/modules/iso_create.py
new file mode 100644
index 000000000..4b51be96d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/iso_create.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Ansible Project
+# Copyright (c) 2020, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_create
+short_description: Generate ISO file with specified files or folders
+description:
+  - This module is used to generate an ISO file from specified files or folders.
+author:
+ - Diane Wang (@Tomorrow9) <dianew@vmware.com>
+requirements:
+ - "pycdlib"
+ - "python >= 2.7"
+version_added: '0.2.0'
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ src_files:
+ description:
+ - This is a list of absolute paths of source files or folders which will be contained in the new generated ISO file.
+    - Will fail if a specified file or folder in C(src_files) does not exist on the local machine.
+ - 'Note: With all ISO9660 levels from 1 to 3, all file names are restricted to uppercase letters, numbers and
+ underscores (_). File names are limited to 31 characters, directory nesting is limited to 8 levels, and path
+ names are limited to 255 characters.'
+ type: list
+ required: true
+ elements: path
+ dest_iso:
+ description:
+ - The absolute path with file name of the new generated ISO file on local machine.
+    - Will create intermediate folders when they do not exist.
+ type: path
+ required: true
+ interchange_level:
+ description:
+    - The ISO9660 interchange level to use; it dictates the rules on the names of files.
+    - Valid values are C(1), C(2), C(3), or C(4).
+    - The default value is level C(1), which is the most conservative; level C(3) is recommended.
+ - ISO9660 file names at interchange level C(1) cannot have more than 8 characters or 3 characters in the extension.
+ type: int
+ default: 1
+ choices: [1, 2, 3, 4]
+ vol_ident:
+ description:
+ - The volume identification string to use on the new generated ISO image.
+ type: str
+ rock_ridge:
+ description:
+ - Whether to make this ISO have the Rock Ridge extensions or not.
+    - Valid values are C(1.09), C(1.10) or C(1.12), meaning that the specified Rock Ridge version is added to the ISO.
+    - If unsure, set C(1.09) to ensure maximum compatibility.
+    - If not specified, no Rock Ridge extension is added to the ISO.
+ type: str
+ choices: ['1.09', '1.10', '1.12']
+ joliet:
+ description:
+ - Support levels and valid values are C(1), C(2), or C(3).
+ - Level C(3) is by far the most common.
+ - If not specified, then no Joliet support is added.
+ type: int
+ choices: [1, 2, 3]
+ udf:
+ description:
+ - Whether to add UDF support to this ISO.
+    - If set to C(true), then version 2.60 of the UDF spec is used.
+    - If not specified or set to C(false), no UDF support is added.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Create an ISO file
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ interchange_level: 3
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - /root/testfile.yml
+ - /root/testfolder
+ dest_iso: /tmp/test.iso
+ rock_ridge: 1.09
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - ./windows_config/Autounattend.xml
+ dest_iso: ./test.iso
+ interchange_level: 3
+ joliet: 3
+ vol_ident: WIN_AUTOINSTALL
+'''
+
+RETURN = r'''
+source_file:
+ description: Configured source files or directories list.
+ returned: on success
+ type: list
+ elements: path
+ sample: ["/path/to/file.txt", "/path/to/folder"]
+created_iso:
+ description: Created iso file path.
+ returned: on success
+ type: str
+ sample: "/path/to/test.iso"
+interchange_level:
+ description: Configured interchange level.
+ returned: on success
+ type: int
+ sample: 3
+vol_ident:
+ description: Configured volume identification string.
+ returned: on success
+ type: str
+ sample: "OEMDRV"
+joliet:
+ description: Configured Joliet support level.
+ returned: on success
+ type: int
+ sample: 3
+rock_ridge:
+ description: Configured Rock Ridge version.
+ returned: on success
+ type: str
+ sample: "1.09"
+udf:
+ description: Configured UDF support.
+ returned: on success
+ type: bool
+ sample: false
+'''
+
+import os
+import traceback
+
+PYCDLIB_IMP_ERR = None
+try:
+ import pycdlib
+ HAS_PYCDLIB = True
+except ImportError:
+ PYCDLIB_IMP_ERR = traceback.format_exc()
+ HAS_PYCDLIB = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def add_file(module, iso_file=None, src_file=None, file_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ # In standard ISO interchange level 1, file names have a maximum of 8 characters, followed by a required dot,
+ # followed by a maximum 3 character extension, followed by a semicolon and a version
+ file_name = os.path.basename(file_path)
+ if '.' not in file_name:
+ file_in_iso_path = file_path.upper() + '.;1'
+ else:
+ file_in_iso_path = file_path.upper() + ';1'
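+    # For illustration: '/testfile.yml' becomes '/TESTFILE.YML;1', and a file
+    # without an extension such as '/README' becomes '/README.;1'.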
+ if rock_ridge:
+ rr_name = file_name
+ if use_joliet:
+ joliet_path = file_path
+ if use_udf:
+ udf_path = file_path
+ try:
+ iso_file.add_file(src_file, iso_path=file_in_iso_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to add file %s to ISO file due to %s" % (src_file, to_native(err)))
+
+
+def add_directory(module, iso_file=None, dir_path=None, rock_ridge=None, use_joliet=None, use_udf=None):
+ rr_name = None
+ joliet_path = None
+ udf_path = None
+ iso_dir_path = dir_path.upper()
+ if rock_ridge:
+ rr_name = os.path.basename(dir_path)
+ if use_joliet:
+ joliet_path = dir_path
+ if use_udf:
+ udf_path = dir_path
+ try:
+ iso_file.add_directory(iso_path=iso_dir_path, rr_name=rr_name, joliet_path=joliet_path, udf_path=udf_path)
+ except Exception as err:
+ module.fail_json(msg="Failed to directory %s to ISO file due to %s" % (dir_path, to_native(err)))
+
+
+def main():
+ argument_spec = dict(
+ src_files=dict(type='list', required=True, elements='path'),
+ dest_iso=dict(type='path', required=True),
+ interchange_level=dict(type='int', choices=[1, 2, 3, 4], default=1),
+ vol_ident=dict(type='str'),
+ rock_ridge=dict(type='str', choices=['1.09', '1.10', '1.12']),
+ joliet=dict(type='int', choices=[1, 2, 3]),
+ udf=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_PYCDLIB:
+ module.fail_json(missing_required_lib('pycdlib'), exception=PYCDLIB_IMP_ERR)
+
+ src_file_list = module.params.get('src_files')
+    if not src_file_list:
+        module.fail_json(msg='Please specify source file and/or directory list using the src_files parameter.')
+ for src_file in src_file_list:
+ if not os.path.exists(src_file):
+ module.fail_json(msg="Specified source file/directory path does not exist on local machine, %s" % src_file)
+
+ dest_iso = module.params.get('dest_iso')
+    if not dest_iso:
+        module.fail_json(msg='Please specify the absolute path of the newly created ISO file using the dest_iso parameter.')
+
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ # will create intermediate dir for new ISO file
+ try:
+ os.makedirs(dest_iso_dir)
+ except OSError as err:
+ module.fail_json(msg='Exception caught when creating folder %s, with error %s' % (dest_iso_dir, to_native(err)))
+
+ volume_id = module.params.get('vol_ident')
+ if volume_id is None:
+ volume_id = ''
+ inter_level = module.params.get('interchange_level')
+ rock_ridge = module.params.get('rock_ridge')
+ use_joliet = module.params.get('joliet')
+ use_udf = None
+ if module.params['udf']:
+ use_udf = '2.60'
+
+ result = dict(
+ changed=False,
+ source_file=src_file_list,
+ created_iso=dest_iso,
+ interchange_level=inter_level,
+ vol_ident=volume_id,
+ rock_ridge=rock_ridge,
+ joliet=use_joliet,
+ udf=use_udf
+ )
+ if not module.check_mode:
+ iso_file = pycdlib.PyCdlib(always_consistent=True)
+ iso_file.new(interchange_level=inter_level, vol_ident=volume_id, rock_ridge=rock_ridge, joliet=use_joliet, udf=use_udf)
+
+ for src_file in src_file_list:
+ # if specify a dir then go through the dir to add files and dirs
+ if os.path.isdir(src_file):
+ dir_list = []
+ file_list = []
+ src_file = src_file.rstrip('/')
+ dir_name = os.path.basename(src_file)
+ add_directory(module, iso_file=iso_file, dir_path='/' + dir_name, rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+
+ # get dir list and file list
+ for path, dirs, files in os.walk(src_file):
+ for filename in files:
+ file_list.append(os.path.join(path, filename))
+                    for sub_dir in dirs:
+                        dir_list.append(os.path.join(path, sub_dir))
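+                # For illustration (hypothetical paths): with src_file='/root/testfolder',
+                # a walked entry '/root/testfolder/sub' is split on os.path.dirname(src_file)
+                # ('/root') into '/testfolder/sub', the path used inside the ISO.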
+ for new_dir in dir_list:
+ add_directory(module, iso_file=iso_file, dir_path=new_dir.split(os.path.dirname(src_file))[1],
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+ for new_file in file_list:
+ add_file(module, iso_file=iso_file, src_file=new_file,
+ file_path=new_file.split(os.path.dirname(src_file))[1], rock_ridge=rock_ridge,
+ use_joliet=use_joliet, use_udf=use_udf)
+ # if specify a file then add this file directly to the '/' path in ISO
+ else:
+ add_file(module, iso_file=iso_file, src_file=src_file, file_path='/' + os.path.basename(src_file),
+ rock_ridge=rock_ridge, use_joliet=use_joliet, use_udf=use_udf)
+
+ iso_file.write(dest_iso)
+ iso_file.close()
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/iso_customize.py b/ansible_collections/community/general/plugins/modules/iso_customize.py
new file mode 100644
index 000000000..9add080b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/iso_customize.py
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: iso_customize
+short_description: Add/remove/change files in ISO file
+description:
+ - This module is used to add/remove/change files in ISO file.
+  - A file inside the ISO will be overwritten by option I(add_files) if it already exists.
+author:
+ - Yuhua Zou (@ZouYuhua) <zouy@vmware.com>
+requirements:
+ - "pycdlib"
+ - "python >= 2.7"
+version_added: '5.8.0'
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ src_iso:
+ description:
+ - This is the path of source ISO file.
+ type: path
+ required: true
+ dest_iso:
+ description:
+ - The path of the customized ISO file.
+ type: path
+ required: true
+ delete_files:
+ description:
+ - Absolute paths for files inside the ISO file that should be removed.
+ type: list
+ required: false
+ elements: str
+ default: []
+ add_files:
+ description:
+ - Allows to add and replace files in the ISO file.
+ - Will create intermediate folders inside the ISO file when they do not exist.
+ type: list
+ required: false
+ elements: dict
+ default: []
+ suboptions:
+ src_file:
+ description:
+ - The path with file name on the machine the module is executed on.
+ type: path
+ required: true
+ dest_file:
+ description:
+ - The absolute path of the file inside the ISO file.
+ type: str
+ required: true
+notes:
+- The C(pycdlib) library states it supports Python 2.7 and 3.4 only.
+- >
+  The function I(add_file) in pycdlib will overwrite an existing file in an ISO of type ISO9660 / Rock Ridge 1.12 / Joliet / UDF,
+  but it will not overwrite an existing file in an ISO with Rock Ridge 1.09 / 1.10.
+  As a workaround, the module deletes the existing file and then adds the new one for ISOs with Rock Ridge.
+'''
+
+EXAMPLES = r'''
+- name: "Customize ISO file"
+ community.general.iso_customize:
+ src_iso: "/path/to/ubuntu-22.04-desktop-amd64.iso"
+ dest_iso: "/path/to/ubuntu-22.04-desktop-amd64-customized.iso"
+ delete_files:
+ - "/boot.catalog"
+ add_files:
+ - src_file: "/path/to/grub.cfg"
+ dest_file: "/boot/grub/grub.cfg"
+ - src_file: "/path/to/ubuntu.seed"
+ dest_file: "/preseed/ubuntu.seed"
+ register: customize_iso_result
+'''
+
+RETURN = r'''
+src_iso:
+ description: Path of source ISO file.
+ returned: on success
+ type: str
+ sample: "/path/to/file.iso"
+dest_iso:
+ description: Path of the customized ISO file.
+ returned: on success
+ type: str
+ sample: "/path/to/customized.iso"
+'''
+
+import os
+
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+with deps.declare("pycdlib"):
+ import pycdlib
+
+
+# The parent dir exists; we only add the subdirectory
+def iso_add_dir(module, opened_iso, iso_type, dir_path):
+ parent_dir, check_dirname = dir_path.rsplit("/", 1)
+ if not parent_dir.strip():
+ parent_dir = "/"
+ check_dirname = check_dirname.strip()
+
+ for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()):
+ if dirname == parent_dir.upper():
+ if check_dirname.upper() in dirlist:
+ return
+
+ if parent_dir == "/":
+ current_dirpath = "/%s" % check_dirname
+ else:
+ current_dirpath = "%s/%s" % (parent_dir, check_dirname)
+
+ current_dirpath_upper = current_dirpath.upper()
+ try:
+ if iso_type == "iso9660":
+ opened_iso.add_directory(current_dirpath_upper)
+ elif iso_type == "rr":
+ opened_iso.add_directory(current_dirpath_upper, rr_name=check_dirname)
+ elif iso_type == "joliet":
+ opened_iso.add_directory(current_dirpath_upper, joliet_path=current_dirpath)
+ elif iso_type == "udf":
+ opened_iso.add_directory(current_dirpath_upper, udf_path=current_dirpath)
+ except Exception as err:
+ msg = "Failed to create dir %s with error: %s" % (current_dirpath, to_native(err))
+ module.fail_json(msg=msg)
+
+
+def iso_add_dirs(module, opened_iso, iso_type, dir_path):
+ dirnames = dir_path.strip().split("/")
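+    # For illustration: dir_path='/boot/grub' is split into ['', 'boot', 'grub'],
+    # so '/boot' is ensured first, then '/boot/grub'.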
+
+ current_dirpath = "/"
+ for item in dirnames:
+ if not item.strip():
+ continue
+ if current_dirpath == "/":
+ current_dirpath = "/%s" % item
+ else:
+ current_dirpath = "%s/%s" % (current_dirpath, item)
+
+ iso_add_dir(module, opened_iso, iso_type, current_dirpath)
+
+
+def iso_check_file_exists(opened_iso, dest_file):
+ file_dir = os.path.dirname(dest_file).strip()
+ file_name = os.path.basename(dest_file)
+ dirnames = file_dir.strip().split("/")
+
+ parent_dir = "/"
+ for item in dirnames:
+ if not item.strip():
+ continue
+
+ for dirname, dirlist, dummy_filelist in opened_iso.walk(iso_path=parent_dir.upper()):
+ if dirname != parent_dir.upper():
+ break
+
+ if item.upper() not in dirlist:
+ return False
+
+ if parent_dir == "/":
+ parent_dir = "/%s" % item
+ else:
+ parent_dir = "%s/%s" % (parent_dir, item)
+
+ if '.' not in file_name:
+ file_in_iso_path = file_name.upper() + '.;1'
+ else:
+ file_in_iso_path = file_name.upper() + ';1'
+
+ for dirname, dummy_dirlist, filelist in opened_iso.walk(iso_path=parent_dir.upper()):
+ if dirname != parent_dir.upper():
+ return False
+
+ return file_name.upper() in filelist or file_in_iso_path in filelist
+
+
+def iso_add_file(module, opened_iso, iso_type, src_file, dest_file):
+ dest_file = dest_file.strip()
+ if dest_file[0] != "/":
+ dest_file = "/%s" % dest_file
+
+ file_local = src_file.strip()
+
+ file_dir = os.path.dirname(dest_file).strip()
+ file_name = os.path.basename(dest_file)
+ if '.' not in file_name:
+ file_in_iso_path = dest_file.upper() + '.;1'
+ else:
+ file_in_iso_path = dest_file.upper() + ';1'
+
+ if file_dir and file_dir != "/":
+ iso_add_dirs(module, opened_iso, iso_type, file_dir)
+
+ try:
+ if iso_type == "iso9660":
+ opened_iso.add_file(file_local, iso_path=file_in_iso_path)
+ elif iso_type == "rr":
+            # For an ISO with Rock Ridge 1.09 / 1.10, add_file() will not overwrite an existing file,
+            # so we use a workaround here: delete the existing file and then add the new one
+ if iso_check_file_exists(opened_iso, dest_file):
+ opened_iso.rm_file(iso_path=file_in_iso_path)
+ opened_iso.add_file(file_local, iso_path=file_in_iso_path, rr_name=file_name)
+ elif iso_type == "joliet":
+ opened_iso.add_file(file_local, iso_path=file_in_iso_path, joliet_path=dest_file)
+ elif iso_type == "udf":
+            # For an ISO with UDF, overwriting the existing file does not always succeed,
+            # so we use a workaround here: delete the existing file and then add the new one
+ if iso_check_file_exists(opened_iso, dest_file):
+ opened_iso.rm_file(udf_path=dest_file)
+ opened_iso.add_file(file_local, iso_path=file_in_iso_path, udf_path=dest_file)
+ except Exception as err:
+ msg = "Failed to add local file %s to ISO with error: %s" % (file_local, to_native(err))
+ module.fail_json(msg=msg)
+
+
+def iso_delete_file(module, opened_iso, iso_type, dest_file):
+ dest_file = dest_file.strip()
+ if dest_file[0] != "/":
+ dest_file = "/%s" % dest_file
+ file_name = os.path.basename(dest_file)
+
+ if not iso_check_file_exists(opened_iso, dest_file):
+ module.fail_json(msg="The file %s does not exist." % dest_file)
+
+ if '.' not in file_name:
+ file_in_iso_path = dest_file.upper() + '.;1'
+ else:
+ file_in_iso_path = dest_file.upper() + ';1'
+
+ try:
+ if iso_type == "iso9660":
+ opened_iso.rm_file(iso_path=file_in_iso_path)
+ elif iso_type == "rr":
+ opened_iso.rm_file(iso_path=file_in_iso_path)
+ elif iso_type == "joliet":
+ opened_iso.rm_file(joliet_path=dest_file)
+ elif iso_type == "udf":
+ opened_iso.rm_file(udf_path=dest_file)
+ except Exception as err:
+ msg = "Failed to delete iso file %s with error: %s" % (dest_file, to_native(err))
+ module.fail_json(msg=msg)
+
+
+def iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list):
+ iso = None
+ iso_type = "iso9660"
+
+ try:
+ iso = pycdlib.PyCdlib(always_consistent=True)
+ iso.open(src_iso)
+ if iso.has_rock_ridge():
+ iso_type = "rr"
+ elif iso.has_joliet():
+ iso_type = "joliet"
+ elif iso.has_udf():
+ iso_type = "udf"
+
+ for item in delete_files_list:
+ iso_delete_file(module, iso, iso_type, item)
+
+ for item in add_files_list:
+ iso_add_file(module, iso, iso_type, item['src_file'], item['dest_file'])
+
+ iso.write(dest_iso)
+ except Exception as err:
+ msg = "Failed to rebuild ISO %s with error: %s" % (src_iso, to_native(err))
+ module.fail_json(msg=msg)
+ finally:
+ if iso:
+ iso.close()
+
+
+def main():
+ argument_spec = dict(
+ src_iso=dict(type='path', required=True),
+ dest_iso=dict(type='path', required=True),
+ delete_files=dict(type='list', elements='str', default=[]),
+ add_files=dict(
+ type='list', elements='dict', default=[],
+ options=dict(
+ src_file=dict(type='path', required=True),
+ dest_file=dict(type='str', required=True),
+ ),
+ ),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_one_of=[('delete_files', 'add_files'), ],
+ supports_check_mode=True,
+ )
+ deps.validate(module)
+
+ src_iso = module.params['src_iso']
+ if not os.path.exists(src_iso):
+ module.fail_json(msg="ISO file %s does not exist." % src_iso)
+
+ dest_iso = module.params['dest_iso']
+ dest_iso_dir = os.path.dirname(dest_iso)
+ if dest_iso_dir and not os.path.exists(dest_iso_dir):
+ module.fail_json(msg="The dest directory %s does not exist" % dest_iso_dir)
+
+ delete_files_list = [s.strip() for s in module.params['delete_files']]
+ add_files_list = module.params['add_files']
+ if add_files_list:
+ for item in add_files_list:
+ if not os.path.exists(item['src_file']):
+ module.fail_json(msg="The file %s does not exist." % item['src_file'])
+
+ result = dict(
+ src_iso=src_iso,
+        dest_iso=dest_iso,
+ delete_files=delete_files_list,
+ add_files=add_files_list,
+ changed=True,
+ )
+
+ if not module.check_mode:
+ iso_rebuild(module, src_iso, dest_iso, delete_files_list, add_files_list)
+
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/iso_extract.py b/ansible_collections/community/general/plugins/modules/iso_extract.py
new file mode 100644
index 000000000..599cbe4de
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/iso_extract.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# Copyright (c) 2016, Matt Robinson <git@nerdoftheherd.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Matt Robinson (@ribbons)
+ - Dag Wieers (@dagwieers)
+module: iso_extract
+short_description: Extract files from an ISO image
+description:
+ - This module has two possible ways of operation.
+ - If 7zip is installed on the system, this module extracts files from an ISO
+ into a temporary directory and copies files to a given destination,
+ if needed.
+ - If the user has mount-capabilities (CAP_SYS_ADMIN on Linux) this module
+ mounts the ISO image to a temporary location, and copies files to a given
+ destination, if needed.
+requirements:
+ - Either 7z (from C(7zip) or C(p7zip) package)
+ - Or mount capabilities (root-access, or CAP_SYS_ADMIN capability on Linux)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ image:
+ description:
+ - The ISO image to extract files from.
+ type: path
+ required: true
+ aliases: [ path, src ]
+ dest:
+ description:
+ - The destination directory to extract files to.
+ type: path
+ required: true
+ files:
+ description:
+ - A list of files to extract from the image.
+ - Extracting directories does not work.
+ type: list
+ elements: str
+ required: true
+ force:
+ description:
+    - If C(true), the remote file will be replaced when its contents differ from the source.
+ - If C(false), the file will only be extracted and copied if the destination does not already exist.
+ type: bool
+ default: true
+ executable:
+ description:
+ - The path to the C(7z) executable to use for extracting files from the ISO.
+ - If not provided, it will assume the value C(7z).
+ type: path
+notes:
+- Only the file checksum (content) is taken into account when extracting files
+  from the ISO image. If I(force=false), only the presence of the file is checked.
+- In Ansible 2.3 this module was using C(mount) and C(umount) commands only,
+ requiring root access. This is no longer needed with the introduction of 7zip
+ for extraction.
+'''
+
+EXAMPLES = r'''
+- name: Extract kernel and ramdisk from a LiveCD
+ community.general.iso_extract:
+ image: /tmp/rear-test.iso
+ dest: /tmp/virt-rear/
+ files:
+ - isolinux/kernel
+ - isolinux/initrd.cgz
+'''
+
+RETURN = r'''
+#
+'''
+
+import os.path
+import shutil
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(type='path', required=True, aliases=['path', 'src']),
+ dest=dict(type='path', required=True),
+ files=dict(type='list', elements='str', required=True),
+ force=dict(type='bool', default=True),
+ executable=dict(type='path'), # No default on purpose
+ ),
+ supports_check_mode=True,
+ )
+ image = module.params['image']
+ dest = module.params['dest']
+ files = module.params['files']
+ force = module.params['force']
+ executable = module.params['executable']
+
+ result = dict(
+ changed=False,
+ dest=dest,
+ image=image,
+ )
+
+ # We want to know if the user provided it or not, so we set default here
+ if executable is None:
+ executable = '7z'
+
+ binary = module.get_bin_path(executable, None)
+
+ # When executable was provided and binary not found, warn user !
+ if module.params['executable'] is not None and not binary:
+ module.warn("Executable '%s' is not found on the system, trying to mount ISO instead." % executable)
+
+ if not os.path.exists(dest):
+ module.fail_json(msg="Directory '%s' does not exist" % dest)
+
+    if not os.path.exists(image):
+ module.fail_json(msg="ISO image '%s' does not exist" % image)
+
+ result['files'] = []
+ extract_files = list(files)
+
+ if not force:
+ # Check if we have to process any files based on existence
+ for f in files:
+ dest_file = os.path.join(dest, os.path.basename(f))
+ if os.path.exists(dest_file):
+ result['files'].append(dict(
+ checksum=None,
+ dest=dest_file,
+ src=f,
+ ))
+ extract_files.remove(f)
+
+ if not extract_files:
+ module.exit_json(**result)
+
+ tmp_dir = tempfile.mkdtemp()
+
+ # Use 7zip when we have a binary, otherwise try to mount
+ if binary:
+ cmd = [binary, 'x', image, '-o%s' % tmp_dir] + extract_files
+ else:
+ cmd = [module.get_bin_path('mount'), '-o', 'loop,ro', image, tmp_dir]
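+        # For illustration (hypothetical temp dir): with 7z available the command is
+        #   7z x /tmp/rear-test.iso -o/tmp/tmpXXXX isolinux/kernel isolinux/initrd.cgz
+        # while the fallback is: mount -o loop,ro /tmp/rear-test.iso /tmp/tmpXXXX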
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ result.update(dict(
+ cmd=cmd,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ ))
+ shutil.rmtree(tmp_dir)
+
+ if binary:
+ module.fail_json(msg="Failed to extract from ISO image '%s' to '%s'" % (image, tmp_dir), **result)
+ else:
+ module.fail_json(msg="Failed to mount ISO image '%s' to '%s', and we could not find executable '%s'." % (image, tmp_dir, executable), **result)
+
+ try:
+ for f in extract_files:
+ tmp_src = os.path.join(tmp_dir, f)
+ if not os.path.exists(tmp_src):
+ module.fail_json(msg="Failed to extract '%s' from ISO image" % f, **result)
+
+ src_checksum = module.sha1(tmp_src)
+
+ dest_file = os.path.join(dest, os.path.basename(f))
+
+ if os.path.exists(dest_file):
+ dest_checksum = module.sha1(dest_file)
+ else:
+ dest_checksum = None
+
+ result['files'].append(dict(
+ checksum=src_checksum,
+ dest=dest_file,
+ src=f,
+ ))
+
+ if src_checksum != dest_checksum:
+ if not module.check_mode:
+ shutil.copy(tmp_src, dest_file)
+
+ result['changed'] = True
+ finally:
+ if not binary:
+ module.run_command([module.get_bin_path('umount'), tmp_dir])
+
+ shutil.rmtree(tmp_dir)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jabber.py b/ansible_collections/community/general/plugins/modules/jabber.py
new file mode 100644
index 000000000..650b29957
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jabber.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jabber
+short_description: Send a message to jabber user or chat room
+description:
+  - Send a message to a jabber user or chat room.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ user:
+ type: str
+ description:
+      - User as which to connect.
+ required: true
+ password:
+ type: str
+ description:
+      - Password for the user to connect with.
+ required: true
+ to:
+ type: str
+ description:
+      - User ID or name of the room; when using a room, use a slash to indicate your nick.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ host:
+ type: str
+ description:
+      - Host to connect to; overrides user info.
+ port:
+ type: int
+ description:
+      - Port to connect to; overrides the default.
+ default: 5222
+ encoding:
+ type: str
+ description:
+      - Message encoding.
+
+# informational: requirements for nodes
+requirements:
+ - python xmpp (xmpppy)
+author: "Brian Coca (@bcoca)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to a user
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: friend@example.net
+ msg: Ansible task finished
+
+- name: Send a message to a room
+ community.general.jabber:
+ user: mybot@example.net
+ password: secret
+ to: mychaps@conference.example.net/ansiblebot
+ msg: Ansible task finished
+
+- name: Send a message, specifying the host and port
+ community.general.jabber:
+ user: mybot@example.net
+ host: talk.example.net
+ port: 5223
+ password: secret
+ to: mychaps@example.net
+ msg: Ansible task finished
+'''
+
+import time
+import traceback
+
+HAS_XMPP = True
+XMPP_IMP_ERR = None
+try:
+ import xmpp
+except ImportError:
+ XMPP_IMP_ERR = traceback.format_exc()
+ HAS_XMPP = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ password=dict(required=True, no_log=True),
+ to=dict(required=True),
+ msg=dict(required=True),
+ host=dict(required=False),
+ port=dict(required=False, default=5222, type='int'),
+ encoding=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_XMPP:
+ module.fail_json(msg=missing_required_lib('xmpppy'), exception=XMPP_IMP_ERR)
+
+ jid = xmpp.JID(module.params['user'])
+ user = jid.getNode()
+ server = jid.getDomain()
+ port = module.params['port']
+ password = module.params['password']
+ try:
+ to, nick = module.params['to'].split('/', 1)
+ except ValueError:
+ to, nick = module.params['to'], None
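+    # For illustration: to='mychaps@conference.example.net/ansiblebot' splits into
+    # to='mychaps@conference.example.net' and nick='ansiblebot' (room mode), while
+    # to='friend@example.net' leaves nick=None (direct chat).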
+
+ if module.params['host']:
+ host = module.params['host']
+ else:
+ host = server
+ if module.params['encoding']:
+ xmpp.simplexml.ENCODING = module.params['encoding']
+
+ msg = xmpp.protocol.Message(body=module.params['msg'])
+
+ try:
+ conn = xmpp.Client(server, debug=[])
+ if not conn.connect(server=(host, port)):
+ module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
+ if not conn.auth(user, password, 'Ansible'):
+ module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
+        # some old servers require this, as does the sleep following send
+ conn.sendInitPresence(requestRoster=0)
+
+ if nick: # sending to room instead of user, need to join
+ msg.setType('groupchat')
+ msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
+ join = xmpp.Presence(to=module.params['to'])
+ join.setTag('x', namespace='http://jabber.org/protocol/muc')
+ conn.send(join)
+ time.sleep(1)
+ else:
+ msg.setType('chat')
+
+ msg.setTo(to)
+ if not module.check_mode:
+ conn.send(msg)
+ time.sleep(1)
+ conn.disconnect()
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/java_cert.py b/ansible_collections/community/general/plugins/modules/java_cert.py
new file mode 100644
index 000000000..a188b16c3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/java_cert.py
@@ -0,0 +1,585 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, RSD Services S.A
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: java_cert
+
+short_description: Uses keytool to import/remove certificate to/from java keystore (cacerts)
+description:
+ - This is a wrapper module around keytool, which can be used to import certificates
+ and optionally private keys to a given java keystore, or remove them from it.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ cert_url:
+ description:
+ - Basic URL to fetch SSL certificate from.
+ - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ type: str
+ cert_port:
+ description:
+ - Port to connect to URL.
+      - This will be used to build the server address as URL:PORT.
+ type: int
+ default: 443
+ cert_path:
+ description:
+ - Local path to load certificate from.
+ - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ type: path
+ cert_alias:
+ description:
+ - Imported certificate alias.
+ - The alias is used when checking for the presence of a certificate in the keystore.
+ type: str
+ trust_cacert:
+ description:
+ - Trust imported cert as CAcert.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ pkcs12_path:
+ description:
+ - Local path to load PKCS12 keystore from.
+ - Unlike C(cert_url) and C(cert_path), the PKCS12 keystore embeds the private key matching
+ the certificate, and is used to import both the certificate and its private key into the
+ java keystore.
+ - Exactly one of C(cert_url), C(cert_path) or C(pkcs12_path) is required to load certificate.
+ type: path
+ pkcs12_password:
+ description:
+ - Password for importing from PKCS12 keystore.
+ type: str
+ pkcs12_alias:
+ description:
+ - Alias in the PKCS12 keystore.
+ type: str
+ keystore_path:
+ description:
+ - Path to keystore.
+ type: path
+ keystore_pass:
+ description:
+ - Keystore password.
+ type: str
+ required: true
+ keystore_create:
+ description:
+ - Create keystore if it does not exist.
+ type: bool
+ default: false
+ keystore_type:
+ description:
+ - Keystore type (JCEKS, JKS).
+ type: str
+ executable:
+ description:
+      - Path to the keytool binary; if not set, it is searched for in PATH.
+ type: str
+ default: keytool
+ state:
+ description:
+      - Defines the action, which can be either certificate import or removal.
+      - When state is present, the certificate will always be inserted into the
+        keystore idempotently, even if a different certificate already exists under the same alias.
+ type: str
+ choices: [ absent, present ]
+ default: present
+requirements: [openssl, keytool]
+author:
+- Adam Hamsik (@haad)
+'''
+
+EXAMPLES = r'''
+- name: Import SSL certificate from google.com to a given cacerts keystore
+ community.general.java_cert:
+ cert_url: google.com
+ cert_port: 443
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ state: present
+
+- name: Remove certificate with given alias from a keystore
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /usr/lib/jvm/jre7/lib/security/cacerts
+ keystore_pass: changeit
+ executable: /usr/lib/jvm/jre7/bin/keytool
+ state: absent
+
+- name: Import trusted CA from SSL certificate
+ community.general.java_cert:
+ cert_path: /opt/certs/rootca.crt
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: true
+ state: present
+ cert_alias: LE_RootCA
+ trust_cacert: true
+
+- name: Import SSL certificate from google.com to a keystore, create it if it doesn't exist
+ community.general.java_cert:
+ cert_url: google.com
+ keystore_path: /tmp/cacerts
+ keystore_pass: changeit
+ keystore_create: true
+ state: present
+
+- name: Import a pkcs12 keystore with a specified alias, create it if it doesn't exist
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ cert_alias: default
+ keystore_path: /opt/wildfly/standalone/configuration/defaultkeystore.jks
+ keystore_pass: changeit
+ keystore_create: true
+ state: present
+
+- name: Import SSL certificate to JCEKS keystore
+ community.general.java_cert:
+ pkcs12_path: "/tmp/importkeystore.p12"
+ pkcs12_alias: default
+ pkcs12_password: somepass
+ cert_alias: default
+ keystore_path: /opt/someapp/security/keystore.jceks
+ keystore_type: "JCEKS"
+ keystore_pass: changeit
+ keystore_create: true
+ state: present
+'''
+
+RETURN = r'''
+msg:
+ description: Output from stdout of keytool command after execution of given command.
+ returned: success
+ type: str
+ sample: "Module require existing keystore at keystore_path '/tmp/test/cacerts'"
+
+rc:
+ description: Keytool command execution return value.
+ returned: success
+ type: int
+ sample: "0"
+
+cmd:
+ description: Executed command to get action done.
+ returned: success
+ type: str
+ sample: "keytool -importcert -noprompt -keystore"
+'''
+
+import os
+import tempfile
+import re
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.six.moves.urllib.request import getproxies
+
+
+def _get_keystore_type_keytool_parameters(keystore_type):
+    ''' Return additional keytool parameters when a custom keystore type is given '''
+ if keystore_type:
+ return ["-storetype", keystore_type]
+ return []
+
+
+def _check_cert_present(module, executable, keystore_path, keystore_pass, alias, keystore_type):
+ ''' Check if certificate with alias is present in keystore
+ located at keystore_path '''
+ test_cmd = [
+ executable,
+ "-list",
+ "-keystore",
+ keystore_path,
+ "-alias",
+ alias,
+ "-rfc"
+ ]
+ test_cmd += _get_keystore_type_keytool_parameters(keystore_type)
+
+ (check_rc, stdout, dummy) = module.run_command(test_cmd, data=keystore_pass, check_rc=False)
+ if check_rc == 0:
+ return (True, stdout)
+ return (False, '')
+
+
+def _get_certificate_from_url(module, executable, url, port, pem_certificate_output):
+ remote_cert_pem_chain = _download_cert_url(module, executable, url, port)
+ with open(pem_certificate_output, 'w') as f:
+ f.write(remote_cert_pem_chain)
+
+
+def _get_first_certificate_from_x509_file(module, pem_certificate_file, pem_certificate_output, openssl_bin):
+ """ Read a X509 certificate chain file and output the first certificate in the list """
+ extract_cmd = [
+ openssl_bin,
+ "x509",
+ "-in",
+ pem_certificate_file,
+ "-out",
+ pem_certificate_output
+ ]
+ (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False)
+
+ if extract_rc != 0:
+ # retry, assuming a DER-encoded file
+ extract_cmd += ["-inform", "der"]
+ (extract_rc, dummy, extract_stderr) = module.run_command(extract_cmd, check_rc=False)
+
+ if extract_rc != 0:
+ # this time it's a real failure
+ module.fail_json(msg="Internal module failure, cannot extract certificate, error: %s" % extract_stderr,
+ rc=extract_rc, cmd=extract_cmd)
+
+ return extract_rc
+
+
+def _get_digest_from_x509_file(module, pem_certificate_file, openssl_bin):
+ """ Read a X509 certificate file and output sha256 digest using openssl """
+ # extract the first certificate of the chain into a clean temporary file before computing the digest
+ (dummy, tmp_certificate) = tempfile.mkstemp()
+ module.add_cleanup_file(tmp_certificate)
+ _get_first_certificate_from_x509_file(module, pem_certificate_file, tmp_certificate, openssl_bin)
+ dgst_cmd = [
+ openssl_bin,
+ "dgst",
+ "-r",
+ "-sha256",
+ tmp_certificate
+ ]
+ (dgst_rc, dgst_stdout, dgst_stderr) = module.run_command(dgst_cmd, check_rc=False)
+
+ if dgst_rc != 0:
+ module.fail_json(msg="Internal module failure, cannot compute digest for certificate, error: %s" % dgst_stderr,
+ rc=dgst_rc, cmd=dgst_cmd)
+
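+ # "openssl dgst -r" prints "<digest> *<filename>"; keep only the digest part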
+ return dgst_stdout.split(' ')[0]
+
+
+def _export_public_cert_from_pkcs12(module, executable, pkcs_file, alias, password, dest):
+ """ Runs keytools to extract the public cert from a PKCS12 archive and write it to a file. """
+ export_cmd = [
+ executable,
+ "-list",
+ "-noprompt",
+ "-keystore",
+ pkcs_file,
+ "-alias",
+ alias,
+ "-storetype",
+ "pkcs12",
+ "-rfc"
+ ]
+ (export_rc, export_stdout, export_err) = module.run_command(export_cmd, data=password, check_rc=False)
+
+ if export_rc != 0:
+ module.fail_json(msg="Internal module failure, cannot extract public certificate from PKCS12, message: %s" % export_stdout,
+ stderr=export_err,
+ rc=export_rc)
+
+ with open(dest, 'w') as f:
+ f.write(export_stdout)
+
+
+def get_proxy_settings(scheme='https'):
+ """ Returns a tuple containing (proxy_host, proxy_port). (False, False) if no proxy is found """
+ proxy_url = getproxies().get(scheme, '')
+ if not proxy_url:
+ return (False, False)
+ else:
+ parsed_url = urlparse(proxy_url)
+ if parsed_url.scheme:
+ (proxy_host, proxy_port) = parsed_url.netloc.split(':')
+ else:
+ (proxy_host, proxy_port) = parsed_url.path.split(':')
+ return (proxy_host, proxy_port)
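+# Illustrative example (hypothetical values): with https_proxy set to
+# "http://proxy.example.com:3128", get_proxy_settings() returns
+# ('proxy.example.com', '3128'); with no proxy configured, (False, False).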
+
+
+def build_proxy_options():
+ """ Returns list of valid proxy options for keytool """
+ (proxy_host, proxy_port) = get_proxy_settings()
+ no_proxy = os.getenv("no_proxy")
+
+ proxy_opts = []
+ if proxy_host:
+ proxy_opts.extend(["-J-Dhttps.proxyHost=%s" % proxy_host, "-J-Dhttps.proxyPort=%s" % proxy_port])
+
+ if no_proxy is not None:
+ # For Java's nonProxyHosts property, items are separated by '|',
+ # and patterns have to start with "*".
+ non_proxy_hosts = no_proxy.replace(',', '|')
+ non_proxy_hosts = re.sub(r'(^|\|)\.', r'\1*.', non_proxy_hosts)
+
+ # The property name is http.nonProxyHosts, there is no
+ # separate setting for HTTPS.
+ proxy_opts.extend(["-J-Dhttp.nonProxyHosts=%s" % non_proxy_hosts])
+ return proxy_opts
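+# Illustrative example (hypothetical value): no_proxy=".example.com,localhost"
+# yields ["-J-Dhttp.nonProxyHosts=*.example.com|localhost"] in addition to any
+# proxy host/port options.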
+
+
+def _download_cert_url(module, executable, url, port):
+ """ Fetches the certificate from the remote URL using `keytool -printcert...`
+ The PEM formatted string is returned """
+ proxy_opts = build_proxy_options()
+ fetch_cmd = [executable, "-printcert", "-rfc", "-sslserver"] + proxy_opts + ["%s:%d" % (url, port)]
+
+ # Fetch SSL certificate from remote host.
+ (fetch_rc, fetch_out, fetch_err) = module.run_command(fetch_cmd, check_rc=False)
+
+ if fetch_rc != 0:
+ module.fail_json(msg="Internal module failure, cannot download certificate, error: %s" % fetch_err,
+ rc=fetch_rc, cmd=fetch_cmd)
+
+ return fetch_out
+
+
+def import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias,
+ keystore_path, keystore_pass, keystore_alias, keystore_type):
+ ''' Import pkcs12 from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = [
+ executable,
+ "-importkeystore",
+ "-noprompt",
+ "-srcstoretype",
+ "pkcs12",
+ "-srckeystore",
+ pkcs12_path,
+ "-srcalias",
+ pkcs12_alias,
+ "-destkeystore",
+ keystore_path,
+ "-destalias",
+ keystore_alias
+ ]
+ import_cmd += _get_keystore_type_keytool_parameters(keystore_type)
+
+ secret_data = "%s\n%s" % (keystore_pass, pkcs12_pass)
+ # Password of a new keystore must be entered twice, for confirmation
+ if not os.path.exists(keystore_path):
+ secret_data = "%s\n%s" % (keystore_pass, secret_data)
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd, data=secret_data, check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % keystore_alias}
+ if import_rc == 0 and os.path.exists(keystore_path):
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd, error=import_err)
+
+
+def import_cert_path(module, executable, path, keystore_path, keystore_pass, alias, keystore_type, trust_cacert):
+ ''' Import certificate from path into keystore located on
+ keystore_path as alias '''
+ import_cmd = [
+ executable,
+ "-importcert",
+ "-noprompt",
+ "-keystore",
+ keystore_path,
+ "-file",
+ path,
+ "-alias",
+ alias
+ ]
+ import_cmd += _get_keystore_type_keytool_parameters(keystore_type)
+
+ if trust_cacert:
+ import_cmd.extend(["-trustcacerts"])
+
+ # Use local certificate from local path and import it to a java keystore
+ (import_rc, import_out, import_err) = module.run_command(import_cmd,
+ data="%s\n%s" % (keystore_pass, keystore_pass),
+ check_rc=False)
+
+ diff = {'before': '\n', 'after': '%s\n' % alias}
+ if import_rc == 0:
+ module.exit_json(changed=True, msg=import_out,
+ rc=import_rc, cmd=import_cmd, stdout=import_out,
+ error=import_err, diff=diff)
+ else:
+ module.fail_json(msg=import_out, rc=import_rc, cmd=import_cmd)
+
+
+def delete_cert(module, executable, keystore_path, keystore_pass, alias, keystore_type, exit_after=True):
+ ''' Delete certificate identified with alias from keystore on keystore_path '''
+ del_cmd = [
+ executable,
+ "-delete",
+ "-noprompt",
+ "-keystore",
+ keystore_path,
+ "-alias",
+ alias
+ ]
+
+ del_cmd += _get_keystore_type_keytool_parameters(keystore_type)
+
+ # Delete SSL certificate from keystore
+ (del_rc, del_out, del_err) = module.run_command(del_cmd, data=keystore_pass, check_rc=True)
+
+ if exit_after:
+ diff = {'before': '%s\n' % alias, 'after': None}
+
+ module.exit_json(changed=True, msg=del_out,
+ rc=del_rc, cmd=del_cmd, stdout=del_out,
+ error=del_err, diff=diff)
+
+
+def test_keytool(module, executable):
+ ''' Test if keytool is actually executable or not '''
+ module.run_command([executable], check_rc=True)
+
+
+def test_keystore(module, keystore_path):
+ ''' Check if we can access keystore as file or not '''
+ if keystore_path is None:
+ keystore_path = ''
+
+ if not os.path.exists(keystore_path):
+ # The keystore does not exist and creation was not requested: fail
+ module.fail_json(changed=False, msg="Module requires an existing keystore at keystore_path '%s'" % keystore_path)
+
+
+def main():
+ argument_spec = dict(
+ cert_url=dict(type='str'),
+ cert_path=dict(type='path'),
+ pkcs12_path=dict(type='path'),
+ pkcs12_password=dict(type='str', no_log=True),
+ pkcs12_alias=dict(type='str'),
+ cert_alias=dict(type='str'),
+ cert_port=dict(type='int', default=443),
+ keystore_path=dict(type='path'),
+ keystore_pass=dict(type='str', required=True, no_log=True),
+ trust_cacert=dict(type='bool', default=False),
+ keystore_create=dict(type='bool', default=False),
+ keystore_type=dict(type='str'),
+ executable=dict(type='str', default='keytool'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[['state', 'present', ('cert_path', 'cert_url', 'pkcs12_path'), True],
+ ['state', 'absent', ('cert_url', 'cert_alias'), True]],
+ required_together=[['keystore_path', 'keystore_pass']],
+ mutually_exclusive=[
+ ['cert_url', 'cert_path', 'pkcs12_path']
+ ],
+ supports_check_mode=True,
+ )
+
+ url = module.params.get('cert_url')
+ path = module.params.get('cert_path')
+ port = module.params.get('cert_port')
+
+ pkcs12_path = module.params.get('pkcs12_path')
+ pkcs12_pass = module.params.get('pkcs12_password', '')
+ pkcs12_alias = module.params.get('pkcs12_alias', '1')
+
+ cert_alias = module.params.get('cert_alias') or url
+ trust_cacert = module.params.get('trust_cacert')
+
+ keystore_path = module.params.get('keystore_path')
+ keystore_pass = module.params.get('keystore_pass')
+ keystore_create = module.params.get('keystore_create')
+ keystore_type = module.params.get('keystore_type')
+ executable = module.params.get('executable')
+ state = module.params.get('state')
+
+ # openssl dependency resolution
+ openssl_bin = module.get_bin_path('openssl', True)
+
+ if path and not cert_alias:
+ module.fail_json(changed=False,
+ msg="Using local path import from %s requires alias argument."
+ % path)
+
+ test_keytool(module, executable)
+
+ if not keystore_create:
+ test_keystore(module, keystore_path)
+
+ alias_exists, alias_exists_output = _check_cert_present(
+ module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ (dummy, new_certificate) = tempfile.mkstemp()
+ (dummy, old_certificate) = tempfile.mkstemp()
+ module.add_cleanup_file(new_certificate)
+ module.add_cleanup_file(old_certificate)
+
+ if state == 'absent' and alias_exists:
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # delete and exit
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type)
+
+ # For state=present, dump the candidate certificate to disk and compare SHA256 digests
+ if state == 'present':
+ # The alias exists in the keystore so we must now compare the SHA256 hash of the
+ # public certificate already in the keystore, and the certificate we are wanting to add
+ if alias_exists:
+ with open(old_certificate, "w") as f:
+ f.write(alias_exists_output)
+ keystore_cert_digest = _get_digest_from_x509_file(module, old_certificate, openssl_bin)
+
+ else:
+ keystore_cert_digest = ''
+
+ if pkcs12_path:
+ # Extracting certificate with openssl
+ _export_public_cert_from_pkcs12(module, executable, pkcs12_path, pkcs12_alias, pkcs12_pass, new_certificate)
+
+ elif path:
+ # Extracting the X509 digest is a bit easier. Keytool will print the PEM
+ # certificate to stdout so we don't need to do any transformations.
+ new_certificate = path
+
+ elif url:
+ # Getting the X509 digest from a URL is the same as from a path, we just have
+ # to download the cert first
+ _get_certificate_from_url(module, executable, url, port, new_certificate)
+
+ new_cert_digest = _get_digest_from_x509_file(module, new_certificate, openssl_bin)
+
+ if keystore_cert_digest != new_cert_digest:
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if alias_exists:
+ # The certificate in the keystore does not match with the one we want to be present
+ # The existing certificate must first be deleted before we insert the correct one
+ delete_cert(module, executable, keystore_path, keystore_pass, cert_alias, keystore_type, exit_after=False)
+
+ if pkcs12_path:
+ import_pkcs12_path(module, executable, pkcs12_path, pkcs12_pass, pkcs12_alias,
+ keystore_path, keystore_pass, cert_alias, keystore_type)
+ else:
+ import_cert_path(module, executable, new_certificate, keystore_path,
+ keystore_pass, cert_alias, keystore_type, trust_cacert)
+
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/java_keystore.py b/ansible_collections/community/general/plugins/modules/java_keystore.py
new file mode 100644
index 000000000..7c2c4884d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/java_keystore.py
@@ -0,0 +1,584 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, quidame <quidame@poivron.org>
+# Copyright (c) 2016, Guillaume Grossetie <ggrossetie@yuzutech.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: java_keystore
+short_description: Create a Java keystore in JKS format
+description:
+ - Bundle an X.509 certificate and its private key into a Java keystore in JKS format.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the certificate in the keystore.
+ - If the provided name does not exist in the keystore, the module
+ will re-create the keystore. This behavior changed in community.general 3.0.0;
+ before that, the module failed when the name did not match.
+ type: str
+ required: true
+ certificate:
+ description:
+ - Content of the certificate used to create the keystore.
+ - If the fingerprint of the provided certificate does not match the
+ fingerprint of the certificate bundled in the keystore, the keystore
+ is regenerated with the provided certificate.
+ - Exactly one of I(certificate) or I(certificate_path) is required.
+ type: str
+ certificate_path:
+ description:
+ - Location of the certificate used to create the keystore.
+ - If the fingerprint of the provided certificate does not match the
+ fingerprint of the certificate bundled in the keystore, the keystore
+ is regenerated with the provided certificate.
+ - Exactly one of I(certificate) or I(certificate_path) is required.
+ type: path
+ version_added: '3.0.0'
+ private_key:
+ description:
+ - Content of the private key used to create the keystore.
+ - Exactly one of I(private_key) or I(private_key_path) is required.
+ type: str
+ private_key_path:
+ description:
+ - Location of the private key used to create the keystore.
+ - Exactly one of I(private_key) or I(private_key_path) is required.
+ type: path
+ version_added: '3.0.0'
+ private_key_passphrase:
+ description:
+ - Passphrase used to read the private key, if required.
+ type: str
+ version_added: '0.2.0'
+ password:
+ description:
+ - Password that should be used to secure the keystore.
+ - If the provided password fails to unlock the keystore, the module
+ will re-create the keystore with the new passphrase. This behavior
+ changed in community.general 3.0.0, before that the module would fail
+ when the password did not match.
+ type: str
+ required: true
+ dest:
+ description:
+ - Absolute path of the generated keystore.
+ type: path
+ required: true
+ force:
+ description:
+ - Keystore is created even if it already exists.
+ type: bool
+ default: false
+ owner:
+ description:
+ - Name of the user that should own the JKS file.
+ required: false
+ group:
+ description:
+ - Name of the group that should own the JKS file.
+ required: false
+ mode:
+ description:
+ - Permissions the file should have.
+ required: false
+ ssl_backend:
+ description:
+ - Backend for loading private keys and certificates.
+ type: str
+ default: openssl
+ choices:
+ - openssl
+ - cryptography
+ version_added: 3.1.0
+ keystore_type:
+ description:
+ - Type of the Java keystore.
+ - When this option is omitted and the keystore does not already exist, the
+ behavior follows C(keytool)'s default store type, which depends on the
+ Java version: C(pkcs12) since Java 9 and C(jks) before (although it may
+ also be C(pkcs12) if the new default has been backported to that version).
+ - When this option is omitted and the keystore already exists, the current
+ type is left untouched, unless another option leads to overwrite the
+ keystore (in that case, this option behaves like for keystore creation).
+ - When I(keystore_type) is set, the keystore is created with this type if
+ it doesn't already exist, or is overwritten to match the given type in
+ case of mismatch.
+ type: str
+ choices:
+ - jks
+ - pkcs12
+ version_added: 3.3.0
+requirements:
+ - openssl in PATH (when I(ssl_backend=openssl))
+ - keytool in PATH
+ - cryptography >= 3.0 (when I(ssl_backend=cryptography))
+author:
+ - Guillaume Grossetie (@Mogztter)
+ - quidame (@quidame)
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+seealso:
+ - module: community.crypto.openssl_pkcs12
+ - module: community.general.java_cert
+notes:
+ - I(certificate) and I(private_key) require that their contents are available
+ on the controller (either inline in a playbook, or with the C(file) lookup),
+ while I(certificate_path) and I(private_key_path) require that the files are
+ available on the target host.
+ - By design, any change of a value of options I(keystore_type), I(name) or
+ I(password), as well as changes of key or certificate materials will cause
+ the existing I(dest) to be overwritten.
+'''
+
+EXAMPLES = '''
+- name: Create a keystore for the given certificate/private key pair (inline)
+ community.general.java_keystore:
+ name: example
+ certificate: |
+ -----BEGIN CERTIFICATE-----
+ h19dUZ2co2fI/ibYiwxWk4aeNE6KWvCaTQOMQ8t6Uo2XKhpL/xnjoAgh1uCQN/69
+ MG+34+RhUWzCfdZH7T8/qDxJw2kEPKluaYh7KnMsba+5jHjmtzix5QIDAQABo4IB
+ -----END CERTIFICATE-----
+ private_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ DBVFTEVDVFJJQ0lURSBERSBGUkFOQ0UxFzAVBgNVBAsMDjAwMDIgNTUyMDgxMzE3
+ GLlDNMw/uHyME7gHFsqJA7O11VY6O5WQ4IDP3m/s5ZV6s+Nn6Lerz17VZ99
+ -----END RSA PRIVATE KEY-----
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a keystore for the given certificate/private key pair (with files on controller)
+ community.general.java_keystore:
+ name: example
+ certificate: "{{ lookup('file', '/path/to/certificate.crt') }}"
+ private_key: "{{ lookup('file', '/path/to/private.key') }}"
+ password: changeit
+ dest: /etc/security/keystore.jks
+
+- name: Create a keystore for the given certificate/private key pair (with files on target host)
+ community.general.java_keystore:
+ name: snakeoil
+ certificate_path: /etc/ssl/certs/ssl-cert-snakeoil.pem
+ private_key_path: /etc/ssl/private/ssl-cert-snakeoil.key
+ password: changeit
+ dest: /etc/security/keystore.jks
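+
+# A hedged sketch (hypothetical paths): force a PKCS#12 store type and the
+# cryptography backend, two documented options not shown above.
+- name: Create a PKCS#12 keystore using the cryptography backend
+ community.general.java_keystore:
+ name: example
+ certificate_path: /etc/ssl/certs/example.pem
+ private_key_path: /etc/ssl/private/example.key
+ password: changeit
+ keystore_type: pkcs12
+ ssl_backend: cryptography
+ dest: /etc/security/keystore.p12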
+'''
+
+RETURN = '''
+msg:
+ description: Output from stdout of the keytool/openssl command after execution of the given command, or an error message.
+ returned: changed and failure
+ type: str
+ sample: "Unable to find the current certificate fingerprint in ..."
+
+err:
+ description: Output from stderr of the keytool/openssl command when the given command fails.
+ returned: failure
+ type: str
+ sample: "Keystore password is too short - must be at least 6 characters\n"
+
+rc:
+ description: Return code of the executed keytool/openssl command.
+ returned: changed and failure
+ type: int
+ sample: "0"
+
+cmd:
+ description: The command executed to perform the requested action.
+ returned: changed and failure
+ type: str
+ sample: "/usr/bin/openssl x509 -noout -in /tmp/user/1000/tmp8jd_lh23 -fingerprint -sha256"
+'''
+
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.six import PY2
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+try:
+ from cryptography.hazmat.primitives.serialization.pkcs12 import serialize_key_and_certificates
+ from cryptography.hazmat.primitives.serialization import (
+ BestAvailableEncryption,
+ NoEncryption,
+ load_pem_private_key,
+ load_der_private_key,
+ )
+ from cryptography.x509 import (
+ load_pem_x509_certificate,
+ load_der_x509_certificate,
+ )
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.exceptions import UnsupportedAlgorithm
+ from cryptography.hazmat.backends.openssl import backend
+ HAS_CRYPTOGRAPHY_PKCS12 = True
+except ImportError:
+ HAS_CRYPTOGRAPHY_PKCS12 = False
+
+
+class JavaKeystore:
+ def __init__(self, module):
+ self.module = module
+ self.result = dict()
+
+ self.keytool_bin = module.get_bin_path('keytool', True)
+
+ self.certificate = module.params['certificate']
+ self.keypass = module.params['private_key_passphrase']
+ self.keystore_path = module.params['dest']
+ self.name = module.params['name']
+ self.password = module.params['password']
+ self.private_key = module.params['private_key']
+ self.ssl_backend = module.params['ssl_backend']
+ self.keystore_type = module.params['keystore_type']
+
+ if self.ssl_backend == 'openssl':
+ self.openssl_bin = module.get_bin_path('openssl', True)
+ else:
+ if not HAS_CRYPTOGRAPHY_PKCS12:
+ self.module.fail_json(msg=missing_required_lib('cryptography >= 3.0'))
+
+ if module.params['certificate_path'] is None:
+ self.certificate_path = create_file(self.certificate)
+ self.module.add_cleanup_file(self.certificate_path)
+ else:
+ self.certificate_path = module.params['certificate_path']
+
+ if module.params['private_key_path'] is None:
+ self.private_key_path = create_file(self.private_key)
+ self.module.add_cleanup_file(self.private_key_path)
+ else:
+ self.private_key_path = module.params['private_key_path']
+
+ def update_permissions(self):
+ file_args = self.module.load_file_common_arguments(self.module.params, path=self.keystore_path)
+ return self.module.set_fs_attributes_if_different(file_args, False)
+
+ def read_certificate_fingerprint(self, cert_format='PEM'):
+ if self.ssl_backend == 'cryptography':
+ if cert_format == 'PEM':
+ cert_loader = load_pem_x509_certificate
+ else:
+ cert_loader = load_der_x509_certificate
+
+ try:
+ with open(self.certificate_path, 'rb') as cert_file:
+ cert = cert_loader(
+ cert_file.read(),
+ backend=backend
+ )
+ except (OSError, ValueError) as e:
+ self.module.fail_json(msg="Unable to read the provided certificate: %s" % to_native(e))
+
+ fp = hex_decode(cert.fingerprint(hashes.SHA256())).upper()
+ fingerprint = ':'.join([fp[i:i + 2] for i in range(0, len(fp), 2)])
+ else:
+ current_certificate_fingerprint_cmd = [
+ self.openssl_bin, "x509", "-noout", "-in", self.certificate_path, "-fingerprint", "-sha256"
+ ]
+ (rc, current_certificate_fingerprint_out, current_certificate_fingerprint_err) = self.module.run_command(
+ current_certificate_fingerprint_cmd,
+ environ_update=None,
+ check_rc=False
+ )
+ if rc != 0:
+ return self.module.fail_json(
+ msg=current_certificate_fingerprint_out,
+ err=current_certificate_fingerprint_err,
+ cmd=current_certificate_fingerprint_cmd,
+ rc=rc
+ )
+
+ current_certificate_match = re.search(r"=([\w:]+)", current_certificate_fingerprint_out)
+ if not current_certificate_match:
+ return self.module.fail_json(
+ msg="Unable to find the current certificate fingerprint in %s" % (
+ current_certificate_fingerprint_out
+ ),
+ cmd=current_certificate_fingerprint_cmd,
+ rc=rc
+ )
+
+ fingerprint = current_certificate_match.group(1)
+ return fingerprint
+
+ def read_stored_certificate_fingerprint(self):
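+ # Returns the stored certificate's SHA256 fingerprint, or a sentinel string
+ # (such as "alias mismatch") that can never equal a fingerprint, so that
+ # cert_changed() reports a change.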
+ stored_certificate_fingerprint_cmd = [
+ self.keytool_bin, "-list", "-alias", self.name,
+ "-keystore", self.keystore_path, "-v"
+ ]
+ (rc, stored_certificate_fingerprint_out, stored_certificate_fingerprint_err) = self.module.run_command(
+ stored_certificate_fingerprint_cmd, data=self.password, check_rc=False)
+ if rc != 0:
+ if "keytool error: java.lang.Exception: Alias <%s> does not exist" % self.name \
+ in stored_certificate_fingerprint_out:
+ return "alias mismatch"
+ if re.match(
+ r'keytool error: java\.io\.IOException: ' +
+ '[Kk]eystore( was tampered with, or)? password was incorrect',
+ stored_certificate_fingerprint_out
+ ):
+ return "password mismatch"
+ return self.module.fail_json(
+ msg=stored_certificate_fingerprint_out,
+ err=stored_certificate_fingerprint_err,
+ cmd=stored_certificate_fingerprint_cmd,
+ rc=rc
+ )
+
+ if self.keystore_type not in (None, self.current_type()):
+ return "keystore type mismatch"
+
+ stored_certificate_match = re.search(r"SHA256: ([\w:]+)", stored_certificate_fingerprint_out)
+ if not stored_certificate_match:
+ return self.module.fail_json(
+ msg="Unable to find the stored certificate fingerprint in %s" % stored_certificate_fingerprint_out,
+ cmd=stored_certificate_fingerprint_cmd,
+ rc=rc
+ )
+
+ return stored_certificate_match.group(1)
+
+ def current_type(self):
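+ # JKS keystores begin with the four magic bytes 0xFEEDFEED; anything else
+ # is assumed to be PKCS#12.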
+ magic_bytes = b'\xfe\xed\xfe\xed'
+ with open(self.keystore_path, 'rb') as fd:
+ header = fd.read(4)
+ if header == magic_bytes:
+ return 'jks'
+ return 'pkcs12'
+
+ def cert_changed(self):
+ current_certificate_fingerprint = self.read_certificate_fingerprint()
+ stored_certificate_fingerprint = self.read_stored_certificate_fingerprint()
+ return current_certificate_fingerprint != stored_certificate_fingerprint
+
+ def cryptography_create_pkcs12_bundle(self, keystore_p12_path, key_format='PEM', cert_format='PEM'):
+ if key_format == 'PEM':
+ key_loader = load_pem_private_key
+ else:
+ key_loader = load_der_private_key
+
+ if cert_format == 'PEM':
+ cert_loader = load_pem_x509_certificate
+ else:
+ cert_loader = load_der_x509_certificate
+
+ try:
+ with open(self.private_key_path, 'rb') as key_file:
+ private_key = key_loader(
+ key_file.read(),
+ password=to_bytes(self.keypass),
+ backend=backend
+ )
+ except TypeError:
+ # Re-attempt with no password to match existing behavior
+ try:
+ with open(self.private_key_path, 'rb') as key_file:
+ private_key = key_loader(
+ key_file.read(),
+ password=None,
+ backend=backend
+ )
+ except (OSError, TypeError, ValueError, UnsupportedAlgorithm) as e:
+ self.module.fail_json(
+ msg="The following error occurred while loading the provided private_key: %s" % to_native(e)
+ )
+ except (OSError, ValueError, UnsupportedAlgorithm) as e:
+ self.module.fail_json(
+ msg="The following error occurred while loading the provided private_key: %s" % to_native(e)
+ )
+ try:
+ with open(self.certificate_path, 'rb') as cert_file:
+ cert = cert_loader(
+ cert_file.read(),
+ backend=backend
+ )
+ except (OSError, ValueError, UnsupportedAlgorithm) as e:
+ self.module.fail_json(
+ msg="The following error occurred while loading the provided certificate: %s" % to_native(e)
+ )
+
+ if self.password:
+ encryption = BestAvailableEncryption(to_bytes(self.password))
+ else:
+ encryption = NoEncryption()
+
+ pkcs12_bundle = serialize_key_and_certificates(
+ name=to_bytes(self.name),
+ key=private_key,
+ cert=cert,
+ cas=None,
+ encryption_algorithm=encryption
+ )
+
+ with open(keystore_p12_path, 'wb') as p12_file:
+ p12_file.write(pkcs12_bundle)
+
+ self.result.update(msg="PKCS#12 bundle created by cryptography backend")
+
+ def openssl_create_pkcs12_bundle(self, keystore_p12_path):
+ export_p12_cmd = [self.openssl_bin, "pkcs12", "-export", "-name", self.name, "-in", self.certificate_path,
+ "-inkey", self.private_key_path, "-out", keystore_p12_path, "-passout", "stdin"]
+
+ # when keypass is provided, add -passin
+ cmd_stdin = ""
+ if self.keypass:
+ export_p12_cmd.append("-passin")
+ export_p12_cmd.append("stdin")
+ cmd_stdin = "%s\n" % self.keypass
+ cmd_stdin += "%s\n%s" % (self.password, self.password)
+
+ (rc, export_p12_out, export_p12_err) = self.module.run_command(
+ export_p12_cmd, data=cmd_stdin, environ_update=None, check_rc=False
+ )
+
+ self.result = dict(msg=export_p12_out, cmd=export_p12_cmd, rc=rc)
+ if rc != 0:
+ self.result['err'] = export_p12_err
+ self.module.fail_json(**self.result)
+
+ def create(self):
+ """Create the keystore, or replace it with a rollback in case of
+ keytool failure.
+ """
+ if self.module.check_mode:
+ self.result['changed'] = True
+ return self.result
+
+ keystore_p12_path = create_path()
+ self.module.add_cleanup_file(keystore_p12_path)
+
+ if self.ssl_backend == 'cryptography':
+ self.cryptography_create_pkcs12_bundle(keystore_p12_path)
+ else:
+ self.openssl_create_pkcs12_bundle(keystore_p12_path)
+
+ if self.keystore_type == 'pkcs12':
+ # Preserve properties of the destination file, if any.
+ self.module.atomic_move(keystore_p12_path, self.keystore_path)
+ self.update_permissions()
+ self.result['changed'] = True
+ return self.result
+
+ import_keystore_cmd = [self.keytool_bin, "-importkeystore",
+ "-destkeystore", self.keystore_path,
+ "-srckeystore", keystore_p12_path,
+ "-srcstoretype", "pkcs12",
+ "-alias", self.name,
+ "-noprompt"]
+
+ if self.keystore_type == 'jks':
+ keytool_help = self.module.run_command([self.keytool_bin, '-importkeystore', '-help'])
+ if '-deststoretype' in keytool_help[1] + keytool_help[2]:
+ import_keystore_cmd.insert(4, "-deststoretype")
+ import_keystore_cmd.insert(5, self.keystore_type)
+
+ keystore_backup = None
+ if self.exists():
+ keystore_backup = self.keystore_path + '.tmpbak'
+ # Preserve properties of the source file
+ self.module.preserved_copy(self.keystore_path, keystore_backup)
+ os.remove(self.keystore_path)
+
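+ # keytool prompts for the destination keystore password twice (it is a new
+ # store at this point) and then for the source keystore password.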
+ (rc, import_keystore_out, import_keystore_err) = self.module.run_command(
+ import_keystore_cmd, data='%s\n%s\n%s' % (self.password, self.password, self.password), check_rc=False
+ )
+
+ self.result = dict(msg=import_keystore_out, cmd=import_keystore_cmd, rc=rc)
+
+ # keytool may return 0 even though the keystore has not been created.
+ if rc != 0 or not self.exists():
+ if keystore_backup is not None:
+ self.module.preserved_copy(keystore_backup, self.keystore_path)
+ os.remove(keystore_backup)
+ self.result['err'] = import_keystore_err
+ return self.module.fail_json(**self.result)
+
+ self.update_permissions()
+ if keystore_backup is not None:
+ os.remove(keystore_backup)
+ self.result['changed'] = True
+ return self.result
+
+ def exists(self):
+ return os.path.exists(self.keystore_path)
+
+
+# Utility functions
+def create_path():
+ fd, tmpfile = tempfile.mkstemp()
+ # close the open descriptor and remove the file: only a fresh path is needed
+ os.close(fd)
+ os.remove(tmpfile)
+ return tmpfile
+
+
+def create_file(content):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ with os.fdopen(tmpfd, 'w') as f:
+ f.write(content)
+ return tmpfile
+
+
+def hex_decode(s):
+ if PY2:
+ return s.decode('hex')
+ return s.hex()
+
+
+def main():
+ choose_between = (['certificate', 'certificate_path'],
+ ['private_key', 'private_key_path'])
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ dest=dict(type='path', required=True),
+ certificate=dict(type='str', no_log=True),
+ certificate_path=dict(type='path'),
+ private_key=dict(type='str', no_log=True),
+ private_key_path=dict(type='path', no_log=False),
+ private_key_passphrase=dict(type='str', no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']),
+ keystore_type=dict(type='str', choices=['jks', 'pkcs12']),
+ force=dict(type='bool', default=False),
+ ),
+ required_one_of=choose_between,
+ mutually_exclusive=choose_between,
+ supports_check_mode=True,
+ add_file_common_args=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ result = dict()
+ jks = JavaKeystore(module)
+
+ if jks.exists():
+ if module.params['force'] or jks.cert_changed():
+ result = jks.create()
+ else:
+ result['changed'] = jks.update_permissions()
+ else:
+ result = jks.create()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jboss.py b/ansible_collections/community/general/plugins/modules/jboss.py
new file mode 100644
index 000000000..b389e7e66
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jboss.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: jboss
+short_description: Deploy applications to JBoss
+description:
+ - Deploy applications to JBoss standalone using the filesystem.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ deployment:
+ required: true
+ description:
+ - The name of the deployment.
+ type: str
+ src:
+ description:
+ - The remote path of the application ear or war to deploy.
+ - Required when I(state=present).
+ - Ignored when I(state=absent).
+ type: path
+ deploy_path:
+ default: /var/lib/jbossas/standalone/deployments
+ description:
+ - The location in the filesystem where the deployment scanner listens.
+ type: path
+ state:
+ choices: [ present, absent ]
+ default: "present"
+ description:
+ - Whether the application should be deployed or undeployed.
+ type: str
+notes:
+ - The JBoss standalone deployment-scanner has to be enabled in standalone.xml.
+ - The module can wait until the I(deployment) file is deployed/undeployed by the deployment-scanner.
+ The duration of the waiting time depends on the scan-interval parameter from standalone.xml.
+ - Ensure no identically named application is deployed through the JBoss CLI.
+seealso:
+- name: WildFly reference
+ description: The complete WildFly documentation.
+ link: https://docs.wildfly.org
+author:
+ - Jeroen Hoekx (@jhoekx)
+'''
+
+EXAMPLES = r"""
+- name: Deploy a hello world application to the default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.0-SNAPSHOT.war
+ deployment: hello.war
+ state: present
+
+- name: Update the hello world application to the non-default deploy_path
+ community.general.jboss:
+ src: /tmp/hello-1.1-SNAPSHOT.war
+ deploy_path: /opt/wildfly/deployment
+ deployment: hello.war
+ state: present
+
+- name: Undeploy the hello world application from the default deploy_path
+ community.general.jboss:
+ deployment: hello.war
+ state: absent
+"""
+
+RETURN = r""" # """
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+
+DEFAULT_DEPLOY_PATH = '/var/lib/jbossas/standalone/deployments'
+
+
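+# The deployment scanner signals progress through marker files placed next to
+# the deployment: NAME.deployed, NAME.undeployed and NAME.failed. The helpers
+# below poll for these markers.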
+def is_deployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.deployed" % deployment))
+
+
+def is_undeployed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.undeployed" % deployment))
+
+
+def is_failed(deploy_path, deployment):
+ return os.path.exists(os.path.join(deploy_path, "%s.failed" % deployment))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ src=dict(type='path'),
+ deployment=dict(type='str', required=True),
+ deploy_path=dict(type='path', default=DEFAULT_DEPLOY_PATH),
+ state=dict(type='str', choices=['absent', 'present'], default='present'),
+ ),
+ required_if=[('state', 'present', ('src',))],
+ supports_check_mode=True
+ )
+
+ result = dict(changed=False)
+
+ src = module.params['src']
+ deployment = module.params['deployment']
+ deploy_path = module.params['deploy_path']
+ state = module.params['state']
+
+ if not os.path.exists(deploy_path):
+ module.fail_json(msg="deploy_path does not exist.")
+
+ if state == 'absent' and src:
+ module.warn('Parameter src is ignored when state=absent')
+ elif state == 'present' and not os.path.exists(src):
+ module.fail_json(msg='Source file %s does not exist.' % src)
+
+ deployed = is_deployed(deploy_path, deployment)
+
+ # === when check_mode ===
+ if module.check_mode:
+ if state == 'present':
+ if not deployed:
+ result['changed'] = True
+
+ elif deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ result['changed'] = True
+
+ elif state == 'absent' and deployed:
+ result['changed'] = True
+
+ module.exit_json(**result)
+ # =======================
+
+ if state == 'present' and not deployed:
+ if is_failed(deploy_path, deployment):
+ # Clean up old failed deployment
+ os.remove(os.path.join(deploy_path, "%s.failed" % deployment))
+
+ module.preserved_copy(src, os.path.join(deploy_path, deployment))
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'present' and deployed:
+ if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ module.preserved_copy(src, os.path.join(deploy_path, deployment))
+ deployed = False
+ while not deployed:
+ deployed = is_deployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Deploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ if state == 'absent' and deployed:
+ os.remove(os.path.join(deploy_path, "%s.deployed" % deployment))
+ while deployed:
+ deployed = not is_undeployed(deploy_path, deployment)
+ if is_failed(deploy_path, deployment):
+ module.fail_json(msg='Undeploying %s failed.' % deployment)
+ time.sleep(1)
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_build.py b/ansible_collections/community/general/plugins/modules/jenkins_build.py
new file mode 100644
index 000000000..4f9520224
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_build.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_build
+short_description: Manage Jenkins builds
+version_added: 2.2.0
+description:
+ - Manage Jenkins builds with Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author:
+ - Brett Milford (@brettmilford)
+ - Tong He (@unnecessary-username)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ args:
+ description:
+ - A dict of key-value parameters to pass to the build.
+ type: dict
+ name:
+ description:
+ - Name of the Jenkins job to build.
+ required: true
+ type: str
+ build_number:
+ description:
+ - An integer which specifies a build of a job. It is required when removing or stopping a build.
+ type: int
+ password:
+ description:
+ - Password to authenticate with the Jenkins server.
+ type: str
+ state:
+ description:
+ - Attribute that specifies if the build is to be created, deleted or stopped.
+ - The C(stopped) state has been added in community.general 3.3.0.
+ default: present
+ choices: ['present', 'absent', 'stopped']
+ type: str
+ token:
+ description:
+ - API token used to authenticate with the Jenkins server.
+ type: str
+ url:
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ type: str
+ user:
+ description:
+ - User to authenticate with the Jenkins server.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Create a jenkins build using basic authentication
+ community.general.jenkins_build:
+ name: "test-check"
+ args:
+ cloud: "test"
+ availability_zone: "test_az"
+ state: present
+ user: admin
+ password: asdfg
+ url: http://localhost:8080
+
+- name: Stop a running jenkins build anonymously
+ community.general.jenkins_build:
+ name: "stop-check"
+ build_number: 3
+ state: stopped
+ url: http://localhost:8080
+
+- name: Delete a jenkins build using token authentication
+ community.general.jenkins_build:
+ name: "delete-experiment"
+ build_number: 30
+ state: absent
+ user: Jenkins
+ token: abcdefghijklmnopqrstuvwxyz123456
+ url: http://localhost:8080
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: str
+ sample: "test-job"
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: str
+ sample: present
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: URL used to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+build_info:
+ description: Build info of the jenkins job.
+ returned: success
+ type: dict
+'''
+
+import traceback
+from time import sleep
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class JenkinsBuild:
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.args = module.params.get('args')
+ self.state = module.params.get('state')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.build_number = module.params.get('build_number')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ }
+
+ self.EXCL_STATE = "excluded state"
+
+ def get_jenkins_connection(self):
+ try:
+ if (self.user and self.password):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif (self.user and self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif (self.user and not (self.password or self.token)):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e))
+
+ def get_next_build(self):
+ try:
+ build_number = self.server.get_job_info(self.name)['nextBuildNumber']
+ except Exception as e:
+ self.module.fail_json(msg='Unable to get job info from Jenkins server, %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ return build_number
+
+ def get_build_status(self):
+ try:
+ response = self.server.get_build_info(self.name, self.build_number)
+ return response
+ except jenkins.JenkinsException:
+ # the build cannot be found, so report it as absent
+ response = {}
+ response["result"] = "ABSENT"
+ return response
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch build information, %s' % to_native(e),
+ exception=traceback.format_exc())
+
+ def present_build(self):
+ self.build_number = self.get_next_build()
+
+ try:
+ if self.args is None:
+ self.server.build_job(self.name)
+ else:
+ self.server.build_job(self.name, self.args)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create build for %s: %s' % (self.jenkins_url, to_native(e)),
+ exception=traceback.format_exc())
+
+ def stopped_build(self):
+ build_info = None
+ try:
+ build_info = self.server.get_build_info(self.name, self.build_number)
+ if build_info['building'] is True:
+ self.server.stop_build(self.name, self.build_number)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to stop build for %s: %s' % (self.jenkins_url, to_native(e)),
+ exception=traceback.format_exc())
+ else:
+ if build_info['building'] is False:
+ self.module.exit_json(**self.result)
+
+ def absent_build(self):
+ try:
+ self.server.delete_build(self.name, self.build_number)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete build for %s: %s' % (self.jenkins_url, to_native(e)),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ build_status = self.get_build_status()
+
+ if build_status['result'] is None:
+ # poll every 10 seconds until the build has a result
+ sleep(10)
+ return self.get_result()
+ else:
+ if self.state == "stopped" and build_status['result'] == "ABORTED":
+ result['changed'] = True
+ result['build_info'] = build_status
+ elif self.state == "absent" and build_status['result'] == "ABSENT":
+ result['changed'] = True
+ result['build_info'] = build_status
+ elif self.state != "absent" and build_status['result'] == "SUCCESS":
+ result['changed'] = True
+ result['build_info'] = build_status
+ else:
+ result['failed'] = True
+ result['build_info'] = build_status
+
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ args=dict(type='dict'),
+ build_number=dict(type='int'),
+ name=dict(required=True),
+ password=dict(no_log=True),
+ state=dict(choices=['present', 'absent', 'stopped'], default="present"),
+ token=dict(no_log=True),
+ url=dict(default="http://localhost:8080"),
+ user=dict(),
+ ),
+ mutually_exclusive=[['password', 'token']],
+ required_if=[['state', 'absent', ['build_number'], True], ['state', 'stopped', ['build_number'], True]],
+ )
+
+ test_dependencies(module)
+ jenkins_build = JenkinsBuild(module)
+
+ if module.params.get('state') == "present":
+ jenkins_build.present_build()
+ elif module.params.get('state') == "stopped":
+ jenkins_build.stopped_build()
+ else:
+ jenkins_build.absent_build()
+
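+ # Give Jenkins a moment to register the action before polling the result.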
+ sleep(10)
+ result = jenkins_build.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job.py b/ansible_collections/community/general/plugins/modules/jenkins_job.py
new file mode 100644
index 000000000..09b006448
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_job.py
@@ -0,0 +1,386 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job
+short_description: Manage Jenkins jobs
+description:
+ - Manage Jenkins jobs by using Jenkins REST API.
+requirements:
+ - "python-jenkins >= 0.4.12"
+author: "Sergio Millan Rodriguez (@sermilrod)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ config:
+ type: str
+ description:
+ - config in XML format.
+ - Required if job does not yet exist.
+ - Mutually exclusive with I(enabled).
+ - Considered if I(state=present).
+ required: false
+ enabled:
+ description:
+ - Whether the job should be enabled or disabled.
+ - Mutually exclusive with I(config).
+ - Considered if I(state=present).
+ type: bool
+ required: false
+ name:
+ type: str
+ description:
+ - Name of the Jenkins job.
+ required: true
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ required: false
+ state:
+ type: str
+ description:
+ - Attribute that specifies if the job has to be created or deleted.
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server, as an alternative to I(password).
+ required: false
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ required: false
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ required: false
+ validate_certs:
+ type: bool
+ default: true
+ description:
+ - If set to C(false), the SSL certificates will not be validated.
+ This should only be set to C(false) on personally controlled sites
+ using self-signed certificates, as it avoids verifying the source site.
+ - The C(python-jenkins) library only handles this by using the environment variable C(PYTHONHTTPSVERIFY).
+ version_added: 2.3.0
+'''
+
+EXAMPLES = '''
+- name: Create a jenkins job using basic authentication
+ community.general.jenkins_job:
+ config: "{{ lookup('file', 'templates/test.xml') }}"
+ name: test
+ password: admin
+ url: http://localhost:8080
+ user: admin
+
+- name: Create a jenkins job using the token
+ community.general.jenkins_job:
+ config: "{{ lookup('template', 'templates/test.xml.j2') }}"
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Delete a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ state: absent
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using basic authentication
+ community.general.jenkins_job:
+ name: test
+ password: admin
+ enabled: false
+ url: http://localhost:8080
+ user: admin
+
+- name: Disable a jenkins job using the token
+ community.general.jenkins_job:
+ name: test
+ token: asdfasfasfasdfasdfadfasfasdfasdfc
+ enabled: false
+ url: http://localhost:8080
+ user: admin
+'''
+
+RETURN = '''
+---
+name:
+ description: Name of the jenkins job.
+ returned: success
+ type: str
+ sample: test-job
+state:
+ description: State of the jenkins job.
+ returned: success
+ type: str
+ sample: present
+enabled:
+ description: Whether the jenkins job is enabled or not.
+ returned: success
+ type: bool
+ sample: true
+user:
+ description: User used for authentication.
+ returned: success
+ type: str
+ sample: admin
+url:
+ description: URL used to connect to the Jenkins server.
+ returned: success
+ type: str
+ sample: https://jenkins.mydomain.com
+'''
+
+import os
+import traceback
+import xml.etree.ElementTree as ET
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ python_jenkins_installed = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ python_jenkins_installed = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class JenkinsJob(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.config = module.params.get('config')
+ self.name = module.params.get('name')
+ self.password = module.params.get('password')
+ self.state = module.params.get('state')
+ self.enabled = module.params.get('enabled')
+ self.token = module.params.get('token')
+ self.user = module.params.get('user')
+ self.jenkins_url = module.params.get('url')
+ self.server = self.get_jenkins_connection()
+
+ self.result = {
+ 'changed': False,
+ 'url': self.jenkins_url,
+ 'name': self.name,
+ 'user': self.user,
+ 'state': self.state,
+ 'diff': {
+ 'before': "",
+ 'after': ""
+ }
+ }
+
+ self.EXCL_STATE = "excluded state"
+ if not module.params['validate_certs']:
+ os.environ['PYTHONHTTPSVERIFY'] = '0'
+
+ def get_jenkins_connection(self):
+ try:
+ if self.user and self.password:
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.password)
+ elif self.user and self.token:
+ return jenkins.Jenkins(self.jenkins_url, self.user, self.token)
+ elif self.user and not (self.password or self.token):
+ return jenkins.Jenkins(self.jenkins_url, self.user)
+ else:
+ return jenkins.Jenkins(self.jenkins_url)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to connect to Jenkins server, %s' % to_native(e), exception=traceback.format_exc())
+
+ def get_job_status(self):
+ try:
+ response = self.server.get_job_info(self.name)
+ if "color" not in response:
+ return self.EXCL_STATE
+ else:
+ return to_native(response['color'])
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to fetch job information, %s' % to_native(e), exception=traceback.format_exc())
+
+ def job_exists(self):
+ try:
+ return bool(self.server.job_exists(self.name))
+ except Exception as e:
+ self.module.fail_json(msg='Unable to validate if job exists, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_config(self):
+ return job_config_to_string(self.config)
+
+ def get_current_config(self):
+ return job_config_to_string(self.server.get_job_config(self.name).encode('utf-8'))
+
+ def has_config_changed(self):
+ # config is optional, if not provided we keep the current config as is
+ if self.config is None:
+ return False
+
+ config_file = self.get_config()
+ machine_file = self.get_current_config()
+
+ self.result['diff']['after'] = config_file
+ self.result['diff']['before'] = machine_file
+
+ if machine_file != config_file:
+ return True
+ return False
+
+ def present_job(self):
+ if self.config is None and self.enabled is None:
+ self.module.fail_json(msg='one of the following params is required on state=present: config,enabled')
+
+ if not self.job_exists():
+ self.create_job()
+ else:
+ self.update_job()
+
+ def has_state_changed(self, status):
+ # Keep the current state if the enabled parameter is not given
+ if self.enabled is None:
+ return False
+
+ return (self.enabled is False and status != "disabled") or (self.enabled is True and status == "disabled")
+
+ def switch_state(self):
+ if self.enabled is False:
+ self.server.disable_job(self.name)
+ else:
+ self.server.enable_job(self.name)
+
+ def update_job(self):
+ try:
+ status = self.get_job_status()
+
+ # Handle job config
+ if self.has_config_changed():
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.server.reconfig_job(self.name, self.get_config())
+
+ # Handle job disable/enable
+ elif status != self.EXCL_STATE and self.has_state_changed(status):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ self.switch_state()
+
+ except Exception as e:
+ self.module.fail_json(msg='Unable to reconfigure job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def create_job(self):
+ if self.config is None:
+ self.module.fail_json(msg='missing required param: config')
+
+ self.result['changed'] = True
+ try:
+ config_file = self.get_config()
+ self.result['diff']['after'] = config_file
+ if not self.module.check_mode:
+ self.server.create_job(self.name, config_file)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to create job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def absent_job(self):
+ if self.job_exists():
+ self.result['changed'] = True
+ self.result['diff']['before'] = self.get_current_config()
+ if not self.module.check_mode:
+ try:
+ self.server.delete_job(self.name)
+ except Exception as e:
+ self.module.fail_json(msg='Unable to delete job, %s for %s' % (to_native(e), self.jenkins_url),
+ exception=traceback.format_exc())
+
+ def get_result(self):
+ result = self.result
+ if self.job_exists():
+ result['enabled'] = self.get_job_status() != "disabled"
+ else:
+ result['enabled'] = None
+ return result
+
+
+def test_dependencies(module):
+ if not python_jenkins_installed:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def job_config_to_string(xml_str):
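+ # Round-trip the XML through ElementTree so that semantically identical
+ # configs serialize identically and compare equal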
+ return ET.tostring(ET.fromstring(xml_str)).decode('ascii')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ config=dict(type='str', required=False),
+ name=dict(type='str', required=True),
+ password=dict(type='str', required=False, no_log=True),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default="present"),
+ enabled=dict(required=False, type='bool'),
+ token=dict(type='str', required=False, no_log=True),
+ url=dict(type='str', required=False, default="http://localhost:8080"),
+ user=dict(type='str', required=False),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['config', 'enabled'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jenkins_job = JenkinsJob(module)
+
+ if module.params.get('state') == "present":
+ jenkins_job.present_job()
+ else:
+ jenkins_job.absent_job()
+
+ result = jenkins_job.get_result()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_job_info.py b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
new file mode 100644
index 000000000..ba6a53117
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_job_info.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: jenkins_job_info
+short_description: Get information about Jenkins jobs
+description:
+ - This module can be used to query information about Jenkins jobs which already exist.
+ - This module was called C(jenkins_job_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - "python-jenkins >= 0.4.12"
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ name:
+ type: str
+ description:
+ - Exact name of the Jenkins job to fetch information about.
+ glob:
+ type: str
+ description:
+ - A shell glob of Jenkins job names to fetch information about.
+ color:
+ type: str
+ description:
+ - Only fetch jobs with the given status color.
+ password:
+ type: str
+ description:
+ - Password to authenticate with the Jenkins server.
+ - This is mutually exclusive with I(token).
+ token:
+ type: str
+ description:
+ - API token used to authenticate with the Jenkins server.
+ - This is mutually exclusive with I(password).
+ url:
+ type: str
+ description:
+ - URL where the Jenkins server is accessible.
+ default: http://localhost:8080
+ user:
+ type: str
+ description:
+ - User to authenticate with the Jenkins server.
+ validate_certs:
+ description:
+ - If set to C(false), the SSL certificates will not be validated.
+ - This should only be set to C(false) on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+author:
+ - "Chris St. Pierre (@stpierre)"
+'''
+
+EXAMPLES = '''
+# Get all Jenkins jobs anonymously
+- community.general.jenkins_job_info:
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using basic auth
+- community.general.jenkins_job_info:
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get all Jenkins jobs using the token
+- community.general.jenkins_job_info:
+ user: admin
+ token: abcdefghijklmnop
+ register: my_jenkins_job_info
+
+# Get info about a single job using basic auth
+- community.general.jenkins_job_info:
+ name: some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about a single job in a folder using basic auth
+- community.general.jenkins_job_info:
+ name: some-folder-name/some-job-name
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about all failing jobs using basic auth
+- community.general.jenkins_job_info:
+ color: red
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+# Get info about passing jobs matching a shell glob using basic auth
+- community.general.jenkins_job_info:
+ glob: some-job-*
+ color: blue
+ user: admin
+ password: hunter2
+ register: my_jenkins_job_info
+
+- name: Get the info from custom URL with token and validate_certs=False
+ community.general.jenkins_job_info:
+ user: admin
+ token: 126df5c60d66c66e3b75b11104a16a8a
+ url: https://jenkins.example.com
+ validate_certs: false
+ register: my_jenkins_job_info
+'''
+
+RETURN = '''
+---
+jobs:
+ description: All jobs found matching the specified criteria
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "name": "test-job",
+ "fullname": "test-folder/test-job",
+ "url": "http://localhost:8080/job/test-job/",
+ "color": "blue"
+ },
+ ]
+'''
+
+import ssl
+import fnmatch
+import traceback
+
+JENKINS_IMP_ERR = None
+try:
+ import jenkins
+ HAS_JENKINS = True
+except ImportError:
+ JENKINS_IMP_ERR = traceback.format_exc()
+ HAS_JENKINS = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def get_jenkins_connection(module):
+ url = module.params["url"]
+ username = module.params.get("user")
+ password = module.params.get("password")
+ token = module.params.get("token")
+
+ validate_certs = module.params.get('validate_certs')
+ if not validate_certs and hasattr(ssl, 'SSLContext'):
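+ # Note: this monkey-patches the default HTTPS context and disables
+ # certificate verification process-wide, not only for this connection.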
+ ssl._create_default_https_context = ssl._create_unverified_context
+ if validate_certs and not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg="Module does not support changing verification mode with python < 2.7.9."
+ " Either update Python or use validate_certs=false.")
+
+ if username and (password or token):
+ return jenkins.Jenkins(url, username, password or token)
+ elif username:
+ return jenkins.Jenkins(url, username)
+ else:
+ return jenkins.Jenkins(url)
+
+
+def test_dependencies(module):
+ if not HAS_JENKINS:
+ module.fail_json(
+ msg=missing_required_lib("python-jenkins",
+ url="https://python-jenkins.readthedocs.io/en/latest/install.html"),
+ exception=JENKINS_IMP_ERR)
+
+
+def get_jobs(module):
+ jenkins_conn = get_jenkins_connection(module)
+ jobs = []
+ if module.params.get("name"):
+ try:
+ job_info = jenkins_conn.get_job_info(module.params.get("name"))
+ except jenkins.NotFoundException:
+ pass
+ else:
+ jobs.append({
+ "name": job_info["name"],
+ "fullname": job_info["fullName"],
+ "url": job_info["url"],
+ "color": job_info["color"]
+ })
+
+ else:
+ all_jobs = jenkins_conn.get_all_jobs()
+ if module.params.get("glob"):
+ jobs.extend(
+ j for j in all_jobs
+ if fnmatch.fnmatch(j["fullname"], module.params.get("glob")))
+ else:
+ jobs = all_jobs
+ # python-jenkins includes the internal Jenkins class used for each job
+ # in its return value; we strip that out because the leading underscore
+ # (and the fact that it's not documented in the python-jenkins docs)
+ # indicates that it's not part of the dependable public interface.
+ for job in jobs:
+ if "_class" in job:
+ del job["_class"]
+
+ if module.params.get("color"):
+ jobs = [j for j in jobs if j["color"] == module.params.get("color")]
+
+ return jobs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str'),
+ glob=dict(type='str'),
+ color=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ url=dict(type='str', default="http://localhost:8080"),
+ user=dict(type='str'),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ['password', 'token'],
+ ['name', 'glob'],
+ ],
+ supports_check_mode=True,
+ )
+
+ test_dependencies(module)
+ jobs = list()
+
+ try:
+ jobs = get_jobs(module)
+ except jenkins.JenkinsException as err:
+ module.fail_json(
+ msg='Unable to connect to Jenkins server, %s' % to_native(err),
+ exception=traceback.format_exc())
+
+ module.exit_json(changed=False, jobs=jobs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_plugin.py b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
new file mode 100644
index 000000000..2fbc83e03
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_plugin.py
@@ -0,0 +1,854 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: jenkins_plugin
+author: Jiri Tyr (@jtyr)
+short_description: Add or remove Jenkins plugin
+description:
+ - Ansible module which helps to manage Jenkins plugins.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ group:
+ type: str
+ description:
+ - Name of the Jenkins group on the OS.
+ default: jenkins
+ jenkins_home:
+ type: path
+ description:
+ - Home directory of the Jenkins user.
+ default: /var/lib/jenkins
+ mode:
+ type: raw
+ description:
+ - File mode applied on versioned plugins.
+ default: '0644'
+ name:
+ type: str
+ description:
+ - Plugin name.
+ required: true
+ owner:
+ type: str
+ description:
+ - Name of the Jenkins user on the OS.
+ default: jenkins
+ state:
+ type: str
+ description:
+ - Desired plugin state.
+ - If C(latest) is set, the check for a new version will be performed
+ every time. This is suitable to keep the plugin up-to-date.
+ choices: [absent, present, pinned, unpinned, enabled, disabled, latest]
+ default: present
+ timeout:
+ type: int
+ description:
+ - Server connection timeout in seconds.
+ default: 30
+ updates_expiration:
+ type: int
+ description:
+ - Number of seconds after which a new copy of the I(update-center.json)
+ file is downloaded. This is used to avoid the need to download the
+ plugin to calculate its checksum when C(latest) is specified.
+ - Set it to C(0) if no cache file should be used. In that case, the
+ plugin file will always be downloaded to calculate its checksum when
+ C(latest) is specified.
+ default: 86400
+ updates_url:
+ type: list
+ elements: str
+ description:
+ - A list of base URL(s) from which to retrieve I(update-center.json) and the plugin files.
+ - This can be a list since community.general 3.3.0.
+ default: ['https://updates.jenkins.io', 'http://mirrors.jenkins.io']
+ update_json_url_segment:
+ type: list
+ elements: str
+ description:
+ - A list of URL segment(s) to retrieve the update center json file from.
+ default: ['update-center.json', 'updates/update-center.json']
+ version_added: 3.3.0
+ latest_plugins_url_segments:
+ type: list
+ elements: str
+ description:
+ - Path inside the I(updates_url) to get latest plugins from.
+ default: ['latest']
+ version_added: 3.3.0
+ versioned_plugins_url_segments:
+ type: list
+ elements: str
+ description:
+ - Path inside the I(updates_url) to get a specific version of plugins from.
+ default: ['download/plugins', 'plugins']
+ version_added: 3.3.0
+ url:
+ type: str
+ description:
+ - URL of the Jenkins server.
+ default: http://localhost:8080
+ version:
+ type: str
+ description:
+ - Plugin version number.
+ - If this option is specified, all plugin dependencies must be installed
+ manually.
+ - It might take longer to verify that the correct version is installed.
+ This is especially true if a specific version number is specified.
+ - Quote the version to prevent the value from being interpreted as a float. For
+ example, if C(1.20) were unquoted, it would become C(1.2).
+ with_dependencies:
+ description:
+ - Defines whether to install plugin dependencies.
+ - This option takes effect only if the I(version) is not defined.
+ type: bool
+ default: true
+
+notes:
+ - Plugin installation should be run under root or the same user which owns
+ the plugin files on the disk. The API installation, which requires only
+ the Web UI credentials, is performed only if the plugin is not installed
+ yet and no version is specified.
+ - It's necessary to notify the handler or call the I(service) module to
+ restart the Jenkins service after a new plugin was installed.
+ - Pinning works only if the plugin is installed and Jenkins service was
+ successfully restarted after the plugin installation.
+ - It is not possible to run the module remotely by changing the I(url)
+ parameter to point to the Jenkins server. The module must be used on the
+ host where Jenkins runs as it needs direct access to the plugin files.
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - ansible.builtin.files
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+
+- name: Install plugin without its dependencies
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ with_dependencies: false
+
+- name: Make sure the plugin is always up-to-date
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+
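+# An illustrative variation (not part of the original examples): per the
+# I(updates_expiration) documentation, setting it to C(0) bypasses the cached
+# update-center file, so the plugin is always downloaded to compute its checksum.
+- name: Make sure the plugin is always up-to-date without using the cache
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: latest
+ updates_expiration: 0
+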
+- name: Install specific version of the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ version: "1.15"
+
+- name: Pin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: pinned
+
+- name: Unpin the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: unpinned
+
+- name: Enable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: enabled
+
+- name: Disable the plugin
+ community.general.jenkins_plugin:
+ name: token-macro
+ state: disabled
+
+- name: Uninstall plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ state: absent
+
+#
+# Example of how to authenticate
+#
+- name: Install plugin
+ community.general.jenkins_plugin:
+ name: build-pipeline-plugin
+ url_username: admin
+ url_password: p4ssw0rd
+ url: http://localhost:8888
+
+#
+# Example of a Play which handles Jenkins restarts during the state changes
+#
+- name: Jenkins Master play
+ hosts: jenkins-master
+ vars:
+ my_jenkins_plugins:
+ token-macro:
+ enabled: true
+ build-pipeline-plugin:
+ version: "1.4.9"
+ pinned: false
+ enabled: true
+ tasks:
+ - name: Install plugins without a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ register: my_jenkins_plugin_unversioned
+ when: >
+ 'version' not in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Install plugins with a specific version
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ version: "{{ item.value['version'] }}"
+ register: my_jenkins_plugin_versioned
+ when: >
+ 'version' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Initiate the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: false
+
+ - name: Check if restart is required by any of the versioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: true
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_versioned.results }}"
+
+ - name: Check if restart is required by any of the unversioned plugins
+ ansible.builtin.set_fact:
+ jenkins_restart_required: true
+ when: item.changed
+ with_items: "{{ my_jenkins_plugin_unversioned.results }}"
+
+ - name: Restart Jenkins if required
+ ansible.builtin.service:
+ name: jenkins
+ state: restarted
+ when: jenkins_restart_required
+
+ - name: Wait for Jenkins to start up
+ ansible.builtin.uri:
+ url: http://localhost:8080
+ status_code: 200
+ timeout: 5
+ register: jenkins_service_status
+ # Keep trying for 5 mins in 5 sec intervals
+ retries: 60
+ delay: 5
+ until: >
+ 'status' in jenkins_service_status and
+ jenkins_service_status['status'] == 200
+ when: jenkins_restart_required
+
+ - name: Reset the fact
+ ansible.builtin.set_fact:
+ jenkins_restart_required: false
+ when: jenkins_restart_required
+
+ - name: Plugin pinning
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'pinned' if item.value['pinned'] else 'unpinned'}}"
+ when: >
+ 'pinned' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+
+ - name: Plugin enabling
+ community.general.jenkins_plugin:
+ name: "{{ item.key }}"
+ state: "{{ 'enabled' if item.value['enabled'] else 'disabled'}}"
+ when: >
+ 'enabled' in item.value
+ with_dict: "{{ my_jenkins_plugins }}"
+'''
+
+RETURN = '''
+plugin:
+ description: plugin name
+ returned: success
+ type: str
+ sample: build-pipeline-plugin
+state:
+ description: state of the target, after execution
+ returned: success
+ type: str
+ sample: "present"
+'''
+
+import hashlib
+import io
+import json
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule, to_bytes
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+from ansible.module_utils.six import text_type, binary_type
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.jenkins import download_updates_file
+
+
+class FailedInstallingWithPluginManager(Exception):
+ pass
+
+
+class JenkinsPlugin(object):
+ def __init__(self, module):
+ # To be able to call fail_json
+ self.module = module
+
+ # Shortcuts for the params
+ self.params = self.module.params
+ self.url = self.params['url']
+ self.timeout = self.params['timeout']
+
+ # Crumb
+ self.crumb = {}
+ # Cookie jar for crumb session
+ self.cookies = None
+
+ if self._csrf_enabled():
+ self.cookies = cookiejar.LWPCookieJar()
+ self.crumb = self._get_crumb()
+
+ # Get list of installed plugins
+ self._get_installed_plugins()
+
+ def _csrf_enabled(self):
+ csrf_data = self._get_json_data(
+ "%s/%s" % (self.url, "api/json"), 'CSRF')
+
+ if 'useCrumbs' not in csrf_data:
+ self.module.fail_json(
+ msg="Required fields not found in the Crumbs response.",
+ details=csrf_data)
+
+ return csrf_data['useCrumbs']
+
+ def _get_json_data(self, url, what, **kwargs):
+ # Get the JSON data
+ r = self._get_url_data(url, what, **kwargs)
+
+ # Parse the JSON data
+ try:
+ json_data = json.loads(to_native(r.read()))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot parse %s JSON data." % what,
+ details=to_native(e))
+
+ return json_data
+
+ def _get_urls_data(self, urls, what=None, msg_status=None, msg_exception=None, **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ errors = {}
+ for url in urls:
+ err_msg = None
+ try:
+ self.module.debug("fetching url: %s" % url)
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] == 200:
+ return response
+ else:
+ err_msg = ("%s. fetching url %s failed. response code: %s" % (msg_status, url, info['status']))
+ if info['status'] > 400: # extend error message
+ err_msg = "%s. response body: %s" % (err_msg, info['body'])
+ except Exception as e:
+ err_msg = "%s. fetching url %s failed. error msg: %s" % (msg_status, url, to_native(e))
+ finally:
+ if err_msg is not None:
+ self.module.debug(err_msg)
+ errors[url] = err_msg
+
+ # failed on all urls
+ self.module.fail_json(msg=msg_exception, details=errors)
+
+ def _get_url_data(
+ self, url, what=None, msg_status=None, msg_exception=None,
+ dont_fail=False, **kwargs):
+ # Compose default messages
+ if msg_status is None:
+ msg_status = "Cannot get %s" % what
+
+ if msg_exception is None:
+ msg_exception = "Retrieval of %s failed." % what
+
+ # Get the URL data
+ try:
+ response, info = fetch_url(
+ self.module, url, timeout=self.timeout, cookies=self.cookies,
+ headers=self.crumb, **kwargs)
+
+ if info['status'] != 200:
+ if dont_fail:
+ raise FailedInstallingWithPluginManager(info['msg'])
+ else:
+ self.module.fail_json(msg=msg_status, details=info['msg'])
+ except Exception as e:
+ if dont_fail:
+ raise FailedInstallingWithPluginManager(e)
+ else:
+ self.module.fail_json(msg=msg_exception, details=to_native(e))
+
+ return response
+
+ def _get_crumb(self):
+ crumb_data = self._get_json_data(
+ "%s/%s" % (self.url, "crumbIssuer/api/json"), 'Crumb')
+
+ if 'crumbRequestField' in crumb_data and 'crumb' in crumb_data:
+ ret = {
+ crumb_data['crumbRequestField']: crumb_data['crumb']
+ }
+ else:
+ self.module.fail_json(
+ msg="Required fields not found in the Crum response.",
+ details=crumb_data)
+
+ return ret
+
+ def _get_installed_plugins(self):
+ plugins_data = self._get_json_data(
+ "%s/%s" % (self.url, "pluginManager/api/json?depth=1"),
+ 'list of plugins')
+
+ # Check if we got valid data
+ if 'plugins' not in plugins_data:
+ self.module.fail_json(msg="No valid plugin data found.")
+
+ # Create final list of installed/pinned plugins
+ self.is_installed = False
+ self.is_pinned = False
+ self.is_enabled = False
+
+ for p in plugins_data['plugins']:
+ if p['shortName'] == self.params['name']:
+ self.is_installed = True
+
+ if p['pinned']:
+ self.is_pinned = True
+
+ if p['enabled']:
+ self.is_enabled = True
+
+ break
+
+ def _install_with_plugin_manager(self):
+ if not self.module.check_mode:
+ # Install the plugin (with dependencies)
+ install_script = (
+ 'd = Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.deploy(); d.get();' % self.params['name'])
+
+ if self.params['with_dependencies']:
+ install_script = (
+ 'Jenkins.instance.updateCenter.getPlugin("%s")'
+ '.getNeededDependencies().each{it.deploy()}; %s' % (
+ self.params['name'], install_script))
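+ # For example, for name=token-macro with dependencies this evaluates to:
+ # Jenkins.instance.updateCenter.getPlugin("token-macro")
+ # .getNeededDependencies().each{it.deploy()};
+ # d = Jenkins.instance.updateCenter.getPlugin("token-macro").deploy(); d.get();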
+
+ script_data = {
+ 'script': install_script
+ }
+ data = urlencode(script_data)
+
+ # Send the installation request
+ r = self._get_url_data(
+ "%s/scriptText" % self.url,
+ msg_status="Cannot install plugin.",
+ msg_exception="Plugin installation has failed.",
+ data=data,
+ dont_fail=True)
+
+ hpi_file = '%s/plugins/%s.hpi' % (
+ self.params['jenkins_home'],
+ self.params['name'])
+
+ if os.path.isfile(hpi_file):
+ os.remove(hpi_file)
+
+ def install(self):
+ changed = False
+ plugin_file = (
+ '%s/plugins/%s.jpi' % (
+ self.params['jenkins_home'],
+ self.params['name']))
+
+ if not self.is_installed and self.params['version'] in [None, 'latest']:
+ try:
+ self._install_with_plugin_manager()
+ changed = True
+ except FailedInstallingWithPluginManager: # Fallback to manually downloading the plugin
+ pass
+
+ if not changed:
+ # Check if the plugin directory exists
+ if not os.path.isdir(self.params['jenkins_home']):
+ self.module.fail_json(
+ msg="Jenkins home directory doesn't exist.")
+
+ checksum_old = None
+ if os.path.isfile(plugin_file):
+ # Make the checksum of the currently installed plugin
+ with open(plugin_file, 'rb') as plugin_fh:
+ plugin_content = plugin_fh.read()
+ checksum_old = hashlib.sha1(plugin_content).hexdigest()
+
+ if self.params['version'] in [None, 'latest']:
+ # Take latest version
+ plugin_urls = self._get_latest_plugin_urls()
+ else:
+ # Take specific version
+ plugin_urls = self._get_versioned_plugin_urls()
+ if (
+ self.params['updates_expiration'] == 0 or
+ self.params['version'] not in [None, 'latest'] or
+ checksum_old is None):
+
+ # Download the plugin file directly
+ r = self._download_plugin(plugin_urls)
+
+ # Write downloaded plugin into file if checksums don't match
+ if checksum_old is None:
+ # No previously installed plugin
+ if not self.module.check_mode:
+ self._write_file(plugin_file, r)
+
+ changed = True
+ else:
+ # Get data for the MD5
+ data = r.read()
+
+ # Make new checksum
+ checksum_new = hashlib.sha1(data).hexdigest()
+
+ # If the checksum is different from the currently installed
+ # plugin, store the new plugin
+ if checksum_old != checksum_new:
+ if not self.module.check_mode:
+ self._write_file(plugin_file, data)
+
+ changed = True
+ elif self.params['version'] == 'latest':
+ # Check for update from the updates JSON file
+ plugin_data = self._download_updates()
+
+ # If the latest version changed, download it
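+ # NOTE: checksum_old is a hex digest while the update-center 'sha1' value
+ # appears to be base64-encoded, so this comparison may always report a change.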
+ if checksum_old != to_bytes(plugin_data['sha1']):
+ if not self.module.check_mode:
+ r = self._download_plugin(plugin_urls)
+ self._write_file(plugin_file, r)
+
+ changed = True
+
+ # Change file attributes if needed
+ if os.path.isfile(plugin_file):
+ params = {
+ 'dest': plugin_file
+ }
+ params.update(self.params)
+ file_args = self.module.load_file_common_arguments(params)
+
+ if not self.module.check_mode:
+ # Not sure how to run this in the check mode
+ changed = self.module.set_fs_attributes_if_different(
+ file_args, changed)
+ else:
+ # See the comment above
+ changed = True
+
+ return changed
+
+ def _get_latest_plugin_urls(self):
+ urls = []
+ for base_url in self.params['updates_url']:
+ for update_segment in self.params['latest_plugins_url_segments']:
+ urls.append("{0}/{1}/{2}.hpi".format(base_url, update_segment, self.params['name']))
+ return urls
+
+ def _get_versioned_plugin_urls(self):
+ urls = []
+ for base_url in self.params['updates_url']:
+ for versioned_segment in self.params['versioned_plugins_url_segments']:
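+ # e.g. https://updates.jenkins.io/download/plugins/token-macro/1.15/token-macro.hpi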
+ urls.append("{0}/{1}/{2}/{3}/{2}.hpi".format(base_url, versioned_segment, self.params['name'], self.params['version']))
+ return urls
+
+ def _get_update_center_urls(self):
+ urls = []
+ for base_url in self.params['updates_url']:
+ for update_json in self.params['update_json_url_segment']:
+ urls.append("{0}/{1}".format(base_url, update_json))
+ return urls
+
+ def _download_updates(self):
+ try:
+ updates_file, download_updates = download_updates_file(self.params['updates_expiration'])
+ except OSError as e:
+ self.module.fail_json(
+ msg="Cannot create temporal directory.",
+ details=to_native(e))
+
+ # Download the updates file if needed
+ if download_updates:
+ urls = self._get_update_center_urls()
+
+ # Get the data
+ r = self._get_urls_data(
+ urls,
+ msg_status="Remote updates not found.",
+ msg_exception="Updates download failed.")
+
+ # Write the updates file
+ tmp_update_fd, tmp_updates_file = tempfile.mkstemp()
+ os.write(tmp_update_fd, r.read())
+
+ try:
+ os.close(tmp_update_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot close the tmp updates file %s." % tmp_updates_file,
+ details=to_native(e))
+ else:
+ tmp_updates_file = updates_file
+
+ # Open the updates file
+ try:
+ f = io.open(tmp_updates_file, encoding='utf-8')
+
+ # Read only the second line
+ dummy = f.readline()
+ data = json.loads(f.readline())
+ except IOError as e:
+ self.module.fail_json(
+ msg="Cannot open%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""),
+ details=to_native(e))
+ except Exception as e:
+ self.module.fail_json(
+ msg="Cannot load JSON data from the%s updates file." % (" temporary" if tmp_updates_file != updates_file else ""),
+ details=to_native(e))
+
+ # Move the updates file to the right place if we could read it
+ if tmp_updates_file != updates_file:
+ self.module.atomic_move(tmp_updates_file, updates_file)
+
+ # Check if we have the plugin data available
+ if not data.get('plugins', {}).get(self.params['name']):
+ self.module.fail_json(msg="Cannot find plugin data in the updates file.")
+
+ return data['plugins'][self.params['name']]
+
+ def _download_plugin(self, plugin_urls):
+ # Download the plugin
+
+ return self._get_urls_data(
+ plugin_urls,
+ msg_status="Plugin not found.",
+ msg_exception="Plugin download failed.")
+
+ def _write_file(self, f, data):
+ # Store the plugin into a temp file and then move it
+ tmp_f_fd, tmp_f = tempfile.mkstemp()
+
+ if isinstance(data, (text_type, binary_type)):
+ os.write(tmp_f_fd, data)
+ else:
+ os.write(tmp_f_fd, data.read())
+
+ try:
+ os.close(tmp_f_fd)
+ except IOError as e:
+ self.module.fail_json(
+ msg='Cannot close the temporary plugin file %s.' % tmp_f,
+ details=to_native(e))
+
+ # Move the file onto the right place
+ self.module.atomic_move(tmp_f, f)
+
+ def uninstall(self):
+ changed = False
+
+ # Perform the action
+ if self.is_installed:
+ if not self.module.check_mode:
+ self._pm_query('doUninstall', 'Uninstallation')
+
+ changed = True
+
+ return changed
+
+ def pin(self):
+ return self._pinning('pin')
+
+ def unpin(self):
+ return self._pinning('unpin')
+
+ def _pinning(self, action):
+ changed = False
+
+ # Check if the plugin is pinned/unpinned
+ if (
+ action == 'pin' and not self.is_pinned or
+ action == 'unpin' and self.is_pinned):
+
+ # Perform the action
+ if not self.module.check_mode:
+ self._pm_query(action, "%sning" % action.capitalize())
+
+ changed = True
+
+ return changed
+
+ def enable(self):
+ return self._enabling('enable')
+
+ def disable(self):
+ return self._enabling('disable')
+
+ def _enabling(self, action):
+ changed = False
+
+ # Check if the plugin is enabled/disabled
+ if (
+ action == 'enable' and not self.is_enabled or
+ action == 'disable' and self.is_enabled):
+
+ # Perform the action
+ if not self.module.check_mode:
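+ # 'enable' maps to the makeEnabled endpoint, 'disable' to makeDisabled.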
+ self._pm_query(
+ "make%sd" % action.capitalize(),
+ "%sing" % action[:-1].capitalize())
+
+ changed = True
+
+ return changed
+
+ def _pm_query(self, action, msg):
+ url = "%s/pluginManager/plugin/%s/%s" % (
+ self.params['url'], self.params['name'], action)
+
+ # Send the request
+ self._get_url_data(
+ url,
+ msg_status="Plugin not found. %s" % url,
+ msg_exception="%s has failed." % msg,
+ method="POST")
+
+
+def main():
+ # Module arguments
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ group=dict(type='str', default='jenkins'),
+ jenkins_home=dict(type='path', default='/var/lib/jenkins'),
+ mode=dict(default='0644', type='raw'),
+ name=dict(type='str', required=True),
+ owner=dict(type='str', default='jenkins'),
+ state=dict(
+ choices=[
+ 'present',
+ 'absent',
+ 'pinned',
+ 'unpinned',
+ 'enabled',
+ 'disabled',
+ 'latest'],
+ default='present'),
+ timeout=dict(default=30, type="int"),
+ updates_expiration=dict(default=86400, type="int"),
+ updates_url=dict(type="list", elements="str", default=['https://updates.jenkins.io',
+ 'http://mirrors.jenkins.io']),
+ update_json_url_segment=dict(type="list", elements="str", default=['update-center.json',
+ 'updates/update-center.json']),
+ latest_plugins_url_segments=dict(type="list", elements="str", default=['latest']),
+ versioned_plugins_url_segments=dict(type="list", elements="str", default=['download/plugins', 'plugins']),
+ url=dict(default='http://localhost:8080'),
+ url_password=dict(no_log=True),
+ version=dict(),
+ with_dependencies=dict(default=True, type='bool'),
+ )
+ # Module settings
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ # Force basic authentication
+ module.params['force_basic_auth'] = True
+
+ # Convert timeout to float
+ try:
+ module.params['timeout'] = float(module.params['timeout'])
+ except ValueError as e:
+ module.fail_json(
+ msg='Cannot convert %s to float.' % module.params['timeout'],
+ details=to_native(e))
+
+ # Set version to latest if state is latest
+ if module.params['state'] == 'latest':
+ module.params['state'] = 'present'
+ module.params['version'] = 'latest'
+
+ # Create some shortcuts
+ name = module.params['name']
+ state = module.params['state']
+
+ # Initial change state of the task
+ changed = False
+
+ # Instantiate the JenkinsPlugin object
+ jp = JenkinsPlugin(module)
+
+ # Perform action depending on the requested state
+ if state == 'present':
+ changed = jp.install()
+ elif state == 'absent':
+ changed = jp.uninstall()
+ elif state == 'pinned':
+ changed = jp.pin()
+ elif state == 'unpinned':
+ changed = jp.unpin()
+ elif state == 'enabled':
+ changed = jp.enable()
+ elif state == 'disabled':
+ changed = jp.disable()
+
+ # Print status of the change
+ module.exit_json(changed=changed, plugin=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jenkins_script.py b/ansible_collections/community/general/plugins/modules/jenkins_script.py
new file mode 100644
index 000000000..7f83ebcdb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jenkins_script.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, James Hogarth <james.hogarth@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: James Hogarth (@hogarthj)
+module: jenkins_script
+short_description: Executes a Groovy script in the Jenkins instance
+description:
+ - The C(jenkins_script) module takes a script plus a dict of values
+ to use within the script and returns the result of the script being run.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ script:
+ type: str
+ description:
+ - The Groovy script to be executed.
+ This gets passed as a string Template if I(args) is defined.
+ required: true
+ url:
+ type: str
+ description:
+ - The Jenkins server to execute the script against. The default is a local
+ Jenkins instance that is not being proxied through a webserver.
+ default: http://localhost:8080
+ validate_certs:
+ description:
+ - If set to C(false), the SSL certificates will not be validated.
+ This should only be set to C(false) on personally controlled sites
+ using self-signed certificates as it avoids verifying the source site.
+ type: bool
+ default: true
+ user:
+ type: str
+ description:
+ - The username to connect to the jenkins server with.
+ password:
+ type: str
+ description:
+ - The password to connect to the jenkins server with.
+ timeout:
+ type: int
+ description:
+ - The request timeout in seconds.
+ default: 10
+ args:
+ type: dict
+ description:
+ - A dict of key-value pairs used in formatting the script using string.Template (see https://docs.python.org/2/library/string.html#template-strings).
+
+notes:
+ - Since the script can do anything, this module does not report on changes.
+ As the script is always run, it is important to set C(changed_when)
+ for the Ansible output to be clear on any alterations made.
+
+'''
+
+EXAMPLES = '''
+- name: Obtaining a list of plugins
+ community.general.jenkins_script:
+ script: 'println(Jenkins.instance.pluginManager.plugins)'
+ user: admin
+ password: admin
+
+- name: Setting master using a variable to hold a more complicated script
+ ansible.builtin.set_fact:
+ setmaster_mode: |
+ import jenkins.model.*
+ instance = Jenkins.getInstance()
+ instance.setMode(${jenkins_mode})
+ instance.save()
+
+- name: Use the variable as the script
+ community.general.jenkins_script:
+ script: "{{ setmaster_mode }}"
+ args:
+ jenkins_mode: Node.Mode.EXCLUSIVE
+
+- name: Interacting with an untrusted HTTPS connection
+ community.general.jenkins_script:
+ script: "println(Jenkins.instance.pluginManager.plugins)"
+ user: admin
+ password: admin
+ url: https://localhost
+ validate_certs: false
+'''
+
+RETURN = '''
+output:
+ description: Result of script
+ returned: success
+ type: str
+ sample: 'Result: true'
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import http_cookiejar as cookiejar
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+def is_csrf_protection_enabled(module):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/api/json',
+ timeout=module.params['timeout'],
+ method='GET')
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content).get('useCrumbs', False)
+
+
+def get_crumb(module, cookies):
+ resp, info = fetch_url(module,
+ module.params['url'] + '/crumbIssuer/api/json',
+ method='GET',
+ timeout=module.params['timeout'],
+ cookies=cookies)
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ content = to_native(resp.read())
+ return json.loads(content)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ script=dict(required=True, type="str"),
+ url=dict(required=False, type="str", default="http://localhost:8080"),
+ validate_certs=dict(required=False, type="bool", default=True),
+ user=dict(required=False, type="str", default=None),
+ password=dict(required=False, no_log=True, type="str", default=None),
+ timeout=dict(required=False, type="int", default=10),
+ args=dict(required=False, type="dict", default=None)
+ )
+ )
+
+ if module.params['user'] is not None:
+ if module.params['password'] is None:
+ module.fail_json(msg="password required when user provided", output='')
+ module.params['url_username'] = module.params['user']
+ module.params['url_password'] = module.params['password']
+ module.params['force_basic_auth'] = True
+
+ if module.params['args'] is not None:
+ from string import Template
+ try:
+ script_contents = Template(module.params['script']).substitute(module.params['args'])
+ except KeyError as err:
+ module.fail_json(msg="Error with templating variable: %s" % err, output='')
+ else:
+ script_contents = module.params['script']
+
+ headers = {}
+ cookies = None
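+ # Jenkins ties the CSRF crumb to a session, so the same cookie jar must be
+ # reused for the crumb request and the script submission below.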
+ if is_csrf_protection_enabled(module):
+ cookies = cookiejar.LWPCookieJar()
+ crumb = get_crumb(module, cookies)
+ headers = {crumb['crumbRequestField']: crumb['crumb']}
+
+ resp, info = fetch_url(module,
+ module.params['url'] + "/scriptText",
+ data=urlencode({'script': script_contents}),
+ headers=headers,
+ method="POST",
+ timeout=module.params['timeout'],
+ cookies=cookies)
+
+ if info["status"] != 200:
+ module.fail_json(msg="HTTP error " + str(info["status"]) + " " + info["msg"], output='')
+
+ result = to_native(resp.read())
+
+ if 'Exception:' in result and 'at java.lang.Thread' in result:
+ module.fail_json(msg="script failed with stacktrace:\n " + result, output='')
+
+ module.exit_json(
+ output=result,
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/jira.py b/ansible_collections/community/general/plugins/modules/jira.py
new file mode 100644
index 000000000..85097c4b7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/jira.py
@@ -0,0 +1,828 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Steve Smith <ssmith@atlassian.com>
+# Atlassian open-source approval reference OSR-76.
+#
+# Copyright (c) 2020, Per Abildgaard Toft <per@minfejl.dk> Search and update function
+# Copyright (c) 2021, Brandon McNama <brandonmcnama@outlook.com> Issue attachment functionality
+# Copyright (c) 2022, Hugo Prudente <hugo.kenshin+oss@gmail.com> Worklog functionality
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+module: jira
+short_description: Create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ uri:
+ type: str
+ required: true
+ description:
+ - Base URI for the JIRA instance.
+
+ operation:
+ type: str
+ required: true
+ aliases: [ command ]
+ choices: [ attach, comment, create, edit, fetch, link, search, transition, update, worklog ]
+ description:
+ - The operation to perform.
+ - C(worklog) was added in community.general 6.5.0.
+
+ username:
+ type: str
+ description:
+ - The username to log-in with.
+ - Must be used with I(password). Mutually exclusive with I(token).
+
+ password:
+ type: str
+ description:
+ - The password to log-in with.
+ - Must be used with I(username). Mutually exclusive with I(token).
+
+ token:
+ type: str
+ description:
+ - The personal access token to log-in with.
+ - Mutually exclusive with I(username) and I(password).
+ version_added: 4.2.0
+
+ project:
+ type: str
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ type: str
+ required: false
+ description:
+ - The issue summary, where appropriate.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+
+ description:
+ type: str
+ required: false
+ description:
+ - The issue description, where appropriate.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+
+ issuetype:
+ type: str
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ type: str
+ required: false
+ description:
+ - An existing issue key to operate on.
+ aliases: ['ticket']
+
+ comment:
+ type: str
+ required: false
+ description:
+ - The comment text to add.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+
+ comment_visibility:
+ type: dict
+ description:
+ - Used to specify comment visibility.
+ - See U(https://developer.atlassian.com/cloud/jira/platform/rest/v2/api-group-issue-comments/#api-rest-api-2-issue-issueidorkey-comment-post) for details.
+ suboptions:
+ type:
+ description:
+ - Use type to specify which of the JIRA visibility restriction types will be used.
+ type: str
+ required: true
+ choices: [group, role]
+ value:
+ description:
+ - Use value to specify value corresponding to the type of visibility restriction. For example name of the group or role.
+ type: str
+ required: true
+ version_added: '3.2.0'
+
+ status:
+ type: str
+ required: false
+ description:
+ - Only used when I(operation) is C(transition); a bit of a misnomer, it actually refers to the transition name.
+
+ assignee:
+ type: str
+ required: false
+ description:
+ - Sets the assignee when I(operation) is C(create), C(transition) or C(edit).
+ - Recent versions of JIRA no longer accept a user name as a user identifier. In that case, use I(account_id) instead.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+
+ account_id:
+ type: str
+ description:
+ - Sets the account identifier for the assignee when I(operation) is C(create), C(transition) or C(edit).
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+ version_added: 2.5.0
+
+ linktype:
+ type: str
+ required: false
+ description:
+ - Set type of link, when action 'link' selected.
+
+ inwardissue:
+ type: str
+ required: false
+ description:
+ - Set issue from which link will be created.
+
+ outwardissue:
+ type: str
+ required: false
+ description:
+ - Set issue to which link will be created.
+
+ fields:
+ type: dict
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API
+ (possibly after merging with other required data, as when passed to create). See examples for more information,
+ and the JIRA REST API for the structure required for various fields.
+ - When passed to comment, the data structure is merged at the first level since community.general 4.6.0. Useful to add JIRA properties for example.
+ - Note that JIRA may not allow changing field values on specific transitions or states.
+ default: {}
+ jql:
+ required: false
+ description:
+ - Query JIRA in JQL syntax, e.g. 'CMDB Hostname'='test.example.com'.
+ type: str
+ version_added: '0.2.0'
+
+ maxresults:
+ required: false
+ description:
+ - Limit the result of I(operation=search). If no value is specified, the default JIRA limit will be used.
+ - Used when I(operation=search) only, ignored otherwise.
+ type: int
+ version_added: '0.2.0'
+
+ timeout:
+ type: float
+ required: false
+ description:
+ - Set timeout, in seconds, on requests to JIRA API.
+ default: 10
+
+ validate_certs:
+ required: false
+ description:
+ - Require valid SSL certificates (set to C(false) if you'd like to use self-signed certificates)
+ default: true
+ type: bool
+
+ attachment:
+ type: dict
+ version_added: 2.5.0
+ description:
+ - Information about the attachment being uploaded.
+ suboptions:
+ filename:
+ required: true
+ type: path
+ description:
+ - The path to the file to upload (from the remote node) or, if I(content) is specified,
+ the filename to use for the attachment.
+ content:
+ type: str
+ description:
+ - The Base64 encoded contents of the file to attach. If not specified, the contents of I(filename) will be
+ used instead.
+ mimetype:
+ type: str
+ description:
+ - The MIME type to supply for the upload. If not specified, best-effort detection will be
+ done.
+
+notes:
+ - "Currently this only works with basic-auth, or tokens."
+ - "To use with JIRA Cloud, pass the login e-mail as the I(username) and the API token as I(password)."
+
+author:
+- "Steve Smith (@tarka)"
+- "Per Abildgaard Toft (@pertoft)"
+- "Brandon McNama (@DWSR)"
+"""
+
+EXAMPLES = r"""
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Example Issue
+ description: Created using Ansible
+ issuetype: Task
+ args:
+ fields:
+ customfield_13225: "test"
+ customfield_12931: {"value": "Test"}
+ register: issue
+
+- name: Comment on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+
+- name: Comment on issue with restricted visibility
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+ comment_visibility:
+ type: role
+ value: Developers
+
+- name: Comment on issue with property to mark it internal
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: comment
+ comment: A comment added by Ansible
+ fields:
+ properties:
+ - key: 'sd.public.comment'
+ value:
+ internal: true
+
+# Add a worklog to an existing issue
+- name: Worklog on issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: worklog
+ comment: A worklog added by Ansible
+ fields:
+ timeSpentSeconds: 12000
+
+- name: Worklog on issue with restricted comment visibility
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: worklog
+ comment: A worklog added by Ansible
+ comment_visibility:
+ type: role
+ value: Developers
+ fields:
+ timeSpentSeconds: 12000
+
+- name: Worklog on issue with comment property to mark it internal
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: worklog
+ comment: A worklog added by Ansible
+ fields:
+ properties:
+ - key: 'sd.public.comment'
+ value:
+ internal: true
+ timeSpentSeconds: 12000
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key}}'
+ operation: edit
+ assignee: ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: create
+ summary: Assigned issue
+ description: Created and assigned using Ansible
+ issuetype: Task
+ assignee: ssmith
+
+# Edit an issue
+- name: Set the labels on an issue using free-form fields
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: edit
+ args:
+ fields:
+ labels:
+ - autocreated
+ - ansible
+
+# Updating a field using operations: add, set & remove
+- name: Change the value of a Select dropdown
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: update
+ args:
+ fields:
+ customfield_12931: [ {'set': {'value': 'Virtual'}} ]
+ customfield_13820: [ {'set': {'value':'Manually'}} ]
+ register: cmdb_issue
+ delegate_to: localhost
+
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: fetch
+ issue: ANS-63
+ register: issue
+
+# Search for an issue
+# You can limit the search to specific fields by adding optional args. Note! It must be a dict, hence, lastViewed: null
+- name: Search for an issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ project: ANS
+ operation: search
+ maxresults: 10
+ jql: project=cmdb AND cf[13225]="test"
+ args:
+ fields:
+ lastViewed: null
+ register: issue
+
+- name: Create a unix account for the reporter
+ become: true
+ user:
+ name: '{{ issue.meta.fields.creator.name }}'
+ comment: '{{ issue.meta.fields.creator.displayName }}'
+
+# You can get list of valid linktypes at /rest/api/2/issueLinkType
+# url of your jira installation.
+- name: Create link from HSP-1 to MKY-1
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ operation: link
+ linktype: Relates
+ inwardissue: HSP-1
+ outwardissue: MKY-1
+
+# Transition an issue
+- name: Resolve the issue
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: '{{ issue.meta.key }}'
+ operation: transition
+ status: Resolve Issue
+ account_id: 112233445566778899aabbcc
+ fields:
+ resolution:
+ name: Done
+ description: I am done! This is the last description I will ever give you.
+
+# Attach a file to an issue
+- name: Attach a file
+ community.general.jira:
+ uri: '{{ server }}'
+ username: '{{ user }}'
+ password: '{{ pass }}'
+ issue: HSP-1
+ operation: attach
+ attachment:
+ filename: topsecretreport.xlsx
+"""
+
+import base64
+import binascii
+import json
+import mimetypes
+import os
+import random
+import string
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper, cause_changes
+from ansible.module_utils.six.moves.urllib.request import pathname2url
+from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
+from ansible.module_utils.urls import fetch_url
+
+
+class JIRA(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ attachment=dict(type='dict', options=dict(
+ content=dict(type='str'),
+ filename=dict(type='path', required=True),
+ mimetype=dict(type='str')
+ )),
+ uri=dict(type='str', required=True),
+ operation=dict(
+ type='str',
+ choices=['attach', 'create', 'comment', 'edit', 'update', 'fetch', 'transition', 'link', 'search', 'worklog'],
+ aliases=['command'], required=True
+ ),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ token=dict(type='str', no_log=True),
+ project=dict(type='str', ),
+ summary=dict(type='str', ),
+ description=dict(type='str', ),
+ issuetype=dict(type='str', ),
+ issue=dict(type='str', aliases=['ticket']),
+ comment=dict(type='str', ),
+ comment_visibility=dict(type='dict', options=dict(
+ type=dict(type='str', choices=['group', 'role'], required=True),
+ value=dict(type='str', required=True)
+ )),
+ status=dict(type='str', ),
+ assignee=dict(type='str', ),
+ fields=dict(default={}, type='dict'),
+ linktype=dict(type='str', ),
+ inwardissue=dict(type='str', ),
+ outwardissue=dict(type='str', ),
+ jql=dict(type='str', ),
+ maxresults=dict(type='int'),
+ timeout=dict(type='float', default=10),
+ validate_certs=dict(default=True, type='bool'),
+ account_id=dict(type='str'),
+ ),
+ mutually_exclusive=[
+ ['username', 'token'],
+ ['password', 'token'],
+ ['assignee', 'account_id'],
+ ],
+ required_together=[
+ ['username', 'password'],
+ ],
+ required_one_of=[
+ ['username', 'token'],
+ ],
+ required_if=(
+ ('operation', 'attach', ['issue', 'attachment']),
+ ('operation', 'create', ['project', 'issuetype', 'summary']),
+ ('operation', 'comment', ['issue', 'comment']),
+ ('operation', 'worklog', ['issue', 'comment']),
+ ('operation', 'fetch', ['issue']),
+ ('operation', 'transition', ['issue', 'status']),
+ ('operation', 'link', ['linktype', 'inwardissue', 'outwardissue']),
+ ('operation', 'search', ['jql']),
+ ),
+ supports_check_mode=False
+ )
+
+ state_param = 'operation'
+
+ def __init_module__(self):
+ if self.vars.fields is None:
+ self.vars.fields = {}
+ if self.vars.assignee:
+ self.vars.fields['assignee'] = {'name': self.vars.assignee}
+ if self.vars.account_id:
+ self.vars.fields['assignee'] = {'accountId': self.vars.account_id}
+ self.vars.uri = self.vars.uri.strip('/')
+ self.vars.set('restbase', self.vars.uri + '/rest/api/2')
+
+ @cause_changes(on_success=True)
+ def operation_create(self):
+ createfields = {
+ 'project': {'key': self.vars.project},
+ 'summary': self.vars.summary,
+ 'issuetype': {'name': self.vars.issuetype}}
+
+ if self.vars.description:
+ createfields['description'] = self.vars.description
+
+ # Merge in any additional or overridden fields
+ if self.vars.fields:
+ createfields.update(self.vars.fields)
+
+ data = {'fields': createfields}
+ url = self.vars.restbase + '/issue/'
+ self.vars.meta = self.post(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_comment(self):
+ data = {
+ 'body': self.vars.comment
+ }
+ # if comment_visibility is specified restrict visibility
+ if self.vars.comment_visibility is not None:
+ data['visibility'] = self.vars.comment_visibility
+
+ # Use 'fields' to merge in any additional data
+ if self.vars.fields:
+ data.update(self.vars.fields)
+
+ url = self.vars.restbase + '/issue/' + self.vars.issue + '/comment'
+ self.vars.meta = self.post(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_worklog(self):
+ data = {
+ 'comment': self.vars.comment
+ }
+ # if comment_visibility is specified restrict visibility
+ if self.vars.comment_visibility is not None:
+ data['visibility'] = self.vars.comment_visibility
+
+ # Use 'fields' to merge in any additional data
+ if self.vars.fields:
+ data.update(self.vars.fields)
+
+ url = self.vars.restbase + '/issue/' + self.vars.issue + '/worklog'
+ self.vars.meta = self.post(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_edit(self):
+ data = {
+ 'fields': self.vars.fields
+ }
+ url = self.vars.restbase + '/issue/' + self.vars.issue
+ self.vars.meta = self.put(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_update(self):
+ data = {
+ "update": self.vars.fields,
+ }
+ url = self.vars.restbase + '/issue/' + self.vars.issue
+ self.vars.meta = self.put(url, data)
+
+ def operation_fetch(self):
+ url = self.vars.restbase + '/issue/' + self.vars.issue
+ self.vars.meta = self.get(url)
+
+ def operation_search(self):
+ url = self.vars.restbase + '/search?jql=' + pathname2url(self.vars.jql)
+ if self.vars.fields:
+ fields = self.vars.fields.keys()
+ url = url + '&fields=' + '&fields='.join([pathname2url(f) for f in fields])
+ if self.vars.maxresults:
+ url = url + '&maxResults=' + str(self.vars.maxresults)
+
+ self.vars.meta = self.get(url)
+
+ @cause_changes(on_success=True)
+ def operation_transition(self):
+ # Find the transition id
+ turl = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
+ tmeta = self.get(turl)
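+ # The transitions endpoint addresses transitions by numeric id, so map the
+ # human-readable name given in I(status) to its id first.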
+
+ target = self.vars.status
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+ else:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ fields = dict(self.vars.fields)
+ if self.vars.summary is not None:
+ fields.update({'summary': self.vars.summary})
+ if self.vars.description is not None:
+ fields.update({'description': self.vars.description})
+
+ # Perform it
+ data = {'transition': {"id": tid},
+ 'fields': fields}
+ if self.vars.comment is not None:
+ data.update({"update": {
+ "comment": [{
+ "add": {"body": self.vars.comment}
+ }],
+ }})
+ url = self.vars.restbase + '/issue/' + self.vars.issue + "/transitions"
+ self.vars.meta = self.post(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_link(self):
+ data = {
+ 'type': {'name': self.vars.linktype},
+ 'inwardIssue': {'key': self.vars.inwardissue},
+ 'outwardIssue': {'key': self.vars.outwardissue},
+ }
+ url = self.vars.restbase + '/issueLink/'
+ self.vars.meta = self.post(url, data)
+
+ @cause_changes(on_success=True)
+ def operation_attach(self):
+ v = self.vars
+ filename = v.attachment.get('filename')
+ content = v.attachment.get('content')
+
+ if not any((filename, content)):
+ raise ValueError('at least one of filename or content must be provided')
+ mime = v.attachment.get('mimetype')
+
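+ # Note: I(filename) must refer to an existing file on the remote node even
+ # when I(content) is supplied, as it is checked with os.path.isfile() below.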
+ if not os.path.isfile(filename):
+ raise ValueError('The provided filename does not exist: %s' % filename)
+
+ content_type, data = self._prepare_attachment(filename, content, mime)
+
+ url = v.restbase + '/issue/' + v.issue + '/attachments'
+ return True, self.post(
+ url, data, content_type=content_type, additional_headers={"X-Atlassian-Token": "no-check"}
+ )
+
+ # Ideally we'd just use prepare_multipart from ansible.module_utils.urls, but
+ # unfortunately it does not support specifying the encoding and also defaults to
+ # base64. Jira doesn't support base64 encoded attachments (and is therefore not
+ # spec compliant. Go figure). I originally wrote this function as an almost
+ # exact copypasta of prepare_multipart, but ran into some encoding issues when
+ # using the noop encoder. Hand rolling the entire message body seemed to work
+ # out much better.
+ #
+ # https://community.atlassian.com/t5/Jira-questions/Jira-dosen-t-decode-base64-attachment-request-REST-API/qaq-p/916427
+ #
+ # content is expected to be a base64 encoded string since Ansible doesn't
+ # support passing raw bytes objects.
+ @staticmethod
+ def _prepare_attachment(filename, content=None, mime_type=None):
+ def escape_quotes(s):
+ return s.replace('"', '\\"')
+
+ boundary = "".join(random.choice(string.digits + string.ascii_letters) for dummy in range(30))
+ name = to_native(os.path.basename(filename))
+
+ if not mime_type:
+ try:
+ mime_type = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
+ except Exception:
+ mime_type = 'application/octet-stream'
+ main_type, sep, sub_type = mime_type.partition('/')
+
+ if not content and filename:
+ with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
+ content = f.read()
+ else:
+ try:
+ content = base64.b64decode(content)
+ except binascii.Error as e:
+ raise Exception("Unable to base64 decode file content: %s" % e)
+
+ lines = [
+ "--{0}".format(boundary),
+ 'Content-Disposition: form-data; name="file"; filename={0}'.format(escape_quotes(name)),
+ "Content-Type: {0}".format("{0}/{1}".format(main_type, sub_type)),
+ '',
+ to_text(content),
+ "--{0}--".format(boundary),
+ ""
+ ]
+
+ return (
+ "multipart/form-data; boundary={0}".format(boundary),
+ "\r\n".join(lines)
+ )
+
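+    # Illustrative example (not executed): for a hypothetical plain-text file
+    # "report.txt" containing "hello", _prepare_attachment("report.txt") returns
+    # approximately:
+    #   ('multipart/form-data; boundary=<30 random chars>',
+    #    '--<boundary>\r\n'
+    #    'Content-Disposition: form-data; name="file"; filename=report.txt\r\n'
+    #    'Content-Type: text/plain\r\n'
+    #    '\r\n'
+    #    'hello\r\n'
+    #    '--<boundary>--\r\n')
+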
+ def request(
+ self,
+ url,
+ data=None,
+ method=None,
+ content_type='application/json',
+ additional_headers=None
+ ):
+ if data and content_type == 'application/json':
+ data = json.dumps(data)
+
+ headers = {}
+ if isinstance(additional_headers, dict):
+ headers = additional_headers.copy()
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+
+ if self.vars.token is not None:
+ headers.update({
+ "Content-Type": content_type,
+ "Authorization": "Bearer %s" % self.vars.token,
+ })
+ else:
+ auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.vars.username, self.vars.password),
+ errors='surrogate_or_strict')))
+ headers.update({
+ "Content-Type": content_type,
+ "Authorization": "Basic %s" % auth,
+ })
+
+ response, info = fetch_url(
+ self.module, url, data=data, method=method, timeout=self.vars.timeout, headers=headers
+ )
+
+ if info['status'] not in (200, 201, 204):
+ error = None
+ try:
+ error = json.loads(info['body'])
+ except Exception:
+                msg = 'The request "{method} {url}" returned an unexpected status code {status} {msg}\n{body}'.format(
+ status=info['status'],
+ msg=info['msg'],
+ body=info.get('body'),
+ url=url,
+ method=method,
+ )
+ self.module.fail_json(msg=to_native(msg), exception=traceback.format_exc())
+ if error:
+ msg = []
+ for key in ('errorMessages', 'errors'):
+ if error.get(key):
+ msg.append(to_native(error[key]))
+ if msg:
+ self.module.fail_json(msg=', '.join(msg))
+ self.module.fail_json(msg=to_native(error))
+            # Fallback: print the raw body if it could not be decoded as JSON
+ self.module.fail_json(msg=to_native(info['body']))
+
+ body = response.read()
+
+ if body:
+ return json.loads(to_text(body, errors='surrogate_or_strict'))
+ return {}
+
+ def post(self, url, data, content_type='application/json', additional_headers=None):
+ return self.request(url, data=data, method='POST', content_type=content_type,
+ additional_headers=additional_headers)
+
+ def put(self, url, data):
+ return self.request(url, data=data, method='PUT')
+
+ def get(self, url):
+ return self.request(url)
+
+
+def main():
+ jira = JIRA()
+ jira.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/kdeconfig.py b/ansible_collections/community/general/plugins/modules/kdeconfig.py
new file mode 100644
index 000000000..42a08dd64
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/kdeconfig.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+
+# Copyright (c) 2023, Salvatore Mesoraca <s.mesoraca16@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: kdeconfig
+short_description: Manage KDE configuration files
+version_added: "6.5.0"
+description:
+ - Add or change individual settings in KDE configuration files.
+ - It uses B(kwriteconfig) under the hood.
+
+options:
+ path:
+ description:
+ - Path to the config file. If the file does not exist it will be created.
+ type: path
+ required: true
+ kwriteconfig_path:
+ description:
+ - Path to the kwriteconfig executable. If not specified, Ansible will try
+ to discover it.
+ type: path
+ values:
+ description:
+ - List of values to set.
+ type: list
+ elements: dict
+ suboptions:
+ group:
+ description:
+          - The option's group. Either this option or I(groups) is required.
+ type: str
+ groups:
+ description:
+          - List of the option's groups. Either this option or I(group) is required.
+ type: list
+ elements: str
+ key:
+ description:
+ - The option's name.
+ type: str
+ required: true
+ value:
+ description:
+          - The option's value. Either this option or I(bool_value) is required.
+ type: str
+ bool_value:
+ description:
+ - Boolean value.
+          - Either this option or I(value) is required.
+ type: bool
+ required: true
+ backup:
+ description:
+ - Create a backup file.
+ type: bool
+ default: false
+extends_documentation_fragment:
+ - files
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+requirements:
+ - kwriteconfig
+author:
+ - Salvatore Mesoraca (@smeso)
+'''
+
+EXAMPLES = r'''
+- name: Ensure "Homepage=https://www.ansible.com/" in group "Branding"
+ community.general.kdeconfig:
+ path: /etc/xdg/kickoffrc
+ values:
+ - group: Branding
+ key: Homepage
+ value: https://www.ansible.com/
+ mode: '0644'
+
+- name: Ensure "KEY=true" in groups "Group" and "Subgroup", and "KEY=VALUE" in Group2
+ community.general.kdeconfig:
+ path: /etc/xdg/someconfigrc
+ values:
+ - groups: [Group, Subgroup]
+ key: KEY
+ bool_value: true
+ - group: Group2
+ key: KEY
+ value: VALUE
+ backup: true
+'''
+
+RETURN = r''' # '''
+
+import os
+import shutil
+import tempfile
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+class TemporaryDirectory(object):
+ """Basic backport of tempfile.TemporaryDirectory"""
+
+ def __init__(self, suffix="", prefix="tmp", dir=None):
+ self.name = None
+ self.name = tempfile.mkdtemp(suffix, prefix, dir)
+
+ def __enter__(self):
+ return self.name
+
+ def rm(self):
+ if self.name:
+ shutil.rmtree(self.name, ignore_errors=True)
+ self.name = None
+
+ def __exit__(self, exc, value, tb):
+ self.rm()
+
+ def __del__(self):
+ self.rm()
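+
+# Illustrative usage (not executed): this mirrors tempfile.TemporaryDirectory on
+# Python 3, i.e. the directory is removed when the context exits, even on error:
+#   with TemporaryDirectory(dir=module.tmpdir) as tmpdir:
+#       ...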
+
+
+def run_kwriteconfig(module, cmd, path, groups, key, value):
+ """Invoke kwriteconfig with arguments"""
+ args = [cmd, '--file', path, '--key', key]
+ for group in groups:
+ args.extend(['--group', group])
+ if isinstance(value, bool):
+ args.extend(['--type', 'bool'])
+ if value:
+ args.append('true')
+ else:
+ args.append('false')
+ else:
+ args.append(value)
+ module.run_command(args, check_rc=True)
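+    # Illustrative example (not executed): the first task in EXAMPLES above leads
+    # to roughly the following invocation against the temporary work file:
+    #   kwriteconfig5 --file <tmpdir>/file --key Homepage --group Branding https://www.ansible.com/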
+
+
+def run_module(module, tmpdir, kwriteconfig):
+ result = dict(changed=False, msg='OK', path=module.params['path'])
+ b_path = to_bytes(module.params['path'])
+ tmpfile = os.path.join(tmpdir, 'file')
+ b_tmpfile = to_bytes(tmpfile)
+ diff = dict(
+ before='',
+ after='',
+ before_header=result['path'],
+ after_header=result['path'],
+ )
+ try:
+ with open(b_tmpfile, 'wb') as dst:
+ try:
+ with open(b_path, 'rb') as src:
+ b_data = src.read()
+ except IOError:
+ result['changed'] = True
+ else:
+ dst.write(b_data)
+ try:
+ diff['before'] = to_text(b_data)
+ except UnicodeError:
+ diff['before'] = repr(b_data)
+ except IOError:
+ module.fail_json(msg='Unable to create temporary file', traceback=traceback.format_exc())
+
+ for row in module.params['values']:
+ groups = row['groups']
+ if groups is None:
+ groups = [row['group']]
+ key = row['key']
+ value = row['bool_value']
+ if value is None:
+ value = row['value']
+ run_kwriteconfig(module, kwriteconfig, tmpfile, groups, key, value)
+
+ with open(b_tmpfile, 'rb') as tmpf:
+ b_data = tmpf.read()
+ try:
+ diff['after'] = to_text(b_data)
+ except UnicodeError:
+ diff['after'] = repr(b_data)
+
+ result['changed'] = result['changed'] or diff['after'] != diff['before']
+
+ file_args = module.load_file_common_arguments(module.params)
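+    # file_args carries the common file options (owner, group, mode, and so on)
+    # accepted via add_file_common_args; set_fs_attributes_if_different() below
+    # compares them and, outside check mode, applies them.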
+
+ if module.check_mode:
+ if not result['changed']:
+ shutil.copystat(b_path, b_tmpfile)
+ uid, gid = module.user_and_group(b_path)
+ os.chown(b_tmpfile, uid, gid)
+ if module._diff:
+ diff = {}
+ else:
+ diff = None
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+ if module._diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+
+ if result['changed']:
+ if module.params['backup'] and os.path.exists(b_path):
+ result['backup_file'] = module.backup_local(result['path'])
+ try:
+ module.atomic_move(b_tmpfile, b_path)
+ except IOError:
+            module.fail_json(msg='Unable to move temporary file %s to %s, IOError' % (tmpfile, result['path']), traceback=traceback.format_exc())
+
+ if result['changed']:
+ module.set_fs_attributes_if_different(file_args, result['changed'])
+ else:
+ if module._diff:
+ diff = {}
+ else:
+ diff = None
+ result['changed'] = module.set_fs_attributes_if_different(file_args, result['changed'], diff=diff)
+ if module._diff:
+ result['diff'] = diff
+ module.exit_json(**result)
+
+
+def main():
+ single_value_arg = dict(group=dict(type='str'),
+ groups=dict(type='list', elements='str'),
+ key=dict(type='str', required=True, no_log=False),
+ value=dict(type='str'),
+ bool_value=dict(type='bool'))
+ required_alternatives = [('group', 'groups'), ('value', 'bool_value')]
+ module_args = dict(
+ values=dict(type='list',
+ elements='dict',
+ options=single_value_arg,
+ mutually_exclusive=required_alternatives,
+ required_one_of=required_alternatives,
+ required=True),
+ path=dict(type='path', required=True),
+ kwriteconfig_path=dict(type='path'),
+ backup=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
+ kwriteconfig = None
+ if module.params['kwriteconfig_path'] is not None:
+ kwriteconfig = module.get_bin_path(module.params['kwriteconfig_path'], required=True)
+ else:
+ for progname in ('kwriteconfig5', 'kwriteconfig', 'kwriteconfig4'):
+ kwriteconfig = module.get_bin_path(progname)
+ if kwriteconfig is not None:
+ break
+ if kwriteconfig is None:
+ module.fail_json(msg='kwriteconfig is not installed')
+ for v in module.params['values']:
+ if not v['key']:
+ module.fail_json(msg="'key' cannot be empty")
+ with TemporaryDirectory(dir=module.tmpdir) as tmpdir:
+ run_module(module, tmpdir, kwriteconfig)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/kernel_blacklist.py b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
new file mode 100644
index 000000000..1b40999ca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/kernel_blacklist.py
@@ -0,0 +1,126 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky (@russoz) <russoz@gmail.com>
+# Copyright (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: kernel_blacklist
+author:
+ - Matthias Vogelgesang (@matze)
+short_description: Blacklist kernel modules
+description:
+ - Add or remove kernel modules from blacklist.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ name:
+ type: str
+ description:
+      - Name of the kernel module to blacklist or whitelist.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the module should be present in the blacklist or absent.
+ choices: [ absent, present ]
+ default: present
+ blacklist_file:
+ type: str
+ description:
+ - If specified, use this blacklist file instead of
+ C(/etc/modprobe.d/blacklist-ansible.conf).
+ default: /etc/modprobe.d/blacklist-ansible.conf
+'''
+
+EXAMPLES = '''
+- name: Blacklist the nouveau driver module
+ community.general.kernel_blacklist:
+ name: nouveau
+ state: present
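+
+# Removal is symmetric (illustrative):
+- name: Ensure the nouveau driver module is not blacklisted
+  community.general.kernel_blacklist:
+    name: nouveau
+    state: absent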
+'''
+
+import os
+import re
+import tempfile
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+
+class Blacklist(StateModuleHelper):
+ output_params = ('name', 'state')
+ module = dict(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ blacklist_file=dict(type='str', default='/etc/modprobe.d/blacklist-ansible.conf'),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.pattern = re.compile(r'^blacklist\s+{0}$'.format(re.escape(self.vars.name)))
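+        # re.escape() plus the ^/$ anchors make this match only the exact line
+        # "blacklist <name>", not modules whose names merely share the prefix.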
+ self.vars.filename = self.vars.blacklist_file
+ self.vars.set('file_exists', os.path.exists(self.vars.filename), output=False, change=True)
+ if not self.vars.file_exists:
+ with open(self.vars.filename, 'a'):
+ pass
+ self.vars.file_exists = True
+ self.vars.set('lines', [], change=True, diff=True)
+ else:
+ with open(self.vars.filename) as fd:
+ self.vars.set('lines', [x.rstrip() for x in fd.readlines()], change=True, diff=True)
+ self.vars.set('is_blacklisted', self._is_module_blocked(), change=True)
+
+ def _is_module_blocked(self):
+ for line in self.vars.lines:
+ stripped = line.strip()
+ if stripped.startswith('#'):
+ continue
+ if self.pattern.match(stripped):
+ return True
+ return False
+
+ def state_absent(self):
+ if not self.vars.is_blacklisted:
+ return
+ self.vars.is_blacklisted = False
+ self.vars.lines = [line for line in self.vars.lines if not self.pattern.match(line.strip())]
+
+ def state_present(self):
+ if self.vars.is_blacklisted:
+ return
+ self.vars.is_blacklisted = True
+ self.vars.lines = self.vars.lines + ['blacklist %s' % self.vars.name]
+
+ def __quit_module__(self):
+ if self.has_changed() and not self.module.check_mode:
+            dummy, tmpfile = tempfile.mkstemp()
+            os.close(dummy)  # only the path is needed; close the fd to avoid leaking it
+            try:
+                os.remove(tmpfile)
+                self.module.preserved_copy(self.vars.filename, tmpfile)  # ensure right perms/ownership
+ with open(tmpfile, 'w') as fd:
+ fd.writelines(["{0}\n".format(x) for x in self.vars.lines])
+ self.module.atomic_move(tmpfile, self.vars.filename)
+ finally:
+ if os.path.exists(tmpfile):
+ os.remove(tmpfile)
+
+
+def main():
+ Blacklist.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authentication.py b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py
new file mode 100644
index 000000000..6143d9d5c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authentication.py
@@ -0,0 +1,483 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, INSPQ <philippe.gauthier@inspq.qc.ca>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authentication
+
+short_description: Configure authentication in Keycloak
+
+description:
+    - At present, this module can only make a copy of an existing authentication flow, add executions to it, and configure them.
+ - It can also delete the flow.
+
+version_added: "3.3.0"
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ realm:
+ description:
+            - The name of the realm in which the authentication flow is defined.
+ required: true
+ type: str
+ alias:
+ description:
+ - Alias for the authentication flow.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the flow.
+ type: str
+ providerId:
+ description:
+ - C(providerId) for the new flow when not copied from an existing flow.
+ type: str
+ copyFrom:
+ description:
+ - C(flowAlias) of the authentication flow to use for the copy.
+ type: str
+ authenticationExecutions:
+ description:
+ - Configuration structure for the executions.
+ type: list
+ elements: dict
+ suboptions:
+ providerId:
+ description:
+                    - C(providerId) of the execution to create or update.
+ type: str
+ displayName:
+ description:
+ - Name of the execution or subflow to create or update.
+ type: str
+ requirement:
+ description:
+ - Control status of the subflow or execution.
+ choices: [ "REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL" ]
+ type: str
+ flowAlias:
+ description:
+ - Alias of parent flow.
+ type: str
+ authenticationConfig:
+ description:
+                    - Configuration of the authentication execution.
+ type: dict
+ index:
+ description:
+ - Priority order of the execution.
+ type: int
+ subFlowType:
+ description:
+ - For new subflows, optionally specify the type.
+                    - It is only used at creation.
+ choices: ["basic-flow", "form-flow"]
+ default: "basic-flow"
+ type: str
+ version_added: 6.6.0
+ state:
+ description:
+            - Control whether the authentication flow must exist or not.
+ choices: [ "present", "absent" ]
+ default: present
+ type: str
+ force:
+ type: bool
+ default: false
+ description:
+            - If C(true), allows removing the authentication flow and recreating it.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Philippe Gauthier (@elfelip)
+ - Gaëtan Daubresse (@Gaetan2907)
+'''
+
+EXAMPLES = '''
+ - name: Create an authentication flow from first broker login and add an execution to it.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-execution1"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.execution1.property"
+ config:
+ test1.property: "value"
+ - providerId: "test-execution2"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.execution2.property"
+ config:
+ test2.property: "value"
+ state: present
+
+ - name: Re-create the authentication flow
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-provisioning"
+ requirement: "REQUIRED"
+ authenticationConfig:
+ alias: "test.provisioning.property"
+ config:
+ test.provisioning.property: "value"
+ state: present
+ force: true
+
+ - name: Create an authentication flow with subflow containing an execution.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ copyFrom: "first broker login"
+ authenticationExecutions:
+ - providerId: "test-execution1"
+ requirement: "REQUIRED"
+ - displayName: "New Subflow"
+ requirement: "REQUIRED"
+ - providerId: "auth-cookie"
+ requirement: "REQUIRED"
+ flowAlias: "New Sublow"
+ state: present
+
+ - name: Remove authentication.
+ community.general.keycloak_authentication:
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: master
+ alias: "Copy of first broker login"
+ state: absent
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the authentication after module execution.
+ returned: on success
+ type: dict
+ sample: {
+ "alias": "Copy of first broker login",
+ "authenticationExecutions": [
+ {
+ "alias": "review profile config",
+ "authenticationConfig": {
+ "alias": "review profile config",
+ "config": { "update.profile.on.first.login": "missing" },
+ "id": "6f09e4fb-aad4-496a-b873-7fa9779df6d7"
+ },
+ "configurable": true,
+ "displayName": "Review Profile",
+ "id": "8f77dab8-2008-416f-989e-88b09ccf0b4c",
+ "index": 0,
+ "level": 0,
+ "providerId": "idp-review-profile",
+ "requirement": "REQUIRED",
+ "requirementChoices": [ "REQUIRED", "ALTERNATIVE", "DISABLED" ]
+ }
+ ],
+ "builtIn": false,
+ "description": "Actions taken after first broker login with identity provider account, which is not yet linked to any Keycloak account",
+ "id": "bc228863-5887-4297-b898-4d988f8eaa5c",
+ "providerId": "basic-flow",
+ "topLevel": true
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak \
+ import KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError, is_struct_included
+from ansible.module_utils.basic import AnsibleModule
+
+
+def find_exec_in_executions(searched_exec, executions):
+ """
+ Search if exec is contained in the executions.
+ :param searched_exec: Execution to search for.
+ :param executions: List of executions.
+    :return: Index of the execution, or -1 if not found.
+ """
+ for i, existing_exec in enumerate(executions, start=0):
+ if ("providerId" in existing_exec and "providerId" in searched_exec and
+ existing_exec["providerId"] == searched_exec["providerId"] or
+ "displayName" in existing_exec and "displayName" in searched_exec and
+ existing_exec["displayName"] == searched_exec["displayName"]):
+ return i
+ return -1
+
+
+def create_or_update_executions(kc, config, realm='master'):
+ """
+ Create or update executions for an authentication flow.
+ :param kc: Keycloak API access.
+    :param config: Representation of the authentication flow including its executions.
+ :param realm: Realm
+    :return: tuple (changed, dict(before, after))
+ WHERE
+ bool changed indicates if changes have been made
+ dict(str, str) shows state before and after creation/update
+ """
+ try:
+ changed = False
+ after = ""
+ before = ""
+ if "authenticationExecutions" in config:
+ # Get existing executions on the Keycloak server for this alias
+ existing_executions = kc.get_executions_representation(config, realm=realm)
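+            # Walk the desired executions in order: an explicit 'index' overrides the
+            # positional index, and each one is matched against the existing executions
+            # by providerId or displayName (see find_exec_in_executions above).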
+ for new_exec_index, new_exec in enumerate(config["authenticationExecutions"], start=0):
+ if new_exec["index"] is not None:
+ new_exec_index = new_exec["index"]
+ exec_found = False
+ # Get flowalias parent if given
+ if new_exec["flowAlias"] is not None:
+ flow_alias_parent = new_exec["flowAlias"]
+ else:
+ flow_alias_parent = config["alias"]
+            # Check whether an existing execution matches the new one by providerId or displayName
+ exec_index = find_exec_in_executions(new_exec, existing_executions)
+ if exec_index != -1:
+ # Remove key that doesn't need to be compared with existing_exec
+ exclude_key = ["flowAlias", "subFlowType"]
+ for index_key, key in enumerate(new_exec, start=0):
+ if new_exec[key] is None:
+ exclude_key.append(key)
+                # Compare the executions to see if they need changes
+ if not is_struct_included(new_exec, existing_executions[exec_index], exclude_key) or exec_index != new_exec_index:
+ exec_found = True
+ before += str(existing_executions[exec_index]) + '\n'
+ id_to_update = existing_executions[exec_index]["id"]
+ # Remove exec from list in case 2 exec with same name
+ existing_executions[exec_index].clear()
+ elif new_exec["providerId"] is not None:
+ kc.create_execution(new_exec, flowAlias=flow_alias_parent, realm=realm)
+ exec_found = True
+ exec_index = new_exec_index
+ id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
+ after += str(new_exec) + '\n'
+ elif new_exec["displayName"] is not None:
+ kc.create_subflow(new_exec["displayName"], flow_alias_parent, realm=realm, flowType=new_exec["subFlowType"])
+ exec_found = True
+ exec_index = new_exec_index
+ id_to_update = kc.get_executions_representation(config, realm=realm)[exec_index]["id"]
+ after += str(new_exec) + '\n'
+ if exec_found:
+ changed = True
+ if exec_index != -1:
+ # Update the existing execution
+ updated_exec = {
+ "id": id_to_update
+ }
+ # add the execution configuration
+ if new_exec["authenticationConfig"] is not None:
+ kc.add_authenticationConfig_to_execution(updated_exec["id"], new_exec["authenticationConfig"], realm=realm)
+ for key in new_exec:
+ # remove unwanted key for the next API call
+ if key not in ("flowAlias", "authenticationConfig", "subFlowType"):
+ updated_exec[key] = new_exec[key]
+ if new_exec["requirement"] is not None:
+ kc.update_authentication_executions(flow_alias_parent, updated_exec, realm=realm)
+ diff = exec_index - new_exec_index
+ kc.change_execution_priority(updated_exec["id"], diff, realm=realm)
+ after += str(kc.get_executions_representation(config, realm=realm)[new_exec_index]) + '\n'
+ return changed, dict(before=before, after=after)
+ except Exception as e:
+ kc.module.fail_json(msg='Could not create or update executions for authentication flow %s in realm %s: %s'
+ % (config["alias"], realm, str(e)))
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ realm=dict(type='str', required=True),
+ alias=dict(type='str', required=True),
+ providerId=dict(type='str'),
+ description=dict(type='str'),
+ copyFrom=dict(type='str'),
+ authenticationExecutions=dict(type='list', elements='dict',
+ options=dict(
+ providerId=dict(type='str'),
+ displayName=dict(type='str'),
+ requirement=dict(choices=["REQUIRED", "ALTERNATIVE", "DISABLED", "CONDITIONAL"], type='str'),
+ flowAlias=dict(type='str'),
+ authenticationConfig=dict(type='dict'),
+ index=dict(type='int'),
+ subFlowType=dict(choices=["basic-flow", "form-flow"], default='basic-flow', type='str'),
+ )),
+ state=dict(choices=["absent", "present"], default='present'),
+ force=dict(type='bool', default=False),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']])
+ )
+
+ result = dict(changed=False, msg='', flow={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ force = module.params.get('force')
+
+ new_auth_repr = {
+ "alias": module.params.get("alias"),
+ "copyFrom": module.params.get("copyFrom"),
+ "providerId": module.params.get("providerId"),
+ "authenticationExecutions": module.params.get("authenticationExecutions"),
+ "description": module.params.get("description"),
+ "builtIn": module.params.get("builtIn"),
+ "subflow": module.params.get("subflow"),
+ }
+
+ auth_repr = kc.get_authentication_flow_by_alias(alias=new_auth_repr["alias"], realm=realm)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not auth_repr:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = new_auth_repr["alias"] + ' absent'
+ module.exit_json(**result)
+
+ elif state == 'present':
+ # Process a creation
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before='', after=new_auth_repr)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # If copyFrom is defined, create authentication flow from a copy
+ if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None:
+ auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm)
+ else: # Create an empty authentication flow
+ auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm)
+
+            # If the authentication flow still does not exist on the server, fail.
+            if auth_repr is None:
+                result['msg'] = "Newly created authentication flow not found: " + str(new_auth_repr)
+ module.fail_json(**result)
+
+ # Configure the executions for the flow
+ create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm)
+
+ # Get executions created
+ exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm)
+ if exec_repr is not None:
+ auth_repr["authenticationExecutions"] = exec_repr
+ result['end_state'] = auth_repr
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ if force: # If force option is true
+ # Delete the actual authentication flow
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=auth_repr, after=new_auth_repr)
+ if module.check_mode:
+ module.exit_json(**result)
+ kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm)
+ # If copyFrom is defined, create authentication flow from a copy
+ if "copyFrom" in new_auth_repr and new_auth_repr["copyFrom"] is not None:
+ auth_repr = kc.copy_auth_flow(config=new_auth_repr, realm=realm)
+ else: # Create an empty authentication flow
+ auth_repr = kc.create_empty_auth_flow(config=new_auth_repr, realm=realm)
+                # If the authentication flow still does not exist on the server, fail.
+                if auth_repr is None:
+                    result['msg'] = "Newly created authentication flow not found: " + str(new_auth_repr)
+ module.fail_json(**result)
+ # Configure the executions for the flow
+
+ if module.check_mode:
+ module.exit_json(**result)
+ changed, diff = create_or_update_executions(kc=kc, config=new_auth_repr, realm=realm)
+ result['changed'] |= changed
+
+ if module._diff:
+ result['diff'] = diff
+
+ # Get executions created
+ exec_repr = kc.get_executions_representation(config=new_auth_repr, realm=realm)
+ if exec_repr is not None:
+ auth_repr["authenticationExecutions"] = exec_repr
+ result['end_state'] = auth_repr
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=auth_repr, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_authentication_flow_by_id(id=auth_repr["id"], realm=realm)
+
+ result['msg'] = 'Authentication flow: {alias} id: {id} is deleted'.format(alias=new_auth_repr['alias'],
+ id=auth_repr["id"])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
new file mode 100644
index 000000000..c451d3751
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_authz_authorization_scope.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_authz_authorization_scope
+
+short_description: Allows administration of Keycloak client authorization scopes via Keycloak API
+
+version_added: 6.6.0
+
+description:
+ - This module allows the administration of Keycloak client Authorization Scopes via the Keycloak REST
+ API. Authorization Scopes are only available if a client has Authorization enabled.
+
+ - This module requires access to the REST API via OpenID Connect; the user connecting and the realm
+ being used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate realm definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase options used by Keycloak.
+ The Authorization Services paths and payloads have not officially been documented by the Keycloak project.
+ U(https://www.puppeteers.net/blog/keycloak-authorization-services-rest-api-paths-and-payload/)
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the authorization scope.
+ - On C(present), the authorization scope will be created (or updated if it exists already).
+ - On C(absent), the authorization scope will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ name:
+ description:
+ - Name of the authorization scope to create.
+ type: str
+ required: true
+ display_name:
+ description:
+ - The display name of the authorization scope.
+ type: str
+ required: false
+ icon_uri:
+ description:
+ - The icon URI for the authorization scope.
+ type: str
+ required: false
+ client_id:
+ description:
+ - The C(clientId) of the Keycloak client that should have the authorization scope.
+ - This is usually a human-readable name of the Keycloak client.
+ type: str
+ required: true
+ realm:
+ description:
+ - The name of the Keycloak realm the Keycloak client is in.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Samuli Seppänen (@mattock)
+'''
+
+EXAMPLES = '''
+- name: Manage Keycloak file:delete authorization scope
+ keycloak_authz_authorization_scope:
+ name: file:delete
+ state: present
+ display_name: File delete
+ client_id: myclient
+ realm: myrealm
+ auth_keycloak_url: http://localhost:8080/auth
+ auth_username: keycloak
+ auth_password: keycloak
+ auth_realm: master
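+
+# Removal works the same way (illustrative):
+- name: Remove Keycloak file:delete authorization scope
+  keycloak_authz_authorization_scope:
+    name: file:delete
+    state: absent
+    client_id: myclient
+    realm: myrealm
+    auth_keycloak_url: http://localhost:8080/auth
+    auth_username: keycloak
+    auth_password: keycloak
+    auth_realm: master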
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the authorization scope after module execution.
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: ID of the authorization scope.
+ type: str
+ returned: when I(state=present)
+ sample: a6ab1cf2-1001-40ec-9f39-48f23b6a0a41
+ name:
+ description: Name of the authorization scope.
+ type: str
+ returned: when I(state=present)
+ sample: file:delete
+ display_name:
+ description: Display name of the authorization scope.
+ type: str
+ returned: when I(state=present)
+ sample: File delete
+ icon_uri:
+ description: Icon URI for the authorization scope.
+ type: str
+ returned: when I(state=present)
+ sample: http://localhost/icon.png
+
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ display_name=dict(type='str', required=False),
+ icon_uri=dict(type='str', required=False),
+ client_id=dict(type='str', required=True),
+ realm=dict(type='str', required=True)
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=(
+ [['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', end_state={}, diff=dict(before={}, after={}))
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ # Convenience variables
+ state = module.params.get('state')
+ name = module.params.get('name')
+ display_name = module.params.get('display_name')
+ icon_uri = module.params.get('icon_uri')
+ client_id = module.params.get('client_id')
+ realm = module.params.get('realm')
+
+ # Get the "id" of the client based on the usually more human-readable
+ # "clientId"
+ cid = kc.get_client_id(client_id, realm=realm)
+ if not cid:
+ module.fail_json(msg='Invalid client %s for realm %s' %
+ (client_id, realm))
+
+ # Get current state of the Authorization Scope using its name as the search
+ # filter. This returns False if it is not found.
+ before_authz_scope = kc.get_authz_authorization_scope_by_name(
+ name=name, client_id=cid, realm=realm)
+
+ # Generate a JSON payload for Keycloak Admin API. This is needed for
+ # "create" and "update" operations.
+ desired_authz_scope = {}
+ desired_authz_scope['name'] = name
+ desired_authz_scope['displayName'] = display_name
+ desired_authz_scope['iconUri'] = icon_uri
+
+ # Add "id" to payload for modify operations
+ if before_authz_scope:
+ desired_authz_scope['id'] = before_authz_scope['id']
+
+ # Ensure that undefined (null) optional parameters are presented as empty
+ # strings in the desired state. This makes comparisons with current state
+ # much easier.
+ for k, v in desired_authz_scope.items():
+ if not v:
+ desired_authz_scope[k] = ''
+
+ # Do the above for the current state
+ if before_authz_scope:
+ for k in ['displayName', 'iconUri']:
+ if k not in before_authz_scope:
+ before_authz_scope[k] = ''
+
+ if before_authz_scope and state == 'present':
+ changes = False
+ for k, v in desired_authz_scope.items():
+ if before_authz_scope[k] != v:
+ changes = True
+                # At this point we know we have to update the object anyway,
+ # so there's no need to do more work.
+ break
+
+ if changes:
+ if module._diff:
+ result['diff'] = dict(before=before_authz_scope, after=desired_authz_scope)
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = 'Authorization scope would be updated'
+ module.exit_json(**result)
+ else:
+ kc.update_authz_authorization_scope(
+ payload=desired_authz_scope, id=before_authz_scope['id'], client_id=cid, realm=realm)
+ result['changed'] = True
+ result['msg'] = 'Authorization scope updated'
+ else:
+ result['changed'] = False
+ result['msg'] = 'Authorization scope not updated'
+
+ result['end_state'] = desired_authz_scope
+ elif not before_authz_scope and state == 'present':
+ if module._diff:
+ result['diff'] = dict(before={}, after=desired_authz_scope)
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = 'Authorization scope would be created'
+ module.exit_json(**result)
+ else:
+ kc.create_authz_authorization_scope(
+ payload=desired_authz_scope, client_id=cid, realm=realm)
+ result['changed'] = True
+ result['msg'] = 'Authorization scope created'
+ result['end_state'] = desired_authz_scope
+ elif before_authz_scope and state == 'absent':
+ if module._diff:
+ result['diff'] = dict(before=before_authz_scope, after={})
+
+ if module.check_mode:
+ result['changed'] = True
+ result['msg'] = 'Authorization scope would be removed'
+ module.exit_json(**result)
+ else:
+ kc.remove_authz_authorization_scope(
+ id=before_authz_scope['id'], client_id=cid, realm=realm)
+ result['changed'] = True
+ result['msg'] = 'Authorization scope removed'
+ elif not before_authz_scope and state == 'absent':
+ result['changed'] = False
+ else:
+ module.fail_json(msg='Unable to determine what to do with authorization scope %s of client %s in realm %s' % (
+ name, client_id, realm))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client.py b/ansible_collections/community/general/plugins/modules/keycloak_client.py
new file mode 100644
index 000000000..ee687fcb4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_client.py
@@ -0,0 +1,984 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client
+
+short_description: Allows administration of Keycloak clients via Keycloak API
+
+description:
+ - This module allows the administration of Keycloak clients via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+ - The Keycloak API does not always sanity check inputs e.g. you can set
+ SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+            - State of the client.
+            - On C(present), the client will be created (or updated if it exists already).
+            - On C(absent), the client will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ realm:
+ description:
+ - The realm to create the client in.
+ type: str
+ default: master
+
+ client_id:
+ description:
+ - Client id of client to be worked on. This is usually an alphanumeric name chosen by
+ you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
+ This is 'clientId' in the Keycloak REST API.
+ aliases:
+ - clientId
+ type: str
+
+ id:
+ description:
+ - Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
+ is required. If you specify both, this takes precedence.
+ type: str
+
+ name:
+ description:
+ - Name of the client (this is not the same as I(client_id)).
+ type: str
+
+ description:
+ description:
+ - Description of the client in Keycloak.
+ type: str
+
+ root_url:
+ description:
+ - Root URL appended to relative URLs for this client.
+ This is 'rootUrl' in the Keycloak REST API.
+ aliases:
+ - rootUrl
+ type: str
+
+ admin_url:
+ description:
+ - URL to the admin interface of the client.
+ This is 'adminUrl' in the Keycloak REST API.
+ aliases:
+ - adminUrl
+ type: str
+
+ base_url:
+ description:
+            - Default URL to use when the auth server needs to redirect or link back to the client.
+ This is 'baseUrl' in the Keycloak REST API.
+ aliases:
+ - baseUrl
+ type: str
+
+ enabled:
+ description:
+ - Is this client enabled or not?
+ type: bool
+
+ client_authenticator_type:
+ description:
+ - How do clients authenticate with the auth server? Either C(client-secret) or
+ C(client-jwt) can be chosen. When using C(client-secret), the module parameter
+ I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
+ C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
+ to configure its behavior.
+ This is 'clientAuthenticatorType' in the Keycloak REST API.
+ choices: ['client-secret', 'client-jwt']
+ aliases:
+ - clientAuthenticatorType
+ type: str
+
+ secret:
+ description:
+ - When using I(client_authenticator_type) C(client-secret) (the default), you can
+              specify a secret here (otherwise one will be generated if it does not exist). If
+ changing this secret, the module will not register a change currently (but the
+ changed secret will be saved).
+ type: str
+
+ registration_access_token:
+ description:
+ - The registration access token provides access for clients to the client registration
+ service.
+ This is 'registrationAccessToken' in the Keycloak REST API.
+ aliases:
+ - registrationAccessToken
+ type: str
+
+ default_roles:
+ description:
+ - list of default roles for this client. If the client roles referenced do not exist
+ yet, they will be created.
+ This is 'defaultRoles' in the Keycloak REST API.
+ aliases:
+ - defaultRoles
+ type: list
+ elements: str
+
+ redirect_uris:
+ description:
+ - Acceptable redirect URIs for this client.
+ This is 'redirectUris' in the Keycloak REST API.
+ aliases:
+ - redirectUris
+ type: list
+ elements: str
+
+ web_origins:
+ description:
+ - List of allowed CORS origins.
+ This is 'webOrigins' in the Keycloak REST API.
+ aliases:
+ - webOrigins
+ type: list
+ elements: str
+
+ not_before:
+ description:
+ - Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
+ This is 'notBefore' in the Keycloak REST API.
+ type: int
+ aliases:
+ - notBefore
+
+ bearer_only:
+ description:
+ - The access type of this client is bearer-only.
+ This is 'bearerOnly' in the Keycloak REST API.
+ aliases:
+ - bearerOnly
+ type: bool
+
+ consent_required:
+ description:
+ - If enabled, users have to consent to client access.
+ This is 'consentRequired' in the Keycloak REST API.
+ aliases:
+ - consentRequired
+ type: bool
+
+ standard_flow_enabled:
+ description:
+ - Enable standard flow for this client or not (OpenID connect).
+ This is 'standardFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - standardFlowEnabled
+ type: bool
+
+ implicit_flow_enabled:
+ description:
+ - Enable implicit flow for this client or not (OpenID connect).
+ This is 'implicitFlowEnabled' in the Keycloak REST API.
+ aliases:
+ - implicitFlowEnabled
+ type: bool
+
+ direct_access_grants_enabled:
+ description:
+ - Are direct access grants enabled for this client or not (OpenID connect).
+ This is 'directAccessGrantsEnabled' in the Keycloak REST API.
+ aliases:
+ - directAccessGrantsEnabled
+ type: bool
+
+ service_accounts_enabled:
+ description:
+ - Are service accounts enabled for this client or not (OpenID connect).
+ This is 'serviceAccountsEnabled' in the Keycloak REST API.
+ aliases:
+ - serviceAccountsEnabled
+ type: bool
+
+ authorization_services_enabled:
+ description:
+ - Are authorization services enabled for this client or not (OpenID connect).
+ This is 'authorizationServicesEnabled' in the Keycloak REST API.
+ aliases:
+ - authorizationServicesEnabled
+ type: bool
+
+ public_client:
+ description:
+ - Is the access type for this client public or not.
+ This is 'publicClient' in the Keycloak REST API.
+ aliases:
+ - publicClient
+ type: bool
+
+ frontchannel_logout:
+ description:
+ - Is frontchannel logout enabled for this client or not.
+ This is 'frontchannelLogout' in the Keycloak REST API.
+ aliases:
+ - frontchannelLogout
+ type: bool
+
+ protocol:
+ description:
+            - Type of client, either C(openid-connect) or C(saml).
+ type: str
+ choices: ['openid-connect', 'saml']
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ aliases:
+ - fullScopeAllowed
+ type: bool
+
+ node_re_registration_timeout:
+ description:
+ - Cluster node re-registration timeout for this client.
+ This is 'nodeReRegistrationTimeout' in the Keycloak REST API.
+ type: int
+ aliases:
+ - nodeReRegistrationTimeout
+
+ registered_nodes:
+ description:
+ - dict of registered cluster nodes (with C(nodename) as the key and last registration
+ time as the value).
+ This is 'registeredNodes' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - registeredNodes
+
+ client_template:
+ description:
+ - Client template to use for this client. If it does not exist this field will silently
+ be dropped.
+ This is 'clientTemplate' in the Keycloak REST API.
+ type: str
+ aliases:
+ - clientTemplate
+
+ use_template_config:
+ description:
+ - Whether or not to use configuration from the I(client_template).
+ This is 'useTemplateConfig' in the Keycloak REST API.
+ aliases:
+ - useTemplateConfig
+ type: bool
+
+ use_template_scope:
+ description:
+ - Whether or not to use scope configuration from the I(client_template).
+ This is 'useTemplateScope' in the Keycloak REST API.
+ aliases:
+ - useTemplateScope
+ type: bool
+
+ use_template_mappers:
+ description:
+ - Whether or not to use mapper configuration from the I(client_template).
+ This is 'useTemplateMappers' in the Keycloak REST API.
+ aliases:
+ - useTemplateMappers
+ type: bool
+
+ always_display_in_console:
+ description:
+ - Whether or not to display this client in account console, even if the
+ user does not have an active session.
+ aliases:
+ - alwaysDisplayInConsole
+ type: bool
+ version_added: 4.7.0
+
+ surrogate_auth_required:
+ description:
+ - Whether or not surrogate auth is required.
+ This is 'surrogateAuthRequired' in the Keycloak REST API.
+ aliases:
+ - surrogateAuthRequired
+ type: bool
+
+ authorization_settings:
+ description:
+ - a data structure defining the authorization settings for this client. For reference,
+ please see the Keycloak API docs at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html#_resourceserverrepresentation).
+ This is 'authorizationSettings' in the Keycloak REST API.
+ type: dict
+ aliases:
+ - authorizationSettings
+
+ authentication_flow_binding_overrides:
+ description:
+ - Override realm authentication flow bindings.
+ type: dict
+ aliases:
+ - authenticationFlowBindingOverrides
+ version_added: 3.4.0
+
+ default_client_scopes:
+ description:
+ - List of default client scopes.
+ aliases:
+ - defaultClientScopes
+ type: list
+ elements: str
+ version_added: 4.7.0
+
+ optional_client_scopes:
+ description:
+ - List of optional client scopes.
+ aliases:
+ - optionalClientScopes
+ type: list
+ elements: str
+ version_added: 4.7.0
+
+ protocol_mappers:
+ description:
+ - a list of dicts defining protocol mappers for this client.
+ This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is presented to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+                    - This is either C(openid-connect) or C(saml); this specifies for which
+                      protocol this protocol mapper is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client. This can contain various configuration
+ settings; an example is given in the examples section. While an exhaustive list of
+              permissible options is not available, possible options as of Keycloak 3.4 are listed below. The Keycloak
+ API does not validate whether a given option is appropriate for the protocol used; if specified
+ anyway, Keycloak will simply not use it.
+ type: dict
+ suboptions:
+ saml.authnstatement:
+ description:
+ - For SAML clients, boolean specifying whether or not a statement containing method and timestamp
+ should be included in the login response.
+
+ saml.client.signature:
+ description:
+ - For SAML clients, boolean specifying whether a client signature is required and validated.
+
+ saml.encrypt:
+ description:
+ - Boolean specifying whether SAML assertions should be encrypted with the client's public key.
+
+ saml.force.post.binding:
+ description:
+ - For SAML clients, boolean specifying whether always to use POST binding for responses.
+
+ saml.onetimeuse.condition:
+ description:
+ - For SAML clients, boolean specifying whether a OneTimeUse condition should be included in login responses.
+
+ saml.server.signature:
+ description:
+ - Boolean specifying whether SAML documents should be signed by the realm.
+
+ saml.server.signature.keyinfo.ext:
+ description:
+ - For SAML clients, boolean specifying whether REDIRECT signing key lookup should be optimized through inclusion
+ of the signing key id in the SAML Extensions element.
+
+ saml.signature.algorithm:
+ description:
+ - Signature algorithm used to sign SAML documents. One of C(RSA_SHA256), C(RSA_SHA1), C(RSA_SHA512), or C(DSA_SHA1).
+
+ saml.signing.certificate:
+ description:
+ - SAML signing key certificate, base64-encoded.
+
+ saml.signing.private.key:
+ description:
+ - SAML signing key private key, base64-encoded.
+
+ saml_assertion_consumer_url_post:
+ description:
+ - SAML POST Binding URL for the client's assertion consumer service (login responses).
+
+ saml_assertion_consumer_url_redirect:
+ description:
+ - SAML Redirect Binding URL for the client's assertion consumer service (login responses).
+
+ saml_force_name_id_format:
+ description:
+                    - For SAML clients, Boolean specifying whether to ignore the requested NameID subject format and use the configured one instead.
+
+ saml_name_id_format:
+ description:
+                    - For SAML clients, the NameID format to use (one of C(username), C(email), C(transient), or C(persistent)).
+
+ saml_signature_canonicalization_method:
+ description:
+ - SAML signature canonicalization method. This is one of four values, namely
+ C(http://www.w3.org/2001/10/xml-exc-c14n#) for EXCLUSIVE,
+ C(http://www.w3.org/2001/10/xml-exc-c14n#WithComments) for EXCLUSIVE_WITH_COMMENTS,
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315) for INCLUSIVE, and
+ C(http://www.w3.org/TR/2001/REC-xml-c14n-20010315#WithComments) for INCLUSIVE_WITH_COMMENTS.
+
+ saml_single_logout_service_url_post:
+ description:
+ - SAML POST binding url for the client's single logout service.
+
+ saml_single_logout_service_url_redirect:
+ description:
+ - SAML redirect binding url for the client's single logout service.
+
+ user.info.response.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm for signed UserInfo-endpoint responses. One of C(RS256) or C(unsigned).
+
+ request.object.signature.alg:
+ description:
+ - For OpenID-Connect clients, JWA algorithm which the client needs to use when sending
+ OIDC request object. One of C(any), C(none), C(RS256).
+
+ use.jwks.url:
+ description:
+ - For OpenID-Connect clients, boolean specifying whether to use a JWKS URL to obtain client
+ public keys.
+
+ jwks.url:
+ description:
+ - For OpenID-Connect clients, URL where client keys in JWK are stored.
+
+ jwt.credential.certificate:
+ description:
+ - For OpenID-Connect clients, client certificate for validating JWT issued by
+ client and signed by its key, base64-encoded.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client (minimal example), authentication with credentials
+ community.general.keycloak_client:
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: present
+ delegate_to: localhost
+
+
+- name: Create or update Keycloak client (minimal example), authentication with token
+ community.general.keycloak_client:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ token: TOKEN
+ client_id: test
+ state: present
+ delegate_to: localhost
+
+
+- name: Delete a Keycloak client
+ community.general.keycloak_client:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ client_id: test
+ state: absent
+ delegate_to: localhost
+
+
+- name: Create or update a Keycloak client (with all the bells and whistles)
+ community.general.keycloak_client:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ realm: master
+ client_id: test
+ id: d8b127a3-31f6-44c8-a7e4-4ab9a3e78d95
+ name: this_is_a_test
+ description: Description of this wonderful client
+ root_url: https://www.example.com/
+ admin_url: https://www.example.com/admin_url
+ base_url: basepath
+ enabled: true
+ client_authenticator_type: client-secret
+ secret: REALLYWELLKEPTSECRET
+ redirect_uris:
+ - https://www.example.com/*
+ - http://localhost:8888/
+ web_origins:
+ - https://www.example.com/*
+ not_before: 1507825725
+ bearer_only: false
+ consent_required: false
+ standard_flow_enabled: true
+ implicit_flow_enabled: false
+ direct_access_grants_enabled: false
+ service_accounts_enabled: false
+ authorization_services_enabled: false
+ public_client: false
+ frontchannel_logout: false
+ protocol: openid-connect
+ full_scope_allowed: false
+ node_re_registration_timeout: -1
+ client_template: test
+ use_template_config: false
+ use_template_scope: false
+ use_template_mappers: false
+ always_display_in_console: true
+ registered_nodes:
+ node01.example.com: 1507828202
+ registration_access_token: eyJWT_TOKEN
+ surrogate_auth_required: false
+ default_roles:
+ - test01
+ - test02
+ authentication_flow_binding_overrides:
+ browser: 4c90336b-bf1d-4b87-916d-3677ba4e5fbb
+ protocol_mappers:
+ - config:
+ access.token.claim: true
+ claim.name: "family_name"
+ id.token.claim: true
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: true
+ consentRequired: true
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ consentRequired: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ saml.authnstatement: true
+ saml.client.signature: true
+ saml.force.post.binding: true
+ saml.server.signature: true
+ saml.signature.algorithm: RSA_SHA256
+ saml.signing.certificate: CERTIFICATEHERE
+ saml.signing.private.key: PRIVATEKEYHERE
+ saml_force_name_id_format: false
+ saml_name_id_format: username
+ saml_signature_canonicalization_method: "http://www.w3.org/2001/10/xml-exc-c14n#"
+ user.info.response.signature.alg: RS256
+ request.object.signature.alg: RS256
+ use.jwks.url: true
+ jwks.url: JWKS_URL_FOR_CLIENT_AUTH_JWT
+ jwt.credential.certificate: JWT_CREDENTIAL_CERTIFICATE_FOR_CLIENT_AUTH
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client testclient has been updated"
+
+proposed:
+ description: Representation of proposed client.
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+
+existing:
+ description: Representation of existing client (sample is truncated).
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description: Representation of client after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+import copy
+
+
+def normalise_cr(clientrep, remove_ids=False):
+ """ Re-sorts any properties where the order so that diff's is minimised, and adds default values where appropriate so that the
+ the change detection is more effective.
+
+ :param clientrep: the clientrep dict to be sanitized
+ :param remove_ids: If set to true, then the unique ID's of objects is removed to make the diff and checks for changed
+ not alert when the ID's of objects are not usually known, (e.g. for protocol_mappers)
+ :return: normalised clientrep dict
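+
+ Example (hypothetical input, for illustration):
+
+ >>> normalise_cr({'redirectUris': ['b', 'a'], 'protocolMappers': []})
+ {'redirectUris': ['a', 'b'], 'protocolMappers': []}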
+ """
+ # Avoid modifying the dict that was passed in
+ clientrep = clientrep.copy()
+
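+ # Note: when 'attributes' is a dict, sorted() below yields a sorted list of its
+ # keys, which replaces the original value; this makes the ordering deterministic.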
+ if 'attributes' in clientrep:
+ clientrep['attributes'] = list(sorted(clientrep['attributes']))
+
+ if 'redirectUris' in clientrep:
+ clientrep['redirectUris'] = list(sorted(clientrep['redirectUris']))
+
+ if 'protocolMappers' in clientrep:
+ clientrep['protocolMappers'] = sorted(clientrep['protocolMappers'], key=lambda x: (x.get('name'), x.get('protocol'), x.get('protocolMapper')))
+ for mapper in clientrep['protocolMappers']:
+ if remove_ids:
+ mapper.pop('id', None)
+
+ # Set to a default value.
+ mapper['consentRequired'] = mapper.get('consentRequired', False)
+
+ return clientrep
+
+
+def sanitize_cr(clientrep):
+ """ Removes probably sensitive details from a client representation.
+
+ :param clientrep: the clientrep dict to be sanitized
+ :return: sanitized clientrep dict
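+
+ Example (hypothetical input, for illustration):
+
+ >>> sanitize_cr({'secret': 'hunter2'})
+ {'secret': 'no_log'}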
+ """
+ result = copy.deepcopy(clientrep)
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return normalise_cr(result)
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+
+ id=dict(type='str'),
+ client_id=dict(type='str', aliases=['clientId']),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ root_url=dict(type='str', aliases=['rootUrl']),
+ admin_url=dict(type='str', aliases=['adminUrl']),
+ base_url=dict(type='str', aliases=['baseUrl']),
+ surrogate_auth_required=dict(type='bool', aliases=['surrogateAuthRequired']),
+ enabled=dict(type='bool'),
+ client_authenticator_type=dict(type='str', choices=['client-secret', 'client-jwt'], aliases=['clientAuthenticatorType']),
+ secret=dict(type='str', no_log=True),
+ registration_access_token=dict(type='str', aliases=['registrationAccessToken'], no_log=True),
+ default_roles=dict(type='list', elements='str', aliases=['defaultRoles']),
+ redirect_uris=dict(type='list', elements='str', aliases=['redirectUris']),
+ web_origins=dict(type='list', elements='str', aliases=['webOrigins']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ bearer_only=dict(type='bool', aliases=['bearerOnly']),
+ consent_required=dict(type='bool', aliases=['consentRequired']),
+ standard_flow_enabled=dict(type='bool', aliases=['standardFlowEnabled']),
+ implicit_flow_enabled=dict(type='bool', aliases=['implicitFlowEnabled']),
+ direct_access_grants_enabled=dict(type='bool', aliases=['directAccessGrantsEnabled']),
+ service_accounts_enabled=dict(type='bool', aliases=['serviceAccountsEnabled']),
+ authorization_services_enabled=dict(type='bool', aliases=['authorizationServicesEnabled']),
+ public_client=dict(type='bool', aliases=['publicClient']),
+ frontchannel_logout=dict(type='bool', aliases=['frontchannelLogout']),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool', aliases=['fullScopeAllowed']),
+ node_re_registration_timeout=dict(type='int', aliases=['nodeReRegistrationTimeout']),
+ registered_nodes=dict(type='dict', aliases=['registeredNodes']),
+ client_template=dict(type='str', aliases=['clientTemplate']),
+ use_template_config=dict(type='bool', aliases=['useTemplateConfig']),
+ use_template_scope=dict(type='bool', aliases=['useTemplateScope']),
+ use_template_mappers=dict(type='bool', aliases=['useTemplateMappers']),
+ always_display_in_console=dict(type='bool', aliases=['alwaysDisplayInConsole']),
+ authentication_flow_binding_overrides=dict(type='dict', aliases=['authenticationFlowBindingOverrides']),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ authorization_settings=dict(type='dict', aliases=['authorizationSettings']),
+ default_client_scopes=dict(type='list', elements='str', aliases=['defaultClientScopes']),
+ optional_client_scopes=dict(type='list', elements='str', aliases=['optionalClientScopes']),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['client_id', 'id'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ cid = module.params.get('id')
+ state = module.params.get('state')
+
+ # Filter and map the parameters names that apply to the client
+ client_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
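+ # For illustration: if only client_id and enabled were set, client_params would
+ # contain just 'client_id' and 'enabled'; the auth_* options plus 'state' and
+ # 'realm' are excluded.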
+
+ # See if it already exists in Keycloak
+ if cid is None:
+ before_client = kc.get_client_by_clientid(module.params.get('client_id'), realm=realm)
+ if before_client is not None:
+ cid = before_client['id']
+ else:
+ before_client = kc.get_client_by_id(cid, realm=realm)
+
+ if before_client is None:
+ before_client = {}
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
+ for client_param in client_params:
+ new_param_value = module.params.get(client_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
+ if isinstance(new_param_value, list):
+ if client_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if client_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
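+ # (e.g. a mapper given only 'name' and 'protocol' would otherwise also carry
+ # config=None, id=None, etc.; those null entries are stripped here)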
+
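+ # camel() converts the snake_case option name into the camelCase key expected
+ # by the Keycloak API (for example, not_before becomes notBefore)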
+ changeset[camel(client_param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_client = before_client.copy()
+ desired_client.update(changeset)
+
+ result['proposed'] = sanitize_cr(changeset)
+ result['existing'] = sanitize_cr(before_client)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_client:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Client does not exist; doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if 'clientId' not in desired_client:
+ module.fail_json(msg='client_id needs to be specified when creating a new client')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(desired_client))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ kc.create_client(desired_client, realm=realm)
+ after_client = kc.get_client_by_clientid(desired_client['clientId'], realm=realm)
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been created.' % desired_client['clientId']
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+ result['changed'] = True
+
+ if module.check_mode:
+ # We can only compare the current client with the proposed updates we have
+ before_norm = normalise_cr(before_client, remove_ids=True)
+ desired_norm = normalise_cr(desired_client, remove_ids=True)
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_norm),
+ after=sanitize_cr(desired_norm))
+ result['changed'] = (before_norm != desired_norm)
+
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_client(cid, desired_client, realm=realm)
+
+ after_client = kc.get_client_by_id(cid, realm=realm)
+ if before_client == after_client:
+ result['changed'] = False
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client),
+ after=sanitize_cr(after_client))
+
+ result['end_state'] = sanitize_cr(after_client)
+
+ result['msg'] = 'Client %s has been updated.' % desired_client['clientId']
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_client), after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_client(cid, realm=realm)
+ result['proposed'] = {}
+
+ result['end_state'] = {}
+
+ result['msg'] = 'Client %s has been deleted.' % before_client['clientId']
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
new file mode 100644
index 000000000..57dcac48d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_client_rolemapping.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_client_rolemapping
+
+short_description: Allows administration of Keycloak client_rolemapping with the Keycloak API
+
+version_added: 3.5.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak client_rolemapping with the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a client_rolemapping, where possible provide the role ID to the module. This removes a lookup
+ to the API to translate the name into the role ID.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the client_rolemapping.
+ - On C(present), the client_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the client_rolemapping will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ group_name:
+ type: str
+ description:
+ - Name of the group to be mapped.
+ - This parameter is required unless I(gid) is provided; supplying I(gid) instead avoids an extra API call.
+
+ gid:
+ type: str
+ description:
+ - Id of the group to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ client_id:
+ type: str
+ description:
+ - Name of the client to be mapped (different from I(cid)).
+ - This parameter is required unless I(cid) is provided; supplying I(cid) instead avoids an extra API call.
+
+ cid:
+ type: str
+ description:
+ - Id of the client to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ roles:
+ description:
+ - Roles to be mapped to the group.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description:
+ - Name of the role_representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
+ type: str
+ description:
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but
+ providing it will reduce the number of API calls required.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Gaëtan Daubresse (@Gaetan2907)
+'''
+
+EXAMPLES = '''
+- name: Map a client role to a group, authentication with credentials
+ community.general.keycloak_client_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ client_id: client1
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Map a client role to a group, authentication with token
+ community.general.keycloak_client_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ client_id: client1
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Unmap client role from a group
+ community.general.keycloak_client_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: absent
+ client_id: client1
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
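+
+# Hypothetical variant: passing the internal IDs (cid, gid) skips the API lookups
+# that translate client_id and group_name into those IDs.
+- name: Map a client role to a group, providing internal IDs
+ community.general.keycloak_client_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ cid: 9d59aa76-2755-48c6-b1af-beb70a82c3cd
+ gid: 92f2400e-0ecb-4185-8950-12dcef616c2b
+ group_name: group1
+ roles:
+ - name: role_name1
+ id: role_id1
+ delegate_to: localhost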
+
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to group group1."
+
+proposed:
+ description: Representation of proposed client role mapping.
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+
+existing:
+ description:
+ - Representation of existing client role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description:
+ - Representation of client role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ KeycloakAPI, keycloak_argument_spec, get_token, KeycloakError,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ roles_spec = dict(
+ name=dict(type='str'),
+ id=dict(type='str'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ gid=dict(type='str'),
+ group_name=dict(type='str'),
+ cid=dict(type='str'),
+ client_id=dict(type='str'),
+ roles=dict(type='list', elements='dict', options=roles_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('cid')
+ client_id = module.params.get('client_id')
+ gid = module.params.get('gid')
+ group_name = module.params.get('group_name')
+ roles = module.params.get('roles')
+
+ # Check the parameters
+ if cid is None and client_id is None:
+ module.fail_json(msg='Either the `client_id` or `cid` has to be specified.')
+ if gid is None and group_name is None:
+ module.fail_json(msg='Either the `group_name` or `gid` has to be specified.')
+
+ # Get the potential missing parameters
+ if gid is None:
+ group_rep = kc.get_group_by_name(group_name, realm=realm)
+ if group_rep is not None:
+ gid = group_rep['id']
+ else:
+ module.fail_json(msg='Could not fetch group %s' % group_name)
+ if cid is None:
+ cid = kc.get_client_id(client_id, realm=realm)
+ if cid is None:
+ module.fail_json(msg='Could not fetch client %s' % client_id)
+ if roles is None:
+ module.exit_json(msg="Nothing to do (no roles specified).")
+ else:
+ for role in roles:
+ if role['name'] is None and role['id'] is None:
+ module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
+ # Fetch missing role_id
+ if role['id'] is None:
+ role_id = kc.get_client_role_id_by_name(cid, role['name'], realm=realm)
+ if role_id is not None:
+ role['id'] = role_id
+ else:
+ module.fail_json(msg='Could not fetch role %s' % (role['name']))
+ # Fetch missing role_name
+ else:
+ fetched_role = kc.get_client_group_rolemapping_by_id(gid, cid, role['id'], realm=realm)
+ if fetched_role is None:
+ module.fail_json(msg='Could not fetch role %s' % (role['id']))
+ role['name'] = fetched_role['name']
+
+ # Get effective client-level role mappings
+ available_roles_before = kc.get_client_group_available_rolemappings(gid, cid, realm=realm)
+ assigned_roles_before = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
+
+ result['existing'] = assigned_roles_before
+ result['proposed'] = list(assigned_roles_before) if assigned_roles_before else []
+
+ update_roles = []
+ for role in roles:
+ # Fetch roles to assign if state present
+ if state == 'present':
+ for available_role in available_roles_before:
+ if role['name'] == available_role['name']:
+ update_roles.append({
+ 'id': role['id'],
+ 'name': role['name'],
+ })
+ result['proposed'].append(available_role)
+ # Fetch roles to remove if state absent
+ else:
+ for assigned_role in assigned_roles_before:
+ if role['name'] == assigned_role['name']:
+ update_roles.append({
+ 'id': role['id'],
+ 'name': role['name'],
+ })
+ if assigned_role in result['proposed']: # Handle double removal
+ result['proposed'].remove(assigned_role)
+
+ if update_roles:
+ if state == 'present':
+ # Assign roles
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+ if module.check_mode:
+ module.exit_json(**result)
+ kc.add_group_rolemapping(gid, cid, update_roles, realm=realm)
+ result['msg'] = 'Roles %s assigned to group %s.' % (update_roles, group_name)
+ assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
+ result['end_state'] = assigned_roles_after
+ module.exit_json(**result)
+ else:
+ # Remove mapping of role
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=assigned_roles_before, after=result['proposed'])
+ if module.check_mode:
+ module.exit_json(**result)
+ kc.delete_group_rolemapping(gid, cid, update_roles, realm=realm)
+ result['msg'] = 'Roles %s removed from group %s.' % (update_roles, group_name)
+ assigned_roles_after = kc.get_client_group_composite_rolemappings(gid, cid, realm=realm)
+ result['end_state'] = assigned_roles_after
+ module.exit_json(**result)
+ # Do nothing
+ else:
+ result['changed'] = False
+ result['msg'] = 'Nothing to do, roles %s are %s with group %s.' % (roles, 'mapped' if state == 'present' else 'not mapped', group_name)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
new file mode 100644
index 000000000..a23d92867
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope.py
@@ -0,0 +1,506 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clientscope
+
+short_description: Allows administration of Keycloak client_scopes via Keycloak API
+
+version_added: 3.4.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak client_scopes via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a client_scope, where possible provide the client_scope ID to the module. This removes a lookup
+ to the API to translate the name into the client_scope ID.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the client_scope.
+ - On C(present), the client_scope will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the client_scope will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the client_scope.
+ - This parameter is required only when creating or updating the client_scope.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this client_scope resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this client_scope.
+ - This parameter is not required for updating or deleting a client_scope but
+ providing it will reduce the number of API calls required.
+
+ description:
+ type: str
+ description:
+ - Description for this client_scope.
+ - This parameter is not required for updating or deleting a client_scope.
+
+ protocol:
+ description:
+ - Type of client.
+ choices: ['openid-connect', 'saml', 'wsfed']
+ type: str
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client.
+ - This is 'protocolMappers' in the Keycloak REST API.
+ aliases:
+ - protocolMappers
+ type: list
+ elements: dict
+ suboptions:
+ protocol:
+ description:
+ - This specifies for which protocol this protocol mapper is active.
+ choices: ['openid-connect', 'saml', 'wsfed']
+ type: str
+
+ protocolMapper:
+ description:
+ - "The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least:"
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the C(existing) return value.
+ type: dict
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the client_scope.
+ - Values may be single values (for example a string) or a list of strings.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Gaëtan Daubresse (@Gaetan2907)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak client_scopes, authentication with credentials
+ community.general.keycloak_clientscope:
+ name: my-new-kc-clientscope
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a Keycloak client_scopes, authentication with token
+ community.general.keycloak_clientscope:
+ name: my-new-kc-clientscope
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+
+- name: Delete a keycloak client_scopes
+ community.general.keycloak_clientscope:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak client_scope based on name
+ community.general.keycloak_clientscope:
+ name: my-clientscope-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak client_scope
+ community.general.keycloak_clientscope:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-clientscope-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a Keycloak client_scope with some custom attributes
+ community.general.keycloak_clientscope:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_clientscope
+ description: description-of-clientscope
+ protocol: openid-connect
+ protocol_mappers:
+ - config:
+ access.token.claim: true
+ claim.name: "family_name"
+ id.token.claim: true
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: true
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ - config:
+ attribute.name: Role
+ attribute.nameformat: Basic
+ single: false
+ name: role list
+ protocol: saml
+ protocolMapper: saml-role-list-mapper
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client_scope testclientscope has been updated"
+
+proposed:
+ description: Representation of proposed client scope.
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+
+existing:
+ description: Representation of existing client scope (sample is truncated).
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description: Representation of client scope after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+import copy
+
+
+def sanitize_cr(clientscoperep):
+ """ Removes probably sensitive details from a clientscoperep representation.
+
+ :param clientscoperep: the clientscoperep dict to be sanitized
+ :return: sanitized clientrep dict
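+
+ Example (hypothetical input, for illustration):
+
+ >>> sanitize_cr({'name': 'scope1', 'secret': 'hunter2'})
+ {'name': 'scope1', 'secret': 'no_log'}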
+ """
+ # deep copy so that masking nested values below does not mutate the caller's dict
+ result = copy.deepcopy(clientscoperep)
+ if 'secret' in result:
+ result['secret'] = 'no_log'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
+ result['attributes']['saml.signing.private.key'] = 'no_log'
+ return result
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml', 'wsfed']),
+ attributes=dict(type='dict'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec, aliases=['protocolMappers']),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+ name = module.params.get('name')
+ protocol_mappers = module.params.get('protocol_mappers')
+
+ # Filter and map the parameters names that apply to the client scope
+ clientscope_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm'] and
+ module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ if cid is None:
+ before_clientscope = kc.get_clientscope_by_name(name, realm=realm)
+ else:
+ before_clientscope = kc.get_clientscope_by_clientscopeid(cid, realm=realm)
+
+ if before_clientscope is None:
+ before_clientscope = {}
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
+ for clientscope_param in clientscope_params:
+ new_param_value = module.params.get(clientscope_param)
+
+ # some lists in the Keycloak API are sorted, some are not.
+ if isinstance(new_param_value, list):
+ if clientscope_param in ['attributes']:
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ # Unfortunately, the ansible argument spec checker introduces variables with null values when
+ # they are not specified
+ if clientscope_param == 'protocol_mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
+ changeset[camel(clientscope_param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_clientscope = before_clientscope.copy()
+ desired_clientscope.update(changeset)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_clientscope:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Clientscope does not exist; doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new clientscope')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(desired_clientscope))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ kc.create_clientscope(desired_clientscope, realm=realm)
+ after_clientscope = kc.get_clientscope_by_name(name, realm)
+
+ result['end_state'] = sanitize_cr(after_clientscope)
+
+ result['msg'] = 'Clientscope {name} has been created with ID {id}'.format(name=after_clientscope['name'],
+ id=after_clientscope['id'])
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # no changes
+ if desired_clientscope == before_clientscope:
+ result['changed'] = False
+ result['end_state'] = sanitize_cr(desired_clientscope)
+ result['msg'] = "No changes required to clientscope {name}.".format(name=before_clientscope['name'])
+ module.exit_json(**result)
+
+ # doing an update
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_clientscope), after=sanitize_cr(desired_clientscope))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_clientscope(desired_clientscope, realm=realm)
+
+ # do the protocolmappers update
+ if protocol_mappers is not None:
+ for protocol_mapper in protocol_mappers:
+ # update if protocolmapper exist
+ current_protocolmapper = kc.get_clientscope_protocolmapper_by_name(desired_clientscope['id'], protocol_mapper['name'], realm=realm)
+ if current_protocolmapper is not None:
+ protocol_mapper['id'] = current_protocolmapper['id']
+ kc.update_clientscope_protocolmappers(desired_clientscope['id'], protocol_mapper, realm=realm)
+ # create otherwise
+ else:
+ kc.create_clientscope_protocolmapper(desired_clientscope['id'], protocol_mapper, realm=realm)
+
+ after_clientscope = kc.get_clientscope_by_clientscopeid(desired_clientscope['id'], realm=realm)
+
+ result['end_state'] = after_clientscope
+
+ result['msg'] = "Clientscope {id} has been updated".format(id=after_clientscope['id'])
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize_cr(before_clientscope), after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ cid = before_clientscope['id']
+ kc.delete_clientscope(cid=cid, realm=realm)
+
+ result['end_state'] = {}
+
+ result['msg'] = "Clientscope {name} has been deleted".format(name=before_clientscope['name'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
new file mode 100644
index 000000000..facf02aa4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientscope_type.py
@@ -0,0 +1,285 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clientscope_type
+
+short_description: Set the type of a clientscope in realm or client via Keycloak API
+
+version_added: 6.6.0
+
+description:
+ - This module allows you to set the type (optional, default) of clientscopes
+ via the Keycloak REST API. It requires access to the REST API via OpenID
+ Connect; the user connecting and the client being used must have the
+ requisite access rights. In a default Keycloak installation, admin-cli and
+ an admin user would work, as would a separate client definition with the
+ scope tailored to your needs and a user having the expected roles.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm.
+ default: 'master'
+
+ client_id:
+ description:
+ - The I(client_id) of the client. If not set, the clientscope types are set as a default for the realm.
+ aliases:
+ - clientId
+ type: str
+
+ default_clientscopes:
+ description:
+ - Client scopes that should be of type default.
+ type: list
+ elements: str
+
+ optional_clientscopes:
+ description:
+ - Client scopes that should be of type optional.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Simon Pahl (@simonpahl)
+'''
+
+EXAMPLES = '''
+- name: Set default client scopes on realm level
+ community.general.keycloak_clientscope_type:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: "MyCustomRealm"
+ default_clientscopes: ['profile', 'roles']
+ delegate_to: localhost
+
+
+- name: Set default and optional client scopes on client level with token auth
+ community.general.keycloak_clientscope_type:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ realm: "MyCustomRealm"
+ client_id: "MyCustomClient"
+ default_clientscopes: ['profile', 'roles']
+ optional_clientscopes: ['phone']
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: ""
+proposed:
+ description: Representation of proposed client-scope types mapping.
+ returned: always
+ type: dict
+ sample: {
+ default_clientscopes: ["profile", "role"],
+ optional_clientscopes: []
+ }
+existing:
+ description:
+ - Representation of client scopes before module execution.
+ returned: always
+ type: dict
+ sample: {
+ default_clientscopes: ["profile", "role"],
+ optional_clientscopes: ["phone"]
+ }
+end_state:
+ description:
+ - Representation of client scopes after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample: {
+ default_clientscopes: ["profile", "role"],
+ optional_clientscopes: []
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ KeycloakAPI, KeycloakError, get_token)
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import \
+ keycloak_argument_spec
+
+
+def keycloak_clientscope_type_module():
+ """
+ Returns an AnsibleModule definition.
+
+ :return: AnsibleModule
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ realm=dict(default='master'),
+ client_id=dict(type='str', aliases=['clientId']),
+ default_clientscopes=dict(type='list', elements='str'),
+ optional_clientscopes=dict(type='list', elements='str'),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([
+ ['token', 'auth_realm', 'auth_username', 'auth_password'],
+ ['default_clientscopes', 'optional_clientscopes']
+ ]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]),
+ mutually_exclusive=[
+ ['token', 'auth_realm'],
+ ['token', 'auth_username'],
+ ['token', 'auth_password']
+ ])
+
+ return module
+
+
+def clientscopes_to_add(existing, proposed):
+ to_add = []
+ existing_clientscope_ids = extract_field(existing, 'id')
+ for clientscope in proposed:
+ if clientscope['id'] not in existing_clientscope_ids:
+ to_add.append(clientscope)
+ return to_add
+
+
+def clientscopes_to_delete(existing, proposed):
+ to_delete = []
+ proposed_clientscope_ids = extract_field(proposed, 'id')
+ for clientscope in existing:
+ if clientscope['id'] not in proposed_clientscope_ids:
+ to_delete.append(clientscope)
+ return to_delete
+
+
+def extract_field(clientscopes, field='name'):
+ return [cs[field] for cs in clientscopes]
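+
+# Example of the helpers above (hypothetical data):
+# extract_field([{'id': '1', 'name': 'profile'}, {'id': '2', 'name': 'roles'}])
+# -> ['profile', 'roles']
+# clientscopes_to_add(existing=[], proposed=[{'id': '1', 'name': 'profile'}])
+# -> [{'id': '1', 'name': 'profile'}]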
+
+
+def main():
+ """
+ Module keycloak_clientscope_type
+
+ :return:
+ """
+
+ module = keycloak_clientscope_type_module()
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ client_id = module.params.get('client_id')
+ default_clientscopes = module.params.get('default_clientscopes')
+ optional_clientscopes = module.params.get('optional_clientscopes')
+
+ result = dict(changed=False, msg='', proposed={}, existing={}, end_state={})
+
+ all_clientscopes = kc.get_clientscopes(realm)
+ default_clientscopes_real = []
+ optional_clientscopes_real = []
+
+ for client_scope in all_clientscopes:
+ if default_clientscopes is not None and client_scope["name"] in default_clientscopes:
+ default_clientscopes_real.append(client_scope)
+ if optional_clientscopes is not None and client_scope["name"] in optional_clientscopes:
+ optional_clientscopes_real.append(client_scope)
+
+ if default_clientscopes is not None and len(default_clientscopes_real) != len(default_clientscopes):
+ module.fail_json(msg='At least one of the default_clientscopes does not exist!')
+
+ if optional_clientscopes is not None and len(optional_clientscopes_real) != len(optional_clientscopes):
+ module.fail_json(msg='At least one of the optional_clientscopes does not exist!')
+
+ result['proposed'].update({
+ 'default_clientscopes': 'no-change' if default_clientscopes is None else default_clientscopes,
+ 'optional_clientscopes': 'no-change' if optional_clientscopes is None else optional_clientscopes
+ })
+
+ default_clientscopes_existing = kc.get_default_clientscopes(realm, client_id)
+ optional_clientscopes_existing = kc.get_optional_clientscopes(realm, client_id)
+
+ result['existing'].update({
+ 'default_clientscopes': extract_field(default_clientscopes_existing),
+ 'optional_clientscopes': extract_field(optional_clientscopes_existing)
+ })
+
+ if module._diff:
+ result['diff'] = dict(before=result['existing'], after=result['proposed'])
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ default_clientscopes_add = clientscopes_to_add(default_clientscopes_existing, default_clientscopes_real)
+ optional_clientscopes_add = clientscopes_to_add(optional_clientscopes_existing, optional_clientscopes_real)
+
+ default_clientscopes_delete = clientscopes_to_delete(default_clientscopes_existing, default_clientscopes_real)
+ optional_clientscopes_delete = clientscopes_to_delete(optional_clientscopes_existing, optional_clientscopes_real)
+
+ # first delete so clientscopes can change type
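+ # (a scope whose type changes, e.g. from default to optional, must be removed
+ # from its old list before it can be added to the new one)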
+ for clientscope in default_clientscopes_delete:
+ kc.delete_default_clientscope(clientscope['id'], realm, client_id)
+ for clientscope in optional_clientscopes_delete:
+ kc.delete_optional_clientscope(clientscope['id'], realm, client_id)
+
+ for clientscope in default_clientscopes_add:
+ kc.add_default_clientscope(clientscope['id'], realm, client_id)
+ for clientscope in optional_clientscopes_add:
+ kc.add_optional_clientscope(clientscope['id'], realm, client_id)
+
+ result["changed"] = (
+ len(default_clientscopes_add) > 0
+ or len(optional_clientscopes_add) > 0
+ or len(default_clientscopes_delete) > 0
+ or len(optional_clientscopes_delete) > 0
+ )
+
+ result['end_state'].update({
+ 'default_clientscopes': extract_field(kc.get_default_clientscopes(realm, client_id)),
+ 'optional_clientscopes': extract_field(kc.get_optional_clientscopes(realm, client_id))
+ })
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
new file mode 100644
index 000000000..98a41ad20
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_info.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Fynn Chen <ethan.cfchen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clientsecret_info
+
+short_description: Retrieve client secret via Keycloak API
+
+version_added: 6.1.0
+
+description:
+ - This module allows you to get a Keycloak client secret via the Keycloak
+ REST API. It requires access to the REST API via OpenID Connect; the user
+ connecting and the client being used must have the requisite access rights.
+ In a default Keycloak installation, admin-cli and an admin user would work,
+ as would a separate client definition with the scope tailored to your needs
+ and a user having the expected roles.
+
+ - When retrieving a new client secret, where possible provide the client's
+ I(id) (not I(client_id)) to the module. This removes a lookup to the API to
+ translate the I(client_id) into the client ID.
+
+ - "Note that this module returns the client secret. To avoid this showing up in the logs,
+ please add C(no_log: true) to the task."
+
+options:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this client resides.
+ default: 'master'
+
+ id:
+ description:
+ - The unique identifier for this client.
+ - This parameter is not required for getting or generating a client secret but
+ providing it will reduce the number of API calls required.
+ type: str
+
+ client_id:
+ description:
+ - The I(client_id) of the client. Passing this instead of I(id) results in an
+ extra API call.
+ aliases:
+ - clientId
+ type: str
+
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+author:
+ - Fynn Chen (@fynncfchen)
+ - John Cant (@johncant)
+'''
+
+EXAMPLES = '''
+- name: Get a Keycloak client secret, authentication with credentials
+ community.general.keycloak_clientsecret_info:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+ no_log: true
+
+- name: Get a new Keycloak client secret, authentication with token
+ community.general.keycloak_clientsecret_info:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+ no_log: true
+
+- name: Get a new Keycloak client secret, passing client_id instead of id
+ community.general.keycloak_clientsecret_info:
+ client_id: 'myClientId'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+ no_log: true
+'''
+
+RETURN = '''
+msg:
+ description: Textual description of whether we succeeded or failed.
+ returned: always
+ type: str
+
+clientsecret_info:
+ description: Representation of the client secret.
+ returned: on success
+ type: complex
+ contains:
+ type:
+ description: Credential type.
+ type: str
+ returned: always
+ sample: secret
+ value:
+ description: Client secret.
+ type: str
+ returned: always
+ sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ KeycloakAPI, KeycloakError, get_token)
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import (
+ keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params)
+
+
+def main():
+ """
+ Module keycloak_clientsecret_info
+
+ :return:
+ """
+
+ module = keycloak_clientsecret_module()
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
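+ # Resolve id/realm: if only client_id was supplied, this helper performs the
+ # extra API call that translates it into the client's internal id.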
+ id, realm = keycloak_clientsecret_module_resolve_params(module, kc)
+
+ clientsecret = kc.get_clientsecret(id=id, realm=realm)
+
+ result = {
+ 'clientsecret_info': clientsecret,
+ 'msg': 'Get client secret successful for ID {id}'.format(id=id)
+ }
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py
new file mode 100644
index 000000000..7e8b29543
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clientsecret_regenerate.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Fynn Chen <ethan.cfchen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clientsecret_regenerate
+
+short_description: Regenerate Keycloak client secret via Keycloak API
+
+version_added: 6.1.0
+
+description:
+ - This module allows you to regenerate a Keycloak client secret via the
+ Keycloak REST API. It requires access to the REST API via OpenID Connect;
+ the user connecting and the client being used must have the requisite access
+ rights. In a default Keycloak installation, admin-cli and an admin user
+ would work, as would a separate client definition with the scope tailored to
+ your needs and a user having the expected roles.
+
+ - When regenerating a client secret, where possible provide the client's I(id)
+ (not I(client_id)) to the module. This avoids an extra API lookup that would
+ otherwise be needed to translate the I(client_id) into the client's unique ID.
+
+ - "Note that this module returns the client secret. To avoid this showing up in the logs,
+ please add C(no_log: true) to the task."
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this client resides.
+ default: 'master'
+
+ id:
+ description:
+ - The unique identifier for this client.
+ - This parameter is not required for getting or generating a client secret but
+ providing it will reduce the number of API calls required.
+ type: str
+
+ client_id:
+ description:
+ - The I(client_id) of the client. Passing this instead of I(id) results in an
+ extra API call.
+ aliases:
+ - clientId
+ type: str
+
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Fynn Chen (@fynncfchen)
+ - John Cant (@johncant)
+'''
+
+EXAMPLES = '''
+- name: Regenerate a Keycloak client secret, authentication with credentials
+ community.general.keycloak_clientsecret_regenerate:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+ no_log: true
+
+- name: Regenerate a Keycloak client secret, authentication with token
+ community.general.keycloak_clientsecret_regenerate:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+ no_log: true
+
+- name: Regenerate a Keycloak client secret, passing client_id instead of id
+ community.general.keycloak_clientsecret_regenerate:
+ client_id: 'myClientId'
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+ no_log: true
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the client credential after module execution.
+ returned: on success
+ type: complex
+ contains:
+ type:
+ description: Credential type.
+ type: str
+ returned: always
+ sample: secret
+ value:
+ description: Client secret.
+ type: str
+ returned: always
+ sample: cUGnX1EIeTtPPAkcyGMv0ncyqDPu68P1
+
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ KeycloakAPI, KeycloakError, get_token)
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak_clientsecret import (
+ keycloak_clientsecret_module, keycloak_clientsecret_module_resolve_params)
+
+
+def main():
+ """
+ Module keycloak_clientsecret_regenerate
+
+ :return:
+ """
+
+ module = keycloak_clientsecret_module()
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ id, realm = keycloak_clientsecret_module_resolve_params(module, kc)
+
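+ # In check mode, skip the API call and report a placeholder secret of the expected length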
+ if module.check_mode:
+ dummy_result = {
+ "msg": 'No action taken while in check mode',
+ "end_state": {'type': 'secret', 'value': 'X' * 32}
+ }
+ module.exit_json(**dummy_result)
+
+ # Create new secret
+ clientsecret = kc.create_clientsecret(id=id, realm=realm)
+
+ result = {
+ "msg": 'New client secret has been generated for ID {id}'.format(id=id),
+ "end_state": clientsecret
+ }
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
new file mode 100644
index 000000000..d2555afc5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_clienttemplate.py
@@ -0,0 +1,456 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_clienttemplate
+
+short_description: Allows administration of Keycloak client templates via Keycloak API
+
+description:
+ - This module allows the administration of Keycloak client templates via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html)
+
+ - The Keycloak API does not always enforce that only sensible settings are used -- you can,
+ for instance, set SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the client template.
+ - On C(present), the client template will be created (or updated if it exists already).
+ - On C(absent), the client template will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - ID of the client template to be worked on. This is usually a UUID.
+ type: str
+
+ realm:
+ description:
+ - Realm this client template is found in.
+ type: str
+ default: master
+
+ name:
+ description:
+ - Name of the client template.
+ type: str
+
+ description:
+ description:
+ - Description of the client template in Keycloak.
+ type: str
+
+ protocol:
+ description:
+ - Type of client template (either C(openid-connect) or C(saml)).
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ full_scope_allowed:
+ description:
+ - Is the "Full Scope Allowed" feature set for this client template or not.
+ This is 'fullScopeAllowed' in the Keycloak REST API.
+ type: bool
+
+ protocol_mappers:
+ description:
+ - A list of dicts defining protocol mappers for this client template.
+ This is C(protocolMappers) in the Keycloak REST API.
+ type: list
+ elements: dict
+ suboptions:
+ consentRequired:
+ description:
+ - Specifies whether a user needs to provide consent to a client for this mapper to be active.
+ type: bool
+
+ consentText:
+ description:
+ - The human-readable name of the consent the user is asked to accept.
+ type: str
+
+ id:
+ description:
+ - Usually a UUID specifying the internal ID of this protocol mapper instance.
+ type: str
+
+ name:
+ description:
+ - The name of this protocol mapper.
+ type: str
+
+ protocol:
+ description:
+ - This is either C(openid-connect) or C(saml); it specifies for which protocol this
+ protocol mapper is active.
+ choices: ['openid-connect', 'saml']
+ type: str
+
+ protocolMapper:
+ description:
+ - The Keycloak-internal name of the type of this protocol-mapper. While an exhaustive list is
+ impossible to provide since this may be extended through SPIs by the user of Keycloak,
+ by default Keycloak as of 3.4 ships with at least
+ - C(docker-v2-allow-all-mapper)
+ - C(oidc-address-mapper)
+ - C(oidc-full-name-mapper)
+ - C(oidc-group-membership-mapper)
+ - C(oidc-hardcoded-claim-mapper)
+ - C(oidc-hardcoded-role-mapper)
+ - C(oidc-role-name-mapper)
+ - C(oidc-script-based-protocol-mapper)
+ - C(oidc-sha256-pairwise-sub-mapper)
+ - C(oidc-usermodel-attribute-mapper)
+ - C(oidc-usermodel-client-role-mapper)
+ - C(oidc-usermodel-property-mapper)
+ - C(oidc-usermodel-realm-role-mapper)
+ - C(oidc-usersessionmodel-note-mapper)
+ - C(saml-group-membership-mapper)
+ - C(saml-hardcode-attribute-mapper)
+ - C(saml-hardcode-role-mapper)
+ - C(saml-role-list-mapper)
+ - C(saml-role-name-mapper)
+ - C(saml-user-attribute-mapper)
+ - C(saml-user-property-mapper)
+ - C(saml-user-session-note-mapper)
+ - An exhaustive list of available mappers on your installation can be obtained on
+ the admin console by going to Server Info -> Providers and looking under
+ 'protocol-mapper'.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the protocol mapper; the
+ contents differ depending on the value of I(protocolMapper) and are not documented
+ other than by the source of the mappers and its parent class(es). An example is given
+ below. It is easiest to obtain valid config values by dumping an already-existing
+ protocol mapper configuration through check-mode in the I(existing) field.
+ type: dict
+
+ attributes:
+ description:
+ - A dict of further attributes for this client template. This can contain various
+ configuration settings, though in the default installation of Keycloak as of 3.4, none
+ are documented or known, so this is usually empty.
+ type: dict
+
+notes:
+ - The Keycloak REST API defines further fields (namely I(bearerOnly), I(consentRequired), I(standardFlowEnabled),
+ I(implicitFlowEnabled), I(directAccessGrantsEnabled), I(serviceAccountsEnabled), I(publicClient), and
+ I(frontchannelLogout)) which, while available with keycloak_client, do not have any effect on
+ Keycloak client-templates and are discarded if supplied with an API request changing client-templates. As such,
+ they are not available through this module.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Eike Frost (@eikef)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak client template (minimal), authentication with credentials
+ community.general.keycloak_clienttemplate:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ delegate_to: localhost
+
+- name: Create or update Keycloak client template (minimal), authentication with token
+ community.general.keycloak_clienttemplate:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ token: TOKEN
+ realm: master
+ name: this_is_a_test
+ delegate_to: localhost
+
+- name: Delete Keycloak client template
+ community.general.keycloak_clienttemplate:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ state: absent
+ name: test01
+ delegate_to: localhost
+
+- name: Create or update Keycloak client template (with a protocol mapper)
+ community.general.keycloak_clienttemplate:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ realm: master
+ name: this_is_a_test
+ protocol_mappers:
+ - config:
+ access.token.claim: true
+ claim.name: "family_name"
+ id.token.claim: true
+ jsonType.label: String
+ user.attribute: lastName
+ userinfo.token.claim: true
+ consentRequired: true
+ consentText: "${familyName}"
+ name: family name
+ protocol: openid-connect
+ protocolMapper: oidc-usermodel-property-mapper
+ full_scope_allowed: false
+ id: bce6f5e9-d7d3-4955-817e-c5b7f8d65b3f
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Client template testclient has been updated"
+
+proposed:
+ description: Representation of proposed client template.
+ returned: always
+ type: dict
+ sample: {
+ name: "test01"
+ }
+
+existing:
+ description: Representation of existing client template (sample is truncated).
+ returned: always
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+
+end_state:
+ description: Representation of client template after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample: {
+ "description": "test01",
+ "fullScopeAllowed": false,
+ "id": "9c3712ab-decd-481e-954f-76da7b006e5f",
+ "name": "test01",
+ "protocol": "saml"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ protmapper_spec = dict(
+ consentRequired=dict(type='bool'),
+ consentText=dict(type='str'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ protocolMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ realm=dict(type='str', default='master'),
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ protocol=dict(type='str', choices=['openid-connect', 'saml']),
+ attributes=dict(type='dict'),
+ full_scope_allowed=dict(type='bool'),
+ protocol_mappers=dict(type='list', elements='dict', options=protmapper_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('id')
+
+ # Filter and map the parameters names that apply to the client template
+ clientt_params = [x for x in module.params
+ if x not in ['state', 'auth_keycloak_url', 'auth_client_id', 'auth_realm',
+ 'auth_client_secret', 'auth_username', 'auth_password',
+ 'validate_certs', 'realm'] and module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ if cid is None:
+ before_clientt = kc.get_client_template_by_name(module.params.get('name'), realm=realm)
+ if before_clientt is not None:
+ cid = before_clientt['id']
+ else:
+ before_clientt = kc.get_client_template_by_id(cid, realm=realm)
+
+ if before_clientt is None:
+ before_clientt = {}
+
+ result['existing'] = before_clientt
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
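+ # camel() converts each snake_cased option name to the camelCase key the Keycloak API expects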
+ for clientt_param in clientt_params:
+ # lists in the Keycloak API are sorted
+ new_param_value = module.params.get(clientt_param)
+ if isinstance(new_param_value, list):
+ try:
+ new_param_value = sorted(new_param_value)
+ except TypeError:
+ pass
+ changeset[camel(clientt_param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_clientt = before_clientt.copy()
+ desired_clientt.update(changeset)
+
+ result['proposed'] = changeset
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_clientt:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Client template does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if 'name' not in desired_clientt:
+ module.fail_json(msg='name needs to be specified when creating a new client template')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=desired_clientt)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ kc.create_client_template(desired_clientt, realm=realm)
+ after_clientt = kc.get_client_template_by_name(desired_clientt['name'], realm=realm)
+
+ result['end_state'] = after_clientt
+
+ result['msg'] = 'Client template %s has been created.' % desired_clientt['name']
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current client template with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_clientt,
+ after=desired_clientt)
+
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_client_template(cid, desired_clientt, realm=realm)
+
+ after_clientt = kc.get_client_template_by_id(cid, realm=realm)
+ if before_clientt == after_clientt:
+ result['changed'] = False
+
+ result['end_state'] = after_clientt
+
+ if module._diff:
+ result['diff'] = dict(before=before_clientt, after=after_clientt)
+
+ result['msg'] = 'Client template %s has been updated.' % desired_clientt['name']
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_clientt, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_client_template(cid, realm=realm)
+ result['proposed'] = {}
+
+ result['end_state'] = {}
+
+ result['msg'] = 'Client template %s has been deleted.' % before_clientt['name']
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_group.py b/ansible_collections/community/general/plugins/modules/keycloak_group.py
new file mode 100644
index 000000000..399bc5b4f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_group.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_group
+
+short_description: Allows administration of Keycloak groups via Keycloak API
+
+description:
+ - This module allows you to add, remove or modify Keycloak groups via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+ - When updating a group, where possible provide the group ID to the module. This removes a lookup
+ to the API to translate the name into the group ID.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the group.
+ - On C(present), the group will be created if it does not yet exist, or updated with the parameters you provide.
+ - >-
+ On C(absent), the group will be removed if it exists. Be aware that absenting
+ a group with subgroups will automatically delete all its subgroups too.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ description:
+ - Name of the group.
+ - This parameter is required only when creating or updating the group.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this group resides.
+ default: 'master'
+
+ id:
+ type: str
+ description:
+ - The unique identifier for this group.
+ - This parameter is not required for updating or deleting a group but
+ providing it will reduce the number of API calls required.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the group.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+ parents:
+ version_added: "6.4.0"
+ type: list
+ description:
+ - List of parent groups for the group being handled, sorted from top to bottom.
+ - >-
+ Set this to create a group as a subgroup of another group or groups (parents) or
+ when accessing an existing subgroup by name.
+ - >-
+ Not necessary to set when accessing an existing subgroup by its C(ID) because in
+ that case the group can be directly queried without necessarily knowing its parent(s).
+ elements: dict
+ suboptions:
+ id:
+ type: str
+ description:
+ - Identify parent by ID.
+ - Needs less API calls than using I(name).
+ - A deep parent chain can be started at any point as long as the first given parent is specified by ID.
+ - Note that in principle both ID and name can be specified at the same time,
+ but the current implementation always uses only one of them, with the ID
+ being preferred.
+ name:
+ type: str
+ description:
+ - Identify parent by name.
+ - Needs more internal API calls than using I(id) to map names to IDs under the hood.
+ - When giving a parent chain with only names, it must be complete up to the top.
+ - Note that in principle both ID and name can be specified at the same time,
+ but the current implementation always uses only one of them, with the ID
+ being preferred.
+
+notes:
+ - Presently, the I(realmRoles), I(clientRoles) and I(access) attributes returned by the Keycloak API
+ are read-only for groups. This limitation will be removed in a later version of this module.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Adam Goossens (@adamgoossens)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak group, authentication with credentials
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ register: result_new_kcgrp
+ delegate_to: localhost
+
+- name: Create a Keycloak group, authentication with token
+ community.general.keycloak_group:
+ name: my-new-kc-group
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+
+- name: Delete a keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ state: absent
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak group based on name
+ community.general.keycloak_group:
+ name: my-group-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Update the name of a Keycloak group
+ community.general.keycloak_group:
+ id: '9d59aa76-2755-48c6-b1af-beb70a82c3cd'
+ name: an-updated-kc-group-name
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+
+- name: Create a Keycloak subgroup of a base group (using parent name)
+ community.general.keycloak_group:
+ name: my-new-kc-group-sub
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ parents:
+ - name: my-new-kc-group
+ register: result_new_kcgrp_sub
+ delegate_to: localhost
+
+- name: Create a Keycloak subgroup of a base group (using parent id)
+ community.general.keycloak_group:
+ name: my-new-kc-group-sub2
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ parents:
+ - id: "{{ result_new_kcgrp.end_state.id }}"
+ delegate_to: localhost
+
+- name: Create a Keycloak subgroup of a subgroup (using parent names)
+ community.general.keycloak_group:
+ name: my-new-kc-group-sub-sub
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ parents:
+ - name: my-new-kc-group
+ - name: my-new-kc-group-sub
+ delegate_to: localhost
+
+- name: Create a Keycloak subgroup of a subgroup (using direct parent id)
+ community.general.keycloak_group:
+ name: my-new-kc-group-sub-sub
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ parents:
+ - id: "{{ result_new_kcgrp_sub.end_state.id }}"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+end_state:
+ description: Representation of the group after module execution (sample is truncated).
+ returned: on success
+ type: complex
+ contains:
+ id:
+ description: GUID that identifies the group.
+ type: str
+ returned: always
+ sample: 23f38145-3195-462c-97e7-97041ccea73e
+ name:
+ description: Name of the group.
+ type: str
+ returned: always
+ sample: grp-test-123
+ attributes:
+ description: Attributes applied to this group.
+ type: dict
+ returned: always
+ sample:
+ attr1: ["val1", "val2", "val3"]
+ path:
+ description: URI path to the group.
+ type: str
+ returned: always
+ sample: /grp-test-123
+ realmRoles:
+ description: An array of the realm-level roles granted to this group.
+ type: list
+ returned: always
+ sample: []
+ subGroups:
+ description: A list of groups that are children of this group. These groups will have the same parameters as
+ documented here.
+ type: list
+ returned: always
+ clientRoles:
+ description: A list of client-level roles granted to this group.
+ type: list
+ returned: always
+ sample: []
+ access:
+ description: A dict describing the accesses you have to this group based on the credentials used.
+ type: dict
+ returned: always
+ sample:
+ manage: true
+ manageMembership: true
+ view: true
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ attributes=dict(type='dict'),
+ parents=dict(
+ type='list', elements='dict',
+ options=dict(
+ id=dict(type='str'),
+ name=dict(type='str')
+ ),
+ ),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, group='')
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ gid = module.params.get('id')
+ name = module.params.get('name')
+ attributes = module.params.get('attributes')
+
+ parents = module.params.get('parents')
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
+ if attributes is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
+
+ # Filter and map the parameters names that apply to the group
+ group_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'parents'] and
+ module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ if gid is None:
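+ # No ID given: look the group up by name; for subgroups the parents chain scopes the search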
+ before_group = kc.get_group_by_name(name, realm=realm, parents=parents)
+ else:
+ before_group = kc.get_group_by_groupid(gid, realm=realm)
+
+ if before_group is None:
+ before_group = {}
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
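+ # Record only values that differ from the existing group, keeping no-op runs idempotent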
+ for param in group_params:
+ new_param_value = module.params.get(param)
+ old_value = before_group[param] if param in before_group else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_group = before_group.copy()
+ desired_group.update(changeset)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_group:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Group does not exist; doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new group')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=desired_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it ...
+ if parents:
+ # ... as subgroup of another parent group
+ kc.create_subgroup(parents, desired_group, realm=realm)
+ else:
+ # ... as toplvl base group
+ kc.create_group(desired_group, realm=realm)
+
+ after_group = kc.get_group_by_name(name, realm, parents=parents)
+
+ result['end_state'] = after_group
+
+ result['msg'] = 'Group {name} has been created with ID {id}'.format(name=after_group['name'],
+ id=after_group['id'])
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # no changes
+ if desired_group == before_group:
+ result['changed'] = False
+ result['end_state'] = desired_group
+ result['msg'] = "No changes required to group {name}.".format(name=before_group['name'])
+ module.exit_json(**result)
+
+ # doing an update
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after=desired_group)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_group(desired_group, realm=realm)
+
+ after_group = kc.get_group_by_groupid(desired_group['id'], realm=realm)
+
+ result['end_state'] = after_group
+
+ result['msg'] = "Group {id} has been updated".format(id=after_group['id'])
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_group, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ gid = before_group['id']
+ kc.delete_group(groupid=gid, realm=realm)
+
+ result['end_state'] = {}
+
+ result['msg'] = "Group {name} has been deleted".format(name=before_group['name'])
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
new file mode 100644
index 000000000..0d12ae03a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_identity_provider.py
@@ -0,0 +1,654 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_identity_provider
+
+short_description: Allows administration of Keycloak identity providers via Keycloak API
+
+version_added: 3.6.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak identity providers via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/15.0/rest-api/index.html).
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the identity provider.
+ - On C(present), the identity provider will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the identity provider will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ realm:
+ description:
+ - The Keycloak realm under which this identity provider resides.
+ default: 'master'
+ type: str
+
+ alias:
+ description:
+ - The alias uniquely identifies an identity provider and it is also used to build the redirect URI.
+ required: true
+ type: str
+
+ display_name:
+ description:
+ - Friendly name for identity provider.
+ aliases:
+ - displayName
+ type: str
+
+ enabled:
+ description:
+ - Enable/disable this identity provider.
+ type: bool
+
+ store_token:
+ description:
+ - Enable/disable whether tokens must be stored after authenticating users.
+ aliases:
+ - storeToken
+ type: bool
+
+ add_read_token_role_on_create:
+ description:
+ - Enable/disable whether new users can read any stored tokens. This assigns the C(broker.read-token) role.
+ aliases:
+ - addReadTokenRoleOnCreate
+ type: bool
+
+ trust_email:
+ description:
+ - If enabled, email provided by this provider is not verified even if verification is enabled for the realm.
+ aliases:
+ - trustEmail
+ type: bool
+
+ link_only:
+ description:
+ - If true, users cannot log in through this provider. They can only link to this provider.
+ This is useful if you don't want to allow login from the provider but still want to integrate with it.
+ aliases:
+ - linkOnly
+ type: bool
+
+ first_broker_login_flow_alias:
+ description:
+ - Alias of authentication flow, which is triggered after first login with this identity provider.
+ aliases:
+ - firstBrokerLoginFlowAlias
+ type: str
+
+ post_broker_login_flow_alias:
+ description:
+ - Alias of authentication flow, which is triggered after each login with this identity provider.
+ aliases:
+ - postBrokerLoginFlowAlias
+ type: str
+
+ authenticate_by_default:
+ description:
+ - Specifies if this identity provider should be used by default for authentication even before displaying the login screen.
+ aliases:
+ - authenticateByDefault
+ type: bool
+
+ provider_id:
+ description:
+ - Protocol used by this provider (supported values are C(oidc) or C(saml)).
+ aliases:
+ - providerId
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the provider; the contents differ depending on the value of I(providerId).
+ Examples are given below for C(oidc) and C(saml). It is easiest to obtain valid config values by dumping an already-existing
+ identity provider configuration through check-mode in the I(existing) field.
+ type: dict
+ suboptions:
+ hide_on_login_page:
+ description:
+ - If hidden, login with this provider is possible only if requested explicitly, for example using the C(kc_idp_hint) parameter.
+ aliases:
+ - hideOnLoginPage
+ type: bool
+
+ gui_order:
+ description:
+ - Number defining order of the provider in GUI (for example, on Login page).
+ aliases:
+ - guiOrder
+ type: int
+
+ sync_mode:
+ description:
+ - Default sync mode for all mappers. The sync mode determines when user data will be synced using the mappers.
+ aliases:
+ - syncMode
+ type: str
+
+ issuer:
+ description:
+ - The issuer identifier for the issuer of the response. If not provided, no validation will be performed.
+ type: str
+
+ authorizationUrl:
+ description:
+ - The Authorization URL.
+ type: str
+
+ tokenUrl:
+ description:
+ - The Token URL.
+ type: str
+
+ logoutUrl:
+ description:
+ - End session endpoint to use to logout user from external IDP.
+ type: str
+
+ userInfoUrl:
+ description:
+ - The User Info URL.
+ type: str
+
+ clientAuthMethod:
+ description:
+ - The client authentication method.
+ type: str
+
+ clientId:
+ description:
+ - The client identifier registered within the identity provider.
+ type: str
+
+ clientSecret:
+ description:
+ - The client secret registered within the identity provider.
+ type: str
+
+ defaultScope:
+ description:
+ - The scopes to be sent when asking for authorization.
+ type: str
+
+ validateSignature:
+ description:
+ - Enable/disable signature validation of external IDP signatures.
+ type: bool
+
+ useJwksUrl:
+ description:
+ - If the switch is on, identity provider public keys will be downloaded from given JWKS URL.
+ type: bool
+
+ jwksUrl:
+ description:
+ - URL where identity provider keys in JWK format are stored. See JWK specification for more details.
+ type: str
+
+ entityId:
+ description:
+ - The Entity ID that will be used to uniquely identify this SAML Service Provider.
+ type: str
+
+ singleSignOnServiceUrl:
+ description:
+ - The URL that must be used to send authentication requests (SAML AuthnRequest).
+ type: str
+
+ singleLogoutServiceUrl:
+ description:
+ - The URL that must be used to send logout requests.
+ type: str
+
+ backchannelSupported:
+ description:
+ - Does the external IDP support backchannel logout?
+ type: str
+
+ nameIDPolicyFormat:
+ description:
+ - Specifies the URI reference corresponding to a name identifier format.
+ type: str
+
+ principalType:
+ description:
+ - Way to identify and track external users from the assertion.
+ type: str
+
+ mappers:
+ description:
+ - A list of dicts defining mappers associated with this Identity Provider.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - Unique ID of this mapper.
+ type: str
+
+ name:
+ description:
+ - Name of the mapper.
+ type: str
+
+ identityProviderAlias:
+ description:
+ - Alias of the identity provider for this mapper.
+ type: str
+
+ identityProviderMapper:
+ description:
+ - Type of mapper.
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the mapper; the contents differ depending on the value of I(identityProviderMapper).
+ type: dict
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Laurent Paumier (@laurpaum)
+'''
+
+EXAMPLES = '''
+- name: Create OIDC identity provider, authentication with credentials
+ community.general.keycloak_identity_provider:
+ state: present
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: admin
+ realm: myrealm
+ alias: oidc-idp
+ display_name: OpenID Connect IdP
+ enabled: true
+ provider_id: oidc
+ config:
+ issuer: https://idp.example.com
+ authorizationUrl: https://idp.example.com/auth
+ tokenUrl: https://idp.example.com/token
+ userInfoUrl: https://idp.example.com/userinfo
+ clientAuthMethod: client_secret_post
+ clientId: my-client
+ clientSecret: secret
+ syncMode: FORCE
+ mappers:
+ - name: first_name
+ identityProviderMapper: oidc-user-attribute-idp-mapper
+ config:
+ claim: first_name
+ user.attribute: first_name
+ syncMode: INHERIT
+ - name: last_name
+ identityProviderMapper: oidc-user-attribute-idp-mapper
+ config:
+ claim: last_name
+ user.attribute: last_name
+ syncMode: INHERIT
+
+- name: Create SAML identity provider, authentication with credentials
+ community.general.keycloak_identity_provider:
+ state: present
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: admin
+ realm: myrealm
+ alias: saml-idp
+ display_name: SAML IdP
+ enabled: true
+ provider_id: saml
+ config:
+ entityId: https://auth.example.com/auth/realms/myrealm
+ singleSignOnServiceUrl: https://idp.example.com/login
+ wantAuthnRequestsSigned: true
+ wantAssertionsSigned: true
+ mappers:
+ - name: roles
+ identityProviderMapper: saml-user-attribute-idp-mapper
+ config:
+ user.attribute: roles
+ attribute.friendly.name: User Roles
+ attribute.name: roles
+ syncMode: INHERIT
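+
+# A minimal removal sketch; state C(absent) deletes the provider by its alias. The URL and credential values below are placeholders matching the examples above.
+- name: Delete OIDC identity provider, authentication with credentials
+ community.general.keycloak_identity_provider:
+ state: absent
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: admin
+ realm: myrealm
+ alias: oidc-idp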
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Identity provider my-idp has been created"
+
+proposed:
+ description: Representation of proposed identity provider.
+ returned: always
+ type: dict
+ sample: {
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "secret",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "providerId": "oidc"
+ }
+
+existing:
+ description: Representation of existing identity provider.
+ returned: always
+ type: dict
+ sample: {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://old.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://old.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://old.example.com/token",
+ "userInfoUrl": "https://old.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false,
+ }
+
+end_state:
+ description: Representation of identity provider after module execution.
+ returned: on success
+ type: dict
+ sample: {
+ "addReadTokenRoleOnCreate": false,
+ "alias": "my-idp",
+ "authenticateByDefault": false,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "**********",
+ "issuer": "https://idp.example.com",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": true,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "4d28d7e3-1b80-45bb-8a30-5822bf55aa1c",
+ "linkOnly": false,
+ "providerId": "oidc",
+ "storeToken": false,
+ "trustEmail": false,
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from copy import deepcopy
+
+
+def sanitize(idp):
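+ # Mask the client secret in a copy of the representation so it is not leaked in logs or diff output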
+ idpcopy = deepcopy(idp)
+ if 'config' in idpcopy:
+ if 'clientSecret' in idpcopy['config']:
+ idpcopy['config']['clientSecret'] = '**********'
+ return idpcopy
+
+
+def get_identity_provider_with_mappers(kc, alias, realm):
+ idp = kc.get_identity_provider(alias, realm)
+ if idp is not None:
+ idp['mappers'] = sorted(kc.get_identity_provider_mappers(alias, realm), key=lambda x: x.get('name'))
+ if idp is None:
+ idp = {}
+ return idp
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ mapper_spec = dict(
+ id=dict(type='str'),
+ name=dict(type='str'),
+ identityProviderAlias=dict(type='str'),
+ identityProviderMapper=dict(type='str'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+ alias=dict(type='str', required=True),
+ add_read_token_role_on_create=dict(type='bool', aliases=['addReadTokenRoleOnCreate']),
+ authenticate_by_default=dict(type='bool', aliases=['authenticateByDefault']),
+ config=dict(type='dict'),
+ display_name=dict(type='str', aliases=['displayName']),
+ enabled=dict(type='bool'),
+ first_broker_login_flow_alias=dict(type='str', aliases=['firstBrokerLoginFlowAlias']),
+ link_only=dict(type='bool', aliases=['linkOnly']),
+ post_broker_login_flow_alias=dict(type='str', aliases=['postBrokerLoginFlowAlias']),
+ provider_id=dict(type='str', aliases=['providerId']),
+ store_token=dict(type='bool', aliases=['storeToken']),
+ trust_email=dict(type='bool', aliases=['trustEmail']),
+ mappers=dict(type='list', elements='dict', options=mapper_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ # Filter and map the parameters names that apply to the identity provider.
+ idp_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
+ module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ before_idp = get_identity_provider_with_mappers(kc, alias, realm)
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
+ for param in idp_params:
+ new_param_value = module.params.get(param)
+ old_value = before_idp[camel(param)] if camel(param) in before_idp else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # special handling of mappers list to allow change detection
+ if module.params.get('mappers') is not None:
+ for change in module.params['mappers']:
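+ # Drop suboptions that were not set so they do not overwrite fields on an existing mapper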
+ change = dict((k, v) for k, v in change.items() if v is not None)
+ if change.get('id') is None and change.get('name') is None:
+ module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
+ if before_idp == dict():
+ old_mapper = dict()
+ elif change.get('id') is not None:
+ old_mapper = kc.get_identity_provider_mapper(change['id'], alias, realm)
+ if old_mapper is None:
+ old_mapper = dict()
+ else:
+ found = [x for x in kc.get_identity_provider_mappers(alias, realm) if x['name'] == change['name']]
+ if len(found) == 1:
+ old_mapper = found[0]
+ else:
+ old_mapper = dict()
+ new_mapper = old_mapper.copy()
+ new_mapper.update(change)
+ if new_mapper != old_mapper:
+ if changeset.get('mappers') is None:
+ changeset['mappers'] = list()
+ changeset['mappers'].append(new_mapper)
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_idp = before_idp.copy()
+ desired_idp.update(changeset)
+
+ result['proposed'] = sanitize(changeset)
+ result['existing'] = sanitize(before_idp)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_idp:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Identity provider does not exist; doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize(desired_idp))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
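+ # Mappers are managed via their own endpoints, so detach them before creating the provider itself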
+ desired_idp = desired_idp.copy()
+ mappers = desired_idp.pop('mappers', [])
+ kc.create_identity_provider(desired_idp, realm)
+ for mapper in mappers:
+ if mapper.get('identityProviderAlias') is None:
+ mapper['identityProviderAlias'] = alias
+ kc.create_identity_provider_mapper(mapper, alias, realm)
+ after_idp = get_identity_provider_with_mappers(kc, alias, realm)
+
+ result['end_state'] = sanitize(after_idp)
+
+ result['msg'] = 'Identity provider {alias} has been created'.format(alias=alias)
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # no changes
+ if desired_idp == before_idp:
+ result['changed'] = False
+ result['end_state'] = sanitize(desired_idp)
+ result['msg'] = "No changes required to identity provider {alias}.".format(alias=alias)
+ module.exit_json(**result)
+
+ # doing an update
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize(before_idp), after=sanitize(desired_idp))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ desired_idp = desired_idp.copy()
+ updated_mappers = desired_idp.pop('mappers', [])
+ kc.update_identity_provider(desired_idp, realm)
+ for mapper in updated_mappers:
+ if mapper.get('id') is not None:
+ kc.update_identity_provider_mapper(mapper, alias, realm)
+ else:
+ if mapper.get('identityProviderAlias') is None:
+ mapper['identityProviderAlias'] = alias
+ kc.create_identity_provider_mapper(mapper, alias, realm)
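+ # Remove any pre-existing mapper whose name is absent from the updated mappers list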
+ for mapper in [x for x in before_idp['mappers']
+ if [y for y in updated_mappers if y["name"] == x['name']] == []]:
+ kc.delete_identity_provider_mapper(mapper['id'], alias, realm)
+
+ after_idp = get_identity_provider_with_mappers(kc, alias, realm)
+
+ result['end_state'] = sanitize(after_idp)
+
+ result['msg'] = "Identity provider {alias} has been updated".format(alias=alias)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ # Process a deletion
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize(before_idp), after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_identity_provider(alias, realm)
+
+ result['end_state'] = {}
+
+ result['msg'] = "Identity provider {alias} has been deleted".format(alias=alias)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm.py b/ansible_collections/community/general/plugins/modules/keycloak_realm.py
new file mode 100644
index 000000000..53f81be48
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm.py
@@ -0,0 +1,826 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Eike Frost <ei@kefro.st>
+# Copyright (c) 2021, Christophe Gilles <christophe.gilles54@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_realm
+
+short_description: Allows administration of Keycloak realm via Keycloak API
+
+version_added: 3.0.0
+
+description:
+ - This module allows the administration of a Keycloak realm via the Keycloak REST API. It
+ requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+ Aliases are provided so camelCased versions can be used as well.
+
+ - The Keycloak API does not always sanity check inputs, for example you can set
+ SAML-specific settings on an OpenID Connect client and vice versa. Be careful.
+ If you do not specify a setting, usually a sensible default is chosen.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the realm.
+ - On C(present), the realm will be created (or updated if it exists already).
+ - On C(absent), the realm will be removed if it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+ id:
+ description:
+ - The realm to create.
+ type: str
+ realm:
+ description:
+ - The realm name.
+ type: str
+ access_code_lifespan:
+ description:
+ - The realm access code lifespan.
+ aliases:
+ - accessCodeLifespan
+ type: int
+ access_code_lifespan_login:
+ description:
+ - The realm access code lifespan login.
+ aliases:
+ - accessCodeLifespanLogin
+ type: int
+ access_code_lifespan_user_action:
+ description:
+ - The realm access code lifespan user action.
+ aliases:
+ - accessCodeLifespanUserAction
+ type: int
+ access_token_lifespan:
+ description:
+ - The realm access token lifespan.
+ aliases:
+ - accessTokenLifespan
+ type: int
+ access_token_lifespan_for_implicit_flow:
+ description:
+ - The realm access token lifespan for implicit flow.
+ aliases:
+ - accessTokenLifespanForImplicitFlow
+ type: int
+ account_theme:
+ description:
+ - The realm account theme.
+ aliases:
+ - accountTheme
+ type: str
+ action_token_generated_by_admin_lifespan:
+ description:
+ - The realm action token generated by admin lifespan.
+ aliases:
+ - actionTokenGeneratedByAdminLifespan
+ type: int
+ action_token_generated_by_user_lifespan:
+ description:
+ - The realm action token generated by user lifespan.
+ aliases:
+ - actionTokenGeneratedByUserLifespan
+ type: int
+ admin_events_details_enabled:
+ description:
+ - The realm admin events details enabled.
+ aliases:
+ - adminEventsDetailsEnabled
+ type: bool
+ admin_events_enabled:
+ description:
+ - The realm admin events enabled.
+ aliases:
+ - adminEventsEnabled
+ type: bool
+ admin_theme:
+ description:
+ - The realm admin theme.
+ aliases:
+ - adminTheme
+ type: str
+ attributes:
+ description:
+ - The realm attributes.
+ type: dict
+ browser_flow:
+ description:
+ - The realm browser flow.
+ aliases:
+ - browserFlow
+ type: str
+ browser_security_headers:
+ description:
+ - The realm browser security headers.
+ aliases:
+ - browserSecurityHeaders
+ type: dict
+ brute_force_protected:
+ description:
+ - The realm brute force protected.
+ aliases:
+ - bruteForceProtected
+ type: bool
+ client_authentication_flow:
+ description:
+ - The realm client authentication flow.
+ aliases:
+ - clientAuthenticationFlow
+ type: str
+ client_scope_mappings:
+ description:
+ - The realm client scope mappings.
+ aliases:
+ - clientScopeMappings
+ type: dict
+ default_default_client_scopes:
+ description:
+ - The realm default default client scopes.
+ aliases:
+ - defaultDefaultClientScopes
+ type: list
+ elements: str
+ default_groups:
+ description:
+ - The realm default groups.
+ aliases:
+ - defaultGroups
+ type: list
+ elements: str
+ default_locale:
+ description:
+ - The realm default locale.
+ aliases:
+ - defaultLocale
+ type: str
+ default_optional_client_scopes:
+ description:
+ - The realm default optional client scopes.
+ aliases:
+ - defaultOptionalClientScopes
+ type: list
+ elements: str
+ default_roles:
+ description:
+ - The realm default roles.
+ aliases:
+ - defaultRoles
+ type: list
+ elements: str
+ default_signature_algorithm:
+ description:
+ - The realm default signature algorithm.
+ aliases:
+ - defaultSignatureAlgorithm
+ type: str
+ direct_grant_flow:
+ description:
+ - The realm direct grant flow.
+ aliases:
+ - directGrantFlow
+ type: str
+ display_name:
+ description:
+ - The realm display name.
+ aliases:
+ - displayName
+ type: str
+ display_name_html:
+ description:
+ - The realm display name HTML.
+ aliases:
+ - displayNameHtml
+ type: str
+ docker_authentication_flow:
+ description:
+ - The realm docker authentication flow.
+ aliases:
+ - dockerAuthenticationFlow
+ type: str
+ duplicate_emails_allowed:
+ description:
+ - The realm duplicate emails allowed option.
+ aliases:
+ - duplicateEmailsAllowed
+ type: bool
+ edit_username_allowed:
+ description:
+ - The realm edit username allowed option.
+ aliases:
+ - editUsernameAllowed
+ type: bool
+ email_theme:
+ description:
+ - The realm email theme.
+ aliases:
+ - emailTheme
+ type: str
+ enabled:
+ description:
+ - The realm enabled option.
+ type: bool
+ enabled_event_types:
+ description:
+ - The realm enabled event types.
+ aliases:
+ - enabledEventTypes
+ type: list
+ elements: str
+ events_enabled:
+ description:
+ - Enables or disables login events for this realm.
+ aliases:
+ - eventsEnabled
+ type: bool
+ version_added: 3.6.0
+ events_expiration:
+ description:
+ - The realm events expiration.
+ aliases:
+ - eventsExpiration
+ type: int
+ events_listeners:
+ description:
+ - The realm events listeners.
+ aliases:
+ - eventsListeners
+ type: list
+ elements: str
+ failure_factor:
+ description:
+ - The realm failure factor.
+ aliases:
+ - failureFactor
+ type: int
+ internationalization_enabled:
+ description:
+ - The realm internationalization enabled option.
+ aliases:
+ - internationalizationEnabled
+ type: bool
+ login_theme:
+ description:
+ - The realm login theme.
+ aliases:
+ - loginTheme
+ type: str
+ login_with_email_allowed:
+ description:
+ - The realm login with email allowed option.
+ aliases:
+ - loginWithEmailAllowed
+ type: bool
+ max_delta_time_seconds:
+ description:
+ - The realm max delta time in seconds.
+ aliases:
+ - maxDeltaTimeSeconds
+ type: int
+ max_failure_wait_seconds:
+ description:
+ - The realm max failure wait in seconds.
+ aliases:
+ - maxFailureWaitSeconds
+ type: int
+ minimum_quick_login_wait_seconds:
+ description:
+ - The realm minimum quick login wait in seconds.
+ aliases:
+ - minimumQuickLoginWaitSeconds
+ type: int
+ not_before:
+ description:
+ - The realm not before.
+ aliases:
+ - notBefore
+ type: int
+ offline_session_idle_timeout:
+ description:
+ - The realm offline session idle timeout.
+ aliases:
+ - offlineSessionIdleTimeout
+ type: int
+ offline_session_max_lifespan:
+ description:
+ - The realm offline session max lifespan.
+ aliases:
+ - offlineSessionMaxLifespan
+ type: int
+ offline_session_max_lifespan_enabled:
+ description:
+ - The realm offline session max lifespan enabled option.
+ aliases:
+ - offlineSessionMaxLifespanEnabled
+ type: bool
+ otp_policy_algorithm:
+ description:
+ - The realm otp policy algorithm.
+ aliases:
+ - otpPolicyAlgorithm
+ type: str
+ otp_policy_digits:
+ description:
+ - The realm otp policy digits.
+ aliases:
+ - otpPolicyDigits
+ type: int
+ otp_policy_initial_counter:
+ description:
+ - The realm otp policy initial counter.
+ aliases:
+ - otpPolicyInitialCounter
+ type: int
+ otp_policy_look_ahead_window:
+ description:
+ - The realm otp policy look ahead window.
+ aliases:
+ - otpPolicyLookAheadWindow
+ type: int
+ otp_policy_period:
+ description:
+ - The realm otp policy period.
+ aliases:
+ - otpPolicyPeriod
+ type: int
+ otp_policy_type:
+ description:
+ - The realm otp policy type.
+ aliases:
+ - otpPolicyType
+ type: str
+ otp_supported_applications:
+ description:
+ - The realm otp supported applications.
+ aliases:
+ - otpSupportedApplications
+ type: list
+ elements: str
+ password_policy:
+ description:
+ - The realm password policy.
+ aliases:
+ - passwordPolicy
+ type: str
+ permanent_lockout:
+ description:
+ - The realm permanent lockout.
+ aliases:
+ - permanentLockout
+ type: bool
+ quick_login_check_milli_seconds:
+ description:
+ - The realm quick login check in milliseconds.
+ aliases:
+ - quickLoginCheckMilliSeconds
+ type: int
+ refresh_token_max_reuse:
+ description:
+ - The realm refresh token max reuse.
+ aliases:
+ - refreshTokenMaxReuse
+ type: int
+ registration_allowed:
+ description:
+ - The realm registration allowed option.
+ aliases:
+ - registrationAllowed
+ type: bool
+ registration_email_as_username:
+ description:
+ - The realm registration email as username option.
+ aliases:
+ - registrationEmailAsUsername
+ type: bool
+ registration_flow:
+ description:
+ - The realm registration flow.
+ aliases:
+ - registrationFlow
+ type: str
+ remember_me:
+ description:
+ - The realm remember me option.
+ aliases:
+ - rememberMe
+ type: bool
+ reset_credentials_flow:
+ description:
+ - The realm reset credentials flow.
+ aliases:
+ - resetCredentialsFlow
+ type: str
+ reset_password_allowed:
+ description:
+ - The realm reset password allowed option.
+ aliases:
+ - resetPasswordAllowed
+ type: bool
+ revoke_refresh_token:
+ description:
+ - The realm revoke refresh token option.
+ aliases:
+ - revokeRefreshToken
+ type: bool
+ smtp_server:
+ description:
+ - The realm smtp server.
+ aliases:
+ - smtpServer
+ type: dict
+ ssl_required:
+ description:
+ - The realm ssl required option.
+ choices: ['all', 'external', 'none']
+ aliases:
+ - sslRequired
+ type: str
+ sso_session_idle_timeout:
+ description:
+ - The realm sso session idle timeout.
+ aliases:
+ - ssoSessionIdleTimeout
+ type: int
+ sso_session_idle_timeout_remember_me:
+ description:
+ - The realm sso session idle timeout remember me.
+ aliases:
+ - ssoSessionIdleTimeoutRememberMe
+ type: int
+ sso_session_max_lifespan:
+ description:
+ - The realm sso session max lifespan.
+ aliases:
+ - ssoSessionMaxLifespan
+ type: int
+ sso_session_max_lifespan_remember_me:
+ description:
+ - The realm sso session max lifespan remember me.
+ aliases:
+ - ssoSessionMaxLifespanRememberMe
+ type: int
+ supported_locales:
+ description:
+ - The realm supported locales.
+ aliases:
+ - supportedLocales
+ type: list
+ elements: str
+ user_managed_access_allowed:
+ description:
+ - The realm user managed access allowed option.
+ aliases:
+ - userManagedAccessAllowed
+ type: bool
+ verify_email:
+ description:
+ - The realm verify email option.
+ aliases:
+ - verifyEmail
+ type: bool
+ wait_increment_seconds:
+ description:
+ - The realm wait increment in seconds.
+ aliases:
+ - waitIncrementSeconds
+ type: int
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Christophe Gilles (@kris2kris)
+'''
+
+EXAMPLES = '''
+- name: Create or update Keycloak realm (minimal example)
+ community.general.keycloak_realm:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ id: realm
+ realm: realm
+ state: present
+
+- name: Delete a Keycloak realm
+ community.general.keycloak_realm:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ id: test
+ state: absent
+
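+# A minimal sketch of token-based authentication; the token option is provided by
+# the community.general.keycloak documentation fragment referenced above:
+- name: Create or update a Keycloak realm using an API access token
+ community.general.keycloak_realm:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ id: realm
+ realm: realm
+ state: present
+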
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Realm testrealm has been updated"
+
+proposed:
+ description: Representation of proposed realm.
+ returned: always
+ type: dict
+ sample: {
+ id: "test"
+ }
+
+existing:
+ description: Representation of existing realm (sample is truncated).
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description: Representation of realm after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sanitize_cr(realmrep):
+ """ Removes probably sensitive details from a realm representation.
+
+ :param realmrep: the realmrep dict to be sanitized
+ :return: sanitized realmrep dict
+ """
+ result = realmrep.copy()
+ if 'secret' in result:
+ result['secret'] = '********'
+ if 'attributes' in result:
+ if 'saml.signing.private.key' in result['attributes']:
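+ # Copy the nested dict before masking so the caller's representation is not mutated.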
+ result['attributes'] = result['attributes'].copy()
+ result['attributes']['saml.signing.private.key'] = '********'
+ return result
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+
+ id=dict(type='str'),
+ realm=dict(type='str'),
+ access_code_lifespan=dict(type='int', aliases=['accessCodeLifespan']),
+ access_code_lifespan_login=dict(type='int', aliases=['accessCodeLifespanLogin']),
+ access_code_lifespan_user_action=dict(type='int', aliases=['accessCodeLifespanUserAction']),
+ access_token_lifespan=dict(type='int', aliases=['accessTokenLifespan'], no_log=False),
+ access_token_lifespan_for_implicit_flow=dict(type='int', aliases=['accessTokenLifespanForImplicitFlow'], no_log=False),
+ account_theme=dict(type='str', aliases=['accountTheme']),
+ action_token_generated_by_admin_lifespan=dict(type='int', aliases=['actionTokenGeneratedByAdminLifespan'], no_log=False),
+ action_token_generated_by_user_lifespan=dict(type='int', aliases=['actionTokenGeneratedByUserLifespan'], no_log=False),
+ admin_events_details_enabled=dict(type='bool', aliases=['adminEventsDetailsEnabled']),
+ admin_events_enabled=dict(type='bool', aliases=['adminEventsEnabled']),
+ admin_theme=dict(type='str', aliases=['adminTheme']),
+ attributes=dict(type='dict'),
+ browser_flow=dict(type='str', aliases=['browserFlow']),
+ browser_security_headers=dict(type='dict', aliases=['browserSecurityHeaders']),
+ brute_force_protected=dict(type='bool', aliases=['bruteForceProtected']),
+ client_authentication_flow=dict(type='str', aliases=['clientAuthenticationFlow']),
+ client_scope_mappings=dict(type='dict', aliases=['clientScopeMappings']),
+ default_default_client_scopes=dict(type='list', elements='str', aliases=['defaultDefaultClientScopes']),
+ default_groups=dict(type='list', elements='str', aliases=['defaultGroups']),
+ default_locale=dict(type='str', aliases=['defaultLocale']),
+ default_optional_client_scopes=dict(type='list', elements='str', aliases=['defaultOptionalClientScopes']),
+ default_roles=dict(type='list', elements='str', aliases=['defaultRoles']),
+ default_signature_algorithm=dict(type='str', aliases=['defaultSignatureAlgorithm']),
+ direct_grant_flow=dict(type='str', aliases=['directGrantFlow']),
+ display_name=dict(type='str', aliases=['displayName']),
+ display_name_html=dict(type='str', aliases=['displayNameHtml']),
+ docker_authentication_flow=dict(type='str', aliases=['dockerAuthenticationFlow']),
+ duplicate_emails_allowed=dict(type='bool', aliases=['duplicateEmailsAllowed']),
+ edit_username_allowed=dict(type='bool', aliases=['editUsernameAllowed']),
+ email_theme=dict(type='str', aliases=['emailTheme']),
+ enabled=dict(type='bool'),
+ enabled_event_types=dict(type='list', elements='str', aliases=['enabledEventTypes']),
+ events_enabled=dict(type='bool', aliases=['eventsEnabled']),
+ events_expiration=dict(type='int', aliases=['eventsExpiration']),
+ events_listeners=dict(type='list', elements='str', aliases=['eventsListeners']),
+ failure_factor=dict(type='int', aliases=['failureFactor']),
+ internationalization_enabled=dict(type='bool', aliases=['internationalizationEnabled']),
+ login_theme=dict(type='str', aliases=['loginTheme']),
+ login_with_email_allowed=dict(type='bool', aliases=['loginWithEmailAllowed']),
+ max_delta_time_seconds=dict(type='int', aliases=['maxDeltaTimeSeconds']),
+ max_failure_wait_seconds=dict(type='int', aliases=['maxFailureWaitSeconds']),
+ minimum_quick_login_wait_seconds=dict(type='int', aliases=['minimumQuickLoginWaitSeconds']),
+ not_before=dict(type='int', aliases=['notBefore']),
+ offline_session_idle_timeout=dict(type='int', aliases=['offlineSessionIdleTimeout']),
+ offline_session_max_lifespan=dict(type='int', aliases=['offlineSessionMaxLifespan']),
+ offline_session_max_lifespan_enabled=dict(type='bool', aliases=['offlineSessionMaxLifespanEnabled']),
+ otp_policy_algorithm=dict(type='str', aliases=['otpPolicyAlgorithm']),
+ otp_policy_digits=dict(type='int', aliases=['otpPolicyDigits']),
+ otp_policy_initial_counter=dict(type='int', aliases=['otpPolicyInitialCounter']),
+ otp_policy_look_ahead_window=dict(type='int', aliases=['otpPolicyLookAheadWindow']),
+ otp_policy_period=dict(type='int', aliases=['otpPolicyPeriod']),
+ otp_policy_type=dict(type='str', aliases=['otpPolicyType']),
+ otp_supported_applications=dict(type='list', elements='str', aliases=['otpSupportedApplications']),
+ password_policy=dict(type='str', aliases=['passwordPolicy'], no_log=False),
+ permanent_lockout=dict(type='bool', aliases=['permanentLockout']),
+ quick_login_check_milli_seconds=dict(type='int', aliases=['quickLoginCheckMilliSeconds']),
+ refresh_token_max_reuse=dict(type='int', aliases=['refreshTokenMaxReuse'], no_log=False),
+ registration_allowed=dict(type='bool', aliases=['registrationAllowed']),
+ registration_email_as_username=dict(type='bool', aliases=['registrationEmailAsUsername']),
+ registration_flow=dict(type='str', aliases=['registrationFlow']),
+ remember_me=dict(type='bool', aliases=['rememberMe']),
+ reset_credentials_flow=dict(type='str', aliases=['resetCredentialsFlow']),
+ reset_password_allowed=dict(type='bool', aliases=['resetPasswordAllowed'], no_log=False),
+ revoke_refresh_token=dict(type='bool', aliases=['revokeRefreshToken']),
+ smtp_server=dict(type='dict', aliases=['smtpServer']),
+ ssl_required=dict(choices=["external", "all", "none"], aliases=['sslRequired']),
+ sso_session_idle_timeout=dict(type='int', aliases=['ssoSessionIdleTimeout']),
+ sso_session_idle_timeout_remember_me=dict(type='int', aliases=['ssoSessionIdleTimeoutRememberMe']),
+ sso_session_max_lifespan=dict(type='int', aliases=['ssoSessionMaxLifespan']),
+ sso_session_max_lifespan_remember_me=dict(type='int', aliases=['ssoSessionMaxLifespanRememberMe']),
+ supported_locales=dict(type='list', elements='str', aliases=['supportedLocales']),
+ user_managed_access_allowed=dict(type='bool', aliases=['userManagedAccessAllowed']),
+ verify_email=dict(type='bool', aliases=['verifyEmail']),
+ wait_increment_seconds=dict(type='int', aliases=['waitIncrementSeconds']),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'realm', 'enabled'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+
+ # convert module parameters to realm representation parameters (if they belong in there)
+ params_to_ignore = list(keycloak_argument_spec().keys()) + ['state']
+
+ # Filter and map the parameter names that apply to the realm
+ realm_params = [x for x in module.params
+ if x not in params_to_ignore and
+ module.params.get(x) is not None]
+
+ # See whether the realm already exists in Keycloak
+ before_realm = kc.get_realm_by_id(realm=realm)
+
+ if before_realm is None:
+ before_realm = {}
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
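+ # camel() converts the snake_cased option names to the camelCase names the
+ # Keycloak API expects, for example access_token_lifespan -> accessTokenLifespan.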
+ for realm_param in realm_params:
+ new_param_value = module.params.get(realm_param)
+ changeset[camel(realm_param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_realm = before_realm.copy()
+ desired_realm.update(changeset)
+
+ result['proposed'] = sanitize_cr(changeset)
+ before_realm_sanitized = sanitize_cr(before_realm)
+ result['existing'] = before_realm_sanitized
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_realm:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Realm does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if 'id' not in desired_realm:
+ module.fail_json(msg='id needs to be specified when creating a new realm')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize_cr(desired_realm))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ kc.create_realm(desired_realm)
+ after_realm = kc.get_realm_by_id(desired_realm['id'])
+
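+ # end_state is read back from the API so it includes any server-side defaults Keycloak applied.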
+ result['end_state'] = sanitize_cr(after_realm)
+
+ result['msg'] = 'Realm %s has been created.' % desired_realm['id']
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # doing an update
+ result['changed'] = True
+ if module.check_mode:
+ # We can only compare the current realm with the proposed updates we have
+ if module._diff:
+ result['diff'] = dict(before=before_realm_sanitized,
+ after=sanitize_cr(desired_realm))
+ result['changed'] = (before_realm != desired_realm)
+
+ module.exit_json(**result)
+
+ # do the update
+ kc.update_realm(desired_realm, realm=realm)
+
+ after_realm = kc.get_realm_by_id(realm=realm)
+
+ if before_realm == after_realm:
+ result['changed'] = False
+
+ result['end_state'] = sanitize_cr(after_realm)
+
+ if module._diff:
+ result['diff'] = dict(before=before_realm_sanitized,
+ after=sanitize_cr(after_realm))
+
+ result['msg'] = 'Realm %s has been updated.' % desired_realm['id']
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_realm_sanitized, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_realm(realm=realm)
+
+ result['proposed'] = {}
+ result['end_state'] = {}
+
+ result['msg'] = 'Realm %s has been deleted.' % before_realm['id']
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py b/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py
new file mode 100644
index 000000000..5c2ebb4c9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_realm_info.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_realm_info
+
+short_description: Allows obtaining Keycloak realm public information via Keycloak API
+
+version_added: 4.3.0
+
+description:
+ - This module allows you to get Keycloak realm public information via the Keycloak REST API.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ auth_keycloak_url:
+ description:
+ - URL to the Keycloak instance.
+ type: str
+ required: true
+ aliases:
+ - url
+ validate_certs:
+ description:
+ - Verify TLS certificates (do not disable this in production).
+ type: bool
+ default: true
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm ID.
+ default: 'master'
+
+author:
+ - Fynn Chen (@fynncfchen)
+'''
+
+EXAMPLES = '''
+- name: Get a Keycloak public key
+ community.general.keycloak_realm_info:
+ realm: MyCustomRealm
+ auth_keycloak_url: https://auth.example.com/auth
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+
+realm_info:
+ description:
+ - Representation of the realm public information.
+ returned: always
+ type: dict
+ contains:
+ realm:
+ description: Realm ID.
+ type: str
+ returned: always
+ sample: MyRealm
+ public_key:
+ description: Public key of the realm.
+ type: str
+ returned: always
+ sample: MIIBIjANBgkqhkiG9w0BAQEFAAO...
+ token-service:
+ description: Token endpoint URL.
+ type: str
+ returned: always
+ sample: https://auth.example.com/auth/realms/MyRealm/protocol/openid-connect
+ account-service:
+ description: Account console URL.
+ type: str
+ returned: always
+ sample: https://auth.example.com/auth/realms/MyRealm/account
+ tokens-not-before:
+ description: The token not before.
+ type: int
+ returned: always
+ sample: 0
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = dict(
+ auth_keycloak_url=dict(type='str', aliases=['url'], required=True, no_log=False),
+ validate_certs=dict(type='bool', default=True),
+
+ realm=dict(default='master'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ result = dict(changed=False, msg='', realm_info='')
+
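+ # No access token is required for public realm info, hence the empty connection header.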
+ kc = KeycloakAPI(module, {})
+
+ realm = module.params.get('realm')
+
+ realm_info = kc.get_realm_info_by_id(realm=realm)
+
+ result['realm_info'] = realm_info
+ result['msg'] = 'Get realm public info successful for ID {realm}'.format(realm=realm)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_role.py b/ansible_collections/community/general/plugins/modules/keycloak_role.py
new file mode 100644
index 000000000..bbec5f591
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_role.py
@@ -0,0 +1,374 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Adam Goossens <adam.goossens@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_role
+
+short_description: Allows administration of Keycloak roles via Keycloak API
+
+version_added: 3.4.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak roles via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the role.
+ - On C(present), the role will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the role will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the role.
+ - This parameter is required.
+
+ description:
+ type: str
+ description:
+ - The role description.
+
+ realm:
+ type: str
+ description:
+ - The Keycloak realm under which this role resides.
+ default: 'master'
+
+ client_id:
+ type: str
+ description:
+ - If the role is a client role, the client id under which it resides.
+ - If this parameter is absent, the role is considered a realm role.
+
+ attributes:
+ type: dict
+ description:
+ - A dict of key/value pairs to set as custom attributes for the role.
+ - Values may be single values (e.g. a string) or a list of strings.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Laurent Paumier (@laurpaum)
+'''
+
+EXAMPLES = '''
+- name: Create a Keycloak realm role, authentication with credentials
+ community.general.keycloak_role:
+ name: my-new-kc-role
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a Keycloak realm role, authentication with token
+ community.general.keycloak_role:
+ name: my-new-kc-role
+ realm: MyCustomRealm
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ delegate_to: localhost
+
+- name: Create a Keycloak client role
+ community.general.keycloak_role:
+ name: my-new-kc-role
+ realm: MyCustomRealm
+ client_id: MyClient
+ state: present
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Delete a Keycloak role
+ community.general.keycloak_role:
+ name: my-role-for-deletion
+ state: absent
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ delegate_to: localhost
+
+- name: Create a keycloak role with some custom attributes
+ community.general.keycloak_role:
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ name: my-new-role
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - with
+ - numerous
+ - individual
+ - list
+ - items
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role myrole has been updated"
+
+proposed:
+ description: Representation of proposed role.
+ returned: always
+ type: dict
+ sample: {
+ "description": "My updated test description"
+ }
+
+existing:
+ description: Representation of existing role.
+ returned: always
+ type: dict
+ sample: {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
+ }
+
+end_state:
+ description: Representation of role after module execution (sample is truncated).
+ returned: on success
+ type: dict
+ sample: {
+ "attributes": {},
+ "clientRole": true,
+ "composite": false,
+ "containerId": "9f03eb61-a826-4771-a9fd-930e06d2d36a",
+ "description": "My updated client test role",
+ "id": "561703dd-0f38-45ff-9a5a-0c978f794547",
+ "name": "myrole"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ meta_args = dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ realm=dict(type='str', default='master'),
+ client_id=dict(type='str'),
+ attributes=dict(type='dict'),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ clientid = module.params.get('client_id')
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ # attributes in Keycloak have their values returned as lists
+ # via the API. attributes is a dict, so we'll transparently convert
+ # the values to lists.
+ if module.params.get('attributes') is not None:
+ for key, val in module.params['attributes'].items():
+ module.params['attributes'][key] = [val] if not isinstance(val, list) else val
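+ # For example, attributes: {'foo': 'bar'} becomes {'foo': ['bar']} before being sent to the API.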
+
+ # Filter and map the parameters names that apply to the role
+ role_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'client_id', 'composites'] and
+ module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ if clientid is None:
+ before_role = kc.get_realm_role(name, realm)
+ else:
+ before_role = kc.get_client_role(name, clientid, realm)
+
+ if before_role is None:
+ before_role = {}
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
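+ # Only values that actually differ from the existing role are recorded in the changeset.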
+ for param in role_params:
+ new_param_value = module.params.get(param)
+ old_value = before_role[param] if param in before_role else None
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_role = before_role.copy()
+ desired_role.update(changeset)
+
+ result['proposed'] = changeset
+ result['existing'] = before_role
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_role:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'Role does not exist, doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if name is None:
+ module.fail_json(msg='name must be specified when creating a new role')
+
+ if module._diff:
+ result['diff'] = dict(before='', after=desired_role)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ if clientid is None:
+ kc.create_realm_role(desired_role, realm)
+ after_role = kc.get_realm_role(name, realm)
+ else:
+ kc.create_client_role(desired_role, clientid, realm)
+ after_role = kc.get_client_role(name, clientid, realm)
+
+ result['end_state'] = after_role
+
+ result['msg'] = 'Role {name} has been created'.format(name=name)
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # no changes
+ if desired_role == before_role:
+ result['changed'] = False
+ result['end_state'] = desired_role
+ result['msg'] = "No changes required to role {name}.".format(name=name)
+ module.exit_json(**result)
+
+ # doing an update
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_role, after=desired_role)
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ if clientid is None:
+ kc.update_realm_role(desired_role, realm)
+ after_role = kc.get_realm_role(name, realm)
+ else:
+ kc.update_client_role(desired_role, clientid, realm)
+ after_role = kc.get_client_role(name, clientid, realm)
+
+ result['end_state'] = after_role
+
+ result['msg'] = "Role {name} has been updated".format(name=name)
+ module.exit_json(**result)
+
+ else:
+ # Process a deletion (because state was not 'present')
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=before_role, after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ if clientid is None:
+ kc.delete_realm_role(name, realm)
+ else:
+ kc.delete_client_role(name, clientid, realm)
+
+ result['end_state'] = {}
+
+ result['msg'] = "Role {name} has been deleted".format(name=name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
new file mode 100644
index 000000000..c0dc5d271
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user_federation.py
@@ -0,0 +1,1021 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_user_federation
+
+short_description: Allows administration of Keycloak user federations via Keycloak API
+
+version_added: 3.7.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak user federations via the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/20.0.2/rest-api/index.html).
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the user federation.
+ - On C(present), the user federation will be created if it does not yet exist, or updated with
+ the parameters you provide.
+ - On C(absent), the user federation will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ realm:
+ description:
+ - The Keycloak realm under which this user federation resides.
+ default: 'master'
+ type: str
+
+ id:
+ description:
+ - The unique ID for this user federation. If left empty, the user federation will be searched
+ by its I(name).
+ type: str
+
+ name:
+ description:
+ - Display name of provider when linked in admin console.
+ type: str
+
+ provider_id:
+ description:
+ - Provider for this user federation.
+ aliases:
+ - providerId
+ type: str
+ choices:
+ - ldap
+ - kerberos
+ - sssd
+
+ provider_type:
+ description:
+ - Component type for user federation (only supported value is C(org.keycloak.storage.UserStorageProvider)).
+ aliases:
+ - providerType
+ default: org.keycloak.storage.UserStorageProvider
+ type: str
+
+ parent_id:
+ description:
+ - Unique ID for the parent of this user federation. Realm ID will be automatically used if left blank.
+ aliases:
+ - parentId
+ type: str
+
+ config:
+ description:
+ - Dict specifying the configuration options for the provider; the contents differ depending on
+ the value of I(provider_id). Examples are given below for C(ldap), C(kerberos) and C(sssd).
+ It is easiest to obtain valid config values by dumping an already-existing user federation
+ configuration through check-mode in the I(existing) field.
+ - The value C(sssd) has been supported since community.general 4.2.0.
+ type: dict
+ suboptions:
+ enabled:
+ description:
+ - Enable/disable this user federation.
+ default: true
+ type: bool
+
+ priority:
+ description:
+ - Priority of provider when doing a user lookup. Lowest first.
+ default: 0
+ type: int
+
+ importEnabled:
+ description:
+ - If C(true), LDAP users will be imported into Keycloak DB and synced by the configured
+ sync policies.
+ default: true
+ type: bool
+
+ editMode:
+ description:
+ - C(READ_ONLY) is a read-only LDAP store. C(WRITABLE) means data will be synced back to LDAP
+ on demand. C(UNSYNCED) means user data will be imported, but not synced back to LDAP.
+ type: str
+ choices:
+ - READ_ONLY
+ - WRITABLE
+ - UNSYNCED
+
+ syncRegistrations:
+ description:
+ - Should newly created users be created within the LDAP store? Priority affects which
+ provider is chosen to sync the new user.
+ default: false
+ type: bool
+
+ vendor:
+ description:
+ - LDAP vendor (provider).
+ - Use short name. For instance, write C(rhds) for "Red Hat Directory Server".
+ type: str
+
+ usernameLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is mapped as Keycloak username. For many LDAP server
+ vendors it can be C(uid). For Active Directory it can be C(sAMAccountName) or C(cn).
+ The attribute should be filled for all LDAP user records you want to import from
+ LDAP to Keycloak.
+ type: str
+
+ rdnLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is used as RDN (top attribute) of typical user DN.
+ Usually it is the same as the username LDAP attribute, however this is not required. For
+ example, for Active Directory it is common to use C(cn) as the RDN attribute when the
+ username attribute might be C(sAMAccountName).
+ type: str
+
+ uuidLDAPAttribute:
+ description:
+ - Name of LDAP attribute, which is used as unique object identifier (UUID) for objects
+ in LDAP. For many LDAP server vendors, it is C(entryUUID); however some are different.
+ For example, for Active Directory it should be C(objectGUID). If your LDAP server does
+ not support the notion of UUID, you can use any other attribute that is supposed to
+ be unique among LDAP users in the tree.
+ type: str
+
+ userObjectClasses:
+ description:
+ - All values of the LDAP objectClass attribute for users in LDAP, separated by commas.
+ For example C(inetOrgPerson, organizationalPerson). Newly created Keycloak users
+ will be written to LDAP with all those object classes, and existing LDAP user records
+ are found only if they contain all those object classes.
+ type: str
+
+ connectionUrl:
+ description:
+ - Connection URL to your LDAP server.
+ type: str
+
+ usersDn:
+ description:
+ - Full DN of LDAP tree where your users are. This DN is the parent of LDAP users.
+ type: str
+
+ customUserSearchFilter:
+ description:
+ - Additional LDAP filter for filtering searched users. Leave this empty if you do not
+ need an additional filter.
+ type: str
+
+ searchScope:
+ description:
+ - For one level, the search applies only to users in the DNs specified by User DNs.
+ For subtree, the search applies to the whole subtree. See LDAP documentation for
+ more details.
+ default: '1'
+ type: str
+ choices:
+ - '1'
+ - '2'
+
+ authType:
+ description:
+ - Type of the authentication method used during the LDAP bind operation. It is used in
+ most of the requests sent to the LDAP server.
+ default: 'none'
+ type: str
+ choices:
+ - none
+ - simple
+
+ bindDn:
+ description:
+ - DN of LDAP user which will be used by Keycloak to access LDAP server.
+ type: str
+
+ bindCredential:
+ description:
+ - Password of LDAP admin.
+ type: str
+
+ startTls:
+ description:
+ - Encrypts the connection to LDAP using STARTTLS, which will disable connection pooling.
+ default: false
+ type: bool
+
+ usePasswordModifyExtendedOp:
+ description:
+ - Use the LDAPv3 Password Modify Extended Operation (RFC-3062). The password modify
+ extended operation usually requires that the LDAP user already has a password in the LDAP
+ server. So when this is used together with 'Sync Registrations', it can be good to also add a
+ 'Hardcoded LDAP attribute mapper' with a randomly generated initial password.
+ default: false
+ type: bool
+
+ validatePasswordPolicy:
+ description:
+ - Determines if Keycloak should validate the password with the realm password policy
+ before updating it.
+ default: false
+ type: bool
+
+ trustEmail:
+ description:
+ - If enabled, email provided by this provider is not verified even if verification is
+ enabled for the realm.
+ default: false
+ type: bool
+
+ useTruststoreSpi:
+ description:
+ - Specifies whether the LDAP connection will use the truststore SPI with the truststore
+ configured in standalone.xml/domain.xml. C(always) means that it will always use it.
+ C(never) means that it will not use it. C(ldapsOnly) means that it will use it only if
+ your connection URL uses ldaps. Note that even if standalone.xml/domain.xml is not
+ configured, the default Java cacerts or the certificate specified by the
+ C(javax.net.ssl.trustStore) property will be used.
+ default: ldapsOnly
+ type: str
+ choices:
+ - always
+ - ldapsOnly
+ - never
+
+ connectionTimeout:
+ description:
+ - LDAP Connection Timeout in milliseconds.
+ type: int
+
+ readTimeout:
+ description:
+ - LDAP Read Timeout in milliseconds. This timeout applies for LDAP read operations.
+ type: int
+
+ pagination:
+ description:
+ - Whether the LDAP server supports pagination.
+ default: true
+ type: bool
+
+ connectionPooling:
+ description:
+ - Determines if Keycloak should use connection pooling for accessing LDAP server.
+ default: true
+ type: bool
+
+ connectionPoolingAuthentication:
+ description:
+ - A list of space-separated authentication types of connections that may be pooled.
+ type: str
+ choices:
+ - none
+ - simple
+ - DIGEST-MD5
+
+ connectionPoolingDebug:
+ description:
+ - A string that indicates the level of debug output to produce. Example valid values are
+ C(fine) (trace connection creation and removal) and C(all) (all debugging information).
+ type: str
+
+ connectionPoolingInitSize:
+ description:
+ - The number of connections per connection identity to create when initially creating a
+ connection for the identity.
+ type: int
+
+ connectionPoolingMaxSize:
+ description:
+ - The maximum number of connections per connection identity that can be maintained
+ concurrently.
+ type: int
+
+ connectionPoolingPrefSize:
+ description:
+ - The preferred number of connections per connection identity that should be maintained
+ concurrently.
+ type: int
+
+ connectionPoolingProtocol:
+ description:
+ - A list of space-separated protocol types of connections that may be pooled.
+ Valid types are C(plain) and C(ssl).
+ type: str
+
+ connectionPoolingTimeout:
+ description:
+ - The number of milliseconds that an idle connection may remain in the pool without
+ being closed and removed from the pool.
+ type: int
+
+ allowKerberosAuthentication:
+ description:
+ - Enable/disable HTTP authentication of users with SPNEGO/Kerberos tokens. The data
+ about authenticated users will be provisioned from this LDAP server.
+ default: false
+ type: bool
+
+ kerberosRealm:
+ description:
+ - Name of the Kerberos realm.
+ type: str
+
+ serverPrincipal:
+ description:
+ - Full name of server principal for HTTP service including server and domain name. For
+ example C(HTTP/host.foo.org@FOO.ORG). Use C(*) to accept any service principal in the
+ KeyTab file.
+ type: str
+
+ keyTab:
+ description:
+ - Location of Kerberos KeyTab file containing the credentials of server principal. For
+ example C(/etc/krb5.keytab).
+ type: str
+
+ debug:
+ description:
+ - Enable/disable debug logging to standard output for Krb5LoginModule.
+ type: bool
+
+ useKerberosForPasswordAuthentication:
+ description:
+ - Use the Kerberos login module to authenticate the username/password against the Kerberos
+ server instead of authenticating against the LDAP server with the Directory Service API.
+ default: false
+ type: bool
+
+ allowPasswordAuthentication:
+ description:
+ - Enable/disable the possibility of username/password authentication against the Kerberos database.
+ type: bool
+
+ batchSizeForSync:
+ description:
+ - Count of LDAP users to be imported from LDAP to Keycloak within a single transaction.
+ default: 1000
+ type: int
+
+ fullSyncPeriod:
+ description:
+ - Period for full synchronization in seconds.
+ default: -1
+ type: int
+
+ changedSyncPeriod:
+ description:
+ - Period for synchronization of changed or newly created LDAP users in seconds.
+ default: -1
+ type: int
+
+ updateProfileFirstLogin:
+ description:
+ - Update profile on first login.
+ type: bool
+
+ cachePolicy:
+ description:
+ - Cache Policy for this storage provider.
+ type: str
+ default: 'DEFAULT'
+ choices:
+ - DEFAULT
+ - EVICT_DAILY
+ - EVICT_WEEKLY
+ - MAX_LIFESPAN
+ - NO_CACHE
+
+ evictionDay:
+ description:
+ - Day of the week the entry will become invalid on.
+ type: str
+
+ evictionHour:
+ description:
+ - Hour of day the entry will become invalid on.
+ type: str
+
+ evictionMinute:
+ description:
+ - Minute of day the entry will become invalid on.
+ type: str
+
+ maxLifespan:
+ description:
+ - Max lifespan of cache entry in milliseconds.
+ type: int
+
+ mappers:
+ description:
+ - A list of dicts defining mappers associated with this user federation.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - Unique ID of this mapper.
+ type: str
+
+ name:
+ description:
+ - Name of the mapper. If no ID is given, the mapper will be searched by name.
+ type: str
+
+ parentId:
+ description:
+ - Unique ID for the parent of this mapper. ID of the user federation will automatically
+ be used if left blank.
+ type: str
+
+ providerId:
+ description:
+ - The mapper type for this mapper (for instance C(user-attribute-ldap-mapper)).
+ type: str
+
+ providerType:
+ description:
+ - Component type for this mapper.
+ type: str
+ default: org.keycloak.storage.ldap.mappers.LDAPStorageMapper
+
+ config:
+ description:
+ - Dict specifying the configuration options for the mapper; the contents differ
+ depending on the value of I(providerId).
+ type: dict
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Laurent Paumier (@laurpaum)
+'''
+
+EXAMPLES = '''
+ - name: Create LDAP user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-ldap
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: inetOrgPerson, organizationalPerson
+ connectionUrl: ldaps://ldap.example.com:636
+ usersDn: ou=Users,dc=example,dc=com
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: password
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: ldapsOnly
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ debug: false
+ useKerberosForPasswordAuthentication: false
+ mappers:
+ - name: "full name"
+ providerId: "full-name-ldap-mapper"
+ providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ config:
+ ldap.full.name.attribute: cn
+ read.only: true
+ write.only: false
+
+ - name: Create Kerberos user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-kerberos
+ state: present
+ provider_id: kerberos
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
+ kerberosRealm: EXAMPLE.COM
+ serverPrincipal: HTTP/host.example.com@EXAMPLE.COM
+ keyTab: keytab
+ allowPasswordAuthentication: false
+ updateProfileFirstLogin: false
+
+ - name: Create sssd user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-sssd
+ state: present
+ provider_id: sssd
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ priority: 0
+ enabled: true
+ cachePolicy: DEFAULT
+
+ - name: Delete user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: https://keycloak.example.com/auth
+ auth_realm: master
+ auth_username: admin
+ auth_password: password
+ realm: my-realm
+ name: my-federation
+ state: absent
+
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "No changes required to user federation 164bb483-c613-482e-80fe-7f1431308799."
+
+proposed:
+ description: Representation of proposed user federation.
+ returned: always
+ type: dict
+ sample: {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "name": "ldap",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
+ }
+
+existing:
+ description: Representation of existing user federation.
+ returned: always
+ type: dict
+ sample: {
+ "config": {
+ "allowKerberosAuthentication": "false",
+ "authType": "simple",
+ "batchSizeForSync": "1000",
+ "bindCredential": "**********",
+ "bindDn": "cn=directory reader",
+ "cachePolicy": "DEFAULT",
+ "changedSyncPeriod": "-1",
+ "connectionPooling": "true",
+ "connectionUrl": "ldaps://ldap.example.com:636",
+ "debug": "false",
+ "editMode": "READ_ONLY",
+ "enabled": "true",
+ "fullSyncPeriod": "-1",
+ "importEnabled": "true",
+ "pagination": "true",
+ "priority": "0",
+ "rdnLDAPAttribute": "uid",
+ "searchScope": "1",
+ "syncRegistrations": "false",
+ "trustEmail": "false",
+ "useKerberosForPasswordAuthentication": "false",
+ "useTruststoreSpi": "ldapsOnly",
+ "userObjectClasses": "inetOrgPerson, organizationalPerson",
+ "usernameLDAPAttribute": "uid",
+ "usersDn": "ou=Users,dc=example,dc=com",
+ "uuidLDAPAttribute": "entryUUID",
+ "validatePasswordPolicy": "false",
+ "vendor": "other"
+ },
+ "id": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "mappers": [
+ {
+ "config": {
+ "always.read.value.from.ldap": "false",
+ "is.mandatory.in.ldap": "false",
+ "ldap.attribute": "mail",
+ "read.only": "true",
+ "user.model.attribute": "email"
+ },
+ "id": "17d60ce2-2d44-4c2c-8b1f-1fba601b9a9f",
+ "name": "email",
+ "parentId": "01122837-9047-4ae4-8ca0-6e2e891a765f",
+ "providerId": "user-attribute-ldap-mapper",
+ "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ }
+ ],
+ "name": "myfed",
+ "parentId": "myrealm",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
+ }
+
+end_state:
+ description: Representation of user federation after module execution.
+ returned: on success
+ type: dict
+ sample: {
+ "config": {
+ "allowPasswordAuthentication": "false",
+ "cachePolicy": "DEFAULT",
+ "enabled": "true",
+ "kerberosRealm": "EXAMPLE.COM",
+ "keyTab": "/etc/krb5.keytab",
+ "priority": "0",
+ "serverPrincipal": "HTTP/host.example.com@EXAMPLE.COM",
+ "updateProfileFirstLogin": "false"
+ },
+ "id": "cf52ae4f-4471-4435-a0cf-bb620cadc122",
+ "mappers": [],
+ "name": "kerberos",
+ "parentId": "myrealm",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, camel, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from copy import deepcopy
+
+
+def sanitize(comp):
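+ """Unwrap single-element config value lists and mask the LDAP bind credential for display."""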
+ compcopy = deepcopy(comp)
+ if 'config' in compcopy:
+ compcopy['config'] = dict((k, v[0]) for k, v in compcopy['config'].items())
+ if 'bindCredential' in compcopy['config']:
+ compcopy['config']['bindCredential'] = '**********'
+ if 'mappers' in compcopy:
+ for mapper in compcopy['mappers']:
+ if 'config' in mapper:
+ mapper['config'] = dict((k, v[0]) for k, v in mapper['config'].items())
+ return compcopy
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ config_spec = dict(
+ allowKerberosAuthentication=dict(type='bool', default=False),
+ allowPasswordAuthentication=dict(type='bool'),
+ authType=dict(type='str', choices=['none', 'simple'], default='none'),
+ batchSizeForSync=dict(type='int', default=1000),
+ bindCredential=dict(type='str', no_log=True),
+ bindDn=dict(type='str'),
+ cachePolicy=dict(type='str', choices=['DEFAULT', 'EVICT_DAILY', 'EVICT_WEEKLY', 'MAX_LIFESPAN', 'NO_CACHE'], default='DEFAULT'),
+ changedSyncPeriod=dict(type='int', default=-1),
+ connectionPooling=dict(type='bool', default=True),
+ connectionPoolingAuthentication=dict(type='str', choices=['none', 'simple', 'DIGEST-MD5']),
+ connectionPoolingDebug=dict(type='str'),
+ connectionPoolingInitSize=dict(type='int'),
+ connectionPoolingMaxSize=dict(type='int'),
+ connectionPoolingPrefSize=dict(type='int'),
+ connectionPoolingProtocol=dict(type='str'),
+ connectionPoolingTimeout=dict(type='int'),
+ connectionTimeout=dict(type='int'),
+ connectionUrl=dict(type='str'),
+ customUserSearchFilter=dict(type='str'),
+ debug=dict(type='bool'),
+ editMode=dict(type='str', choices=['READ_ONLY', 'WRITABLE', 'UNSYNCED']),
+ enabled=dict(type='bool', default=True),
+ evictionDay=dict(type='str'),
+ evictionHour=dict(type='str'),
+ evictionMinute=dict(type='str'),
+ fullSyncPeriod=dict(type='int', default=-1),
+ importEnabled=dict(type='bool', default=True),
+ kerberosRealm=dict(type='str'),
+ keyTab=dict(type='str', no_log=False),
+ maxLifespan=dict(type='int'),
+ pagination=dict(type='bool', default=True),
+ priority=dict(type='int', default=0),
+ rdnLDAPAttribute=dict(type='str'),
+ readTimeout=dict(type='int'),
+ searchScope=dict(type='str', choices=['1', '2'], default='1'),
+ serverPrincipal=dict(type='str'),
+ startTls=dict(type='bool', default=False),
+ syncRegistrations=dict(type='bool', default=False),
+ trustEmail=dict(type='bool', default=False),
+ updateProfileFirstLogin=dict(type='bool'),
+ useKerberosForPasswordAuthentication=dict(type='bool', default=False),
+ usePasswordModifyExtendedOp=dict(type='bool', default=False, no_log=False),
+ useTruststoreSpi=dict(type='str', choices=['always', 'ldapsOnly', 'never'], default='ldapsOnly'),
+ userObjectClasses=dict(type='str'),
+ usernameLDAPAttribute=dict(type='str'),
+ usersDn=dict(type='str'),
+ uuidLDAPAttribute=dict(type='str'),
+ validatePasswordPolicy=dict(type='bool', default=False),
+ vendor=dict(type='str'),
+ )
+
+ mapper_spec = dict(
+ id=dict(type='str'),
+ name=dict(type='str'),
+ parentId=dict(type='str'),
+ providerId=dict(type='str'),
+ providerType=dict(type='str', default='org.keycloak.storage.ldap.mappers.LDAPStorageMapper'),
+ config=dict(type='dict'),
+ )
+
+ meta_args = dict(
+ config=dict(type='dict', options=config_spec),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ realm=dict(type='str', default='master'),
+ id=dict(type='str'),
+ name=dict(type='str'),
+ provider_id=dict(type='str', aliases=['providerId'], choices=['ldap', 'kerberos', 'sssd']),
+ provider_type=dict(type='str', aliases=['providerType'], default='org.keycloak.storage.UserStorageProvider'),
+ parent_id=dict(type='str', aliases=['parentId']),
+ mappers=dict(type='list', elements='dict', options=mapper_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['id', 'name'],
+ ['token', 'auth_realm', 'auth_username', 'auth_password']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ config = module.params.get('config')
+ mappers = module.params.get('mappers')
+ cid = module.params.get('id')
+ name = module.params.get('name')
+
+ # Keycloak API expects config parameters to be arrays containing a single string element
+ if config is not None:
+ module.params['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
+ for k, v in config.items() if config[k] is not None)
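+        # e.g. {'enabled': True, 'priority': 0} becomes
+        # {'enabled': ['true'], 'priority': ['0']}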
+
+ if mappers is not None:
+ for mapper in mappers:
+ if mapper.get('config') is not None:
+ mapper['config'] = dict((k, [str(v).lower() if not isinstance(v, str) else v])
+ for k, v in mapper['config'].items() if mapper['config'][k] is not None)
+
+ # Filter and map the parameters names that apply
+ comp_params = [x for x in module.params
+ if x not in list(keycloak_argument_spec().keys()) + ['state', 'realm', 'mappers'] and
+ module.params.get(x) is not None]
+
+ # See if it already exists in Keycloak
+ if cid is None:
+ found = kc.get_components(urlencode(dict(type='org.keycloak.storage.UserStorageProvider', name=name)), realm)
+ if len(found) > 1:
+ module.fail_json(msg='No ID given and found multiple user federations with name `{name}`. Cannot continue.'.format(name=name))
+ before_comp = next(iter(found), None)
+ if before_comp is not None:
+ cid = before_comp['id']
+ else:
+ before_comp = kc.get_component(cid, realm)
+
+ if before_comp is None:
+ before_comp = {}
+
+ # if user federation exists, get associated mappers
+ if cid is not None and before_comp:
+ before_comp['mappers'] = sorted(kc.get_components(urlencode(dict(parent=cid)), realm), key=lambda x: x.get('name'))
+
+ # Build a proposed changeset from parameters given to this module
+ changeset = {}
+
+ for param in comp_params:
+ new_param_value = module.params.get(param)
+ old_value = before_comp[camel(param)] if camel(param) in before_comp else None
+ if param == 'mappers':
+ new_param_value = [dict((k, v) for k, v in x.items() if x[k] is not None) for x in new_param_value]
+ if new_param_value != old_value:
+ changeset[camel(param)] = new_param_value
+
+ # special handling of mappers list to allow change detection
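+    # Each given mapper is compared against its server-side counterpart
+    # (looked up by id or name); only mappers that actually differ are
+    # added to the changeset.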
+ if module.params.get('mappers') is not None:
+ if module.params['provider_id'] in ['kerberos', 'sssd']:
+ module.fail_json(msg='Cannot configure mappers for {type} provider.'.format(type=module.params['provider_id']))
+ for change in module.params['mappers']:
+ change = dict((k, v) for k, v in change.items() if change[k] is not None)
+ if change.get('id') is None and change.get('name') is None:
+ module.fail_json(msg='Either `name` or `id` has to be specified on each mapper.')
+ if cid is None:
+ old_mapper = {}
+ elif change.get('id') is not None:
+ old_mapper = kc.get_component(change['id'], realm)
+ if old_mapper is None:
+ old_mapper = {}
+ else:
+ found = kc.get_components(urlencode(dict(parent=cid, name=change['name'])), realm)
+ if len(found) > 1:
+ module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=change['name']))
+ if len(found) == 1:
+ old_mapper = found[0]
+ else:
+ old_mapper = {}
+ new_mapper = old_mapper.copy()
+ new_mapper.update(change)
+ if new_mapper != old_mapper:
+ if changeset.get('mappers') is None:
+ changeset['mappers'] = list()
+ changeset['mappers'].append(new_mapper)
+
+    # Prepare the desired values using the existing values (non-existence results in a dict that is safe to use as a basis)
+ desired_comp = before_comp.copy()
+ desired_comp.update(changeset)
+
+ result['proposed'] = sanitize(changeset)
+ result['existing'] = sanitize(before_comp)
+
+ # Cater for when it doesn't exist (an empty dict)
+ if not before_comp:
+ if state == 'absent':
+ # Do nothing and exit
+ if module._diff:
+ result['diff'] = dict(before='', after='')
+ result['changed'] = False
+ result['end_state'] = {}
+ result['msg'] = 'User federation does not exist; doing nothing.'
+ module.exit_json(**result)
+
+ # Process a creation
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before='', after=sanitize(desired_comp))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # create it
+ desired_comp = desired_comp.copy()
+ updated_mappers = desired_comp.pop('mappers', [])
+ after_comp = kc.create_component(desired_comp, realm)
+
+ cid = after_comp['id']
+
+ for mapper in updated_mappers:
+ found = kc.get_components(urlencode(dict(parent=cid, name=mapper['name'])), realm)
+ if len(found) > 1:
+ module.fail_json(msg='Found multiple mappers with name `{name}`. Cannot continue.'.format(name=mapper['name']))
+ if len(found) == 1:
+ old_mapper = found[0]
+ else:
+ old_mapper = {}
+
+ new_mapper = old_mapper.copy()
+ new_mapper.update(mapper)
+
+ if new_mapper.get('id') is not None:
+ kc.update_component(new_mapper, realm)
+ else:
+ if new_mapper.get('parentId') is None:
+ new_mapper['parentId'] = after_comp['id']
+ mapper = kc.create_component(new_mapper, realm)
+
+ after_comp['mappers'] = updated_mappers
+ result['end_state'] = sanitize(after_comp)
+
+ result['msg'] = "User federation {id} has been created".format(id=after_comp['id'])
+ module.exit_json(**result)
+
+ else:
+ if state == 'present':
+ # Process an update
+
+ # no changes
+ if desired_comp == before_comp:
+ result['changed'] = False
+ result['end_state'] = sanitize(desired_comp)
+ result['msg'] = "No changes required to user federation {id}.".format(id=cid)
+ module.exit_json(**result)
+
+ # doing an update
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize(before_comp), after=sanitize(desired_comp))
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # do the update
+ desired_comp = desired_comp.copy()
+ updated_mappers = desired_comp.pop('mappers', [])
+ kc.update_component(desired_comp, realm)
+ after_comp = kc.get_component(cid, realm)
+
+ for mapper in updated_mappers:
+ if mapper.get('id') is not None:
+ kc.update_component(mapper, realm)
+ else:
+ if mapper.get('parentId') is None:
+ mapper['parentId'] = desired_comp['id']
+ mapper = kc.create_component(mapper, realm)
+
+ after_comp['mappers'] = updated_mappers
+ result['end_state'] = sanitize(after_comp)
+
+ result['msg'] = "User federation {id} has been updated".format(id=cid)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ # Process a deletion
+ result['changed'] = True
+
+ if module._diff:
+ result['diff'] = dict(before=sanitize(before_comp), after='')
+
+ if module.check_mode:
+ module.exit_json(**result)
+
+ # delete it
+ kc.delete_component(cid, realm)
+
+ result['end_state'] = {}
+
+ result['msg'] = "User federation {id} has been deleted".format(id=cid)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
new file mode 100644
index 000000000..d754e313a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keycloak_user_rolemapping.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Dušan Marković (@bratwurzt)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: keycloak_user_rolemapping
+
+short_description: Allows administration of Keycloak user_rolemapping with the Keycloak API
+
+version_added: 5.7.0
+
+description:
+ - This module allows you to add, remove or modify Keycloak user_rolemapping with the Keycloak REST API.
+ It requires access to the REST API via OpenID Connect; the user connecting and the client being
+ used must have the requisite access rights. In a default Keycloak installation, admin-cli
+ and an admin user would work, as would a separate client definition with the scope tailored
+ to your needs and a user having the expected roles.
+
+ - The names of module options are snake_cased versions of the camelCase ones found in the
+ Keycloak API and its documentation at U(https://www.keycloak.org/docs-api/8.0/rest-api/index.html).
+
+ - Attributes are multi-valued in the Keycloak API. All attributes are lists of individual values and will
+ be returned that way by this module. You may pass single values for attributes when calling the module,
+ and this will be translated into a list suitable for the API.
+
+  - When updating a user_rolemapping, where possible provide the role ID to the module. This avoids an extra API
+    lookup to translate the name into the role ID.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ state:
+ description:
+ - State of the user_rolemapping.
+ - On C(present), the user_rolemapping will be created if it does not yet exist, or updated with the parameters you provide.
+ - On C(absent), the user_rolemapping will be removed if it exists.
+ default: 'present'
+ type: str
+ choices:
+ - present
+ - absent
+
+ realm:
+ type: str
+ description:
+    - The Keycloak realm under which this role_representation resides.
+ default: 'master'
+
+ target_username:
+ type: str
+ description:
+ - Username of the user roles are mapped to.
+    - This parameter is not required; you may provide I(uid) instead to reduce the number of API calls.
+
+ uid:
+ type: str
+ description:
+ - ID of the user to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ service_account_user_client_id:
+ type: str
+ description:
+ - Client ID of the service-account-user to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ client_id:
+ type: str
+ description:
+    - Name of the client to be mapped (different from I(cid)).
+ - This parameter is required if I(cid) is not provided (can be replaced by I(cid)
+ to reduce the number of API calls that must be made).
+
+ cid:
+ type: str
+ description:
+ - ID of the client to be mapped.
+ - This parameter is not required for updating or deleting the rolemapping but
+ providing it will reduce the number of API calls required.
+
+ roles:
+ description:
+ - Roles to be mapped to the user.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ description:
+ - Name of the role representation.
+ - This parameter is required only when creating or updating the role_representation.
+ id:
+ type: str
+ description:
+ - The unique identifier for this role_representation.
+ - This parameter is not required for updating or deleting a role_representation but
+ providing it will reduce the number of API calls required.
+
+extends_documentation_fragment:
+ - community.general.keycloak
+ - community.general.attributes
+
+author:
+ - Dušan Marković (@bratwurzt)
+'''
+
+EXAMPLES = '''
+- name: Map a client role to a user, authentication with credentials
+ community.general.keycloak_user_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ client_id: client1
+    uid: user1Id
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Map a client role to a service account user for a client, authentication with credentials
+ community.general.keycloak_user_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: present
+ client_id: client1
+ service_account_user_client_id: clientIdOfServiceAccount
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Map a client role to a user, authentication with token
+ community.general.keycloak_user_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ token: TOKEN
+ state: present
+ client_id: client1
+ target_username: user1
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+
+- name: Unmap client role from a user
+ community.general.keycloak_user_rolemapping:
+ realm: MyCustomRealm
+ auth_client_id: admin-cli
+ auth_keycloak_url: https://auth.example.com/auth
+ auth_realm: master
+ auth_username: USERNAME
+ auth_password: PASSWORD
+ state: absent
+ client_id: client1
+ uid: 70e3ae72-96b6-11e6-9056-9737fd4d0764
+ roles:
+ - name: role_name1
+ id: role_id1
+ - name: role_name2
+ id: role_id2
+ delegate_to: localhost
+'''
+
+RETURN = '''
+msg:
+ description: Message as to what action was taken.
+ returned: always
+ type: str
+ sample: "Role role1 assigned to user user1."
+
+proposed:
+ description: Representation of proposed client role mapping.
+ returned: always
+ type: dict
+ sample: {
+ clientId: "test"
+ }
+
+existing:
+ description:
+ - Representation of existing client role mapping.
+ - The sample is truncated.
+ returned: always
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+
+end_state:
+ description:
+ - Representation of client role mapping after module execution.
+ - The sample is truncated.
+ returned: on success
+ type: dict
+ sample: {
+ "adminUrl": "http://www.example.com/admin_url",
+ "attributes": {
+ "request.object.signature.alg": "RS256",
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import KeycloakAPI, \
+ keycloak_argument_spec, get_token, KeycloakError
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ """
+ Module execution
+
+ :return:
+ """
+ argument_spec = keycloak_argument_spec()
+
+ roles_spec = dict(
+ name=dict(type='str'),
+ id=dict(type='str'),
+ )
+
+ meta_args = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ realm=dict(default='master'),
+ uid=dict(type='str'),
+ target_username=dict(type='str'),
+ service_account_user_client_id=dict(type='str'),
+ cid=dict(type='str'),
+ client_id=dict(type='str'),
+ roles=dict(type='list', elements='dict', options=roles_spec),
+ )
+
+ argument_spec.update(meta_args)
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=([['token', 'auth_realm', 'auth_username', 'auth_password'],
+ ['uid', 'target_username', 'service_account_user_client_id']]),
+ required_together=([['auth_realm', 'auth_username', 'auth_password']]))
+
+ result = dict(changed=False, msg='', diff={}, proposed={}, existing={}, end_state={})
+
+ # Obtain access token, initialize API
+ try:
+ connection_header = get_token(module.params)
+ except KeycloakError as e:
+ module.fail_json(msg=str(e))
+
+ kc = KeycloakAPI(module, connection_header)
+
+ realm = module.params.get('realm')
+ state = module.params.get('state')
+ cid = module.params.get('cid')
+ client_id = module.params.get('client_id')
+ uid = module.params.get('uid')
+ target_username = module.params.get('target_username')
+ service_account_user_client_id = module.params.get('service_account_user_client_id')
+ roles = module.params.get('roles')
+
+ # Check the parameters
+ if uid is None and target_username is None and service_account_user_client_id is None:
+ module.fail_json(msg='Either the `target_username`, `uid` or `service_account_user_client_id` has to be specified.')
+
+ # Get the potential missing parameters
+ if uid is None and service_account_user_client_id is None:
+ user_rep = kc.get_user_by_username(username=target_username, realm=realm)
+ if user_rep is not None:
+ uid = user_rep.get('id')
+ else:
+            module.fail_json(msg='Could not fetch user for username %s' % target_username)
+ else:
+ if uid is None and target_username is None:
+ user_rep = kc.get_service_account_user_by_client_id(client_id=service_account_user_client_id, realm=realm)
+ if user_rep is not None:
+ uid = user_rep['id']
+ else:
+                module.fail_json(msg='Could not fetch service-account-user for client_id %s' % service_account_user_client_id)
+
+ if cid is None and client_id is not None:
+ cid = kc.get_client_id(client_id=client_id, realm=realm)
+ if cid is None:
+ module.fail_json(msg='Could not fetch client %s:' % client_id)
+ if roles is None:
+ module.exit_json(msg="Nothing to do (no roles specified).")
+ else:
+        for role in roles:
+ if role.get('name') is None and role.get('id') is None:
+ module.fail_json(msg='Either the `name` or `id` has to be specified on each role.')
+ # Fetch missing role_id
+ if role.get('id') is None:
+ if cid is None:
+ role_id = kc.get_realm_role(name=role.get('name'), realm=realm)['id']
+ else:
+ role_id = kc.get_client_role_id_by_name(cid=cid, name=role.get('name'), realm=realm)
+ if role_id is not None:
+ role['id'] = role_id
+ else:
+ module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('name'), client_id, realm))
+ # Fetch missing role_name
+ else:
+ if cid is None:
+ role['name'] = kc.get_realm_user_rolemapping_by_id(uid=uid, rid=role.get('id'), realm=realm)['name']
+ else:
+ role['name'] = kc.get_client_user_rolemapping_by_id(uid=uid, cid=cid, rid=role.get('id'), realm=realm)['name']
+ if role.get('name') is None:
+ module.fail_json(msg='Could not fetch role %s for client_id %s or realm %s' % (role.get('id'), client_id, realm))
+
+ # Get effective role mappings
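+    # "available" roles are those that could still be assigned to the user;
+    # "composite" roles are the effective assignments (direct and inherited)
+    # used below for change detection.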
+ if cid is None:
+ available_roles_before = kc.get_realm_user_available_rolemappings(uid=uid, realm=realm)
+ assigned_roles_before = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
+ else:
+ available_roles_before = kc.get_client_user_available_rolemappings(uid=uid, cid=cid, realm=realm)
+ assigned_roles_before = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
+
+ result['existing'] = assigned_roles_before
+ result['proposed'] = roles
+
+ update_roles = []
+    for role in roles:
+ # Fetch roles to assign if state present
+ if state == 'present':
+ for available_role in available_roles_before:
+ if role.get('name') == available_role.get('name'):
+ update_roles.append({
+ 'id': role.get('id'),
+ 'name': role.get('name'),
+ })
+ # Fetch roles to remove if state absent
+ else:
+ for assigned_role in assigned_roles_before:
+ if role.get('name') == assigned_role.get('name'):
+ update_roles.append({
+ 'id': role.get('id'),
+ 'name': role.get('name'),
+ })
+
+    if update_roles:
+ if state == 'present':
+ # Assign roles
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=assigned_roles_before, after=update_roles)
+ if module.check_mode:
+ module.exit_json(**result)
+ kc.add_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
+ result['msg'] = 'Roles %s assigned to userId %s.' % (update_roles, uid)
+ if cid is None:
+ assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
+ else:
+ assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
+ result['end_state'] = assigned_roles_after
+ module.exit_json(**result)
+ else:
+ # Remove mapping of role
+ result['changed'] = True
+ if module._diff:
+ result['diff'] = dict(before=assigned_roles_before, after=update_roles)
+ if module.check_mode:
+ module.exit_json(**result)
+ kc.delete_user_rolemapping(uid=uid, cid=cid, role_rep=update_roles, realm=realm)
+ result['msg'] = 'Roles %s removed from userId %s.' % (update_roles, uid)
+ if cid is None:
+ assigned_roles_after = kc.get_realm_user_composite_rolemappings(uid=uid, realm=realm)
+ else:
+ assigned_roles_after = kc.get_client_user_composite_rolemappings(uid=uid, cid=cid, realm=realm)
+ result['end_state'] = assigned_roles_after
+ module.exit_json(**result)
+ # Do nothing
+ else:
+ result['changed'] = False
+        result['msg'] = 'Nothing to do, roles %s are already correctly mapped to user %s.' % (roles, target_username or uid)
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keyring.py b/ansible_collections/community/general/plugins/modules/keyring.py
new file mode 100644
index 000000000..ada22ed58
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keyring.py
@@ -0,0 +1,279 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Alexander Hussey <ahussey@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""
+Ansible Module - community.general.keyring
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: keyring
+version_added: 5.2.0
+author:
+ - Alexander Hussey (@ahussey-redhat)
+short_description: Set or delete a passphrase using the Operating System's native keyring
+description: >-
+ This module uses the L(keyring Python library, https://pypi.org/project/keyring/)
+ to set or delete passphrases for a given service and username from the OS' native keyring.
+requirements:
+ - keyring (Python library)
+ - gnome-keyring (application - required for headless Gnome keyring access)
+ - dbus-run-session (application - required for headless Gnome keyring access)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ service:
+ description: The name of the service.
+ required: true
+ type: str
+ username:
+ description: The user belonging to the service.
+ required: true
+ type: str
+ user_password:
+ description: The password to set.
+ required: false
+ type: str
+ aliases:
+ - password
+ keyring_password:
+ description: Password to unlock keyring.
+ required: true
+ type: str
+ state:
+ description: Whether the password should exist.
+ required: false
+ default: present
+ type: str
+ choices:
+ - present
+ - absent
+"""
+
+EXAMPLES = r"""
+- name: Set a password for test/test1
+ community.general.keyring:
+ service: test
+ username: test1
+ user_password: "{{ user_password }}"
+ keyring_password: "{{ keyring_password }}"
+
+- name: Delete the password for test/test1
+ community.general.keyring:
+ service: test
+ username: test1
+ user_password: "{{ user_password }}"
+ keyring_password: "{{ keyring_password }}"
+ state: absent
+"""
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+try:
+ import keyring
+
+ HAS_KEYRING = True
+ KEYRING_IMP_ERR = None
+except ImportError:
+ HAS_KEYRING = False
+ KEYRING_IMP_ERR = traceback.format_exc()
+
+
+def del_passphrase(module):
+ """
+    Attempt to delete a passphrase in the keyring using the Python API and fall back to using a shell.
+ """
+ if module.check_mode:
+ return None
+ try:
+ keyring.delete_password(module.params["service"], module.params["username"])
+ return None
+    except keyring.errors.KeyringLocked:
+ delete_argument = (
+ 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring del %s %s\n'
+ % (
+ quote(module.params["keyring_password"]),
+ quote(module.params["service"]),
+ quote(module.params["username"]),
+ )
+ )
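+        # Fallback for a locked keyring: run a short script under a transient
+        # D-Bus session that unlocks the keyring with gnome-keyring-daemon and
+        # then deletes the entry via the `keyring` CLI.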
+ dummy, dummy, stderr = module.run_command(
+ "dbus-run-session -- /bin/bash",
+ use_unsafe_shell=True,
+ data=delete_argument,
+ encoding=None,
+ )
+
+ if not stderr.decode("UTF-8"):
+ return None
+ return stderr.decode("UTF-8")
+
+
+def set_passphrase(module):
+ """
+    Attempt to set a passphrase in the keyring using the Python API and fall back to using a shell.
+ """
+ if module.check_mode:
+ return None
+ try:
+ keyring.set_password(
+ module.params["service"],
+ module.params["username"],
+ module.params["user_password"],
+ )
+ return None
+    except keyring.errors.KeyringLocked:
+ set_argument = (
+ 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring set %s %s\n%s\n'
+ % (
+ quote(module.params["keyring_password"]),
+ quote(module.params["service"]),
+ quote(module.params["username"]),
+ quote(module.params["user_password"]),
+ )
+ )
+ dummy, dummy, stderr = module.run_command(
+ "dbus-run-session -- /bin/bash",
+ use_unsafe_shell=True,
+ data=set_argument,
+ encoding=None,
+ )
+ if not stderr.decode("UTF-8"):
+ return None
+ return stderr.decode("UTF-8")
+
+
+def get_passphrase(module):
+ """
+    Attempt to retrieve a passphrase from the keyring using the Python API and fall back to using a shell.
+ """
+ try:
+ passphrase = keyring.get_password(
+ module.params["service"], module.params["username"]
+ )
+ return passphrase
+ except keyring.errors.KeyringLocked:
+ pass
+ except keyring.errors.InitError:
+ pass
+ except AttributeError:
+ pass
+ get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % (
+ quote(module.params["keyring_password"]),
+ quote(module.params["service"]),
+ quote(module.params["username"]),
+ )
+ dummy, stdout, dummy = module.run_command(
+ "dbus-run-session -- /bin/bash",
+ use_unsafe_shell=True,
+ data=get_argument,
+ encoding=None,
+ )
+ try:
+ return stdout.decode("UTF-8").splitlines()[1] # Only return the line containing the password
+ except IndexError:
+ return None
+
+
+def run_module():
+ """
+    Sets or deletes a passphrase in the keyring.
+ """
+ result = dict(
+ changed=False,
+ msg="",
+ )
+
+ module_args = dict(
+ service=dict(type="str", required=True),
+ username=dict(type="str", required=True),
+ keyring_password=dict(type="str", required=True, no_log=True),
+ user_password=dict(
+ type="str", required=False, no_log=True, aliases=["password"]
+ ),
+ state=dict(
+ type="str", required=False, default="present", choices=["absent", "present"]
+ ),
+ )
+
+ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+
+ if not HAS_KEYRING:
+ module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR)
+
+ passphrase = get_passphrase(module)
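+    # Desired-state handling: for state=present, set or update the secret only
+    # when it is missing or differs; for state=absent, remove it only when set.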
+ if module.params["state"] == "present":
+ if passphrase is not None:
+ if passphrase == module.params["user_password"]:
+ result["msg"] = "Passphrase already set for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ if passphrase != module.params["user_password"]:
+ set_result = set_passphrase(module)
+ if set_result is None:
+ result["changed"] = True
+ result["msg"] = "Passphrase has been updated for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ if set_result is not None:
+ module.fail_json(msg=set_result)
+ if passphrase is None:
+ set_result = set_passphrase(module)
+ if set_result is None:
+ result["changed"] = True
+ result["msg"] = "Passphrase has been updated for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ if set_result is not None:
+ module.fail_json(msg=set_result)
+
+ if module.params["state"] == "absent":
+ if not passphrase:
+ result["result"] = "Passphrase already absent for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ if passphrase:
+ del_result = del_passphrase(module)
+ if del_result is None:
+ result["changed"] = True
+ result["msg"] = "Passphrase has been removed for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ if del_result is not None:
+ module.fail_json(msg=del_result)
+
+ module.exit_json(**result)
+
+
+def main():
+ """
+ main module loop
+ """
+ run_module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/keyring_info.py b/ansible_collections/community/general/plugins/modules/keyring_info.py
new file mode 100644
index 000000000..5c41ecc4d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/keyring_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022, Alexander Hussey <ahussey@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""
+Ansible Module - community.general.keyring_info
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: keyring_info
+version_added: 5.2.0
+author:
+ - Alexander Hussey (@ahussey-redhat)
+short_description: Get a passphrase using the Operating System's native keyring
+description: >-
+ This module uses the L(keyring Python library, https://pypi.org/project/keyring/)
+ to retrieve passphrases for a given service and username from the OS' native keyring.
+requirements:
+ - keyring (Python library)
+ - gnome-keyring (application - required for headless Linux keyring access)
+ - dbus-run-session (application - required for headless Linux keyring access)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ service:
+ description: The name of the service.
+ required: true
+ type: str
+ username:
+ description: The user belonging to the service.
+ required: true
+ type: str
+ keyring_password:
+ description: Password to unlock keyring.
+ required: true
+ type: str
+"""
+
+EXAMPLES = r"""
+ - name: Retrieve password for service_name/user_name
+ community.general.keyring_info:
+ service: test
+ username: test1
+ keyring_password: "{{ keyring_password }}"
+ register: test_password
+
+ - name: Display password
+ ansible.builtin.debug:
+ msg: "{{ test_password.passphrase }}"
+"""
+
+RETURN = r"""
+ passphrase:
+ description: A string containing the password.
+ returned: success and the password exists
+ type: str
+ sample: Password123
+"""
+
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+try:
+ import keyring
+
+ HAS_KEYRING = True
+ KEYRING_IMP_ERR = None
+except ImportError:
+ HAS_KEYRING = False
+ KEYRING_IMP_ERR = traceback.format_exc()
+
+
+def _alternate_retrieval_method(module):
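+    """Fallback for locked or headless keyrings: unlock the keyring with
+    gnome-keyring-daemon inside a transient D-Bus session and read the entry
+    via the `keyring` CLI."""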
+ get_argument = 'echo "%s" | gnome-keyring-daemon --unlock\nkeyring get %s %s\n' % (
+ quote(module.params["keyring_password"]),
+ quote(module.params["service"]),
+ quote(module.params["username"]),
+ )
+ dummy, stdout, dummy = module.run_command(
+ "dbus-run-session -- /bin/bash",
+ use_unsafe_shell=True,
+ data=get_argument,
+ encoding=None,
+ )
+ try:
+ return stdout.decode("UTF-8").splitlines()[1]
+ except IndexError:
+ return None
+
+
+def run_module():
+ """
+ Attempts to retrieve a passphrase from a keyring.
+ """
+ result = dict(changed=False, msg="")
+
+ module_args = dict(
+ service=dict(type="str", required=True),
+ username=dict(type="str", required=True),
+ keyring_password=dict(type="str", required=True, no_log=True),
+ )
+
+ module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
+
+ if not HAS_KEYRING:
+ module.fail_json(msg=missing_required_lib("keyring"), exception=KEYRING_IMP_ERR)
+    passphrase = None  # ensure the variable is bound even if keyring raises below
+    try:
+ passphrase = keyring.get_password(
+ module.params["service"], module.params["username"]
+ )
+ except keyring.errors.KeyringLocked:
+ pass
+ except keyring.errors.InitError:
+ pass
+ except AttributeError:
+ pass
+
+ if passphrase is None:
+ passphrase = _alternate_retrieval_method(module)
+
+ if passphrase is not None:
+ result["msg"] = "Successfully retrieved password for %s@%s" % (
+ module.params["service"],
+ module.params["username"],
+ )
+ result["passphrase"] = passphrase
+ if passphrase is None:
+ result["msg"] = "Password for %s@%s does not exist." % (
+ module.params["service"],
+ module.params["username"],
+ )
+ module.exit_json(**result)
+
+
+def main():
+ """
+ main module loop
+ """
+ run_module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/kibana_plugin.py b/ansible_collections/community/general/plugins/modules/kibana_plugin.py
new file mode 100644
index 000000000..a52eda2fd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/kibana_plugin.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Thierno IB. BARRY @barryib
+# Sponsored by Polyconseil http://polyconseil.fr.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: kibana_plugin
+short_description: Manage Kibana plugins
+description:
+ - This module can be used to manage Kibana plugins.
+author: Thierno IB. BARRY (@barryib)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the plugin to install.
+ required: true
+ type: str
+ state:
+ description:
+ - Desired state of a plugin.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ url:
+ description:
+ - Set exact URL to download the plugin from.
+      - For a local file, prefix its absolute path with C(file://).
+ type: str
+ timeout:
+ description:
+ - "Timeout setting: 30s, 1m, 1h etc."
+ default: 1m
+ type: str
+ plugin_bin:
+ description:
+ - Location of the Kibana binary.
+ default: /opt/kibana/bin/kibana
+ type: path
+ plugin_dir:
+ description:
+ - Your configured plugin directory specified in Kibana.
+ default: /opt/kibana/installedPlugins/
+ type: path
+ version:
+ description:
+ - Version of the plugin to be installed.
+      - If the plugin exists with a previous version, it will NOT be updated unless C(force) is set to C(true).
+ type: str
+ force:
+ description:
+      - Delete and re-install the plugin. Can be useful for plugin updates.
+ type: bool
+ default: false
+ allow_root:
+ description:
+ - Whether to allow C(kibana) and C(kibana-plugin) to be run as root. Passes the C(--allow-root) flag to these commands.
+ type: bool
+ default: false
+ version_added: 2.3.0
+'''
+
+EXAMPLES = '''
+- name: Install Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+
+- name: Install specific version of a plugin
+ community.general.kibana_plugin:
+ state: present
+ name: elasticsearch/marvel
+ version: '2.3.3'
+
+- name: Uninstall Elasticsearch head plugin
+ community.general.kibana_plugin:
+ state: absent
+ name: elasticsearch/marvel
+'''
+
+RETURN = '''
+cmd:
+ description: the launched command during plugin management (install / remove)
+ returned: success
+ type: str
+name:
+ description: the plugin name to install or remove
+ returned: success
+ type: str
+url:
+  description: the URL from which the plugin was installed
+ returned: success
+ type: str
+timeout:
+ description: the timeout for plugin download
+ returned: success
+ type: str
+stdout:
+ description: the command stdout
+ returned: success
+ type: str
+stderr:
+ description: the command stderr
+ returned: success
+ type: str
+state:
+ description: the state for the managed plugin
+ returned: success
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+PACKAGE_STATE_MAP = dict(
+ present="--install",
+ absent="--remove"
+)
+
+
+def parse_plugin_repo(string):
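+    """Extract the plugin name from "name" or "user/name" forms, stripping any
+    "elasticsearch-" or "es-" prefix (e.g. "user/es-kopf" -> "kopf")."""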
+ elements = string.split("/")
+
+ # We first consider the simplest form: pluginname
+ repo = elements[0]
+
+ # We consider the form: username/pluginname
+ if len(elements) > 1:
+ repo = elements[1]
+
+    # remove elasticsearch- or es- prefix
+    for prefix in ("elasticsearch-", "es-"):
+        if repo.startswith(prefix):
+            return repo[len(prefix):]
+
+ return repo
+
+
+def is_plugin_present(plugin_dir, working_dir):
+ return os.path.isdir(os.path.join(working_dir, plugin_dir))
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, url, timeout, allow_root, kibana_version='4.6'):
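+    """Install a plugin, using `kibana-plugin install` for Kibana versions
+    newer than 4.6 and the legacy `kibana plugin --install` form otherwise."""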
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "install"]
+ if url:
+ cmd_args.append(url)
+ else:
+ cmd_args.append(plugin_name)
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if url:
+ cmd_args.extend(["--url", url])
+
+ if timeout:
+ cmd_args.extend(["--timeout", timeout])
+
+ if allow_root:
+ cmd_args.append('--allow-root')
+
+ if module.check_mode:
+ return True, " ".join(cmd_args), "check mode", ""
+
+ rc, out, err = module.run_command(cmd_args)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, " ".join(cmd_args), out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name, allow_root, kibana_version='4.6'):
+ if LooseVersion(kibana_version) > LooseVersion('4.6'):
+ kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
+ cmd_args = [kibana_plugin_bin, "remove", plugin_name]
+ else:
+ cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ if allow_root:
+ cmd_args.append('--allow-root')
+
+ if module.check_mode:
+ return True, " ".join(cmd_args), "check mode", ""
+
+ rc, out, err = module.run_command(cmd_args)
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, " ".join(cmd_args), out, err
+
+
+def get_kibana_version(module, plugin_bin, allow_root):
+ cmd_args = [plugin_bin, '--version']
+
+ if allow_root:
+ cmd_args.append('--allow-root')
+
+ rc, out, err = module.run_command(cmd_args)
+ if rc != 0:
+ module.fail_json(msg="Failed to get Kibana version : %s" % err)
+
+ return out.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
+ url=dict(default=None),
+ timeout=dict(default="1m"),
+ plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
+ plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
+ version=dict(default=None),
+ force=dict(default=False, type="bool"),
+ allow_root=dict(default=False, type="bool"),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ url = module.params["url"]
+ timeout = module.params["timeout"]
+ plugin_bin = module.params["plugin_bin"]
+ plugin_dir = module.params["plugin_dir"]
+ version = module.params["version"]
+ force = module.params["force"]
+ allow_root = module.params["allow_root"]
+
+ changed, cmd, out, err = False, '', '', ''
+
+ kibana_version = get_kibana_version(module, plugin_bin, allow_root)
+
+ present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
+
+ # skip if the state is correct
+ if (present and state == "present" and not force) or (state == "absent" and not present and not force):
+ module.exit_json(changed=False, name=name, state=state)
+
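+    # Appending "/<version>" follows the plugin installer's name/version convention.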
+ if version:
+ name = name + '/' + version
+
+ if state == "present":
+ if force:
+ remove_plugin(module, plugin_bin, name, allow_root, kibana_version)
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, allow_root, kibana_version)
+
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name, allow_root, kibana_version)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/launchd.py b/ansible_collections/community/general/plugins/modules/launchd.py
new file mode 100644
index 000000000..13a8ce086
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/launchd.py
@@ -0,0 +1,522 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Martin Migasiewicz <migasiew.nk@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: launchd
+author:
+ - Martin Migasiewicz (@martinm82)
+short_description: Manage macOS services
+version_added: 1.0.0
+description:
+ - Manage launchd services on target macOS hosts.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the service.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ - Launchd does not support C(restarted) nor C(reloaded) natively.
+ These will trigger a stop/start (restarted) or an unload/load
+ (reloaded).
+ - C(restarted) unloads and loads the service before start to ensure
+ that the latest job definition (plist) is used.
+ - C(reloaded) unloads and loads the service to ensure that the latest
+ job definition (plist) is used. Whether a service is started or
+ stopped depends on the content of the definition file.
+ type: str
+ choices: [ reloaded, restarted, started, stopped, unloaded ]
+ enabled:
+ description:
+ - Whether the service should start on boot.
+ - B(At least one of state and enabled are required.)
+ type: bool
+ force_stop:
+ description:
+ - Whether the service should not be restarted automatically by launchd.
+ - Services might have the 'KeepAlive' attribute set to true in a launchd configuration.
+      In case this is set to true, stopping a service will cause launchd to start the service again.
+ - Set this option to C(true) to let this module change the 'KeepAlive' attribute to false.
+ type: bool
+ default: false
+notes:
+- A user must be privileged to manage services using this module.
+requirements:
+- A system managed by launchd
+- The plistlib python library
+'''
+
+EXAMPLES = r'''
+- name: Make sure spotify webhelper is started
+ community.general.launchd:
+ name: com.spotify.webhelper
+ state: started
+
+- name: Deploy custom memcached job definition
+ template:
+ src: org.memcached.plist.j2
+ dest: /Library/LaunchDaemons/org.memcached.plist
+
+- name: Run memcached
+ community.general.launchd:
+ name: org.memcached
+ state: started
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+
+- name: Stop memcached
+ community.general.launchd:
+ name: org.memcached
+ state: stopped
+ force_stop: true
+
+- name: Restart memcached
+ community.general.launchd:
+ name: org.memcached
+ state: restarted
+
+- name: Unload memcached
+ community.general.launchd:
+ name: org.memcached
+ state: unloaded
+'''
+
+RETURN = r'''
+status:
+ description: Metadata about service status
+ returned: always
+ type: dict
+ sample:
+ {
+ "current_pid": "-",
+ "current_state": "stopped",
+ "previous_pid": "82636",
+ "previous_state": "running"
+ }
+'''
+
+import os
+import plistlib
+from abc import ABCMeta, abstractmethod
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ServiceState:
+ UNKNOWN = 0
+ LOADED = 1
+ STOPPED = 2
+ STARTED = 3
+ UNLOADED = 4
+
+ @staticmethod
+ def to_string(state):
+ strings = {
+ ServiceState.UNKNOWN: 'unknown',
+ ServiceState.LOADED: 'loaded',
+ ServiceState.STOPPED: 'stopped',
+ ServiceState.STARTED: 'started',
+ ServiceState.UNLOADED: 'unloaded'
+ }
+ return strings[state]
+
+
+class Plist:
+ def __init__(self, module, service):
+ self.__changed = False
+ self.__service = service
+
+ state, pid, dummy, dummy = LaunchCtlList(module, self.__service).run()
+
+ # Check if readPlist is available or not
+ self.old_plistlib = hasattr(plistlib, 'readPlist')
+
+ self.__file = self.__find_service_plist(self.__service)
+ if self.__file is None:
+ msg = 'Unable to infer the path of %s service plist file' % self.__service
+ if pid is None and state == ServiceState.UNLOADED:
+ msg += ' and it was not found among active services'
+ module.fail_json(msg=msg)
+ self.__update(module)
+
+ @staticmethod
+ def __find_service_plist(service_name):
+ """Finds the plist file associated with a service"""
+
+ launchd_paths = [
+ os.path.join(os.getenv('HOME'), 'Library/LaunchAgents'),
+ '/Library/LaunchAgents',
+ '/Library/LaunchDaemons',
+ '/System/Library/LaunchAgents',
+ '/System/Library/LaunchDaemons'
+ ]
+
+ for path in launchd_paths:
+ try:
+ files = os.listdir(path)
+ except OSError:
+ continue
+
+ filename = '%s.plist' % service_name
+ if filename in files:
+ return os.path.join(path, filename)
+ return None
+
+ def __update(self, module):
+ self.__handle_param_enabled(module)
+ self.__handle_param_force_stop(module)
+
+ def __read_plist_file(self, module):
+ service_plist = {}
+ if self.old_plistlib:
+ return plistlib.readPlist(self.__file)
+
+ # readPlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'rb') as plist_fp:
+ service_plist = plistlib.load(plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to read plist file "
+ "%s due to %s" % (self.__file, to_native(e)))
+ return service_plist
+
+ def __write_plist_file(self, module, service_plist=None):
+ if not service_plist:
+ service_plist = {}
+
+ if self.old_plistlib:
+ plistlib.writePlist(service_plist, self.__file)
+ return
+ # writePlist is deprecated in Python 3 and onwards
+ try:
+ with open(self.__file, 'wb') as plist_fp:
+ plistlib.dump(service_plist, plist_fp)
+ except Exception as e:
+ module.fail_json(msg="Failed to write to plist file "
+ " %s due to %s" % (self.__file, to_native(e)))
+
+ def __handle_param_enabled(self, module):
+ if module.params['enabled'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+ # Enable/disable service startup at boot if requested
+ # Launchctl does not expose functionality to set the RunAtLoad
+ # attribute of a job definition. So we parse and modify the job
+ # definition plist file directly for this purpose.
+            enabled = service_plist.get('RunAtLoad', False)
+            if module.params['enabled'] != enabled:
+                service_plist['RunAtLoad'] = module.params['enabled']
+
+                # Persist the change to the job definition.
+                if not module.check_mode:
+                    self.__write_plist_file(module, service_plist)
+                self.__changed = True
+
+ def __handle_param_force_stop(self, module):
+ if module.params['force_stop'] is not None:
+ service_plist = self.__read_plist_file(module)
+
+            # Set KeepAlive to false when force_stop is requested, so that
+            # launchd does not restart the service after it is stopped.
+            keep_alive = service_plist.get('KeepAlive', False)
+            if module.params['force_stop'] and keep_alive:
+                service_plist['KeepAlive'] = False
+
+                # Persist the change to the job definition.
+                if not module.check_mode:
+                    self.__write_plist_file(module, service_plist)
+                self.__changed = True
+
+ def is_changed(self):
+ return self.__changed
+
+ def get_file(self):
+ return self.__file
+
+
+class LaunchCtlTask(object):
+ __metaclass__ = ABCMeta
+ WAITING_TIME = 5 # seconds
+
+ def __init__(self, module, service, plist):
+ self._module = module
+ self._service = service
+ self._plist = plist
+ self._launch = self._module.get_bin_path('launchctl', True)
+
+ def run(self):
+ """Runs a launchd command like 'load', 'unload', 'start', 'stop', etc.
+ and returns the new state and pid.
+ """
+ self.runCommand()
+ return self.get_state()
+
+ @abstractmethod
+ def runCommand(self):
+ pass
+
+ def get_state(self):
+ rc, out, err = self._launchctl("list")
+ if rc != 0:
+ self._module.fail_json(
+ msg='Failed to get status of %s' % (self._launch))
+
+ state = ServiceState.UNLOADED
+ service_pid = "-"
+ status_code = None
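+        # Each line of `launchctl list` output is "PID<TAB>last-exit-code<TAB>label";
+        # a PID of "-" means the job is loaded but not currently running.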
+ for line in out.splitlines():
+ if line.strip():
+ pid, last_exit_code, label = line.split('\t')
+ if label.strip() == self._service:
+ service_pid = pid
+ status_code = last_exit_code
+
+ # From launchctl man page:
+ # If the number [...] is negative, it represents the
+ # negative of the signal which killed the job. Thus,
+ # "-15" would indicate that the job was terminated with
+ # SIGTERM.
+ if last_exit_code not in ['0', '-2', '-3', '-9', '-15']:
+ # Something strange happened and we have no clue in
+ # which state the service is now. Therefore we mark
+ # the service state as UNKNOWN.
+ state = ServiceState.UNKNOWN
+ elif pid != '-':
+ # PID seems to be an integer so we assume the service
+ # is started.
+ state = ServiceState.STARTED
+ else:
+ # Exit code is 0 and PID is not available so we assume
+ # the service is stopped.
+ state = ServiceState.STOPPED
+ break
+ return (state, service_pid, status_code, err)
+
+ def start(self):
+ rc, out, err = self._launchctl("start")
+ # Unfortunately launchd does not wait until the process really started.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def stop(self):
+ rc, out, err = self._launchctl("stop")
+ # Unfortunately launchd does not wait until the process really stopped.
+ sleep(self.WAITING_TIME)
+ return (rc, out, err)
+
+ def restart(self):
+ # TODO: check for rc, out, err
+ self.stop()
+ return self.start()
+
+ def reload(self):
+ # TODO: check for rc, out, err
+ self.unload()
+ return self.load()
+
+ def load(self):
+ return self._launchctl("load")
+
+ def unload(self):
+ return self._launchctl("unload")
+
+ def _launchctl(self, command):
+ service_or_plist = self._plist.get_file() if command in [
+ 'load', 'unload'] else self._service if command in ['start', 'stop'] else ""
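+        # load/unload operate on the plist path, start/stop on the job label,
+        # and list takes no trailing argument.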
+
+ rc, out, err = self._module.run_command(
+ '%s %s %s' % (self._launch, command, service_or_plist))
+
+ if rc != 0:
+ msg = "Unable to %s '%s' (%s): '%s'" % (
+ command, self._service, self._plist.get_file(), err)
+ self._module.fail_json(msg=msg)
+
+ return (rc, out, err)
+
+
+class LaunchCtlStart(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state in (ServiceState.STOPPED, ServiceState.LOADED):
+ self.reload()
+ self.start()
+ elif state == ServiceState.STARTED:
+ # In case the service is already in started state but the
+ # job definition was changed we need to unload/load the
+ # service and start the service again.
+ if self._plist.is_changed():
+ self.reload()
+ self.start()
+ elif state == ServiceState.UNLOADED:
+ self.load()
+ self.start()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and start the service again.
+ self.reload()
+ self.start()
+
+
+class LaunchCtlStop(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlStop, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.STOPPED:
+ # In case the service is stopped and we might later decide
+ # to start it, we need to reload the job definition by
+ # forcing an unload and load first.
+ # Afterwards we need to stop it as it might have been
+ # started again (KeepAlive or RunAtLoad).
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state in (ServiceState.STARTED, ServiceState.LOADED):
+ if self._plist.is_changed():
+ self.reload()
+ self.stop()
+ elif state == ServiceState.UNKNOWN:
+ # We are in an unknown state, let's try to reload the config
+ # and stop the service gracefully.
+ self.reload()
+ self.stop()
+
+
+class LaunchCtlReload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlReload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+
+ if state == ServiceState.UNLOADED:
+ # launchd throws an error if we do an unload on an already
+ # unloaded service.
+ self.load()
+ else:
+ self.reload()
+
+
+class LaunchCtlUnload(LaunchCtlTask):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlUnload, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ state, dummy, dummy, dummy = self.get_state()
+ self.unload()
+
+
+class LaunchCtlRestart(LaunchCtlReload):
+ def __init__(self, module, service, plist):
+ super(LaunchCtlRestart, self).__init__(module, service, plist)
+
+ def runCommand(self):
+ super(LaunchCtlRestart, self).runCommand()
+ self.start()
+
+
+class LaunchCtlList(LaunchCtlTask):
+ def __init__(self, module, service):
+ super(LaunchCtlList, self).__init__(module, service, None)
+
+ def runCommand(self):
+ # Do nothing, the list functionality is done by the
+ # base class run method.
+ pass
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped', 'unloaded']),
+ enabled=dict(type='bool'),
+ force_stop=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_one_of=[
+ ['state', 'enabled'],
+ ],
+ )
+
+ service = module.params['name']
+ action = module.params['state']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': {},
+ }
+
+ # We will tailor the plist file in case one of the options
+ # (enabled, force_stop) was specified.
+ plist = Plist(module, service)
+ result['changed'] = plist.is_changed()
+
+ # Gather information about the service to be controlled.
+ state, pid, dummy, dummy = LaunchCtlList(module, service).run()
+ result['status']['previous_state'] = ServiceState.to_string(state)
+ result['status']['previous_pid'] = pid
+
+ # Map the actions to specific tasks
+ tasks = {
+ 'started': LaunchCtlStart(module, service, plist),
+ 'stopped': LaunchCtlStop(module, service, plist),
+ 'restarted': LaunchCtlRestart(module, service, plist),
+ 'reloaded': LaunchCtlReload(module, service, plist),
+ 'unloaded': LaunchCtlUnload(module, service, plist)
+ }
+
+ status_code = '0'
+ # Run the requested task
+ if not module.check_mode:
+ state, pid, status_code, err = tasks[action].run()
+
+ result['status']['current_state'] = ServiceState.to_string(state)
+ result['status']['current_pid'] = pid
+ result['status']['status_code'] = status_code
+ result['status']['error'] = err
+
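+    # A change is reported whenever the action altered the service state or
+    # PID (the plist tailoring above may already have set changed=True); in
+    # check mode we conservatively assume the requested action would change
+    # something.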
+ if (result['status']['current_state'] != result['status']['previous_state'] or
+ result['status']['current_pid'] != result['status']['previous_pid']):
+ result['changed'] = True
+ if module.check_mode:
+ result['changed'] = True
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/layman.py b/ansible_collections/community/general/plugins/modules/layman.py
new file mode 100644
index 000000000..940ac30d1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/layman.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Jakub Jirutka <jakub@jirutka.cz>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: layman
+author: "Jakub Jirutka (@jirutka)"
+short_description: Manage Gentoo overlays
+description:
+  - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+    Please note that Layman must be installed on a managed node prior to using this module.
+requirements:
+ - "python >= 2.6"
+ - layman python module
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when I(state=updated)).
+ required: true
+ type: str
+ list_url:
+ description:
+      - A URL of the alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from the Layman configuration.
+ aliases: [url]
+ type: str
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ default: present
+ choices: [present, absent, updated]
+ type: str
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be
+ set to C(false) when no other option exists. Prior to 1.9.3 the code
+ defaulted to C(false).
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+- name: Install the overlay mozilla which is on the central overlays list
+ community.general.layman:
+ name: mozilla
+
+- name: Install the overlay cvut from the specified alternative list
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+
+- name: Update (sync) the overlay cvut or install if not installed yet
+ community.general.layman:
+ name: cvut
+ list_url: 'http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml'
+ state: updated
+
+- name: Update (sync) all of the installed overlays
+ community.general.layman:
+ name: ALL
+ state: updated
+
+- name: Uninstall the overlay cvut
+ community.general.layman:
+ name: cvut
+ state: absent
+'''
+
+import shutil
+import traceback
+
+from os import path
+
+LAYMAN_IMP_ERR = None
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ LAYMAN_IMP_ERR = traceback.format_exc()
+ HAS_LAYMAN_API = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.urls import fetch_url
+
+
+USERAGENT = 'ansible-httpget'
+
+
+class ModuleError(Exception):
+ pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None:
+ config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(module, url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+
+ # Hack to add params in the form that fetch_url expects
+ module.params['http_agent'] = USERAGENT
+ response, info = fetch_url(module, url)
+ if info['status'] != 200:
+ raise ModuleError("Failed to get %s: %s" % (url, info['msg']))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError as e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(module, name, list_url=None):
+ '''Installs the overlay repository. If not on the central overlays list,
+ then :list_url of an alternative list must be provided. The list will be
+    fetched and saved under ``%(overlay_defs)s/%(name)s.xml`` (the location of
+    ``overlay_defs`` is read from the Layman configuration).
+
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would add layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ if not layman.is_repo(name):
+ if not list_url:
+ raise ModuleError("Overlay '%s' is not on the list of known "
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(module, list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name):
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(module, name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it does not exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ if module.check_mode:
+ mymsg = 'Would remove layman repo \'' + name + '\''
+ module.exit_json(changed=True, msg=mymsg)
+
+ layman.delete_repos(name)
+ if layman.get_errors():
+ raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
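+        # The third element of sync_results collects the failures reported by
+        # layman; each item is a tuple whose second field is the message.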
+ messages = [str(item[1]) for item in layman.sync_results[2]]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ list_url=dict(aliases=['url']),
+ state=dict(default="present", choices=['present', 'absent', 'updated']),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg=missing_required_lib('Layman'), exception=LAYMAN_IMP_ERR)
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(module, name, url)
+
+ elif state == 'updated':
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(module, name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(module, name)
+
+ except ModuleError as e:
+        # Exception.message does not exist on Python 3; stringify the error instead.
+        module.fail_json(msg=str(e))
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lbu.py b/ansible_collections/community/general/plugins/modules/lbu.py
new file mode 100644
index 000000000..c961b6060
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lbu.py
@@ -0,0 +1,138 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Kaarle Ritvanen <kaarle.ritvanen@datakunkku.fi>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lbu
+
+short_description: Local Backup Utility for Alpine Linux
+
+version_added: '0.2.0'
+
+description:
+ - Manage Local Backup Utility of Alpine Linux in run-from-RAM mode
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ commit:
+ description:
+ - Control whether to commit changed files.
+ type: bool
+ exclude:
+ description:
+ - List of paths to exclude.
+ type: list
+ elements: str
+ include:
+ description:
+ - List of paths to include.
+ type: list
+ elements: str
+
+author:
+ - Kaarle Ritvanen (@kunkku)
+'''
+
+EXAMPLES = '''
+# Commit changed files (if any)
+- name: Commit
+ community.general.lbu:
+ commit: true
+
+# Exclude path and commit
+- name: Exclude directory
+ community.general.lbu:
+ commit: true
+ exclude:
+ - /etc/opt
+
+# Include paths without committing
+- name: Include file and directory
+ community.general.lbu:
+ include:
+ - /root/.ssh/authorized_keys
+ - /var/lib/misc
+'''
+
+RETURN = '''
+msg:
+ description: Error message
+ type: str
+ returned: on failure
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os.path
+
+
+def run_module():
+ module = AnsibleModule(
+ argument_spec={
+ 'commit': {'type': 'bool'},
+ 'exclude': {'type': 'list', 'elements': 'str'},
+ 'include': {'type': 'list', 'elements': 'str'}
+ },
+ supports_check_mode=True
+ )
+
+ changed = False
+
+ def run_lbu(*args):
+ code, stdout, stderr = module.run_command(
+ [module.get_bin_path('lbu', required=True)] + list(args)
+ )
+ if code:
+ module.fail_json(changed=changed, msg=stderr)
+ return stdout
+
+ update = False
+ commit = False
+
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ paths = run_lbu(param, '-l').split('\n')
+ for path in module.params[param]:
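+                # Anchor the path at '/', normalize it, and strip the leading
+                # slash so it matches the entries listed by 'lbu <param> -l'.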
+ if os.path.normpath('/' + path)[1:] not in paths:
+ update = True
+
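+    # 'lbu status' lists files that differ from the last commit; any output
+    # therefore means there is something left to commit.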
+ if module.params['commit']:
+ commit = update or run_lbu('status') > ''
+
+ if module.check_mode:
+ module.exit_json(changed=update or commit)
+
+ if update:
+ for param in ('include', 'exclude'):
+ if module.params[param]:
+ run_lbu(param, *module.params[param])
+ changed = True
+
+ if commit:
+ run_lbu('commit')
+ changed = True
+
+ module.exit_json(changed=changed)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ldap_attrs.py b/ansible_collections/community/general/plugins/modules/ldap_attrs.py
new file mode 100644
index 000000000..c2cac8644
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ldap_attrs.py
@@ -0,0 +1,337 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Maciej Delmanowski <drybjed@gmail.com>
+# Copyright (c) 2017, Alexander Korinek <noles@a3k.net>
+# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: ldap_attrs
+short_description: Add or remove multiple LDAP attribute values
+description:
+ - Add or remove multiple LDAP attribute values.
+notes:
+ - This only deals with attributes on existing entries. To add or remove
+ whole entries, see M(community.general.ldap_entry).
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+ - For I(state=present) and I(state=absent), all value comparisons are
+ performed on the server for maximum accuracy. For I(state=exact), values
+ have to be compared in Python, which obviously ignores LDAP matching
+ rules. This should work out in most cases, but it is theoretically
+ possible to see spurious changes when target and actual values are
+ semantically identical but lexically distinct.
+version_added: '0.2.0'
+author:
+ - Jiri Tyr (@jtyr)
+ - Alexander Korinek (@noles)
+ - Maciej Delmanowski (@drybjed)
+requirements:
+ - python-ldap
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ required: false
+ type: str
+ choices: [present, absent, exact]
+ default: present
+ description:
+ - The state of the attribute values. If C(present), all given attribute
+ values will be added if they're missing. If C(absent), all given
+ attribute values will be removed if present. If C(exact), the set of
+ attribute values will be forced to exactly those provided and no others.
+ If I(state=exact) and the attribute I(value) is empty, all values for
+ this attribute will be removed.
+ attributes:
+ required: true
+ type: dict
+ description:
+ - The attribute(s) and value(s) to add or remove.
+ - Each attribute value can be a string for single-valued attributes or
+ a list of strings for multi-valued attributes.
+ - If you specify values for this option in YAML, please note that you can improve
+ readability for long string values by using YAML block modifiers as seen in the
+ examples for this module.
+ - Note that when using values that YAML/ansible-core interprets as other types,
+ like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
+ these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+ ordered:
+ required: false
+ type: bool
+ default: false
+ description:
+ - If C(true), prepend list values with X-ORDERED index numbers in all
+ attributes specified in the current task. This is useful mostly with
+ I(olcAccess) attribute to easily manage LDAP Access Control Lists.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+ - community.general.attributes
+
+'''
+
+
+EXAMPLES = r'''
+- name: Configure directory number 1 for example.com
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcSuffix: dc=example,dc=com
+ state: exact
+
+# The complex argument format is required here to pass a list of ACL strings.
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+        by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ state: exact
+
+# An alternative approach with automatic X-ORDERED numbering
+- name: Set up the ACL
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcAccess:
+ - >-
+ to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+        by * none
+ - >-
+ to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+ ordered: true
+ state: exact
+
+- name: Declare some indexes
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcDbIndex:
+ - objectClass eq
+ - uid eq
+
+- name: Set up a root user, which we can use later to bootstrap the directory
+ community.general.ldap_attrs:
+ dn: olcDatabase={1}hdb,cn=config
+ attributes:
+ olcRootDN: cn=root,dc=example,dc=com
+ olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+ state: exact
+
+- name: Remove an attribute with a specific value
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: "An example user account"
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+- name: Remove specified attribute(s) from an entry
+ community.general.ldap_attrs:
+ dn: uid=jdoe,ou=people,dc=example,dc=com
+ attributes:
+ description: []
+ state: exact
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+'''
+
+
+RETURN = r'''
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample:
+ - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]]
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native, to_bytes, to_text
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+import re
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+ import ldap.filter
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapAttrs(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.attrs = self.module.params['attributes']
+ self.state = self.module.params['state']
+ self.ordered = self.module.params['ordered']
+
+ def _order_values(self, values):
+ """ Preprend X-ORDERED index numbers to attribute's values. """
+ ordered_values = []
+
+ if isinstance(values, list):
+ for index, value in enumerate(values):
+ cleaned_value = re.sub(r'^\{\d+\}', '', value)
+ ordered_values.append('{' + str(index) + '}' + cleaned_value)
+
+ return ordered_values
+
+ def _normalize_values(self, values):
+ """ Normalize attribute's values. """
+ norm_values = []
+
+ if isinstance(values, list):
+ if self.ordered:
+ norm_values = list(map(to_bytes,
+ self._order_values(list(map(str,
+ values)))))
+ else:
+ norm_values = list(map(to_bytes, values))
+ else:
+ norm_values = [to_bytes(str(values))]
+
+ return norm_values
+
+ def add(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_absent(name, value):
+ modlist.append((ldap.MOD_ADD, name, value))
+
+ return modlist
+
+ def delete(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ for value in norm_values:
+ if self._is_value_present(name, value):
+ modlist.append((ldap.MOD_DELETE, name, value))
+
+ return modlist
+
+ def exact(self):
+ modlist = []
+ for name, values in self.module.params['attributes'].items():
+ norm_values = self._normalize_values(values)
+ try:
+ results = self.connection.search_s(
+ self.dn, ldap.SCOPE_BASE, attrlist=[name])
+ except ldap.LDAPError as e:
+ self.fail("Cannot search for attribute %s" % name, e)
+
+ current = results[0][1].get(name, [])
+
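+            # Compare as sets: add when the attribute is missing entirely,
+            # delete all values when the desired set is empty, and replace
+            # the whole value list otherwise.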
+ if frozenset(norm_values) != frozenset(current):
+ if len(current) == 0:
+ modlist.append((ldap.MOD_ADD, name, norm_values))
+ elif len(norm_values) == 0:
+ modlist.append((ldap.MOD_DELETE, name, None))
+ else:
+ modlist.append((ldap.MOD_REPLACE, name, norm_values))
+
+ return modlist
+
+ def _is_value_present(self, name, value):
+ """ True if the target attribute has the given value. """
+ try:
+ escaped_value = ldap.filter.escape_filter_chars(to_text(value))
+ filterstr = "(%s=%s)" % (name, escaped_value)
+ dns = self.connection.search_s(self.dn, ldap.SCOPE_BASE, filterstr)
+ is_present = len(dns) == 1
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+
+ return is_present
+
+ def _is_value_absent(self, name, value):
+ """ True if the target attribute doesn't have the given value. """
+ return not self._is_value_present(name, value)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(type='dict', required=True),
+ ordered=dict(type='bool', default=False, required=False),
+ state=dict(type='str', default='present', choices=['absent', 'exact', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+    # Instantiate the LdapAttrs object
+ ldap = LdapAttrs(module)
+
+ state = module.params['state']
+
+ # Perform action
+ if state == 'present':
+ modlist = ldap.add()
+ elif state == 'absent':
+ modlist = ldap.delete()
+ elif state == 'exact':
+ modlist = ldap.exact()
+
+ changed = False
+
+ if len(modlist) > 0:
+ changed = True
+
+ if not module.check_mode:
+ try:
+ ldap.connection.modify_s(ldap.dn, modlist)
+ except Exception as e:
+ module.fail_json(msg="Attribute action failed.", details=to_native(e))
+
+ module.exit_json(changed=changed, modlist=modlist)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ldap_entry.py b/ansible_collections/community/general/plugins/modules/ldap_entry.py
new file mode 100644
index 000000000..619bbf927
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ldap_entry.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright (c) 2016, Jiri Tyr <jiri.tyr@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_entry
+short_description: Add or remove LDAP entries
+description:
+ - Add or remove LDAP entries. This module only asserts the existence or
+ non-existence of an LDAP entry, not its attributes. To assert the
+ attribute values of an entry, see M(community.general.ldap_attrs).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Jiri Tyr (@jtyr)
+requirements:
+ - python-ldap
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ attributes:
+ description:
+ - If I(state=present), attributes necessary to create an entry. Existing
+ entries are never modified. To assert specific attribute values on an
+      existing entry, use the M(community.general.ldap_attrs) module instead.
+ - Each attribute value can be a string for single-valued attributes or
+ a list of strings for multi-valued attributes.
+ - If you specify values for this option in YAML, please note that you can improve
+ readability for long string values by using YAML block modifiers as seen in the
+ examples for this module.
+ - Note that when using values that YAML/ansible-core interprets as other types,
+ like C(yes), C(no) (booleans), or C(2.10) (float), make sure to quote them if
+ these are meant to be strings. Otherwise the wrong values may be sent to LDAP.
+ type: dict
+ default: {}
+ objectClass:
+ description:
+ - If I(state=present), value or list of values to use when creating
+ the entry. It can either be a string or an actual list of
+ strings.
+ type: list
+ elements: str
+ state:
+ description:
+ - The target state of the entry.
+ choices: [present, absent]
+ default: present
+ type: str
+ recursive:
+ description:
+      - If I(state=absent), a flag indicating whether a single entry or the
+ whole branch must be deleted.
+ type: bool
+ default: false
+ version_added: 4.6.0
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+ - community.general.attributes
+
+'''
+
+
+EXAMPLES = """
+- name: Make sure we have a parent entry for users
+ community.general.ldap_entry:
+ dn: ou=users,dc=example,dc=com
+ objectClass: organizationalUnit
+
+- name: Make sure we have an admin user
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP administrator
+ userPassword: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
+
+- name: Set possible values for attributes elements
+ community.general.ldap_entry:
+ dn: cn=admin,dc=example,dc=com
+ objectClass:
+ - simpleSecurityObject
+ - organizationalRole
+ attributes:
+ description: An LDAP Administrator
+ roleOccupant:
+ - cn=Chocs Puddington,ou=Information Technology,dc=example,dc=com
+ - cn=Alice Stronginthebrain,ou=Information Technology,dc=example,dc=com
+ olcAccess:
+ - >-
+ {0}to attrs=userPassword,shadowLastChange
+ by self write
+ by anonymous auth
+ by dn="cn=admin,dc=example,dc=com" write
+        by * none
+ - >-
+ {1}to dn.base="dc=example,dc=com"
+ by dn="cn=admin,dc=example,dc=com" write
+ by * read
+
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ server_uri: ldap://localhost/
+ bind_dn: cn=admin,dc=example,dc=com
+ bind_pw: password
+
+#
+# The same as in the previous example but with the authentication details
+# stored in the ldap_auth variable:
+#
+# ldap_auth:
+# server_uri: ldap://localhost/
+# bind_dn: cn=admin,dc=example,dc=com
+# bind_pw: password
+#
+# In the example below, 'args' is a task keyword, passed at the same level as the module
+- name: Get rid of an old entry
+ community.general.ldap_entry:
+ dn: ou=stuff,dc=example,dc=com
+ state: absent
+ args: "{{ ldap_auth }}"
+"""
+
+
+RETURN = """
+# Default return values
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap.modlist
+ import ldap.controls
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapEntry(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.state = self.module.params['state']
+ self.recursive = self.module.params['recursive']
+
+ # Add the objectClass into the list of attributes
+ self.module.params['attributes']['objectClass'] = (
+ self.module.params['objectClass'])
+
+ # Load attributes
+ if self.state == 'present':
+ self.attrs = self._load_attrs()
+
+ def _load_attrs(self):
+ """ Turn attribute's value to array. """
+ attrs = {}
+
+ for name, value in self.module.params['attributes'].items():
+ if isinstance(value, list):
+ attrs[name] = list(map(to_bytes, value))
+ else:
+ attrs[name] = [to_bytes(value)]
+
+ return attrs
+
+ def add(self):
+ """ If self.dn does not exist, returns a callable that will add it. """
+ def _add():
+ self.connection.add_s(self.dn, modlist)
+
+ if not self._is_entry_present():
+ modlist = ldap.modlist.addModlist(self.attrs)
+ action = _add
+ else:
+ action = None
+
+ return action
+
+ def delete(self):
+ """ If self.dn exists, returns a callable that will delete either
+ the item itself if the recursive option is not set or the whole branch
+ if it is. """
+ def _delete():
+ self.connection.delete_s(self.dn)
+
+ def _delete_recursive():
+ """ Attempt recurive deletion using the subtree-delete control.
+ If that fails, do it manually. """
+ try:
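+                # 1.2.840.113556.1.4.805 is the LDAP Tree Delete control OID;
+                # servers that support it delete the whole subtree in one call.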
+ subtree_delete = ldap.controls.ValueLessRequestControl('1.2.840.113556.1.4.805')
+ self.connection.delete_ext_s(self.dn, serverctrls=[subtree_delete])
+ except ldap.NOT_ALLOWED_ON_NONLEAF:
+ search = self.connection.search_s(self.dn, ldap.SCOPE_SUBTREE, attrlist=('dn',))
+ search.reverse()
+ for entry in search:
+ self.connection.delete_s(entry[0])
+
+ if self._is_entry_present():
+ if self.recursive:
+ action = _delete_recursive
+ else:
+ action = _delete
+ else:
+ action = None
+
+ return action
+
+ def _is_entry_present(self):
+ try:
+ self.connection.search_s(self.dn, ldap.SCOPE_BASE)
+ except ldap.NO_SUCH_OBJECT:
+ is_present = False
+ else:
+ is_present = True
+
+ return is_present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ attributes=dict(default={}, type='dict'),
+ objectClass=dict(type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ recursive=dict(default=False, type='bool'),
+ ),
+ required_if=[('state', 'present', ['objectClass'])],
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ state = module.params['state']
+
+ # Instantiate the LdapEntry object
+ ldap = LdapEntry(module)
+
+ # Get the action function
+ if state == 'present':
+ action = ldap.add()
+ elif state == 'absent':
+ action = ldap.delete()
+
+ # Perform the action
+ if action is not None and not module.check_mode:
+ try:
+ action()
+ except Exception as e:
+ module.fail_json(msg="Entry action failed.", details=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=(action is not None))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ldap_passwd.py b/ansible_collections/community/general/plugins/modules/ldap_passwd.py
new file mode 100644
index 000000000..f47fa330e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ldap_passwd.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018, Keller Fuchs <kellerfuchs@hashbang.sh>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ldap_passwd
+short_description: Set passwords in LDAP
+description:
+ - Set a password for an LDAP entry. This module only asserts that
+ a given password is valid for a given entry. To assert the
+ existence of an entry, see M(community.general.ldap_entry).
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a cn=peercred,cn=external,cn=auth ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Keller Fuchs (@KellerFuchs)
+requirements:
+ - python-ldap
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ passwd:
+ description:
+ - The (plaintext) password to be set for I(dn).
+ type: str
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+ - community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Set a password for the admin user
+ community.general.ldap_passwd:
+ dn: cn=admin,dc=example,dc=com
+ passwd: "{{ vault_secret }}"
+
+- name: Setting passwords in bulk
+ community.general.ldap_passwd:
+ dn: "{{ item.key }}"
+ passwd: "{{ item.value }}"
+ with_dict:
+ alice: alice123123
+ bob: "|30b!"
+ admin: "{{ vault_secret }}"
+"""
+
+RETURN = """
+modlist:
+ description: list of modified parameters
+ returned: success
+ type: list
+ sample:
+ - [2, "olcRootDN", ["cn=root,dc=example,dc=com"]]
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+class LdapPasswd(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ # Shortcuts
+ self.passwd = self.module.params['passwd']
+
+ def passwd_check(self):
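+        # Try a simple bind as the target DN with the candidate password:
+        # INVALID_CREDENTIALS means the stored password differs (return True,
+        # a change is needed), while a successful bind means it already matches.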
+ try:
+ tmp_con = ldap.initialize(self.server_uri)
+ except ldap.LDAPError as e:
+ self.fail("Cannot initialize LDAP connection", e)
+
+ if self.start_tls:
+ try:
+ tmp_con.start_tls_s()
+ except ldap.LDAPError as e:
+ self.fail("Cannot start TLS.", e)
+
+ try:
+ tmp_con.simple_bind_s(self.dn, self.passwd)
+ except ldap.INVALID_CREDENTIALS:
+ return True
+ except ldap.LDAPError as e:
+ self.fail("Cannot bind to the server.", e)
+ else:
+ return False
+ finally:
+ tmp_con.unbind()
+
+ def passwd_set(self):
+ # Exit early if the password is already valid
+ if not self.passwd_check():
+ return False
+
+ # Change the password (or throw an exception)
+ try:
+ self.connection.passwd_s(self.dn, None, self.passwd)
+ except ldap.LDAPError as e:
+ self.fail("Unable to set password", e)
+
+ # Password successfully changed
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(passwd=dict(no_log=True)),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ ldap = LdapPasswd(module)
+
+ if module.check_mode:
+ module.exit_json(changed=ldap.passwd_check())
+
+ module.exit_json(changed=ldap.passwd_set())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ldap_search.py b/ansible_collections/community/general/plugins/modules/ldap_search.py
new file mode 100644
index 000000000..ad79a2d73
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ldap_search.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Peter Sagerson <psagers@ignorare.net>
+# Copyright (c) 2020, Sebastian Pfahl <eryx@gmx.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: ldap_search
+version_added: '0.2.0'
+short_description: Search for entries in an LDAP server
+description:
+ - Return the results of an LDAP search.
+notes:
+ - The default authentication settings will attempt to use a SASL EXTERNAL
+ bind over a UNIX domain socket. This works well with the default Ubuntu
+ install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL
+ rule allowing root to modify the server configuration. If you need to use
+ a simple bind to access your server, pass the credentials in I(bind_dn)
+ and I(bind_pw).
+author:
+ - Sebastian Pfahl (@eryx12o45)
+requirements:
+ - python-ldap
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ dn:
+ required: true
+ type: str
+ description:
+ - The LDAP DN to search in.
+ scope:
+ choices: [base, onelevel, subordinate, children]
+ default: base
+ type: str
+ description:
+ - The LDAP scope to use.
+ filter:
+ default: '(objectClass=*)'
+ type: str
+ description:
+ - Used for filtering the LDAP search result.
+ attrs:
+ type: list
+ elements: str
+ description:
+ - A list of attributes for limiting the result. Use an
+ actual list or a comma-separated string.
+ schema:
+ default: false
+ type: bool
+ description:
+ - Set to C(true) to return the full attribute schema of entries, not
+ their attribute values. Overrides I(attrs) when provided.
+extends_documentation_fragment:
+ - community.general.ldap.documentation
+ - community.general.attributes
+"""
+
+EXAMPLES = r"""
+- name: Return all entries within the 'groups' organizational unit.
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ register: ldap_groups
+
+- name: Return GIDs for all groups
+ community.general.ldap_search:
+ dn: "ou=groups,dc=example,dc=com"
+ scope: "onelevel"
+ attrs:
+ - "gidNumber"
+ register: ldap_group_gids
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.ldap import LdapGeneric, gen_specs
+
+LDAP_IMP_ERR = None
+try:
+ import ldap
+
+ HAS_LDAP = True
+except ImportError:
+ LDAP_IMP_ERR = traceback.format_exc()
+ HAS_LDAP = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=gen_specs(
+ dn=dict(type='str', required=True),
+ scope=dict(type='str', default='base', choices=['base', 'onelevel', 'subordinate', 'children']),
+ filter=dict(type='str', default='(objectClass=*)'),
+ attrs=dict(type='list', elements='str'),
+ schema=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_LDAP:
+ module.fail_json(msg=missing_required_lib('python-ldap'),
+ exception=LDAP_IMP_ERR)
+
+ try:
+ LdapSearch(module).main()
+ except Exception as exception:
+ module.fail_json(msg="Attribute action failed.", details=to_native(exception))
+
+ module.exit_json(changed=False)
+
+
+def _extract_entry(dn, attrs):
+ extracted = {'dn': dn}
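+    # Collapse single-valued attributes to a scalar for friendlier output.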
+ for attr, val in list(attrs.items()):
+ if len(val) == 1:
+ extracted[attr] = val[0]
+ else:
+ extracted[attr] = val
+ return extracted
+
+
+class LdapSearch(LdapGeneric):
+ def __init__(self, module):
+ LdapGeneric.__init__(self, module)
+
+ self.filterstr = self.module.params['filter']
+ self.attrlist = []
+ self._load_scope()
+ self._load_attrs()
+ self._load_schema()
+
+ def _load_schema(self):
+ self.schema = self.module.boolean(self.module.params['schema'])
+ if self.schema:
+ self.attrsonly = 1
+ else:
+ self.attrsonly = 0
+
+ def _load_scope(self):
+ spec = dict(
+ base=ldap.SCOPE_BASE,
+ onelevel=ldap.SCOPE_ONELEVEL,
+ subordinate=ldap.SCOPE_SUBORDINATE,
+ children=ldap.SCOPE_SUBTREE,
+ )
+ self.scope = spec[self.module.params['scope']]
+
+ def _load_attrs(self):
+ self.attrlist = self.module.params['attrs'] or None
+
+ def main(self):
+ results = self.perform_search()
+ self.module.exit_json(changed=False, results=results)
+
+ def perform_search(self):
+ try:
+ results = self.connection.search_s(
+ self.dn,
+ self.scope,
+ filterstr=self.filterstr,
+ attrlist=self.attrlist,
+ attrsonly=self.attrsonly
+ )
+ ldap_entries = []
+ for result in results:
+ if isinstance(result[1], dict):
+ if self.schema:
+ ldap_entries.append(dict(dn=result[0], attrs=list(result[1].keys())))
+ else:
+ ldap_entries.append(_extract_entry(result[0], result[1]))
+ return ldap_entries
+ except ldap.NO_SUCH_OBJECT:
+ self.module.fail_json(msg="Base not found: {0}".format(self.dn))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/librato_annotation.py b/ansible_collections/community/general/plugins/modules/librato_annotation.py
new file mode 100644
index 000000000..ebfb75154
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/librato_annotation.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Seth Edwards, 2014
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: Create an annotation in librato
+description:
+  - Create an annotation event on the given annotation stream I(name). If the annotation stream does not exist, it will be created automatically.
+author: "Seth Edwards (@Sedward)"
+requirements: []
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ user:
+ type: str
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ type: str
+ description:
+ - Librato account api key
+ required: true
+ name:
+ type: str
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ type: str
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ type: str
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ type: str
+ description:
+ - The description contains extra metadata about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ type: int
+ description:
+ - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ type: list
+ elements: dict
+ description:
+ - See examples
+'''
+
+EXAMPLES = '''
+- name: Create a simple annotation event with a source
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: App Config Change
+ source: foo.bar
+ description: This is a detailed description of the config change
+
+- name: Create an annotation that includes a link
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: code.deploy
+ title: app code deploy
+ description: this is a detailed description of a deployment
+ links:
+ - rel: example
+ href: http://www.example.com/deploy
+
+- name: Create an annotation with a start_time and end_time
+ community.general.librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: maintenance
+ title: Maintenance window
+ description: This is a detailed description of maintenance
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def post_annotation(module):
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] is not None:
+ params['source'] = module.params['source']
+ if module.params['description'] is not None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] is not None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] is not None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] is not None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+
+    # Hack: send parameters the way fetch_url wants them
+ module.params['url_username'] = user
+ module.params['url_password'] = api_key
+ response, info = fetch_url(module, url, data=json_body, headers=headers)
+ response_code = str(info['status'])
+ response_body = info['body']
+ if info['status'] != 201:
+ if info['status'] >= 400:
+ module.fail_json(msg="Request Failed. Response code: " + response_code + " Response body: " + response_body)
+ else:
+ module.fail_json(msg="Request Failed. Response code: " + response_code)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ name=dict(required=False),
+ title=dict(required=True),
+ source=dict(required=False),
+ description=dict(required=False),
+ start_time=dict(required=False, default=None, type='int'),
+ end_time=dict(required=False, default=None, type='int'),
+ links=dict(type='list', elements='dict')
+ )
+ )
+
+ post_annotation(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/linode.py b/ansible_collections/community/general/plugins/modules/linode.py
new file mode 100644
index 000000000..404e7a393
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/linode.py
@@ -0,0 +1,691 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode
+short_description: Manage instances on the Linode Public Cloud
+description:
+  - Manage Linode Public Cloud instances and optionally wait for them to be 'running'.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: [ absent, active, deleted, present, restarted, started, stopped ]
+ default: present
+ type: str
+ api_key:
+ description:
+ - Linode API key.
+ - C(LINODE_API_KEY) env variable can be used instead.
+ type: str
+ required: true
+ name:
+ description:
+ - Name to give the instance (alphanumeric, dashes, underscore).
+ - To keep sanity on the Linode Web Console, name is prepended with C(LinodeID-).
+ required: true
+ type: str
+ displaygroup:
+ description:
+ - Add the instance to a Display Group in Linode Manager.
+ type: str
+ default: ''
+ linode_id:
+ description:
+ - Unique ID of a linode server. This value is read-only in the sense that
+ if you specify it on creation of a Linode it will not be used. The
+        Linode API generates these IDs and we can use those generated values here to
+ reference a Linode more specifically. This is useful for idempotence.
+ aliases: [ lid ]
+ type: int
+ additional_disks:
+ description:
+ - List of dictionaries for creating additional disks that are added to the Linode configuration settings.
+ - Dictionary takes Size, Label, Type. Size is in MB.
+ type: list
+ elements: dict
+ alert_bwin_enabled:
+ description:
+ - Set status of bandwidth in alerts.
+ type: bool
+ alert_bwin_threshold:
+ description:
+ - Set threshold in MB of bandwidth in alerts.
+ type: int
+ alert_bwout_enabled:
+ description:
+ - Set status of bandwidth out alerts.
+ type: bool
+ alert_bwout_threshold:
+ description:
+ - Set threshold in MB of bandwidth out alerts.
+ type: int
+ alert_bwquota_enabled:
+ description:
+ - Set status of bandwidth quota alerts as percentage of network transfer quota.
+ type: bool
+ alert_bwquota_threshold:
+ description:
+ - Set threshold in MB of bandwidth quota alerts.
+ type: int
+ alert_cpu_enabled:
+ description:
+ - Set status of receiving CPU usage alerts.
+ type: bool
+ alert_cpu_threshold:
+ description:
+ - Set percentage threshold for receiving CPU usage alerts. Each CPU core adds 100% to total.
+ type: int
+ alert_diskio_enabled:
+ description:
+ - Set status of receiving disk IO alerts.
+ type: bool
+ alert_diskio_threshold:
+ description:
+ - Set threshold for average IO ops/sec over 2 hour period.
+ type: int
+ backupweeklyday:
+ description:
+ - Day of the week to take backups.
+ type: int
+ backupwindow:
+ description:
+ - The time window in which backups will be taken.
+ type: int
+ plan:
+ description:
+ - plan to use for the instance (Linode plan)
+ type: int
+ payment_term:
+ description:
+ - payment term to use for the instance (payment term in months)
+ default: 1
+ choices: [ 1, 12, 24 ]
+ type: int
+ password:
+ description:
+ - root password to apply to a new server (auto generated if missing)
+ type: str
+ private_ip:
+ description:
+ - Add private IPv4 address when Linode is created.
+ - Default is C(false).
+ type: bool
+ ssh_pub_key:
+ description:
+ - SSH public key applied to root user
+ type: str
+ swap:
+ description:
+ - swap size in MB
+ default: 512
+ type: int
+ distribution:
+ description:
+ - distribution to use for the instance (Linode Distribution)
+ type: int
+ datacenter:
+ description:
+ - datacenter to create an instance in (Linode Datacenter)
+ type: int
+ kernel_id:
+ description:
+ - kernel to use for the instance (Linode Kernel)
+ type: int
+ wait:
+ description:
+ - wait for the instance to be in state C(running) before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+ type: int
+ watchdog:
+ description:
+ - Set status of Lassie watchdog.
+ type: bool
+ default: true
+requirements:
+ - python >= 2.6
+ - linode-python
+author:
+- Vincent Viallet (@zbal)
+notes:
+ - Please note, linode-python does not have python 3 support.
+ - This module uses the now deprecated v3 of the Linode API.
+ - Please review U(https://www.linode.com/api/linode) for determining the required parameters.
+'''
+
+EXAMPLES = '''
+
+- name: Create a new Linode
+ community.general.linode:
+ name: linode-test1
+ plan: 1
+ datacenter: 7
+ distribution: 129
+ state: present
+ register: linode_creation
+
+- name: Create a server with a private IP Address
+ community.general.linode:
+ module: linode
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ private_ip: true
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Fully configure new server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 4
+ datacenter: 2
+ distribution: 99
+ kernel_id: 138
+ password: 'superSecureRootPassword'
+ private_ip: true
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
+ alert_bwquota_enabled: true
+ alert_bwquota_threshold: 80
+ alert_bwin_enabled: true
+ alert_bwin_threshold: 10
+ alert_cpu_enabled: true
+ alert_cpu_threshold: 210
+ alert_bwout_enabled: true
+ alert_bwout_threshold: 10
+ alert_diskio_enabled: true
+ alert_diskio_threshold: 10000
+ backupweeklyday: 1
+ backupwindow: 2
+ displaygroup: 'test'
+ additional_disks:
+ - {Label: 'disk1', Size: 2500, Type: 'raw'}
+ - {Label: 'newdisk', Size: 2000}
+ watchdog: true
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Ensure a running server (create if missing)
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ plan: 1
+ datacenter: 2
+ distribution: 99
+ password: 'superSecureRootPassword'
+ ssh_pub_key: 'ssh-rsa qwerty'
+ swap: 768
+ wait: true
+ wait_timeout: 600
+ state: present
+ delegate_to: localhost
+ register: linode_creation
+
+- name: Delete a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: absent
+ delegate_to: localhost
+
+- name: Stop a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: stopped
+ delegate_to: localhost
+
+- name: Reboot a server
+ community.general.linode:
+ api_key: 'longStringFromLinodeApi'
+ name: linode-test1
+ linode_id: "{{ linode_creation.instance.id }}"
+ state: restarted
+ delegate_to: localhost
+'''
+
+import time
+import traceback
+
+LINODE_IMP_ERR = None
+try:
+ from linode import api as linode_api
+ HAS_LINODE = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
+
+
+def randompass():
+ '''
+    Generate a long random password that complies with Linode requirements
+ '''
+ # Linode API currently requires the following:
+ # It must contain at least two of these four character classes:
+ # lower case letters - upper case letters - numbers - punctuation
+ # we play it safe :)
+ import random
+ import string
+ # as of python 2.4, this reseeds the PRNG from urandom
+ random.seed()
+ lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
+ upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
+ number = ''.join(random.choice(string.digits) for x in range(6))
+ punct = ''.join(random.choice(string.punctuation) for x in range(6))
+ p = lower + upper + number + punct
+ return ''.join(random.sample(p, len(p)))
+
+
+def getInstanceDetails(api, server):
+ '''
+ Return the details of an instance, populating IPs, etc.
+ '''
+ instance = {'id': server['LINODEID'],
+ 'name': server['LABEL'],
+ 'public': [],
+ 'private': []}
+
+ # Populate with ips
+ for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
+ if ip['ISPUBLIC'] and 'ipv4' not in instance:
+ instance['ipv4'] = ip['IPADDRESS']
+ instance['fqdn'] = ip['RDNS_NAME']
+ if ip['ISPUBLIC']:
+ instance['public'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ else:
+ instance['private'].append({'ipv4': ip['IPADDRESS'],
+ 'fqdn': ip['RDNS_NAME'],
+ 'ip_id': ip['IPADDRESSID']})
+ return instance
+
+
+def linodeServers(module, api, state, name,
+ displaygroup, plan, additional_disks, distribution,
+ datacenter, kernel_id, linode_id, payment_term, password,
+ private_ip, ssh_pub_key, swap, wait, wait_timeout, watchdog, **kwargs):
+ instances = []
+ changed = False
+ new_server = False
+ servers = []
+ disks = []
+ configs = []
+ jobs = []
+
+ # See if we can match an existing server details with the provided linode_id
+ if linode_id:
+ # For the moment we only consider linode_id as criteria for match
+ # Later we can use more (size, name, etc.) and update existing
+ servers = api.linode_list(LinodeId=linode_id)
+ # Attempt to fetch details about disks and configs only if servers are
+ # found with linode_id
+ if servers:
+ disks = api.linode_disk_list(LinodeId=linode_id)
+ configs = api.linode_config_list(LinodeId=linode_id)
+
+ # Act on the state
+ if state in ('active', 'present', 'started'):
+ # TODO: validate all the plan / distribution / datacenter are valid
+
+ # Multi step process/validation:
+ # - need linode_id (entity)
+ # - need disk_id for linode_id - create disk from distrib
+ # - need config_id for linode_id - create config (need kernel)
+
+ # Any create step triggers a job that need to be waited for.
+ if not servers:
+            for arg_name, arg in (('name', name), ('plan', plan), ('distribution', distribution), ('datacenter', datacenter)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+ # Create linode entity
+ new_server = True
+
+ # Get size of all individually listed disks to subtract from Distribution disk
+ used_disk_space = 0 if additional_disks is None else sum(disk['Size'] for disk in additional_disks)
+
+ try:
+ res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
+ PaymentTerm=payment_term)
+ linode_id = res['LinodeID']
+ # Update linode Label to match name
+ api.linode_update(LinodeId=linode_id, Label='%s-%s' % (linode_id, name))
+ # Update Linode with Ansible configuration options
+ api.linode_update(LinodeId=linode_id, LPM_DISPLAYGROUP=displaygroup, WATCHDOG=watchdog, **kwargs)
+ # Save server
+ servers = api.linode_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'])
+
+ # Add private IP to Linode
+ if private_ip:
+ try:
+ res = api.linode_ip_addprivate(LinodeID=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+
+ if not disks:
+            for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+                if not arg:
+                    module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+ # Create disks (1 from distrib, 1 for SWAP)
+ new_server = True
+ try:
+ if not password:
+ # Password is required on creation, if not provided generate one
+ password = randompass()
+ if not swap:
+ swap = 512
+ # Create data disk
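+                # The root disk gets the plan's total storage minus swap and
+                # any individually listed additional disks.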
+ size = servers[0]['TOTALHD'] - used_disk_space - swap
+
+ if ssh_pub_key:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password, rootSSHKey=ssh_pub_key,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ else:
+ res = api.linode_disk_createfromdistribution(
+ LinodeId=linode_id, DistributionID=distribution,
+ rootPass=password,
+ Label='%s data disk (lid: %s)' % (name, linode_id),
+ Size=size)
+ jobs.append(res['JobID'])
+ # Create SWAP disk
+ res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
+ Label='%s swap disk (lid: %s)' % (name, linode_id),
+ Size=swap)
+ # Create individually listed disks at specified size
+ if additional_disks:
+ for disk in additional_disks:
+ # If a disk Type is not passed in, default to ext4
+ if disk.get('Type') is None:
+ disk['Type'] = 'ext4'
+ res = api.linode_disk_create(LinodeID=linode_id, Label=disk['Label'], Size=disk['Size'], Type=disk['Type'])
+
+ jobs.append(res['JobID'])
+ except Exception as e:
+ # TODO: destroy linode ?
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+
+ if not configs:
+ for arg_name, arg in (('name', name), ('linode_id', linode_id), ('distribution', distribution)):
+ if not arg:
+ module.fail_json(msg='%s is required for %s state' % (arg_name, state))
+
+ # Check architecture
+ for distrib in api.avail_distributions():
+ if distrib['DISTRIBUTIONID'] != distribution:
+ continue
+ arch = '32'
+ if distrib['IS64BIT']:
+ arch = '64'
+ break
+
+ # Get latest kernel matching arch if kernel_id is not specified
+ if not kernel_id:
+ for kernel in api.avail_kernels():
+ if not kernel['LABEL'].startswith('Latest %s' % arch):
+ continue
+ kernel_id = kernel['KERNELID']
+ break
+
+ # Get disk list
+ disks_id = []
+ for disk in api.linode_disk_list(LinodeId=linode_id):
+ if disk['TYPE'] == 'ext3':
+ disks_id.insert(0, str(disk['DISKID']))
+ continue
+ disks_id.append(str(disk['DISKID']))
+ # Pad the list to the nine entries the DiskList parameter expects
+ while len(disks_id) < 9:
+ disks_id.append('')
+ disks_list = ','.join(disks_id)
+
+ # Create config
+ new_server = True
+ try:
+ api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
+ Disklist=disks_list, Label='%s config' % name)
+ configs = api.linode_config_list(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+
+ # Start / Ensure servers are running
+ for server in servers:
+ # Refresh server state
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # Ensure existing servers are up and running, boot if necessary
+ if server['STATUS'] != 1:
+ res = api.linode_boot(LinodeId=linode_id)
+ jobs.append(res['JobID'])
+ changed = True
+
+ # wait here until the instances are up
+ wait_timeout = time.time() + wait_timeout
+ while wait and wait_timeout > time.time():
+ # refresh the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ # status:
+ # -2: Boot failed
+ # 1: Running
+ if server['STATUS'] in (-2, 1):
+ break
+ time.sleep(5)
+ if wait and wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg='Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID']))
+ # Get a fresh copy of the server details
+ server = api.linode_list(LinodeId=server['LINODEID'])[0]
+ if server['STATUS'] == -2:
+ module.fail_json(msg='%s (lid: %s) failed to boot' %
+ (server['LABEL'], server['LINODEID']))
+ # From now on we know the task is a success
+ # Build instance report
+ instance = getInstanceDetails(api, server)
+ # Depending on the wait flag, select the status
+ if wait:
+ instance['status'] = 'Running'
+ else:
+ instance['status'] = 'Starting'
+
+ # Return the root password if this is a new box and no SSH key
+ # has been provided
+ if new_server and not ssh_pub_key:
+ instance['password'] = password
+ instances.append(instance)
+
+ elif state in ('stopped',):
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ if server['STATUS'] != 2:
+ try:
+ res = api.linode_shutdown(LinodeId=linode_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+ instance['status'] = 'Stopping'
+ changed = True
+ else:
+ instance['status'] = 'Stopped'
+ instances.append(instance)
+
+ elif state in ('restarted',):
+ if not servers:
+ module.fail_json(msg='Server (lid: %s) not found' % (linode_id))
+
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ res = api.linode_reboot(LinodeId=server['LINODEID'])
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+ instance['status'] = 'Restarting'
+ changed = True
+ instances.append(instance)
+
+ elif state in ('absent', 'deleted'):
+ for server in servers:
+ instance = getInstanceDetails(api, server)
+ try:
+ api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+ instance['status'] = 'Deleting'
+ changed = True
+ instances.append(instance)
+
+ # Ease parsing if only 1 instance
+ if len(instances) == 1:
+ module.exit_json(changed=changed, instance=instances[0])
+
+ module.exit_json(changed=changed, instances=instances)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'active', 'deleted', 'present', 'restarted', 'started', 'stopped']),
+ api_key=dict(type='str', no_log=True, required=True, fallback=(env_fallback, ['LINODE_API_KEY'])),
+ name=dict(type='str', required=True),
+ alert_bwin_enabled=dict(type='bool'),
+ alert_bwin_threshold=dict(type='int'),
+ alert_bwout_enabled=dict(type='bool'),
+ alert_bwout_threshold=dict(type='int'),
+ alert_bwquota_enabled=dict(type='bool'),
+ alert_bwquota_threshold=dict(type='int'),
+ alert_cpu_enabled=dict(type='bool'),
+ alert_cpu_threshold=dict(type='int'),
+ alert_diskio_enabled=dict(type='bool'),
+ alert_diskio_threshold=dict(type='int'),
+ backupweeklyday=dict(type='int'),
+ backupwindow=dict(type='int'),
+ displaygroup=dict(type='str', default=''),
+ plan=dict(type='int'),
+ additional_disks=dict(type='list', elements='dict'),
+ distribution=dict(type='int'),
+ datacenter=dict(type='int'),
+ kernel_id=dict(type='int'),
+ linode_id=dict(type='int', aliases=['lid']),
+ payment_term=dict(type='int', default=1, choices=[1, 12, 24]),
+ password=dict(type='str', no_log=True),
+ private_ip=dict(type='bool'),
+ ssh_pub_key=dict(type='str'),
+ swap=dict(type='int', default=512),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=300),
+ watchdog=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ('state', 'restarted', ['linode_id']),
+ ('state', 'stopped', ['linode_id']),
+ ]
+ )
+
+ if not HAS_LINODE:
+ module.fail_json(msg=missing_required_lib('linode-python'), exception=LINODE_IMP_ERR)
+
+ state = module.params.get('state')
+ api_key = module.params.get('api_key')
+ name = module.params.get('name')
+ alert_bwin_enabled = module.params.get('alert_bwin_enabled')
+ alert_bwin_threshold = module.params.get('alert_bwin_threshold')
+ alert_bwout_enabled = module.params.get('alert_bwout_enabled')
+ alert_bwout_threshold = module.params.get('alert_bwout_threshold')
+ alert_bwquota_enabled = module.params.get('alert_bwquota_enabled')
+ alert_bwquota_threshold = module.params.get('alert_bwquota_threshold')
+ alert_cpu_enabled = module.params.get('alert_cpu_enabled')
+ alert_cpu_threshold = module.params.get('alert_cpu_threshold')
+ alert_diskio_enabled = module.params.get('alert_diskio_enabled')
+ alert_diskio_threshold = module.params.get('alert_diskio_threshold')
+ backupweeklyday = module.params.get('backupweeklyday')
+ backupwindow = module.params.get('backupwindow')
+ displaygroup = module.params.get('displaygroup')
+ plan = module.params.get('plan')
+ additional_disks = module.params.get('additional_disks')
+ distribution = module.params.get('distribution')
+ datacenter = module.params.get('datacenter')
+ kernel_id = module.params.get('kernel_id')
+ linode_id = module.params.get('linode_id')
+ payment_term = module.params.get('payment_term')
+ password = module.params.get('password')
+ private_ip = module.params.get('private_ip')
+ ssh_pub_key = module.params.get('ssh_pub_key')
+ swap = module.params.get('swap')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ watchdog = int(module.params.get('watchdog'))
+
+ check_items = dict(
+ alert_bwin_enabled=alert_bwin_enabled,
+ alert_bwin_threshold=alert_bwin_threshold,
+ alert_bwout_enabled=alert_bwout_enabled,
+ alert_bwout_threshold=alert_bwout_threshold,
+ alert_bwquota_enabled=alert_bwquota_enabled,
+ alert_bwquota_threshold=alert_bwquota_threshold,
+ alert_cpu_enabled=alert_cpu_enabled,
+ alert_cpu_threshold=alert_cpu_threshold,
+ alert_diskio_enabled=alert_diskio_enabled,
+ alert_diskio_threshold=alert_diskio_threshold,
+ backupweeklyday=backupweeklyday,
+ backupwindow=backupwindow,
+ )
+
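+ # Pass along only the alert/backup options the user actually set, so that
+ # unset options are not sent to the API as None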
+ kwargs = dict((k, v) for k, v in check_items.items() if v is not None)
+
+ # setup the auth
+ try:
+ api = linode_api.Api(api_key)
+ api.test_echo()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.value[0]['ERRORMESSAGE'], exception=traceback.format_exc())
+
+ linodeServers(module, api, state, name,
+ displaygroup, plan,
+ additional_disks, distribution, datacenter, kernel_id, linode_id,
+ payment_term, password, private_ip, ssh_pub_key, swap, wait,
+ wait_timeout, watchdog, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/linode_v4.py b/ansible_collections/community/general/plugins/modules/linode_v4.py
new file mode 100644
index 000000000..f213af125
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/linode_v4.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: linode_v4
+short_description: Manage instances on the Linode cloud
+description: Manage instances on the Linode cloud.
+requirements:
+ - python >= 2.7
+ - linode_api4 >= 2.0.0
+author:
+ - Luke Murphy (@decentral1se)
+notes:
+ - No Linode resizing is currently implemented. This module will, in time,
+ replace the current Linode module, which uses deprecated API bindings on
+ the Linode side.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ region:
+ description:
+ - The region of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/regions/).
+ type: str
+ image:
+ description:
+ - The image of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/images/).
+ type: str
+ type:
+ description:
+ - The type of the instance. This is a required parameter only when
+ creating Linode instances. See
+ U(https://www.linode.com/docs/api/linode-types/).
+ type: str
+ label:
+ description:
+ - The instance label. This label is used as the main determiner for
+ idempotence for the module and is therefore mandatory.
+ type: str
+ required: true
+ group:
+ description:
+ - The group that the instance should be marked under. Please note that
+ group labelling is deprecated but still supported. The encouraged
+ method for marking instances is to use tags.
+ type: str
+ private_ip:
+ description:
+ - If C(true), the created Linode will have private networking enabled and
+ assigned a private IPv4 address.
+ type: bool
+ default: false
+ version_added: 3.0.0
+ tags:
+ description:
+ - The tags that the instance should be marked under. See
+ U(https://www.linode.com/docs/api/tags/).
+ type: list
+ elements: str
+ root_pass:
+ description:
+ - The password for the root user. If not specified, one will be
+ generated. This generated password will be available in the task
+ success JSON.
+ type: str
+ authorized_keys:
+ description:
+ - A list of SSH public key parts to deploy for the root user.
+ type: list
+ elements: str
+ state:
+ description:
+ - The desired instance state.
+ type: str
+ choices:
+ - present
+ - absent
+ required: true
+ access_token:
+ description:
+ - The Linode API v4 access token. It may also be specified by exposing
+ the C(LINODE_ACCESS_TOKEN) environment variable. See
+ U(https://www.linode.com/docs/api#access-and-authentication).
+ required: true
+ type: str
+ stackscript_id:
+ description:
+ - The numeric ID of the StackScript to use when creating the instance.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: int
+ version_added: 1.3.0
+ stackscript_data:
+ description:
+ - An object containing arguments to any User Defined Fields present in
+ the StackScript used when creating the instance.
+ Only valid when a stackscript_id is provided.
+ See U(https://www.linode.com/docs/api/stackscripts/).
+ type: dict
+ version_added: 1.3.0
+'''
+
+EXAMPLES = """
+- name: Create a new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ root_pass: passw0rd
+ authorized_keys:
+ - "ssh-rsa ..."
+ stackscript_id: 1337
+ stackscript_data:
+ variable: value
+ state: present
+
+- name: Delete that new Linode.
+ community.general.linode_v4:
+ label: new-linode
+ state: absent
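+
+# A hypothetical example of the private_ip and tags options described above;
+# all values are illustrative.
+- name: Create a Linode with private networking and tags.
+ community.general.linode_v4:
+ label: tagged-linode
+ type: g6-nanode-1
+ region: eu-west
+ image: linode/debian9
+ private_ip: true
+ tags:
+ - production
+ state: present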
+"""
+
+RETURN = """
+instance:
+ description: The instance description in JSON serialized form.
+ returned: Always.
+ type: dict
+ sample: {
+ "root_pass": "foobar", # if auto-generated
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": false,
+ "schedule": {
+ "day": null,
+ "window": null
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": true
+ }
+"""
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+
+LINODE_IMP_ERR = None
+try:
+ from linode_api4 import Instance, LinodeClient
+ HAS_LINODE_DEPENDENCY = True
+except ImportError:
+ LINODE_IMP_ERR = traceback.format_exc()
+ HAS_LINODE_DEPENDENCY = False
+
+
+def create_linode(module, client, **kwargs):
+ """Creates a Linode instance and handles return format."""
+ if kwargs['root_pass'] is None:
+ kwargs.pop('root_pass')
+
+ try:
+ response = client.linode.instance_create(**kwargs)
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
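+ # linode_api4's instance_create() returns a tuple of (Instance, generated
+ # root password) when no root password was supplied, otherwise a single
+ # Instance; handle both shapes below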
+ try:
+ if isinstance(response, tuple):
+ instance, root_pass = response
+ instance_json = instance._raw_json
+ instance_json.update({'root_pass': root_pass})
+ return instance_json
+ else:
+ return response._raw_json
+ except TypeError:
+ module.fail_json(msg='Unable to parse Linode instance creation response. Please raise a bug against this'
+ ' module on https://github.com/ansible-collections/community.general/issues'
+ )
+
+
+def maybe_instance_from_label(module, client):
+ """Try to retrieve an instance based on a label."""
+ try:
+ label = module.params['label']
+ result = client.linode.instances(Instance.label == label)
+ return result[0]
+ except IndexError:
+ return None
+ except Exception as exception:
+ module.fail_json(msg='Unable to query the Linode API. Saw: %s' % exception)
+
+
+def initialise_module():
+ """Initialise the module parameter specification."""
+ return AnsibleModule(
+ argument_spec=dict(
+ label=dict(type='str', required=True),
+ state=dict(
+ type='str',
+ required=True,
+ choices=['present', 'absent']
+ ),
+ access_token=dict(
+ type='str',
+ required=True,
+ no_log=True,
+ fallback=(env_fallback, ['LINODE_ACCESS_TOKEN']),
+ ),
+ authorized_keys=dict(type='list', elements='str', no_log=False),
+ group=dict(type='str'),
+ image=dict(type='str'),
+ private_ip=dict(type='bool', default=False),
+ region=dict(type='str'),
+ root_pass=dict(type='str', no_log=True),
+ tags=dict(type='list', elements='str'),
+ type=dict(type='str'),
+ stackscript_id=dict(type='int'),
+ stackscript_data=dict(type='dict'),
+ ),
+ supports_check_mode=False,
+ required_one_of=(
+ ['state', 'label'],
+ ),
+ required_together=(
+ ['region', 'image', 'type'],
+ )
+ )
+
+
+def build_client(module):
+ """Build a LinodeClient."""
+ return LinodeClient(
+ module.params['access_token'],
+ user_agent=get_user_agent('linode_v4_module')
+ )
+
+
+def main():
+ """Module entrypoint."""
+ module = initialise_module()
+
+ if not HAS_LINODE_DEPENDENCY:
+ module.fail_json(msg=missing_required_lib('linode-api4'), exception=LINODE_IMP_ERR)
+
+ client = build_client(module)
+ instance = maybe_instance_from_label(module, client)
+
+ if module.params['state'] == 'present' and instance is not None:
+ module.exit_json(changed=False, instance=instance._raw_json)
+
+ elif module.params['state'] == 'present' and instance is None:
+ instance_json = create_linode(
+ module, client,
+ authorized_keys=module.params['authorized_keys'],
+ group=module.params['group'],
+ image=module.params['image'],
+ label=module.params['label'],
+ private_ip=module.params['private_ip'],
+ region=module.params['region'],
+ root_pass=module.params['root_pass'],
+ tags=module.params['tags'],
+ ltype=module.params['type'],
+ stackscript=module.params['stackscript_id'],
+ stackscript_data=module.params['stackscript_data'],
+ )
+ module.exit_json(changed=True, instance=instance_json)
+
+ elif module.params['state'] == 'absent' and instance is not None:
+ instance.delete()
+ module.exit_json(changed=True, instance=instance._raw_json)
+
+ elif module.params['state'] == 'absent' and instance is None:
+ module.exit_json(changed=False, instance={})
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/listen_ports_facts.py b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
new file mode 100644
index 000000000..bc630e1d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/listen_ports_facts.py
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017, Nathan Davison <ndavison85@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: listen_ports_facts
+author:
+ - Nathan Davison (@ndavison)
+description:
+ - Gather facts on processes listening on TCP and UDP ports using the C(netstat) or C(ss) commands.
+ - This module currently supports Linux only.
+requirements:
+ - netstat or ss
+short_description: Gather facts on processes listening on TCP and UDP ports
+notes:
+ - |
+ C(ss) returns all processes for each listen address and port.
+ This plugin will return each of them, so multiple entries for the same listen address and port are likely in results.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ command:
+ description:
+ - Override which command to use for fetching listen ports.
+ - 'By default, the module will use the first supported command found on the system (in alphanumerical order).'
+ type: str
+ choices:
+ - netstat
+ - ss
+ version_added: 4.1.0
+ include_non_listening:
+ description:
+ - Show both listening and non-listening sockets (for TCP this means established connections).
+ - Adds the return values C(state) and C(foreign_address) to the returned facts.
+ type: bool
+ default: false
+ version_added: 5.4.0
+'''
+
+EXAMPLES = r'''
+- name: Gather facts on listening ports
+ community.general.listen_ports_facts:
+
+- name: TCP whitelist violation
+ ansible.builtin.debug:
+ msg: TCP port {{ item.port }} by pid {{ item.pid }} violates the whitelist
+ vars:
+ tcp_listen_violations: "{{ ansible_facts.tcp_listen | rejectattr('port', 'in', tcp_whitelist) | list }}"
+ tcp_whitelist:
+ - 22
+ - 25
+ loop: "{{ tcp_listen_violations }}"
+
+- name: List TCP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.tcp_listen | map(attribute='port') | sort | list }}"
+
+- name: List UDP ports
+ ansible.builtin.debug:
+ msg: "{{ ansible_facts.udp_listen | map(attribute='port') | sort | list }}"
+
+- name: List all ports
+ ansible.builtin.debug:
+ msg: "{{ (ansible_facts.tcp_listen + ansible_facts.udp_listen) | map(attribute='port') | unique | sort | list }}"
+
+- name: Gather facts on all ports and override which command to use
+ community.general.listen_ports_facts:
+ command: 'netstat'
+ include_non_listening: true
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Dictionary containing details of TCP and UDP ports with listening servers
+ returned: always
+ type: complex
+ contains:
+ tcp_listen:
+ description: A list of processes that are listening on a TCP port.
+ returned: if TCP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ foreign_address:
+ description: The address of the remote end of the socket.
+ returned: if I(include_non_listening=true)
+ type: str
+ sample: "10.80.0.1"
+ version_added: 5.4.0
+ state:
+ description: The state of the socket.
+ returned: if I(include_non_listening=true)
+ type: str
+ sample: "ESTABLISHED"
+ version_added: 5.4.0
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "mysqld"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 1223
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 3306
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "tcp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "mysql"
+ udp_listen:
+ description: A list of processes that are listening on a UDP port.
+ returned: if UDP servers were found
+ type: list
+ contains:
+ address:
+ description: The address the server is listening on.
+ returned: always
+ type: str
+ sample: "0.0.0.0"
+ foreign_address:
+ description: The address of the remote end of the socket.
+ returned: if I(include_non_listening=true)
+ type: str
+ sample: "10.80.0.1"
+ version_added: 5.4.0
+ state:
+ description: The state of the socket. UDP is a connectionless protocol. Shows UCONN or ESTAB.
+ returned: if I(include_non_listening=true)
+ type: str
+ sample: "UCONN"
+ version_added: 5.4.0
+ name:
+ description: The name of the listening process.
+ returned: if user permissions allow
+ type: str
+ sample: "rsyslogd"
+ pid:
+ description: The pid of the listening process.
+ returned: always
+ type: int
+ sample: 609
+ port:
+ description: The port the server is listening on.
+ returned: always
+ type: int
+ sample: 514
+ protocol:
+ description: The network protocol of the server.
+ returned: always
+ type: str
+ sample: "udp"
+ stime:
+ description: The start time of the listening process.
+ returned: always
+ type: str
+ sample: "Thu Feb 2 13:29:45 2017"
+ user:
+ description: The user who is running the listening process.
+ returned: always
+ type: str
+ sample: "root"
+'''
+
+import re
+import platform
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import AnsibleModule
+
+
+def split_pid_name(pid_name):
+ """
+ Split the entry PID/Program name into the PID (int) and the name (str)
+ :param pid_name: PID/Program string separated by a slash, e.g. 51/sshd: returns pid = 51 and name = sshd
+ :return: PID (int) and the program name (str)
+ """
+ try:
+ pid, name = pid_name.split("/", 1)
+ except ValueError:
+ # likely unprivileged user, so add empty name & pid
+ return 0, ""
+ else:
+ name = name.rstrip(":")
+ return int(pid), name
+
+
+def netStatParse(raw):
+ """
+ Each netstat line can split into 6, 7 or 8 fields, depending on the values of state, process and name.
+ For UDP the state is always empty, and for both UDP and TCP the process can be empty,
+ so these cases have to be checked.
+ :param raw: Netstat raw output String. First line explains the format, each following line contains a connection.
+ :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one
+ connection.
+ """
+ results = list()
+ for line in raw.splitlines():
+ if line.startswith(("tcp", "udp")):
+ # set variables to default state, in case they are not specified
+ state = ""
+ pid_and_name = ""
+ process = ""
+ formatted_line = line.split()
+ protocol, recv_q, send_q, address, foreign_address, rest = \
+ formatted_line[0], formatted_line[1], formatted_line[2], formatted_line[3], formatted_line[4], formatted_line[5:]
+ address, port = address.rsplit(":", 1)
+
+ if protocol.startswith("tcp"):
+ # netstat distinguishes between tcp6 and tcp
+ protocol = "tcp"
+ if len(rest) == 3:
+ state, pid_and_name, process = rest
+ if len(rest) == 2:
+ state, pid_and_name = rest
+
+ if protocol.startswith("udp"):
+ # safety measure, similar to tcp6
+ protocol = "udp"
+ if len(rest) == 2:
+ pid_and_name, process = rest
+ if len(rest) == 1:
+ pid_and_name = rest[0]
+
+ pid, name = split_pid_name(pid_name=pid_and_name)
+ result = {
+ 'protocol': protocol,
+ 'state': state,
+ 'address': address,
+ 'foreign_address': foreign_address,
+ 'port': int(port),
+ 'name': name,
+ 'pid': int(pid),
+ }
+ if result not in results:
+ results.append(result)
+ return results
+
+
+def ss_parse(raw):
+ """
+ Each ss line can split into 6 or 7 fields, depending on whether the process column
+ is present (it is missing e.g. for unprivileged users).
+ :param raw: ss raw output String. First line explains the format, each following line contains a connection.
+ :return: List of dicts, each dict contains protocol, state, local address, foreign address, port, name, pid for one
+ connection.
+ """
+ results = list()
+ regex_conns = re.compile(pattern=r'\[?(.+?)\]?:([0-9]+)$')
+ regex_pid = re.compile(pattern=r'"(.*?)",pid=(\d+)')
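+ # Illustrative `ss -plunt` line:
+ # tcp LISTEN 0 128 0.0.0.0:22 0.0.0.0:* users:(("sshd",pid=661,fd=3))
+ # regex_conns captures the local address and port; regex_pid extracts
+ # every ("name",pid=N) pair from the process column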
+
+ lines = raw.splitlines()
+
+ if len(lines) == 0 or not lines[0].startswith('Netid '):
+ # unexpected stdout from ss
+ raise EnvironmentError('Unknown stdout format of `ss`: {0}'.format(raw))
+
+ # skip headers (-H arg is not present on e.g. Ubuntu 16)
+ lines = lines[1:]
+
+ for line in lines:
+ cells = line.split(None, 6)
+ try:
+ if len(cells) == 6:
+ # no process column, e.g. due to unprivileged user
+ process = str()
+ protocol, state, recv_q, send_q, local_addr_port, peer_addr_port = cells
+ else:
+ protocol, state, recv_q, send_q, local_addr_port, peer_addr_port, process = cells
+ except ValueError:
+ # unexpected stdout from ss
+ raise EnvironmentError(
+ 'Expected `ss` table layout "Netid, State, Recv-Q, Send-Q, Local Address:Port, Peer Address:Port" '
+ 'and optionally "Process", but got something else: {0}'.format(line)
+ )
+
+ conns = regex_conns.search(local_addr_port)
+ pids = regex_pid.findall(process)
+ if conns is None:
+ continue
+
+ if not pids:
+ # findall() returns an empty list when the process column is missing
+ # (likely an unprivileged user), so add an empty name & pid
+ # as we do in the netstat logic to be consistent with output
+ pids = [(str(), 0)]
+
+ address = conns.group(1)
+ port = conns.group(2)
+ for name, pid in pids:
+ result = {
+ 'protocol': protocol,
+ 'state': state,
+ 'address': address,
+ 'foreign_address': peer_addr_port,
+ 'port': int(port),
+ 'name': name,
+ 'pid': int(pid),
+ }
+ results.append(result)
+ return results
+
+
+def main():
+ command_args = ['-p', '-l', '-u', '-n', '-t']
+ commands_map = {
+ 'netstat': {
+ 'args': [],
+ 'parse_func': netStatParse
+ },
+ 'ss': {
+ 'args': [],
+ 'parse_func': ss_parse
+ },
+ }
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(type='str', choices=list(sorted(commands_map))),
+ include_non_listening=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ if module.params['include_non_listening']:
+ command_args = ['-p', '-u', '-n', '-t', '-a']
+
+ commands_map['netstat']['args'] = command_args
+ commands_map['ss']['args'] = command_args
+
+ if platform.system() != 'Linux':
+ module.fail_json(msg='This module requires Linux.')
+
+ def getPidSTime(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'lstart', '-p', str(pid)])
+ stime = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ # keep the last line that is not the "STARTED" header printed by ps
+ if 'STARTED' not in line:
+ stime = line
+ return stime
+
+ def getPidUser(pid):
+ ps_cmd = module.get_bin_path('ps', True)
+ rc, ps_output, stderr = module.run_command([ps_cmd, '-o', 'user', '-p', str(pid)])
+ user = ''
+ if rc == 0:
+ for line in ps_output.splitlines():
+ if line != 'USER':
+ user = line
+ return user
+
+ result = {
+ 'changed': False,
+ 'ansible_facts': {
+ 'tcp_listen': [],
+ 'udp_listen': [],
+ },
+ }
+
+ try:
+ command = None
+ bin_path = None
+ if module.params['command'] is not None:
+ command = module.params['command']
+ bin_path = module.get_bin_path(command, required=True)
+ else:
+ for c in sorted(commands_map):
+ bin_path = module.get_bin_path(c, required=False)
+ if bin_path is not None:
+ command = c
+ break
+
+ if bin_path is None:
+ raise EnvironmentError('Unable to find any of the supported commands in PATH: {0}'.format(", ".join(sorted(commands_map))))
+
+ # which ports are listening for connections?
+ args = commands_map[command]['args']
+ rc, stdout, stderr = module.run_command([bin_path] + args)
+ if rc == 0:
+ parse_func = commands_map[command]['parse_func']
+ results = parse_func(stdout)
+
+ for connection in results:
+ # only display state and foreign_address for include_non_listening.
+ if not module.params['include_non_listening']:
+ connection.pop('state', None)
+ connection.pop('foreign_address', None)
+ connection['stime'] = getPidSTime(connection['pid'])
+ connection['user'] = getPidUser(connection['pid'])
+ if connection['protocol'].startswith('tcp'):
+ result['ansible_facts']['tcp_listen'].append(connection)
+ elif connection['protocol'].startswith('udp'):
+ result['ansible_facts']['udp_listen'].append(connection)
+ except (KeyError, EnvironmentError) as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lldp.py b/ansible_collections/community/general/plugins/modules/lldp.py
new file mode 100644
index 000000000..fb608ff13
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lldp.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lldp
+requirements: [ lldpctl ]
+short_description: Get details reported by lldp
+description:
+ - Reads data out of lldpctl
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options: {}
+author: "Andy Hill (@andyhky)"
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ community.general.lldp:
+
+ - name: Print each switch/port
+ ansible.builtin.debug:
+ msg: "{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifname'] }}"
+ with_items: "{{ lldp.keys() }}"
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def gather_lldp(module):
+ # required=True makes the module fail cleanly when lldpctl is absent
+ cmd = [module.get_bin_path('lldpctl', True), '-f', 'keyvalue']
+ rc, output, err = module.run_command(cmd)
+ if output:
+ output_dict = {}
+ current_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry.startswith('lldp'):
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+ else:
+ value = current_dict[final] + '\n' + entry
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp(module)
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+ except TypeError:
+ module.fail_json(msg="lldpctl command failed. is lldpd running?")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/locale_gen.py b/ansible_collections/community/general/plugins/modules/locale_gen.py
new file mode 100644
index 000000000..fccdf977a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/locale_gen.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+author:
+ - Augustus Kling (@AugustusKling)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the locale shall be present.
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = '''
+- name: Ensure a locale exists
+ community.general.locale_gen:
+ name: de_CH.UTF-8
+ state: present
+'''
+
+import os
+import re
+from subprocess import Popen, PIPE, call
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+LOCALE_NORMALIZATION = {
+ ".utf8": ".UTF-8",
+ ".eucjp": ".EUC-JP",
+ ".iso885915": ".ISO-8859-15",
+ ".cp1251": ".CP1251",
+ ".koi8r": ".KOI8-R",
+ ".armscii8": ".ARMSCII-8",
+ ".euckr": ".EUC-KR",
+ ".gbk": ".GBK",
+ ".gb18030": ".GB18030",
+ ".euctw": ".EUC-TW",
+}
+
+
+# ===========================================
+# locale_gen module specific support methods.
+#
+
+def is_available(name, ubuntuMode):
+ """Check if the given locale is available on the system. This is done by
+ checking either :
+ * if the locale is present in /etc/locales.gen
+ * or if the locale is present in /usr/share/i18n/SUPPORTED"""
+ if ubuntuMode:
+ __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/usr/share/i18n/SUPPORTED'
+ else:
+ __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
+ __locales_available = '/etc/locale.gen'
+
+ re_compiled = re.compile(__regexp)
+ # use a context manager so the file is closed even on the early return
+ with open(__locales_available, 'r') as fd:
+ for line in fd:
+ result = re_compiled.match(line)
+ if result and result.group('locale') == name:
+ return True
+ return False
+
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ output = to_native(output)
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ for s, r in LOCALE_NORMALIZATION.items():
+ name = name.replace(s, r)
+ return name
+
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+ with open("/etc/locale.gen", "r") as f:
+ lines = [line.replace(existing_line, new_line) for line in f]
+ with open("/etc/locale.gen", "w") as f:
+ f.write("".join(lines))
+
+
+def set_locale(name, enabled=True):
+ """ Sets the state of the locale. Defaults to enabled. """
+ search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
+ if enabled:
+ new_string = r'%s \g<charset>' % (name)
+ else:
+ new_string = r'# %s \g<charset>' % (name)
+ with open("/etc/locale.gen", "r") as f:
+ lines = [re.sub(search_string, new_string, line) for line in f]
+ with open("/etc/locale.gen", "w") as f:
+ f.write("".join(lines))
+
+
+def apply_change(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ set_locale(name, enabled=True)
+ else:
+ # Delete locale.
+ set_locale(name, enabled=False)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def apply_change_ubuntu(targetState, name):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ """
+ if targetState == "present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ with open("/var/lib/locales/supported.d/local", "r") as f:
+ content = f.readlines()
+ with open("/var/lib/locales/supported.d/local", "w") as f:
+ for line in content:
+ locale, charset = line.split(' ')
+ if locale != name:
+ f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue != 0:
+ raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned " + str(localeGenExitValue))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+
+ if not os.path.exists("/var/lib/locales/supported.d/"):
+ if os.path.exists("/etc/locale.gen"):
+ # We found the common way to manage locales.
+ ubuntuMode = False
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
+ else:
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+
+ if not is_available(name, ubuntuMode):
+ module.fail_json(msg="The locale you've entered is not available "
+ "on your system.")
+
+ if is_present(name):
+ prev_state = "present"
+ else:
+ prev_state = "absent"
+ changed = (prev_state != state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ if changed:
+ try:
+ if ubuntuMode is False:
+ apply_change(state, name)
+ else:
+ apply_change_ubuntu(state, name)
+ except EnvironmentError as e:
+ module.fail_json(msg=to_native(e), exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/logentries.py b/ansible_collections/community/general/plugins/modules/logentries.py
new file mode 100644
index 000000000..f177cf454
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/logentries.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Ivan Vanderbyl <ivan@app.io>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: "Ivan Vanderbyl (@ivanvanderbyl)"
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to Logentries in real time
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ type: str
+ description:
+ - path to a log file
+ required: true
+ state:
+ type: str
+ description:
+ - Following state of the log.
+ choices: [ 'present', 'absent', 'followed', 'unfollowed' ]
+ required: false
+ default: present
+ name:
+ type: str
+ description:
+ - Name of the log.
+ required: false
+ logtype:
+ type: str
+ description:
+ - Type of the log.
+ required: false
+ aliases: [type]
+
+notes:
+ - Requires the Logentries agent, which can be installed by following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- name: Track nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/access.log
+ state: present
+ name: nginx-access-log
+
+- name: Stop tracking nginx logs
+ community.general.logentries:
+ path: /var/log/nginx/error.log
+ state: absent
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command([le_path, "followed", path])
+ if rc == 0:
+ return True
+
+ return False
+
+
+def follow_log(module, le_path, logs, name=None, logtype=None):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ cmd = [le_path, 'follow', log]
+ if name:
+ cmd.extend(['--name', name])
+ if logtype:
+ cmd.extend(['--type', logtype])
+ rc, out, err = module.run_command(cmd)
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+ module.exit_json(changed=False, msg="logs(s) already followed")
+
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+ # Using a for loop so that, in case of error, we can report the log that failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+ module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
+
+ module.exit_json(changed=False, msg="logs(s) already unfollowed")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True),
+ state=dict(default="present", choices=["present", "followed", "absent", "unfollowed"]),
+ name=dict(required=False, default=None, type='str'),
+ logtype=dict(required=False, default=None, type='str', aliases=['type'])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs, name=p['name'], logtype=p['logtype'])
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/logentries_msg.py b/ansible_collections/community/general/plugins/modules/logentries_msg.py
new file mode 100644
index 000000000..03851ad1f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/logentries_msg.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logentries_msg
+short_description: Send a message to logentries
+description:
+ - Send a message to logentries
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - Log token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The message body.
+ required: true
+ api:
+ type: str
+ description:
+ - API endpoint
+ default: data.logentries.com
+ port:
+ type: int
+ description:
+ - API endpoint port
+ default: 80
+author: "Jimmy Tang (@jcftang) <jimmy_tang@rapid7.com>"
+'''
+
+RETURN = '''# '''
+
+EXAMPLES = '''
+- name: Send a message to logentries
+ community.general.logentries_msg:
+ token: 00000000-0000-0000-0000-000000000000
+ msg: "{{ ansible_hostname }}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def send_msg(module, token, msg, api, port):
+
+ message = "{0} {1}\n".format(token, msg)
+
+ api_ip = socket.gethostbyname(api)
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((api_ip, port))
+ try:
+ if not module.check_mode:
+ # sockets require bytes on Python 3; sendall ensures the whole message is sent
+ s.sendall(message.encode('utf-8'))
+ except Exception as e:
+ module.fail_json(msg="failed to send message, msg=%s" % e)
+ s.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str', required=True),
+ api=dict(type='str', default="data.logentries.com"),
+ port=dict(type='int', default=80)),
+ supports_check_mode=True
+ )
+
+ token = module.params["token"]
+ msg = module.params["msg"]
+ api = module.params["api"]
+ port = module.params["port"]
+
+ changed = False
+ try:
+ send_msg(module, token, msg, api, port)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg="unable to send msg: %s" % e)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/logstash_plugin.py b/ansible_collections/community/general/plugins/modules/logstash_plugin.py
new file mode 100644
index 000000000..7ee118ff2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/logstash_plugin.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: logstash_plugin
+short_description: Manage Logstash plugins
+description:
+ - Manages Logstash plugins.
+author: Loic Blot (@nerzhul)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Install plugin with that name.
+ required: true
+ state:
+ type: str
+ description:
+ - Apply plugin state.
+ choices: ["present", "absent"]
+ default: present
+ plugin_bin:
+ type: path
+ description:
+ - Specify logstash-plugin to use for plugin management.
+ default: /usr/share/logstash/bin/logstash-plugin
+ proxy_host:
+ type: str
+ description:
+ - Proxy host to use during plugin installation.
+ proxy_port:
+ type: str
+ description:
+ - Proxy port to use during plugin installation.
+ version:
+ type: str
+ description:
+ - Version of the plugin to install.
+ If the plugin already exists at a previous version, it will NOT be updated.
+'''
+
+EXAMPLES = '''
+- name: Install Logstash beats input plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+
+- name: Install specific version of a plugin
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-syslog
+ version: '3.2.0'
+
+- name: Uninstall Logstash plugin
+ community.general.logstash_plugin:
+ state: absent
+ name: logstash-filter-multiline
+
+- name: Install Logstash plugin with alternate heap size
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ environment:
+ LS_JAVA_OPTS: "-Xms256m -Xmx256m"
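+
+# A hypothetical example of the proxy options; host and port values are
+# illustrative.
+- name: Install Logstash plugin through a proxy
+ community.general.logstash_plugin:
+ state: present
+ name: logstash-input-beats
+ proxy_host: proxy.example.com
+ proxy_port: '3128'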
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+PACKAGE_STATE_MAP = dict(
+ present="install",
+ absent="remove"
+)
+
+
+def is_plugin_present(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, "list", plugin_name]
+ rc, out, err = module.run_command(cmd_args)
+ return rc == 0
+
+
+def parse_error(string):
+ reason = "reason: "
+ try:
+ return string[string.index(reason) + len(reason):].strip()
+ except ValueError:
+ return string
+
+
+def install_plugin(module, plugin_bin, plugin_name, version, proxy_host, proxy_port):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"], plugin_name]
+
+ if version:
+ cmd_args.append("--version %s" % version)
+
+ if proxy_host and proxy_port:
+ cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def remove_plugin(module, plugin_bin, plugin_name):
+ cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], plugin_name]
+
+ cmd = " ".join(cmd_args)
+
+ if module.check_mode:
+ rc, out, err = 0, "check mode", ""
+ else:
+ rc, out, err = module.run_command(cmd)
+
+ if rc != 0:
+ reason = parse_error(out)
+ module.fail_json(msg=reason)
+
+ return True, cmd, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())),
+ plugin_bin=dict(default="/usr/share/logstash/bin/logstash-plugin", type="path"),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ version=dict()
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params["name"]
+ state = module.params["state"]
+ plugin_bin = module.params["plugin_bin"]
+ proxy_host = module.params["proxy_host"]
+ proxy_port = module.params["proxy_port"]
+ version = module.params["version"]
+
+ present = is_plugin_present(module, plugin_bin, name)
+
+ # skip if the state is correct
+ if (present and state == "present") or (state == "absent" and not present):
+ module.exit_json(changed=False, name=name, state=state)
+
+ if state == "present":
+ changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, proxy_host, proxy_port)
+ elif state == "absent":
+ changed, cmd, out, err = remove_plugin(module, plugin_bin, name)
+
+ module.exit_json(changed=changed, cmd=cmd, name=name, state=state, stdout=out, stderr=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lvg.py b/ansible_collections/community/general/plugins/modules/lvg.py
new file mode 100644
index 000000000..60eaaa42b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lvg.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
+# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+- Alexander Bulimov (@abulimov)
+module: lvg
+short_description: Configure LVM volume groups
+description:
+ - This module creates, removes or resizes volume groups.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ vg:
+ description:
+ - The name of the volume group.
+ type: str
+ required: true
+ pvs:
+ description:
+ - List of comma-separated devices to use as physical devices in this volume group.
+ - Required when creating or resizing volume group.
+ - The module will take care of running pvcreate if needed.
+ type: list
+ elements: str
+ pesize:
+ description:
+ - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
+ (where the sector size is the largest sector size of the PVs currently used in the VG),
+ or at least 128KiB."
+ - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte.
+ type: str
+ default: "4"
+ pv_options:
+ description:
+ - Additional options to pass to C(pvcreate) when creating the volume group.
+ type: str
+ default: ''
+ pvresize:
+ description:
+ - If C(true), resize the physical volume to the maximum available size.
+ type: bool
+ default: false
+ version_added: '0.2.0'
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ type: str
+ default: ''
+ state:
+ description:
+ - Control if the volume group exists.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ force:
+ description:
+ - If C(true), allows to remove volume group with logical volumes.
+ type: bool
+ default: false
+seealso:
+- module: community.general.filesystem
+- module: community.general.lvol
+- module: community.general.parted
+notes:
+ - This module does not modify the PE size of an already present volume group.
+'''
+
+EXAMPLES = r'''
+- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sda1
+ pesize: 32
+
+- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb
+ pesize: 128K
+
+# If, for example, we already have VG vg.services on top of /dev/sdb1,
+# this VG will be extended by /dev/sdc5. Or if vg.services was created on
+# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
+# and then reduce by /dev/sda5.
+- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
+ community.general.lvg:
+ vg: vg.services
+ pvs: /dev/sdb1,/dev/sdc5
+
+- name: Remove a volume group with name vg.services
+ community.general.lvg:
+ vg: vg.services
+ state: absent
+
+- name: Create a volume group on top of /dev/sda3 and resize the volume group /dev/sda3 to the maximum possible
+ community.general.lvg:
+ vg: resizableVG
+ pvs: /dev/sda3
+ pvresize: true
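+
+# A hypothetical example of the force option; removing a volume group that
+# still contains logical volumes destroys those volumes.
+- name: Forcibly remove volume group vg.services along with any logical volumes
+ community.general.lvg:
+ vg: vg.services
+ force: true
+ state: absent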
+'''
+
+import itertools
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'pv_count': int(parts[1]),
+ 'lv_count': int(parts[2]),
+ })
+ return vgs
+
+
+def find_mapper_device_name(module, dm_device):
+ dmsetup_cmd = module.get_bin_path('dmsetup', True)
+ mapper_prefix = '/dev/mapper/'
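+ # Illustrative only: for dm_device='/dev/dm-0' whose dmsetup name is
+ # 'vg.services-lv_data' (hypothetical), this returns
+ # '/dev/mapper/vg.services-lv_data'.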
+ rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
+ if rc != 0:
+ module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
+ mapper_device = mapper_prefix + dm_name.rstrip()
+ return mapper_device
+
+
+def parse_pvs(module, data):
+ pvs = []
+ dm_prefix = '/dev/dm-'
+ for line in data.splitlines():
+ parts = line.strip().split(';')
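+ # Illustrative only: a pvs line such as "/dev/sda1;vg.services"
+ # (hypothetical sample) parses into
+ # {'name': '/dev/sda1', 'vg_name': 'vg.services'}.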
+ if parts[0].startswith(dm_prefix):
+ parts[0] = find_mapper_device_name(module, parts[0])
+ pvs.append({
+ 'name': parts[0],
+ 'vg_name': parts[1],
+ })
+ return pvs
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ pvs=dict(type='list', elements='str'),
+ pesize=dict(type='str', default='4'),
+ pv_options=dict(type='str', default=''),
+ pvresize=dict(type='bool', default=False),
+ vg_options=dict(type='str', default=''),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ vg = module.params['vg']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ pvresize = module.boolean(module.params['pvresize'])
+ pesize = module.params['pesize']
+ pvoptions = module.params['pv_options'].split()
+ vgoptions = module.params['vg_options'].split()
+
+ dev_list = []
+ if module.params['pvs']:
+ dev_list = list(module.params['pvs'])
+ elif state == 'present':
+ module.fail_json(msg="No physical volumes given.")
+
+ # LVM always uses real paths, not symlinks, so replace symlinks with the actual path
+ for idx, dev in enumerate(dev_list):
+ dev_list[idx] = os.path.realpath(dev)
+
+ if state == 'present':
+ # check given devices
+ for test_dev in dev_list:
+ if not os.path.exists(test_dev):
+ module.fail_json(msg="Device %s not found." % test_dev)
+
+ # get pv list
+ pvs_cmd = module.get_bin_path('pvs', True)
+ if dev_list:
+ pvs_filter_pv_name = ' || '.join(
+ 'pv_name = {0}'.format(x)
+ for x in itertools.chain(dev_list, module.params['pvs'])
+ )
+ pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
+ pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
+ else:
+ pvs_filter = ''
+ rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
+
+ # check pv for devices
+ pvs = parse_pvs(module, current_pvs)
+ used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
+ if used_pvs:
+ module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
+
+ vgs_cmd = module.get_bin_path('vgs', True)
+ rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
+
+ if rc != 0:
+ module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
+
+ changed = False
+
+ vgs = parse_vgs(current_vgs)
+
+ for test_vg in vgs:
+ if test_vg['name'] == vg:
+ this_vg = test_vg
+ break
+ else:
+ this_vg = None
+
+ if this_vg is None:
+ if state == 'present':
+ # create VG
+ if module.check_mode:
+ changed = True
+ else:
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in dev_list:
+ rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ vgcreate_cmd = module.get_bin_path('vgcreate', True)
+ rc, dummy, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ if this_vg['lv_count'] == 0 or force:
+ # remove VG
+ vgremove_cmd = module.get_bin_path('vgremove', True)
+ rc, dummy, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
+ else:
+ module.fail_json(msg="Refuse to remove non-empty volume group %s without force=true" % (vg))
+
+ # resize VG
+ current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
+ devs_to_remove = list(set(current_devs) - set(dev_list))
+ devs_to_add = list(set(dev_list) - set(current_devs))
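+ # Illustrative only: if the VG currently uses {/dev/sda5} and I(pvs)
+ # requests {/dev/sdb1, /dev/sdc5} (hypothetical devices), both new devices
+ # are added first and /dev/sda5 is removed afterwards, matching the
+ # EXAMPLES above.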
+
+ if current_devs:
+ if state == 'present' and pvresize:
+ for device in current_devs:
+ pvresize_cmd = module.get_bin_path('pvresize', True)
+ pvdisplay_cmd = module.get_bin_path('pvdisplay', True)
+ pvdisplay_ops = ["--units", "b", "--columns", "--noheadings", "--nosuffix"]
+ pvdisplay_cmd_device_options = [pvdisplay_cmd, device] + pvdisplay_ops
+ rc, dev_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "dev_size"])
+ dev_size = int(dev_size.replace(" ", ""))
+ rc, pv_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pv_size"])
+ pv_size = int(pv_size.replace(" ", ""))
+ rc, pe_start, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "pe_start"])
+ pe_start = int(pe_start.replace(" ", ""))
+ rc, vg_extent_size, err = module.run_command(pvdisplay_cmd_device_options + ["-o", "vg_extent_size"])
+ vg_extent_size = int(vg_extent_size.replace(" ", ""))
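+ # Resize only when the device's unallocated tail exceeds one extent,
+ # e.g. (hypothetical byte values) dev_size=10737418240, pe_start=1048576,
+ # pv_size=5368709120 leaves ~5GiB of slack, which is larger than a 4MiB
+ # extent, so pvresize runs.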
+ if (dev_size - (pe_start + pv_size)) > vg_extent_size:
+ if module.check_mode:
+ changed = True
+ else:
+ rc, dummy, err = module.run_command([pvresize_cmd, device])
+ if rc != 0:
+ module.fail_json(msg="Failed executing pvresize command.", rc=rc, err=err)
+ else:
+ changed = True
+
+ if devs_to_add or devs_to_remove:
+ if module.check_mode:
+ changed = True
+ else:
+ if devs_to_add:
+ devs_to_add_string = ' '.join(devs_to_add)
+ # create PV
+ pvcreate_cmd = module.get_bin_path('pvcreate', True)
+ for current_dev in devs_to_add:
+ rc, dummy, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
+ # add PV to our VG
+ vgextend_cmd = module.get_bin_path('vgextend', True)
+ rc, dummy, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
+
+ # remove some PV from our VG
+ if devs_to_remove:
+ devs_to_remove_string = ' '.join(devs_to_remove)
+ vgreduce_cmd = module.get_bin_path('vgreduce', True)
+ rc, dummy, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lvol.py b/ansible_collections/community/general/plugins/modules/lvol.py
new file mode 100644
index 000000000..d193a4e83
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lvol.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>, Alexander Bulimov <lazywolf0@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author:
+ - Jeroen Hoekx (@jhoekx)
+ - Alexander Bulimov (@abulimov)
+ - Raoul Baudach (@unkaputtbar112)
+ - Ziga Kern (@zigaSRC)
+module: lvol
+short_description: Configure LVM logical volumes
+description:
+ - This module creates, removes or resizes logical volumes.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ vg:
+ type: str
+ required: true
+ description:
+ - The volume group this logical volume is part of.
+ lv:
+ type: str
+ description:
+ - The name of the logical volume.
+ size:
+ type: str
+ description:
+ - The size of the logical volume, according to lvcreate(8) C(--size), by
+ default in megabytes, or optionally with one of the [bBsSkKmMgGtTpPeE] units; or
+ according to lvcreate(8) C(--extents) as a percentage of [VG|PVS|FREE].
+ - Float values must begin with a digit.
+ - When resizing, apart from specifying an absolute size you may, according to
+ lvextend(8)|lvreduce(8) C(--size), specify the amount to extend the logical volume with
+ the prefix C(+) or the amount to reduce the logical volume by with prefix C(-).
+ - Resizing using C(+) or C(-) was not supported prior to community.general 3.0.0.
+ - Please note that when using C(+) or C(-), the module is B(not idempotent).
+ state:
+ type: str
+ description:
+ - Control if the logical volume exists. If C(present) and the
+ volume does not already exist then the C(size) option is required.
+ choices: [ absent, present ]
+ default: present
+ active:
+ description:
+ - Whether the volume is active and visible to the host.
+ type: bool
+ default: true
+ force:
+ description:
+ - Shrink or remove operations on volumes require this switch. Ensures that
+ filesystems never get corrupted or destroyed by mistake.
+ type: bool
+ default: false
+ opts:
+ type: str
+ description:
+ - Free-form options to be passed to the lvcreate command.
+ snapshot:
+ type: str
+ description:
+ - The name of the snapshot volume.
+ pvs:
+ type: str
+ description:
+ - Comma-separated list of physical volumes (e.g. /dev/sda,/dev/sdb).
+ thinpool:
+ type: str
+ description:
+ - The thin pool volume name. To create a thin-provisioned volume, specify the thin pool volume name.
+ shrink:
+ description:
+ - Shrink if the current size is higher than the requested size.
+ type: bool
+ default: true
+ resizefs:
+ description:
+ - Resize the underlying filesystem together with the logical volume.
+ - Supported for C(ext2), C(ext3), C(ext4), C(reiserfs) and C(XFS) filesystems.
+ Attempts to resize other filesystem types will fail.
+ type: bool
+ default: false
+notes:
+ - You must specify I(lv) (when managing the state of logical volumes) or I(thinpool) (when managing a thin-provisioned volume).
+'''
+
+EXAMPLES = '''
+- name: Create a logical volume of 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+
+- name: Create a logical volume of 512m with disks /dev/sda and /dev/sdb
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ pvs: /dev/sda,/dev/sdb
+
+- name: Create cache pool logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: lvcache
+ size: 512m
+ opts: --type cache-pool
+
+- name: Create a logical volume of 512g.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+
+- name: Create a logical volume the size of all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%FREE
+
+- name: Create a logical volume with special options
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ opts: -r 16
+
+- name: Extend the logical volume to 1024m.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 1024
+
+- name: Extend the logical volume to consume all remaining space in the volume group
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +100%FREE
+
+- name: Extend the logical volume by given space
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: +512M
+
+- name: Extend the logical volume to take all remaining space of the PVs and resize the underlying filesystem
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 100%PVS
+ resizefs: true
+
+- name: Resize the logical volume to % of VG
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 80%VG
+ force: true
+
+- name: Reduce the logical volume to 512m
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ force: true
+
+- name: Reduce the logical volume by given space
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: -512M
+ force: true
+
+- name: Set the logical volume to 512m and do not try to shrink if size is lower than current one
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512
+ shrink: false
+
+- name: Remove the logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ state: absent
+ force: true
+
+- name: Create a snapshot volume of the test logical volume.
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ snapshot: snap1
+ size: 100m
+
+- name: Deactivate a logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ active: false
+
+- name: Create a deactivated logical volume
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ size: 512g
+ active: false
+
+- name: Create a thin pool of 512g
+ community.general.lvol:
+ vg: firefly
+ thinpool: testpool
+ size: 512g
+
+- name: Create a thin volume of 128g
+ community.general.lvol:
+ vg: firefly
+ lv: test
+ thinpool: testpool
+ size: 128g
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+LVOL_ENV_VARS = dict(
+ # make sure we use the C locale when running lvol-related commands
+ LANG='C',
+ LC_ALL='C',
+ LC_MESSAGES='C',
+ LC_CTYPE='C',
+)
+
+
+def mkversion(major, minor, patch):
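+ # Illustrative only: mkversion(2, 2, 99) == 2002099, so LVM version numbers
+ # can be compared as plain integers.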
+ return (1000 * 1000 * int(major)) + (1000 * int(minor)) + int(patch)
+
+
+def parse_lvs(data):
+ lvs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
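+ # parts[2] is the lvs lv_attr string, e.g. 'twi-aotz--' for an active thin
+ # pool or '-wi-a-----' for a plain active LV (illustrative samples): the
+ # first character encodes the volume type and the fifth the activation state.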
+ lvs.append({
+ 'name': parts[0].replace('[', '').replace(']', ''),
+ 'size': float(parts[1]),
+ 'active': (parts[2][4] == 'a'),
+ 'thinpool': (parts[2][0] == 't'),
+ 'thinvol': (parts[2][0] == 'V'),
+ })
+ return lvs
+
+
+def parse_vgs(data):
+ vgs = []
+ for line in data.splitlines():
+ parts = line.strip().split(';')
+ vgs.append({
+ 'name': parts[0],
+ 'size': float(parts[1]),
+ 'free': float(parts[2]),
+ 'ext_size': float(parts[3])
+ })
+ return vgs
+
+
+def get_lvm_version(module):
+ ver_cmd = module.get_bin_path("lvm", required=True)
+ rc, out, err = module.run_command("%s version" % (ver_cmd))
+ if rc != 0:
+ return None
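+ # Illustrative only: 'lvm version' output contains a line such as
+ # "LVM version:     2.02.186(2) (2019-08-27)" (hypothetical sample), which
+ # yields mkversion(2, 2, 186) == 2002186.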
+ m = re.search(r"LVM version:\s+(\d+)\.(\d+)\.(\d+).*(\d{4}-\d{2}-\d{2})", out)
+ if not m:
+ return None
+ return mkversion(m.group(1), m.group(2), m.group(3))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ vg=dict(type='str', required=True),
+ lv=dict(type='str'),
+ size=dict(type='str'),
+ opts=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ force=dict(type='bool', default=False),
+ shrink=dict(type='bool', default=True),
+ active=dict(type='bool', default=True),
+ snapshot=dict(type='str'),
+ pvs=dict(type='str'),
+ resizefs=dict(type='bool', default=False),
+ thinpool=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=(
+ ['lv', 'thinpool'],
+ ),
+ )
+
+ module.run_command_environ_update = LVOL_ENV_VARS
+
+ # Determine if the "--yes" option should be used
+ version_found = get_lvm_version(module)
+ if version_found is None:
+ module.fail_json(msg="Failed to get LVM version number")
+ version_yesopt = mkversion(2, 2, 99) # First LVM with the "--yes" option
+ if version_found >= version_yesopt:
+ yesopt = "--yes"
+ else:
+ yesopt = ""
+
+ vg = module.params['vg']
+ lv = module.params['lv']
+ size = module.params['size']
+ opts = module.params['opts']
+ state = module.params['state']
+ force = module.boolean(module.params['force'])
+ shrink = module.boolean(module.params['shrink'])
+ active = module.boolean(module.params['active'])
+ resizefs = module.boolean(module.params['resizefs'])
+ thinpool = module.params['thinpool']
+ size_opt = 'L'
+ size_unit = 'm'
+ size_operator = None
+ snapshot = module.params['snapshot']
+ pvs = module.params['pvs']
+
+ if pvs is None:
+ pvs = ""
+ else:
+ pvs = pvs.replace(",", " ")
+
+ if opts is None:
+ opts = ""
+
+ # Add --test option when running in check-mode
+ if module.check_mode:
+ test_opt = ' --test'
+ else:
+ test_opt = ''
+
+ if size:
+ # LVEXTEND(8)/LVREDUCE(8) -l, -L options: Check for relative value for resizing
+ if size.startswith('+'):
+ size_operator = '+'
+ size = size[1:]
+ elif size.startswith('-'):
+ size_operator = '-'
+ size = size[1:]
+ # LVCREATE(8) does not support [+-]
+
+ # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -l --extents option with percentage
+ if '%' in size:
+ size_parts = size.split('%', 1)
+ size_percent = int(size_parts[0])
+ if size_percent > 100:
+ module.fail_json(msg="Size percentage cannot be larger than 100%")
+ size_whole = size_parts[1]
+ if size_whole == 'ORIGIN':
+ module.fail_json(msg="Snapshot Volumes are not supported")
+ elif size_whole not in ['VG', 'PVS', 'FREE']:
+ module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE")
+ size_opt = 'l'
+ size_unit = ''
+
+ # LVCREATE(8)/LVEXTEND(8)/LVREDUCE(8) -L --size option unit
+ if '%' not in size:
+ if size[-1].lower() in 'bskmgtpe':
+ size_unit = size[-1]
+ size = size[0:-1]
+
+ try:
+ float(size)
+ if not size[0].isdigit():
+ raise ValueError()
+ except ValueError:
+ module.fail_json(msg="Bad size specification of '%s'" % size)
+
+ # when no unit, megabytes by default
+ if size_opt == 'l':
+ unit = 'm'
+ else:
+ unit = size_unit
+
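+ # Illustrative only (hypothetical inputs): size='+100%FREE' ends up as
+ # size_operator='+', size='100%FREE', size_opt='l', size_unit='', while
+ # size='512M' ends up as size_operator=None, size='512', size_opt='L',
+ # size_unit='M'.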
+ # Get information on volume group requested
+ vgs_cmd = module.get_bin_path("vgs", required=True)
+ rc, current_vgs, err = module.run_command(
+ "%s --noheadings --nosuffix -o vg_name,size,free,vg_extent_size --units %s --separator ';' %s" % (vgs_cmd, unit.lower(), vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ vgs = parse_vgs(current_vgs)
+ this_vg = vgs[0]
+
+ # Get information on logical volume requested
+ lvs_cmd = module.get_bin_path("lvs", required=True)
+ rc, current_lvs, err = module.run_command(
+ "%s -a --noheadings --nosuffix -o lv_name,size,lv_attr --units %s --separator ';' %s" % (lvs_cmd, unit.lower(), vg))
+
+ if rc != 0:
+ if state == 'absent':
+ module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg)
+ else:
+ module.fail_json(msg="Volume group %s does not exist." % vg, rc=rc, err=err)
+
+ changed = False
+
+ lvs = parse_lvs(current_lvs)
+
+ if snapshot:
+ # Check snapshot pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == lv or test_lv['name'] == thinpool:
+ if not test_lv['thinpool'] and not thinpool:
+ break
+ else:
+ module.fail_json(msg="Snapshots of thin pool LVs are not supported.")
+ else:
+ module.fail_json(msg="Snapshot origin LV %s does not exist in volume group %s." % (lv, vg))
+ check_lv = snapshot
+ elif thinpool:
+ if lv:
+ # Check thin volume pre-conditions
+ for test_lv in lvs:
+ if test_lv['name'] == thinpool:
+ break
+ else:
+ module.fail_json(msg="Thin pool LV %s does not exist in volume group %s." % (thinpool, vg))
+ check_lv = lv
+ else:
+ check_lv = thinpool
+ else:
+ check_lv = lv
+
+ for test_lv in lvs:
+ if test_lv['name'] in (check_lv, check_lv.rsplit('/', 1)[-1]):
+ this_lv = test_lv
+ break
+ else:
+ this_lv = None
+
+ msg = ''
+ if this_lv is None:
+ if state == 'present':
+ if size_operator is not None:
+ if size_operator == "-" or (size_whole not in ["VG", "PVS", "FREE", "ORIGIN", None]):
+ module.fail_json(msg="Bad size specification of '%s%s' for creating LV" % (size_operator, size))
+ # Require size argument except for snapshot of thin volumes
+ if (lv or thinpool) and not size:
+ for test_lv in lvs:
+ if test_lv['name'] == lv and test_lv['thinvol'] and snapshot:
+ break
+ else:
+ module.fail_json(msg="No size given.")
+
+ # create LV
+ lvcreate_cmd = module.get_bin_path("lvcreate", required=True)
+ if snapshot is not None:
+ if size:
+ cmd = "%s %s %s -%s %s%s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, snapshot, opts, vg, lv)
+ else:
+ cmd = "%s %s %s -s -n %s %s %s/%s" % (lvcreate_cmd, test_opt, yesopt, snapshot, opts, vg, lv)
+ elif thinpool and lv:
+ if size_opt == 'l':
+ module.fail_json(changed=False, msg="Thin volume sizing with percentage not supported.")
+ size_opt = 'V'
+ cmd = "%s %s %s -n %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, thinpool)
+ elif thinpool and not lv:
+ cmd = "%s %s %s -%s %s%s %s -T %s/%s" % (lvcreate_cmd, test_opt, yesopt, size_opt, size, size_unit, opts, vg, thinpool)
+ else:
+ cmd = "%s %s %s -n %s -%s %s%s %s %s %s" % (lvcreate_cmd, test_opt, yesopt, lv, size_opt, size, size_unit, opts, vg, pvs)
+ rc, dummy, err = module.run_command(cmd)
+ if rc == 0:
+ changed = True
+ else:
+ module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err)
+ else:
+ if state == 'absent':
+ # remove LV
+ if not force:
+ module.fail_json(msg="Sorry, no removal of logical volume %s without force=true." % (this_lv['name']))
+ lvremove_cmd = module.get_bin_path("lvremove", required=True)
+ rc, dummy, err = module.run_command("%s %s --force %s/%s" % (lvremove_cmd, test_opt, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err)
+
+ elif not size:
+ pass
+
+ elif size_opt == 'l':
+ # Resize LV based on % value
+ tool = None
+ size_free = this_vg['free']
+ if size_whole == 'VG' or size_whole == 'PVS':
+ size_requested = size_percent * this_vg['size'] / 100
+ else: # size_whole == 'FREE':
+ size_requested = size_percent * this_vg['free'] / 100
+
+ if size_operator == '+':
+ size_requested += this_lv['size']
+ elif size_operator == '-':
+ size_requested = this_lv['size'] - size_requested
+
+ # According to the latest documentation (LVM2-2.03.11) all tools round down
+ size_requested -= (size_requested % this_vg['ext_size'])
+
+ if this_lv['size'] < size_requested:
+ if (size_free > 0) and (size_free >= (size_requested - this_lv['size'])):
+ tool = module.get_bin_path("lvextend", required=True)
+ else:
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
+ elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
+ if size_requested < 1:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ elif not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=true" % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ if size_operator:
+ cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs)
+ else:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ msg = "Volume %s resized to %s%s" % (this_lv['name'], size_requested, unit)
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ else:
+ # resize LV based on absolute values
+ tool = None
+ if float(size) > this_lv['size'] or size_operator == '+':
+ tool = module.get_bin_path("lvextend", required=True)
+ elif shrink and float(size) < this_lv['size'] or size_operator == '-':
+ if float(size) == 0:
+ module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
+ if not force:
+ module.fail_json(msg="Sorry, no shrinking of %s without force=true." % (this_lv['name']))
+ else:
+ tool = module.get_bin_path("lvreduce", required=True)
+ tool = '%s %s' % (tool, '--force')
+
+ if tool:
+ if resizefs:
+ tool = '%s %s' % (tool, '--resizefs')
+ if size_operator:
+ cmd = "%s %s -%s %s%s%s %s/%s %s" % (tool, test_opt, size_opt, size_operator, size, size_unit, vg, this_lv['name'], pvs)
+ else:
+ cmd = "%s %s -%s %s%s %s/%s %s" % (tool, test_opt, size_opt, size, size_unit, vg, this_lv['name'], pvs)
+ rc, out, err = module.run_command(cmd)
+ if "Reached maximum COW size" in out:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err, out=out)
+ elif rc == 0:
+ changed = True
+ elif "matches existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ elif "not larger than existing size" in err:
+ module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size'], msg="Original size is larger than requested size", err=err)
+ else:
+ module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err)
+
+ if this_lv is not None:
+ if active:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, dummy, err = module.run_command("%s -ay %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=((not this_lv['active']) or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to activate logical volume %s" % (lv), rc=rc, err=err)
+ else:
+ lvchange_cmd = module.get_bin_path("lvchange", required=True)
+ rc, dummy, err = module.run_command("%s -an %s/%s" % (lvchange_cmd, vg, this_lv['name']))
+ if rc == 0:
+ module.exit_json(changed=(this_lv['active'] or changed), vg=vg, lv=this_lv['name'], size=this_lv['size'])
+ else:
+ module.fail_json(msg="Failed to deactivate logical volume %s" % (lv), rc=rc, err=err)
+
+ module.exit_json(changed=changed, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxc_container.py b/ansible_collections/community/general/plugins/modules/lxc_container.py
new file mode 100644
index 000000000..aec8f12dc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxc_container.py
@@ -0,0 +1,1742 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: lxc_container
+short_description: Manage LXC Containers
+description:
+ - Management of LXC containers.
+author: "Kevin Carter (@cloudnull)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of a container.
+ type: str
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ type: str
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ type: str
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ type: str
+ config:
+ description:
+ - Path to the LXC configuration file.
+ type: path
+ lv_name:
+ description:
+ - Name of the logical volume.
+ - If not specified, it defaults to the container name.
+ type: str
+ vg_name:
+ description:
+ - If the backing store is C(lvm), specify the name of the volume group.
+ type: str
+ default: lxc
+ thinpool:
+ description:
+ - Use the LVM thin pool called TP.
+ type: str
+ fs_type:
+ description:
+ - Create a filesystem of type TYPE.
+ type: str
+ default: ext4
+ fs_size:
+ description:
+ - File system size.
+ type: str
+ default: 5G
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ type: path
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ type: str
+ container_command:
+ description:
+ - Run a command within a container.
+ type: str
+ lxc_path:
+ description:
+ - Place container under C(PATH).
+ type: path
+ container_log:
+ description:
+ - Enable a container log for host actions to the container.
+ type: bool
+ default: false
+ container_log_level:
+ choices:
+ - Info
+ - info
+ - INFO
+ - Error
+ - error
+ - ERROR
+ - Debug
+ - debug
+ - DEBUG
+ description:
+ - Set the log level for a container where I(container_log) was set.
+ type: str
+ required: false
+ default: INFO
+ clone_name:
+ description:
+ - Name of the new cloned server.
+ - This is only used when state is clone.
+ type: str
+ clone_snapshot:
+ description:
+ - Create a snapshot of the container when cloning.
+ - This is not supported by all container storage backends.
+ - Enabling this may fail if the backing store does not support snapshots.
+ type: bool
+ default: false
+ archive:
+ description:
+ - Create an archive of a container.
+ - This will create a tarball of the running container.
+ type: bool
+ default: false
+ archive_path:
+ description:
+ - Path used to save the archived container.
+ - If the path does not exist the archive method will attempt to create it.
+ type: path
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ type: str
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ - clone
+ description:
+ - Define the state of a container.
+ - If you clone a container using I(clone_name) the newly cloned
+ container is created in a stopped state.
+ - The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ type: str
+ default: started
+ container_config:
+ description:
+ - A list of C(key=value) options to use when configuring a container.
+ type: list
+ elements: str
+requirements:
+ - 'lxc >= 2.0 # OS package'
+ - 'python3 >= 3.5 # OS Package'
+ - 'python3-lxc # OS Package'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the user's namespace the module will
+ simply return as "unchanged".
+ - The I(container_command) can be used with any state except C(absent). If
+ used with state C(stopped) the container will be C(started), the command
+ executed, and then the container C(stopped) again. Likewise if I(state=stopped)
+ and the container does not exist it will first be created,
+ C(started), the command executed, and then C(stopped). If you use a "|"
+ in the variable you can use common script formatting within the variable
+ itself. The I(container_command) option will always execute as BASH.
+ When using I(container_command), a log file is created in the C(/tmp/) directory
+ which contains both C(stdout) and C(stderr) of any command executed.
+ - If I(archive=true) the system will attempt to create a compressed
+ tarball of the running container. The I(archive) option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for C(python3-lxc), which is a
+ requirement for this module, it can be installed from source at
+ U(https://github.com/lxc/python3-lxc) or installed via pip using the
+ package name C(lxc).
+'''
+
+EXAMPLES = r"""
+- name: Create a started container
+ community.general.lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ community.general.lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ community.general.lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create a filesystem container, configure it, archive it, and start it.
+- name: Create filesystem container
+ community.general.lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2.
+- name: Create a frozen lvm container
+ community.general.lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ ansible.builtin.debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ community.general.lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ community.general.lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ community.general.lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ community.general.lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ community.general.lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container, and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ community.general.lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: Debug info on container "test-container"
+ ansible.builtin.debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ community.general.lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ community.general.lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ community.general.lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ community.general.lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN = r"""
+lxc_container:
+ description: container information
+ returned: success
+ type: complex
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: str
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: str
+ sample: "running"
+ archive:
+ description: resulting path of the archived container
+ returned: success, when archive is true
+ type: str
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: bool
+ sample: true
+"""
+
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import tempfile
+import time
+import shlex
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE
+from ansible.module_utils.common.text.converters import to_text, to_bytes
+
+
+# LXC_COMPRESSION_MAP is a map of available compression types when creating
+# an archive of a container.
+LXC_COMPRESSION_MAP = {
+ 'gzip': {
+ 'extension': 'tar.tgz',
+ 'argument': '-czf'
+ },
+ 'bzip2': {
+ 'extension': 'tar.bz2',
+ 'argument': '-cjf'
+ },
+ 'none': {
+ 'extension': 'tar',
+ 'argument': '-cf'
+ }
+}
+
+
+# LXC_COMMAND_MAP is a map of variables that are available to a method based
+# on the state the container is in.
+LXC_COMMAND_MAP = {
+ 'create': {
+ 'variables': {
+ 'config': '--config',
+ 'template': '--template',
+ 'backing_store': '--bdev',
+ 'lxc_path': '--lxcpath',
+ 'lv_name': '--lvname',
+ 'vg_name': '--vgname',
+ 'thinpool': '--thinpool',
+ 'fs_type': '--fstype',
+ 'fs_size': '--fssize',
+ 'directory': '--dir',
+ 'zfs_root': '--zfsroot'
+ }
+ },
+ 'clone': {
+ 'variables-lxc-copy': {
+ 'backing_store': '--backingstorage',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--name',
+ 'clone_name': '--newname'
+ },
+ # lxc-clone is deprecated in favor of lxc-copy
+ 'variables-lxc-clone': {
+ 'backing_store': '--backingstore',
+ 'lxc_path': '--lxcpath',
+ 'fs_size': '--fssize',
+ 'name': '--orig',
+ 'clone_name': '--new'
+ }
+ }
+}
+
+
+# LXC_BACKING_STORE is a map of available storage backends and options that
+# are incompatible with the given storage backend.
+LXC_BACKING_STORE = {
+ 'dir': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ],
+ 'lvm': [
+ 'zfs_root'
+ ],
+ 'btrfs': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
+ ],
+ 'loop': [
+ 'lv_name', 'vg_name', 'thinpool', 'zfs_root'
+ ],
+ 'overlayfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
+ ],
+ 'zfs': [
+ 'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
+ ]
+}
+
+
+# LXC_LOGGING_LEVELS is a map of available log levels
+LXC_LOGGING_LEVELS = {
+ 'INFO': ['info', 'INFO', 'Info'],
+ 'ERROR': ['error', 'ERROR', 'Error'],
+ 'DEBUG': ['debug', 'DEBUG', 'Debug']
+}
+
+
+# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXC_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+ 'clone': '_clone'
+}
+
+
+# This is used to attach to a running container and execute commands from
+# within the container on the host. This will provide local access to a
+# container without using SSH. The template will attempt to work within the
+# home directory of the user that was attached to the container and source
+# that user's environment variables by default.
+ATTACH_TEMPLATE = """#!/usr/bin/env bash
+pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
+ if [[ -f ".bashrc" ]];then
+ source .bashrc
+ unset HOSTNAME
+ fi
+popd
+
+# User defined command
+%(container_command)s
+"""
+
+
+def create_script(command):
+ """Write out a script onto a target.
+
+ This method should remain backward compatible with older Python versions
+ when executing from within the container.
+
+ :param command: command to run, this can be a script and can use spacing
+ with newlines as separation.
+ :type command: ``str``
+ """
+
+ (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
+ f = os.fdopen(fd, 'wb')
+ try:
+ f.write(to_bytes(ATTACH_TEMPLATE % {'container_command': command}, errors='surrogate_or_strict'))
+ f.flush()
+ finally:
+ f.close()
+
+ # Ensure the script is executable.
+ os.chmod(script_file, int('0700', 8))
+
+ # Output log file.
+ stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')
+
+ # Error log file.
+ stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')
+
+ # Execute the script command.
+ try:
+ subprocess.Popen(
+ [script_file],
+ stdout=stdout_file,
+ stderr=stderr_file
+ ).communicate()
+ finally:
+ # Close the log files.
+ stderr_file.close()
+ stdout_file.close()
+
+ # Remove the script file upon completion of execution.
+ os.remove(script_file)
+
+
+class LxcContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.state = self.module.params['state']
+ self.state_change = False
+ self.lxc_vg = None
+ self.lxc_path = self.module.params['lxc_path']
+ self.container_name = self.module.params['name']
+ self.container = self.get_container_bind()
+ self.archive_info = None
+ self.clone_info = None
+
+ def get_container_bind(self):
+ return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type container_name: ``str``
+ :returns: True or False if the container is found.
+ :rtype: ``bool``
+ """
+ return any(c == container_name for c in lxc.list_containers(config_path=lxc_path))
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
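+ # Illustrative only: _add_variables({'--fssize': '5G'}, ['lxc-create'])
+ # (hypothetical input) returns ['lxc-create', '--fssize', '5G'].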
+ for key, value in variables_dict.items():
+ build_command.append(str(key))
+ build_command.append(str(value))
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ false_values = BOOLEANS_FALSE.union([None, ''])
+ result = dict(
+ (v, self.module.params[k])
+ for k, v in variables.items()
+ if self.module.params[k] not in false_values
+ )
+ return result
+
+ def _config(self):
+ """Configure an LXC container.
+
+ Write new configuration values to the lxc config file. This will
+ stop the container if it is running, write the new options, and then
+ restart the container upon completion.
+ """
+
+ _container_config = self.module.params['container_config']
+ if not _container_config:
+ return False
+
+ container_config_file = self.container.config_file_name
+ with open(container_config_file, 'rb') as f:
+ container_config = to_text(f.read(), errors='surrogate_or_strict').splitlines(True)
+
+ parsed_options = [i.split('=', 1) for i in _container_config]
+ config_change = False
+ for key, value in parsed_options:
+ key = key.strip()
+ value = value.strip()
+ new_entry = '%s = %s\n' % (key, value)
+ keyre = re.compile(r'%s(\s+)?=' % key)
+ for option_line in container_config:
+ # Look for key in config
+ if keyre.match(option_line):
+ dummy, _value = option_line.split('=', 1)
+ config_value = ' '.join(_value.split())
+ line_index = container_config.index(option_line)
+ # If the sanitized values don't match replace them
+ if value != config_value:
+ line_index += 1
+ if new_entry not in container_config:
+ config_change = True
+ container_config.insert(line_index, new_entry)
+ # Break the flow as values are written or not at this point
+ break
+ else:
+ config_change = True
+ container_config.append(new_entry)
+
+ # If the config changed restart the container.
+ if config_change:
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.container.stop()
+
+ with open(container_config_file, 'wb') as f:
+ f.writelines([to_bytes(line, errors='surrogate_or_strict') for line in container_config])
+
+ self.state_change = True
+ if container_state == 'running':
+ self._container_startup()
+ elif container_state == 'frozen':
+ self._container_startup()
+ self.container.freeze()
+
+ def _container_create_clone(self):
+ """Clone a new LXC container from an existing container.
+
+ This method will clone an existing container to a new container using
+ the `clone_name` variable as the new container name. The method will
+ create a container if the container `name` does not exist.
+
+ Note that cloning a container will ensure that the original container
+ is "stopped" before the clone can be done. Because this operation can
+ require a state change the method will return the original container
+ to its prior state upon completion of the clone.
+
+ Once the clone is complete the new container will be left in a stopped
+ state.
+ """
+
+ # Ensure that the state of the original container is stopped
+ container_state = self._get_state()
+ if container_state != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ # lxc-clone is deprecated in favor of lxc-copy
+ clone_vars = 'variables-lxc-copy'
+ clone_cmd = self.module.get_bin_path('lxc-copy')
+ if not clone_cmd:
+ clone_vars = 'variables-lxc-clone'
+ clone_cmd = self.module.get_bin_path('lxc-clone', True)
+
+ build_command = [
+ clone_cmd,
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['clone'][clone_vars]
+ ),
+ build_command=build_command
+ )
+
+ # Add the snapshot argument to the clone command when requested.
+ if self.module.params['clone_snapshot']:
+ build_command.append('--snapshot')
+ # Check for backing_store == overlayfs; if so, force the use of snapshot.
+ # If overlayfs is used and snapshot is unset the clone command will
+ # fail with an unsupported type.
+ elif self.module.params['backing_store'] == 'overlayfs':
+ build_command.append('--snapshot')
+
+ rc, return_data, err = self.module.run_command(build_command)
+ if rc != 0:
+ message = "Failed executing %s." % os.path.basename(clone_cmd)
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(
+ build_command
+ )
+ )
+ else:
+ self.state_change = True
+ # Restore the original state of the origin container if it was
+ # not in a stopped state.
+ if container_state == 'running':
+ self.container.start()
+ elif container_state == 'frozen':
+ self.container.start()
+ self.container.freeze()
+
+ return True
+
+ def _create(self):
+ """Create a new LXC container.
+
+ This method will build and execute a shell command to build the
+ container. It would have been nice to simply use the lxc python library;
+ however, at the time this was written the python library, in both py2
+ and py3, didn't support some of the more advanced container create
+ processes. These missing processes mainly revolve around backing
+ LXC containers with block devices.
+ """
+
+ build_command = [
+ self.module.get_bin_path('lxc-create', True),
+ '--name', self.container_name,
+ '--quiet'
+ ]
+
+ build_command = self._add_variables(
+ variables_dict=self._get_vars(
+ variables=LXC_COMMAND_MAP['create']['variables']
+ ),
+ build_command=build_command
+ )
+
+ # Load logging for the instance when creating it.
+ if self.module.params['container_log']:
+ # Set the logging path to /var/log/lxc if the uid is root, else
+ # set it to the home folder of the executing user.
+ try:
+ if os.getuid() != 0:
+ log_path = os.getenv('HOME')
+ else:
+ if not os.path.isdir('/var/log/lxc/'):
+ os.makedirs('/var/log/lxc/')
+ log_path = '/var/log/lxc/'
+ except OSError:
+ log_path = os.getenv('HOME')
+
+ build_command.extend([
+ '--logfile',
+ os.path.join(
+ log_path, 'lxc-%s.log' % self.container_name
+ ),
+ '--logpriority',
+ self.module.params.get(
+ 'container_log_level'
+ ).upper()
+ ])
+
+ # Add the template commands to the end of the command if there are any
+ template_options = self.module.params['template_options']
+ if template_options:
+ build_command.append('--')
+ build_command += shlex.split(template_options)
+
+ rc, return_data, err = self.module.run_command(build_command)
+ if rc != 0:
+ message = "Failed executing lxc-create."
+ self.failure(
+ err=err, rc=rc, msg=message, command=' '.join(build_command)
+ )
+ else:
+ self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name': self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on whether the container was unfrozen.
+ :rtype: ``bool``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+ If the container is not found the state returned is "absent".
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ return 'absent'
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params['container_command']
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
+ def _container_startup(self, timeout=60):
+ """Ensure a container is started.
+
+ :param timeout: Time before the startup operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ self.container = self.get_container_bind()
+ for dummy in range(timeout):
+ if self._get_state() == 'running':
+ return True
+
+ self.container.start()
+ self.state_change = True
+ # post startup sleep for 1 second.
+ time.sleep(1)
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+ This will store the archive information as self.archive_info
+ """
+
+ if self.module.params['archive']:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params['clone_name']
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
+ def _destroyed(self, timeout=60):
+ """Ensure a container is destroyed.
+
+ :param timeout: Time before the destroy operation is abandoned.
+ :type timeout: ``int``
+ """
+
+ for dummy in range(timeout):
+ if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ break
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+
+ if self._get_state() != 'stopped':
+ self.state_change = True
+ self.container.stop()
+
+ if self.container.destroy():
+ self.state_change = True
+
+ # post destroy attempt sleep for 1 second.
+ time.sleep(1)
+ else:
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to destroy container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+ msg='The container [ %s ] failed to be destroyed. Check'
+ ' that lxc is available and that the container is in a'
+ ' functional state.' % self.container_name
+ )
+
+ def _frozen(self, count=0):
+ """Ensure a container is frozen.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='frozen')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ pass
+ elif container_state == 'running':
+ self.container.freeze()
+ self.state_change = True
+ else:
+ self._container_startup()
+ self.container.freeze()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._frozen(count)
+
+ def _restarted(self, count=0):
+ """Ensure a container is restarted.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='restart')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Run container startup
+ self._container_startup()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._restarted(count)
+
+ def _stopped(self, count=0):
+ """Ensure a container is stopped.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='stop')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ if self._get_state() != 'stopped':
+ self.container.stop()
+ self.state_change = True
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._stopped(count)
+
+ def _started(self, count=0):
+ """Ensure a container is started.
+
+ If the container does not exist the container will be created.
+
+ :param count: number of times this command has been called by itself.
+ :type count: ``int``
+ """
+
+ self.check_count(count=count, method='start')
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ container_state = self._get_state()
+ if container_state == 'running':
+ pass
+ elif container_state == 'frozen':
+ self._unfreeze()
+ elif not self._container_startup():
+ self.failure(
+ lxc_container=self._container_data(),
+ error='Failed to start container'
+ ' [ %s ]' % self.container_name,
+ rc=1,
+                    msg='The container [ %s ] failed to start. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % self.container_name
+ )
+
+ # Return data
+ self._execute_command()
+
+ # Perform any configuration updates
+ self._config()
+
+ # Check if the container needs to have an archive created.
+ self._check_archive()
+
+ # Check if the container is to be cloned
+ self._check_clone()
+ else:
+ self._create()
+ count += 1
+ self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+        :param vg_name: Name of the volume group.
+        :type vg_name: ``str``
+        :returns: free size and unit of the VG
+        :rtype: ``tuple``
+ """
+
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
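+        # A sketch of the 'Free PE / Size' line this parser assumes from
+        # 'vgdisplay <vg> --units g' (exact layout varies by LVM version):
+        #   Free  PE / Size       1280 / 5.00 GiB
+        # split() yields ['Free', 'PE', '/', 'Size', '1280', '/', '5.00',
+        # 'GiB'], so [-2] is the free size and [-1] the unit.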
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+        :param lv_name: Name of the logical volume.
+        :type lv_name: ``str``
+        :returns: size and unit of the LV
+        :rtype: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+        free_space, measurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+                'Snapshot size [ %s ] is greater than the free space [ %s ]'
+                ' on volume group [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+                msg='failed to mount LVM LV %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
+ def _create_tar(self, source_dir):
+ """Create an archive of a given ``source_dir`` to ``output_path``.
+
+ :param source_dir: Path to the directory to be archived.
+ :type source_dir: ``str``
+ """
+
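+        # Restrict the archive to owner-only permissions while it is being
+        # written; the previous umask is restored after the tar run below.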
+ old_umask = os.umask(int('0077', 8))
+
+ archive_path = self.module.params['archive_path']
+ if not os.path.isdir(archive_path):
+ os.makedirs(archive_path)
+
+ archive_compression = self.module.params['archive_compression']
+ compression_type = LXC_COMPRESSION_MAP[archive_compression]
+
+        # Build the archive file name from the archive path, container name and compression extension.
+ archive_name = '%s.%s' % (
+ os.path.join(
+ archive_path,
+ self.container_name
+ ),
+ compression_type['extension']
+ )
+
+ build_command = [
+ self.module.get_bin_path('tar', True),
+ '--directory=%s' % os.path.realpath(source_dir),
+ compression_type['argument'],
+ archive_name,
+ '.'
+ ]
+
+ rc, stdout, err = self.module.run_command(
+ build_command
+ )
+
+ os.umask(old_umask)
+
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to create tar archive',
+ command=' '.join(build_command)
+ )
+
+ return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
+ def _rsync_data(self, container_path, temp_dir):
+ """Sync the container directory to the temp directory.
+
+        :param container_path: path to the container root filesystem
+ :type container_path: ``str``
+ :param temp_dir: path to the temporary local working directory
+ :type temp_dir: ``str``
+ """
+        # This loop supports overlayfs archives. It squashes all of the
+        # layers into a single archive.
+ fs_paths = container_path.split(':')
+ if 'overlayfs' in fs_paths:
+ fs_paths.pop(fs_paths.index('overlayfs'))
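+        # For example (paths assumed for illustration), a rootfs of
+        #   'overlayfs:/var/lib/lxc/c1/rootfs:/var/lib/lxc/c1/delta0'
+        # leaves ['/var/lib/lxc/c1/rootfs', '/var/lib/lxc/c1/delta0'], and
+        # each layer's parent directory is rsynced into temp_dir below.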
+
+ for fs_path in fs_paths:
+ # Set the path to the container data
+ fs_path = os.path.dirname(fs_path)
+
+ # Run the sync command
+ build_command = [
+ self.module.get_bin_path('rsync', True),
+ '-aHAX',
+ fs_path,
+ temp_dir,
+ ]
+ rc, stdout, err = self.module.run_command(
+ build_command,
+ )
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to perform archive',
+ command=' '.join(build_command)
+ )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t', 'overlayfs',
+ '-o', 'lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self.module.run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
+ def _container_create_tar(self):
+ """Create a tar archive from an LXC container.
+
+ The process is as follows:
+ * Stop or Freeze the container
+ * Create temporary dir
+ * Copy container and config to temporary directory
+ * If LVM backed:
+ * Create LVM snapshot of LV backing the container
+ * Mount the snapshot to tmpdir/rootfs
+ * Restore the state of the container
+ * Create tar of tmpdir
+ * Clean up
+ """
+
+ # Create a temp dir
+ temp_dir = tempfile.mkdtemp()
+
+ # Set the name of the working dir, temp + container_name
+ work_dir = os.path.join(temp_dir, self.container_name)
+
+ # LXC container rootfs
+ lxc_rootfs = self.container.get_config_item('lxc.rootfs')
+
+ # Test if the containers rootfs is a block device
+ block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))
+
+ # Test if the container is using overlayfs
+ overlayfs_backed = lxc_rootfs.startswith('overlayfs')
+
+ mount_point = os.path.join(work_dir, 'rootfs')
+
+ # Set the snapshot name if needed
+ snapshot_name = '%s_lxc_snapshot' % self.container_name
+
+ container_state = self._get_state()
+ try:
+ # Ensure the original container is stopped or frozen
+ if container_state not in ['stopped', 'frozen']:
+ if container_state == 'running':
+ self.container.freeze()
+ else:
+ self.container.stop()
+
+ # Sync the container data from the container_path to work_dir
+ self._rsync_data(lxc_rootfs, temp_dir)
+
+ if block_backed:
+ if snapshot_name not in self._lvm_lv_list():
+ if not os.path.exists(mount_point):
+ os.makedirs(mount_point)
+
+ # Take snapshot
+ size, measurement = self._get_lv_size(
+ lv_name=self.container_name
+ )
+ self._lvm_snapshot_create(
+ source_lv=self.container_name,
+ snapshot_name=snapshot_name,
+ snapshot_size_gb=size
+ )
+
+ # Mount snapshot
+ self._lvm_lv_mount(
+ lv_name=snapshot_name,
+ mount_point=mount_point
+ )
+ else:
+ self.failure(
+ err='snapshot [ %s ] already exists' % snapshot_name,
+ rc=1,
+ msg='The snapshot [ %s ] already exists. Please clean'
+ ' up old snapshot of containers before continuing.'
+ % snapshot_name
+ )
+ elif overlayfs_backed:
+ lowerdir, upperdir = lxc_rootfs.split(':')[1:]
+ self._overlayfs_mount(
+ lowerdir=lowerdir,
+ upperdir=upperdir,
+ mount_point=mount_point
+ )
+
+ # Set the state as changed and set a new fact
+ self.state_change = True
+ return self._create_tar(source_dir=work_dir)
+ finally:
+ if block_backed or overlayfs_backed:
+ # unmount snapshot
+ self._unmount(mount_point)
+
+ if block_backed:
+ # Remove snapshot
+ self._lvm_lv_remove(snapshot_name)
+
+ # Restore original state of container
+ if container_state == 'running':
+ if self._get_state() == 'frozen':
+ self.container.unfreeze()
+ else:
+ self.container.start()
+
+ # Remove tmpdir
+ shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
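+        """Fail when a state method has recursed more than once.
+
+        The state methods call themselves at most once after creating a
+        missing container; a second level of recursion indicates a failure.
+        """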
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+                msg='The container [ %s ] failed to %s. Check that lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
+ def failure(self, **kwargs):
+ """Return a Failure when running an Ansible command.
+
+ :param error: ``str`` Error that occurred.
+ :param rc: ``int`` Return code while executing an Ansible command.
+ :param msg: ``str`` Message to report.
+ """
+
+ self.module.fail_json(**kwargs)
+
+ def run(self):
+ """Run the main method."""
+
+ action = getattr(self, LXC_ANSIBLE_STATES[self.state])
+ action()
+
+ outcome = self._container_data()
+ if self.archive_info:
+ outcome.update(self.archive_info)
+
+ if self.clone_info:
+ outcome.update(self.clone_info)
+
+ self.module.exit_json(
+ changed=self.state_change,
+ lxc_container=outcome
+ )
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ template=dict(
+ type='str',
+ default='ubuntu'
+ ),
+ backing_store=dict(
+ type='str',
+ choices=list(LXC_BACKING_STORE.keys()),
+ default='dir'
+ ),
+ template_options=dict(
+ type='str'
+ ),
+ config=dict(
+ type='path',
+ ),
+ vg_name=dict(
+ type='str',
+ default='lxc'
+ ),
+ thinpool=dict(
+ type='str'
+ ),
+ fs_type=dict(
+ type='str',
+ default='ext4'
+ ),
+ fs_size=dict(
+ type='str',
+ default='5G'
+ ),
+ directory=dict(
+ type='path'
+ ),
+ zfs_root=dict(
+ type='str'
+ ),
+ lv_name=dict(
+ type='str'
+ ),
+ lxc_path=dict(
+ type='path'
+ ),
+ state=dict(
+ choices=list(LXC_ANSIBLE_STATES.keys()),
+ default='started'
+ ),
+ container_command=dict(
+ type='str'
+ ),
+ container_config=dict(
+ type='list',
+ elements='str'
+ ),
+ container_log=dict(
+ type='bool',
+ default=False
+ ),
+ container_log_level=dict(
+ choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
+ default='INFO'
+ ),
+ clone_name=dict(
+ type='str',
+ ),
+ clone_snapshot=dict(
+ type='bool',
+            default=False
+ ),
+ archive=dict(
+ type='bool',
+ default=False
+ ),
+ archive_path=dict(
+ type='path',
+ ),
+ archive_compression=dict(
+ choices=list(LXC_COMPRESSION_MAP.keys()),
+ default='gzip'
+ )
+ ),
+ supports_check_mode=False,
+ required_if=([
+ ('archive', True, ['archive_path'])
+ ]),
+ )
+
+ if not HAS_LXC:
+ module.fail_json(
+ msg='The `lxc` module is not importable. Check the requirements.'
+ )
+
+ if not module.params['lv_name']:
+ module.params['lv_name'] = module.params['name']
+
+ lxc_manage = LxcContainerManagement(module=module)
+ lxc_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxca_cmms.py b/ansible_collections/community/general/plugins/modules/lxca_cmms.py
new file mode 100644
index 000000000..1f811a7ef
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxca_cmms.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_cmms
+short_description: Custom module for lxca cmms inventory utility
+description:
+  - This module returns/displays inventory details of CMMs.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ uuid:
+ description:
+      UUID of the device; this is a string longer than 16 characters.
+ type: str
+
+ command_options:
+ description:
+      Options to filter CMMs information.
+ default: cmms
+ choices:
+ - cmms
+ - cmms_by_uuid
+ - cmms_by_chassis_uuid
+ type: str
+
+ chassis:
+ description:
+      UUID of the chassis; this is a string longer than 16 characters.
+ type: str
+
+extends_documentation_fragment:
+ - community.general.lxca_common
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# get all cmms info
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+
+# get specific cmms info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_uuid
+
+# get specific cmms info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_cmms:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: cmms_by_chassis_uuid
+
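+# A minimal sketch: register the result and display the returned cmmList
+# (field name taken from the RETURN sample below)
+- name: Get cmms data and display it
+  community.general.lxca_cmms:
+    login_user: USERID
+    login_password: Password
+    auth_url: "https://10.243.15.168"
+  register: cmms_result
+
+- name: Show cmms inventory
+  ansible.builtin.debug:
+    var: cmms_result.result.cmmList
+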
+'''
+
+RETURN = r'''
+result:
+  description: CMMs detail from LXCA.
+ returned: success
+ type: dict
+ sample:
+ cmmList:
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: ''
+ model: ''
+ type: 'CMM'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple cmms details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import cmms
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _cmms(module, lxca_con):
+ return cmms(lxca_con)
+
+
+def _cmms_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return cmms(lxca_con, module.params['uuid'])
+
+
+def _cmms_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return cmms(lxca_con, chassis=module.params['chassis'])
+
+
+def setup_module_object():
+ """
+    This function merges the argument spec and creates the Ansible module object.
+    :return: the AnsibleModule object
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'cmms': _cmms,
+ 'cmms_by_uuid': _cmms_by_uuid,
+ 'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ uuid=dict(default=None),
+ chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+    This function invokes the selected command.
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+        error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxca_nodes.py b/ansible_collections/community/general/plugins/modules/lxca_nodes.py
new file mode 100644
index 000000000..3b37322ed
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxca_nodes.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author:
+ - Naval Patel (@navalkp)
+ - Prashant Bhosale (@prabhosa)
+module: lxca_nodes
+short_description: Custom module for lxca nodes inventory utility
+description:
+  - This module returns/displays inventory details of nodes.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ uuid:
+ description:
+      UUID of the device; this is a string longer than 16 characters.
+ type: str
+
+ command_options:
+ description:
+      Options to filter nodes information.
+ default: nodes
+ choices:
+ - nodes
+ - nodes_by_uuid
+ - nodes_by_chassis_uuid
+ - nodes_status_managed
+ - nodes_status_unmanaged
+ type: str
+
+ chassis:
+ description:
+      UUID of the chassis; this is a string longer than 16 characters.
+ type: str
+
+extends_documentation_fragment:
+ - community.general.lxca_common
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+# get all nodes info
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes
+
+# get specific nodes info by uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ uuid: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_uuid
+
+# get specific nodes info by chassis uuid
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ chassis: "3C737AA5E31640CE949B10C129A8B01F"
+ command_options: nodes_by_chassis_uuid
+
+# get managed nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_managed
+
+# get unmanaged nodes
+- name: Get nodes data from LXCA
+ community.general.lxca_nodes:
+ login_user: USERID
+ login_password: Password
+ auth_url: "https://10.243.15.168"
+ command_options: nodes_status_unmanaged
+
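+# A minimal sketch: iterate over the returned nodeList (field names taken
+# from the RETURN sample below)
+- name: Get managed nodes and list their UUIDs
+  community.general.lxca_nodes:
+    login_user: USERID
+    login_password: Password
+    auth_url: "https://10.243.15.168"
+    command_options: nodes_status_managed
+  register: lxca_nodes_result
+
+- name: Show node UUIDs
+  ansible.builtin.debug:
+    msg: "{{ item.uuid }}"
+  loop: "{{ lxca_nodes_result.result.nodeList }}"
+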
+'''
+
+RETURN = r'''
+result:
+  description: Nodes detail from LXCA.
+ returned: always
+ type: dict
+ sample:
+ nodeList:
+ - machineType: '6241'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ - machineType: '8871'
+ model: 'AC1'
+ type: 'Rack-TowerServer'
+ uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
+ # bunch of properties
+ # Multiple nodes details
+'''
+
+import traceback
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
+try:
+ from pylxca import nodes
+except ImportError:
+ pass
+
+
+UUID_REQUIRED = 'UUID of device is required for nodes_by_uuid command.'
+CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for nodes_by_chassis_uuid command.'
+SUCCESS_MSG = "Success %s result"
+
+
+def _nodes(module, lxca_con):
+ return nodes(lxca_con)
+
+
+def _nodes_by_uuid(module, lxca_con):
+ if not module.params['uuid']:
+ module.fail_json(msg=UUID_REQUIRED)
+ return nodes(lxca_con, module.params['uuid'])
+
+
+def _nodes_by_chassis_uuid(module, lxca_con):
+ if not module.params['chassis']:
+ module.fail_json(msg=CHASSIS_UUID_REQUIRED)
+ return nodes(lxca_con, chassis=module.params['chassis'])
+
+
+def _nodes_status_managed(module, lxca_con):
+ return nodes(lxca_con, status='managed')
+
+
+def _nodes_status_unmanaged(module, lxca_con):
+ return nodes(lxca_con, status='unmanaged')
+
+
+def setup_module_object():
+ """
+    This function merges the argument spec and creates the Ansible module object.
+    :return: the AnsibleModule object
+ """
+ args_spec = dict(LXCA_COMMON_ARGS)
+ args_spec.update(INPUT_ARG_SPEC)
+ module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
+
+ return module
+
+
+FUNC_DICT = {
+ 'nodes': _nodes,
+ 'nodes_by_uuid': _nodes_by_uuid,
+ 'nodes_by_chassis_uuid': _nodes_by_chassis_uuid,
+ 'nodes_status_managed': _nodes_status_managed,
+ 'nodes_status_unmanaged': _nodes_status_unmanaged,
+}
+
+
+INPUT_ARG_SPEC = dict(
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ uuid=dict(default=None), chassis=dict(default=None)
+)
+
+
+def execute_module(module):
+ """
+    This function invokes the selected command.
+ :param module: Ansible module object
+ """
+ try:
+ with connection_object(module) as lxca_con:
+ result = FUNC_DICT[module.params['command_options']](module, lxca_con)
+ module.exit_json(changed=False,
+ msg=SUCCESS_MSG % module.params['command_options'],
+ result=result)
+ except Exception as exception:
+ error_msg = '; '.join(exception.args)
+ module.fail_json(msg=error_msg, exception=traceback.format_exc())
+
+
+def main():
+ module = setup_module_object()
+ has_pylxca(module)
+ execute_module(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxd_container.py b/ansible_collections/community/general/plugins/modules/lxd_container.py
new file mode 100644
index 000000000..f10fc4872
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxd_container.py
@@ -0,0 +1,862 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD instances
+description:
+ - Management of LXD containers and virtual machines.
+author: "Hiroaki Nakamura (@hnakamur)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ version_added: 6.4.0
+ diff_mode:
+ support: full
+ version_added: 6.4.0
+options:
+ name:
+ description:
+ - Name of an instance.
+ type: str
+ required: true
+ project:
+ description:
+ - 'Project of an instance.
+ See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
+ required: false
+ type: str
+ version_added: 4.8.0
+ architecture:
+ description:
+ - 'The architecture for the instance (for example C(x86_64) or C(i686)).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ type: str
+ required: false
+ config:
+ description:
+ - 'The config for the instance (for example C({"limits.cpu": "2"})).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ - If the instance already exists and its "config" values in metadata
+ obtained from the LXD API U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#instances-containers-and-virtual-machines)
+ are different, this module tries to apply the configurations.
+ - The keys starting with C(volatile.) are ignored for this comparison when I(ignore_volatile_options=true).
+ type: dict
+ required: false
+ ignore_volatile_options:
+ description:
+ - If set to C(true), options starting with C(volatile.) are ignored. As a result,
+ they are reapplied for each execution.
+ - This default behavior can be changed by setting this option to C(false).
+ - The default value changed from C(true) to C(false) in community.general 6.0.0.
+ type: bool
+ required: false
+ default: false
+ version_added: 3.7.0
+ profiles:
+ description:
+ - Profile to be used by the instance.
+ type: list
+ elements: str
+ devices:
+ description:
+ - 'The devices for the instance
+ (for example C({ "rootfs": { "path": "/dev/kvm", "type": "unix-char" }})).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).'
+ type: dict
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the instance is ephemeral (for example C(true) or C(false)).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1).
+ required: false
+ type: bool
+ source:
+ description:
+ - 'The source for the instance
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).'
+ - 'See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1) for complete API documentation.'
+ - 'Note that C(protocol) accepts two choices: C(lxd) or C(simplestreams).'
+ required: false
+ type: dict
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of an instance.
+ required: false
+ default: started
+ type: str
+ target:
+ description:
+ - For cluster deployments. Will attempt to create an instance on a target node.
+ If the instance exists elsewhere in a cluster, then it will not be replaced or moved.
+        The name must match the name of a node as shown in C(lxc cluster list).
+ type: str
+ required: false
+ version_added: 1.0.0
+ timeout:
+ description:
+ - A timeout for changing the state of the instance.
+ - This is also used as a timeout for waiting until IPv4 addresses
+        are assigned to all network interfaces in the instance after
+ starting or restarting.
+ required: false
+ default: 30
+ type: int
+ type:
+ description:
+ - Instance type can be either C(virtual-machine) or C(container).
+ required: false
+ default: container
+ choices:
+ - container
+ - virtual-machine
+ type: str
+ version_added: 4.1.0
+ wait_for_ipv4_addresses:
+ description:
+      - If set to C(true), the module waits until IPv4 addresses
+        are assigned to all network interfaces in the instance after
+ starting or restarting.
+ required: false
+ default: false
+ type: bool
+ wait_for_container:
+ description:
+      - If set to C(true), the task waits until the LXD operation reports
+        success when performing container operations.
+ default: false
+ type: bool
+ version_added: 4.4.0
+ force_stop:
+ description:
+      - If set to C(true), the module force-stops the instance
+        when stopping or restarting it.
+ required: false
+ default: false
+ type: bool
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C(${HOME}/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - 'You need to set this password on the LXD server before
+ running this module using the following command:
+ C(lxc config set core.trust_password <some random password>).
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+      - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+  - An instance can be a container or a virtual machine; each of them must have a unique name. If you attempt to create an instance
+    with a name that already exists in the user's namespace, the module
+    simply returns as "unchanged".
+  - There are two ways to run commands inside a container or virtual machine: using the command
+    module, or using the ansible lxd connection plugin bundled in Ansible >=
+    2.1. The latter requires Python to be installed in the instance, which can
+    be done with the command module.
+ - You can copy a file from the host to the instance
+    with the Ansible M(ansible.builtin.copy) and M(ansible.builtin.template) modules and the C(community.general.lxd) connection plugin.
+ See the example below.
+  - You can copy a file from the created instance to localhost
+ with C(command=lxc file pull instance_name/dir/filename filename).
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example for creating a Ubuntu container and install python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ ignore_volatile_options: true
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd # if you get a 404, try setting protocol: simplestreams
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: Check python is installed in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: Install python in container
+ delegate_to: mycontainer
+ ansible.builtin.raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for creating an Ubuntu 14.04 container using an image fingerprint.
+# This requires changing 'server' and 'protocol' key values, replacing the
+# 'alias' key with 'fingerprint' and supplying an appropriate value that
+# matches the container image you wish to use.
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ community.general.lxd_container:
+ name: mycontainer
+ ignore_volatile_options: true
+ state: started
+ source:
+ type: image
+ mode: pull
+ # Provides current (and older) Ubuntu images with listed fingerprints
+ server: https://cloud-images.ubuntu.com/releases
+ # Protocol used by 'ubuntu' remote (as shown by 'lxc remote list')
+ protocol: simplestreams
+ # This provides an Ubuntu 14.04 LTS amd64 image from 20150814.
+ fingerprint: e9a8bdfab6dc
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for creating container in project other than default
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container in project mytestproject
+ community.general.lxd_container:
+ name: mycontainer
+ project: mytestproject
+ ignore_volatile_options: true
+ state: started
+ source:
+ protocol: simplestreams
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ alias: ubuntu/20.04/cloud
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: absent
+ type: container
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ name: mycontainer
+ state: restarted
+ type: container
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ community.general.lxd_container:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: Copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ ansible.builtin.fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+
+# An example for LXD cluster deployments. This example will create two new containers on specific
+# nodes - 'node01' and 'node02'. In 'target:', 'node01' and 'node02' are names of LXD cluster
+# members that the LXD cluster recognizes, not ansible inventory names; see: 'lxc cluster list'.
+# LXD API calls can be made to any LXD member; in this example, we send API requests to
+# 'node01.example.com', which matches the ansible inventory name.
+- hosts: node01.example.com
+ tasks:
+ - name: Create LXD container
+ community.general.lxd_container:
+ name: new-container-1
+ ignore_volatile_options: true
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node01
+
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-container-2
+ ignore_volatile_options: true
+ state: started
+ source:
+ type: image
+ mode: pull
+ alias: ubuntu/xenial/amd64
+ target: node02
+
+# An example for creating a virtual machine
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create container on another node
+ community.general.lxd_container:
+ name: new-vm-1
+ type: virtual-machine
+ state: started
+ ignore_volatile_options: true
+ wait_for_ipv4_addresses: true
+ profiles: ["default"]
+ source:
+ protocol: simplestreams
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ alias: debian/11
+ timeout: 600
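+
+# An example for freezing a running container. state=frozen maps to the
+# LXD 'freeze' action (see LXD_ANSIBLE_STATES below).
+- hosts: localhost
+  connection: local
+  tasks:
+    - name: Freeze a container
+      community.general.lxd_container:
+        name: mycontainer
+        state: frozen
+        type: container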
+'''
+
+RETURN = '''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the instance.
+ returned: when state is started or restarted
+ type: dict
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the instance.
+ returned: when state is started or restarted
+ type: str
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the instance.
+ returned: success
+ type: list
+ sample: ["create", "start"]
+'''
+import copy
+import datetime
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
+# when a particular state is evoked.
+LXD_ANSIBLE_STATES = {
+ 'started': '_started',
+ 'stopped': '_stopped',
+ 'restarted': '_restarted',
+ 'absent': '_destroyed',
+ 'frozen': '_frozen',
+}
+
+# ANSIBLE_LXD_STATES is a map of states of lxd containers to the Ansible
+# lxd_container module state parameter value.
+ANSIBLE_LXD_STATES = {
+ 'Running': 'started',
+ 'Stopped': 'stopped',
+ 'Frozen': 'frozen',
+}
+
+# ANSIBLE_LXD_DEFAULT_URL is a default value of the lxd endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
+]
+
+# CONFIG_CREATION_PARAMS is a list of attribute names that are only applied
+# on instance creation.
+CONFIG_CREATION_PARAMS = ['source']
+
+
+class LXDContainerManagement(object):
+ def __init__(self, module):
+ """Management of LXC containers via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self.project = self.module.params['project']
+ self._build_config()
+
+ self.state = self.module.params['state']
+
+ self.timeout = self.module.params['timeout']
+ self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
+ self.force_stop = self.module.params['force_stop']
+ self.addresses = None
+ self.target = self.module.params['target']
+ self.wait_for_container = self.module.params['wait_for_container']
+
+ self.type = self.module.params['type']
+
+        # The LXD REST API provides separate endpoints for containers and virtual machines.
+ self.api_endpoint = None
+ if self.type == 'container':
+ self.api_endpoint = '/1.0/containers'
+ elif self.type == 'virtual-machine':
+ self.api_endpoint = '/1.0/virtual-machines'
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
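+        # Prefer an explicitly configured URL; otherwise fall back to the
+        # snap socket when it exists on disk, and to the default URL last.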
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+ self.diff = {'before': {}, 'after': {}}
+ self.old_instance_json = {}
+ self.old_sections = {}
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_instance_json(self):
+ url = '{0}/{1}'.format(self.api_endpoint, self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ return self.client.do('GET', url, ok_error_codes=[404])
+
+ def _get_instance_state_json(self):
+ url = '{0}/{1}/state'.format(self.api_endpoint, self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ return self.client.do('GET', url, ok_error_codes=[404])
+
+ @staticmethod
+ def _instance_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]
+
+ def _change_state(self, action, force_stop=False):
+ url = '{0}/{1}/state'.format(self.api_endpoint, self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ body_json = {'action': action, 'timeout': self.timeout}
+ if force_stop:
+ body_json['force'] = True
+ if not self.module.check_mode:
+ return self.client.do('PUT', url, body_json=body_json)
+
+ def _create_instance(self):
+ url = self.api_endpoint
+ url_params = dict()
+ if self.target:
+ url_params['target'] = self.target
+ if self.project:
+ url_params['project'] = self.project
+ if url_params:
+ url = '{0}?{1}'.format(url, urlencode(url_params))
+ config = self.config.copy()
+ config['name'] = self.name
+ if not self.module.check_mode:
+ self.client.do('POST', url, config, wait_for_container=self.wait_for_container)
+ self.actions.append('create')
+
+ def _start_instance(self):
+ self._change_state('start')
+ self.actions.append('start')
+
+ def _stop_instance(self):
+ self._change_state('stop', self.force_stop)
+ self.actions.append('stop')
+
+ def _restart_instance(self):
+ self._change_state('restart', self.force_stop)
+ self.actions.append('restart')
+
+ def _delete_instance(self):
+ url = '{0}/{1}'.format(self.api_endpoint, self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ if not self.module.check_mode:
+ self.client.do('DELETE', url)
+ self.actions.append('delete')
+
+ def _freeze_instance(self):
+ self._change_state('freeze')
+ self.actions.append('freeze')
+
+ def _unfreeze_instance(self):
+ self._change_state('unfreeze')
+ self.actions.append('unfreeze')
+
+ def _instance_ipv4_addresses(self, ignore_devices=None):
+ ignore_devices = ['lo'] if ignore_devices is None else ignore_devices
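+        # Collect the IPv4 ('inet') addresses of every network device,
+        # skipping ignored devices such as the loopback interface.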
+ data = (self._get_instance_state_json() or {}).get('metadata', None) or {}
+ network = dict((k, v) for k, v in (data.get('network', None) or {}).items() if k not in ignore_devices)
+ addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items())
+ return addresses
+
+ @staticmethod
+ def _has_all_ipv4_addresses(addresses):
+ return len(addresses) > 0 and all(len(v) > 0 for v in addresses.values())
+
+ def _get_addresses(self):
+ try:
+ due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
+ while datetime.datetime.now() < due:
+ time.sleep(1)
+ addresses = self._instance_ipv4_addresses()
+ if self._has_all_ipv4_addresses(addresses) or self.module.check_mode:
+ self.addresses = addresses
+ return
+ except LXDClientException as e:
+ e.msg = 'timeout for getting IPv4 addresses'
+ raise
+
+ def _started(self):
+ if self.old_state == 'absent':
+ self._create_instance()
+ self._start_instance()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_instance()
+ elif self.old_state == 'stopped':
+ self._start_instance()
+ if self._needs_to_apply_instance_configs():
+ self._apply_instance_configs()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _stopped(self):
+ if self.old_state == 'absent':
+ self._create_instance()
+ else:
+ if self.old_state == 'stopped':
+ if self._needs_to_apply_instance_configs():
+ self._start_instance()
+ self._apply_instance_configs()
+ self._stop_instance()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_instance()
+ if self._needs_to_apply_instance_configs():
+ self._apply_instance_configs()
+ self._stop_instance()
+
+ def _restarted(self):
+ if self.old_state == 'absent':
+ self._create_instance()
+ self._start_instance()
+ else:
+ if self.old_state == 'frozen':
+ self._unfreeze_instance()
+ if self._needs_to_apply_instance_configs():
+ self._apply_instance_configs()
+ self._restart_instance()
+ if self.wait_for_ipv4_addresses:
+ self._get_addresses()
+
+ def _destroyed(self):
+ if self.old_state != 'absent':
+ if self.old_state == 'frozen':
+ self._unfreeze_instance()
+ if self.old_state != 'stopped':
+ self._stop_instance()
+ self._delete_instance()
+
+ def _frozen(self):
+ if self.old_state == 'absent':
+ self._create_instance()
+ self._start_instance()
+ self._freeze_instance()
+ else:
+ if self.old_state == 'stopped':
+ self._start_instance()
+ if self._needs_to_apply_instance_configs():
+ self._apply_instance_configs()
+ self._freeze_instance()
+
+ def _needs_to_change_instance_config(self, key):
+ if key not in self.config:
+ return False
+
+ if key == 'config':
+ # self.old_sections is already filtered for volatile keys if necessary
+ old_configs = dict(self.old_sections.get(key, None) or {})
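+            # For example (values assumed for illustration): a desired
+            # config {'limits.cpu': '2'} against an old config
+            # {'limits.cpu': '1'} returns True, and the new value is later
+            # applied by _apply_instance_configs().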
+ for k, v in self.config['config'].items():
+ if k not in old_configs:
+ return True
+ if old_configs[k] != v:
+ return True
+ return False
+ else:
+ old_configs = self.old_sections.get(key, {})
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_instance_configs(self):
+ for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS):
+ if self._needs_to_change_instance_config(param):
+ return True
+ return False
+
+ def _apply_instance_configs(self):
+ old_metadata = copy.deepcopy(self.old_instance_json).get('metadata', None) or {}
+ body_json = {}
+ for param in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS):
+ if param in old_metadata:
+ body_json[param] = old_metadata[param]
+
+ if self._needs_to_change_instance_config(param):
+ if param == 'config':
+ body_json['config'] = body_json.get('config', None) or {}
+ for k, v in self.config['config'].items():
+ body_json['config'][k] = v
+ else:
+ body_json[param] = self.config[param]
+ self.diff['after']['instance'] = body_json
+ url = '{0}/{1}'.format(self.api_endpoint, self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ if not self.module.check_mode:
+ self.client.do('PUT', url, body_json=body_json)
+ self.actions.append('apply_instance_configs')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+ self.ignore_volatile_options = self.module.params.get('ignore_volatile_options')
+
+ self.old_instance_json = self._get_instance_json()
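+            # Keep only the sections this module manages (CONFIG_PARAMS
+            # minus the creation-only ones) and, inside dict sections, drop
+            # 'volatile.*' keys when ignore_volatile_options is set.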
+ self.old_sections = dict(
+ (section, content) if not isinstance(content, dict)
+ else (section, dict((k, v) for k, v in content.items()
+ if not (self.ignore_volatile_options and k.startswith('volatile.'))))
+ for section, content in (self.old_instance_json.get('metadata', None) or {}).items()
+ if section in set(CONFIG_PARAMS) - set(CONFIG_CREATION_PARAMS)
+ )
+
+ self.diff['before']['instance'] = self.old_sections
+ # preliminary, will be overwritten in _apply_instance_configs() if called
+ self.diff['after']['instance'] = self.config
+
+ self.old_state = self._instance_json_to_module_state(self.old_instance_json)
+ self.diff['before']['state'] = self.old_state
+ self.diff['after']['state'] = self.state
+
+ action = getattr(self, LXD_ANSIBLE_STATES[self.state])
+ action()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'log_verbosity': self.module._verbosity,
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions,
+ 'diff': self.diff,
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ if self.addresses is not None:
+ result_json['addresses'] = self.addresses
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions,
+ 'diff': self.diff,
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True,
+ ),
+ project=dict(
+ type='str',
+ ),
+ architecture=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ ignore_volatile_options=dict(
+ type='bool',
+ default=False,
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ ephemeral=dict(
+ type='bool',
+ ),
+ profiles=dict(
+ type='list',
+ elements='str',
+ ),
+ source=dict(
+ type='dict',
+ ),
+ state=dict(
+ choices=list(LXD_ANSIBLE_STATES.keys()),
+ default='started',
+ ),
+ target=dict(
+ type='str',
+ ),
+ timeout=dict(
+ type='int',
+ default=30
+ ),
+ type=dict(
+ type='str',
+ default='container',
+ choices=['container', 'virtual-machine'],
+ ),
+ wait_for_container=dict(
+ type='bool',
+ default=False,
+ ),
+ wait_for_ipv4_addresses=dict(
+ type='bool',
+ default=False,
+ ),
+ force_stop=dict(
+ type='bool',
+ default=False,
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL,
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket',
+ ),
+ client_key=dict(
+ type='path',
+ aliases=['key_file'],
+ ),
+ client_cert=dict(
+ type='path',
+ aliases=['cert_file'],
+ ),
+ trust_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ lxd_manage = LXDContainerManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxd_profile.py b/ansible_collections/community/general/plugins/modules/lxd_profile.py
new file mode 100644
index 000000000..45f499b78
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxd_profile.py
@@ -0,0 +1,563 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+# Copyright (c) 2020, Frank Dornheim <dornheim@posteo.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ type: str
+ project:
+ description:
+ - 'Project of a profile.
+ See U(https://github.com/lxc/lxd/blob/master/doc/projects.md).'
+ type: str
+ required: false
+ version_added: 4.8.0
+ description:
+ description:
+ - Description of the profile.
+ type: str
+ config:
+ description:
+      - 'The config for the profile (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+        is different, this module tries to apply the configuration.
+      - Not all config values can be applied to an existing profile;
+        you may need to delete and recreate the profile.
+ required: false
+ type: dict
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ type: dict
+ new_name:
+ description:
+      - A new name for the profile.
+      - If this parameter is specified, the profile is renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ type: str
+ merge_profile:
+ description:
+ - Merge the configuration of the present profile with the new desired configuration,
+ instead of replacing it.
+ required: false
+ default: false
+ type: bool
+ version_added: 2.1.0
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The unix domain socket path when LXD is installed by snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+        running this module using the following command:
+        C(lxc config set core.trust_password <some random password>).
+        See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).
+      - If trust_password is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+    with a name that already exists in the user's namespace, the module
+    simply returns as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile in project mytestproject
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ community.general.lxd_profile:
+ name: testprofile
+ project: mytestproject
+ state: present
+ config: {}
+ description: test profile in project mytestproject
+ devices: {}
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create macvlan profile
+ community.general.lxd_profile:
+ url: https://127.0.0.1:8443
+ # These client_cert and client_key values are equal to the default values.
+ #client_cert: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #client_key: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for modify/merge a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Merge a profile
+ community.general.lxd_profile:
+ merge_profile: true
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ community.general.lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ community.general.lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the profile.
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: ["create"]
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+
+# ANSIBLE_LXD_DEFAULT_URL is the default value of the LXD endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROFILES_STATES is a list of supported states
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
+
+class LXDProfileManagement(object):
+ def __init__(self, module):
+ """Management of LXC profiles via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self.project = self.module.params['project']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = '{0}/.config/lxc/client.key'.format(os.environ['HOME'])
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = '{0}/.config/lxc/client.crt'.format(os.environ['HOME'])
+ self.debug = self.module._verbosity >= 4
+
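+ # URL selection: prefer an explicitly configured url; otherwise fall back
+ # to the snap socket when it exists on disk, and finally to the classic
+ # socket path.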
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_profile_json(self):
+ url = '/1.0/profiles/{0}'.format(self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ return self.client.do('GET', url, ok_error_codes=[404])
+
+ @staticmethod
+ def _profile_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_profile(self):
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile does not exist and the state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_profile()
+ if self._needs_to_apply_profile_configs():
+ self._apply_profile_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_profile()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the profile exists and the specified state is absent',
+ changed=False)
+
+ def _create_profile(self):
+ url = '/1.0/profiles'
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', url, config)
+ self.actions.append('create')
+
+ def _rename_profile(self):
+ url = '/1.0/profiles/{0}'.format(self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ config = {'name': self.new_name}
+ self.client.do('POST', url, config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_profile_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_profile_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_profile_configs(self):
+ return (
+ self._needs_to_change_profile_config('config') or
+ self._needs_to_change_profile_config('description') or
+ self._needs_to_change_profile_config('devices')
+ )
+
+ def _merge_dicts(self, source, destination):
+ """Merge Dictionaries
+
+ Get a list of filehandle numbers from logger to be handed to
+ DaemonContext.files_preserve
+
+ Args:
+ dict(source): source dict
+ dict(destination): destination dict
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(destination): merged dict"""
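+ # Behavior sketch: source values win and nested dicts are merged in place,
+ # e.g. _merge_dicts({'eth0': {'parent': 'br1'}},
+ # {'eth0': {'parent': 'br0', 'type': 'nic'}})
+ # returns {'eth0': {'parent': 'br1', 'type': 'nic'}} and mutates the
+ # destination dict directly.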
+ for key, value in source.items():
+ if isinstance(value, dict):
+ # get node or create one
+ node = destination.setdefault(key, {})
+ self._merge_dicts(value, node)
+ else:
+ destination[key] = value
+ return destination
+
+ def _merge_config(self, config):
+ """ merge profile
+
+ Merge the configuration of the present profile with the new desired config items
+
+ Args:
+ dict(config): Dict with the old config in 'metadata' and new config in 'config'
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(config): new config"""
+ # merge or copy the sections from the existing profile to 'config'
+ for item in ['config', 'description', 'devices', 'name', 'used_by']:
+ if item in config:
+ config[item] = self._merge_dicts(config['metadata'][item], config[item])
+ else:
+ config[item] = config['metadata'][item]
+ # merge or copy the sections from the ansible-task to 'config'
+ return self._merge_dicts(self.config, config)
+
+ def _generate_new_config(self, config):
+ """ rebuild profile
+
+ Rebuild the Profile by the configuration provided in the play.
+ Existing configurations are discarded.
+
+ This ist the default behavior.
+
+ Args:
+ dict(config): Dict with the old config in 'metadata' and new config in 'config'
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(config): new config"""
+ for k, v in self.config.items():
+ config[k] = v
+ return config
+
+ def _apply_profile_configs(self):
+ """ Selection of the procedure: rebuild or merge
+
+ The standard behavior is that all information not contained
+ in the play is discarded.
+
+ If "merge_profile" is provides in the play and "True", then existing
+ configurations from the profile and new ones defined are merged.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ config = self.old_profile_json.copy()
+ if self.module.params['merge_profile']:
+ config = self._merge_config(config)
+ else:
+ config = self._generate_new_config(config)
+
+ # upload config to lxd
+ url = '/1.0/profiles/{0}'.format(self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ self.client.do('PUT', url, config)
+ self.actions.append('apply_profile_configs')
+
+ def _delete_profile(self):
+ url = '/1.0/profiles/{0}'.format(self.name)
+ if self.project:
+ url = '{0}?{1}'.format(url, urlencode(dict(project=self.project)))
+ self.client.do('DELETE', url)
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_profile_json = self._get_profile_json()
+ self.old_state = self._profile_json_to_module_state(self.old_profile_json)
+ self._update_profile()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ project=dict(
+ type='str',
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ devices=dict(
+ type='dict',
+ ),
+ merge_profile=dict(
+ type='bool',
+ default=False
+ ),
+ state=dict(
+ choices=PROFILES_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='path',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='path',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProfileManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/lxd_project.py b/ansible_collections/community/general/plugins/modules/lxd_project.py
new file mode 100644
index 000000000..983531fa0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/lxd_project.py
@@ -0,0 +1,461 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: lxd_project
+short_description: Manage LXD projects
+version_added: 4.8.0
+description:
+ - Management of LXD projects.
+author: "Raymond Chang (@we10710aa)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the project.
+ required: true
+ type: str
+ description:
+ description:
+ - Description of the project.
+ type: str
+ config:
+ description:
+ - 'The config for the project (for example C({"features.profiles": "true"})).
+ See U(https://linuxcontainers.org/lxd/docs/master/projects/).'
+ - If the project already exists and its "config" value in metadata
+ obtained from
+ C(GET /1.0/projects/<name>)
+ U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_get)
+ are different, then this module tries to apply the configurations.
+ type: dict
+ new_name:
+ description:
+ - A new name of a project.
+ - If this parameter is specified a project will be renamed to this name.
+ See U(https://linuxcontainers.org/lxd/docs/master/api/#/projects/project_post).
+ required: false
+ type: str
+ merge_project:
+ description:
+ - Merge the configuration of the present project with the new desired configuration,
+ instead of replacing it. If configuration is the same after merged, no change will be made.
+ required: false
+ default: false
+ type: bool
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a project.
+ required: false
+ default: present
+ type: str
+ url:
+ description:
+ - The Unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ type: str
+ snap_url:
+ description:
+ - The Unix domain socket path when LXD is installed by the snap package manager.
+ required: false
+ default: unix:/var/snap/lxd/common/lxd/unix.socket
+ type: str
+ client_key:
+ description:
+ - The client certificate key file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.key).
+ required: false
+ aliases: [ key_file ]
+ type: path
+ client_cert:
+ description:
+ - The client certificate file path.
+ - If not specified, it defaults to C($HOME/.config/lxc/client.crt).
+ required: false
+ aliases: [ cert_file ]
+ type: path
+ trust_password:
+ description:
+ - The client trusted password.
+ - 'You need to set this password on the LXD server before
+ running this module using the following command:
+ C(lxc config set core.trust_password <some random password>)
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/).'
+ - If I(trust_password) is set, this module sends a request for
+ authentication before sending any requests.
+ required: false
+ type: str
+notes:
+ - Projects must have a unique name. If you attempt to create a project
+ with a name that already exists in the user's namespace, the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a project
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a project
+ community.general.lxd_project:
+ name: ansible-test-project
+ state: present
+ config: {}
+ description: my new project
+
+# An example for renaming a project
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename ansible-test-project to ansible-test-project-new-name
+ community.general.lxd_project:
+ name: ansible-test-project
+ new_name: ansible-test-project-new-name
+ state: present
+ config: {}
+ description: my new project
+'''
+
+RETURN = '''
+old_state:
+ description: The old state of the project.
+ returned: success
+ type: str
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ elements: dict
+ contains:
+ type:
+ description: Type of actions performed, currently only C(sent request).
+ type: str
+ sample: "sent request"
+ request:
+ description: HTTP request sent to LXD server.
+ type: dict
+ contains:
+ method:
+ description: Method of HTTP request.
+ type: str
+ sample: "GET"
+ url:
+ description: URL path of HTTP request.
+ type: str
+ sample: "/1.0/projects/test-project"
+ json:
+ description: JSON body of HTTP request.
+ type: str
+ sample: "(too long to be placed here)"
+ timeout:
+ description: Timeout of HTTP request, C(null) if unset.
+ type: int
+ sample: null
+ response:
+ description: HTTP response received from LXD server.
+ type: dict
+ contains:
+ json:
+ description: JSON of HTTP response.
+ type: str
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the project.
+ returned: success
+ type: list
+ elements: str
+ sample: ["create"]
+'''
+
+from ansible_collections.community.general.plugins.module_utils.lxd import (
+ LXDClient, LXDClientException, default_key_file, default_cert_file
+)
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+# ANSIBLE_LXD_DEFAULT_URL is the default value of the LXD endpoint
+ANSIBLE_LXD_DEFAULT_URL = 'unix:/var/lib/lxd/unix.socket'
+
+# PROJECTS_STATES is a list of supported states
+PROJECTS_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description'
+]
+
+
+class LXDProjectManagement(object):
+ def __init__(self, module):
+ """Management of LXC projects via Ansible.
+
+ :param module: Processed Ansible Module.
+ :type module: ``object``
+ """
+ self.module = module
+ self.name = self.module.params['name']
+ self._build_config()
+ self.state = self.module.params['state']
+ self.new_name = self.module.params.get('new_name', None)
+
+ self.key_file = self.module.params.get('client_key')
+ if self.key_file is None:
+ self.key_file = default_key_file()
+ self.cert_file = self.module.params.get('client_cert')
+ if self.cert_file is None:
+ self.cert_file = default_cert_file()
+ self.debug = self.module._verbosity >= 4
+
+ try:
+ if self.module.params['url'] != ANSIBLE_LXD_DEFAULT_URL:
+ self.url = self.module.params['url']
+ elif os.path.exists(self.module.params['snap_url'].replace('unix:', '')):
+ self.url = self.module.params['snap_url']
+ else:
+ self.url = self.module.params['url']
+ except Exception as e:
+ self.module.fail_json(msg=e.msg)
+
+ try:
+ self.client = LXDClient(
+ self.url, key_file=self.key_file, cert_file=self.cert_file,
+ debug=self.debug
+ )
+ except LXDClientException as e:
+ self.module.fail_json(msg=e.msg)
+ self.trust_password = self.module.params.get('trust_password', None)
+ self.actions = []
+
+ def _build_config(self):
+ self.config = {}
+ for attr in CONFIG_PARAMS:
+ param_val = self.module.params.get(attr, None)
+ if param_val is not None:
+ self.config[attr] = param_val
+
+ def _get_project_json(self):
+ return self.client.do(
+ 'GET', '/1.0/projects/{0}'.format(self.name),
+ ok_error_codes=[404]
+ )
+
+ @staticmethod
+ def _project_json_to_module_state(resp_json):
+ if resp_json['type'] == 'error':
+ return 'absent'
+ return 'present'
+
+ def _update_project(self):
+ if self.state == 'present':
+ if self.old_state == 'absent':
+ if self.new_name is None:
+ self._create_project()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the project does not exist and the state is present',
+ changed=False)
+ else:
+ if self.new_name is not None and self.new_name != self.name:
+ self._rename_project()
+ if self._needs_to_apply_project_configs():
+ self._apply_project_configs()
+ elif self.state == 'absent':
+ if self.old_state == 'present':
+ if self.new_name is None:
+ self._delete_project()
+ else:
+ self.module.fail_json(
+ msg='new_name must not be set when the project exists and the specified state is absent',
+ changed=False)
+
+ def _create_project(self):
+ config = self.config.copy()
+ config['name'] = self.name
+ self.client.do('POST', '/1.0/projects', config)
+ self.actions.append('create')
+
+ def _rename_project(self):
+ config = {'name': self.new_name}
+ self.client.do('POST', '/1.0/projects/{0}'.format(self.name), config)
+ self.actions.append('rename')
+ self.name = self.new_name
+
+ def _needs_to_change_project_config(self, key):
+ if key not in self.config:
+ return False
+ old_configs = self.old_project_json['metadata'].get(key, None)
+ return self.config[key] != old_configs
+
+ def _needs_to_apply_project_configs(self):
+ return (
+ self._needs_to_change_project_config('config') or
+ self._needs_to_change_project_config('description')
+ )
+
+ def _merge_dicts(self, source, destination):
+ """ Return a new dict that merges two dicts,
+ with values in the source dict overwriting those in the destination dict
+
+ Args:
+ dict(source): source dict
+ dict(destination): destination dict
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ dict(result): merged dict"""
+ result = destination.copy()
+ for key, value in source.items():
+ if isinstance(value, dict):
+ # get the existing node or create one, then keep the merged copy;
+ # the recursive call returns a new dict and must not be discarded
+ node = result.setdefault(key, {})
+ result[key] = self._merge_dicts(value, node)
+ else:
+ result[key] = value
+ return result
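+ # Example (sketch): _merge_dicts({'config': {'features.images': 'false'}},
+ # {'config': {'features.images': 'true', 'features.profiles': 'true'}})
+ # returns {'config': {'features.images': 'false', 'features.profiles': 'true'}}
+ # without mutating the destination dict, unlike the in-place variant in the
+ # lxd_profile module.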
+
+ def _apply_project_configs(self):
+ """ Selection of the procedure: rebuild or merge
+
+ The standard behavior is that all information not contained
+ in the play is discarded.
+
+ If "merge_project" is provides in the play and "True", then existing
+ configurations from the project and new ones defined are merged.
+
+ Args:
+ None
+ Kwargs:
+ None
+ Raises:
+ None
+ Returns:
+ None"""
+ old_config = dict()
+ old_metadata = self.old_project_json['metadata'].copy()
+ for attr in CONFIG_PARAMS:
+ old_config[attr] = old_metadata[attr]
+
+ if self.module.params['merge_project']:
+ config = self._merge_dicts(self.config, old_config)
+ if config == old_config:
+ # no need to call api if merged config is the same
+ # as old config
+ return
+ else:
+ config = self.config.copy()
+ # upload config to lxd
+ self.client.do('PUT', '/1.0/projects/{0}'.format(self.name), config)
+ self.actions.append('apply_projects_configs')
+
+ def _delete_project(self):
+ self.client.do('DELETE', '/1.0/projects/{0}'.format(self.name))
+ self.actions.append('delete')
+
+ def run(self):
+ """Run the main method."""
+
+ try:
+ if self.trust_password is not None:
+ self.client.authenticate(self.trust_password)
+
+ self.old_project_json = self._get_project_json()
+ self.old_state = self._project_json_to_module_state(
+ self.old_project_json)
+ self._update_project()
+
+ state_changed = len(self.actions) > 0
+ result_json = {
+ 'changed': state_changed,
+ 'old_state': self.old_state,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ result_json['logs'] = self.client.logs
+ self.module.exit_json(**result_json)
+ except LXDClientException as e:
+ state_changed = len(self.actions) > 0
+ fail_params = {
+ 'msg': e.msg,
+ 'changed': state_changed,
+ 'actions': self.actions
+ }
+ if self.client.debug:
+ fail_params['logs'] = e.kwargs['logs']
+ self.module.fail_json(**fail_params)
+
+
+def main():
+ """Ansible Main module."""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(
+ type='str',
+ required=True
+ ),
+ new_name=dict(
+ type='str',
+ ),
+ config=dict(
+ type='dict',
+ ),
+ description=dict(
+ type='str',
+ ),
+ merge_project=dict(
+ type='bool',
+ default=False
+ ),
+ state=dict(
+ choices=PROJECTS_STATES,
+ default='present'
+ ),
+ url=dict(
+ type='str',
+ default=ANSIBLE_LXD_DEFAULT_URL
+ ),
+ snap_url=dict(
+ type='str',
+ default='unix:/var/snap/lxd/common/lxd/unix.socket'
+ ),
+ client_key=dict(
+ type='path',
+ aliases=['key_file']
+ ),
+ client_cert=dict(
+ type='path',
+ aliases=['cert_file']
+ ),
+ trust_password=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=False,
+ )
+
+ lxd_manage = LXDProjectManagement(module=module)
+ lxd_manage.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/macports.py b/ansible_collections/community/general/plugins/modules/macports.py
new file mode 100644
index 000000000..6f40d0938
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/macports.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Jimmy Tang <jcftang@gmail.com>
+# Based on okpg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
+# (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: macports
+author: "Jimmy Tang (@jcftang)"
+short_description: Package manager for MacPorts
+description:
+ - Manages MacPorts packages (ports).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - A list of port names.
+ aliases: ['port']
+ type: list
+ elements: str
+ selfupdate:
+ description:
+ - Update MacPorts and the ports tree, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port selfupdate).
+ aliases: ['update_cache', 'update_ports']
+ default: false
+ type: bool
+ state:
+ description:
+ - Indicates the desired state of the port.
+ choices: [ 'present', 'absent', 'active', 'inactive', 'installed', 'removed']
+ default: present
+ type: str
+ upgrade:
+ description:
+ - Upgrade all outdated ports, either prior to installing ports or as a separate step.
+ - Equivalent to running C(port upgrade outdated).
+ default: false
+ type: bool
+ variant:
+ description:
+ - A port variant specification.
+ - 'I(variant) is only supported when I(state) is C(installed) or C(present).'
+ aliases: ['variants']
+ type: str
+'''
+EXAMPLES = '''
+- name: Install the foo port
+ community.general.macports:
+ name: foo
+
+- name: Install the universal, x11 variant of the foo port
+ community.general.macports:
+ name: foo
+ variant: +universal+x11
+
+- name: Install a list of ports
+ community.general.macports:
+ name: "{{ ports }}"
+ vars:
+ ports:
+ - foo
+ - foo-tools
+
+- name: Update MacPorts and the ports tree, then upgrade all outdated ports
+ community.general.macports:
+ selfupdate: true
+ upgrade: true
+
+- name: Update MacPorts and the ports tree, then install the foo port
+ community.general.macports:
+ name: foo
+ selfupdate: true
+
+- name: Remove the foo port
+ community.general.macports:
+ name: foo
+ state: absent
+
+- name: Activate the foo port
+ community.general.macports:
+ name: foo
+ state: active
+
+- name: Deactivate the foo port
+ community.general.macports:
+ name: foo
+ state: inactive
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def selfupdate(module, port_path):
+ """ Update Macports and the ports tree. """
+
+ rc, out, err = module.run_command("%s -v selfupdate" % port_path)
+
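+ # `port -v selfupdate` prints a "Total number of ports parsed" summary; a
+ # non-zero count, or a note that a new MacPorts release is being installed,
+ # indicates that something actually changed.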
+ if rc == 0:
+ updated = any(
+ re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
+ re.search(r'Installing new Macports release', s.strip())
+ for s in out.split('\n')
+ if s
+ )
+ if updated:
+ changed = True
+ msg = "MacPorts updated successfully"
+ else:
+ changed = False
+ msg = "MacPorts already up-to-date"
+
+ return (changed, msg, out, err)
+ else:
+ module.fail_json(msg="Failed to update MacPorts", stdout=out, stderr=err)
+
+
+def upgrade(module, port_path):
+ """ Upgrade outdated ports. """
+
+ rc, out, err = module.run_command("%s upgrade outdated" % port_path)
+
+ # rc is 1 when there is nothing to upgrade, so check stdout first.
+ if out.strip() == "Nothing to upgrade.":
+ changed = False
+ msg = "Ports already upgraded"
+ return (changed, msg, out, err)
+ elif rc == 0:
+ changed = True
+ msg = "Outdated ports upgraded successfully"
+ return (changed, msg, out, err)
+ else:
+ module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
+
+
+def query_port(module, port_path, name, state="present"):
+ """ Returns whether a port is installed or not. """
+
+ if state == "present":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and out.strip().startswith(name + " "):
+ return True
+
+ return False
+
+ elif state == "active":
+
+ rc, out, err = module.run_command([port_path, "-q", "installed", name])
+
+ if rc == 0 and "(active)" in out:
+ return True
+
+ return False
+
+
+def remove_ports(module, port_path, ports, stdout, stderr):
+ """ Uninstalls one or more ports if installed. """
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report which port failed
+ for port in ports:
+ # Query the port first, to see if we even need to remove
+ if not query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
+ stdout += out
+ stderr += err
+ if query_port(module, port_path, port):
+ module.fail_json(msg="Failed to remove %s: %s" % (port, err), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c, stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=False, msg="Port(s) already absent", stdout=stdout, stderr=stderr)
+
+
+def install_ports(module, port_path, ports, variant, stdout, stderr):
+ """ Installs one or more ports if not already installed. """
+
+ install_c = 0
+
+ for port in ports:
+ if query_port(module, port_path, port):
+ continue
+
+ rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
+ stdout += out
+ stderr += err
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to install %s: %s" % (port, err), stdout=stdout, stderr=stderr)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c), stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=False, msg="Port(s) already present", stdout=stdout, stderr=stderr)
+
+
+def activate_ports(module, port_path, ports, stdout, stderr):
+ """ Activate a port if it's inactive. """
+
+ activate_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to activate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
+
+ if query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s activate %s" % (port_path, port))
+ stdout += out
+ stderr += err
+
+ if not query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to activate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
+
+ activate_c += 1
+
+ if activate_c > 0:
+ module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c), stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=False, msg="Port(s) already active", stdout=stdout, stderr=stderr)
+
+
+def deactivate_ports(module, port_path, ports, stdout, stderr):
+ """ Deactivate a port if it's active. """
+
+ deactivated_c = 0
+
+ for port in ports:
+ if not query_port(module, port_path, port):
+ module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port), stdout=stdout, stderr=stderr)
+
+ if not query_port(module, port_path, port, state="active"):
+ continue
+
+ rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
+ stdout += out
+ stderr += err
+ if query_port(module, port_path, port, state="active"):
+ module.fail_json(msg="Failed to deactivate %s: %s" % (port, err), stdout=stdout, stderr=stderr)
+
+ deactivated_c += 1
+
+ if deactivated_c > 0:
+ module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c), stdout=stdout, stderr=stderr)
+
+ module.exit_json(changed=False, msg="Port(s) already inactive", stdout=stdout, stderr=stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', aliases=["port"]),
+ selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
+ upgrade=dict(default=False, type='bool'),
+ variant=dict(aliases=["variants"], default=None, type='str')
+ )
+ )
+
+ stdout = ""
+ stderr = ""
+
+ port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
+
+ p = module.params
+
+ if p["selfupdate"]:
+ (changed, msg, out, err) = selfupdate(module, port_path)
+ stdout += out
+ stderr += err
+ if not (p["name"] or p["upgrade"]):
+ module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
+
+ if p["upgrade"]:
+ (changed, msg, out, err) = upgrade(module, port_path)
+ stdout += out
+ stderr += err
+ if not p["name"]:
+ module.exit_json(changed=changed, msg=msg, stdout=stdout, stderr=stderr)
+
+ pkgs = p["name"]
+
+ variant = p["variant"]
+
+ if p["state"] in ["present", "installed"]:
+ install_ports(module, port_path, pkgs, variant, stdout, stderr)
+
+ elif p["state"] in ["absent", "removed"]:
+ remove_ports(module, port_path, pkgs, stdout, stderr)
+
+ elif p["state"] == "active":
+ activate_ports(module, port_path, pkgs, stdout, stderr)
+
+ elif p["state"] == "inactive":
+ deactivate_ports(module, port_path, pkgs, stdout, stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mail.py b/ansible_collections/community/general/plugins/modules/mail.py
new file mode 100644
index 000000000..feaac6923
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mail.py
@@ -0,0 +1,418 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+- Dag Wieers (@dagwieers)
+module: mail
+short_description: Send an email
+description:
+- This module is useful for sending emails from playbooks.
+- One may wonder why automate sending emails? In complex environments
+ there are, from time to time, processes that cannot be automated, either
+ because you lack the authority to make it so, or because not everyone
+ agrees to a common approach.
+- If you cannot automate a specific step, but the step is non-blocking,
+ sending out an email to the responsible party to make them perform their
+ part of the bargain is an elegant way to put the responsibility in
+ someone else's lap.
+- Of course sending out a mail can be equally useful as a way to notify
+ one or more people in a team that a specific action has been
+ (successfully) taken.
+extends_documentation_fragment:
+- community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ sender:
+ description:
+ - The email-address the mail is sent from. May contain address and phrase.
+ type: str
+ default: root
+ aliases: [ from ]
+ to:
+ description:
+ - The email-address(es) the mail is being sent to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ elements: str
+ default: root
+ aliases: [ recipients ]
+ cc:
+ description:
+ - The email-address(es) the mail is being copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ elements: str
+ default: []
+ bcc:
+ description:
+ - The email-address(es) the mail is being 'blind' copied to.
+ - This is a list, which may contain address and phrase portions.
+ type: list
+ elements: str
+ default: []
+ subject:
+ description:
+ - The subject of the email being sent.
+ required: true
+ type: str
+ aliases: [ msg ]
+ body:
+ description:
+ - The body of the email being sent.
+ type: str
+ username:
+ description:
+ - If SMTP requires username.
+ type: str
+ password:
+ description:
+ - If SMTP requires password.
+ type: str
+ host:
+ description:
+ - The mail server.
+ type: str
+ default: localhost
+ port:
+ description:
+ - The mail server port.
+ - This must be a valid integer between 1 and 65534.
+ type: int
+ default: 25
+ attach:
+ description:
+ - A list of pathnames of files to attach to the message.
+ - Attached files will have their content-type set to C(application/octet-stream).
+ type: list
+ elements: path
+ default: []
+ headers:
+ description:
+ - A list of headers which should be added to the message.
+ - Each individual header is specified as C(header=value) (see example below).
+ type: list
+ elements: str
+ default: []
+ charset:
+ description:
+ - The character set of email being sent.
+ type: str
+ default: utf-8
+ subtype:
+ description:
+ - The minor mime type, can be either C(plain) or C(html).
+ - The major type is always C(text).
+ type: str
+ choices: [ html, plain ]
+ default: plain
+ secure:
+ description:
+ - If C(always), the connection will only send email if the connection is encrypted.
+ If the server does not accept the encrypted connection, it will fail.
+ - If C(try), the connection will attempt to set up a secure SSL/TLS session before trying to send.
+ - If C(never), the connection will not attempt to set up a secure SSL/TLS session before sending.
+ - If C(starttls), the connection will try to upgrade to a secure SSL/TLS connection before sending.
+ If it is unable to do so, it will fail.
+ type: str
+ choices: [ always, never, starttls, try ]
+ default: try
+ timeout:
+ description:
+ - Sets the timeout in seconds for connection attempts.
+ type: int
+ default: 20
+ ehlohost:
+ description:
+ - Allows for manual specification of host for EHLO.
+ type: str
+ version_added: 3.8.0
+'''
+
+EXAMPLES = r'''
+- name: Example playbook sending mail to root
+ community.general.mail:
+ subject: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Sending an e-mail using Gmail SMTP servers
+ community.general.mail:
+ host: smtp.gmail.com
+ port: 587
+ username: username@gmail.com
+ password: mysecret
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ delegate_to: localhost
+
+- name: Send e-mail to a bunch of users, attaching files
+ community.general.mail:
+ host: 127.0.0.1
+ port: 2025
+ subject: Ansible-report
+ body: Hello, this is an e-mail. I hope you like it ;-)
+ from: jane@example.net (Jane Jolie)
+ to:
+ - John Doe <j.d@example.org>
+ - Suzie Something <sue@example.com>
+ cc: Charlie Root <root@localhost>
+ attach:
+ - /etc/group
+ - /tmp/avatar2.png
+ headers:
+ - Reply-To=john@example.com
+ - X-Special="Something or other"
+ charset: us-ascii
+ delegate_to: localhost
+
+- name: Sending an e-mail using the remote machine, not the Ansible controller node
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+
+- name: Sending an e-mail using Legacy SSL to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: always
+
+- name: Sending an e-mail using StartTLS to the remote machine
+ community.general.mail:
+ host: localhost
+ port: 25
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+
+- name: Sending an e-mail using StartTLS, remote server, custom EHLO
+ community.general.mail:
+ host: some.smtp.host.tld
+ port: 25
+ ehlohost: my-resolvable-hostname.tld
+ to: John Smith <john.smith@example.com>
+ subject: Ansible-report
+ body: System {{ ansible_hostname }} has been successfully provisioned.
+ secure: starttls
+'''
+
+import os
+import smtplib
+import ssl
+import traceback
+from email import encoders
+from email.utils import parseaddr, formataddr, formatdate
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.header import Header
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import PY3
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=25),
+ ehlohost=dict(type='str', default=None),
+ sender=dict(type='str', default='root', aliases=['from']),
+ to=dict(type='list', elements='str', default=['root'], aliases=['recipients']),
+ cc=dict(type='list', elements='str', default=[]),
+ bcc=dict(type='list', elements='str', default=[]),
+ subject=dict(type='str', required=True, aliases=['msg']),
+ body=dict(type='str'),
+ attach=dict(type='list', elements='path', default=[]),
+ headers=dict(type='list', elements='str', default=[]),
+ charset=dict(type='str', default='utf-8'),
+ subtype=dict(type='str', default='plain', choices=['html', 'plain']),
+ secure=dict(type='str', default='try', choices=['always', 'never', 'starttls', 'try']),
+ timeout=dict(type='int', default=20),
+ ),
+ required_together=[['password', 'username']],
+ )
+
+ username = module.params.get('username')
+ password = module.params.get('password')
+ host = module.params.get('host')
+ port = module.params.get('port')
+ local_hostname = module.params.get('ehlohost')
+ sender = module.params.get('sender')
+ recipients = module.params.get('to')
+ copies = module.params.get('cc')
+ blindcopies = module.params.get('bcc')
+ subject = module.params.get('subject')
+ body = module.params.get('body')
+ attach_files = module.params.get('attach')
+ headers = module.params.get('headers')
+ charset = module.params.get('charset')
+ subtype = module.params.get('subtype')
+ secure = module.params.get('secure')
+ timeout = module.params.get('timeout')
+
+ code = 0
+ secure_state = False
+ sender_phrase, sender_addr = parseaddr(sender)
+
+ if not body:
+ body = subject
+
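+ # Connection strategy: unless secure=never, first try an implicit-SSL
+ # connection; on SSL errors it fails outright when secure=always, otherwise
+ # it falls back to plain SMTP. STARTTLS is attempted later for
+ # secure=starttls and secure=try.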
+ try:
+ if secure != 'never':
+ try:
+ if PY3:
+ smtp = smtplib.SMTP_SSL(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP_SSL(local_hostname=local_hostname, timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+ secure_state = True
+ except ssl.SSLError as e:
+ if secure == 'always':
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ except Exception:
+ pass
+
+ if not secure_state:
+ if PY3:
+ smtp = smtplib.SMTP(host=host, port=port, local_hostname=local_hostname, timeout=timeout)
+ else:
+ smtp = smtplib.SMTP(local_hostname=local_hostname, timeout=timeout)
+ code, smtpmessage = smtp.connect(host, port)
+
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to connect to %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+
+ if int(code) > 0:
+ if not secure_state and secure in ('starttls', 'try'):
+ if smtp.has_extn('STARTTLS'):
+ try:
+ smtp.starttls()
+ secure_state = True
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Unable to start an encrypted session to %s:%s: %s' %
+ (host, port, to_native(e)), exception=traceback.format_exc())
+ try:
+ smtp.ehlo()
+ except smtplib.SMTPException as e:
+ module.fail_json(rc=1, msg='Helo failed for host %s:%s: %s' % (host, port, to_native(e)), exception=traceback.format_exc())
+ else:
+ if secure == 'starttls':
+ module.fail_json(rc=1, msg='StartTLS is not offered on server %s:%s' % (host, port))
+
+ if username and password:
+ if smtp.has_extn('AUTH'):
+ try:
+ smtp.login(username, password)
+ except smtplib.SMTPAuthenticationError:
+ module.fail_json(rc=1, msg='Authentication to %s:%s failed, please check your username and/or password' % (host, port))
+ except smtplib.SMTPException:
+ module.fail_json(rc=1, msg='No suitable authentication method was found on %s:%s' % (host, port))
+ else:
+ module.fail_json(rc=1, msg="No Authentication on the server at %s:%s" % (host, port))
+
+ if not secure_state and (username and password):
+ module.warn('Username and password were sent without encryption')
+
+ msg = MIMEMultipart(_charset=charset)
+ msg['From'] = formataddr((sender_phrase, sender_addr))
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = Header(subject, charset)
+ msg.preamble = "Multipart message"
+
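+ # Each entry is "Header=value"; several headers may be packed into a single
+ # entry using "|", e.g. "Reply-To=john@example.com|X-Special=foo".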
+ for header in headers:
+ # NOTE: Backward compatible with old syntax using '|' as delimiter
+ for hdr in [x.strip() for x in header.split('|')]:
+ try:
+ h_key, h_val = hdr.split('=')
+ h_val = to_native(Header(h_val, charset))
+ msg.add_header(h_key, h_val)
+ except Exception:
+ module.warn("Skipping header '%s', unable to parse" % hdr)
+
+ if 'X-Mailer' not in msg:
+ msg.add_header('X-Mailer', 'Ansible mail module')
+
+ addr_list = []
+ for addr in [x.strip() for x in blindcopies]:
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+
+ to_list = []
+ for addr in [x.strip() for x in recipients]:
+ to_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['To'] = ", ".join(to_list)
+
+ cc_list = []
+ for addr in [x.strip() for x in copies]:
+ cc_list.append(formataddr(parseaddr(addr)))
+ addr_list.append(parseaddr(addr)[1]) # address only, w/o phrase
+ msg['Cc'] = ", ".join(cc_list)
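+ # addr_list now holds the bare envelope recipients (Bcc + To + Cc), while
+ # only To and Cc are added as visible headers; this is what keeps Bcc
+ # recipients hidden from the message itself.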
+
+ part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
+ msg.attach(part)
+
+ # NOTE: Backward compatibility with the old syntax using space as delimiter
+ # is not retained; it broke filenames containing spaces :-(
+ for filename in attach_files:
+ try:
+ part = MIMEBase('application', 'octet-stream')
+ with open(filename, 'rb') as fp:
+ part.set_payload(fp.read())
+ encoders.encode_base64(part)
+ part.add_header('Content-disposition', 'attachment', filename=os.path.basename(filename))
+ msg.attach(part)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send community.general.mail: can't attach file %s: %s" %
+ (filename, to_native(e)), exception=traceback.format_exc())
+
+ composed = msg.as_string()
+
+ try:
+ result = smtp.sendmail(sender_addr, set(addr_list), composed)
+ except Exception as e:
+ module.fail_json(rc=1, msg="Failed to send mail to '%s': %s" %
+ (", ".join(set(addr_list)), to_native(e)), exception=traceback.format_exc())
+
+ smtp.quit()
+
+ if result:
+ for key in result:
+ module.warn("Failed to send mail to '%s': %s %s" % (key, result[key][0], result[key][1]))
+ module.exit_json(msg='Failed to send mail to at least one recipient', result=result)
+
+ module.exit_json(msg='Mail sent successfully', result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/make.py b/ansible_collections/community/general/plugins/modules/make.py
new file mode 100644
index 000000000..ebff6cfe1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/make.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: make
+short_description: Run targets in a Makefile
+requirements:
+ - make
+author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
+description:
+ - Run targets in a Makefile.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ chdir:
+ description:
+ - Change to this directory before running make.
+ type: path
+ required: true
+ file:
+ description:
+ - Use a custom Makefile.
+ type: path
+ jobs:
+ description:
+ - Set the number of make jobs to run concurrently.
+ - Typically, if set, this would be the number of processors and/or threads available to the machine.
+ - This is not supported by all make implementations.
+ type: int
+ version_added: 2.0.0
+ make:
+ description:
+ - Use a specific make binary.
+ type: path
+ version_added: '0.2.0'
+ params:
+ description:
+ - Any extra parameters to pass to make.
+ type: dict
+ target:
+ description:
+ - The target to run.
+ - Typically this would be something like C(install), C(test), or C(all).
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Build the default target
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+
+- name: Run 'install' target as root
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: install
+ become: true
+
+- name: Build 'all' target with extra arguments
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ params:
+ NUM_THREADS: 4
+ BACKEND: lapack
+
+- name: Build 'all' target with a custom Makefile
+ community.general.make:
+ chdir: /home/ubuntu/cool-project
+ target: all
+ file: /some-project/Makefile
+'''
+
+RETURN = r'''
+chdir:
+ description:
+ - The value of the module parameter I(chdir).
+ type: str
+ returned: success
+command:
+ description:
+ - The command built and executed by the module.
+ type: str
+ returned: success
+ version_added: 6.5.0
+file:
+ description:
+ - The value of the module parameter I(file).
+ type: str
+ returned: success
+jobs:
+ description:
+ - The value of the module parameter I(jobs).
+ type: int
+ returned: success
+params:
+ description:
+ - The value of the module parameter I(params).
+ type: dict
+ returned: success
+target:
+ description:
+ - The value of the module parameter I(target).
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_command(command, module, check_rc=True):
+ """
+ Run a command using the module, return
+ the result code and std{err,out} content.
+
+ :param command: list of command arguments
+ :param module: Ansible make module instance
+ :return: return code, stdout content, stderr content
+ """
+ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
+ return rc, sanitize_output(out), sanitize_output(err)
+
+
+def sanitize_output(output):
+ """
+ Sanitize the output string before we
+ pass it to module.fail_json. Defaults
+ the string to empty if it is None, else
+ strips trailing newlines.
+
+ :param output: output to sanitize
+ :return: sanitized output
+ """
+ if output is None:
+ return ''
+ else:
+ return output.rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ target=dict(type='str'),
+ params=dict(type='dict'),
+ chdir=dict(type='path', required=True),
+ file=dict(type='path'),
+ make=dict(type='path'),
+ jobs=dict(type='int'),
+ ),
+ supports_check_mode=True,
+ )
+
+ make_path = module.params['make']
+ if make_path is None:
+ # Build up the invocation of `make` we are going to use
+ # For non-Linux OSes, prefer gmake (GNU make) over make
+ make_path = module.get_bin_path('gmake', required=False)
+ if not make_path:
+ # Fall back to system make
+ make_path = module.get_bin_path('make', required=True)
+ make_target = module.params['target']
+ if module.params['params'] is not None:
+ make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])]
+ else:
+ make_parameters = []
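+ # e.g. params={'NUM_THREADS': 4, 'BACKEND': 'lapack'} becomes
+ # ['NUM_THREADS=4', 'BACKEND=lapack'] on the make command line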
+
+ # build command:
+ # handle any make specific arguments included in params
+ base_command = [make_path]
+ if module.params['jobs'] is not None:
+ jobs = str(module.params['jobs'])
+ base_command.extend(["-j", jobs])
+ if module.params['file'] is not None:
+ base_command.extend(["-f", module.params['file']])
+
+ # add make target
+ base_command.append(make_target)
+
+ # add makefile parameters
+ base_command.extend(make_parameters)
+
+ # Check if the target is already up to date
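+ # GNU make's -q ("question" mode) runs no recipes: it exits 0 if the target
+ # is already up to date and non-zero (typically 1) if work would be done.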
+ rc, out, err = run_command(base_command + ['-q'], module, check_rc=False)
+ if module.check_mode:
+ # If we've been asked to do a dry run, we only need
+ # to report whether or not the target is up to date
+ changed = (rc != 0)
+ else:
+ if rc == 0:
+ # The target is up to date, so we don't have to
+ # do anything
+ changed = False
+ else:
+ # The target isn't up to date, so we need to run it
+ rc, out, err = run_command(base_command, module,
+ check_rc=True)
+ changed = True
+
+ # We don't report the return code, as if this module failed
+ # we would be calling fail_json from run_command, so even if
+ # we had a non-zero return code, we did not fail. However, if
+ # we report a non-zero return code here, we will be marked as
+ # failed regardless of what we signal using the failed= kwarg.
+ module.exit_json(
+ changed=changed,
+ failed=False,
+ stdout=out,
+ stderr=err,
+ target=module.params['target'],
+ params=module.params['params'],
+ chdir=module.params['chdir'],
+ file=module.params['file'],
+ jobs=module.params['jobs'],
+ command=' '.join([shlex_quote(part) for part in base_command]),
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
new file mode 100644
index 000000000..c6cefad6a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_alert_profiles.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alert_profiles
+
+short_description: Configuration of alert profiles for ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alert_profiles module supports adding, updating and deleting alert profiles in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert profile should not exist.
+ - present - alert profile should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The unique alert profile name in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The resource type for the alert profile in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ alerts:
+ type: list
+ elements: str
+ description:
+ - List of alert descriptions to assign to this profile.
+ - Required if state is "present".
+ notes:
+ type: str
+ description:
+ - Optional notes for this profile.
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert profile to ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: present
+ name: Test profile
+ resource_type: ContainerNode
+ alerts:
+ - Test Alert 01
+ - Test Alert 02
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Delete an alert profile from ManageIQ
+ community.general.manageiq_alert_profiles:
+ state: absent
+ name: Test profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlertProfiles(object):
+ """ Object to execute alert profile management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.url = '{api_url}/alert_definition_profiles'.format(api_url=self.api_url)
+
+ def get_profiles(self):
+ """ Get all alert profiles from ManageIQ
+ """
+ try:
+ response = self.client.get(self.url + '?expand=alert_definitions,resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alert profiles: {error}".format(error=e))
+ return response.get('resources') or []
+
+ def get_alerts(self, alert_descriptions):
+ """ Get a list of alert hrefs from a list of alert descriptions
+ """
+ alerts = []
+ for alert_description in alert_descriptions:
+ alert = self.manageiq.find_collection_resource_or_fail("alert_definitions",
+ description=alert_description)
+ alerts.append(alert['href'])
+
+ return alerts
+
+ def add_profile(self, profile):
+ """ Add a new alert profile to ManageIQ
+ """
+ # find all alerts to add to the profile
+ # we do this first to fail early if one is missing.
+ alerts = self.get_alerts(profile['alerts'])
+
+ # build the profile dict to send to the server
+
+ profile_dict = dict(name=profile['name'],
+ description=profile['name'],
+ mode=profile['resource_type'])
+ if profile['notes']:
+ profile_dict['set_data'] = dict(notes=profile['notes'])
+
+ # send it to the server
+ try:
+ result = self.client.post(self.url, resource=profile_dict, action="create")
+ except Exception as e:
+ self.module.fail_json(msg="Creating profile failed {error}".format(error=e))
+
+ # now that it has been created, we can assign the alerts
+ self.assign_or_unassign(result['results'][0], alerts, "assign")
+
+ msg = "Profile {name} created successfully"
+ msg = msg.format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def delete_profile(self, profile):
+ """ Delete an alert profile from ManageIQ
+ """
+ try:
+ self.client.post(profile['href'], action="delete")
+ except Exception as e:
+ self.module.fail_json(msg="Deleting profile failed: {error}".format(error=e))
+
+ msg = "Successfully deleted profile {name}".format(name=profile['name'])
+ return dict(changed=True, msg=msg)
+
+ def get_alert_href(self, alert):
+ """ Get an absolute href for an alert
+ """
+ return "{url}/alert_definitions/{id}".format(url=self.api_url, id=alert['id'])
+
+ def assign_or_unassign(self, profile, resources, action):
+ """ Assign or unassign alerts to profile, and validate the result.
+ """
+ alerts = [dict(href=href) for href in resources]
+
+ subcollection_url = profile['href'] + '/alert_definitions'
+ try:
+ result = self.client.post(subcollection_url, resources=alerts, action=action)
+ if len(result['results']) != len(alerts):
+ msg = "Failed to {action} alerts to profile '{name}'," +\
+ "expected {expected} alerts to be {action}ed," +\
+ "but only {changed} were {action}ed"
+ msg = msg.format(action=action,
+ name=profile['name'],
+ expected=len(alerts),
+ changed=result['results'])
+ self.module.fail_json(msg=msg)
+ except Exception as e:
+ msg = "Failed to {action} alerts to profile '{name}': {error}"
+ msg = msg.format(action=action, name=profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ return result['results']
+
+ def update_profile(self, old_profile, desired_profile):
+ """ Update alert profile in ManageIQ
+ """
+ changed = False
+ # we need to use client.get to query the alert definitions
+ old_profile = self.client.get(old_profile['href'] + '?expand=alert_definitions')
+
+ # figure out which alerts we need to assign / unassign
+ # alerts listed by the user:
+ desired_alerts = set(self.get_alerts(desired_profile['alerts']))
+
+ # alerts which currently exist in the profile
+ if 'alert_definitions' in old_profile:
+ # we use get_alert_href to have a direct href to the alert
+ existing_alerts = set(self.get_alert_href(alert) for alert in old_profile['alert_definitions'])
+ else:
+ # no alerts in this profile
+ existing_alerts = set()
+
+ to_add = list(desired_alerts - existing_alerts)
+ to_remove = list(existing_alerts - desired_alerts)
+
+ # assign / unassign the alerts, if needed
+
+ if to_remove:
+ self.assign_or_unassign(old_profile, to_remove, "unassign")
+ changed = True
+ if to_add:
+ self.assign_or_unassign(old_profile, to_add, "assign")
+ changed = True
+
+ # update other properties
+ profile_dict = dict()
+
+ if old_profile['mode'] != desired_profile['resource_type']:
+ # mode needs to be updated
+ profile_dict['mode'] = desired_profile['resource_type']
+
+ # check if notes need to be updated
+ old_notes = old_profile.get('set_data', {}).get('notes')
+
+ if desired_profile['notes'] != old_notes:
+ profile_dict['set_data'] = dict(notes=desired_profile['notes'])
+
+ if profile_dict:
+ # if we have any updated values
+ changed = True
+ try:
+ result = self.client.post(old_profile['href'],
+ resource=profile_dict,
+ action="edit")
+ except Exception as e:
+ msg = "Updating profile '{name}' failed: {error}"
+ msg = msg.format(name=old_profile['name'], error=e)
+ self.module.fail_json(msg=msg)
+
+ if changed:
+ msg = "Profile {name} updated successfully".format(name=desired_profile['name'])
+ else:
+ msg = "No update needed for profile {name}".format(name=desired_profile['name'])
+ return dict(changed=changed, msg=msg)
+
+
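+# A minimal, illustrative sketch (not part of the module) of the
+# set-difference reconciliation used by update_profile() above; the alert
+# hrefs are hypothetical.
+def _example_reconcile_alerts():
+ desired = {'/api/alert_definitions/1', '/api/alert_definitions/2'}
+ existing = {'/api/alert_definitions/2', '/api/alert_definitions/3'}
+ to_add = list(desired - existing)  # alerts to assign
+ to_remove = list(existing - desired)  # alerts to unassign
+ return to_add, to_remove
+
+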
+def main():
+ argument_spec = dict(
+ name=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ alerts=dict(type='list', elements='str'),
+ notes=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['name', 'resource_type']),
+ ('state', 'absent', ['name'])])
+
+ state = module.params['state']
+ name = module.params['name']
+
+ manageiq = ManageIQ(module)
+ manageiq_alert_profiles = ManageIQAlertProfiles(manageiq)
+
+ existing_profile = manageiq.find_collection_resource_by("alert_definition_profiles",
+ name=name)
+
+ # we need to add or update the alert profile
+ if state == "present":
+ if not existing_profile:
+ # a profile with this name doesn't exist yet, let's create it
+ res_args = manageiq_alert_profiles.add_profile(module.params)
+ else:
+ # a profile with this name exists, we might need to update it
+ res_args = manageiq_alert_profiles.update_profile(existing_profile, module.params)
+
+ # this alert profile should not exist
+ if state == "absent":
+ # if we have an alert profile with this name, delete it
+ if existing_profile:
+ res_args = manageiq_alert_profiles.delete_profile(existing_profile)
+ else:
+ # This alert profile does not exist in ManageIQ, and that's okay
+ msg = "Alert profile '{name}' does not exist in ManageIQ"
+ msg = msg.format(name=name)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_alerts.py b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
new file mode 100644
index 000000000..518b29f1f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_alerts.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_alerts
+
+short_description: Configuration of alerts in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Elad Alfassa (@elad661) <ealfassa@redhat.com>
+description:
+ - The manageiq_alerts module supports adding, updating and deleting alerts in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - absent - alert should not exist.
+ - present - alert should exist.
+ required: false
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The unique alert description in ManageIQ.
+ - Required when state is "absent" or "present".
+ resource_type:
+ type: str
+ description:
+ - The entity type for the alert in ManageIQ. Required when state is "present".
+ choices: ['Vm', 'ContainerNode', 'MiqServer', 'Host', 'Storage', 'EmsCluster',
+ 'ExtManagementSystem', 'MiddlewareServer']
+ expression_type:
+ type: str
+ description:
+ - Expression type.
+ default: hash
+ choices: ["hash", "miq"]
+ expression:
+ type: dict
+ description:
+ - The alert expression for ManageIQ.
+ - Can either be in the "Miq Expression" format or the "Hash Expression" format.
+ - Required if state is "present".
+ enabled:
+ description:
+ - Enable or disable the alert. Required if state is "present".
+ type: bool
+ options:
+ type: dict
+ description:
+ - Additional alert options, such as notification type and frequency.
+
+
+'''
+
+EXAMPLES = '''
+- name: Add an alert with a "hash expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 01
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: ContainerNode
+ expression:
+ eval_method: hostd_log_threshold
+ mode: internal
+ options: {}
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Add an alert with a "miq expression" to ManageIQ
+ community.general.manageiq_alerts:
+ state: present
+ description: Test Alert 02
+ options:
+ notifications:
+ email:
+ to: ["example@example.com"]
+ from: "example@example.com"
+ resource_type: Vm
+ expression_type: miq
+ expression:
+ and:
+ - CONTAINS:
+ tag: Vm.managed-environment
+ value: prod
+ - not:
+ CONTAINS:
+ tag: Vm.host.managed-environment
+ value: prod
+ enabled: true
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Delete an alert from ManageIQ
+ community.general.manageiq_alerts:
+ state: absent
+ description: Test Alert 01
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQAlert(object):
+ """ Represent a ManageIQ alert. Can be initialized with both the format
+ we receive from the server and the format we get from the user.
+ """
+ def __init__(self, alert):
+ self.description = alert['description']
+ self.db = alert['db']
+ self.enabled = alert['enabled']
+ self.options = alert['options']
+ self.hash_expression = None
+ self.miq_expression = None
+
+ if 'hash_expression' in alert:
+ self.hash_expression = alert['hash_expression']
+ if 'miq_expression' in alert:
+ self.miq_expression = alert['miq_expression']
+ if 'exp' in self.miq_expression:
+ # miq_expression is a field that needs a special case, because
+ # it's returned surrounded by a dict named exp even though we don't
+ # send it with that dict.
+ self.miq_expression = self.miq_expression['exp']
+
+ def __eq__(self, other):
+ """ Compare two ManageIQAlert objects
+ """
+ return self.__dict__ == other.__dict__
+
+
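+# A minimal, illustrative sketch (not part of the module): because
+# ManageIQAlert compares attribute dicts, the idempotency check in
+# update_alert() reduces to a simple equality test.
+def _example_alert_equality():
+ spec = dict(description='Test Alert', db='ContainerNode',
+ enabled=True, options=None)
+ return ManageIQAlert(spec) == ManageIQAlert(spec)  # True
+
+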
+class ManageIQAlerts(object):
+ """ Object to execute alert management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+ self.alerts_url = '{api_url}/alert_definitions'.format(api_url=self.api_url)
+
+ def get_alerts(self):
+ """ Get all alerts from ManageIQ
+ """
+ try:
+ response = self.client.get(self.alerts_url + '?expand=resources')
+ except Exception as e:
+ self.module.fail_json(msg="Failed to query alerts: {error}".format(error=e))
+ return response.get('resources', [])
+
+ def validate_hash_expression(self, expression):
+ """ Validate a 'hash expression' alert definition
+ """
+ # hash expressions must have the following fields
+ for key in ['options', 'eval_method', 'mode']:
+ if key not in expression:
+ msg = "Hash expression is missing required field {key}".format(key=key)
+ self.module.fail_json(msg=msg)
+
+ def create_alert_dict(self, params):
+ """ Create a dict representing an alert
+ """
+ if params['expression_type'] == 'hash':
+ # hash expression support depends on https://github.com/ManageIQ/manageiq-api/pull/76
+ self.validate_hash_expression(params['expression'])
+ expression_type = 'hash_expression'
+ else:
+ # actually miq_expression, but we call it "expression" for backwards-compatibility
+ expression_type = 'expression'
+
+ # build the alert
+ alert = dict(description=params['description'],
+ db=params['resource_type'],
+ options=params['options'],
+ enabled=params['enabled'])
+
+ # add the actual expression.
+ alert.update({expression_type: params['expression']})
+
+ return alert
+
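+ @staticmethod
+ def _example_alert_dict():
+ """ A minimal, illustrative sketch (not part of the module) of the dict
+ built by create_alert_dict() for a 'hash' expression_type: the
+ expression is stored under the 'hash_expression' key.
+ """
+ return dict(description='Test Alert 01',
+ db='ContainerNode',
+ options=None,
+ enabled=True,
+ hash_expression={'eval_method': 'hostd_log_threshold',
+ 'mode': 'internal',
+ 'options': {}})
+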
+ def add_alert(self, alert):
+ """ Add a new alert to ManageIQ
+ """
+ try:
+ result = self.client.post(self.alerts_url, action='create', resource=alert)
+
+ msg = "Alert {description} created successfully: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Creating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to create a hash expression
+ msg = msg.format(description=alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def delete_alert(self, alert):
+ """ Delete an alert
+ """
+ try:
+ result = self.client.post('{url}/{id}'.format(url=self.alerts_url,
+ id=alert['id']),
+ action="delete")
+ msg = "Alert {description} deleted: {details}"
+ msg = msg.format(description=alert['description'], details=result)
+ return dict(changed=True, msg=msg)
+ except Exception as e:
+ msg = "Deleting alert {description} failed: {error}"
+ msg = msg.format(description=alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+ def update_alert(self, existing_alert, new_alert):
+ """ Update an existing alert with the values from `new_alert`
+ """
+ new_alert_obj = ManageIQAlert(new_alert)
+ if new_alert_obj == ManageIQAlert(existing_alert):
+ # no change needed - alerts are identical
+ return dict(changed=False, msg="No update needed")
+ else:
+ try:
+ url = '{url}/{id}'.format(url=self.alerts_url, id=existing_alert['id'])
+ result = self.client.post(url, action="edit", resource=new_alert)
+
+ # make sure that the update was indeed successful by comparing
+ # the result to the expected result.
+ if new_alert_obj == ManageIQAlert(result):
+ # success!
+ msg = "Alert {description} updated successfully: {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ return dict(changed=True, msg=msg)
+ else:
+ # unexpected result
+ msg = "Updating alert {description} failed, unexpected result {details}"
+ msg = msg.format(description=existing_alert['description'], details=result)
+
+ self.module.fail_json(msg=msg)
+
+ except Exception as e:
+ msg = "Updating alert {description} failed: {error}"
+ if "Resource expression needs be specified" in str(e):
+ # Running on an older version of ManageIQ and trying to update a hash expression
+ msg = msg.format(description=existing_alert['description'],
+ error="Your version of ManageIQ does not support hash_expression")
+ else:
+ msg = msg.format(description=existing_alert['description'], error=e)
+ self.module.fail_json(msg=msg)
+
+
+def main():
+ argument_spec = dict(
+ description=dict(type='str'),
+ resource_type=dict(type='str', choices=['Vm',
+ 'ContainerNode',
+ 'MiqServer',
+ 'Host',
+ 'Storage',
+ 'EmsCluster',
+ 'ExtManagementSystem',
+ 'MiddlewareServer']),
+ expression_type=dict(type='str', default='hash', choices=['miq', 'hash']),
+ expression=dict(type='dict'),
+ options=dict(type='dict'),
+ enabled=dict(type='bool'),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ required_if=[('state', 'present', ['description',
+ 'resource_type',
+ 'expression',
+ 'enabled',
+ 'options']),
+ ('state', 'absent', ['description'])])
+
+ state = module.params['state']
+ description = module.params['description']
+
+ manageiq = ManageIQ(module)
+ manageiq_alerts = ManageIQAlerts(manageiq)
+
+ existing_alert = manageiq.find_collection_resource_by("alert_definitions",
+ description=description)
+
+ # we need to add or update the alert
+ if state == "present":
+ alert = manageiq_alerts.create_alert_dict(module.params)
+
+ if not existing_alert:
+ # an alert with this description doesn't exist yet, let's create it
+ res_args = manageiq_alerts.add_alert(alert)
+ else:
+ # an alert with this description exists, we might need to update it
+ res_args = manageiq_alerts.update_alert(existing_alert, alert)
+
+ # this alert should not exist
+ elif state == "absent":
+ # if we have an alert with this description, delete it
+ if existing_alert:
+ res_args = manageiq_alerts.delete_alert(existing_alert)
+ else:
+ # it doesn't exist, and that's okay
+ msg = "Alert '{description}' does not exist in ManageIQ"
+ msg = msg.format(description=description)
+ res_args = dict(changed=False, msg=msg)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_group.py b/ansible_collections/community/general/plugins/modules/manageiq_group.py
new file mode 100644
index 000000000..a142a939f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_group.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Evert Mulder <evertmulder@gmail.com> (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_group
+
+short_description: Management of groups in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
+requirements:
+ - manageiq-client
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - absent - group should not exist, present - group should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ description:
+ type: str
+ description:
+ - The group description.
+ required: true
+ role_id:
+ type: int
+ description:
+ - The group role id.
+ required: false
+ default: null
+ role:
+ type: str
+ description:
+ - The group role name.
+ - The C(role_id) has precedence over the C(role) when supplied.
+ required: false
+ default: null
+ tenant_id:
+ type: int
+ description:
+ - The tenant for the group identified by the tenant id.
+ required: false
+ default: null
+ tenant:
+ type: str
+ description:
+ - The tenant for the group identified by the tenant name.
+ - The C(tenant_id) has precedence over the C(tenant) when supplied.
+ - Tenant names are case sensitive.
+ required: false
+ default: null
+ managed_filters:
+ description: The tag values per category
+ type: dict
+ required: false
+ default: null
+ managed_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing categories are kept or updated, new categories are added.
+ - In replace mode all categories will be replaced with the supplied C(managed_filters).
+ choices: [ merge, replace ]
+ default: replace
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ type: list
+ elements: str
+ required: false
+ default: null
+ belongsto_filters_merge_mode:
+ type: str
+ description:
+ - In merge mode existing settings are merged with the supplied C(belongsto_filters).
+ - In replace mode current values are replaced with the supplied C(belongsto_filters).
+ choices: [ merge, replace ]
+ default: replace
+'''
+
+EXAMPLES = '''
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: 'my_tenant'
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant_id: 4
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: >-
+ Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant,
+ apply 3 prov_max_cpu and 2 department tags to the group,
+ and limit access to a cluster for the group
+ community.general.manageiq_group:
+ description: 'MyGroup-user'
+ role: 'EvmRole-user'
+ tenant: my_tenant
+ managed_filters:
+ prov_max_cpu:
+ - '1'
+ - '2'
+ - '4'
+ department:
+ - defense
+ - engineering
+ managed_filters_merge_mode: replace
+ belongsto_filters:
+ - "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
+ belongsto_filters_merge_mode: merge
+ manageiq_connection:
+ url: 'https://manageiq_server'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Delete a group in ManageIQ
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+
+- name: Delete a group in ManageIQ using a token
+ community.general.manageiq_group:
+ state: 'absent'
+ description: 'MyGroup-user'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+'''
+
+RETURN = '''
+group:
+ description: The group.
+ returned: success
+ type: complex
+ contains:
+ description:
+ description: The group description
+ returned: success
+ type: str
+ id:
+ description: The group id
+ returned: success
+ type: int
+ group_type:
+ description: The group type, system or user
+ returned: success
+ type: str
+ role:
+ description: The group role name
+ returned: success
+ type: str
+ tenant:
+ description: The group tenant name
+ returned: success
+ type: str
+ managed_filters:
+ description: The tag values per category
+ returned: success
+ type: dict
+ belongsto_filters:
+ description: A list of strings with a reference to the allowed host, cluster or folder
+ returned: success
+ type: list
+ created_on:
+ description: Group creation date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+ updated_on:
+ description: Group update date
+ returned: success
+ type: str
+ sample: "2018-08-12T08:37:55+00:00"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQgroup(object):
+ """
+ Object to execute group management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group(self, description):
+ """ Search for group object by description.
+ Returns:
+ the group, or None if group was not found.
+ """
+ groups = self.client.collections.groups.find_by(description=description)
+ if len(groups) == 0:
+ return None
+ else:
+ return groups[0]
+
+ def tenant(self, tenant_id, tenant_name):
+ """ Search for tenant entity by name or id
+ Returns:
+ the tenant entity, None if no id or name was supplied
+ """
+
+ if tenant_id:
+ tenant = self.client.get_entity('tenants', tenant_id)
+ if not tenant:
+ self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
+ return tenant
+ else:
+ if tenant_name:
+ tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
+ if not tenant_res:
+ self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
+ if len(tenant_res) > 1:
+ self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
+ tenant = tenant_res[0]
+ return tenant
+ else:
+ # No tenant name or tenant id supplied
+ return None
+
+ def role(self, role_id, role_name):
+ """ Search for a role object by name or id.
+ Returns:
+ the role entity, or None if neither an id nor a name was supplied;
+ fails the module if the requested role was not found.
+ """
+ if role_id:
+ role = self.client.get_entity('roles', role_id)
+ if not role:
+ self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
+ return role
+ else:
+ if role_name:
+ role_res = self.client.collections.roles.find_by(name=role_name)
+ if not role_res:
+ self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
+ if len(role_res) > 1:
+ self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
+ return role_res[0]
+ else:
+ # No role name or role id supplied
+ return None
+
+ @staticmethod
+ def merge_dict_values(norm_current_values, norm_updated_values):
+ """ Create an merged update object for manageiq group filters.
+
+ The input dict contain the tag values per category.
+ If the new values contain the category, all tags for that category are replaced
+ If the new values do not contain the category, the existing tags are kept
+
+ Returns:
+ the nested array with the merged values, used in the update post body
+ """
+
+ # If no updated values are supplied, in merge mode, the original values must be returned
+ # otherwise the existing tag filters will be removed.
+ if norm_current_values and (not norm_updated_values):
+ return norm_current_values
+
+ # If no existing tag filters exist, use the user supplied values
+ if (not norm_current_values) and norm_updated_values:
+ return norm_updated_values
+
+ # start with norm_current_values's keys and values
+ res = norm_current_values.copy()
+ # replace res with norm_updated_values's keys and values
+ res.update(norm_updated_values)
+ return res
+
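+ @staticmethod
+ def _example_merge_dict_values():
+ """ A minimal, illustrative sketch (not part of the module): in the
+ merge above, categories present in the update replace the existing
+ tags, while all other categories are kept unchanged.
+ """
+ current = {'department': ['/managed/department/defense'],
+ 'environment': ['/managed/environment/prod']}
+ updated = {'department': ['/managed/department/engineering']}
+ # returns {'department': ['/managed/department/engineering'],
+ #          'environment': ['/managed/environment/prod']}
+ return ManageIQgroup.merge_dict_values(current, updated)
+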
+ def delete_group(self, group):
+ """ Deletes a group from manageiq.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+ try:
+ url = '%s/groups/%s' % (self.api_url, group['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(
+ changed=True,
+ msg="deleted group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group.
+
+ Returns:
+ a dict of:
+ changed: boolean indicating if the entity was updated.
+ msg: a short message describing the operation executed.
+ """
+
+ if role or norm_managed_filters or belongsto_filters:
+ group.reload(attributes=['miq_user_role_name', 'entitlement'])
+
+ try:
+ current_role = group['miq_user_role_name']
+ except AttributeError:
+ current_role = None
+
+ changed = False
+ resource = {}
+
+ if description and group['description'] != description:
+ resource['description'] = description
+ changed = True
+
+ if tenant and group['tenant_id'] != tenant['id']:
+ resource['tenant'] = dict(id=tenant['id'])
+ changed = True
+
+ if role and current_role != role['name']:
+ resource['role'] = dict(id=role['id'])
+ changed = True
+
+ if norm_managed_filters or belongsto_filters:
+
+ # Only compare if filters are supplied
+ entitlement = group['entitlement']
+
+ if 'filters' not in entitlement:
+ # No existing filters exist, use supplied filters
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+ changed = True
+ else:
+ current_filters = entitlement['filters']
+ new_filters = self.edit_group_edit_filters(current_filters,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+ if new_filters:
+ resource['filters'] = new_filters
+ changed = True
+
+ if not changed:
+ return dict(
+ changed=False,
+ msg="group %s is not changed." % group['description'])
+
+ # try to update group
+ try:
+ self.client.post(group['href'], action='edit', resource=resource)
+ changed = True
+ except Exception as e:
+ self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
+
+ return dict(
+ changed=changed,
+ msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
+
+ def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode):
+ """ Edit a manageiq group filters.
+
+ Returns:
+ None if the group filters were not updated,
+ otherwise the filters part of the post body for updating the group.
+ """
+ filters_updated = False
+ new_filters_resource = {}
+
+ current_belongsto_set = set(current_filters.get('belongsto', []))
+
+ if belongsto_filters:
+ new_belongsto_set = set(belongsto_filters)
+ else:
+ new_belongsto_set = set()
+
+ if current_belongsto_set == new_belongsto_set:
+ new_filters_resource['belongsto'] = current_filters.get('belongsto', [])
+ else:
+ if belongsto_filters_merge_mode == 'merge':
+ current_belongsto_set.update(new_belongsto_set)
+ new_filters_resource['belongsto'] = list(current_belongsto_set)
+ else:
+ new_filters_resource['belongsto'] = list(new_belongsto_set)
+ filters_updated = True
+
+ # Process the managed filter tags.
+ # The user input is a dict whose keys are the categories and whose values are arrays of tags.
+ # ManageIQ (current_managed) uses an array of arrays: one array per category.
+ # We normalize the user input from a dict of arrays to a dict of sorted arrays,
+ # and normalize the current manageiq array of arrays to a dict of sorted arrays, so we can compare them.
+ norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
+
+ if norm_current_filters == norm_managed_filters:
+ if 'managed' in current_filters:
+ new_filters_resource['managed'] = current_filters['managed']
+ else:
+ if managed_filters_merge_mode == 'merge':
+ merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
+ else:
+ new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ filters_updated = True
+
+ if not filters_updated:
+ return None
+
+ return new_filters_resource
+
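+ @staticmethod
+ def _example_belongsto_merge():
+ """ A minimal, illustrative sketch (not part of the module): in merge
+ mode the current and supplied belongsto filters are unioned as sets,
+ so previously allowed resources stay allowed. The paths below are
+ hypothetical.
+ """
+ current = {'/belongsto/ExtManagementSystem|Provider/EmsCluster|ClusterA'}
+ supplied = {'/belongsto/ExtManagementSystem|Provider/EmsCluster|ClusterB'}
+ current.update(supplied)
+ return sorted(current)  # both clusters remain in the filter
+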
+ def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
+ """ Creates the group in manageiq.
+
+ Returns:
+ a dict with 'changed', a result message and the created group id.
+ """
+ # check for required arguments
+ for key, value in dict(description=description).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/groups' % self.api_url
+
+ resource = {'description': description}
+
+ if role is not None:
+ resource['role'] = dict(id=role['id'])
+
+ if tenant is not None:
+ resource['tenant'] = dict(id=tenant['id'])
+
+ if norm_managed_filters or belongsto_filters:
+ managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
+ resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created group %s" % description,
+ group_id=result['results'][0]['id']
+ )
+
+ @staticmethod
+ def normalized_managed_tag_filters_to_miq(norm_managed_filters):
+ if not norm_managed_filters:
+ return None
+
+ return list(norm_managed_filters.values())
+
+ @staticmethod
+ def manageiq_filters_to_sorted_dict(current_filters):
+ current_managed_filters = current_filters.get('managed')
+ if not current_managed_filters:
+ return None
+
+ res = {}
+ for tag_list in current_managed_filters:
+ tag_list.sort()
+ key = tag_list[0].split('/')[2]
+ res[key] = tag_list
+
+ return res
+
+ @staticmethod
+ def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
+ if not managed_filters:
+ return None
+
+ res = {}
+ for cat_key in managed_filters:
+ cat_array = []
+ if not isinstance(managed_filters[cat_key], list):
+ module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
+ for tags in managed_filters[cat_key]:
+ miq_managed_tag = "/managed/" + cat_key + "/" + tags
+ cat_array.append(miq_managed_tag)
+ # Do not add empty categories. ManageIQ will remove all categories that are not supplied
+ if cat_array:
+ cat_array.sort()
+ res[cat_key] = cat_array
+ return res
+
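+ @staticmethod
+ def _example_normalize_user_filters(module):
+ """ A minimal, illustrative sketch (not part of the module): user-supplied
+ tag lists are normalized into the sorted '/managed/<category>/<tag>'
+ paths that ManageIQ reports, so both sides can be compared directly.
+ """
+ user_filters = {'department': ['engineering', 'defense']}
+ # returns {'department': ['/managed/department/defense',
+ #                         '/managed/department/engineering']}
+ return ManageIQgroup.normalize_user_managed_filters_to_sorted_dict(user_filters, module)
+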
+ @staticmethod
+ def create_result_group(group):
+ """ Creates the ansible result object from a manageiq group entity
+
+ Returns:
+ a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
+ """
+ try:
+ role_name = group['miq_user_role_name']
+ except AttributeError:
+ role_name = None
+
+ managed_filters = None
+ belongsto_filters = None
+ if 'filters' in group['entitlement']:
+ filters = group['entitlement']['filters']
+ belongsto_filters = filters.get('belongsto')
+ group_managed_filters = filters.get('managed')
+ if group_managed_filters:
+ managed_filters = {}
+ for tag_list in group_managed_filters:
+ key = tag_list[0].split('/')[2]
+ tags = []
+ for t in tag_list:
+ tags.append(t.split('/')[3])
+ managed_filters[key] = tags
+
+ return dict(
+ id=group['id'],
+ description=group['description'],
+ role=role_name,
+ tenant=group['tenant']['name'],
+ managed_filters=managed_filters,
+ belongsto_filters=belongsto_filters,
+ group_type=group['group_type'],
+ created_on=group['created_on'],
+ updated_on=group['updated_on'],
+ )
+
+
+def main():
+ argument_spec = dict(
+ description=dict(required=True, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ role_id=dict(required=False, type='int'),
+ role=dict(required=False, type='str'),
+ tenant_id=dict(required=False, type='int'),
+ tenant=dict(required=False, type='str'),
+ managed_filters=dict(required=False, type='dict'),
+ managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ belongsto_filters=dict(required=False, type='list', elements='str'),
+ belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ description = module.params['description']
+ state = module.params['state']
+ role_id = module.params['role_id']
+ role_name = module.params['role']
+ tenant_id = module.params['tenant_id']
+ tenant_name = module.params['tenant']
+ managed_filters = module.params['managed_filters']
+ managed_filters_merge_mode = module.params['managed_filters_merge_mode']
+ belongsto_filters = module.params['belongsto_filters']
+ belongsto_filters_merge_mode = module.params['belongsto_filters_merge_mode']
+
+ manageiq = ManageIQ(module)
+ manageiq_group = ManageIQgroup(manageiq)
+
+ group = manageiq_group.group(description)
+
+ # group should not exist
+ if state == "absent":
+ # if we have a group, delete it
+ if group:
+ res_args = manageiq_group.delete_group(group)
+ # if we do not have a group, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="group '%s' does not exist in manageiq" % description)
+
+ # group should exist
+ if state == "present":
+
+ tenant = manageiq_group.tenant(tenant_id, tenant_name)
+ role = manageiq_group.role(role_id, role_name)
+ norm_managed_filters = manageiq_group.normalize_user_managed_filters_to_sorted_dict(managed_filters, module)
+ # if we have a group, edit it
+ if group:
+ res_args = manageiq_group.edit_group(group, description, role, tenant,
+ norm_managed_filters, managed_filters_merge_mode,
+ belongsto_filters, belongsto_filters_merge_mode)
+
+ # if we do not have a group, create it
+ else:
+ res_args = manageiq_group.create_group(description, role, tenant, norm_managed_filters, belongsto_filters)
+ group = manageiq.client.get_entity('groups', res_args['group_id'])
+
+ group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
+ res_args['group'] = manageiq_group.create_result_group(group)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies.py b/ansible_collections/community/general/plugins/modules/manageiq_policies.py
new file mode 100644
index 000000000..061168f7f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_policies.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# Copyright (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies
+
+short_description: Management of resource policy_profiles in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_policies module supports adding and deleting policy_profiles in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - C(absent) - policy_profiles should not exist.
+ - C(present) - policy_profiles should exist.
+ - >
+ C(list) - list current policy_profiles and policies.
+ This state is deprecated and will be removed in community.general 8.0.0.
+ Please use the module M(community.general.manageiq_policies_info) instead.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ policy_profiles:
+ type: list
+ elements: dict
+ description:
+ - List of dictionaries, each includes the policy_profile C(name) key.
+ - Required if I(state) is C(present) or C(absent).
+ resource_type:
+ type: str
+ description:
+ - The type of the resource to which the profile should be [un]assigned.
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - The name of the resource to which the profile should be [un]assigned.
+ - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ resource_id:
+ type: int
+ description:
+ - The ID of the resource to which the profile should be [un]assigned.
+ - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ version_added: 2.2.0
+'''
+
+EXAMPLES = '''
+- name: Assign new policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Unassign a policy_profile for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ policy_profiles:
+ - name: openscap profile
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+'''
+
+RETURN = '''
+manageiq_policies:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ
+ returned: always
+ type: dict
+ sample: '{
+ "changed": false,
+ "profiles": [
+ {
+ "policies": [
+ {
+ "active": true,
+ "description": "OpenSCAP",
+ "name": "openscap policy"
+ },
+ {
+ "active": true,
+ "description": "Analyse incoming container images",
+ "name": "analyse incoming container images"
+ },
+ {
+ "active": true,
+ "description": "Schedule compliance after smart state analysis",
+ "name": "schedule compliance after smart state analysis"
+ }
+ ],
+ "profile_description": "OpenSCAP profile",
+ "profile_name": "openscap profile"
+ }
+ ]
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def main():
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ policy_profiles=dict(type='list', elements='dict'),
+ resource_id=dict(type='int'),
+ resource_name=dict(type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["resource_id", "resource_name"]],
+ required_one_of=[["resource_id", "resource_name"]],
+ required_if=[
+ ('state', 'present', ['policy_profiles']),
+ ('state', 'absent', ['policy_profiles'])
+ ],
+ )
+
+ policy_profiles = module.params['policy_profiles']
+ resource_id = module.params['resource_id']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ if state == "list":
+ module.deprecate(
+ 'The value "list" for "state" is deprecated. Please use community.general.manageiq_policies_info instead.',
+ version='8.0.0',
+ collection_name='community.general'
+ )
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+ manageiq_policies = manageiq.policies(resource_id, resource_type, resource_name)
+
+ if action == 'list':
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+ else:
+ # assign or unassign the profiles
+ res_args = manageiq_policies.assign_or_unassign_profiles(policy_profiles, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
new file mode 100644
index 000000000..8a75ef646
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_policies_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# Copyright (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_policies_info
+version_added: 5.8.0
+
+short_description: Listing of resource policy_profiles in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+author: Alexei Znamensky (@russoz)
+description:
+ - The manageiq_policies_info module supports listing policy_profiles in ManageIQ.
+
+options:
+ resource_type:
+ type: str
+ description:
+ - The type of the resource to obtain the profile for.
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+ - The name of the resource to obtain the profile for.
+ - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ resource_id:
+ type: int
+ description:
+ - The ID of the resource to obtain the profile for.
+ - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+'''
+
+EXAMPLES = '''
+- name: List current policy_profile and policies for a provider in ManageIQ
+ community.general.manageiq_policies_info:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ register: result
+'''
+
+RETURN = '''
+profiles:
+ description:
+ - List current policy_profile and policies for a provider in ManageIQ.
+ returned: always
+ type: list
+ elements: dict
+ sample:
+ - policies:
+ - active: true
+ description: OpenSCAP
+ name: openscap policy
+ - active: true
+ description: Analyse incoming container images
+ name: analyse incoming container images
+ - active: true
+ description: Schedule compliance after smart state analysis
+ name: schedule compliance after smart state analysis
+ profile_description: OpenSCAP profile
+ profile_name: openscap profile
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec, manageiq_entities
+
+
+def main():
+ argument_spec = dict(
+ resource_id=dict(required=False, type='int'),
+ resource_name=dict(required=False, type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["resource_id", "resource_name"]],
+ required_one_of=[["resource_id", "resource_name"]],
+ supports_check_mode=True,
+ )
+
+ resource_id = module.params['resource_id']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+
+ # get the resource type
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq_policies = ManageIQ(module).policies(resource_id, resource_type, resource_name)
+
+ # return a list of current profiles for this object
+ current_profiles = manageiq_policies.query_resource_profiles()
+ res_args = dict(changed=False, profiles=current_profiles)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_provider.py b/ansible_collections/community/general/plugins/modules/manageiq_provider.py
new file mode 100644
index 000000000..bbc27214b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_provider.py
@@ -0,0 +1,939 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# Copyright (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: manageiq_provider
+short_description: Management of provider in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_provider module supports adding, updating, and deleting providers in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - absent - provider should not exist, present - provider should be present, refresh - provider will be refreshed
+ choices: ['absent', 'present', 'refresh']
+ default: 'present'
+ name:
+ type: str
+ description: The provider's name.
+ required: true
+ type:
+ type: str
+ description: The provider's type.
+ choices: ['Openshift', 'Amazon', 'oVirt', 'VMware', 'Azure', 'Director', 'OpenStack', 'GCE']
+ zone:
+ type: str
+ description: The ManageIQ zone name that will manage the provider.
+ default: 'default'
+ provider_region:
+ type: str
+ description: The provider region name to connect to (e.g. AWS region for Amazon).
+ host_default_vnc_port_start:
+ type: str
+ description: The first port in the host VNC range. Defaults to None.
+ host_default_vnc_port_end:
+ type: str
+ description: The last port in the host VNC range. Defaults to None.
+ subscription:
+ type: str
+ description: Microsoft Azure subscription ID. Defaults to None.
+ project:
+ type: str
+ description: Google Compute Engine Project ID. Defaults to None.
+ azure_tenant_id:
+ type: str
+ description: Tenant ID. Defaults to None.
+ aliases: [ keystone_v3_domain_id ]
+ tenant_mapping_enabled:
+ type: bool
+ default: false
+ description: Whether to enable mapping of existing tenants. Defaults to False.
+ api_version:
+ type: str
+ description: The OpenStack Keystone API version. Defaults to None.
+ choices: ['v2', 'v3']
+
+ provider:
+ description: Default endpoint connection information, required if state is present.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's API hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's API port.
+ userid:
+ type: str
+ description: Provider's API endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's API endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's API endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ security_protocol:
+ type: str
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+
+ metrics:
+ description: Metrics endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's API hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's API port.
+ userid:
+ type: str
+ description: Provider's API endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's API endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's API endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation','non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+ path:
+ type: str
+ description: Database name for oVirt metrics. Defaults to C(ovirt_engine_history).
+
+ alerts:
+ description: Alerts endpoint connection information.
+ suboptions:
+ hostname:
+ type: str
+ description: The provider's API hostname.
+ required: true
+ port:
+ type: int
+ description: The provider's API port.
+ userid:
+ type: str
+ description: Provider's API endpoint authentication userid. Defaults to None.
+ password:
+ type: str
+ description: Provider's API endpoint authentication password. Defaults to None.
+ auth_key:
+ type: str
+ description: Provider's API endpoint authentication bearer token. Defaults to None.
+ validate_certs:
+ type: bool
+ description: Whether SSL certificates should be verified for HTTPS requests (deprecated). Defaults to True.
+ default: true
+ aliases: [ verify_ssl ]
+ security_protocol:
+ type: str
+ choices: ['ssl-with-validation','ssl-with-validation-custom-ca','ssl-without-validation', 'non-ssl']
+ description: How SSL certificates should be used for HTTPS requests. Defaults to None.
+ certificate_authority:
+ type: str
+ description: The CA bundle string with custom certificates. Defaults to None.
+
+ ssh_keypair:
+ description: SSH key pair used for SSH connections to all hosts in this provider.
+ suboptions:
+ hostname:
+ type: str
+ description: Director hostname.
+ required: true
+ userid:
+ type: str
+ description: SSH username.
+ auth_key:
+ type: str
+ description: SSH private key.
+ validate_certs:
+ description:
+ - Whether certificates should be verified for connections.
+ type: bool
+ default: true
+ aliases: [ verify_ssl ]
+'''
+
+EXAMPLES = '''
+- name: Create a new provider in ManageIQ ('Hawkular' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ role: 'hawkular'
+ hostname: 'example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1:80'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Update an existing provider named 'EngLab' (defaults to 'Prometheus' metrics)
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'present'
+ provider:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 8443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ auth_key: 'topSecret'
+ hostname: 'next.example.com'
+ port: 443
+ validate_certs: true
+ security_protocol: 'ssl-with-validation-custom-ca'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Delete a provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngLab'
+ type: 'Openshift'
+ state: 'absent'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+
+- name: Create a new Amazon provider in ManageIQ using token authentication
+ community.general.manageiq_provider:
+ name: 'EngAmazon'
+ type: 'Amazon'
+ state: 'present'
+ provider:
+ hostname: 'amazon.example.com'
+ userid: 'hello'
+ password: 'world'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+
+- name: Create a new oVirt provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'RHEV'
+ type: 'oVirt'
+ state: 'present'
+ provider:
+ hostname: 'rhev01.example.com'
+ userid: 'admin@internal'
+ password: 'password'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ hostname: 'metrics.example.com'
+ path: 'ovirt_engine_history'
+ userid: 'user_id_metrics'
+ password: 'password_metrics'
+ validate_certs: true
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ username: 'admin'
+ password: 'password'
+ validate_certs: true
+
+- name: Create a new VMware provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngVMware'
+ type: 'VMware'
+ state: 'present'
+ provider:
+ hostname: 'vcenter.example.com'
+ host_default_vnc_port_start: 5800
+ host_default_vnc_port_end: 5801
+ userid: 'root'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://127.0.0.1'
+ token: 'VeryLongToken'
+ validate_certs: true
+
+- name: Create a new Azure provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngAzure'
+ type: 'Azure'
+ provider_region: 'northeurope'
+ subscription: 'e272bd74-f661-484f-b223-88dd128a4049'
+ azure_tenant_id: 'e272bd74-f661-484f-b223-88dd128a4048'
+ state: 'present'
+ provider:
+ hostname: 'azure.example.com'
+ userid: 'e272bd74-f661-484f-b223-88dd128a4049'
+ password: 'password'
+ manageiq_connection:
+ url: 'https://cf-6af0.rhpds.opentlc.com'
+ username: 'admin'
+ password: 'password'
+ validate_certs: false
+
+- name: Create a new OpenStack Director provider in ManageIQ with rsa keypair
+ community.general.manageiq_provider:
+ name: 'EngDirector'
+ type: 'Director'
+ api_version: 'v3'
+ state: 'present'
+ provider:
+ hostname: 'director.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ ssh_keypair:
+ hostname: director.example.com
+ userid: heat-admin
+ auth_key: 'SecretSSHPrivateKey'
+
+- name: Create a new OpenStack provider in ManageIQ with amqp metrics
+ community.general.manageiq_provider:
+ name: 'EngOpenStack'
+ type: 'OpenStack'
+ api_version: 'v3'
+ state: 'present'
+ provider_region: 'europe'
+ tenant_mapping_enabled: 'False'
+ keystone_v3_domain_id: 'mydomain'
+ provider:
+ hostname: 'openstack.example.com'
+ userid: 'admin'
+ password: 'password'
+ security_protocol: 'ssl-with-validation'
+ validate_certs: 'true'
+ certificate_authority: |
+ -----BEGIN CERTIFICATE-----
+ FAKECERTsdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu
+ c2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkwHhcNMTcwODIxMTI1NTE5WhcNMjIwODIw
+ MTI1NTIwWjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1MDMzMjAxMTkw
+ ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUDnL2tQ2xf/zO7F7hmZ4S
+ ZuwKENdI4IYuWSxye4i3hPhKg6eKPzGzmDNWkIMDOrDAj1EgVSNPtPwsOL8OWvJm
+ AaTjr070D7ZGWWnrrDrWEClBx9Rx/6JAM38RT8Pu7c1hXBm0J81KufSLLYiZ/gOw
+ Znks5v5RUSGcAXvLkBJeATbsbh6fKX0RgQ3fFTvqQaE/r8LxcTN1uehPX1g5AaRa
+ z/SNDHaFtQlE3XcqAAukyMn4N5kdNcuwF3GlQ+tJnJv8SstPkfQcZbTMUQ7I2KpJ
+ ajXnMxmBhV5fCN4rb0QUNCrk2/B+EUMBY4MnxIakqNxnN1kvgI7FBbFgrHUe6QvJ
+ AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
+ SIb3DQEBCwUAA4IBAQAYRV57LUsqznSLZHA77o9+0fQetIE115DYP7wea42PODJI
+ QJ+JETEfoCr0+YOMAbVmznP9GH5cMTKEWHExcIpbMBU7nMZp6A3htcJgF2fgPzOA
+ aTUtzkuVCSrV//mbbYVxoFOc6sR3Br0wBs5+5iz3dBSt7xmgpMzZvqsQl655i051
+ gGSTIY3z5EJmBZBjwuTjal9mMoPGA4eoTPqlITJDHQ2bdCV2oDbc7zqupGrUfZFA
+ qzgieEyGzdCSRwjr1/PibA3bpwHyhD9CGD0PRVVTLhw6h6L5kuN1jA20OfzWxf/o
+ XUsdmRaWiF+l4s6Dcd56SuRp5SGNa2+vP9Of/FX5
+ -----END CERTIFICATE-----
+ metrics:
+ role: amqp
+ hostname: 'amqp.example.com'
+ security_protocol: 'non-ssl'
+ port: 5666
+ userid: admin
+ password: password
+
+
+- name: Create a new GCE provider in ManageIQ
+ community.general.manageiq_provider:
+ name: 'EngGoogle'
+ type: 'GCE'
+ provider_region: 'europe-west1'
+ project: 'project1'
+ state: 'present'
+ provider:
+ hostname: 'gce.example.com'
+ auth_key: 'google_json_key'
+ validate_certs: 'false'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+def supported_providers():
+ return dict(
+ Openshift=dict(
+ class_name='ManageIQ::Providers::Openshift::ContainerManager',
+ authtype='bearer',
+ default_role='default',
+ metrics_role='prometheus',
+ alerts_role='prometheus_alerts',
+ ),
+ Amazon=dict(
+ class_name='ManageIQ::Providers::Amazon::CloudManager',
+ ),
+ oVirt=dict(
+ class_name='ManageIQ::Providers::Redhat::InfraManager',
+ default_role='default',
+ metrics_role='metrics',
+ ),
+ VMware=dict(
+ class_name='ManageIQ::Providers::Vmware::InfraManager',
+ ),
+ Azure=dict(
+ class_name='ManageIQ::Providers::Azure::CloudManager',
+ ),
+ Director=dict(
+ class_name='ManageIQ::Providers::Openstack::InfraManager',
+ ssh_keypair_role="ssh_keypair"
+ ),
+ OpenStack=dict(
+ class_name='ManageIQ::Providers::Openstack::CloudManager',
+ ),
+ GCE=dict(
+ class_name='ManageIQ::Providers::Google::CloudManager',
+ ),
+ )
+
+
+def endpoint_list_spec():
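+    # the four endpoint types a provider may define; all share the same sub-spec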
+ return dict(
+ provider=dict(type='dict', options=endpoint_argument_spec()),
+ metrics=dict(type='dict', options=endpoint_argument_spec()),
+ alerts=dict(type='dict', options=endpoint_argument_spec()),
+ ssh_keypair=dict(type='dict', options=endpoint_argument_spec(), no_log=False),
+ )
+
+
+def endpoint_argument_spec():
+ return dict(
+ role=dict(),
+ hostname=dict(required=True),
+ port=dict(type='int'),
+ validate_certs=dict(default=True, type='bool', aliases=['verify_ssl']),
+ certificate_authority=dict(),
+ security_protocol=dict(
+ choices=[
+ 'ssl-with-validation',
+ 'ssl-with-validation-custom-ca',
+ 'ssl-without-validation',
+ 'non-ssl',
+ ],
+ ),
+ userid=dict(),
+ password=dict(no_log=True),
+ auth_key=dict(no_log=True),
+ subscription=dict(no_log=True),
+ project=dict(),
+ uid_ems=dict(),
+ path=dict(),
+ )
+
+
+def delete_nulls(h):
+ """ Remove null entries from a hash
+
+ Returns:
+ a hash without nulls
+ """
+ if isinstance(h, list):
+ return [delete_nulls(i) for i in h]
+ if isinstance(h, dict):
+ return dict((k, delete_nulls(v)) for k, v in h.items() if v is not None)
+
+ return h
+
+
+class ManageIQProvider(object):
+ """
+ Object to execute provider management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def class_name_to_type(self, class_name):
+ """ Convert class_name to type
+
+ Returns:
+            the type string, or None if the class_name is not supported.
+ """
+ out = [k for k, v in supported_providers().items() if v['class_name'] == class_name]
+ if len(out) == 1:
+ return out[0]
+
+ return None
+
+ def zone_id(self, name):
+ """ Search for zone id by zone name.
+
+ Returns:
+            the zone id, or fail the module if the zone is not found.
+ """
+ zone = self.manageiq.find_collection_resource_by('zones', name=name)
+ if not zone: # zone doesn't exist
+ self.module.fail_json(
+ msg="zone %s does not exist in manageiq" % (name))
+
+ return zone['id']
+
+ def provider(self, name):
+ """ Search for provider object by name.
+
+ Returns:
+ the provider, or None if provider not found.
+ """
+ return self.manageiq.find_collection_resource_by('providers', name=name)
+
+ def build_connection_configurations(self, provider_type, endpoints):
+ """ Build "connection_configurations" objects from
+ requested endpoints provided by user
+
+ Returns:
+ the user requested provider endpoints list
+ """
+ connection_configurations = []
+ endpoint_keys = endpoint_list_spec().keys()
+ provider_defaults = supported_providers().get(provider_type, {})
+
+ # get endpoint defaults
+ endpoint = endpoints.get('provider')
+ default_auth_key = endpoint.get('auth_key')
+
+ # build a connection_configuration object for each endpoint
+ for endpoint_key in endpoint_keys:
+ endpoint = endpoints.get(endpoint_key)
+ if endpoint:
+ # get role and authtype
+ role = endpoint.get('role') or provider_defaults.get(endpoint_key + '_role', 'default')
+ if role == 'default':
+ authtype = provider_defaults.get('authtype') or role
+ else:
+ authtype = role
+
+ # set a connection_configuration
+ connection_configurations.append({
+ 'endpoint': {
+ 'role': role,
+ 'hostname': endpoint.get('hostname'),
+ 'port': endpoint.get('port'),
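+                        # convert the validate_certs boolean to the 0/1 flag used by the API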
+ 'verify_ssl': [0, 1][endpoint.get('validate_certs', True)],
+ 'security_protocol': endpoint.get('security_protocol'),
+ 'certificate_authority': endpoint.get('certificate_authority'),
+ 'path': endpoint.get('path'),
+ },
+ 'authentication': {
+ 'authtype': authtype,
+ 'userid': endpoint.get('userid'),
+ 'password': endpoint.get('password'),
+ 'auth_key': endpoint.get('auth_key') or default_auth_key,
+ }
+ })
+
+ return connection_configurations
+
+ def delete_provider(self, provider):
+ """ Deletes a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_provider(self, provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Edit a provider from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ connection_configurations=endpoints,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ )
+
+        # NOTE: we do not check for diffs between the requested and the current
+        # provider; we always submit endpoints with passwords or auth_keys,
+        # since we cannot compare them with the current password or auth_key.
+        # Every edit request is therefore sent to the ManageIQ API without
+        # comparing it to the current state.
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to update provider
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update provider %s: %s" % (provider['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the provider %s: %s" % (provider['name'], result))
+
+ def create_provider(self, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version):
+ """ Creates the provider in manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ resource = dict(
+ name=name,
+ zone={'id': zone_id},
+ provider_region=provider_region,
+ host_default_vnc_port_start=host_default_vnc_port_start,
+ host_default_vnc_port_end=host_default_vnc_port_end,
+ subscription=subscription,
+ project=project,
+ uid_ems=uid_ems,
+ tenant_mapping_enabled=tenant_mapping_enabled,
+ api_version=api_version,
+ connection_configurations=endpoints,
+ )
+
+ # clean nulls, we do not send nulls to the api
+ resource = delete_nulls(resource)
+
+ # try to create a new provider
+ try:
+ url = '%s/providers' % (self.api_url)
+ result = self.client.post(url, type=supported_providers()[provider_type]['class_name'], **resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the provider %s: %s" % (name, result['results']))
+
+ def refresh(self, provider, name):
+ """ Trigger provider refresh.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/providers/%s' % (self.api_url, provider['id'])
+ result = self.client.post(url, action='refresh')
+ except Exception as e:
+ self.module.fail_json(msg="failed to refresh provider %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="refreshing provider %s" % name)
+
+
+def main():
+ zone_id = None
+ endpoints = []
+ argument_spec = dict(
+ state=dict(choices=['absent', 'present', 'refresh'], default='present'),
+ name=dict(required=True),
+ zone=dict(default='default'),
+ provider_region=dict(),
+ host_default_vnc_port_start=dict(),
+ host_default_vnc_port_end=dict(),
+ subscription=dict(),
+ project=dict(),
+ azure_tenant_id=dict(aliases=['keystone_v3_domain_id']),
+ tenant_mapping_enabled=dict(default=False, type='bool'),
+ api_version=dict(choices=['v2', 'v3']),
+ type=dict(choices=list(supported_providers().keys())),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+ # add the endpoint arguments to the arguments
+ argument_spec.update(endpoint_list_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ('state', 'present', ['provider']),
+ ('state', 'refresh', ['name'])],
+ required_together=[
+ ['host_default_vnc_port_start', 'host_default_vnc_port_end']
+ ],
+ )
+
+ name = module.params['name']
+ zone_name = module.params['zone']
+ provider_type = module.params['type']
+ raw_endpoints = module.params
+ provider_region = module.params['provider_region']
+ host_default_vnc_port_start = module.params['host_default_vnc_port_start']
+ host_default_vnc_port_end = module.params['host_default_vnc_port_end']
+ subscription = module.params['subscription']
+ uid_ems = module.params['azure_tenant_id']
+ project = module.params['project']
+ tenant_mapping_enabled = module.params['tenant_mapping_enabled']
+ api_version = module.params['api_version']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_provider = ManageIQProvider(manageiq)
+
+ provider = manageiq_provider.provider(name)
+
+ # provider should not exist
+ if state == "absent":
+ # if we have a provider, delete it
+ if provider:
+ res_args = manageiq_provider.delete_provider(provider)
+ # if we do not have a provider, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ # provider should exist
+ if state == "present":
+ # get data user did not explicitly give
+ if zone_name:
+ zone_id = manageiq_provider.zone_id(zone_name)
+
+ # if we do not have a provider_type, use the current provider_type
+ if provider and not provider_type:
+ provider_type = manageiq_provider.class_name_to_type(provider['type'])
+
+        # fail if we could not determine the provider type
+ if not provider_type:
+ manageiq_provider.module.fail_json(
+ msg="missing required argument: provider_type")
+
+ # check supported_providers types
+ if provider_type not in supported_providers().keys():
+ manageiq_provider.module.fail_json(
+ msg="provider_type %s is not supported" % (provider_type))
+
+ # build "connection_configurations" objects from user requested endpoints
+ # "provider" is a required endpoint, if we have it, we have endpoints
+ if raw_endpoints.get("provider"):
+ endpoints = manageiq_provider.build_connection_configurations(provider_type, raw_endpoints)
+
+ # if we have a provider, edit it
+ if provider:
+ res_args = manageiq_provider.edit_provider(provider, name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+ # if we do not have a provider, create it
+ else:
+ res_args = manageiq_provider.create_provider(name, provider_type, endpoints, zone_id, provider_region,
+ host_default_vnc_port_start, host_default_vnc_port_end,
+ subscription, project, uid_ems, tenant_mapping_enabled, api_version)
+
+ # refresh provider (trigger sync)
+ if state == "refresh":
+ if provider:
+ res_args = manageiq_provider.refresh(provider, name)
+ else:
+ res_args = dict(
+ changed=False,
+ msg="provider %s: does not exist in manageiq" % (name))
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags.py b/ansible_collections/community/general/plugins/modules/manageiq_tags.py
new file mode 100644
index 000000000..7e190d49c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tags.py
@@ -0,0 +1,189 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# Copyright (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags
+
+short_description: Management of resource tags in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_tags module supports adding, updating and deleting tags in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - C(absent) - tags should not exist.
+ - C(present) - tags should exist.
+ - C(list) - list current tags.
+ choices: ['absent', 'present', 'list']
+ default: 'present'
+ tags:
+ type: list
+ elements: dict
+ description:
+      - C(tags) - list of dictionaries, each containing C(name) and C(category) keys.
+ - Required if I(state) is C(present) or C(absent).
+ resource_type:
+ type: str
+ description:
+ - The relevant resource type in manageiq.
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+      - The name of the resource whose tags will be managed.
+ - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ resource_id:
+ description:
+      - The ID of the resource whose tags will be managed.
+ - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ type: int
+ version_added: 2.2.0
+'''
+
+EXAMPLES = '''
+- name: Create new tags for a provider in ManageIQ.
+ community.general.manageiq_tags:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Create new tags for a provider in ManageIQ.
+ community.general.manageiq_tags:
+ resource_id: 23000000790497
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Remove tags for a provider in ManageIQ.
+ community.general.manageiq_tags:
+ state: absent
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ tags:
+ - category: environment
+ name: prod
+ - category: owner
+ name: prod_ops
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: List current tags for a provider in ManageIQ.
+ community.general.manageiq_tags:
+ state: list
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import (
+ ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities
+)
+
+
+def main():
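+    # map module states to the corresponding ManageIQ tag API actions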
+ actions = {'present': 'assign', 'absent': 'unassign', 'list': 'list'}
+ argument_spec = dict(
+ tags=dict(type='list', elements='dict'),
+ resource_id=dict(type='int'),
+ resource_name=dict(type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ state=dict(required=False, type='str',
+ choices=['present', 'absent', 'list'], default='present'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["resource_id", "resource_name"]],
+ required_one_of=[["resource_id", "resource_name"]],
+ required_if=[
+ ('state', 'present', ['tags']),
+ ('state', 'absent', ['tags'])
+ ],
+ )
+
+ tags = module.params['tags']
+ resource_id = module.params['resource_id']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+ state = module.params['state']
+
+ # get the action and resource type
+ action = actions[state]
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ if resource_id is None:
+ resource_id = manageiq.query_resource_id(resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ if action == 'list':
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+ else:
+ # assign or unassign the tags
+ res_args = manageiq_tags.assign_or_unassign_tags(tags, action)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
new file mode 100644
index 000000000..af71e150c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tags_info.py
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# Copyright (c) 2017, Yaacov Zamir <yzamir@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_tags_info
+version_added: 5.8.0
+short_description: Retrieve resource tags in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+author: Alexei Znamensky (@russoz)
+description:
+ - This module supports retrieving resource tags from ManageIQ.
+
+options:
+ resource_type:
+ type: str
+ description:
+ - The relevant resource type in ManageIQ.
+ required: true
+ choices: ['provider', 'host', 'vm', 'blueprint', 'category', 'cluster',
+ 'data store', 'group', 'resource pool', 'service', 'service template',
+ 'template', 'tenant', 'user']
+ resource_name:
+ type: str
+ description:
+      - The name of the resource whose tags will be retrieved.
+ - Must be specified if I(resource_id) is not set. Both options are mutually exclusive.
+ resource_id:
+ description:
+      - The ID of the resource whose tags will be retrieved.
+ - Must be specified if I(resource_name) is not set. Both options are mutually exclusive.
+ type: int
+'''
+
+EXAMPLES = '''
+- name: List current tags for a provider in ManageIQ.
+ community.general.manageiq_tags_info:
+ resource_name: 'EngLab'
+ resource_type: 'provider'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ register: result
+'''
+
+RETURN = '''
+tags:
+ description: List of tags associated with the resource.
+ returned: on success
+ type: list
+ elements: dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import (
+ ManageIQ, ManageIQTags, manageiq_argument_spec, manageiq_entities
+)
+
+
+def main():
+ argument_spec = dict(
+ resource_id=dict(type='int'),
+ resource_name=dict(type='str'),
+ resource_type=dict(required=True, type='str',
+ choices=list(manageiq_entities().keys())),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[["resource_id", "resource_name"]],
+ required_one_of=[["resource_id", "resource_name"]],
+ supports_check_mode=True,
+ )
+
+ resource_id = module.params['resource_id']
+ resource_type_key = module.params['resource_type']
+ resource_name = module.params['resource_name']
+
+ # get the action and resource type
+ resource_type = manageiq_entities()[resource_type_key]
+
+ manageiq = ManageIQ(module)
+
+ # query resource id, fail if resource does not exist
+ if resource_id is None:
+ resource_id = manageiq.query_resource_id(resource_type, resource_name)
+
+ manageiq_tags = ManageIQTags(manageiq, resource_type, resource_id)
+
+ # return a list of current tags for this object
+ current_tags = manageiq_tags.query_resource_tags()
+ res_args = dict(changed=False, tags=current_tags)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_tenant.py b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
new file mode 100644
index 000000000..d68e26a73
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_tenant.py
@@ -0,0 +1,550 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Evert Mulder (based on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+
+module: manageiq_tenant
+
+short_description: Management of tenants in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Evert Mulder (@evertmulder)
+description:
+ - The manageiq_tenant module supports adding, updating and deleting tenants in ManageIQ.
+requirements:
+ - manageiq-client
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+      - absent - tenant should not exist, present - tenant should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - The tenant name.
+ required: true
+ default: null
+ description:
+ type: str
+ description:
+ - The tenant description.
+ required: true
+ default: null
+ parent_id:
+ type: int
+ description:
+      - The id of the parent tenant. If not supplied, the root tenant is used.
+      - C(parent_id) takes precedence over C(parent) when both are supplied.
+ required: false
+ default: null
+ parent:
+ type: str
+ description:
+      - The name of the parent tenant. If not supplied and no C(parent_id) is given, the root tenant is used.
+ required: false
+ default: null
+ quotas:
+ type: dict
+ description:
+ - The tenant quotas.
+      - All parameters are case sensitive.
+ - 'Valid attributes are:'
+ - ' - C(cpu_allocated) (int): use null to remove the quota.'
+ - ' - C(mem_allocated) (GB): use null to remove the quota.'
+ - ' - C(storage_allocated) (GB): use null to remove the quota.'
+ - ' - C(vms_allocated) (int): use null to remove the quota.'
+ - ' - C(templates_allocated) (int): use null to remove the quota.'
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+- name: Update the root tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'My Company'
+ description: 'My company name'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Create a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ description: 'Manufacturing department'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Delete a tenant in ManageIQ
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Set tenant quota for cpu_allocated, mem_allocated, remove quota for vms_allocated
+ community.general.manageiq_tenant:
+ name: 'Dep1'
+ parent_id: 1
+ quotas:
+      cpu_allocated: 100
+      mem_allocated: 50
+      vms_allocated: null
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+
+- name: Delete a tenant in ManageIQ using a token
+ community.general.manageiq_tenant:
+ state: 'absent'
+ name: 'Dep1'
+ parent_id: 1
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: false
+'''
+
+RETURN = '''
+tenant:
+ description: The tenant.
+ returned: success
+ type: complex
+ contains:
+ id:
+ description: The tenant id
+ returned: success
+ type: int
+ name:
+ description: The tenant name
+ returned: success
+ type: str
+ description:
+ description: The tenant description
+ returned: success
+ type: str
+ parent_id:
+ description: The id of the parent tenant
+ returned: success
+ type: int
+ quotas:
+      description: Dictionary of tenant quotas, mapping quota name to value.
+      returned: success
+      type: dict
+ sample:
+ cpu_allocated: 100
+ mem_allocated: 50
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQTenant(object):
+ """
+ Object to execute tenant management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def tenant(self, name, parent_id, parent):
+ """ Search for tenant object by name and parent_id or parent
+ or the root tenant if no parent or parent_id is supplied.
+ Returns:
+ the parent tenant, None for the root tenant
+ the tenant or None if tenant was not found.
+ """
+
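+        # ManageIQ stores the tenant hierarchy in an 'ancestry' string of
+        # '/'-separated ancestor ids (for example '1/2/5'); the last element
+        # is the direct parent, which is what the lookups below match against.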
+ if parent_id:
+ parent_tenant_res = self.client.collections.tenants.find_by(id=parent_id)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant with id '%s' not found in manageiq" % str(parent_id))
+ parent_tenant = parent_tenant_res[0]
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if int(tenant_parent_id) == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+ if parent:
+ parent_tenant_res = self.client.collections.tenants.find_by(name=parent)
+ if not parent_tenant_res:
+ self.module.fail_json(msg="Parent tenant '%s' not found in manageiq" % parent)
+
+ if len(parent_tenant_res) > 1:
+ self.module.fail_json(msg="Multiple parent tenants not found in manageiq with name '%s" % parent)
+
+ parent_tenant = parent_tenant_res[0]
+ parent_id = int(parent_tenant['id'])
+ tenants = self.client.collections.tenants.find_by(name=name)
+
+ for tenant in tenants:
+ try:
+ ancestry = tenant['ancestry']
+ except AttributeError:
+ ancestry = None
+
+ if ancestry:
+ tenant_parent_id = int(ancestry.split("/")[-1])
+ if tenant_parent_id == parent_id:
+ return parent_tenant, tenant
+
+ return parent_tenant, None
+ else:
+                # No parent or parent_id supplied, so we select the root tenant
+ return None, self.client.collections.tenants.find_by(ancestry=None)[0]
+
+ def compare_tenant(self, tenant, name, description):
+ """ Compare tenant fields with new field values.
+
+ Returns:
+            false if tenant fields differ from the new field values, true otherwise.
+ """
+ found_difference = (
+ (name and tenant['name'] != name) or
+ (description and tenant['description'] != description)
+ )
+
+ return not found_difference
+
+ def delete_tenant(self, tenant):
+ """ Deletes a tenant from manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ try:
+ url = '%s/tenants/%s' % (self.api_url, tenant['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant %s: %s" % (tenant['name'], str(e)))
+
+ if result['success'] is False:
+ self.module.fail_json(msg=result['message'])
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_tenant(self, tenant, name, description):
+ """ Edit a manageiq tenant.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+ resource = dict(name=name, description=description, use_config_for_attributes=False)
+
+        # check if we need to update (compare_tenant returns true if no difference is found)
+ if self.compare_tenant(tenant, name, description):
+ return dict(
+ changed=False,
+ msg="tenant %s is not changed." % tenant['name'],
+ tenant=tenant['_data'])
+
+ # try to update tenant
+ try:
+ result = self.client.post(tenant['href'], action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant %s: %s" % (tenant['name'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the tenant with id %s" % (tenant['id']))
+
+ def create_tenant(self, name, description, parent_tenant):
+ """ Creates the tenant in manageiq.
+
+ Returns:
+ dict with `msg`, `changed` and `tenant_id`
+ """
+ parent_id = parent_tenant['id']
+ # check for required arguments
+ for key, value in dict(name=name, description=description, parent_id=parent_id).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % key)
+
+ url = '%s/tenants' % self.api_url
+
+ resource = {'name': name, 'description': description, 'parent': {'id': parent_id}}
+
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ tenant_id = result['results'][0]['id']
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant %s: %s" % (name, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant '%s' with id '%s'" % (name, tenant_id),
+ tenant_id=tenant_id)
+
+ def tenant_quota(self, tenant, quota_key):
+ """ Search for tenant quota object by tenant and quota_key.
+ Returns:
+            the matching quota resources for the tenant (an empty list if none were found).
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources&filter[]=name=%s" % (tenant['href'], quota_key))
+
+ return tenant_quotas['resources']
+
+ def tenant_quotas(self, tenant):
+ """ Search for tenant quotas object by tenant.
+ Returns:
+            the quotas for the tenant, or an empty list if none were found.
+ """
+
+ tenant_quotas = self.client.get("%s/quotas?expand=resources" % (tenant['href']))
+
+ return tenant_quotas['resources']
+
+ def update_tenant_quotas(self, tenant, quotas):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ dict with `msg` and `changed`
+ """
+
+ changed = False
+ messages = []
+ for quota_key, quota_value in quotas.items():
+ current_quota_filtered = self.tenant_quota(tenant, quota_key)
+ if current_quota_filtered:
+ current_quota = current_quota_filtered[0]
+ else:
+ current_quota = None
+
+ if quota_value:
+                # Convert GB values to bytes
+ if quota_key in ['storage_allocated', 'mem_allocated']:
+ quota_value_int = int(quota_value) * 1024 * 1024 * 1024
+ else:
+ quota_value_int = int(quota_value)
+ if current_quota:
+ res = self.edit_tenant_quota(tenant, current_quota, quota_key, quota_value_int)
+ else:
+ res = self.create_tenant_quota(tenant, quota_key, quota_value_int)
+ else:
+ if current_quota:
+ res = self.delete_tenant_quota(tenant, current_quota)
+ else:
+ res = dict(changed=False, msg="tenant quota '%s' does not exist" % quota_key)
+
+ if res['changed']:
+ changed = True
+
+ messages.append(res['msg'])
+
+ return dict(
+ changed=changed,
+ msg=', '.join(messages))
+
+ def edit_tenant_quota(self, tenant, current_quota, quota_key, quota_value):
+ """ Update the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+
+ if current_quota['value'] == quota_value:
+ return dict(
+ changed=False,
+ msg="tenant quota %s already has value %s" % (quota_key, quota_value))
+ else:
+
+ url = '%s/quotas/%s' % (tenant['href'], current_quota['id'])
+ resource = {'value': quota_value}
+ try:
+ self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated tenant quota %s" % quota_key)
+
+ def create_tenant_quota(self, tenant, quota_key, quota_value):
+ """ Creates the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ url = '%s/quotas' % (tenant['href'])
+ resource = {'name': quota_key, 'value': quota_value}
+ try:
+ self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create tenant quota %s: %s" % (quota_key, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created tenant quota %s" % quota_key)
+
+ def delete_tenant_quota(self, tenant, quota):
+ """ deletes the tenant quotas in manageiq.
+
+ Returns:
+ result
+ """
+ try:
+ result = self.client.post(quota['href'], action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete tenant quota '%s': %s" % (quota['name'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def create_tenant_response(self, tenant, parent_tenant):
+ """ Creates the ansible result object from a manageiq tenant entity
+
+ Returns:
+            a dict with the tenant id, name, description, parent id
+            and quotas
+ """
+ tenant_quotas = self.create_tenant_quotas_response(tenant['tenant_quotas'])
+
+ try:
+ ancestry = tenant['ancestry']
+ tenant_parent_id = ancestry.split("/")[-1]
+ except AttributeError:
+ # The root tenant does not return the ancestry attribute
+ tenant_parent_id = None
+
+ return dict(
+ id=tenant['id'],
+ name=tenant['name'],
+ description=tenant['description'],
+ parent_id=tenant_parent_id,
+ quotas=tenant_quotas
+ )
+
+ @staticmethod
+ def create_tenant_quotas_response(tenant_quotas):
+ """ Creates the ansible result object from a manageiq tenant_quotas entity
+
+ Returns:
+ a dict with the applied quotas, name and value
+ """
+
+ if not tenant_quotas:
+ return {}
+
+ result = {}
+ for quota in tenant_quotas:
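+            # byte-based quotas are reported in bytes; convert back to GB for the result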
+ if quota['unit'] == 'bytes':
+ value = float(quota['value']) / (1024 * 1024 * 1024)
+ else:
+ value = quota['value']
+ result[quota['name']] = value
+ return result
+
+
+def main():
+ argument_spec = dict(
+ name=dict(required=True, type='str'),
+ description=dict(required=True, type='str'),
+ parent_id=dict(required=False, type='int'),
+ parent=dict(required=False, type='str'),
+ state=dict(choices=['absent', 'present'], default='present'),
+ quotas=dict(type='dict', default={})
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ name = module.params['name']
+ description = module.params['description']
+ parent_id = module.params['parent_id']
+ parent = module.params['parent']
+ state = module.params['state']
+ quotas = module.params['quotas']
+
+ manageiq = ManageIQ(module)
+ manageiq_tenant = ManageIQTenant(manageiq)
+
+ parent_tenant, tenant = manageiq_tenant.tenant(name, parent_id, parent)
+
+ # tenant should not exist
+ if state == "absent":
+ # if we have a tenant, delete it
+ if tenant:
+ res_args = manageiq_tenant.delete_tenant(tenant)
+ # if we do not have a tenant, nothing to do
+ else:
+ if parent_id:
+ msg = "tenant '%s' with parent_id %i does not exist in manageiq" % (name, parent_id)
+ else:
+ msg = "tenant '%s' with parent '%s' does not exist in manageiq" % (name, parent)
+
+ res_args = dict(
+ changed=False,
+ msg=msg)
+
+ # tenant should exist
+ if state == "present":
+ # if we have a tenant, edit it
+ if tenant:
+ res_args = manageiq_tenant.edit_tenant(tenant, name, description)
+
+ # if we do not have a tenant, create it
+ else:
+ res_args = manageiq_tenant.create_tenant(name, description, parent_tenant)
+ tenant = manageiq.client.get_entity('tenants', res_args['tenant_id'])
+
+        # quotas are supplied and we have a tenant
+ if quotas:
+ tenant_quotas_res = manageiq_tenant.update_tenant_quotas(tenant, quotas)
+ if tenant_quotas_res['changed']:
+ res_args['changed'] = True
+ res_args['tenant_quotas_msg'] = tenant_quotas_res['msg']
+
+ tenant.reload(expand='resources', attributes=['tenant_quotas'])
+ res_args['tenant'] = manageiq_tenant.create_tenant_response(tenant, parent_tenant)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/manageiq_user.py b/ansible_collections/community/general/plugins/modules/manageiq_user.py
new file mode 100644
index 000000000..0d3d8718b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/manageiq_user.py
@@ -0,0 +1,325 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Daniel Korn <korndaniel1@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: manageiq_user
+
+short_description: Management of users in ManageIQ
+extends_documentation_fragment:
+ - community.general.manageiq
+ - community.general.attributes
+
+author: Daniel Korn (@dkorn)
+description:
+ - The manageiq_user module supports adding, updating and deleting users in ManageIQ.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+      - absent - user should not exist, present - user should exist.
+ choices: ['absent', 'present']
+ default: 'present'
+ userid:
+ type: str
+ description:
+      - The unique userid in manageiq, often referred to as the username.
+ required: true
+ name:
+ type: str
+ description:
+      - The user's full name.
+ password:
+ type: str
+ description:
+      - The user's password.
+ group:
+ type: str
+ description:
+ - The name of the group to which the user belongs.
+ email:
+ type: str
+ description:
+      - The user's e-mail address.
+ update_password:
+ type: str
+ default: always
+ choices: ['always', 'on_create']
+ description:
+ - C(always) will update passwords unconditionally. C(on_create) will only set the password for a newly created user.
+'''
+
+EXAMPLES = '''
+- name: Create a new user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Create a new user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ name: 'Jane Doe'
+ password: 'VerySecret'
+ group: 'EvmGroup-user'
+ email: 'jdoe@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: false
+
+- name: Delete a user in ManageIQ
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Delete a user in ManageIQ using a token
+ community.general.manageiq_user:
+ state: 'absent'
+ userid: 'jdoe'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: false
+
+- name: Update email of user in ManageIQ
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ username: 'admin'
+ password: 'smartvm'
+ validate_certs: false
+
+- name: Update email of user in ManageIQ using a token
+ community.general.manageiq_user:
+ userid: 'jdoe'
+ email: 'jaustine@example.com'
+ manageiq_connection:
+ url: 'http://127.0.0.1:3000'
+ token: 'sometoken'
+ validate_certs: false
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.manageiq import ManageIQ, manageiq_argument_spec
+
+
+class ManageIQUser(object):
+ """
+ Object to execute user management operations in manageiq.
+ """
+
+ def __init__(self, manageiq):
+ self.manageiq = manageiq
+
+ self.module = self.manageiq.module
+ self.api_url = self.manageiq.api_url
+ self.client = self.manageiq.client
+
+ def group_id(self, description):
+ """ Search for group id by group description.
+
+ Returns:
+            the group id, or fail the module if the group is not found.
+ """
+ group = self.manageiq.find_collection_resource_by('groups', description=description)
+ if not group: # group doesn't exist
+ self.module.fail_json(
+ msg="group %s does not exist in manageiq" % (description))
+
+ return group['id']
+
+ def user(self, userid):
+ """ Search for user object by userid.
+
+ Returns:
+ the user, or None if user not found.
+ """
+ return self.manageiq.find_collection_resource_by('users', userid=userid)
+
+ def compare_user(self, user, name, group_id, password, email):
+ """ Compare user fields with new field values.
+
+ Returns:
+            false if user fields differ from the new field values, true otherwise.
+ """
+ found_difference = (
+ (name and user['name'] != name) or
+ (password is not None) or
+ (email and user['email'] != email) or
+ (group_id and user['current_group_id'] != group_id)
+ )
+
+ return not found_difference
+
+ def delete_user(self, user):
+ """ Deletes a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ try:
+ url = '%s/users/%s' % (self.api_url, user['id'])
+ result = self.client.post(url, action='delete')
+ except Exception as e:
+ self.module.fail_json(msg="failed to delete user %s: %s" % (user['userid'], str(e)))
+
+ return dict(changed=True, msg=result['message'])
+
+ def edit_user(self, user, name, group, password, email):
+ """ Edit a user from manageiq.
+
+ Returns:
+ a short message describing the operation executed.
+ """
+ group_id = None
+ url = '%s/users/%s' % (self.api_url, user['id'])
+
+ resource = dict(userid=user['userid'])
+ if group is not None:
+ group_id = self.group_id(group)
+ resource['group'] = dict(id=group_id)
+ if name is not None:
+ resource['name'] = name
+ if email is not None:
+ resource['email'] = email
+
+ # if there is a password param, but 'update_password' is 'on_create'
+ # then discard the password (since we're editing an existing user)
+ if self.module.params['update_password'] == 'on_create':
+ password = None
+ if password is not None:
+ resource['password'] = password
+
+        # check if we need to update (compare_user returns true if no difference is found)
+ if self.compare_user(user, name, group_id, password, email):
+ return dict(
+ changed=False,
+ msg="user %s is not changed." % (user['userid']))
+
+ # try to update user
+ try:
+ result = self.client.post(url, action='edit', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to update user %s: %s" % (user['userid'], str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully updated the user %s: %s" % (user['userid'], result))
+
+ def create_user(self, userid, name, group, password, email):
+ """ Creates the user in manageiq.
+
+ Returns:
+ the created user id, name, created_on timestamp,
+ updated_on timestamp, userid and current_group_id.
+ """
+ # check for required arguments
+ for key, value in dict(name=name, group=group, password=password).items():
+ if value in (None, ''):
+ self.module.fail_json(msg="missing required argument: %s" % (key))
+
+ group_id = self.group_id(group)
+ url = '%s/users' % (self.api_url)
+
+ resource = {'userid': userid, 'name': name, 'password': password, 'group': {'id': group_id}}
+ if email is not None:
+ resource['email'] = email
+
+ # try to create a new user
+ try:
+ result = self.client.post(url, action='create', resource=resource)
+ except Exception as e:
+ self.module.fail_json(msg="failed to create user %s: %s" % (userid, str(e)))
+
+ return dict(
+ changed=True,
+ msg="successfully created the user %s: %s" % (userid, result['results']))
+
+
+def main():
+ argument_spec = dict(
+ userid=dict(required=True, type='str'),
+ name=dict(),
+ password=dict(no_log=True),
+ group=dict(),
+ email=dict(),
+ state=dict(choices=['absent', 'present'], default='present'),
+ update_password=dict(choices=['always', 'on_create'],
+ default='always'),
+ )
+ # add the manageiq connection arguments to the arguments
+ argument_spec.update(manageiq_argument_spec())
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ userid = module.params['userid']
+ name = module.params['name']
+ password = module.params['password']
+ group = module.params['group']
+ email = module.params['email']
+ state = module.params['state']
+
+ manageiq = ManageIQ(module)
+ manageiq_user = ManageIQUser(manageiq)
+
+ user = manageiq_user.user(userid)
+
+ # user should not exist
+ if state == "absent":
+ # if we have a user, delete it
+ if user:
+ res_args = manageiq_user.delete_user(user)
+ # if we do not have a user, nothing to do
+ else:
+ res_args = dict(
+ changed=False,
+ msg="user %s: does not exist in manageiq" % (userid))
+
+ # user should exist
+ if state == "present":
+ # if we have a user, edit it
+ if user:
+ res_args = manageiq_user.edit_user(user, name, group, password, email)
+ # if we do not have a user, create it
+ else:
+ res_args = manageiq_user.create_user(userid, name, group, password, email)
+
+ module.exit_json(**res_args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mas.py b/ansible_collections/community/general/plugins/modules/mas.py
new file mode 100644
index 000000000..5b8958beb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mas.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# Copyright (c) 2017, Michael Heap <m@michaelheap.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mas
+short_description: Manage Mac App Store applications with mas-cli
+description:
+ - Installs, uninstalls and updates macOS applications from the Mac App Store using the C(mas-cli).
+version_added: '0.2.0'
+author:
+ - Michael Heap (@mheap)
+ - Lukas Bestle (@lukasbestle)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+ - The Mac App Store identifier of the app(s) you want to manage.
+ - This can be found by running C(mas search APP_NAME) on your machine.
+ type: list
+ elements: int
+ state:
+ description:
+ - Desired state of the app installation.
+      - The C(absent) value requires root permissions; see also the examples.
+ type: str
+ choices:
+ - absent
+ - latest
+ - present
+ default: present
+ upgrade_all:
+ description:
+ - Upgrade all installed Mac App Store apps.
+ type: bool
+ default: false
+ aliases: ["upgrade"]
+requirements:
+ - macOS 10.11+
+ - "mas-cli (U(https://github.com/mas-cli/mas)) 1.5.0+ available as C(mas) in the bin path"
+ - The Apple ID to use already needs to be signed in to the Mac App Store (check with C(mas account)).
+'''
+
+EXAMPLES = '''
+- name: Install Keynote
+ community.general.mas:
+ id: 409183694
+ state: present
+
+- name: Install Divvy with the mas command installed in /usr/local/bin
+ community.general.mas:
+ id: 413857545
+ state: present
+ environment:
+ PATH: /usr/local/bin:{{ ansible_facts.env.PATH }}
+
+- name: Install a list of apps
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+
+- name: Ensure the latest Keynote version is installed
+ community.general.mas:
+ id: 409183694
+ state: latest
+
+- name: Upgrade all installed Mac App Store apps
+ community.general.mas:
+ upgrade_all: true
+
+- name: Install specific apps and also upgrade all others
+ community.general.mas:
+ id:
+ - 409183694 # Keynote
+ - 413857545 # Divvy
+ state: present
+ upgrade_all: true
+
+- name: Uninstall Divvy
+ community.general.mas:
+ id: 413857545
+ state: absent
+ become: true # Uninstallation requires root permissions
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+class Mas(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # Initialize data properties
+ self.mas_path = self.module.get_bin_path('mas')
+ self._checked_signin = False
+ self._installed = None # Populated only if needed
+ self._outdated = None # Populated only if needed
+ self.count_install = 0
+ self.count_upgrade = 0
+ self.count_uninstall = 0
+ self.result = {
+ 'changed': False
+ }
+
+ self.check_mas_tool()
+
+ def app_command(self, command, id):
+ ''' Runs a `mas` command on a given app; command can be 'install', 'upgrade' or 'uninstall' '''
+
+ if not self.module.check_mode:
+ if command != 'uninstall':
+ self.check_signin()
+
+ rc, out, err = self.run([command, str(id)])
+ if rc != 0:
+ self.module.fail_json(
+ msg="Error running command '{0}' on app '{1}': {2}".format(command, str(id), out.rstrip())
+ )
+
+ # No error or dry run
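+        # The command name doubles as the counter suffix, so this bumps
+        # self.count_install / count_upgrade / count_uninstall dynamically.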
+ self.__dict__['count_' + command] += 1
+
+ def check_mas_tool(self):
+ ''' Verifies that the `mas` tool is available in a recent version '''
+
+ # Is the `mas` tool available at all?
+ if not self.mas_path:
+ self.module.fail_json(msg='Required `mas` tool is not installed')
+
+ # Is the version recent enough?
+ rc, out, err = self.run(['version'])
+ if rc != 0 or not out.strip() or LooseVersion(out.strip()) < LooseVersion('1.5.0'):
+            self.module.fail_json(msg='`mas` tool version 1.5.0+ is required, got ' + out.strip())
+
+ def check_signin(self):
+ ''' Verifies that the user is signed in to the Mac App Store '''
+
+ # Only check this once per execution
+ if self._checked_signin:
+ return
+
+ rc, out, err = self.run(['account'])
+ if out.split("\n", 1)[0].rstrip() == 'Not signed in':
+ self.module.fail_json(msg='You must be signed in to the Mac App Store')
+
+ self._checked_signin = True
+
+ def exit(self):
+ ''' Exit with the data we have collected over time '''
+
+ msgs = []
+ if self.count_install > 0:
+ msgs.append('Installed {0} app(s)'.format(self.count_install))
+ if self.count_upgrade > 0:
+ msgs.append('Upgraded {0} app(s)'.format(self.count_upgrade))
+ if self.count_uninstall > 0:
+ msgs.append('Uninstalled {0} app(s)'.format(self.count_uninstall))
+
+ if msgs:
+ self.result['changed'] = True
+ self.result['msg'] = ', '.join(msgs)
+
+ self.module.exit_json(**self.result)
+
+ def get_current_state(self, command):
+ ''' Returns the list of all app IDs; command can either be 'list' or 'outdated' '''
+
+ rc, raw_apps, err = self.run([command])
+ rows = raw_apps.split("\n")
+ if rows[0] == "No installed apps found":
+ rows = []
+ apps = []
+ for r in rows:
+ # Format: "123456789 App Name"
+ r = r.split(' ', 1)
+ if len(r) == 2:
+ apps.append(int(r[0]))
+
+ return apps
+
+ def installed(self):
+ ''' Returns the list of installed apps '''
+
+ # Populate cache if not already done
+ if self._installed is None:
+ self._installed = self.get_current_state('list')
+
+ return self._installed
+
+ def is_installed(self, id):
+ ''' Checks whether the given app is installed '''
+
+ return int(id) in self.installed()
+
+ def is_outdated(self, id):
+ ''' Checks whether the given app is installed, but outdated '''
+
+ return int(id) in self.outdated()
+
+ def outdated(self):
+ ''' Returns the list of installed, but outdated apps '''
+
+ # Populate cache if not already done
+ if self._outdated is None:
+ self._outdated = self.get_current_state('outdated')
+
+ return self._outdated
+
+ def run(self, cmd):
+ ''' Runs a command of the `mas` tool '''
+
+ cmd.insert(0, self.mas_path)
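+        # check_rc=False: callers inspect the return code themselves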
+ return self.module.run_command(cmd, False)
+
+ def upgrade_all(self):
+ ''' Upgrades all installed apps and sets the correct result data '''
+
+ outdated = self.outdated()
+
+ if not self.module.check_mode:
+ self.check_signin()
+
+ rc, out, err = self.run(['upgrade'])
+ if rc != 0:
+ self.module.fail_json(msg='Could not upgrade all apps: ' + out.rstrip())
+
+ self.count_upgrade += len(outdated)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='list', elements='int'),
+ state=dict(type='str', default='present', choices=['absent', 'latest', 'present']),
+ upgrade_all=dict(type='bool', default=False, aliases=['upgrade']),
+ ),
+ supports_check_mode=True
+ )
+ mas = Mas(module)
+
+ if module.params['id']:
+ apps = module.params['id']
+ else:
+ apps = []
+
+ state = module.params['state']
+ upgrade = module.params['upgrade_all']
+
+ # Run operations on the given app IDs
+ for app in sorted(set(apps)):
+ if state == 'present':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+
+ elif state == 'absent':
+ if mas.is_installed(app):
+ # Ensure we are root
+ if os.getuid() != 0:
+ module.fail_json(msg="Uninstalling apps requires root permissions ('become: true')")
+
+ mas.app_command('uninstall', app)
+
+ elif state == 'latest':
+ if not mas.is_installed(app):
+ mas.app_command('install', app)
+ elif mas.is_outdated(app):
+ mas.app_command('upgrade', app)
+
+ # Upgrade all apps if requested
+ mas._outdated = None # Clear cache
+ if upgrade and mas.outdated():
+ mas.upgrade_all()
+
+ # Exit with the collected data
+ mas.exit()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/matrix.py b/ansible_collections/community/general/plugins/modules/matrix.py
new file mode 100644
index 000000000..0b419c8d9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/matrix.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# coding: utf-8
+
+# Copyright (c) 2018, Jan Christian Grünhage <jan.christian@gruenhage.xyz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: "Jan Christian Grünhage (@jcgruenhage)"
+module: matrix
+short_description: Send notifications to Matrix
+description:
+    - This module sends HTML-formatted notifications to Matrix rooms.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+    msg_plain:
+        type: str
+        description:
+            - Plain text form of the message to send to Matrix, usually markdown.
+        required: true
+    msg_html:
+        type: str
+        description:
+            - HTML form of the message to send to Matrix.
+        required: true
+    room_id:
+        type: str
+        description:
+            - ID of the room to send the notification to.
+        required: true
+    hs_url:
+        type: str
+        description:
+            - URL of the homeserver, where the CS-API is reachable.
+        required: true
+    token:
+        type: str
+        description:
+            - Authentication token for the API call. If provided, I(user_id) and I(password) are not required.
+    user_id:
+        type: str
+        description:
+            - The user ID of the user.
+    password:
+        type: str
+        description:
+            - The password to log in with.
+requirements:
+ - matrix-client (Python library)
+'''
+
+EXAMPLES = '''
+- name: Send matrix notification with token
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ token: "{{ matrix_auth_token }}"
+
+- name: Send matrix notification with user_id and password
+ community.general.matrix:
+ msg_plain: "**hello world**"
+ msg_html: "<b>hello world</b>"
+ room_id: "!12345678:server.tld"
+ hs_url: "https://matrix.org"
+ user_id: "ansible_notification_bot"
+ password: "{{ matrix_auth_password }}"
+'''
+
+RETURN = '''
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+MATRIX_IMP_ERR = None
+try:
+ from matrix_client.client import MatrixClient
+except ImportError:
+ MATRIX_IMP_ERR = traceback.format_exc()
+ matrix_found = False
+else:
+ matrix_found = True
+
+
+def run_module():
+ module_args = dict(
+ msg_plain=dict(type='str', required=True),
+ msg_html=dict(type='str', required=True),
+ room_id=dict(type='str', required=True),
+ hs_url=dict(type='str', required=True),
+ token=dict(type='str', required=False, no_log=True),
+ user_id=dict(type='str', required=False),
+ password=dict(type='str', required=False, no_log=True),
+ )
+
+ result = dict(
+ changed=False,
+ message=''
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[['password', 'token']],
+ required_one_of=[['password', 'token']],
+ required_together=[['user_id', 'password']],
+ supports_check_mode=True
+ )
+
+ if not matrix_found:
+ module.fail_json(msg=missing_required_lib('matrix-client'), exception=MATRIX_IMP_ERR)
+
+    if module.check_mode:
+        module.exit_json(**result)
+
+    # create a client object and authenticate, either with a token or by logging in
+ client = MatrixClient(module.params['hs_url'])
+ if module.params['token'] is not None:
+ client.api.token = module.params['token']
+ else:
+ client.login(module.params['user_id'], module.params['password'], sync=False)
+
+ # make sure we are in a given room and return a room object for it
+ room = client.join_room(module.params['room_id'])
+    # send an HTML-formatted message (the plain text version is used as the fallback body)
+ room.send_html(module.params['msg_html'], module.params['msg_plain'])
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mattermost.py b/ansible_collections/community/general/plugins/modules/mattermost.py
new file mode 100644
index 000000000..29894c3a7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mattermost.py
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Benjamin Jolivot <bjolivot@gmail.com>
+# Inspired by slack module :
+# # Copyright (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# # Copyright (c) 2016, René Moser <mail@renemoser.net>
+# # Copyright (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# # Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: mattermost
+short_description: Send Mattermost notifications
+description:
+ - Sends notifications to U(http://your.mattermost.url) via the Incoming WebHook integration.
+author: "Benjamin Jolivot (@bjolivot)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ url:
+ type: str
+ description:
+      - Mattermost URL (for example, C(http://mattermost.yourcompany.com)).
+ required: true
+ api_key:
+ type: str
+ description:
+      - Mattermost webhook API key. Log into your Mattermost site and go to
+        Menu -> Integration -> Incoming Webhook -> Add Incoming Webhook.
+        This gives you the full URL; I(api_key) is the last part, for example
+        C(http://mattermost.example.com/hooks/API_KEY).
+ required: true
+ text:
+ type: str
+ description:
+ - Text to send. Note that the module does not handle escaping characters.
+ - Required when I(attachments) is not set.
+ attachments:
+ type: list
+ elements: dict
+ description:
+ - Define a list of attachments.
+ - For more information, see U(https://developers.mattermost.com/integrate/admin-guide/admin-message-attachments/).
+ - Required when I(text) is not set.
+ version_added: 4.3.0
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(api_key).
+ username:
+ type: str
+ description:
+      - This is the sender of the message (Username Override needs to be enabled by the Mattermost admin; see the Mattermost documentation).
+ default: Ansible
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: https://docs.ansible.com/favicon.ico
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+'''
+
+EXAMPLES = """
+- name: Send notification message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+
+- name: Send notification message via Mattermost with all options
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ text: '{{ inventory_hostname }} completed'
+ channel: notifications
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+
+- name: Send attachments message via Mattermost
+ community.general.mattermost:
+ url: http://mattermost.example.com
+ api_key: my_api_key
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: true
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: true
+"""
+
+RETURN = '''
+payload:
+ description: Mattermost payload
+ returned: success
+ type: str
+webhook_url:
+ description: URL the webhook is sent to
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ api_key=dict(type='str', required=True, no_log=True),
+ text=dict(type='str'),
+ channel=dict(type='str', default=None),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+ validate_certs=dict(default=True, type='bool'),
+ attachments=dict(type='list', elements='dict'),
+ ),
+ required_one_of=[
+ ('text', 'attachments'),
+ ],
+ )
+ # init return dict
+ result = dict(changed=False, msg="OK")
+
+    # build the webhook URL: <url>/hooks/<api_key>
+ webhook_url = "{0}/hooks/{1}".format(module.params['url'], module.params['api_key'])
+ result['webhook_url'] = webhook_url
+
+ # define payload
+ payload = {}
+ for param in ['text', 'channel', 'username', 'icon_url', 'attachments']:
+ if module.params[param] is not None:
+ payload[param] = module.params[param]
+
+ payload = module.jsonify(payload)
+ result['payload'] = payload
+
+ # http headers
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+    # notes:
+    # Nothing is done in check mode, so the task will pass even if the server
+    # is down and/or the token is invalid; there is currently no good way to
+    # verify this without actually sending a message.
+
+ # send request if not in test mode
+    if not module.check_mode:
+ response, info = fetch_url(module=module, url=webhook_url, headers=headers, method='POST', data=payload)
+
+ # something's wrong
+ if info['status'] != 200:
+ # some problem
+ result['msg'] = "Failed to send mattermost message, the error was: {0}".format(info['msg'])
+ module.fail_json(**result)
+
+ # Looks good
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/maven_artifact.py b/ansible_collections/community/general/plugins/modules/maven_artifact.py
new file mode 100644
index 000000000..3f9defa52
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/maven_artifact.py
@@ -0,0 +1,762 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Chris Schmidt <chris.schmidt () contrastsecurity.com>
+#
+# Built using https://github.com/hamnis/useful-scripts/blob/master/python/download-maven-artifact
+# as a reference and starting point.
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: maven_artifact
+short_description: Downloads an Artifact from a Maven Repository
+description:
+ - Downloads an artifact from a maven repository given the maven coordinates provided to the module.
+ - Can retrieve snapshots or release versions of the artifact and will resolve the latest available
+ version if one is not available.
+author: "Chris Schmidt (@chrisisbeef)"
+requirements:
+ - lxml
+    - boto3 if using an S3 repository (s3://...)
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ group_id:
+ type: str
+ description:
+ - The Maven groupId coordinate
+ required: true
+ artifact_id:
+ type: str
+ description:
+ - The maven artifactId coordinate
+ required: true
+ version:
+ type: str
+ description:
+ - The maven version coordinate
+ - Mutually exclusive with I(version_by_spec).
+ version_by_spec:
+ type: str
+ description:
+ - The maven dependency version ranges.
+ - See supported version ranges on U(https://cwiki.apache.org/confluence/display/MAVENOLD/Dependency+Mediation+and+Conflict+Resolution)
+      - The range types "(,1.0],[1.2,)" and "(,1.1),(1.1,)" are not supported.
+ - Mutually exclusive with I(version).
+ version_added: '0.2.0'
+ classifier:
+ type: str
+ description:
+ - The maven classifier coordinate
+ default: ''
+ extension:
+ type: str
+ description:
+ - The maven type/extension coordinate
+ default: jar
+ repository_url:
+ type: str
+ description:
+ - The URL of the Maven Repository to download from.
+ - Use s3://... if the repository is hosted on Amazon S3, added in version 2.2.
+      - Use file://... if the repository is local, added in version 2.6.
+ default: https://repo1.maven.org/maven2
+ username:
+ type: str
+ description:
+      - The username to authenticate as to the Maven Repository. Use the AWS secret key if the repository is hosted on S3.
+ aliases: [ "aws_secret_key" ]
+ password:
+ type: str
+ description:
+      - The password to authenticate with to the Maven Repository. Use the AWS secret access key if the repository is hosted on S3.
+ aliases: [ "aws_secret_access_key" ]
+ headers:
+ description:
+ - Add custom HTTP headers to a request in hash/dict format.
+ type: dict
+ force_basic_auth:
+ description:
+      - httplib2, the library used by the uri module, only sends authentication information when a webservice
+        responds to an initial request with a 401 status. Since some basic auth services do not properly
+        send a 401, logins will fail. This option forces the sending of the Basic authentication header
+        upon the initial request.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ dest:
+ type: path
+ description:
+      - The path where the artifact should be written to.
+      - If file mode or ownership are specified and the destination path already exists, they affect the downloaded file.
+ required: true
+ state:
+ type: str
+ description:
+ - The desired state of the artifact
+ default: present
+ choices: [present,absent]
+ timeout:
+ type: int
+ description:
+ - Specifies a timeout in seconds for the connection attempt
+ default: 10
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be set to C(false) when no other option exists.
+ type: bool
+ default: true
+ client_cert:
+ description:
+ - PEM formatted certificate chain file to be used for SSL client authentication.
+ - This file can also include the key as well, and if the key is included, I(client_key) is not required.
+ type: path
+ version_added: '1.3.0'
+ client_key:
+ description:
+ - PEM formatted file that contains your private key to be used for SSL client authentication.
+ - If I(client_cert) contains both the certificate and key, this option is not required.
+ type: path
+ version_added: '1.3.0'
+ keep_name:
+ description:
+      - If C(true), the downloaded artifact's name is preserved, i.e. the version number remains part of it.
+ - This option only has effect when C(dest) is a directory and C(version) is set to C(latest) or C(version_by_spec)
+ is defined.
+ type: bool
+ default: false
+ verify_checksum:
+ type: str
+ description:
+ - If C(never), the MD5/SHA1 checksum will never be downloaded and verified.
+ - If C(download), the MD5/SHA1 checksum will be downloaded and verified only after artifact download. This is the default.
+      - If C(change), the MD5/SHA1 checksum will be downloaded and verified if the destination already exists,
+ to verify if they are identical. This was the behaviour before 2.6. Since it downloads the checksum before (maybe)
+ downloading the artifact, and since some repository software, when acting as a proxy/cache, return a 404 error
+ if the artifact has not been cached yet, it may fail unexpectedly.
+ If you still need it, you should consider using C(always) instead - if you deal with a checksum, it is better to
+ use it to verify integrity after download.
+ - C(always) combines C(download) and C(change).
+ required: false
+ default: 'download'
+ choices: ['never', 'download', 'change', 'always']
+ checksum_alg:
+ type: str
+ description:
+ - If C(md5), checksums will use the MD5 algorithm. This is the default.
+ - If C(sha1), checksums will use the SHA1 algorithm. This can be used on systems configured to use
+ FIPS-compliant algorithms, since MD5 will be blocked on such systems.
+ default: 'md5'
+ choices: ['md5', 'sha1']
+ version_added: 3.2.0
+ unredirected_headers:
+ type: list
+ elements: str
+ version_added: 5.2.0
+ description:
+      - A list of headers that should not be included in the redirection. These headers are sent to the C(fetch_url) function.
+ - On ansible-core version 2.12 or later, the default of this option is C([Authorization, Cookie]).
+ - Useful if the redirection URL does not need to have sensitive headers in the request.
+ - Requires ansible-core version 2.12 or later.
+ directory_mode:
+ type: str
+ description:
+ - Filesystem permission mode applied recursively to I(dest) when it is a directory.
+extends_documentation_fragment:
+ - ansible.builtin.files
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Download the latest version of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+
+- name: Download JUnit 4.11 from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version: 4.11
+ dest: /tmp/junit-4.11.jar
+
+- name: Download an artifact from a private repository requiring authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ username: user
+ password: pass
+ dest: /tmp/library-name-latest.jar
+
+- name: Download an artifact from a private repository requiring certificate authentication
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: library-name
+ repository_url: 'https://repo.company.com/maven'
+ client_cert: /path/to/cert.pem
+ client_key: /path/to/key.pem
+ dest: /tmp/library-name-latest.jar
+
+- name: Download a WAR File to the Tomcat webapps directory to be deployed
+ community.general.maven_artifact:
+ group_id: com.company
+ artifact_id: web-app
+ extension: war
+ repository_url: 'https://repo.company.com/maven'
+ dest: /var/lib/tomcat7/webapps/web-app.war
+
+- name: Keep a downloaded artifact's name, i.e. retain the version
+ community.general.maven_artifact:
+ version: latest
+ artifact_id: spring-core
+ group_id: org.springframework
+ dest: /tmp/
+ keep_name: true
+
+- name: Download the latest version of the JUnit framework artifact from Maven local
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ dest: /tmp/junit-latest.jar
+ repository_url: "file://{{ lookup('env','HOME') }}/.m2/repository"
+
+- name: Download the latest version between 3.8 and 4.0 (exclusive) of the JUnit framework artifact from Maven Central
+ community.general.maven_artifact:
+ group_id: junit
+ artifact_id: junit
+ version_by_spec: "[3.8,4.0)"
+ dest: /tmp/
+'''
+
+import hashlib
+import os
+import posixpath
+import shutil
+import io
+import tempfile
+import traceback
+import re
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible.module_utils.ansible_release import __version__ as ansible_version
+from re import match
+
+LXML_ETREE_IMP_ERR = None
+try:
+ from lxml import etree
+ HAS_LXML_ETREE = True
+except ImportError:
+ LXML_ETREE_IMP_ERR = traceback.format_exc()
+ HAS_LXML_ETREE = False
+
+BOTO_IMP_ERR = None
+try:
+ import boto3
+ HAS_BOTO = True
+except ImportError:
+ BOTO_IMP_ERR = traceback.format_exc()
+ HAS_BOTO = False
+
+SEMANTIC_VERSION_IMP_ERR = None
+try:
+ from semantic_version import Version, Spec
+ HAS_SEMANTIC_VERSION = True
+except ImportError:
+ SEMANTIC_VERSION_IMP_ERR = traceback.format_exc()
+ HAS_SEMANTIC_VERSION = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+
+
+def split_pre_existing_dir(dirname):
+ '''
+ Return the first pre-existing directory and a list of the new directories that will be created.
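+    For example, assuming /tmp exists but /tmp/a does not,
+    split_pre_existing_dir('/tmp/a/b') returns ('/tmp', ['a', 'b']).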
+ '''
+ head, tail = os.path.split(dirname)
+ b_head = to_bytes(head, errors='surrogate_or_strict')
+ if not os.path.exists(b_head):
+ if head == dirname:
+ return None, [head]
+ else:
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
+ else:
+ return head, [tail]
+ new_directory_list.append(tail)
+ return pre_existing_dir, new_directory_list
+
+
+def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
+ '''
+ Walk the new directories list and make sure that permissions are as we would expect
+ '''
+ if new_directory_list:
+ first_sub_dir = new_directory_list.pop(0)
+ if not pre_existing_dir:
+ working_dir = first_sub_dir
+ else:
+ working_dir = os.path.join(pre_existing_dir, first_sub_dir)
+ directory_args['path'] = working_dir
+ changed = module.set_fs_attributes_if_different(directory_args, changed)
+ changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
+ return changed
+
+
+class Artifact(object):
+ def __init__(self, group_id, artifact_id, version, version_by_spec, classifier='', extension='jar'):
+ if not group_id:
+ raise ValueError("group_id must be set")
+ if not artifact_id:
+ raise ValueError("artifact_id must be set")
+
+ self.group_id = group_id
+ self.artifact_id = artifact_id
+ self.version = version
+ self.version_by_spec = version_by_spec
+ self.classifier = classifier
+
+ if not extension:
+ self.extension = "jar"
+ else:
+ self.extension = extension
+
+ def is_snapshot(self):
+ return self.version and self.version.endswith("SNAPSHOT")
+
+ def path(self, with_version=True):
+ base = posixpath.join(self.group_id.replace(".", "/"), self.artifact_id)
+ if with_version and self.version:
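+            # A timestamped snapshot version such as "1.0-20180101.120000-1"
+            # is stored under its base "1.0-SNAPSHOT" directory.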
+ timestamp_version_match = re.match("^(.*-)?([0-9]{8}\\.[0-9]{6}-[0-9]+)$", self.version)
+ if timestamp_version_match:
+ base = posixpath.join(base, timestamp_version_match.group(1) + "SNAPSHOT")
+ else:
+ base = posixpath.join(base, self.version)
+ return base
+
+ def _generate_filename(self):
+ filename = self.artifact_id + "-" + self.classifier + "." + self.extension
+ if not self.classifier:
+ filename = self.artifact_id + "." + self.extension
+ return filename
+
+ def get_filename(self, filename=None):
+ if not filename:
+ filename = self._generate_filename()
+ elif os.path.isdir(filename):
+ filename = os.path.join(filename, self._generate_filename())
+ return filename
+
+ def __str__(self):
+ result = "%s:%s:%s" % (self.group_id, self.artifact_id, self.version)
+ if self.classifier:
+ result = "%s:%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.classifier, self.version)
+ elif self.extension != "jar":
+ result = "%s:%s:%s:%s" % (self.group_id, self.artifact_id, self.extension, self.version)
+ return result
+
+ @staticmethod
+ def parse(input):
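+        # Accepted coordinate strings, judging by the length checks below:
+        #   "group:artifact:version"
+        #   "group:artifact:extension:version"
+        #   "group:artifact:extension:classifier:version"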
+ parts = input.split(":")
+ if len(parts) >= 3:
+ g = parts[0]
+ a = parts[1]
+ v = parts[-1]
+ t = None
+ c = None
+ if len(parts) == 4:
+ t = parts[2]
+ if len(parts) == 5:
+ t = parts[2]
+ c = parts[3]
+ return Artifact(g, a, v, c, t)
+ else:
+ return None
+
+
+class MavenDownloader:
+ def __init__(self, module, base, local=False, headers=None):
+ self.module = module
+ if base.endswith("/"):
+ base = base.rstrip("/")
+ self.base = base
+ self.local = local
+ self.headers = headers
+ self.user_agent = "Ansible {0} maven_artifact".format(ansible_version)
+ self.latest_version_found = None
+ self.metadata_file_name = "maven-metadata-local.xml" if local else "maven-metadata.xml"
+
+ def find_version_by_spec(self, artifact):
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ original_versions = xml.xpath("/metadata/versioning/versions/version/text()")
+ versions = []
+ for version in original_versions:
+ try:
+ versions.append(Version.coerce(version))
+ except ValueError:
+                # the version string is not valid semantic versioning; skip it
+ pass
+
+ parse_versions_syntax = {
+ # example -> (,1.0]
+ r"^\(,(?P<upper_bound>[0-9.]*)]$": "<={upper_bound}",
+ # example -> 1.0
+ r"^(?P<version>[0-9.]*)$": "~={version}",
+ # example -> [1.0]
+ r"^\[(?P<version>[0-9.]*)\]$": "=={version}",
+ # example -> [1.2, 1.3]
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]*)\]$": ">={lower_bound},<={upper_bound}",
+ # example -> [1.2, 1.3)
+ r"^\[(?P<lower_bound>[0-9.]*),\s*(?P<upper_bound>[0-9.]+)\)$": ">={lower_bound},<{upper_bound}",
+ # example -> [1.5,)
+ r"^\[(?P<lower_bound>[0-9.]*),\)$": ">={lower_bound}",
+ }
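+        # For example, a version_by_spec of "[3.8,4.0)" is translated to the
+        # semantic_version spec ">=3.8,<4.0" before a concrete version is selected.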
+
+ for regex, spec_format in parse_versions_syntax.items():
+ regex_result = match(regex, artifact.version_by_spec)
+ if regex_result:
+ spec = Spec(spec_format.format(**regex_result.groupdict()))
+ selected_version = spec.select(versions)
+
+ if not selected_version:
+ raise ValueError("No version found with this spec version: {0}".format(artifact.version_by_spec))
+
+                # Handle repositories where a version has no patch number on the first build (e.g. 3.8 instead of 3.8.0)
+ if str(selected_version) not in original_versions:
+ selected_version.patch = None
+
+ return str(selected_version)
+
+ raise ValueError("The spec version {0} is not supported! ".format(artifact.version_by_spec))
+
+ def find_latest_version_available(self, artifact):
+ if self.latest_version_found:
+ return self.latest_version_found
+ path = "/%s/%s" % (artifact.path(False), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+ v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
+ if v:
+ self.latest_version_found = v[0]
+ return v[0]
+
+ def find_uri_for_artifact(self, artifact):
+ if artifact.version_by_spec:
+ artifact.version = self.find_version_by_spec(artifact)
+
+ if artifact.version == "latest":
+ artifact.version = self.find_latest_version_available(artifact)
+
+ if artifact.is_snapshot():
+ if self.local:
+ return self._uri_for_artifact(artifact, artifact.version)
+ path = "/%s/%s" % (artifact.path(), self.metadata_file_name)
+ content = self._getContent(self.base + path, "Failed to retrieve the maven metadata file: " + path)
+ xml = etree.fromstring(content)
+
+ for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
+ classifier = snapshotArtifact.xpath("classifier/text()")
+ artifact_classifier = classifier[0] if classifier else ''
+ extension = snapshotArtifact.xpath("extension/text()")
+ artifact_extension = extension[0] if extension else ''
+ if artifact_classifier == artifact.classifier and artifact_extension == artifact.extension:
+ return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
+ timestamp_xmlpath = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")
+ if timestamp_xmlpath:
+ timestamp = timestamp_xmlpath[0]
+ build_number = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
+ return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + build_number))
+
+ return self._uri_for_artifact(artifact, artifact.version)
+
+ def _uri_for_artifact(self, artifact, version=None):
+ if artifact.is_snapshot() and not version:
+ raise ValueError("Expected uniqueversion for snapshot artifact " + str(artifact))
+ elif not artifact.is_snapshot():
+ version = artifact.version
+ if artifact.classifier:
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "-" + artifact.classifier + "." + artifact.extension)
+
+ return posixpath.join(self.base, artifact.path(), artifact.artifact_id + "-" + version + "." + artifact.extension)
+
+ # for small files, directly get the full content
+ def _getContent(self, url, failmsg, force=True):
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ with io.open(parsed_url.path, 'rb') as f:
+ return f.read()
+ if force:
+ raise ValueError(failmsg + " because can not find file: " + url)
+ return None
+ response = self._request(url, failmsg, force)
+ if response:
+ return response.read()
+ return None
+
+ # only for HTTP request
+ def _request(self, url, failmsg, force=True):
+ url_to_use = url
+ parsed_url = urlparse(url)
+
+ if parsed_url.scheme == 's3':
+ parsed_url = urlparse(url)
+ bucket_name = parsed_url.netloc
+ key_name = parsed_url.path[1:]
+ client = boto3.client('s3', aws_access_key_id=self.module.params.get('username', ''), aws_secret_access_key=self.module.params.get('password', ''))
+ url_to_use = client.generate_presigned_url('get_object', Params={'Bucket': bucket_name, 'Key': key_name}, ExpiresIn=10)
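+            # the presigned URL carries temporary credentials in its query string,
+            # so the plain HTTP fetch below works against S3 unchanged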
+
+ req_timeout = self.module.params.get('timeout')
+
+ # Hack to add parameters in the way that fetch_url expects
+ self.module.params['url_username'] = self.module.params.get('username', '')
+ self.module.params['url_password'] = self.module.params.get('password', '')
+ self.module.params['http_agent'] = self.user_agent
+
+ kwargs = {}
+ if self.module.params['unredirected_headers']:
+ kwargs['unredirected_headers'] = self.module.params['unredirected_headers']
+
+ response, info = fetch_url(
+ self.module,
+ url_to_use,
+ timeout=req_timeout,
+ headers=self.headers,
+ **kwargs
+ )
+
+ if info['status'] == 200:
+ return response
+ if force:
+ raise ValueError(failmsg + " because of " + info['msg'] + "for URL " + url_to_use)
+ return None
+
+ def download(self, tmpdir, artifact, verify_download, filename=None, checksum_alg='md5'):
+ if (not artifact.version and not artifact.version_by_spec) or artifact.version == "latest":
+ artifact = Artifact(artifact.group_id, artifact.artifact_id, self.find_latest_version_available(artifact), None,
+ artifact.classifier, artifact.extension)
+ url = self.find_uri_for_artifact(artifact)
+ tempfd, tempname = tempfile.mkstemp(dir=tmpdir)
+
+ try:
+ # copy to temp file
+ if self.local:
+ parsed_url = urlparse(url)
+ if os.path.isfile(parsed_url.path):
+ shutil.copy2(parsed_url.path, tempname)
+ else:
+ return "Can not find local file: " + parsed_url.path
+ else:
+ response = self._request(url, "Failed to download artifact " + str(artifact))
+ with os.fdopen(tempfd, 'wb') as f:
+ shutil.copyfileobj(response, f)
+
+ if verify_download:
+ invalid_checksum = self.is_invalid_checksum(tempname, url, checksum_alg)
+ if invalid_checksum:
+ # if verify_change was set, the previous file would be deleted
+ os.remove(tempname)
+ return invalid_checksum
+ except Exception as e:
+ os.remove(tempname)
+ raise e
+
+ # all good, now copy temp file to target
+ shutil.move(tempname, artifact.get_filename(filename))
+ return None
+
+ def is_invalid_checksum(self, file, remote_url, checksum_alg='md5'):
+ if os.path.exists(file):
+ local_checksum = self._local_checksum(checksum_alg, file)
+ if self.local:
+ parsed_url = urlparse(remote_url)
+ remote_checksum = self._local_checksum(checksum_alg, parsed_url.path)
+ else:
+ try:
+ remote_checksum = to_text(self._getContent(remote_url + '.' + checksum_alg, "Failed to retrieve checksum", False), errors='strict')
+ except UnicodeError as e:
+ return "Cannot retrieve a valid %s checksum from %s: %s" % (checksum_alg, remote_url, to_native(e))
+ if not remote_checksum:
+ return "Cannot find %s checksum from %s" % (checksum_alg, remote_url)
+            try:
+                # The remote checksum file may contain either just the hash or
+                # the hash followed by a filename; keep only the hash.
+                remote_checksum = remote_checksum.split(None, 1)[0]
+            except IndexError:
+                # remote_checksum was empty, so keep the original checksum string.
+                # This should not happen since we checked remote_checksum above.
+                pass
+ if local_checksum.lower() == remote_checksum.lower():
+ return None
+ else:
+ return "Checksum does not match: we computed " + local_checksum + " but the repository states " + remote_checksum
+
+ return "Path does not exist: " + file
+
+ def _local_checksum(self, checksum_alg, file):
+ if checksum_alg.lower() == 'md5':
+ hash = hashlib.md5()
+ elif checksum_alg.lower() == 'sha1':
+ hash = hashlib.sha1()
+ else:
+ raise ValueError("Unknown checksum_alg %s" % checksum_alg)
+ with io.open(file, 'rb') as f:
+ for chunk in iter(lambda: f.read(8192), b''):
+ hash.update(chunk)
+ return hash.hexdigest()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group_id=dict(required=True),
+ artifact_id=dict(required=True),
+ version=dict(default=None),
+ version_by_spec=dict(default=None),
+ classifier=dict(default=''),
+ extension=dict(default='jar'),
+ repository_url=dict(default='https://repo1.maven.org/maven2'),
+ username=dict(default=None, aliases=['aws_secret_key']),
+ password=dict(default=None, no_log=True, aliases=['aws_secret_access_key']),
+ headers=dict(type='dict'),
+ force_basic_auth=dict(default=False, type='bool'),
+ state=dict(default="present", choices=["present", "absent"]), # TODO - Implement a "latest" state
+ timeout=dict(default=10, type='int'),
+ dest=dict(type="path", required=True),
+ validate_certs=dict(required=False, default=True, type='bool'),
+ client_cert=dict(type="path", required=False),
+ client_key=dict(type="path", required=False),
+ keep_name=dict(required=False, default=False, type='bool'),
+ verify_checksum=dict(required=False, default='download', choices=['never', 'download', 'change', 'always']),
+ checksum_alg=dict(required=False, default='md5', choices=['md5', 'sha1']),
+ unredirected_headers=dict(type='list', elements='str', required=False),
+ directory_mode=dict(type='str'),
+ ),
+ add_file_common_args=True,
+ mutually_exclusive=([('version', 'version_by_spec')])
+ )
+
+ if LooseVersion(ansible_version) < LooseVersion("2.12") and module.params['unredirected_headers']:
+ module.fail_json(msg="Unredirected Headers parameter provided, but your ansible-core version does not support it. Minimum version is 2.12")
+
+ if LooseVersion(ansible_version) >= LooseVersion("2.12") and module.params['unredirected_headers'] is None:
+ # if the user did not supply unredirected params, we use the default, ONLY on ansible core 2.12 and above
+ module.params['unredirected_headers'] = ['Authorization', 'Cookie']
+
+ if not HAS_LXML_ETREE:
+ module.fail_json(msg=missing_required_lib('lxml'), exception=LXML_ETREE_IMP_ERR)
+
+ if module.params['version_by_spec'] and not HAS_SEMANTIC_VERSION:
+ module.fail_json(msg=missing_required_lib('semantic_version'), exception=SEMANTIC_VERSION_IMP_ERR)
+
+ repository_url = module.params["repository_url"]
+ if not repository_url:
+ repository_url = "https://repo1.maven.org/maven2"
+ try:
+ parsed_url = urlparse(repository_url)
+ except AttributeError as e:
+ module.fail_json(msg='url parsing went wrong %s' % e)
+
+ local = parsed_url.scheme == "file"
+
+ if parsed_url.scheme == 's3' and not HAS_BOTO:
+ module.fail_json(msg=missing_required_lib('boto3', reason='when using s3:// repository URLs'),
+ exception=BOTO_IMP_ERR)
+
+ group_id = module.params["group_id"]
+ artifact_id = module.params["artifact_id"]
+ version = module.params["version"]
+ version_by_spec = module.params["version_by_spec"]
+ classifier = module.params["classifier"]
+ extension = module.params["extension"]
+ headers = module.params['headers']
+ state = module.params["state"]
+ dest = module.params["dest"]
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ keep_name = module.params["keep_name"]
+ verify_checksum = module.params["verify_checksum"]
+ verify_download = verify_checksum in ['download', 'always']
+ verify_change = verify_checksum in ['change', 'always']
+ checksum_alg = module.params["checksum_alg"]
+
+ downloader = MavenDownloader(module, repository_url, local, headers)
+
+ if not version_by_spec and not version:
+ version = "latest"
+
+ try:
+ artifact = Artifact(group_id, artifact_id, version, version_by_spec, classifier, extension)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ changed = False
+ prev_state = "absent"
+
+ if dest.endswith(os.sep):
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+ if not os.path.exists(b_dest):
+ (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dest)
+ os.makedirs(b_dest)
+ directory_args = module.load_file_common_arguments(module.params)
+ directory_mode = module.params["directory_mode"]
+ if directory_mode is not None:
+ directory_args['mode'] = directory_mode
+ else:
+ directory_args['mode'] = None
+ changed = adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
+
+ if os.path.isdir(b_dest):
+ version_part = version
+ if version == 'latest':
+ version_part = downloader.find_latest_version_available(artifact)
+ elif version_by_spec:
+ version_part = downloader.find_version_by_spec(artifact)
+
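+        # e.g. junit 4.11 with keep_name=true yields "junit-4.11.jar" in dest;
+        # with keep_name=false the file is just "junit.jar"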
+ filename = "{artifact_id}{version_part}{classifier}.{extension}".format(
+ artifact_id=artifact_id,
+ version_part="-{0}".format(version_part) if keep_name else "",
+ classifier="-{0}".format(classifier) if classifier else "",
+ extension=extension
+ )
+ dest = posixpath.join(dest, filename)
+
+ b_dest = to_bytes(dest, errors='surrogate_or_strict')
+
+ if os.path.lexists(b_dest) and ((not verify_change) or not downloader.is_invalid_checksum(dest, downloader.find_uri_for_artifact(artifact), checksum_alg)):
+ prev_state = "present"
+
+ if prev_state == "absent":
+ try:
+ download_error = downloader.download(module.tmpdir, artifact, verify_download, b_dest, checksum_alg)
+ if download_error is None:
+ changed = True
+ else:
+ module.fail_json(msg="Cannot retrieve the artifact to destination: " + download_error)
+ except ValueError as e:
+ module.fail_json(msg=e.args[0])
+
+ file_args = module.load_file_common_arguments(module.params, path=dest)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+ if changed:
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=changed)
+ else:
+ module.exit_json(state=state, dest=dest, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_dns_reload.py b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
new file mode 100644
index 000000000..a1168724f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_dns_reload.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_dns_reload
+author: "Simon Weald (@glitchcrab)"
+short_description: Request reload of Memset's DNS infrastructure
+notes:
+ - DNS reload requests are a best-effort service provided by Memset; these generally
+      happen every 15 minutes by default. However, you can request an immediate reload if
+ later tasks rely on the records being created. An API key generated via the
+ Memset customer control panel is required with the following minimum scope -
+ I(dns.reload). If you wish to poll the job status to wait until the reload has
+ completed, then I(job.status) is also required.
+description:
+ - Request a reload of Memset's DNS infrastructure, and optionally poll until it finishes.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ poll:
+ default: false
+ type: bool
+ description:
+ - Boolean value, if set will poll the reload job's status and return
+ when the job has completed (unless the 30 second timeout is reached first).
+ If the timeout is reached then the task will not be marked as failed, but
+ stderr will indicate that the polling failed.
+'''
+
+EXAMPLES = '''
+- name: Submit DNS reload and poll
+ community.general.memset_dns_reload:
+ api_key: 5eb86c9196ab03919abcf03857163741
+ poll: true
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Raw response from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ error:
+ description: Whether the job ended in error state.
+ returned: always
+ type: bool
+ sample: true
+ finished:
+ description: Whether the job completed before the result was returned.
+ returned: always
+ type: bool
+ sample: true
+ id:
+ description: Job ID.
+ returned: always
+ type: str
+ sample: "c9cc8ad2a3e3fb8c63ed83c424928ef8"
+ status:
+ description: Job status.
+ returned: always
+ type: str
+ sample: "DONE"
+ type:
+ description: Job type.
+ returned: always
+ type: str
+ sample: "dns"
+'''
+
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def poll_reload_status(api_key=None, job_id=None, payload=None):
+ '''
+ We poll the `job.status` endpoint every 5 seconds up to a
+ maximum of 6 times. This is a relatively arbitrary choice of
+    timeout; however, requests rarely take longer than 15 seconds
+ to complete.
+ '''
+ memset_api, stderr, msg = None, None, None
+ payload['id'] = job_id
+
+ api_method = 'job.status'
+ _has_failed, _msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+
+    # Poll up to 6 times (~30 seconds); bounding the loop with a counter
+    # prevents polling forever if the job never reports as finished.
+    counter = 0
+    while not response.json()['finished'] and counter < 6:
+        sleep(5)
+        _has_failed, msg, response = memset_api_call(api_key=api_key, api_method=api_method, payload=payload)
+        counter += 1
+ if response.json()['error']:
+ # the reload job was submitted but polling failed. Don't return this as an overall task failure.
+ stderr = "Reload submitted successfully, but the Memset API returned a job error when attempting to poll the reload status."
+ else:
+ memset_api = response.json()
+ msg = None
+
+ return memset_api, msg, stderr
+
+
+def reload_dns(args=None):
+ '''
+ DNS reloads are a single API call and therefore there's not much
+ which can go wrong outside of auth errors.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ memset_api, msg, stderr = None, None, None
+
+ api_method = 'dns.reload'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ if response.status_code is not None:
+ retvals['memset_api'] = response.json()
+ else:
+ retvals['stderr'] = response.stderr
+ retvals['msg'] = msg
+ return retvals
+
+ # set changed to true if the reload request was accepted.
+ has_changed = True
+ memset_api = msg
+ # empty msg var as we don't want to return the API's json response twice.
+ msg = None
+
+ if args['poll']:
+ # hand off to the poll function.
+ job_id = response.json()['id']
+ memset_api, msg, stderr = poll_reload_status(api_key=args['api_key'], job_id=job_id, payload=payload)
+
+ # assemble return variables.
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    for val in ['msg', 'stderr', 'memset_api']:
+        # eval() resolves the like-named local variable
+        if eval(val) is not None:
+            retvals[val] = eval(val)
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ poll=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=False
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = reload_dns(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_memstore_info.py b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
new file mode 100644
index 000000000..5fc9d79e1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_memstore_info.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_memstore_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve Memstore product usage information
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(memstore.usage).
+description:
+ - Retrieve Memstore product usage information.
+ - This module was called C(memset_memstore_facts) before Ansible 2.9. The usage did not change.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+  check_mode:
+    support: full
+    version_added: 3.3.0
+    # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The Memstore product name (for example, C(mstestyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get usage for mstestyaa1
+ community.general.memset_memstore_info:
+ name: mstestyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+memset_api:
+ description: Info from the Memset API
+ returned: always
+ type: complex
+ contains:
+ cdn_bandwidth:
+ description: Dictionary of CDN bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound CDN bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ containers:
+ description: Number of containers
+ returned: always
+ type: int
+ sample: 10
+ bytes:
+ description: Space used in bytes
+ returned: always
+ type: int
+ sample: 3860997965
+ objs:
+ description: Number of objects
+ returned: always
+ type: int
+ sample: 1000
+ bandwidth:
+      description: Dictionary of bandwidth facts
+ returned: always
+ type: complex
+ contains:
+ bytes_out:
+ description: Outbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+ requests:
+ description: Number of requests in the last 24 hours
+ returned: always
+ type: int
+ sample: 10
+ bytes_in:
+ description: Inbound bandwidth for the last 24 hours in bytes
+ returned: always
+ type: int
+ sample: 1000
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'memstore.usage'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ if response.status_code is not None:
+ retvals['stderr'] = "API returned an error: {0}" . format(response.status_code)
+ else:
+ retvals['stderr'] = "{0}" . format(response.stderr)
+ return retvals
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    for val in ['msg', 'memset_api']:
+        # eval() resolves the like-named local variable
+        if eval(val) is not None:
+            retvals[val] = eval(val)
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_server_info.py b/ansible_collections/community/general/plugins/modules/memset_server_info.py
new file mode 100644
index 000000000..ecc0375eb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_server_info.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_server_info
+author: "Simon Weald (@glitchcrab)"
+short_description: Retrieve server information
+notes:
+ - An API key generated via the Memset customer control panel is needed with the
+ following minimum scope - I(server.info).
+description:
+ - Retrieve server information.
+ - This module was called C(memset_server_facts) before Ansible 2.9. The usage did not change.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ api_key:
+ required: true
+ type: str
+ description:
+ - The API key obtained from the Memset control panel.
+ name:
+ required: true
+ type: str
+ description:
+      - The server product name (for example C(testyaa1)).
+'''
+
+EXAMPLES = '''
+- name: Get details for testyaa1
+ community.general.memset_server_info:
+ name: testyaa1
+ api_key: 5eb86c9896ab03919abcf03857163741
+ delegate_to: localhost
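+
+# An illustrative sketch (untested here): register the result and read one of
+# the documented return fields; 'server' is an arbitrary variable name.
+- name: Get details for testyaa1 and register the result
+  community.general.memset_server_info:
+    name: testyaa1
+    api_key: 5eb86c9896ab03919abcf03857163741
+  register: server
+  delegate_to: localhost
+
+- name: Show which data zone the server is in
+  ansible.builtin.debug:
+    msg: "{{ server.memset_api.data_zone }}"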
+'''
+
+RETURN = '''
+---
+memset_api:
+  description: Info from the Memset API.
+ returned: always
+ type: complex
+ contains:
+ backups:
+ description: Whether this server has a backup service.
+ returned: always
+ type: bool
+ sample: true
+ control_panel:
+      description: Whether the server has a control panel (for example cPanel).
+ returned: always
+ type: str
+ sample: 'cpanel'
+ data_zone:
+ description: The data zone the server is in.
+ returned: always
+ type: str
+ sample: 'Memset Public Cloud'
+ expiry_date:
+ description: Current expiry date of the server.
+ returned: always
+ type: str
+ sample: '2018-08-10'
+ firewall_rule_group:
+ description: Details about the firewall group this server is in.
+ returned: always
+ type: dict
+ sample: {
+ "default_outbound_policy": "RETURN",
+ "name": "testyaa-fw1",
+ "nickname": "testyaa cPanel rules",
+ "notes": "",
+ "public": false,
+ "rules": {
+ "51d7db54d39c3544ef7c48baa0b9944f": {
+ "action": "ACCEPT",
+ "comment": "",
+ "dest_ip6s": "any",
+ "dest_ips": "any",
+ "dest_ports": "any",
+ "direction": "Inbound",
+ "ip_version": "any",
+ "ordering": 2,
+ "protocols": "icmp",
+ "rule_group_name": "testyaa-fw1",
+ "rule_id": "51d7db54d39c3544ef7c48baa0b9944f",
+ "source_ip6s": "any",
+ "source_ips": "any",
+ "source_ports": "any"
+ }
+ }
+ }
+ firewall_type:
+      description: The type of firewall the server has (for example self-managed, managed).
+ returned: always
+ type: str
+ sample: 'managed'
+ host_name:
+ description: The server's hostname.
+ returned: always
+ type: str
+ sample: 'testyaa1.miniserver.com'
+ ignore_monitoring_off:
+ description: When true, Memset won't remind the customer that monitoring is disabled.
+ returned: always
+ type: bool
+ sample: true
+ ips:
+ description: List of dictionaries of all IP addresses assigned to the server.
+ returned: always
+ type: list
+ sample: [
+ {
+ "address": "1.2.3.4",
+ "bytes_in_today": 1000.0,
+ "bytes_in_yesterday": 2000.0,
+ "bytes_out_today": 1000.0,
+ "bytes_out_yesterday": 2000.0
+ }
+ ]
+ monitor:
+ description: Whether the server has monitoring enabled.
+ returned: always
+ type: bool
+ sample: true
+ monitoring_level:
+      description: The server's monitoring level (for example basic).
+ returned: always
+ type: str
+ sample: 'basic'
+ name:
+ description: Server name (same as the service name).
+ returned: always
+ type: str
+ sample: 'testyaa1'
+ network_zones:
+ description: The network zone(s) the server is in.
+ returned: always
+ type: list
+ sample: [ 'reading' ]
+ nickname:
+ description: Customer-set nickname for the server.
+ returned: always
+ type: str
+ sample: 'database server'
+ no_auto_reboot:
+ description: Whether or not to reboot the server if monitoring detects it down.
+ returned: always
+ type: bool
+ sample: true
+ no_nrpe:
+ description: Whether Memset should use NRPE to monitor this server.
+ returned: always
+ type: bool
+ sample: true
+ os:
+ description: The server's Operating System.
+ returned: always
+ type: str
+ sample: 'debian_stretch_64'
+ penetration_patrol:
+ description: Intrusion detection support level for this server.
+ returned: always
+ type: str
+ sample: 'managed'
+ penetration_patrol_alert_level:
+ description: The alert level at which notifications are sent.
+ returned: always
+ type: int
+ sample: 10
+ primary_ip:
+ description: Server's primary IP.
+ returned: always
+ type: str
+ sample: '1.2.3.4'
+ renewal_price_amount:
+ description: Renewal cost for the server.
+ returned: always
+ type: str
+ sample: '30.00'
+ renewal_price_currency:
+ description: Currency for renewal payments.
+ returned: always
+ type: str
+ sample: 'GBP'
+ renewal_price_vat:
+      description: VAT rate for renewal payments.
+ returned: always
+ type: str
+ sample: '20'
+ start_date:
+ description: Server's start date.
+ returned: always
+ type: str
+ sample: '2013-04-10'
+ status:
+      description: Current status of the server (for example live, onhold).
+ returned: always
+ type: str
+ sample: 'LIVE'
+ support_level:
+ description: Support level included with the server.
+ returned: always
+ type: str
+ sample: 'managed'
+ type:
+      description: The server type (for example dedicated).
+ returned: always
+ type: str
+ sample: 'miniserver'
+ vlans:
+ description: Dictionary of tagged and untagged VLANs this server is in.
+ returned: always
+ type: dict
+ sample: {
+ tagged: [],
+ untagged: [ 'testyaa-vlan1', 'testyaa-vlan2' ]
+ }
+ vulnscan:
+ description: Vulnerability scanning level.
+ returned: always
+ type: str
+ sample: 'basic'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def get_facts(args=None):
+ '''
+ Performs a simple API call and returns a JSON blob.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ payload['name'] = args['name']
+
+ api_method = 'server.info'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ if response.status_code is not None:
+            retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+        else:
+            retvals['stderr'] = "{0}".format(response.stderr)
+ return retvals
+
+ # we don't want to return the same thing twice
+ msg = None
+ memset_api = response.json()
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # only return values which are actually set.
+    for key, value in (('msg', msg), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+
+ retvals = get_facts(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone.py b/ansible_collections/community/general/plugins/modules/memset_zone.py
new file mode 100644
index 000000000..e17472e39
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_zone.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone
+author: "Simon Weald (@glitchcrab)"
+short_description: Creates and deletes Memset DNS zones
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+description:
+ - Manage DNS zones in a Memset account.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ required: true
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ name:
+ required: true
+ description:
+ - The zone nickname; usually the same as the main domain. Ensure this
+ value has at most 250 characters.
+ type: str
+ aliases: [ nickname ]
+ ttl:
+ description:
+ - The default TTL for all records created in the zone. This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_create).
+ type: int
+ default: 0
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ force:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Forces deletion of a zone and all zone domains/zone records it contains.
+'''
+
+EXAMPLES = '''
+# Create the zone 'test'
+- name: Create zone
+ community.general.memset_zone:
+ name: test
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ ttl: 300
+ delegate_to: localhost
+
+# Force zone deletion
+- name: Force delete zone
+ community.general.memset_zone:
+ name: test
+ state: absent
+ api_key: 5eb86c9196ab03919abcf03857163741
+ force: true
+ delegate_to: localhost
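+
+# An illustrative sketch (untested here): when state is present the module
+# returns zone info, so the zone ID can be captured for later tasks.
+- name: Create zone and capture its ID
+  community.general.memset_zone:
+    name: test
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+    ttl: 300
+  register: zone
+  delegate_to: localhost
+
+- name: Show the new zone's ID
+  ansible.builtin.debug:
+    msg: "{{ zone.memset_api.id }}"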
+'''
+
+RETURN = '''
+memset_api:
+ description: Zone info from the Memset API
+ returned: when state == present
+ type: complex
+ contains:
+ domains:
+ description: List of domains in this zone
+ returned: always
+ type: list
+ sample: []
+ id:
+ description: Zone id
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ nickname:
+ description: Zone name
+ returned: always
+ type: str
+ sample: "example.com"
+ records:
+ description: List of DNS records for domains in this zone
+ returned: always
+ type: list
+ sample: []
+ ttl:
+ description: Default TTL for domains in this zone
+ returned: always
+ type: int
+ sample: 300
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+    # zone name can be at most 250 chars.
+    if len(args['name']) > 250:
+        stderr = 'Zone name must be no more than 250 characters in length.'
+        module.fail_json(failed=True, msg=stderr, stderr=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+
+ api_method = 'dns.zone_list'
+ has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, counter = check_zone(data=response, name=args['name'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((zone_exists and args['state'] == 'absent') or (not zone_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return retvals
+
+
+def create_zone(args=None, zone_exists=None, payload=None):
+ '''
+ At this point we already know whether the zone exists, so we
+ just need to make the API reflect the desired state.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if not zone_exists:
+ payload['ttl'] = args['ttl']
+ payload['nickname'] = args['name']
+ api_method = 'dns.zone_create'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ else:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
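+        # the loop variable persists after the loop, so once we break out,
+        # 'zone' refers to the matching zone (which is known to exist here).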
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ break
+ if zone['ttl'] != args['ttl']:
+ # update the zone if the desired TTL is different.
+ payload['id'] = zone['id']
+ payload['ttl'] = args['ttl']
+ api_method = 'dns.zone_update'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ # populate return var with zone info.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if zone_exists:
+ payload = dict()
+ payload['id'] = zone_id
+ api_method = 'dns.zone_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ memset_api = response.json()
+
+ return has_failed, has_changed, memset_api, msg
+
+
+def delete_zone(args=None, zone_exists=None, payload=None):
+ '''
+ Deletion requires extra sanity checking as the zone cannot be
+ deleted if it contains domains or records. Setting force=true
+ will override this behaviour.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ if zone_exists:
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ counter = 0
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ counter += 1
+ if counter == 1:
+ for zone in response.json():
+ if zone['nickname'] == args['name']:
+ zone_id = zone['id']
+ domain_count = len(zone['domains'])
+ record_count = len(zone['records'])
+ if (domain_count > 0 or record_count > 0) and args['force'] is False:
+ # we need to fail out if force was not explicitly set.
+ stderr = 'Zone contains domains or records and force was not used.'
+ has_failed = True
+ has_changed = False
+ module.fail_json(failed=has_failed, changed=has_changed, msg=msg, stderr=stderr, rc=1)
+ api_method = 'dns.zone_delete'
+ payload['id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ # return raw JSON from API in named var and then unset msg var so we aren't returning the same thing twice.
+ memset_api = msg
+ msg = None
+ else:
+ # zone names are not unique, so we cannot safely delete the requested
+ # zone at this time.
+ has_failed = True
+ has_changed = False
+ msg = 'Unable to delete zone as multiple zones with the same name exist.'
+ else:
+ has_failed, has_changed = False, False
+
+ return has_failed, has_changed, memset_api, msg
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = _msg
+
+ if response.stderr is not None:
+ retvals['stderr'] = response.stderr
+
+ return retvals
+
+ zone_exists, _msg, counter, _zone_id = get_zone_id(zone_name=args['name'], current_zones=response.json())
+
+ if args['state'] == 'present':
+ has_failed, has_changed, memset_api, msg = create_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ elif args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone(args=args, zone_exists=zone_exists, payload=payload)
+
+ retvals['failed'] = has_failed
+ retvals['changed'] = has_changed
+    # only return values which are actually set.
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ name=dict(required=True, aliases=['nickname'], type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ force=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_domain.py b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
new file mode 100644
index 000000000..172a48be2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_zone_domain.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_domain
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete domains in Memset DNS zones
+notes:
+ - Zone domains can be thought of as a collection of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_domain_create), I(dns.zone_domain_delete), I(dns.zone_domain_list).
+ - Currently this module can only create one domain at a time. Multiple domains should
+ be created using C(with_items).
+description:
+ - Manage DNS zone domains in a Memset account.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ domain:
+ required: true
+ description:
+ - The zone domain name. Ensure this value has at most 250 characters.
+ type: str
+ aliases: ['name']
+ zone:
+ required: true
+ description:
+ - The zone to add the domain to (this must already exist).
+ type: str
+'''
+
+EXAMPLES = '''
+# Create the zone domain 'test.com'
+- name: Create zone domain
+ community.general.memset_zone_domain:
+ domain: test.com
+ zone: testzone
+ state: present
+ api_key: 5eb86c9196ab03919abcf03857163741
+ delegate_to: localhost
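+
+# As the notes above mention, this module creates one domain at a time, so
+# multiple domains need a loop. An illustrative sketch (untested here);
+# the domain names are placeholders.
+- name: Create multiple zone domains
+  community.general.memset_zone_domain:
+    domain: "{{ item }}"
+    zone: testzone
+    state: present
+    api_key: 5eb86c9196ab03919abcf03857163741
+  with_items:
+    - test1.com
+    - test2.com
+  delegate_to: localhost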
+'''
+
+RETURN = '''
+memset_api:
+ description: Domain info from the Memset API
+ returned: when changed or state == present
+ type: complex
+ contains:
+ domain:
+ description: Domain name
+ returned: always
+ type: str
+ sample: "example.com"
+ id:
+ description: Domain ID
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import check_zone_domain
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_domain_create)
+ '''
+    # zone domain can be at most 250 chars.
+    if len(args['domain']) > 250:
+        stderr = 'Zone domain must be no more than 250 characters in length.'
+ module.fail_json(failed=True, msg=stderr)
+
+
+def check(args=None):
+ '''
+ Support for running with check mode.
+ '''
+ retvals = dict()
+ has_changed = False
+
+ api_method = 'dns.zone_domain_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ # set changed to true if the operation would cause a change.
+ has_changed = ((domain_exists and args['state'] == 'absent') or (not domain_exists and args['state'] == 'present'))
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+
+ return retvals
+
+
+def create_zone_domain(args=None, zone_exists=None, zone_id=None, payload=None):
+ '''
+ At this point we already know whether the containing zone exists,
+ so we just need to create the domain (or exit if it already exists).
+ '''
+ has_changed, has_failed = False, False
+ msg = None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
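+    # the for/else below relies on Python semantics: the else branch runs only
+    # when the loop completes without break, i.e. the domain was not found.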
+ for zone_domain in response.json():
+ if zone_domain['domain'] == args['domain']:
+ # zone domain already exists, nothing to change.
+ has_changed = False
+ break
+ else:
+ # we need to create the domain
+ api_method = 'dns.zone_domain_create'
+ payload['domain'] = args['domain']
+ payload['zone_id'] = zone_id
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+
+ return has_failed, has_changed, msg
+
+
+def delete_zone_domain(args=None, payload=None):
+ '''
+    Deletion is pretty simple: domains are always unique, so we
+    don't need to do any sanity checking to avoid deleting the
+ wrong thing.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ api_method = 'dns.zone_domain_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ domain_exists = check_zone_domain(data=response, domain=args['domain'])
+
+ if domain_exists:
+ api_method = 'dns.zone_domain_delete'
+ payload['domain'] = args['domain']
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = response.json()
+ # unset msg as we don't want to return unnecessary info to the user.
+ msg = None
+
+ return has_failed, has_changed, memset_api, msg
+
+
+def create_or_delete_domain(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete.
+ '''
+ retvals, payload = dict(), dict()
+ has_changed, has_failed = False, False
+ msg, stderr, memset_api = None, None, None
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = has_failed
+ retvals['msg'] = msg
+ if response.status_code is not None:
+            retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ else:
+ retvals['stderr'] = response.stderr
+ return retvals
+
+ zone_exists, msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ # the zone needs to be unique - this isn't a requirement of Memset's API but it
+ # makes sense in the context of this module.
+ has_failed = True
+ if counter == 0:
+            stderr = "DNS zone '{0}' does not exist, cannot create domain.".format(args['zone'])
+        elif counter > 1:
+            stderr = "{0} matches multiple zones, cannot create domain.".format(args['zone'])
+
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ return retvals
+
+ if args['state'] == 'present':
+ has_failed, has_changed, msg = create_zone_domain(args=args, zone_exists=zone_exists, zone_id=zone_id, payload=payload)
+
+ if args['state'] == 'absent':
+ has_failed, has_changed, memset_api, msg = delete_zone_domain(args=args, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # only return values which are actually set.
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ domain=dict(required=True, aliases=['name'], type='str'),
+ zone=dict(required=True, type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # validate some API-specific limitations.
+ api_validation(args=args)
+
+ if module.check_mode:
+ retvals = check(args)
+ else:
+ retvals = create_or_delete_domain(args)
+
+ # we would need to populate the return values with the API's response
+ # in several places so it's easier to do it at the end instead.
+ if not retvals['failed']:
+ if args['state'] == 'present' and not module.check_mode:
+ payload = dict()
+ payload['domain'] = args['domain']
+ api_method = 'dns.zone_domain_info'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ retvals['memset_api'] = response.json()
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/memset_zone_record.py b/ansible_collections/community/general/plugins/modules/memset_zone_record.py
new file mode 100644
index 000000000..4e56a11ca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/memset_zone_record.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Simon Weald <ansible@simonweald.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: memset_zone_record
+author: "Simon Weald (@glitchcrab)"
+short_description: Create and delete records in Memset DNS zones
+notes:
+ - Zones can be thought of as a logical group of domains, all of which share the
+ same DNS records (i.e. they point to the same IP). An API key generated via the
+ Memset customer control panel is needed with the following minimum scope -
+ I(dns.zone_create), I(dns.zone_delete), I(dns.zone_list).
+ - Currently this module can only create one DNS record at a time. Multiple records
+ should be created using C(with_items).
+description:
+ - Manage DNS records in a Memset account.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ default: present
+ description:
+ - Indicates desired state of resource.
+ type: str
+ choices: [ absent, present ]
+ api_key:
+ required: true
+ description:
+ - The API key obtained from the Memset control panel.
+ type: str
+ address:
+ required: true
+ description:
+ - The address for this record (can be IP or text string depending on record type).
+ type: str
+ aliases: [ ip, data ]
+ priority:
+ description:
+      - C(SRV) and C(TXT) record priority, in the range 0 to 999 (inclusive).
+ type: int
+ default: 0
+ record:
+ required: false
+ description:
+ - The subdomain to create.
+ type: str
+ default: ''
+ type:
+ required: true
+ description:
+ - The type of DNS record to create.
+ choices: [ A, AAAA, CNAME, MX, NS, SRV, TXT ]
+ type: str
+ relative:
+ type: bool
+ default: false
+ description:
+      - If set then the current domain is added onto the address field for C(CNAME), C(MX), C(NS)
+        and C(SRV) record types.
+ ttl:
+ description:
+ - The record's TTL in seconds (will inherit zone's TTL if not explicitly set). This must be a
+ valid int from U(https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create).
+ default: 0
+ choices: [ 0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400 ]
+ type: int
+ zone:
+ required: true
+ description:
+      - The name of the zone to which the record will be added.
+ type: str
+'''
+
+EXAMPLES = '''
+# Create DNS record for www.domain.com
+- name: Create DNS record
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: A
+ record: www
+ address: 1.2.3.4
+ ttl: 300
+ relative: false
+ delegate_to: localhost
+
+# create an SPF record for domain.com
+- name: Create SPF record for domain.com
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ state: present
+ zone: domain.com
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:a1.2.3.4 ?all"
+ delegate_to: localhost
+
+# create multiple DNS records
+- name: Create multiple DNS records
+ community.general.memset_zone_record:
+ api_key: dcf089a2896940da9ffefb307ef49ccd
+ zone: "{{ item.zone }}"
+ type: "{{ item.type }}"
+ record: "{{ item.record }}"
+ address: "{{ item.address }}"
+ delegate_to: localhost
+ with_items:
+ - { 'zone': 'domain1.com', 'type': 'A', 'record': 'www', 'address': '1.2.3.4' }
+ - { 'zone': 'domain2.com', 'type': 'A', 'record': 'mail', 'address': '4.3.2.1' }
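+
+# An illustrative sketch (untested here): an SRV record combining the
+# documented priority and relative options; the address value is a placeholder.
+- name: Create SRV record for _sip._tcp.domain.com
+  community.general.memset_zone_record:
+    api_key: dcf089a2896940da9ffefb307ef49ccd
+    state: present
+    zone: domain.com
+    type: SRV
+    record: _sip._tcp
+    address: "0 5060 sip.domain.com"
+    priority: 10
+    relative: true
+  delegate_to: localhost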
+'''
+
+RETURN = '''
+memset_api:
+ description: Record info from the Memset API.
+ returned: when state == present
+ type: complex
+ contains:
+ address:
+ description: Record content (may be an IP, string or blank depending on record type).
+ returned: always
+ type: str
+ sample: 1.1.1.1
+ id:
+ description: Record ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+ priority:
+ description: Priority for C(MX) and C(SRV) records.
+ returned: always
+ type: int
+ sample: 10
+ record:
+ description: Name of record.
+ returned: always
+ type: str
+ sample: "www"
+ relative:
+ description: Adds the current domain onto the address field for C(CNAME), C(MX), C(NS) and C(SRV) types.
+ returned: always
+ type: bool
+ sample: false
+ ttl:
+ description: Record TTL.
+ returned: always
+ type: int
+ sample: 10
+ type:
+ description: Record type.
+ returned: always
+ type: str
+ sample: AAAA
+ zone_id:
+ description: Zone ID.
+ returned: always
+ type: str
+ sample: "b0bb1ce851aeea6feeb2dc32fe83bf9c"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.memset import get_zone_id
+from ansible_collections.community.general.plugins.module_utils.memset import memset_api_call
+
+
+def api_validation(args=None):
+ '''
+ Perform some validation which will be enforced by Memset's API (see:
+ https://www.memset.com/apidocs/methods_dns.html#dns.zone_record_create)
+ '''
+ failed_validation = False
+
+    # priority must be an integer in the range 0 to 999 (inclusive).
+    if not 0 <= args['priority'] <= 999:
+        failed_validation = True
+        error = 'Priority must be in the range 0 to 999 (inclusive).'
+    # address value can be at most 250 chars.
+    if len(args['address']) > 250:
+        failed_validation = True
+        error = "Address must be no more than 250 characters in length."
+    # record value can be at most 63 chars.
+    if args['record']:
+        if len(args['record']) > 63:
+            failed_validation = True
+            error = "Record must be no more than 63 characters in length."
+ # relative isn't used for all record types
+ if args['relative']:
+ if args['type'] not in ['CNAME', 'MX', 'NS', 'SRV']:
+ failed_validation = True
+ error = "Relative is only valid for CNAME, MX, NS and SRV record types."
+ # if any of the above failed then fail early
+ if failed_validation:
+ module.fail_json(failed=True, msg=error)
+
+
+def create_zone_record(args=None, zone_id=None, records=None, payload=None):
+ '''
+ Sanity checking has already occurred prior to this function being
+ called, so we can go ahead and either create or update the record.
+ As defaults are defined for all values in the argument_spec, this
+ may cause some changes to occur as the defaults are enforced (if
+ the user has only configured required variables).
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # assemble the new record.
+ new_record = dict()
+ new_record['zone_id'] = zone_id
+ for arg in ['priority', 'address', 'relative', 'record', 'ttl', 'type']:
+ new_record[arg] = args[arg]
+
+ # if we have any matches, update them.
+ if records:
+ for zone_record in records:
+ # record exists, add ID to payload.
+ new_record['id'] = zone_record['id']
+ if zone_record == new_record:
+ # nothing to do; record is already correct so we populate
+ # the return var with the existing record's details.
+ memset_api = zone_record
+ return has_changed, has_failed, memset_api, msg
+ else:
+ # merge dicts ensuring we change any updated values
+ payload = zone_record.copy()
+ payload.update(new_record)
+ api_method = 'dns.zone_record_update'
+ if args['check_mode']:
+ has_changed = True
+ # return the new record to the user in the returned var.
+ memset_api = new_record
+ return has_changed, has_failed, memset_api, msg
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+ else:
+ # no record found, so we need to create it
+ api_method = 'dns.zone_record_create'
+ payload = new_record
+ if args['check_mode']:
+ has_changed = True
+ # populate the return var with the new record's details.
+ memset_api = new_record
+ return has_changed, has_failed, memset_api, msg
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = new_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return has_changed, has_failed, memset_api, msg
+
+
+def delete_zone_record(args=None, records=None, payload=None):
+ '''
+ Matching records can be cleanly deleted without affecting other
+ resource types, so this is pretty simple to achieve.
+ '''
+ has_changed, has_failed = False, False
+ msg, memset_api = None, None
+
+ # if we have any matches, delete them.
+ if records:
+ for zone_record in records:
+ if args['check_mode']:
+ has_changed = True
+ return has_changed, has_failed, memset_api, msg
+ payload['id'] = zone_record['id']
+ api_method = 'dns.zone_record_delete'
+ has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method, payload=payload)
+ if not has_failed:
+ has_changed = True
+ memset_api = zone_record
+ # empty msg as we don't want to return a boatload of json to the user.
+ msg = None
+
+ return has_changed, has_failed, memset_api, msg
+
+
+def create_or_delete(args=None):
+ '''
+ We need to perform some initial sanity checking and also look
+ up required info before handing it off to create or delete functions.
+ Check mode is integrated into the create or delete functions.
+ '''
+ has_failed, has_changed = False, False
+ msg, memset_api, stderr = None, None, None
+ retvals, payload = dict(), dict()
+
+ # get the zones and check if the relevant zone exists.
+ api_method = 'dns.zone_list'
+ _has_failed, msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ if _has_failed:
+ # this is the first time the API is called; incorrect credentials will
+ # manifest themselves at this point so we need to ensure the user is
+ # informed of the reason.
+ retvals['failed'] = _has_failed
+ retvals['msg'] = msg
+ if response.status_code is not None:
+            retvals['stderr'] = "API returned an error: {0}".format(response.status_code)
+ else:
+ retvals['stderr'] = response.stderr
+ return retvals
+
+ zone_exists, _msg, counter, zone_id = get_zone_id(zone_name=args['zone'], current_zones=response.json())
+
+ if not zone_exists:
+ has_failed = True
+ if counter == 0:
+            stderr = "DNS zone {0} does not exist.".format(args['zone'])
+        elif counter > 1:
+            stderr = "{0} matches multiple zones.".format(args['zone'])
+ retvals['failed'] = has_failed
+ retvals['msg'] = stderr
+ retvals['stderr'] = stderr
+ return retvals
+
+    # get a list of all records (as we can't limit records by zone).
+ api_method = 'dns.zone_record_list'
+ _has_failed, _msg, response = memset_api_call(api_key=args['api_key'], api_method=api_method)
+
+ # find any matching records
+ records = [record for record in response.json() if record['zone_id'] == zone_id
+ and record['record'] == args['record'] and record['type'] == args['type']]
+
+ if args['state'] == 'present':
+ has_changed, has_failed, memset_api, msg = create_zone_record(args=args, zone_id=zone_id, records=records, payload=payload)
+
+ if args['state'] == 'absent':
+ has_changed, has_failed, memset_api, msg = delete_zone_record(args=args, records=records, payload=payload)
+
+ retvals['changed'] = has_changed
+ retvals['failed'] = has_failed
+    # only return values which are actually set.
+    for key, value in (('msg', msg), ('stderr', stderr), ('memset_api', memset_api)):
+        if value is not None:
+            retvals[key] = value
+
+ return retvals
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ api_key=dict(required=True, type='str', no_log=True),
+ zone=dict(required=True, type='str'),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT'], type='str'),
+ address=dict(required=True, aliases=['ip', 'data'], type='str'),
+ record=dict(required=False, default='', type='str'),
+ ttl=dict(required=False, default=0, choices=[0, 300, 600, 900, 1800, 3600, 7200, 10800, 21600, 43200, 86400], type='int'),
+ priority=dict(required=False, default=0, type='int'),
+ relative=dict(required=False, default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # populate the dict with the user-provided vars.
+ args = dict()
+ for key, arg in module.params.items():
+ args[key] = arg
+ args['check_mode'] = module.check_mode
+
+ # perform some Memset API-specific validation
+ api_validation(args=args)
+
+ retvals = create_or_delete(args)
+
+ if retvals['failed']:
+ module.fail_json(**retvals)
+ else:
+ module.exit_json(**retvals)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mksysb.py b/ansible_collections/community/general/plugins/modules/mksysb.py
new file mode 100644
index 000000000..8272dbf7d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mksysb.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky (@russoz) <russoz@gmail.com>
+# Copyright (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+author: Kairo Araujo (@kairoaraujo)
+module: mksysb
+short_description: Generates AIX mksysb rootvg backups
+description:
+ - This module manages a basic AIX mksysb (image) of rootvg.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ backup_crypt_files:
+ description:
+      - Back up encrypted files.
+ type: bool
+ default: true
+ backup_dmapi_fs:
+ description:
+ - Back up DMAPI filesystem files.
+ type: bool
+ default: true
+ create_map_files:
+ description:
+      - Creates new MAP files.
+ type: bool
+ default: false
+ exclude_files:
+ description:
+ - Excludes files using C(/etc/rootvg.exclude).
+ type: bool
+ default: false
+ exclude_wpar_files:
+ description:
+ - Excludes WPAR files.
+ type: bool
+ default: false
+ extended_attrs:
+ description:
+      - Back up extended attributes.
+ type: bool
+ default: true
+ name:
+ type: str
+ description:
+      - Backup name.
+ required: true
+ new_image_data:
+ description:
+      - Creates a new image.data file.
+ type: bool
+ default: true
+ software_packing:
+ description:
+      - Excludes from packing any files listed in
+        C(/etc/exclude_packing.rootvg).
+ type: bool
+ default: false
+ storage_path:
+ type: str
+ description:
+      - Storage path where the mksysb backup will be stored.
+ required: true
+ use_snapshot:
+ description:
+ - Creates backup using snapshots.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Running a backup image mksysb
+ community.general.mksysb:
+ name: myserver
+ storage_path: /repository/images
+ exclude_files: true
+ exclude_wpar_files: true
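+
+# An illustrative sketch (untested here): take the backup from a snapshot and
+# skip software packing, using the options documented above.
+- name: Run a snapshot-based mksysb backup without software packing
+  community.general.mksysb:
+    name: myserver
+    storage_path: /repository/images
+    use_snapshot: true
+    software_packing: false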
+'''
+
+RETURN = '''
+changed:
+  description: Whether the mksysb action resulted in a change.
+ returned: always
+ type: bool
+msg:
+ description: Return message regarding the action.
+ returned: always
+ type: str
+'''
+
+import os
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+class MkSysB(ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ backup_crypt_files=dict(type='bool', default=True),
+ backup_dmapi_fs=dict(type='bool', default=True),
+ create_map_files=dict(type='bool', default=False),
+ exclude_files=dict(type='bool', default=False),
+ exclude_wpar_files=dict(type='bool', default=False),
+ extended_attrs=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ new_image_data=dict(type='bool', default=True),
+ software_packing=dict(type='bool', default=False),
+ storage_path=dict(type='str', required=True),
+ use_snapshot=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+ command_args_formats = dict(
+ create_map_files=cmd_runner_fmt.as_bool("-m"),
+ use_snapshot=cmd_runner_fmt.as_bool("-T"),
+ exclude_files=cmd_runner_fmt.as_bool("-e"),
+ exclude_wpar_files=cmd_runner_fmt.as_bool("-G"),
+ new_image_data=cmd_runner_fmt.as_bool("-i"),
+ software_packing=cmd_runner_fmt.as_bool_not("-p"),
+ extended_attrs=cmd_runner_fmt.as_bool("-a"),
+ backup_crypt_files=cmd_runner_fmt.as_bool_not("-Z"),
+ backup_dmapi_fs=cmd_runner_fmt.as_bool("-A"),
+ combined_path=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda p, n: ["%s/%s" % (p, n)])),
+ )
+
+ def __init_module__(self):
+ if not os.path.isdir(self.vars.storage_path):
+ self.do_raise("Storage path %s is not valid." % self.vars.storage_path)
+
+ def __run__(self):
+ def process(rc, out, err):
+ if rc != 0:
+ self.do_raise("mksysb failed: {0}".format(out))
+
+ runner = CmdRunner(
+ self.module,
+ ['mksysb', '-X'],
+ self.command_args_formats,
+ )
+ with runner(['create_map_files', 'use_snapshot', 'exclude_files', 'exclude_wpar_files', 'software_packing',
+ 'extended_attrs', 'backup_crypt_files', 'backup_dmapi_fs', 'new_image_data', 'combined_path'],
+ output_process=process, check_mode_skip=True) as ctx:
+ ctx.run(combined_path=[self.vars.storage_path, self.vars.name])
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+ self.changed = True
+
+
+def main():
+ MkSysB.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/modprobe.py b/ansible_collections/community/general/plugins/modules/modprobe.py
new file mode 100644
index 000000000..6389d758d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/modprobe.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, David Stygstra <david.stygstra@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: modprobe
+short_description: Load or unload kernel modules
+author:
+ - David Stygstra (@stygstra)
+ - Julien Dauphant (@jdauphant)
+ - Matt Jeffery (@mattjeffery)
+description:
+ - Load or unload kernel modules.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of kernel module to manage.
+ state:
+ type: str
+ description:
+ - Whether the module should be present or absent.
+ choices: [ absent, present ]
+ default: present
+ params:
+ type: str
+ description:
+      - Module parameters.
+ default: ''
+ persistent:
+ type: str
+ choices: [ disabled, absent, present ]
+ default: disabled
+ description:
+      - Persistence of the module configuration across reboots.
+ - This option creates files in C(/etc/modules-load.d/) and C(/etc/modprobe.d/) that make your module configuration persistent during reboots.
+ - If C(present), adds module name to C(/etc/modules-load.d/) and params to C(/etc/modprobe.d/) so the module will be loaded on next reboot.
+ - If C(absent), will comment out module name from C(/etc/modules-load.d/) and comment out params from C(/etc/modprobe.d/) so the module will not be
+ loaded on next reboot.
+      - If C(disabled), will not touch anything and leave C(/etc/modules-load.d/) and C(/etc/modprobe.d/) as they are.
+ - Note that it is usually a better idea to rely on the automatic module loading by PCI IDs, USB IDs, DMI IDs or similar triggers encoded in the
+ kernel modules themselves instead of configuration like this.
+ - In fact, most modern kernel modules are prepared for automatic loading already.
+ - "B(Note:) This option works only with distributions that use C(systemd) when set to values other than C(disabled)."
+'''
+
+EXAMPLES = '''
+- name: Add the 802.1q module
+ community.general.modprobe:
+ name: 8021q
+ state: present
+
+- name: Add the dummy module
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
+
+- name: Add the dummy module and make sure it is loaded after reboots
+ community.general.modprobe:
+ name: dummy
+ state: present
+ params: 'numdummies=2'
+ persistent: present
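+
+# An illustrative sketch (untested here): unload the module and comment out
+# its entries under /etc/modules-load.d/ and /etc/modprobe.d/.
+- name: Remove the dummy module and its persistent configuration
+  community.general.modprobe:
+    name: dummy
+    state: absent
+    persistent: absent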
+'''
+
+import os.path
+import platform
+import shlex
+import traceback
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+RELEASE_VER = platform.release()
+MODULES_LOAD_LOCATION = '/etc/modules-load.d'
+PARAMETERS_FILES_LOCATION = '/etc/modprobe.d'
+
+
+class Modprobe(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.modprobe_bin = module.get_bin_path('modprobe', True)
+
+ self.check_mode = module.check_mode
+ self.desired_state = module.params['state']
+ self.name = module.params['name']
+ self.params = module.params['params']
+ self.persistent = module.params['persistent']
+
+ self.changed = False
+
+ self.re_find_module = re.compile(r'^ *{0} *(?:[#;].*)?\n?\Z'.format(self.name))
+ self.re_find_params = re.compile(r'^options {0} \w+=\S+ *(?:[#;].*)?\n?\Z'.format(self.name))
+ self.re_get_params_and_values = re.compile(r'^options {0} (\w+=\S+) *(?:[#;].*)?\n?\Z'.format(self.name))
+
+ def load_module(self):
+ command = [self.modprobe_bin]
+ if self.check_mode:
+ command.append('-n')
+ command.extend([self.name] + shlex.split(self.params))
+
+ rc, out, err = self.module.run_command(command)
+
+ if rc != 0:
+ return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result)
+
+ if self.check_mode or self.module_loaded():
+ self.changed = True
+ else:
+ rc, stdout, stderr = self.module.run_command(
+ [self.modprobe_bin, '-n', '--first-time', self.name] + shlex.split(self.params)
+ )
+ if rc != 0:
+ self.module.warn(stderr)
+
+ @property
+ def module_is_loaded_persistently(self):
+ for module_file in self.modules_files:
+ with open(module_file) as file:
+ for line in file:
+ if self.re_find_module.match(line):
+ return True
+
+ return False
+
+ @property
+ def params_is_set(self):
+ desired_params = set(self.params.split())
+
+ return desired_params == self.permanent_params
+
+ @property
+ def permanent_params(self):
+ params = set()
+
+ for modprobe_file in self.modprobe_files:
+ with open(modprobe_file) as file:
+ for line in file:
+ match = self.re_get_params_and_values.match(line)
+ if match:
+ params.add(match.group(1))
+
+ return params
+
+ def create_module_file(self):
+ file_path = os.path.join(MODULES_LOAD_LOCATION,
+ self.name + '.conf')
+ with open(file_path, 'w') as file:
+ file.write(self.name + '\n')
+
+ @property
+ def module_options_file_content(self):
+ file_content = ['options {0} {1}'.format(self.name, param)
+ for param in self.params.split()]
+ return '\n'.join(file_content) + '\n'
+
+ def create_module_options_file(self):
+ new_file_path = os.path.join(PARAMETERS_FILES_LOCATION,
+ self.name + '.conf')
+ with open(new_file_path, 'w') as file:
+ file.write(self.module_options_file_content)
+
+ def disable_old_params(self):
+
+ for modprobe_file in self.modprobe_files:
+ with open(modprobe_file) as file:
+ file_content = file.readlines()
+
+ content_changed = False
+ for index, line in enumerate(file_content):
+ if self.re_find_params.match(line):
+ file_content[index] = '#' + line
+ content_changed = True
+
+            if content_changed:
+                with open(modprobe_file, 'w') as file:
+                    # lines from readlines() keep their trailing newlines, so
+                    # join with the empty string to avoid writing extra blank lines.
+                    file.write(''.join(file_content))
+
+ def disable_module_permanent(self):
+
+ for module_file in self.modules_files:
+ with open(module_file) as file:
+ file_content = file.readlines()
+
+ content_changed = False
+ for index, line in enumerate(file_content):
+ if self.re_find_module.match(line):
+ file_content[index] = '#' + line
+ content_changed = True
+
+            if content_changed:
+                with open(module_file, 'w') as file:
+                    # lines from readlines() keep their trailing newlines, so
+                    # join with the empty string to avoid writing extra blank lines.
+                    file.write(''.join(file_content))
+
+ def load_module_permanent(self):
+
+ if not self.module_is_loaded_persistently:
+ self.create_module_file()
+ self.changed = True
+
+ if not self.params_is_set:
+ self.disable_old_params()
+ self.create_module_options_file()
+ self.changed = True
+
+ def unload_module_permanent(self):
+ if self.module_is_loaded_persistently:
+ self.disable_module_permanent()
+ self.changed = True
+
+ if self.permanent_params:
+ self.disable_old_params()
+ self.changed = True
+
+ @property
+ def modules_files(self):
+ modules_paths = [os.path.join(MODULES_LOAD_LOCATION, path)
+ for path in os.listdir(MODULES_LOAD_LOCATION)]
+ return [path for path in modules_paths if os.path.isfile(path)]
+
+ @property
+ def modprobe_files(self):
+ modules_paths = [os.path.join(PARAMETERS_FILES_LOCATION, path)
+ for path in os.listdir(PARAMETERS_FILES_LOCATION)]
+ return [path for path in modules_paths if os.path.isfile(path)]
+
+ def module_loaded(self):
+ is_loaded = False
+ try:
+ with open('/proc/modules') as modules:
+ module_name = self.name.replace('-', '_') + ' '
+ for line in modules:
+ if line.startswith(module_name):
+ is_loaded = True
+ break
+
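+            # not listed in /proc/modules; the module may be built into the
+            # kernel, so also check modules.builtin for the running release.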
+ if not is_loaded:
+ module_file = '/' + self.name + '.ko'
+ builtin_path = os.path.join('/lib/modules/', RELEASE_VER, 'modules.builtin')
+ with open(builtin_path) as builtins:
+ for line in builtins:
+ if line.rstrip().endswith(module_file):
+ is_loaded = True
+ break
+ except (IOError, OSError) as e:
+ self.module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **self.result)
+
+ return is_loaded
+
+ def unload_module(self):
+ command = [self.modprobe_bin, '-r', self.name]
+ if self.check_mode:
+ command.append('-n')
+
+ rc, out, err = self.module.run_command(command)
+ if rc != 0:
+ return self.module.fail_json(msg=err, rc=rc, stdout=out, stderr=err, **self.result)
+
+ self.changed = True
+
+ @property
+ def result(self):
+ return {
+ 'changed': self.changed,
+ 'name': self.name,
+ 'params': self.params,
+ 'state': self.desired_state,
+ }
+
+
+def build_module():
+ return AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ params=dict(type='str', default=''),
+ persistent=dict(type='str', default='disabled', choices=['disabled', 'present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+
+def main():
+ module = build_module()
+
+ modprobe = Modprobe(module)
+
+ if modprobe.desired_state == 'present' and not modprobe.module_loaded():
+ modprobe.load_module()
+ elif modprobe.desired_state == 'absent' and modprobe.module_loaded():
+ modprobe.unload_module()
+
+ if modprobe.persistent == 'present' and not (modprobe.module_is_loaded_persistently and modprobe.params_is_set):
+ modprobe.load_module_permanent()
+ elif modprobe.persistent == 'absent' and (modprobe.module_is_loaded_persistently or modprobe.permanent_params):
+ modprobe.unload_module_permanent()
+
+ module.exit_json(**modprobe.result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/monit.py b/ansible_collections/community/general/plugins/modules/monit.py
new file mode 100644
index 000000000..d2a160678
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/monit.py
@@ -0,0 +1,349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Darryl Stoflet <stoflet@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: monit
+short_description: Manage the state of a program monitored via Monit
+description:
+ - Manage the state of a program monitored via I(Monit).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the I(monit) program/process to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - The state of service.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
+ type: str
+ timeout:
+ description:
+ - If there are pending actions for the service monitored by monit, then Ansible will check
+ for up to this many seconds to verify the requested action has been performed.
+ Ansible will sleep for five seconds between each check.
+ default: 300
+ type: int
+author:
+ - Darryl Stoflet (@dstoflet)
+ - Simon Kelly (@snopoke)
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program httpd to be in started state
+ community.general.monit:
+ name: httpd
+ state: started
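+
+# An illustrative sketch (untested here): restart a program and wait up to 60
+# seconds for the pending action to complete (checked every five seconds).
+- name: Restart httpd and wait for the action to complete
+  community.general.monit:
+    name: httpd
+    state: restarted
+    timeout: 60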
+'''
+
+import time
+import re
+
+from collections import namedtuple
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import python_2_unicode_compatible
+
+
+STATE_COMMAND_MAP = {
+ 'stopped': 'stop',
+ 'started': 'start',
+ 'monitored': 'monitor',
+ 'unmonitored': 'unmonitor',
+ 'restarted': 'restart'
+}
+
+MONIT_SERVICES = ['Process', 'File', 'Fifo', 'Filesystem', 'Directory', 'Remote host', 'System', 'Program',
+ 'Network']
+
+
+@python_2_unicode_compatible
+class StatusValue(namedtuple("Status", "value, is_pending")):
+ MISSING = 'missing'
+ OK = 'ok'
+ NOT_MONITORED = 'not_monitored'
+ INITIALIZING = 'initializing'
+ DOES_NOT_EXIST = 'does_not_exist'
+ EXECUTION_FAILED = 'execution_failed'
+ ALL_STATUS = [
+ MISSING, OK, NOT_MONITORED, INITIALIZING, DOES_NOT_EXIST, EXECUTION_FAILED
+ ]
+
+ def __new__(cls, value, is_pending=False):
+ return super(StatusValue, cls).__new__(cls, value, is_pending)
+
+ def pending(self):
+ return StatusValue(self.value, True)
+
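+    # expose dynamic boolean helpers (is_ok, is_missing, ...) by comparing the
+    # current value against the matching class attribute.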
+ def __getattr__(self, item):
+ if item in ('is_%s' % status for status in self.ALL_STATUS):
+ return self.value == getattr(self, item[3:].upper())
+ raise AttributeError(item)
+
+ def __str__(self):
+ return "%s%s" % (self.value, " (pending)" if self.is_pending else "")
+
+
+class Status(object):
+ MISSING = StatusValue(StatusValue.MISSING)
+ OK = StatusValue(StatusValue.OK)
+ RUNNING = StatusValue(StatusValue.OK)
+ NOT_MONITORED = StatusValue(StatusValue.NOT_MONITORED)
+ INITIALIZING = StatusValue(StatusValue.INITIALIZING)
+ DOES_NOT_EXIST = StatusValue(StatusValue.DOES_NOT_EXIST)
+ EXECUTION_FAILED = StatusValue(StatusValue.EXECUTION_FAILED)
+
+
+class Monit(object):
+ def __init__(self, module, monit_bin_path, service_name, timeout):
+ self.module = module
+ self.monit_bin_path = monit_bin_path
+ self.process_name = service_name
+ self.timeout = timeout
+
+ self._monit_version = None
+ self._raw_version = None
+ self._status_change_retry_count = 6
+
+ def monit_version(self):
+ if self._monit_version is None:
+ self._raw_version, version = self._get_monit_version()
+ # Use only the major and minor version numbers; even if more are present, these should be enough
+ self._monit_version = version[0], version[1]
+ return self._monit_version
+
+ def _get_monit_version(self):
+ rc, out, err = self.module.run_command([self.monit_bin_path, '-V'], check_rc=True)
+ version_line = out.split('\n')[0]
+ raw_version = re.search(r"([0-9]+\.){1,2}([0-9]+)?", version_line).group()
+ return raw_version, tuple(map(int, raw_version.split('.')))
+
+ def exit_fail(self, msg, status=None, **kwargs):
+ kwargs.update({
+ 'msg': msg,
+ 'monit_version': self._raw_version,
+ 'process_status': str(status) if status else None,
+ })
+ self.module.fail_json(**kwargs)
+
+ def exit_success(self, state):
+ self.module.exit_json(changed=True, name=self.process_name, state=state)
+
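+ # monit releases newer than 5.18 accept the -B (batch) flag for plain,
+ # non-interactive output, which is easier to parse reliably.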
+ @property
+ def command_args(self):
+ return ["-B"] if self.monit_version() > (5, 18) else []
+
+ def get_status(self, validate=False):
+ """Return the status of the process in monit.
+
+ :@param validate: Force monit to re-check the status of the process
+ """
+ monit_command = "validate" if validate else "status"
+ check_rc = not validate # 'validate' always has rc = 1
+ command = [self.monit_bin_path, monit_command] + self.command_args + [self.process_name]
+ rc, out, err = self.module.run_command(command, check_rc=check_rc)
+ return self._parse_status(out, err)
+
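+ # A typical (assumed) status line parsed below looks like
+ #   status       Not monitored - start pending
+ # where the text before ' - ' is the state and the part after it names a
+ # pending action.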
+ def _parse_status(self, output, err):
+ escaped_monit_services = '|'.join([re.escape(x) for x in MONIT_SERVICES])
+ pattern = "(%s) '%s'" % (escaped_monit_services, re.escape(self.process_name))
+ if not re.search(pattern, output, re.IGNORECASE):
+ return Status.MISSING
+
+ status_val = re.findall(r"^\s*status\s*([\w\- ]+)", output, re.MULTILINE)
+ if not status_val:
+ self.exit_fail("Unable to find process status", stdout=output, stderr=err)
+
+ status_val = status_val[0].strip().upper()
+ if ' | ' in status_val:
+ status_val = status_val.split(' | ')[0]
+ if ' - ' not in status_val:
+ status_val = status_val.replace(' ', '_')
+ return getattr(Status, status_val)
+ else:
+ status_val, substatus = status_val.split(' - ')
+ action, state = substatus.split()
+ if action in ['START', 'INITIALIZING', 'RESTART', 'MONITOR']:
+ status = Status.OK
+ else:
+ status = Status.NOT_MONITORED
+
+ if state == 'pending':
+ status = status.pending()
+ return status
+
+ def is_process_present(self):
+ command = [self.monit_bin_path, 'summary'] + self.command_args
+ rc, out, err = self.module.run_command(command, check_rc=True)
+ return bool(re.findall(r'\b%s\b' % self.process_name, out))
+
+ def is_process_running(self):
+ return self.get_status().is_ok
+
+ def run_command(self, command):
+ """Runs a monit command, and returns the new status."""
+ return self.module.run_command([self.monit_bin_path, command, self.process_name], check_rc=True)
+
+ def wait_for_status_change(self, current_status):
+ running_status = self.get_status()
+ if running_status.value != current_status.value or current_status.value == StatusValue.EXECUTION_FAILED:
+ return running_status
+
+ loop_count = 0
+ while running_status.value == current_status.value:
+ if loop_count >= self._status_change_retry_count:
+ self.exit_fail('waited too long for monit to change state', running_status)
+
+ loop_count += 1
+ time.sleep(0.5)
+ validate = loop_count % 2 == 0 # force recheck of status every second try
+ running_status = self.get_status(validate)
+ return running_status
+
+ def wait_for_monit_to_stop_pending(self, current_status=None):
+ """Fails this run if there is no status or it's pending/initializing for timeout"""
+ timeout_time = time.time() + self.timeout
+
+ if not current_status:
+ current_status = self.get_status()
+ waiting_status = [
+ StatusValue.MISSING,
+ StatusValue.INITIALIZING,
+ StatusValue.DOES_NOT_EXIST,
+ ]
+ while current_status.is_pending or (current_status.value in waiting_status):
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for "pending", or "initiating" status to go away', current_status)
+
+ time.sleep(5)
+ current_status = self.get_status(validate=True)
+ return current_status
+
+ def reload(self):
+ rc, out, err = self.module.run_command([self.monit_bin_path, 'reload'])
+ if rc != 0:
+ self.exit_fail('monit reload failed', stdout=out, stderr=err)
+ self.exit_success(state='reloaded')
+
+ def present(self):
+ self.run_command('reload')
+
+ timeout_time = time.time() + self.timeout
+ while not self.is_process_present():
+ if time.time() >= timeout_time:
+ self.exit_fail('waited too long for process to become "present"')
+
+ time.sleep(5)
+
+ self.exit_success(state='present')
+
+ def change_state(self, state, expected_status, invert_expected=None):
+ current_status = self.get_status()
+ self.run_command(STATE_COMMAND_MAP[state])
+ status = self.wait_for_status_change(current_status)
+ status = self.wait_for_monit_to_stop_pending(status)
+ status_match = status.value == expected_status.value
+ if invert_expected:
+ status_match = not status_match
+ if status_match:
+ self.exit_success(state=state)
+ self.exit_fail('%s process not %s' % (self.process_name, state), status)
+
+ def stop(self):
+ self.change_state('stopped', Status.NOT_MONITORED)
+
+ def unmonitor(self):
+ self.change_state('unmonitored', Status.NOT_MONITORED)
+
+ def restart(self):
+ self.change_state('restarted', Status.OK)
+
+ def start(self):
+ self.change_state('started', Status.OK)
+
+ def monitor(self):
+ self.change_state('monitored', Status.NOT_MONITORED, invert_expected=True)
+
+
+def main():
+ arg_spec = dict(
+ name=dict(required=True),
+ timeout=dict(default=300, type='int'),
+ state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
+ )
+
+ module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
+
+ name = module.params['name']
+ state = module.params['state']
+ timeout = module.params['timeout']
+
+ monit = Monit(module, module.get_bin_path('monit', True), name, timeout)
+
+ def exit_if_check_mode():
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ if state == 'reloaded':
+ exit_if_check_mode()
+ monit.reload()
+
+ present = monit.is_process_present()
+
+ if not present and state != 'present':
+ module.fail_json(msg='%s process not presently configured with monit' % name, name=name)
+
+ if state == 'present':
+ if present:
+ module.exit_json(changed=False, name=name, state=state)
+ exit_if_check_mode()
+ monit.present()
+
+ monit.wait_for_monit_to_stop_pending()
+ running = monit.is_process_running()
+
+ if running and state in ['started', 'monitored']:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if running and state == 'stopped':
+ exit_if_check_mode()
+ monit.stop()
+
+ if running and state == 'unmonitored':
+ exit_if_check_mode()
+ monit.unmonitor()
+
+ elif state == 'restarted':
+ exit_if_check_mode()
+ monit.restart()
+
+ elif not running and state == 'started':
+ exit_if_check_mode()
+ monit.start()
+
+ elif not running and state == 'monitored':
+ exit_if_check_mode()
+ monit.monitor()
+
+ module.exit_json(changed=False, name=name, state=state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mqtt.py b/ansible_collections/community/general/plugins/modules/mqtt.py
new file mode 100644
index 000000000..389382649
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mqtt.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, 2014, Jan-Piet Mens <jpmens () gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mqtt
+short_description: Publish a message on an MQTT topic for the IoT
+description:
+ - Publish a message on an MQTT topic.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ server:
+ type: str
+ description:
+ - MQTT broker address/name.
+ default: localhost
+ port:
+ type: int
+ description:
+ - MQTT broker port number.
+ default: 1883
+ username:
+ type: str
+ description:
+ - Username to authenticate against the broker.
+ password:
+ type: str
+ description:
+ - Password for C(username) to authenticate against the broker.
+ client_id:
+ type: str
+ description:
+ - MQTT client identifier.
+ - If not specified, a value C(hostname + pid) will be used.
+ topic:
+ type: str
+ description:
+ - MQTT topic name.
+ required: true
+ payload:
+ type: str
+ description:
+ - Payload. The special string C("None") may be used to send a NULL
+ (i.e. empty) payload which is useful to simply notify with the I(topic)
+ or to clear previously retained messages.
+ required: true
+ qos:
+ type: str
+ description:
+ - QoS (Quality of Service) of the message.
+ default: "0"
+ choices: [ "0", "1", "2" ]
+ retain:
+ description:
+ - Setting this flag causes the broker to retain (i.e. keep) the message so that
+ applications that subsequently subscribe to the topic can receive the last
+ retained message immediately.
+ type: bool
+ default: false
+ ca_cert:
+ type: path
+ description:
+ - The path to the Certificate Authority certificate files that are to be
+ treated as trusted by this client. If this is the only option given
+ then the client will operate in a similar manner to a web browser. That
+ is to say it will require the broker to have a certificate signed by the
+ Certificate Authorities in ca_certs and will communicate using TLS v1,
+ but will not attempt any form of authentication. This provides basic
+ network encryption but may not be sufficient depending on how the broker
+ is configured.
+ aliases: [ ca_certs ]
+ client_cert:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client certificate. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ certfile ]
+ client_key:
+ type: path
+ description:
+ - The path pointing to the PEM encoded client private key. If this is not
+ None it will be used as client information for TLS based
+ authentication. Support for this feature is broker dependent.
+ aliases: [ keyfile ]
+ tls_version:
+ description:
+ - Specifies the version of the SSL/TLS protocol to be used.
+ - By default (if the python version supports it) the highest TLS version is
+ detected. If unavailable, TLS v1 is used.
+ type: str
+ choices:
+ - tlsv1.1
+ - tlsv1.2
+requirements: [ mosquitto ]
+notes:
+ - This module requires a connection to an MQTT broker such as Mosquitto
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.org/project/paho-mqtt/)).
+author: "Jan-Piet Mens (@jpmens)"
+'''
+
+EXAMPLES = '''
+- name: Publish a message on an MQTT topic
+ community.general.mqtt:
+ topic: 'service/ansible/{{ ansible_hostname }}'
+ payload: 'Hello at {{ ansible_date_time.iso8601 }}'
+ qos: 0
+ retain: false
+ client_id: ans001
+ delegate_to: localhost
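+
+# A minimal TLS sketch; the broker name and certificate path are
+# assumptions for illustration only.
+- name: Publish over TLS to a broker listening on port 8883
+  community.general.mqtt:
+    server: broker.example.com
+    port: 8883
+    topic: 'service/ansible/{{ ansible_hostname }}'
+    payload: 'Hello over TLS'
+    ca_cert: /etc/ssl/certs/mqtt-ca.pem
+    tls_version: tlsv1.2
+  delegate_to: localhost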
+'''
+
+# ===========================================
+# MQTT module support methods.
+#
+
+import os
+import ssl
+import traceback
+import platform
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+HAS_PAHOMQTT = True
+PAHOMQTT_IMP_ERR = None
+try:
+ import socket
+ import paho.mqtt.publish as mqtt
+except ImportError:
+ PAHOMQTT_IMP_ERR = traceback.format_exc()
+ HAS_PAHOMQTT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+# ===========================================
+# Main
+#
+
+def main():
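+ # Build the map of TLS protocol constants available in this Python
+ # build; constants missing from the ssl module are simply skipped.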
+ tls_map = {}
+
+ try:
+ tls_map['tlsv1.2'] = ssl.PROTOCOL_TLSv1_2
+ except AttributeError:
+ pass
+
+ try:
+ tls_map['tlsv1.1'] = ssl.PROTOCOL_TLSv1_1
+ except AttributeError:
+ pass
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ server=dict(default='localhost'),
+ port=dict(default=1883, type='int'),
+ topic=dict(required=True),
+ payload=dict(required=True),
+ client_id=dict(default=None),
+ qos=dict(default="0", choices=["0", "1", "2"]),
+ retain=dict(default=False, type='bool'),
+ username=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
+ client_cert=dict(default=None, type='path', aliases=['certfile']),
+ client_key=dict(default=None, type='path', aliases=['keyfile']),
+ tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
+ ca_certs = module.params.get("ca_cert", None)
+ certfile = module.params.get("client_cert", None)
+ keyfile = module.params.get("client_key", None)
+ tls_version = module.params.get("tls_version", None)
+
+ if client_id is None:
+ client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
+
+ if payload == 'None':
+ payload = None
+
+ auth = None
+ if username is not None:
+ auth = {'username': username, 'password': password}
+
+ tls = None
+ if ca_certs is not None:
+ if tls_version:
+ tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
+ else:
+ if LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"):
+ # Specifying `None` on later versions of python seems sufficient to
+ # instruct python to autonegotiate the SSL/TLS connection. On versions
+ # 3.5.2 and lower though we need to specify the version.
+ #
+ # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
+ # not available until 3.5.3.
+ tls_version = ssl.PROTOCOL_SSLv23
+
+ tls = {
+ 'ca_certs': ca_certs,
+ 'certfile': certfile,
+ 'keyfile': keyfile,
+ 'tls_version': tls_version,
+ }
+
+ try:
+ mqtt.single(
+ topic,
+ payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth,
+ tls=tls
+ )
+ except Exception as e:
+ module.fail_json(
+ msg="unable to publish to MQTT broker %s" % to_native(e),
+ exception=traceback.format_exc()
+ )
+
+ module.exit_json(changed=False, topic=topic)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mssql_db.py b/ansible_collections/community/general/plugins/modules/mssql_db.py
new file mode 100644
index 000000000..4006033cf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mssql_db.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Vedit Firat Arig <firatarig@gmail.com>
+# Outline and parts are reused from Mark Theunissen's mysql_db module
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: mssql_db
+short_description: Add or remove MSSQL databases from a remote host
+description:
+ - Add or remove MSSQL databases from a remote host.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - name of the database to add or remove
+ required: true
+ aliases: [ db ]
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with
+ type: str
+ default: ''
+ login_password:
+ description:
+ - The password used to authenticate with
+ type: str
+ default: ''
+ login_host:
+ description:
+ - Host running the database
+ type: str
+ required: true
+ login_port:
+ description:
+ - Port of the MSSQL server. Requires I(login_host) to be defined as something other than C(localhost) if I(login_port) is used.
+ default: '1433'
+ type: str
+ state:
+ description:
+ - The database state
+ default: present
+ choices: [ "present", "absent", "import" ]
+ type: str
+ target:
+ description:
+ - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
+ files (C(.sql)) files are supported.
+ type: str
+ autocommit:
+ description:
+ - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use I(autocommit=true), since some content can't be changed
+ within a transaction.
+ type: bool
+ default: false
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as C(pip install pymssql) (see M(ansible.builtin.pip)).
+requirements:
+ - python >= 2.7
+ - pymssql
+author: Vedit Firat Arig (@vedit)
+'''
+
+EXAMPLES = '''
+- name: Create a new database with name 'jackdata'
+ community.general.mssql_db:
+ name: jackdata
+ state: present
+
+# Copy database dump file to remote host and restore it to database 'my_db'
+- name: Copy database dump file to remote host
+ ansible.builtin.copy:
+ src: dump.sql
+ dest: /tmp
+
+- name: Restore the dump file to database 'my_db'
+ community.general.mssql_db:
+ name: my_db
+ state: import
+ target: /tmp/dump.sql
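+
+# Removal is symmetric; this sketch drops the database created above.
+- name: Remove database 'jackdata'
+  community.general.mssql_db:
+    name: jackdata
+    state: absent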
+'''
+
+RETURN = '''
+#
+'''
+
+import os
+import traceback
+
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ mssql_found = False
+else:
+ mssql_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def db_exists(conn, cursor, db):
+ cursor.execute("SELECT name FROM master.sys.databases WHERE name = %s", db)
+ conn.commit()
+ return bool(cursor.rowcount)
+
+
+def db_create(conn, cursor, db):
+ cursor.execute("CREATE DATABASE [%s]" % db)
+ return db_exists(conn, cursor, db)
+
+
+def db_delete(conn, cursor, db):
+ try:
+ cursor.execute("ALTER DATABASE [%s] SET single_user WITH ROLLBACK IMMEDIATE" % db)
+ except Exception:
+ pass
+ cursor.execute("DROP DATABASE [%s]" % db)
+ return not db_exists(conn, cursor, db)
+
+
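+# The import walks the dump file line by line: statements accumulate into a
+# batch prefixed with "USE [<db>]", and every line starting with GO flushes
+# the current batch to the server before a new one is started.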
+def db_import(conn, cursor, module, db, target):
+ if os.path.isfile(target):
+ with open(target, 'r') as backup:
+ sqlQuery = "USE [%s]\n" % db
+ for line in backup:
+ if line is None:
+ break
+ elif line.startswith('GO'):
+ cursor.execute(sqlQuery)
+ sqlQuery = "USE [%s]\n" % db
+ else:
+ sqlQuery += line
+ cursor.execute(sqlQuery)
+ conn.commit()
+ return 0, "import successful", ""
+ else:
+ return 1, "cannot find target file", "cannot find target file"
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['db']),
+ login_user=dict(default=''),
+ login_password=dict(default='', no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(default='1433'),
+ target=dict(default=None),
+ autocommit=dict(type='bool', default=False),
+ state=dict(
+ default='present', choices=['present', 'absent', 'import'])
+ )
+ )
+
+ if not mssql_found:
+ module.fail_json(msg=missing_required_lib('pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ state = module.params['state']
+ autocommit = module.params['autocommit']
+ target = module.params["target"]
+
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+
+ login_querystring = login_host
+ if login_port != "1433":
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user != "" and login_password == "":
+ module.fail_json(msg="when supplying login_user arguments login_password must be provided")
+
+ try:
+ conn = pymssql.connect(user=login_user, password=login_password, host=login_querystring, database='master')
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+ changed = False
+
+ if db_exists(conn, cursor, db):
+ if state == "absent":
+ try:
+ changed = db_delete(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error deleting database: " + str(e))
+ elif state == "import":
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+ else:
+ if state == "present":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+ elif state == "import":
+ try:
+ changed = db_create(conn, cursor, db)
+ except Exception as e:
+ module.fail_json(msg="error creating database: " + str(e))
+
+ conn.autocommit(autocommit)
+ rc, stdout, stderr = db_import(conn, cursor, module, db, target)
+
+ if rc != 0:
+ module.fail_json(msg="%s" % stderr)
+ else:
+ module.exit_json(changed=True, db=db, msg=stdout)
+
+ module.exit_json(changed=changed, db=db)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/mssql_script.py b/ansible_collections/community/general/plugins/modules/mssql_script.py
new file mode 100644
index 000000000..1696000db
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/mssql_script.py
@@ -0,0 +1,313 @@
+#!/usr/bin/python
+
+# Copyright (c) 2021, Kris Budde <kris@budd.ee>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: mssql_script
+
+short_description: Execute SQL scripts on a MSSQL database
+
+version_added: "4.0.0"
+
+description:
+ - Execute SQL scripts on a MSSQL database.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - The script will not be executed in check mode.
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description: Database to run script against.
+ aliases: [ db ]
+ default: ''
+ type: str
+ login_user:
+ description: The username used to authenticate with.
+ type: str
+ login_password:
+ description: The password used to authenticate with.
+ type: str
+ login_host:
+ description: Host running the database.
+ type: str
+ required: true
+ login_port:
+ description: Port of the MSSQL server. Requires I(login_host) be defined as well.
+ default: 1433
+ type: int
+ script:
+ description:
+ - The SQL script to be executed.
+ - Script can contain multiple SQL statements. Multiple batches can be separated by the C(GO) command.
+ - Each batch must return at least one result set.
+ required: true
+ type: str
+ output:
+ description:
+ - With C(default) each row will be returned as a list of values. See C(query_results).
+ - Output format C(dict) will return dictionary with the column names as keys. See C(query_results_dict).
+ - C(dict) requires named columns to be returned by each query otherwise an error is thrown.
+ choices: [ "dict", "default" ]
+ default: 'default'
+ type: str
+ params:
+ description: |
+ Parameters passed to the script as SQL parameters, for example C(SELECT %(name)s) with C(params: {"name": "John Doe"}).
+ type: dict
+notes:
+ - Requires the pymssql Python package on the remote host. For Ubuntu, this
+ is as easy as C(pip install pymssql) (see M(ansible.builtin.pip)).
+requirements:
+ - python >= 2.7
+ - pymssql
+
+author:
+ - Kris Budde (@kbudde)
+'''
+
+EXAMPLES = r'''
+- name: Check DB connection
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ db: master
+ script: "SELECT 1"
+
+- name: Query with parameter
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s
+ params:
+ dbname: msdb
+ register: result_params
+- assert:
+ that:
+ - result_params.query_results[0][0][0][0] == 'msdb'
+ - result_params.query_results[0][0][0][1] == 'ONLINE'
+
+- name: two batches with default output
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT 'Batch 0 - Select 0'
+ SELECT 'Batch 0 - Select 1'
+ GO
+ SELECT 'Batch 1 - Select 0'
+ register: result_batches
+- assert:
+ that:
+ - result_batches.query_results | length == 2 # two batch results
+ - result_batches.query_results[0] | length == 2 # two selects in first batch
+ - result_batches.query_results[0][0] | length == 1 # one row in first select
+ - result_batches.query_results[0][0][0] | length == 1 # one column in first row
+ - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # each row contains a list of values.
+
+- name: two batches with dict output
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ output: dict
+ script: |
+ SELECT 'Batch 0 - Select 0' as b0s0
+ SELECT 'Batch 0 - Select 1' as b0s1
+ GO
+ SELECT 'Batch 1 - Select 0' as b1s0
+ register: result_batches_dict
+- assert:
+ that:
+ - result_batches_dict.query_results_dict | length == 2 # two batch results
+ - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch
+ - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select
+ - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row
+'''
+
+RETURN = r'''
+query_results:
+ description: List of batches (queries separated by C(GO) keyword).
+ type: list
+ elements: list
+ returned: success and I(output=default)
+ sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
+ contains:
+ queries:
+ description:
+ - List of result sets of each query.
+ - If a query returns no results, the results of this and all the following queries will not be included in the output.
+ - Use the C(GO) keyword in I(script) to separate queries.
+ type: list
+ elements: list
+ contains:
+ rows:
+ description: List of rows returned by query.
+ type: list
+ elements: list
+ contains:
+ column_value:
+ description:
+ - List of column values.
+ - Any non-standard JSON type is converted to string.
+ type: list
+ example: ["Batch 0 - Select 0"]
+ returned: success, if output is default
+query_results_dict:
+ description: List of batches (queries separated by C(GO) keyword).
+ type: list
+ elements: list
+ returned: success and I(output=dict)
+ sample: [[[["Batch 0 - Select 0"]], [["Batch 0 - Select 1"]]], [[["Batch 1 - Select 0"]]]]
+ contains:
+ queries:
+ description:
+ - List of result sets of each query.
+ - If a query returns no results, the results of this and all the following queries will not be included in the output.
+ Use the C(GO) keyword in I(script) to separate queries.
+ type: list
+ elements: list
+ contains:
+ rows:
+ description: List of rows returned by query.
+ type: list
+ elements: list
+ contains:
+ column_dict:
+ description:
+ - Dictionary of column names and values.
+ - Any non-standard JSON type is converted to string.
+ type: dict
+ example: {"col_name": "Batch 0 - Select 0"}
+ returned: success, if output is dict
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+import json
+PYMSSQL_IMP_ERR = None
+try:
+ import pymssql
+except ImportError:
+ PYMSSQL_IMP_ERR = traceback.format_exc()
+ MSSQL_FOUND = False
+else:
+ MSSQL_FOUND = True
+
+
+def clean_output(o):
+ return str(o)
+
+
+def run_module():
+ module_args = dict(
+ name=dict(required=False, aliases=['db'], default=''),
+ login_user=dict(),
+ login_password=dict(no_log=True),
+ login_host=dict(required=True),
+ login_port=dict(type='int', default=1433),
+ script=dict(required=True),
+ output=dict(default='default', choices=['dict', 'default']),
+ params=dict(type='dict'),
+ )
+
+ result = dict(
+ changed=False,
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+ if not MSSQL_FOUND:
+ module.fail_json(msg=missing_required_lib(
+ 'pymssql'), exception=PYMSSQL_IMP_ERR)
+
+ db = module.params['name']
+ login_user = module.params['login_user']
+ login_password = module.params['login_password']
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ script = module.params['script']
+ output = module.params['output']
+ sql_params = module.params['params']
+
+ login_querystring = login_host
+ if login_port != 1433:
+ login_querystring = "%s:%s" % (login_host, login_port)
+
+ if login_user is not None and login_password is None:
+ module.fail_json(
+ msg="when supplying login_user argument, login_password must also be provided")
+
+ try:
+ conn = pymssql.connect(
+ user=login_user, password=login_password, host=login_querystring, database=db)
+ cursor = conn.cursor()
+ except Exception as e:
+ if "Unknown database" in str(e):
+ errno, errstr = e.args
+ module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
+ else:
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+
+ conn.autocommit(True)
+
+ query_results_key = 'query_results'
+ if output == 'dict':
+ cursor = conn.cursor(as_dict=True)
+ query_results_key = 'query_results_dict'
+
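+ # Batches are separated by a GO keyword standing alone on its own line;
+ # splitting on '\nGO\n' means inline or indented GO tokens do not act as
+ # separators.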
+ queries = script.split('\nGO\n')
+ result['changed'] = True
+ if module.check_mode:
+ module.exit_json(**result)
+
+ query_results = []
+ try:
+ for query in queries:
+ cursor.execute(query, sql_params)
+ qry_result = []
+ rows = cursor.fetchall()
+ while rows:
+ qry_result.append(rows)
+ rows = cursor.fetchall()
+ query_results.append(qry_result)
+ except Exception as e:
+ return module.fail_json(msg="query failed", query=query, error=str(e), **result)
+
+ # ensure that the result is json serializable
+ qry_results = json.loads(json.dumps(query_results, default=clean_output))
+
+ result[query_results_key] = qry_results
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nagios.py b/ansible_collections/community/general/plugins/modules/nagios.py
new file mode 100644
index 000000000..1831d0496
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nagios.py
@@ -0,0 +1,1255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This file is largely copied from the Nagios module included in the
+# Func project. Original copyright follows:
+#
+# func-nagios - Schedule downtime and enables/disable notifications
+# Copyright 2011, Red Hat, Inc.
+# Tim Bielawa <tbielawa@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nagios
+short_description: Perform common tasks in Nagios related to downtime and notifications
+description:
+ - "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
+ - The C(nagios) module is not idempotent.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
+ - You can specify multiple services at once by separating them with commas, e.g. I(services=httpd,nfs,puppet).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime/acknowledge for the I(host itself),
+ e.g., I(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime/acknowledge for a host does not affect alerts/downtime/acknowledge for any of the services running on it.)
+ To schedule downtime for all services on a particular host, use the keyword C(all), e.g., I(service=all).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ action:
+ description:
+ - Action to take.
+ - servicegroup options were added in 2.0.
+ - delete_downtime options were added in 2.2.
+ - The C(acknowledge) and C(forced_check) actions were added in community.general 1.2.0.
+ required: true
+ choices: [ "downtime", "delete_downtime", "enable_alerts", "disable_alerts", "silence", "unsilence",
+ "silence_nagios", "unsilence_nagios", "command", "servicegroup_service_downtime",
+ "servicegroup_host_downtime", "acknowledge", "forced_check" ]
+ type: str
+ host:
+ description:
+ - Host to operate on in Nagios.
+ type: str
+ cmdfile:
+ description:
+ - Path to the nagios I(command file) (FIFO pipe).
+ Only required if auto-detection fails.
+ type: str
+ author:
+ description:
+ - Author to leave downtime comments as.
+ Only used when I(action) is C(downtime) or C(acknowledge).
+ type: str
+ default: Ansible
+ comment:
+ description:
+ - Comment when I(action) is C(downtime) or C(acknowledge).
+ type: str
+ default: Scheduling downtime
+ start:
+ description:
+ - When downtime should start, in C(time_t) format (epoch seconds).
+ version_added: '0.2.0'
+ type: str
+ minutes:
+ description:
+ - Minutes to schedule downtime for.
+ - Only usable with the C(downtime) action.
+ type: int
+ default: 30
+ services:
+ description:
+ - >
+ What to manage downtime/alerts for. Separate multiple services with commas.
+ I(service) is an alias for I(services).
+ B(Required) option when I(action) is one of: C(downtime), C(acknowledge), C(forced_check), C(enable_alerts), C(disable_alerts).
+ aliases: [ "service" ]
+ type: str
+ servicegroup:
+ description:
+ - The Servicegroup we want to set downtimes/alerts for.
+ B(Required) option when using the C(servicegroup_service_downtime) and C(servicegroup_host_downtime) actions.
+ type: str
+ command:
+ description:
+ - The raw command to send to nagios, which
+ should not include the submitted time header or the line-feed.
+ B(Required) option when using the C(command) action.
+ type: str
+
+author: "Tim Bielawa (@tbielawa)"
+'''
+
+EXAMPLES = '''
+- name: Set 30 minutes of apache downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 30
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime starting at 2019-04-23T02:00:00+00:00
+ community.general.nagios:
+ action: downtime
+ start: 1555984800
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule an hour of HOST downtime, with a comment describing the reason
+ community.general.nagios:
+ action: downtime
+ minutes: 60
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: Rebuilding machine
+
+- name: Schedule downtime for ALL services on HOST
+ community.general.nagios:
+ action: downtime
+ minutes: 45
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Schedule downtime for a few services
+ community.general.nagios:
+ action: downtime
+ services: frob,foobar,qeuz
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all services in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_service_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Set 30 minutes downtime for all hosts in servicegroup foo
+ community.general.nagios:
+ action: servicegroup_host_downtime
+ minutes: 30
+ servicegroup: foo
+ host: '{{ inventory_hostname }}'
+
+- name: Delete all downtime for a given host
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: all
+
+- name: Delete all downtime for HOST with a particular comment
+ community.general.nagios:
+ action: delete_downtime
+ host: '{{ inventory_hostname }}'
+ service: host
+ comment: Planned maintenance
+
+- name: Acknowledge a HOST with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: host
+ host: '{{ inventory_hostname }}'
+ comment: 'power outage - see casenr 12345'
+
+- name: Acknowledge an active service problem for the httpd service with a particular comment
+ community.general.nagios:
+ action: acknowledge
+ service: httpd
+ host: '{{ inventory_hostname }}'
+ comment: 'service crashed - see casenr 12345'
+
+- name: Reset a passive service check for snmp trap
+ community.general.nagios:
+ action: forced_check
+ service: snmp
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for the httpd service
+ community.general.nagios:
+ action: forced_check
+ service: httpd
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for all services of a particular host
+ community.general.nagios:
+ action: forced_check
+ service: all
+ host: '{{ inventory_hostname }}'
+
+- name: Force an active service check for a particular host
+ community.general.nagios:
+ action: forced_check
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Enable SMART disk alerts
+ community.general.nagios:
+ action: enable_alerts
+ service: smart
+ host: '{{ inventory_hostname }}'
+
+- name: Disable httpd and nfs alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: httpd,nfs
+ host: '{{ inventory_hostname }}'
+
+- name: Disable HOST alerts
+ community.general.nagios:
+ action: disable_alerts
+ service: host
+ host: '{{ inventory_hostname }}'
+
+- name: Silence ALL alerts
+ community.general.nagios:
+ action: silence
+ host: '{{ inventory_hostname }}'
+
+- name: Unsilence all alerts
+ community.general.nagios:
+ action: unsilence
+ host: '{{ inventory_hostname }}'
+
+- name: Shut up nagios
+ community.general.nagios:
+ action: silence_nagios
+
+- name: Annoy me nagios
+ community.general.nagios:
+ action: unsilence_nagios
+
+- name: Command something
+ community.general.nagios:
+ action: command
+ command: DISABLE_FAILURE_PREDICTION
+'''
+
+import time
+import os.path
+import stat
+
+from ansible.module_utils.basic import AnsibleModule
+
+
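+# Locate the Nagios external command FIFO by scanning well-known nagios.cfg
+# locations and reading the command_file directive from the first match.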
+def which_cmdfile():
+ locations = [
+ # rhel
+ '/etc/nagios/nagios.cfg',
+ # debian
+ '/etc/nagios3/nagios.cfg',
+ # older debian
+ '/etc/nagios2/nagios.cfg',
+ # bsd, solaris
+ '/usr/local/etc/nagios/nagios.cfg',
+ # groundwork it monitoring
+ '/usr/local/groundwork/nagios/etc/nagios.cfg',
+ # open monitoring distribution
+ '/omd/sites/oppy/tmp/nagios/nagios.cfg',
+ # ???
+ '/usr/local/nagios/etc/nagios.cfg',
+ '/usr/local/nagios/nagios.cfg',
+ '/opt/nagios/etc/nagios.cfg',
+ '/opt/nagios/nagios.cfg',
+ # icinga on debian/ubuntu
+ '/etc/icinga/icinga.cfg',
+ # icinga installed from source (default location)
+ '/usr/local/icinga/etc/icinga.cfg',
+ ]
+
+ for path in locations:
+ if os.path.exists(path):
+ for line in open(path):
+ if line.startswith('command_file'):
+ return line.split('=')[1].strip()
+
+ return None
+
+
+def main():
+ ACTION_CHOICES = [
+ 'downtime',
+ 'delete_downtime',
+ 'silence',
+ 'unsilence',
+ 'enable_alerts',
+ 'disable_alerts',
+ 'silence_nagios',
+ 'unsilence_nagios',
+ 'command',
+ 'servicegroup_host_downtime',
+ 'servicegroup_service_downtime',
+ 'acknowledge',
+ 'forced_check',
+ ]
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ action=dict(type='str', required=True, choices=ACTION_CHOICES),
+ author=dict(type='str', default='Ansible'),
+ comment=dict(type='str', default='Scheduling downtime'),
+ host=dict(type='str'),
+ servicegroup=dict(type='str'),
+ start=dict(type='str'),
+ minutes=dict(type='int', default=30),
+ cmdfile=dict(type='str', default=which_cmdfile()),
+ services=dict(type='str', aliases=['service']),
+ command=dict(type='str'),
+ ),
+ required_if=[
+ ('action', 'downtime', ['host', 'services']),
+ ('action', 'delete_downtime', ['host', 'services']),
+ ('action', 'silence', ['host']),
+ ('action', 'unsilence', ['host']),
+ ('action', 'enable_alerts', ['host', 'services']),
+ ('action', 'disable_alerts', ['host', 'services']),
+ ('action', 'command', ['command']),
+ ('action', 'servicegroup_host_downtime', ['host', 'servicegroup']),
+ ('action', 'servicegroup_service_downtime', ['host', 'servicegroup']),
+ ('action', 'acknowledge', ['host', 'services']),
+ ('action', 'forced_check', ['host', 'services']),
+ ],
+ )
+
+ if not module.params['cmdfile']:
+ module.fail_json(msg='unable to locate nagios.cfg')
+
+ ansible_nagios = Nagios(module, **module.params)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ ansible_nagios.act()
+
+
+class Nagios(object):
+ """
+ Perform common tasks in Nagios related to downtime and
+ notifications.
+
+ The complete set of external commands Nagios handles is documented
+ on their website:
+
+ http://old.nagios.org/developerinfo/externalcommands/commandlist.php
+
+ Note that in the case of `schedule_svc_downtime`,
+ `enable_svc_notifications`, and `disable_svc_notifications`, the
+ service argument should be passed as a list.
+ """
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.action = kwargs['action']
+ self.author = kwargs['author']
+ self.comment = kwargs['comment']
+ self.host = kwargs['host']
+ self.servicegroup = kwargs['servicegroup']
+ if kwargs['start'] is not None:
+ self.start = int(kwargs['start'])
+ else:
+ self.start = None
+ self.minutes = kwargs['minutes']
+ self.cmdfile = kwargs['cmdfile']
+ self.command = kwargs['command']
+
+ if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'):
+ self.services = kwargs['services']
+ else:
+ self.services = kwargs['services'].split(',')
+
+ self.command_results = []
+
+ def _now(self):
+ """
+ The time in seconds since 12:00:00AM Jan 1, 1970
+ """
+
+ return int(time.time())
+
+ def _write_command(self, cmd):
+ """
+ Write the given command to the Nagios command file
+ """
+
+ if not os.path.exists(self.cmdfile):
+ self.module.fail_json(msg='nagios command file does not exist',
+ cmdfile=self.cmdfile)
+ if not stat.S_ISFIFO(os.stat(self.cmdfile).st_mode):
+ self.module.fail_json(msg='nagios command file is not a fifo file',
+ cmdfile=self.cmdfile)
+ try:
+ with open(self.cmdfile, 'w') as fp:
+ fp.write(cmd)
+ fp.flush()
+ self.command_results.append(cmd.strip())
+ except IOError:
+ self.module.fail_json(msg='unable to write to nagios command file',
+ cmdfile=self.cmdfile)
+
+ def _fmt_dt_str(self, cmd, host, duration, author=None,
+ comment=None, start=None,
+ svc=None, fixed=1, trigger=0):
+ """
+ Format an external-command downtime string.
+
+ cmd - Nagios command ID
+ host - Host to schedule downtime on
+ duration - Minutes to schedule downtime for
+ author - Name to file the downtime as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ Default is to use the entry time (now)
+ svc - Service to schedule downtime for, omit when for host downtime
+ fixed - Start now if 1, start when a problem is detected if 0
+ trigger - Optional ID of event to start downtime from. Leave as 0 for
+ fixed downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ entry_time = self._now()
+ if start is None:
+ start = entry_time
+
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+ duration_s = (duration * 60)
+ end = start + duration_s
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ dt_args = [svc, str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ dt_args = [str(start), str(end), str(fixed), str(trigger),
+ str(duration_s), author, comment]
+
+ dt_arg_str = ";".join(dt_args)
+ dt_str = hdr + dt_arg_str + "\n"
+
+ return dt_str
+
+ def _fmt_ack_str(self, cmd, host, author=None,
+ comment=None, svc=None, sticky=0, notify=1, persistent=0):
+ """
+ Format an external-command acknowledge string.
+
+ cmd - Nagios command ID
+ host - Host to acknowledge the problem on
+ author - Name to file the acknowledgement as
+ comment - Reason for running this command (upgrade, reboot, etc)
+ svc - Service to acknowledge the problem for, omit for host acknowledgements
+ sticky - if set to 1, the acknowledgement remains until the host returns to an UP state
+ notify - if set to 1, a notification is sent out to contacts
+ persistent - if set to 1, the comment survives restarts of the Nagios process
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>]
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if not author:
+ author = self.author
+
+ if not comment:
+ comment = self.comment
+
+ if svc is not None:
+ ack_args = [svc, str(sticky), str(notify), str(persistent), author, comment]
+ else:
+ # Downtime for a host if no svc specified
+ ack_args = [str(sticky), str(notify), str(persistent), author, comment]
+
+ ack_arg_str = ";".join(ack_args)
+ ack_str = hdr + ack_arg_str + "\n"
+
+ return ack_str
+
+ def _fmt_dt_del_str(self, cmd, host, svc=None, start=None, comment=None):
+ """
+ Format an external-command downtime deletion string.
+
+ cmd - Nagios command ID
+ host - Host to remove scheduled downtime from
+ comment - Reason downtime was added (upgrade, reboot, etc)
+ start - Start of downtime in seconds since 12:00AM Jan 1 1970
+ svc - Service to remove downtime for, omit to remove all downtime for the host
+
+ Syntax: [submitted] COMMAND;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if comment is None:
+ comment = self.comment
+
+ dt_del_args = []
+ if svc is not None:
+ dt_del_args.append(svc)
+ else:
+ dt_del_args.append('')
+
+ if start is not None:
+ dt_del_args.append(str(start))
+ else:
+ dt_del_args.append('')
+
+ if comment is not None:
+ dt_del_args.append(comment)
+ else:
+ dt_del_args.append('')
+
+ dt_del_arg_str = ";".join(dt_del_args)
+ dt_del_str = hdr + dt_del_arg_str + "\n"
+
+ return dt_del_str
+
+ def _fmt_chk_str(self, cmd, host, svc=None, start=None):
+ """
+ Format an external-command forced host or service check string.
+
+ cmd - Nagios command ID
+ host - Host to check service from
+ svc - Service to check
+ start - check time
+
+ Syntax: [submitted] COMMAND;<host_name>;[<service_description>];<check_time>
+ """
+
+ entry_time = self._now()
+ hdr = "[%s] %s;%s;" % (entry_time, cmd, host)
+
+ if start is None:
+ start = entry_time + 3
+
+ if svc is None:
+ chk_args = [str(start)]
+ else:
+ chk_args = [svc, str(start)]
+
+ chk_arg_str = ";".join(chk_args)
+ chk_str = hdr + chk_arg_str + "\n"
+
+ return chk_str
+
+ def _fmt_notif_str(self, cmd, host=None, svc=None):
+ """
+ Format an external-command notification string.
+
+ cmd - Nagios command ID.
+ host - Host to en/disable notifications on. A value is not required
+ for global downtime
+ svc - Service to schedule downtime for. A value is not required
+ for host downtime.
+
+ Syntax: [submitted] COMMAND;<host_name>[;<service_description>]
+ """
+
+ entry_time = self._now()
+ notif_str = "[%s] %s" % (entry_time, cmd)
+ if host is not None:
+ notif_str += ";%s" % host
+
+ if svc is not None:
+ notif_str += ";%s" % svc
+
+ notif_str += "\n"
+
+ return notif_str
+
+ def schedule_svc_downtime(self, host, services=None, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ service.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the service.
+
+ Syntax: SCHEDULE_SVC_DOWNTIME;<host_name>;<service_description>
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SVC_DOWNTIME"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start, svc=service)
+ self._write_command(dt_cmd_str)
+
+ def schedule_host_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for a particular
+ host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ Syntax: SCHEDULE_HOST_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def acknowledge_svc_problem(self, host, services=None):
+ """
+ This command is used to acknowledge a particular
+ service problem.
+
+ By acknowledging the current problem, future notifications
+ for the same service state are disabled.
+
+ Syntax: ACKNOWLEDGE_SVC_PROBLEM;<host_name>;<service_description>;
+ <sticky>;<notify>;<persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_SVC_PROBLEM"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ ack_cmd_str = self._fmt_ack_str(cmd, host, svc=service)
+ self._write_command(ack_cmd_str)
+
+ def acknowledge_host_problem(self, host):
+ """
+ This command is used to acknowledge a particular
+ host problem.
+
+ By acknowledging the current problem, future notifications
+ for the same host state are disabled.
+
+ Syntax: ACKNOWLEDGE_HOST_PROBLEM;<host_name>;<sticky>;<notify>;
+ <persistent>;<author>;<comment>
+ """
+
+ cmd = "ACKNOWLEDGE_HOST_PROBLEM"
+ ack_cmd_str = self._fmt_ack_str(cmd, host)
+ self._write_command(ack_cmd_str)
+
+ def schedule_forced_host_check(self, host):
+ """
+ This command schedules a forced active check for a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_CHECK;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_CHECK"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_host_svc_check(self, host):
+ """
+ This command schedules a forced active check for all services
+ associated with a particular host.
+
+ Syntax: SCHEDULE_FORCED_HOST_SVC_CHECKS;<host_name>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_HOST_SVC_CHECKS"
+
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=None)
+ self._write_command(chk_cmd_str)
+
+ def schedule_forced_svc_check(self, host, services=None):
+ """
+ This command schedules a forced active check for a particular
+ service.
+
+ Syntax: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time>
+ """
+
+ cmd = "SCHEDULE_FORCED_SVC_CHECK"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ chk_cmd_str = self._fmt_chk_str(cmd, host, svc=service)
+ self._write_command(chk_cmd_str)
+
+ def schedule_host_svc_downtime(self, host, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for
+ all services associated with a particular host.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the host.
+
+ SCHEDULE_HOST_SVC_DOWNTIME;<host_name>;<start_time>;<end_time>;
+ <fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOST_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def delete_host_downtime(self, host, services=None, comment=None):
+ """
+ This command is used to remove scheduled downtime for a particular
+ host.
+
+ Syntax: DEL_DOWNTIME_BY_HOST_NAME;<host_name>;
+ [<service_description>];[<start_time>];[<comment>]
+ """
+
+ cmd = "DEL_DOWNTIME_BY_HOST_NAME"
+
+ if services is None:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, comment=comment)
+ self._write_command(dt_del_cmd_str)
+ else:
+ for service in services:
+ dt_del_cmd_str = self._fmt_dt_del_str(cmd, host, svc=service, comment=comment)
+ self._write_command(dt_del_cmd_str)
+
+ def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular hostgroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;<hostgroup_name>;<start_time>;
+ <end_time>;<fixed>;<trigger_id>;<duration>;<author>;<comment>
+ """
+
+ cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all hosts in a
+ particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the hosts.
+
+ Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30, start=None):
+ """
+ This command is used to schedule downtime for all services in
+ a particular servicegroup.
+
+ During the specified downtime, Nagios will not send
+ notifications out about the services.
+
+ Note that scheduling downtime for services does not
+ automatically schedule downtime for the hosts those services
+ are associated with.
+
+ Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;<servicegroup_name>;
+ <start_time>;<end_time>;<fixed>;<trigger_id>;<duration>;<author>;
+ <comment>
+ """
+
+ cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME"
+ dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes, start=start)
+ self._write_command(dt_cmd_str)
+
+ def disable_host_svc_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services on the specified host.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_host_notifications(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified host.
+
+ Note that this command does not disable notifications for
+ services associated with this host.
+
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "DISABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def disable_svc_notifications(self, host, services=None):
+ """
+ This command is used to prevent notifications from being sent
+ out for the specified service.
+
+ Note that this command does not disable notifications from
+ being sent out about the host.
+
+ Syntax: DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "DISABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_host_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all hosts in the specified servicegroup.
+
+ Note that this command does not disable notifications for
+ services associated with hosts in this service group.
+
+ Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ This command is used to prevent notifications from being sent
+ out for all services in the specified servicegroup.
+
+ Note that this does not prevent notifications from being sent
+ out about the hosts in this servicegroup.
+
+ Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Disables notifications for all hosts in a particular
+ hostgroup.
+
+ Note that this does not disable notifications for the services
+ associated with the hosts in the hostgroup - see the
+ DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that.
+
+ Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def disable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Disables notifications for all services associated with hosts
+ in a particular hostgroup.
+
+ Note that this does not disable notifications for the hosts in
+ the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS
+ command for that.
+
+ Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ self._write_command(notif_str)
+
+ def enable_host_notifications(self, host):
+ """
+ Enables notifications for a particular host.
+
+ Note that this command does not enable notifications for
+ services associated with this host.
+
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ self._write_command(notif_str)
+
+ def enable_host_svc_notifications(self, host):
+ """
+ Enables notifications for all services on the specified host.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = "ENABLE_HOST_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, host)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_svc_notifications(self, host, services=None):
+ """
+ Enables notifications for a particular service.
+
+ Note that this does not enable notifications for the host.
+
+ Syntax: ENABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
+ """
+
+ cmd = "ENABLE_SVC_NOTIFICATIONS"
+
+ if services is None:
+ services = []
+
+ nagios_return = True
+ return_str_list = []
+ for service in services:
+ notif_str = self._fmt_notif_str(cmd, host, svc=service)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_host_notifications(self, hostgroup):
+ """
+ Enables notifications for all hosts in a particular hostgroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_hostgroup_svc_notifications(self, hostgroup):
+ """
+ Enables notifications for all services that are associated
+ with hosts in a particular hostgroup.
+
+ Note that this does not enable notifications for the hosts in
+ this hostgroup.
+
+ Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS;<hostgroup_name>
+ """
+
+ cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, hostgroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_host_notifications(self, servicegroup):
+ """
+ Enables notifications for all hosts that have services that
+ are members of a particular servicegroup.
+
+ Note that this command does not enable notifications for
+ services associated with the hosts in this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def enable_servicegroup_svc_notifications(self, servicegroup):
+ """
+ Enables notifications for all services that are members of a
+ particular servicegroup.
+
+ Note that this does not enable notifications for the hosts in
+ this servicegroup.
+
+ Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS;<servicegroup_name>
+ """
+
+ cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS"
+ notif_str = self._fmt_notif_str(cmd, servicegroup)
+ nagios_return = self._write_command(notif_str)
+
+ if nagios_return:
+ return notif_str
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_host(self, host):
+ """
+ This command is used to prevent notifications from being sent
+ out for the host and all services on the specified host.
+
+ This is equivalent to calling disable_host_svc_notifications
+ and disable_host_notifications.
+
+ Syntax: DISABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: DISABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "DISABLE_HOST_SVC_NOTIFICATIONS",
+ "DISABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def unsilence_host(self, host):
+ """
+ This command is used to enable notifications for the host and
+ all services on the specified host.
+
+ This is equivalent to calling enable_host_svc_notifications
+ and enable_host_notifications.
+
+ Syntax: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name>
+ Syntax: ENABLE_HOST_NOTIFICATIONS;<host_name>
+ """
+
+ cmd = [
+ "ENABLE_HOST_SVC_NOTIFICATIONS",
+ "ENABLE_HOST_NOTIFICATIONS"
+ ]
+ nagios_return = True
+ return_str_list = []
+ for c in cmd:
+ notif_str = self._fmt_notif_str(c, host)
+ nagios_return = self._write_command(notif_str) and nagios_return
+ return_str_list.append(notif_str)
+
+ if nagios_return:
+ return return_str_list
+ else:
+ return "Fail: could not write to the command file"
+
+ def silence_nagios(self):
+ """
+ This command is used to disable notifications for all hosts and services
+ in nagios.
+
+ This is a 'SHUT UP, NAGIOS' command
+ """
+ cmd = 'DISABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def unsilence_nagios(self):
+ """
+ This command is used to enable notifications for all hosts and services
+ in nagios.
+
+        This is an 'OK, NAGIOS, GO' command
+ """
+ cmd = 'ENABLE_NOTIFICATIONS'
+ self._write_command(self._fmt_notif_str(cmd))
+
+ def nagios_cmd(self, cmd):
+ """
+ This sends an arbitrary command to nagios
+
+        It prepends the submission time and appends a newline
+
+ You just have to provide the properly formatted command
+ """
+
+ pre = '[%s]' % int(time.time())
+
+ post = '\n'
+ cmdstr = '%s %s%s' % (pre, cmd, post)
+ self._write_command(cmdstr)
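+        # Illustrative example (hypothetical host name): nagios_cmd('DEL_ALL_HOST_COMMENTS;web01')
+        # would write a line such as '[1672531200] DEL_ALL_HOST_COMMENTS;web01\n'
+        # to the external command file.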
+
+ def act(self):
+ """
+        Determine the requested action from the Ansible parameters and
+        carry it out.
+ """
+ # host or service downtime?
+ if self.action == 'downtime':
+ if self.services == 'host':
+ self.schedule_host_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ elif self.services == 'all':
+ self.schedule_host_svc_downtime(self.host, minutes=self.minutes,
+ start=self.start)
+ else:
+ self.schedule_svc_downtime(self.host,
+ services=self.services,
+ minutes=self.minutes,
+ start=self.start)
+
+ elif self.action == 'acknowledge':
+ if self.services == 'host':
+ self.acknowledge_host_problem(self.host)
+ else:
+ self.acknowledge_svc_problem(self.host, services=self.services)
+
+ elif self.action == 'delete_downtime':
+ if self.services == 'host':
+ self.delete_host_downtime(self.host)
+ elif self.services == 'all':
+ self.delete_host_downtime(self.host, comment='')
+ else:
+ self.delete_host_downtime(self.host, services=self.services)
+
+ elif self.action == 'forced_check':
+ if self.services == 'host':
+ self.schedule_forced_host_check(self.host)
+ elif self.services == 'all':
+ self.schedule_forced_host_svc_check(self.host)
+ else:
+ self.schedule_forced_svc_check(self.host, services=self.services)
+
+ elif self.action == "servicegroup_host_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_host_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+ elif self.action == "servicegroup_service_downtime":
+ if self.servicegroup:
+ self.schedule_servicegroup_svc_downtime(servicegroup=self.servicegroup, minutes=self.minutes, start=self.start)
+
+ # toggle the host AND service alerts
+ elif self.action == 'silence':
+ self.silence_host(self.host)
+
+ elif self.action == 'unsilence':
+ self.unsilence_host(self.host)
+
+ # toggle host/svc alerts
+ elif self.action == 'enable_alerts':
+ if self.services == 'host':
+ self.enable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.enable_host_svc_notifications(self.host)
+ else:
+ self.enable_svc_notifications(self.host,
+ services=self.services)
+
+ elif self.action == 'disable_alerts':
+ if self.services == 'host':
+ self.disable_host_notifications(self.host)
+ elif self.services == 'all':
+ self.disable_host_svc_notifications(self.host)
+ else:
+ self.disable_svc_notifications(self.host,
+ services=self.services)
+ elif self.action == 'silence_nagios':
+ self.silence_nagios()
+
+ elif self.action == 'unsilence_nagios':
+ self.unsilence_nagios()
+
+ elif self.action == 'command':
+ self.nagios_cmd(self.command)
+
+        # unknown action
+ else:
+ self.module.fail_json(msg="unknown action specified: '%s'" %
+ self.action)
+
+ self.module.exit_json(nagios_commands=self.command_results,
+ changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/netcup_dns.py b/ansible_collections/community/general/plugins/modules/netcup_dns.py
new file mode 100644
index 000000000..77be50b2c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/netcup_dns.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Nicolai Buchwitz <nb@tipi-net.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: netcup_dns
+notes: []
+short_description: Manage Netcup DNS records
+description:
+ - "Manages DNS records via the Netcup API, see the docs U(https://ccp.netcup.net/run/webservice/servers/endpoint.php)."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_key:
+ description:
+ - "API key for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))."
+ required: true
+ type: str
+ api_password:
+ description:
+ - "API password for authentication, must be obtained via the netcup CCP (U(https://ccp.netcup.net))."
+ required: true
+ type: str
+ customer_id:
+ description:
+ - Netcup customer id.
+ required: true
+ type: int
+ domain:
+ description:
+      - Domain name for which the records should be added or removed.
+ required: true
+ type: str
+ record:
+ description:
+      - Record to add or delete, supports wildcard (C(*)). Default is C(@) (that is, the zone name).
+ default: "@"
+ aliases: [ name ]
+ type: str
+ type:
+ description:
+ - Record type.
+ choices: ['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']
+ required: true
+ type: str
+ value:
+ description:
+ - Record value.
+ required: true
+ type: str
+ solo:
+ type: bool
+ default: false
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with I(state=present).
+ - This will delete all other records with the same record name and type.
+ priority:
+ description:
+ - Record priority. Required for I(type=MX).
+ required: false
+ type: int
+ state:
+ description:
+ - Whether the record should exist or not.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ timeout:
+ description:
+ - HTTP(S) connection timeout in seconds.
+ default: 5
+ type: int
+ version_added: 5.7.0
+requirements:
+ - "nc-dnsapi >= 0.1.3"
+author: "Nicolai Buchwitz (@nbuchwitz)"
+
+'''
+
+EXAMPLES = '''
+- name: Create a record of type A
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+
+- name: Delete that record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ state: absent
+
+- name: Create a wildcard record
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "*"
+ type: "A"
+ value: "127.0.1.1"
+
+- name: Set the MX record for example.com
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ type: "MX"
+ value: "mail.example.com"
+
+- name: Set a record and ensure that this is the only one
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ name: "demo"
+ domain: "example.com"
+ type: "AAAA"
+ value: "::1"
+ solo: true
+
+- name: Increase the connection timeout to avoid problems with an unstable connection
+ community.general.netcup_dns:
+ api_key: "..."
+ api_password: "..."
+ customer_id: "..."
+ domain: "example.com"
+ name: "mail"
+ type: "A"
+ value: "127.0.0.1"
+ timeout: 30
+
+'''
+
+RETURN = '''
+records:
+  description: List containing all records.
+  returned: success
+  type: complex
+  contains:
+    name:
+      description: The record name.
+      returned: success
+      type: str
+      sample: fancy-hostname
+    type:
+      description: The record type.
+      returned: success
+      type: str
+      sample: A
+    value:
+      description: The record destination.
+      returned: success
+      type: str
+      sample: 127.0.0.1
+    priority:
+      description: The record priority (only relevant if I(type=MX)).
+      returned: success
+      type: int
+      sample: 0
+    id:
+      description: Internal id of the record.
+      returned: success
+      type: int
+      sample: 12345
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+NCDNSAPI_IMP_ERR = None
+try:
+ import nc_dnsapi
+ from nc_dnsapi import DNSRecord
+
+ HAS_NCDNSAPI = True
+except ImportError:
+ NCDNSAPI_IMP_ERR = traceback.format_exc()
+ HAS_NCDNSAPI = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(required=True, no_log=True),
+ api_password=dict(required=True, no_log=True),
+ customer_id=dict(required=True, type='int'),
+
+ domain=dict(required=True),
+ record=dict(required=False, default='@', aliases=['name']),
+ type=dict(required=True, choices=['A', 'AAAA', 'MX', 'CNAME', 'CAA', 'SRV', 'TXT', 'TLSA', 'NS', 'DS']),
+ value=dict(required=True),
+ priority=dict(required=False, type='int'),
+ solo=dict(required=False, type='bool', default=False),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ timeout=dict(required=False, type='int', default=5),
+
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_NCDNSAPI:
+ module.fail_json(msg=missing_required_lib('nc-dnsapi'), exception=NCDNSAPI_IMP_ERR)
+
+ api_key = module.params.get('api_key')
+ api_password = module.params.get('api_password')
+ customer_id = module.params.get('customer_id')
+ domain = module.params.get('domain')
+ record_type = module.params.get('type')
+ record = module.params.get('record')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ solo = module.params.get('solo')
+ state = module.params.get('state')
+ timeout = module.params.get('timeout')
+
+ if record_type == 'MX' and not priority:
+ module.fail_json(msg="record type MX required the 'priority' argument")
+
+ has_changed = False
+ all_records = []
+ try:
+ with nc_dnsapi.Client(customer_id, api_key, api_password, timeout) as api:
+ all_records = api.dns_records(domain)
+ record = DNSRecord(record, record_type, value, priority=priority)
+
+ # try to get existing record
+ record_exists = False
+ for r in all_records:
+ if r == record:
+ record_exists = True
+ record = r
+
+ break
+
+ if state == 'present':
+ if solo:
+ obsolete_records = [r for r in all_records if
+ r.hostname == record.hostname
+ and r.type == record.type
+ and not r.destination == record.destination]
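+                    # Illustrative: with solo=true and a new record
+                    # (demo, AAAA, ::1), existing records (demo, AAAA, ::2)
+                    # and (demo, AAAA, ::3) would be deleted, while records
+                    # with other names or types are left untouched.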
+
+ if obsolete_records:
+ if not module.check_mode:
+ all_records = api.delete_dns_records(domain, obsolete_records)
+
+ has_changed = True
+
+ if not record_exists:
+ if not module.check_mode:
+ all_records = api.add_dns_record(domain, record)
+
+ has_changed = True
+ elif state == 'absent' and record_exists:
+ if not module.check_mode:
+ all_records = api.delete_dns_record(domain, record)
+
+ has_changed = True
+
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=has_changed, result={"records": [record_data(r) for r in all_records]})
+
+
+def record_data(r):
+ return {"name": r.hostname, "type": r.type, "value": r.destination, "priority": r.priority, "id": r.id}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/newrelic_deployment.py b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
new file mode 100644
index 000000000..ac9903b57
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/newrelic_deployment.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Matt Coddington <coddington@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: newrelic_deployment
+author: "Matt Coddington (@mcodd)"
+short_description: Notify New Relic about app deployments
+description:
+    - Notify New Relic about app deployments (see U(https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/record-monitor-deployments/)).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - API token to place in the Api-Key header.
+ required: true
+ app_name:
+ type: str
+ description:
+ - The value of app_name in the newrelic.yml file used by the application.
+ - One of I(app_name) or I(application_id) is required.
+ required: false
+ application_id:
+ type: str
+ description:
+ - The application ID found in the metadata of the application in APM.
+ - One of I(app_name) or I(application_id) is required.
+ required: false
+  changelog:
+    type: str
+    description:
+      - A list of changes for this deployment.
+    required: false
+  description:
+    type: str
+    description:
+      - Text annotation for the deployment; notes for you.
+    required: false
+  revision:
+    type: str
+    description:
+      - A revision number (for example, a git commit SHA).
+    required: true
+  user:
+    type: str
+    description:
+      - The name of the user/process that triggered this deployment.
+    required: false
+ appname:
+ type: str
+ description:
+ - Name of the application.
+ - This option has been deprecated and will be removed in community.general 7.0.0. Please do not use.
+ required: false
+ environment:
+ type: str
+ description:
+ - The environment for this deployment.
+      - This option has been deprecated and will be removed in community.general 7.0.0. Please do not use.
+ required: false
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+ type: bool
+requirements: []
+'''
+
+EXAMPLES = '''
+- name: Notify New Relic about an app deployment
+ community.general.newrelic_deployment:
+ token: AAAAAA
+ app_name: myapp
+ user: ansible deployment
+ revision: '1.0'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import quote
+import json
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ app_name=dict(required=False),
+ application_id=dict(required=False),
+ changelog=dict(required=False),
+ description=dict(required=False),
+ revision=dict(required=True),
+ user=dict(required=False),
+ appname=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'),
+ environment=dict(required=False, removed_in_version='7.0.0', removed_from_collection='community.general'),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['app_name', 'application_id']],
+ supports_check_mode=True
+ )
+
+ # build list of params
+ params = {}
+ if module.params["app_name"] and module.params["application_id"]:
+ module.fail_json(msg="only one of 'app_name' or 'application_id' can be set")
+
+ app_id = None
+ if module.params["app_name"]:
+ app_id = get_application_id(module)
+ elif module.params["application_id"]:
+ app_id = module.params["application_id"]
+ else:
+ module.fail_json(msg="you must set one of 'app_name' or 'application_id'")
+
+ if app_id is None:
+ module.fail_json(msg="No application with name %s is found in NewRelic" % module.params["app_name"])
+
+ for item in ["changelog", "description", "revision", "user"]:
+ if module.params[item]:
+ params[item] = module.params[item]
+
+ # If we're in check mode, just exit pretending like we succeeded
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Send the data to New Relic
+ url = "https://api.newrelic.com/v2/applications/%s/deployments.json" % quote(str(app_id), safe='')
+ data = {
+ 'deployment': params
+ }
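+    # Illustrative request body: {"deployment": {"revision": "1.0",
+    # "user": "ansible deployment", "description": "..."}} - only the
+    # parameters that were actually supplied are included.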
+ headers = {
+ 'Api-Key': module.params["token"],
+ 'Content-Type': 'application/json',
+ }
+ response, info = fetch_url(module, url, data=module.jsonify(data), headers=headers, method="POST")
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg="Unable to insert deployment marker: %s" % info['msg'])
+
+
+def get_application_id(module):
+ url = "https://api.newrelic.com/v2/applications.json"
+ data = "filter[name]=%s" % module.params["app_name"]
+ headers = {
+ 'Api-Key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] not in (200, 201):
+ module.fail_json(msg="Unable to get application: %s" % info['msg'])
+
+ result = json.loads(response.read())
+ if result is None or len(result.get("applications", "")) == 0:
+ module.fail_json(msg='No application found with name "%s"' % module.params["app_name"])
+
+ return result["applications"][0]["id"]
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nexmo.py b/ansible_collections/community/general/plugins/modules/nexmo.py
new file mode 100644
index 000000000..7461c1cb9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nexmo.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: nexmo
+short_description: Send an SMS via Nexmo
+description:
+    - Send an SMS message via Nexmo.
+author: "Matt Martz (@sivel)"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+  api_key:
+    type: str
+    description:
+      - Nexmo API Key.
+    required: true
+  api_secret:
+    type: str
+    description:
+      - Nexmo API Secret.
+    required: true
+  src:
+    type: int
+    description:
+      - Nexmo number to send from.
+    required: true
+  dest:
+    type: list
+    elements: int
+    description:
+      - Phone number(s) to send the SMS message to.
+    required: true
+  msg:
+    type: str
+    description:
+      - Text of the message to send. Messages longer than 160 characters will be
+        split into multiple messages.
+    required: true
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - community.general.attributes
+'''
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ community.general.nexmo:
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+"""
+import json
+
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url, url_argument_spec
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
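+    # Each destination number gets its own GET request; illustratively:
+    # https://rest.nexmo.com/sms/json?api_key=...&from=12345678901&text=...&to=10987654321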
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except Exception:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
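+    # At this point 'failed' holds every number whose HTTP call failed,
+    # whose response was not JSON, or whose per-part Nexmo status was
+    # non-zero.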
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list', elements='int'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nginx_status_info.py b/ansible_collections/community/general/plugins/modules/nginx_status_info.py
new file mode 100644
index 000000000..6bbea078b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nginx_status_info.py
@@ -0,0 +1,159 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, René Moser <mail@renemoser.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: nginx_status_info
+short_description: Retrieve information on nginx status
+description:
+    - Gathers information from nginx from a URL with C(stub_status) enabled.
+author: "René Moser (@resmo)"
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ url:
+ type: str
+ description:
+ - URL of the nginx status.
+ required: true
+ timeout:
+ type: int
+ description:
+ - HTTP connection timeout in seconds.
+ required: false
+ default: 10
+
+notes:
+ - See U(http://nginx.org/en/docs/http/ngx_http_stub_status_module.html) for more information.
+'''
+
+EXAMPLES = r'''
+# Gather status info from nginx on localhost
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ register: result
+
+# Gather status info from nginx on localhost with a custom timeout of 20 seconds
+- name: Get current http stats
+ community.general.nginx_status_info:
+ url: http://localhost/nginx_status
+ timeout: 20
+ register: result
+'''
+
+RETURN = r'''
+---
+active_connections:
+ description: Active connections.
+ returned: success
+ type: int
+ sample: 2340
+accepts:
+ description: The total number of accepted client connections.
+ returned: success
+ type: int
+ sample: 81769947
+handled:
+ description: The total number of handled connections. Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ returned: success
+ type: int
+ sample: 81769947
+requests:
+ description: The total number of client requests.
+ returned: success
+ type: int
+ sample: 144332345
+reading:
+ description: The current number of connections where nginx is reading the request header.
+ returned: success
+ type: int
+ sample: 0
+writing:
+ description: The current number of connections where nginx is writing the response back to the client.
+ returned: success
+ type: int
+ sample: 241
+waiting:
+ description: The current number of idle client connections waiting for a request.
+ returned: success
+ type: int
+ sample: 2092
+data:
+ description: HTTP response as is.
+ returned: success
+ type: str
+ sample: "Active connections: 2340 \nserver accepts handled requests\n 81769947 81769947 144332345 \nReading: 0 Writing: 241 Waiting: 2092 \n"
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+
+
+class NginxStatusInfo(object):
+
+ def __init__(self):
+ self.url = module.params.get('url')
+ self.timeout = module.params.get('timeout')
+
+ def run(self):
+ result = {
+ 'active_connections': None,
+ 'accepts': None,
+ 'handled': None,
+ 'requests': None,
+ 'reading': None,
+ 'writing': None,
+ 'waiting': None,
+ 'data': None,
+ }
+ (response, info) = fetch_url(module=module, url=self.url, force=True, timeout=self.timeout)
+ if not response:
+ module.fail_json(msg="No valid or no response from url %s within %s seconds (timeout)" % (self.url, self.timeout))
+
+ data = to_text(response.read(), errors='surrogate_or_strict')
+ if not data:
+ return result
+
+ result['data'] = data
+ expr = r'Active connections: ([0-9]+) \nserver accepts handled requests\n ([0-9]+) ([0-9]+) ([0-9]+) \n' \
+ r'Reading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)'
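+        # The pattern above matches stub_status output of the form:
+        #   Active connections: 2340
+        #   server accepts handled requests
+        #    81769947 81769947 144332345
+        #   Reading: 0 Writing: 241 Waiting: 2092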
+ match = re.match(expr, data, re.S)
+ if match:
+ result['active_connections'] = int(match.group(1))
+ result['accepts'] = int(match.group(2))
+ result['handled'] = int(match.group(3))
+ result['requests'] = int(match.group(4))
+ result['reading'] = int(match.group(5))
+ result['writing'] = int(match.group(6))
+ result['waiting'] = int(match.group(7))
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ url=dict(type='str', required=True),
+ timeout=dict(type='int', default=10),
+ ),
+ supports_check_mode=True,
+ )
+
+ nginx_status_info = NginxStatusInfo().run()
+ module.exit_json(changed=False, **nginx_status_info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nictagadm.py b/ansible_collections/community/general/plugins/modules/nictagadm.py
new file mode 100644
index 000000000..074e09b4a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nictagadm.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Bruce Smith <Bruce.Smith.IT@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nictagadm
+short_description: Manage nic tags on SmartOS systems
+description:
+ - Create or delete nic tags on SmartOS systems.
+author:
+ - Bruce Smith (@SmithX10)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the nic tag.
+ required: true
+ type: str
+ mac:
+ description:
+ - Specifies the I(mac) address to attach the nic tag to when not creating an I(etherstub).
+ - Parameters I(mac) and I(etherstub) are mutually exclusive.
+ type: str
+ etherstub:
+ description:
+ - Specifies that the nic tag will be attached to a created I(etherstub).
+ - Parameter I(etherstub) is mutually exclusive with both I(mtu), and I(mac).
+ type: bool
+ default: false
+ mtu:
+ description:
+ - Specifies the size of the I(mtu) of the desired nic tag.
+ - Parameters I(mtu) and I(etherstub) are mutually exclusive.
+ type: int
+ force:
+ description:
+      - When I(state) is C(absent), setting this switch will use the C(-f) parameter and delete the nic tag regardless of existing VMs.
+ type: bool
+ default: false
+ state:
+ description:
+ - Create or delete a SmartOS nic tag.
+ type: str
+ choices: [ absent, present ]
+ default: present
+'''
+
+EXAMPLES = r'''
+- name: Create 'storage0' on '00:1b:21:a3:f5:4d'
+ community.general.nictagadm:
+ name: storage0
+ mac: 00:1b:21:a3:f5:4d
+ mtu: 9000
+ state: present
+
+- name: Remove 'storage0' nic tag
+ community.general.nictagadm:
+ name: storage0
+ state: absent
+'''
+
+RETURN = r'''
+name:
+  description: Nic tag name.
+  returned: always
+  type: str
+  sample: storage0
+mac:
+  description: MAC address that the nic tag was attached to.
+  returned: always
+  type: str
+  sample: 00:1b:21:a3:f5:4d
+etherstub:
+  description: Specifies if the nic tag will create and attach to an etherstub.
+  returned: always
+  type: bool
+  sample: false
+mtu:
+  description: Specifies which MTU size was passed during the nictagadm add command. I(mtu) and I(etherstub) are mutually exclusive.
+  returned: always
+  type: int
+  sample: 1500
+force:
+  description: Shows if C(-f) was used during the deletion of a nic tag.
+  returned: always
+  type: bool
+  sample: false
+state:
+  description: State of the target.
+  returned: always
+  type: str
+  sample: present
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+
+
+class NicTag(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.name = module.params['name']
+ self.mac = module.params['mac']
+ self.etherstub = module.params['etherstub']
+ self.mtu = module.params['mtu']
+ self.force = module.params['force']
+ self.state = module.params['state']
+
+ self.nictagadm_bin = self.module.get_bin_path('nictagadm', True)
+
+ def is_valid_mac(self):
+ return is_mac(self.mac.lower())
+
+ def nictag_exists(self):
+ cmd = [self.nictagadm_bin, 'exists', self.name]
+ (rc, dummy, dummy) = self.module.run_command(cmd)
+
+ return rc == 0
+
+ def add_nictag(self):
+ cmd = [self.nictagadm_bin, '-v', 'add']
+
+ if self.etherstub:
+ cmd.append('-l')
+
+ if self.mtu:
+ cmd.append('-p')
+ cmd.append('mtu=' + str(self.mtu))
+
+ if self.mac:
+ cmd.append('-p')
+ cmd.append('mac=' + str(self.mac))
+
+ cmd.append(self.name)
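+        # Illustrative resulting command, using the values from the module
+        # EXAMPLES (nictagadm_bin is the resolved full path):
+        # ['/usr/sbin/nictagadm', '-v', 'add', '-p', 'mtu=9000',
+        #  '-p', 'mac=00:1b:21:a3:f5:4d', 'storage0']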
+
+ return self.module.run_command(cmd)
+
+ def delete_nictag(self):
+ cmd = [self.nictagadm_bin, '-v', 'delete']
+
+ if self.force:
+ cmd.append('-f')
+
+ cmd.append(self.name)
+
+ return self.module.run_command(cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ mac=dict(type='str'),
+ etherstub=dict(type='bool', default=False),
+ mtu=dict(type='int'),
+ force=dict(type='bool', default=False),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ mutually_exclusive=[
+ ['etherstub', 'mac'],
+ ['etherstub', 'mtu'],
+ ],
+ required_if=[
+ ['etherstub', False, ['name', 'mac']],
+ ['state', 'absent', ['name', 'force']],
+ ],
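+        # required_if reads: when etherstub is false, name and mac are
+        # required; when state is absent, name and force are required.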
+ supports_check_mode=True
+ )
+
+ nictag = NicTag(module)
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ changed=False,
+ etherstub=nictag.etherstub,
+ force=nictag.force,
+ name=nictag.name,
+ mac=nictag.mac,
+ mtu=nictag.mtu,
+ state=nictag.state,
+ )
+
+ if not nictag.is_valid_mac():
+ module.fail_json(msg='Invalid MAC Address Value',
+ name=nictag.name,
+ mac=nictag.mac,
+ etherstub=nictag.etherstub)
+
+ if nictag.state == 'absent':
+ if nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.delete_nictag()
+ if rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+ elif nictag.state == 'present':
+ if not nictag.nictag_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nictag.add_nictag()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nictag.name, msg=err, rc=rc)
+
+ if rc is not None:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nmcli.py b/ansible_collections/community/general/plugins/modules/nmcli.py
new file mode 100644
index 000000000..08680bf6e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nmcli.py
@@ -0,0 +1,2504 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Chris Long <alcamie@gmail.com> <chlong@redhat.com>
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: nmcli
+author:
+ - Chris Long (@alcamie101)
+short_description: Manage Networking
+requirements:
+ - nmcli
+extends_documentation_fragment:
+ - community.general.attributes
+description:
+  - 'Manage network devices. Create, modify, and manage various connection and device types, for example ethernet, teams, bonds, and vlans.'
+  - 'On CentOS 8 and Fedora >=29 like systems, the requirements can be met by installing the following packages: NetworkManager.'
+  - 'On CentOS 7 and Fedora <=28 like systems, the requirements can be met by installing the following packages: NetworkManager-tui.'
+  - 'On Ubuntu and Debian like systems, the requirements can be met by installing the following packages: network-manager.'
+ - 'On openSUSE, the requirements can be met by installing the following packages: NetworkManager.'
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ state:
+ description:
+ - Whether the device should exist or not, taking action if the state is different from what is stated.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ autoconnect:
+ description:
+ - Whether the connection should start on boot.
+      - Whether the connection profile can be automatically activated.
+ type: bool
+ default: true
+ conn_name:
+ description:
+ - The name used to call the connection. Pattern is <type>[-<ifname>][-<num>].
+ type: str
+ required: true
+ ifname:
+ description:
+ - The interface to bind the connection to.
+ - The connection will only be applicable to this interface name.
+ - A special value of C('*') can be used for interface-independent connections.
+ - The ifname argument is mandatory for all connection types except bond, team, bridge, vlan and vpn.
+      - This parameter defaults to C(conn_name) when left unset for all connection types except vpn, which removes it.
+ type: str
+ type:
+ description:
+ - This is the type of device or network connection that you wish to create or modify.
+ - Type C(dummy) is added in community.general 3.5.0.
+ - Type C(generic) is added in Ansible 2.5.
+ - Type C(infiniband) is added in community.general 2.0.0.
+ - Type C(gsm) is added in community.general 3.7.0.
+ - Type C(macvlan) is added in community.general 6.6.0.
+ - Type C(wireguard) is added in community.general 4.3.0.
+ - Type C(vpn) is added in community.general 5.1.0.
+ type: str
+ choices: [ bond, bond-slave, bridge, bridge-slave, dummy, ethernet, generic, gre, infiniband, ipip, macvlan, sit, team, team-slave, vlan, vxlan,
+ wifi, gsm, wireguard, vpn ]
+ mode:
+ description:
+ - This is the type of device or network connection that you wish to create for a bond or bridge.
+ type: str
+ choices: [ 802.3ad, active-backup, balance-alb, balance-rr, balance-tlb, balance-xor, broadcast ]
+ default: balance-rr
+ transport_mode:
+ description:
+ - This option sets the connection type of Infiniband IPoIB devices.
+ type: str
+ choices: [ datagram, connected ]
+ version_added: 5.8.0
+ master:
+ description:
+ - Master <master (ifname, or connection UUID or conn_name) of bridge, team, bond master connection profile.
+ type: str
+ ip4:
+ description:
+ - List of IPv4 addresses to this interface.
+ - Use the format C(192.0.2.24/24) or C(192.0.2.24).
+ - If defined and I(method4) is not specified, automatically set C(ipv4.method) to C(manual).
+ type: list
+ elements: str
+ gw4:
+ description:
+ - The IPv4 gateway for this interface.
+ - Use the format C(192.0.2.1).
+      - This parameter is mutually exclusive with the I(never_default4) parameter.
+ type: str
+ gw4_ignore_auto:
+ description:
+ - Ignore automatically configured IPv4 routes.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ routes4:
+ description:
+ - The list of IPv4 routes.
+ - Use the format C(192.0.3.0/24 192.0.2.1).
+ - To specify more complex routes, use the I(routes4_extended) option.
+ type: list
+ elements: str
+ version_added: 2.0.0
+ routes4_extended:
+ description:
+ - The list of IPv4 routes.
+ type: list
+ elements: dict
+ suboptions:
+ ip:
+ description:
+ - IP or prefix of route.
+ - Use the format C(192.0.3.0/24).
+ type: str
+ required: true
+ next_hop:
+ description:
+ - Use the format C(192.0.2.1).
+ type: str
+ metric:
+ description:
+ - Route metric.
+ type: int
+ table:
+ description:
+ - The table to add this route to.
+ - The default depends on C(ipv4.route-table).
+ type: int
+ cwnd:
+ description:
+ - The clamp for congestion window.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller.
+ type: int
+ onlink:
+ description:
+ - Pretend that the nexthop is directly attached to this link, even if it does not match any interface prefix.
+ type: bool
+ tos:
+ description:
+ - The Type Of Service.
+ type: int
+ route_metric4:
+ description:
+ - Set metric level of ipv4 routes configured on interface.
+ type: int
+ version_added: 2.0.0
+ routing_rules4:
+ description:
+ - Is the same as in an C(ip route add) command, except always requires specifying a priority.
+ type: list
+ elements: str
+ version_added: 3.3.0
+ never_default4:
+ description:
+ - Set as default route.
+      - This parameter is mutually exclusive with the I(gw4) parameter.
+ type: bool
+ default: false
+ version_added: 2.0.0
+ dns4:
+ description:
+ - A list of up to 3 DNS servers.
+ - The entries must be IPv4 addresses, for example C(192.0.2.53).
+ elements: str
+ type: list
+ dns4_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ dns4_ignore_auto:
+ description:
+ - Ignore automatically configured IPv4 name servers.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ method4:
+ description:
+ - Configuration method to be used for IPv4.
+ - If I(ip4) is set, C(ipv4.method) is automatically set to C(manual) and this parameter is not needed.
+ type: str
+ choices: [auto, link-local, manual, shared, disabled]
+ version_added: 2.2.0
+ may_fail4:
+ description:
+ - If you need I(ip4) configured before C(network-online.target) is reached, set this option to C(false).
+ - This option applies when C(method4) is not C(disabled).
+ type: bool
+ default: true
+ version_added: 3.3.0
+ ip6:
+ description:
+ - List of IPv6 addresses to this interface.
+ - Use the format C(abbe::cafe/128) or C(abbe::cafe).
+ - If defined and I(method6) is not specified, automatically set C(ipv6.method) to C(manual).
+ type: list
+ elements: str
+ gw6:
+ description:
+ - The IPv6 gateway for this interface.
+ - Use the format C(2001:db8::1).
+ type: str
+ gw6_ignore_auto:
+ description:
+ - Ignore automatically configured IPv6 routes.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ routes6:
+ description:
+ - The list of IPv6 routes.
+ - Use the format C(fd12:3456:789a:1::/64 2001:dead:beef::1).
+ - To specify more complex routes, use the I(routes6_extended) option.
+ type: list
+ elements: str
+ version_added: 4.4.0
+ routes6_extended:
+ description:
+ - The list of IPv6 routes but with parameters.
+ type: list
+ elements: dict
+ suboptions:
+ ip:
+ description:
+ - IP or prefix of route.
+ - Use the format C(fd12:3456:789a:1::/64).
+ type: str
+ required: true
+ next_hop:
+ description:
+ - Use the format C(2001:dead:beef::1).
+ type: str
+ metric:
+ description:
+ - Route metric.
+ type: int
+ table:
+ description:
+ - The table to add this route to.
+ - The default depends on C(ipv6.route-table).
+ type: int
+ cwnd:
+ description:
+ - The clamp for congestion window.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller.
+ type: int
+ onlink:
+ description:
+ - Pretend that the nexthop is directly attached to this link, even if it does not match any interface prefix.
+ type: bool
+ route_metric6:
+ description:
+ - Set metric level of IPv6 routes configured on interface.
+ type: int
+ version_added: 4.4.0
+ dns6:
+ description:
+ - A list of up to 3 DNS servers.
+ - The entries must be IPv6 addresses, for example C(2001:4860:4860::8888).
+ elements: str
+ type: list
+ dns6_search:
+ description:
+ - A list of DNS search domains.
+ elements: str
+ type: list
+ dns6_ignore_auto:
+ description:
+ - Ignore automatically configured IPv6 name servers.
+ type: bool
+ default: false
+ version_added: 3.2.0
+ method6:
+ description:
+      - Configuration method to be used for IPv6.
+ - If I(ip6) is set, C(ipv6.method) is automatically set to C(manual) and this parameter is not needed.
+ - C(disabled) was added in community.general 3.3.0.
+ type: str
+ choices: [ignore, auto, dhcp, link-local, manual, shared, disabled]
+ version_added: 2.2.0
+ ip_privacy6:
+ description:
+ - If enabled, it makes the kernel generate a temporary IPv6 address in addition to the public one.
+ type: str
+ choices: [disabled, prefer-public-addr, prefer-temp-addr, unknown]
+ version_added: 4.2.0
+ addr_gen_mode6:
+ description:
+ - Configure method for creating the address for use with IPv6 Stateless Address Autoconfiguration.
+      - C(default) and C(default-or-eui64) have been added in community.general 6.5.0.
+ type: str
+ choices: [default, default-or-eui64, eui64, stable-privacy]
+ version_added: 4.2.0
+ mtu:
+ description:
+ - The connection MTU, e.g. 9000. This can't be applied when creating the interface and is done once the interface has been created.
+      - Can be used when modifying Team, VLAN, Ethernet (future plans to implement wifi, gsm, pppoe, infiniband).
+ - This parameter defaults to C(1500) when unset.
+ type: int
+ dhcp_client_id:
+ description:
+ - DHCP Client Identifier sent to the DHCP server.
+ type: str
+ primary:
+ description:
+      - This is only used with bond and is the primary interface name (for "active-backup" mode); this is usually the 'ifname'.
+ type: str
+ miimon:
+ description:
+ - This is only used with bond - miimon.
+ - This parameter defaults to C(100) when unset.
+ type: int
+ downdelay:
+ description:
+ - This is only used with bond - downdelay.
+ type: int
+ updelay:
+ description:
+ - This is only used with bond - updelay.
+ type: int
+ xmit_hash_policy:
+ description:
+ - This is only used with bond - xmit_hash_policy type.
+ type: str
+ version_added: 5.6.0
+ arp_interval:
+ description:
+ - This is only used with bond - ARP interval.
+ type: int
+ arp_ip_target:
+ description:
+ - This is only used with bond - ARP IP target.
+ type: str
+ stp:
+ description:
+ - This is only used with bridge and controls whether Spanning Tree Protocol (STP) is enabled for this bridge.
+ type: bool
+ default: true
+ priority:
+ description:
+ - This is only used with 'bridge' - sets STP priority.
+ type: int
+ default: 128
+ forwarddelay:
+ description:
+ - This is only used with bridge - [forward-delay <2-30>] STP forwarding delay, in seconds.
+ type: int
+ default: 15
+ hellotime:
+ description:
+ - This is only used with bridge - [hello-time <1-10>] STP hello time, in seconds.
+ type: int
+ default: 2
+ maxage:
+ description:
+ - This is only used with bridge - [max-age <6-42>] STP maximum message age, in seconds.
+ type: int
+ default: 20
+ ageingtime:
+ description:
+ - This is only used with bridge - [ageing-time <0-1000000>] the Ethernet MAC address aging time, in seconds.
+ type: int
+ default: 300
+ mac:
+ description:
+ - MAC address of the connection.
+ - Note this requires a recent kernel feature, originally introduced in 3.15 upstream kernel.
+ type: str
+ slavepriority:
+ description:
+ - This is only used with 'bridge-slave' - [<0-63>] - STP priority of this slave.
+ type: int
+ default: 32
+ path_cost:
+ description:
+ - This is only used with 'bridge-slave' - [<1-65535>] - STP port cost for destinations via this slave.
+ type: int
+ default: 100
+ hairpin:
+ description:
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
+ - The default value is C(true), but that is being deprecated
+ and it will be changed to C(false) in community.general 7.0.0.
+ type: bool
+ runner:
+ description:
+ - This is the type of device or network connection that you wish to create for a team.
+ type: str
+ choices: [ broadcast, roundrobin, activebackup, loadbalance, lacp ]
+ default: roundrobin
+ version_added: 3.4.0
+ runner_hwaddr_policy:
+ description:
+ - This defines the policy of how hardware addresses of team device and port devices
+ should be set during the team lifetime.
+ type: str
+ choices: [ same_all, by_active, only_active ]
+ version_added: 3.4.0
+ runner_fast_rate:
+ description:
+ - Option specifies the rate at which our link partner is asked to transmit LACPDU
+ packets. If this is C(true) then packets will be sent once per second. Otherwise they
+ will be sent every 30 seconds.
+ - Only allowed for C(lacp) runner.
+ type: bool
+ version_added: 6.5.0
+ vlanid:
+ description:
+ - This is only used with VLAN - VLAN ID in range <0-4095>.
+ type: int
+ vlandev:
+ description:
+ - This is only used with VLAN - parent device this VLAN is on, can use ifname.
+ type: str
+ flags:
+ description:
+ - This is only used with VLAN - flags.
+ type: str
+ ingress:
+ description:
+ - This is only used with VLAN - VLAN ingress priority mapping.
+ type: str
+ egress:
+ description:
+ - This is only used with VLAN - VLAN egress priority mapping.
+ type: str
+ vxlan_id:
+ description:
+ - This is only used with VXLAN - VXLAN ID.
+ type: int
+ vxlan_remote:
+ description:
+ - This is only used with VXLAN - VXLAN destination IP address.
+ type: str
+ vxlan_local:
+ description:
+ - This is only used with VXLAN - VXLAN local IP address.
+ type: str
+ ip_tunnel_dev:
+ description:
+ - This is used with GRE/IPIP/SIT - parent device this GRE/IPIP/SIT tunnel, can use ifname.
+ type: str
+ ip_tunnel_remote:
+ description:
+ - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT destination IP address.
+ type: str
+ ip_tunnel_local:
+ description:
+ - This is used with GRE/IPIP/SIT - GRE/IPIP/SIT local IP address.
+ type: str
+ ip_tunnel_input_key:
+ description:
+ - The key used for tunnel input packets.
+ - Only used when I(type=gre).
+ type: str
+ version_added: 3.6.0
+ ip_tunnel_output_key:
+ description:
+ - The key used for tunnel output packets.
+ - Only used when I(type=gre).
+ type: str
+ version_added: 3.6.0
+ zone:
+ description:
+ - The trust level of the connection.
+ - When updating this property on a currently activated connection, the change takes effect immediately.
+ type: str
+ version_added: 2.0.0
+ wifi_sec:
+ description:
+ - The security configuration of the WiFi connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless-security.html).'
+ - 'For instance to use common WPA-PSK auth with a password:
+ C({key-mgmt: wpa-psk, psk: my_password}).'
+ type: dict
+ suboptions:
+ auth-alg:
+ description:
+ - When WEP is used (that is, if I(key-mgmt) = C(none) or C(ieee8021x)) indicate the 802.11 authentication algorithm required by the AP here.
+ - One of C(open) for Open System, C(shared) for Shared Key, or C(leap) for Cisco LEAP.
+ - When using Cisco LEAP (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)) the I(leap-username) and I(leap-password) properties
+ must be specified.
+ type: str
+ choices: [ open, shared, leap ]
+ fils:
+ description:
+ - Indicates whether Fast Initial Link Setup (802.11ai) must be enabled for the connection.
+ - One of C(0) (use global default value), C(1) (disable FILS), C(2) (enable FILS if the supplicant and the access point support it) or C(3)
+ (enable FILS and fail if not supported).
+ - When set to C(0) and no global default is set, FILS will be optionally enabled.
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ default: 0
+ group:
+ description:
+ - A list of group/broadcast encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in
+ the list.
+ - For maximum compatibility leave this property empty.
+ type: list
+ elements: str
+ choices: [ wep40, wep104, tkip, ccmp ]
+ key-mgmt:
+ description:
+ - Key management used for the connection.
+ - One of C(none) (WEP or no password protection), C(ieee8021x) (Dynamic WEP), C(owe) (Opportunistic Wireless Encryption), C(wpa-psk) (WPA2
+ + WPA3 personal), C(sae) (WPA3 personal only), C(wpa-eap) (WPA2 + WPA3 enterprise) or C(wpa-eap-suite-b-192) (WPA3 enterprise only).
+ - This property must be set for any Wi-Fi connection that uses security.
+ type: str
+ choices: [ none, ieee8021x, owe, wpa-psk, sae, wpa-eap, wpa-eap-suite-b-192 ]
+ leap-password-flags:
+ description: Flags indicating how to handle the I(leap-password) property.
+ type: list
+ elements: int
+ leap-password:
+ description: The login password for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
+ type: str
+ leap-username:
+ description: The login username for legacy LEAP connections (that is, if I(key-mgmt=ieee8021x) and I(auth-alg=leap)).
+ type: str
+ pairwise:
+ description:
+ - A list of pairwise encryption algorithms which prevents connections to Wi-Fi networks that do not utilize one of the algorithms in the
+ list.
+ - For maximum compatibility leave this property empty.
+ type: list
+ elements: str
+ choices: [ tkip, ccmp ]
+ pmf:
+ description:
+ - Indicates whether Protected Management Frames (802.11w) must be enabled for the connection.
+ - One of C(0) (use global default value), C(1) (disable PMF), C(2) (enable PMF if the supplicant and the access point support it) or C(3)
+ (enable PMF and fail if not supported).
+ - When set to C(0) and no global default is set, PMF will be optionally enabled.
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ default: 0
+ proto:
+ description:
+ - List of strings specifying the allowed WPA protocol versions to use.
+ - Each element may be C(wpa) (allow WPA) or C(rsn) (allow WPA2/RSN).
+ - If not specified, both WPA and RSN connections are allowed.
+ type: list
+ elements: str
+ choices: [ wpa, rsn ]
+ psk-flags:
+ description: Flags indicating how to handle the I(psk) property.
+ type: list
+ elements: int
+ psk:
+ description:
+ - Pre-Shared-Key for WPA networks.
+ - For WPA-PSK, it is either an ASCII passphrase of 8 to 63 characters that is (as specified in the 802.11i standard) hashed to derive the
+ actual key, or the key in the form of 64 hexadecimal characters.
+ - WPA3-Personal networks use a passphrase of any length for SAE authentication.
+ type: str
+ wep-key-flags:
+ description: Flags indicating how to handle the I(wep-key0), I(wep-key1), I(wep-key2), and I(wep-key3) properties.
+ type: list
+ elements: int
+ wep-key-type:
+ description:
+ - Controls the interpretation of WEP keys.
+ - Allowed values are C(1), in which case the key is either a 10- or 26-character hexadecimal string, or a 5- or 13-character ASCII
+ password; or C(2), in which case the passphrase is provided as a string and will be hashed using the de-facto MD5 method to derive the
+ actual WEP key.
+ type: int
+ choices: [ 1, 2 ]
+ wep-key0:
+ description:
+ - Index 0 WEP key. This is the WEP key used in most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key1:
+ description:
+ - Index 1 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key2:
+ description:
+ - Index 2 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-key3:
+ description:
+ - Index 3 WEP key. This WEP index is not used by most networks.
+ - See the I(wep-key-type) property for a description of how this key is interpreted.
+ type: str
+ wep-tx-keyidx:
+ description:
+ - When static WEP is used (that is, if I(key-mgmt=none)) and a non-default WEP key index is used by the AP, put that WEP key index here.
+ - Valid values are C(0) (default key) through C(3).
+ - Note that some consumer access points (like the Linksys WRT54G) number the keys C(1) - C(4).
+ type: int
+ choices: [ 0, 1, 2, 3 ]
+ default: 0
+ wps-method:
+ description:
+ - Flags indicating which mode of WPS is to be used if any.
+ - There is little point in changing the default setting as NetworkManager will automatically determine whether it is feasible to start WPS
+ enrollment from the Access Point capabilities.
+ - WPS can be disabled by setting this property to a value of C(1).
+ type: int
+ default: 0
+ version_added: 3.0.0
+ ssid:
+ description:
+ - Name of the Wireless router or the access point.
+ type: str
+ version_added: 3.0.0
+ wifi:
+ description:
+ - The configuration of the WiFi connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-802-11-wireless.html).'
+ - 'For instance to create a hidden AP mode WiFi connection:
+ C({hidden: true, mode: ap}).'
+ type: dict
+ suboptions:
+ ap-isolation:
+ description:
+ - Configures AP isolation, which prevents communication between wireless devices connected to this AP.
+ - This property can be set to a value different from C(-1) only when the interface is configured in AP mode.
+ - If set to C(1), devices are not able to communicate with each other. This increases security because it protects devices against attacks
+ from other clients in the network. At the same time, it prevents devices from accessing resources on the same wireless network, such as
+ file shares, printers, and so on.
+ - If set to C(0), devices can talk to each other.
+ - When set to C(-1), the global default is used; in case the global default is unspecified it is assumed to be C(0).
+ type: int
+ choices: [ -1, 0, 1 ]
+ default: -1
+ assigned-mac-address:
+ description:
+ - The new field for the cloned MAC address.
+ - It can be either a hardware address in ASCII representation, or one of the special values C(preserve), C(permanent), C(random) or
+ C(stable).
+ - This field replaces the deprecated I(cloned-mac-address) on D-Bus, which can only contain explicit hardware addresses.
+ - Note that this property only exists in the D-Bus API. libnm and nmcli continue to call this property I(cloned-mac-address).
+ type: str
+ band:
+ description:
+ - 802.11 frequency band of the network.
+ - One of C(a) for 5GHz 802.11a or C(bg) for 2.4GHz 802.11.
+ - This will lock associations to the Wi-Fi network to the specific band, so for example, if C(a) is specified, the device will not
+ associate with the same network in the 2.4GHz band even if the network's settings are compatible.
+ - This setting depends on specific driver capability and may not work with all drivers.
+ type: str
+ choices: [ a, bg ]
+ bssid:
+ description:
+ - If specified, directs the device to only associate with the given access point.
+ - This capability is highly driver dependent and not supported by all devices.
+ - Note this property does not control the BSSID used when creating an Ad-Hoc network and is unlikely to in the future.
+ type: str
+ channel:
+ description:
+ - Wireless channel to use for the Wi-Fi connection.
+ - The device will only join (or create for Ad-Hoc networks) a Wi-Fi network on the specified channel.
+ - Because channel numbers overlap between bands, this property also requires the I(band) property to be set.
+ type: int
+ default: 0
+ cloned-mac-address:
+ description:
+ - This D-Bus field is deprecated in favor of I(assigned-mac-address) which is more flexible and allows specifying special variants like
+ C(random).
+ - For libnm and nmcli, this field is called I(cloned-mac-address).
+ type: str
+ generate-mac-address-mask:
+ description:
+ - With I(cloned-mac-address) setting C(random) or C(stable), by default all bits of the MAC address are scrambled and a
+ locally-administered, unicast MAC address is created. This property allows specifying that certain bits are fixed.
+ - Note that the least significant bit of the first MAC address will always be unset to create a unicast MAC address.
+ - If the property is C(null), it is eligible to be overwritten by a default connection setting.
+ - If the value is still C(null) or an empty string, the default is to create a locally-administered, unicast MAC address.
+ - If the value contains one MAC address, this address is used as mask. The set bits of the mask are to be filled with the current MAC
+ address of the device, while the unset bits are subject to randomization.
+ - Setting C(FE:FF:FF:00:00:00) means to preserve the OUI of the current MAC address and only randomize the lower 3 bytes using the
+ C(random) or C(stable) algorithm.
+ - If the value contains one additional MAC address after the mask, this address is used instead of the current MAC address to fill the bits
+ that shall not be randomized.
+ - For example, a value of C(FE:FF:FF:00:00:00 68:F7:28:00:00:00) will set the OUI of the MAC address to 68:F7:28, while the lower bits are
+ randomized.
+ - A value of C(02:00:00:00:00:00 00:00:00:00:00:00) will create a fully scrambled globally-administered, burned-in MAC address.
+ - If the value contains more than one additional MAC address, one of them is chosen randomly. For example,
+ C(02:00:00:00:00:00 00:00:00:00:00:00 02:00:00:00:00:00) will create a fully scrambled MAC address, randomly locally or globally
+ administered.
+ type: str
+ hidden:
+ description:
+ - If C(true), indicates that the network is a non-broadcasting network that hides its SSID. This works both in infrastructure and AP mode.
+ - In infrastructure mode, various workarounds are used for a more reliable discovery of hidden networks, such as probe-scanning the SSID.
+ However, these workarounds expose inherent insecurities with hidden SSID networks, and thus hidden SSID networks should be used with
+ caution.
+ - In AP mode, the created network does not broadcast its SSID.
+ - Note that marking the network as hidden may be a privacy issue for you (in infrastructure mode) or client stations (in AP mode), as the
+ explicit probe-scans are distinctly recognizable on the air.
+ type: bool
+ default: false
+ mac-address-blacklist:
+ description:
+ - A list of permanent MAC addresses of Wi-Fi devices to which this connection should never apply.
+ - Each MAC address should be given in the standard hex-digits-and-colons notation (for example, C(00:11:22:33:44:55)).
+ type: list
+ elements: str
+ mac-address-randomization:
+ description:
+ - One of C(0) (never randomize unless the user has set a global default to randomize and the supplicant supports randomization), C(1)
+ (never randomize the MAC address), or C(2) (always randomize the MAC address).
+ - This property is deprecated in favor of I(cloned-mac-address).
+ type: int
+ default: 0
+ choices: [ 0, 1, 2 ]
+ mac-address:
+ description:
+ - If specified, this connection will only apply to the Wi-Fi device whose permanent MAC address matches.
+ - This property does not change the MAC address of the device (for example for MAC spoofing).
+ type: str
+ mode:
+ description: Wi-Fi network mode. If blank, C(infrastructure) is assumed.
+ type: str
+ choices: [ infrastructure, mesh, adhoc, ap ]
+ default: infrastructure
+ mtu:
+ description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
+ type: int
+ default: 0
+ powersave:
+ description:
+ - One of C(2) (disable Wi-Fi power saving), C(3) (enable Wi-Fi power saving), C(1) (do not touch the currently configured setting) or
+ C(0) (use the globally configured value).
+ - All other values are reserved.
+ type: int
+ default: 0
+ choices: [ 0, 1, 2, 3 ]
+ rate:
+ description:
+ - If non-zero, directs the device to only use the specified bitrate for communication with the access point.
+ - Units are in Kb/s, so for example C(5500) = 5.5 Mbit/s.
+ - This property is highly driver dependent and not all devices support setting a static bitrate.
+ type: int
+ default: 0
+ tx-power:
+ description:
+ - If non-zero, directs the device to use the specified transmit power.
+ - Units are dBm.
+ - This property is highly driver dependent and not all devices support setting a static transmit power.
+ type: int
+ default: 0
+ wake-on-wlan:
+ description:
+ - The NMSettingWirelessWakeOnWLan options to enable. Not all devices support all options.
+ - May be any combination of C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_ANY) (C(0x2)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_DISCONNECT) (C(0x4)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_MAGIC) (C(0x8)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_GTK_REKEY_FAILURE) (C(0x10)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_EAP_IDENTITY_REQUEST) (C(0x20)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_4WAY_HANDSHAKE) (C(0x40)),
+ C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_RFKILL_RELEASE) (C(0x80)), C(NM_SETTING_WIRELESS_WAKE_ON_WLAN_TCP) (C(0x100)) or the special values
+ C(0x1) (to use global settings) and C(0x8000) (to disable management of Wake-on-LAN in NetworkManager).
+ - Note that to combine multiple options, the sum of their values must be specified.
+ type: int
+ default: 1
+ version_added: 3.5.0
+ ignore_unsupported_suboptions:
+ description:
+ - Ignore suboptions which are invalid or unsupported by the version of NetworkManager/nmcli installed on the host.
+ - Only I(wifi) and I(wifi_sec) options are currently affected.
+ type: bool
+ default: false
+ version_added: 3.6.0
+ gsm:
+ description:
+ - The configuration of the GSM connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-gsm.html).'
+ - 'For instance to use apn, pin, username and password:
+ C({apn: provider.apn, pin: 1234, username: apn.username, password: apn.password}).'
+ type: dict
+ version_added: 3.7.0
+ suboptions:
+ apn:
+ description:
+ - The GPRS Access Point Name specifying the APN used when establishing a data session with the GSM-based network.
+ - The APN often determines how the user will be billed for their network usage and whether the user has access to the Internet or
+ just a provider-specific walled-garden, so it is important to use the correct APN for the user's mobile broadband plan.
+ - The APN may only be composed of the characters a-z, 0-9, ., and - per GSM 03.60 Section 14.9.
+ type: str
+ auto-config:
+ description: When C(true), the settings such as I(gsm.apn), I(gsm.username), or I(gsm.password) will default to values that match the network
+ the modem will register to in the Mobile Broadband Provider database.
+ type: bool
+ default: false
+ device-id:
+ description:
+ - The device unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - If given, the connection will only apply to the specified device.
+ type: str
+ home-only:
+ description:
+ - When C(true), only connections to the home network will be allowed.
+ - Connections to roaming networks will not be made.
+ type: bool
+ default: false
+ mtu:
+ description: If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple Ethernet frames.
+ type: int
+ default: 0
+ network-id:
+ description:
+ - The Network ID (GSM LAI format, that is MCC-MNC) to force specific network registration.
+ - If the Network ID is specified, NetworkManager will attempt to force the device to register only on the specified network.
+ - This can be used to ensure that the device does not roam when direct roaming control of the device is not otherwise possible.
+ type: str
+ number:
+ description: Legacy setting that was used to help establish PPP data sessions for GSM-based modems.
+ type: str
+ password:
+ description:
+ - The password used to authenticate with the network, if required.
+ - Many providers do not require a password, or accept any password.
+ - But if a password is required, it is specified here.
+ type: str
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - 'Following choices are allowed:
+ C(0) B(NONE): The system is responsible for providing and storing this secret (default);
+ C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ asked to retrieve it;
+ C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
+ C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ pin:
+ description:
+ - If the SIM is locked with a PIN it must be unlocked before any other operations are requested.
+ - Specify the PIN here to allow operation of the device.
+ type: str
+ pin-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(gsm.pin) property.
+ - See I(gsm.password-flags) for NMSettingSecretFlags choices.
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ sim-id:
+ description:
+ - The SIM card unique identifier (as given by the C(WWAN) management service) which this connection applies to.
+ - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) which contains a SIM card matching
+ the given identifier.'
+ type: str
+ sim-operator-id:
+ description:
+ - A MCC/MNC string like C(310260) or C(21601) identifying the specific mobile network operator which this connection applies to.
+ - 'If given, the connection will apply to any device also allowed by I(gsm.device-id) and I(gsm.sim-id) which contains a SIM card
+ provisioned by the given operator.'
+ type: str
+ username:
+ description:
+ - The username used to authenticate with the network, if required.
+ - Many providers do not require a username, or accept any username.
+ - But if a username is required, it is specified here.
+ type: str
+ macvlan:
+ description:
+ - The configuration of the MAC VLAN connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-macvlan.html).'
+ type: dict
+ version_added: 6.6.0
+ suboptions:
+ mode:
+ description:
+ - The macvlan mode, which specifies the communication mechanism between multiple macvlans on the same lower device.
+ - 'Following choices are allowed: C(1) B(vepa), C(2) B(bridge), C(3) B(private), C(4) B(passthru)
+ and C(5) B(source).'
+ type: int
+ choices: [ 1, 2, 3, 4, 5 ]
+ required: true
+ parent:
+ description:
+ - If given, specifies the parent interface name or parent connection UUID from which this MAC-VLAN interface should
+ be created. If this property is not specified, the connection must contain an "802-3-ethernet" setting with a
+ "mac-address" property.
+ type: str
+ required: true
+ promiscuous:
+ description:
+ - Whether the interface should be put in promiscuous mode.
+ type: bool
+ tap:
+ description:
+ - Whether the interface should be a MACVTAP.
+ type: bool
+ wireguard:
+ description:
+ - The configuration of the WireGuard connection.
+ - Note the list of suboption attributes may vary depending on which version of NetworkManager/nmcli is installed on the host.
+ - 'An up-to-date list of supported attributes can be found here:
+ U(https://networkmanager.dev/docs/api/latest/settings-wireguard.html).'
+ - 'For instance to configure a listen port:
+ C({listen-port: 12345}).'
+ type: dict
+ version_added: 4.3.0
+ suboptions:
+ fwmark:
+ description:
+ - The 32-bit fwmark for outgoing packets.
+ - The use of fwmark is optional and is by default off. Setting it to 0 disables it.
+ - Note that enabling I(wireguard.ip4-auto-default-route) or I(wireguard.ip6-auto-default-route) implies automatically choosing a fwmark.
+ type: int
+ ip4-auto-default-route:
+ description:
+ - Whether to enable special handling of the IPv4 default route.
+ - If enabled, the IPv4 default route from I(wireguard.peer-routes) will be placed to a dedicated routing-table and two policy
+ routing rules will be added.
+ - The fwmark number is also used as routing-table for the default-route, and if fwmark is zero, an unused fwmark/table is chosen
+ automatically. This corresponds to what wg-quick does with Table=auto and what WireGuard calls "Improved Rule-based Routing".
+ type: bool
+ ip6-auto-default-route:
+ description:
+ - Like I(wireguard.ip4-auto-default-route), but for the IPv6 default route.
+ type: bool
+ listen-port:
+ description: The WireGuard connection listen-port. If not specified, the port will be chosen randomly when the
+ interface comes up.
+ type: int
+ mtu:
+ description:
+ - If non-zero, only transmit packets of the specified size or smaller, breaking larger packets up into multiple fragments.
+ - If zero a default MTU is used. Note that contrary to wg-quick's MTU setting, this does not take into account the current routes
+ at the time of activation.
+ type: int
+ peer-routes:
+ description:
+ - Whether to automatically add routes for the AllowedIPs ranges of the peers.
+ - If C(true) (the default), NetworkManager will automatically add routes in the routing tables according to C(ipv4.route-table) and
+ C(ipv6.route-table). Usually you want this automatism enabled.
+ - If C(false), no such routes are added automatically. In this case, the user may want to configure static routes in C(ipv4.routes)
+ and C(ipv6.routes), respectively.
+ - Note that if the peer's AllowedIPs is C(0.0.0.0/0) or C(::/0) and the profile's C(ipv4.never-default) or C(ipv6.never-default)
+ setting is enabled, the peer route for this peer won't be added automatically.
+ type: bool
+ private-key:
+ description: The 256 bit private-key in base64 encoding.
+ type: str
+ private-key-flags:
+ description: C(NMSettingSecretFlags) indicating how to handle the I(wireguard.private-key) property.
+ type: int
+ choices: [ 0, 1, 2 ]
+ vpn:
+ description:
+ - Configuration of a VPN connection (PPTP and L2TP).
+ - In order to use L2TP you need to be sure that C(network-manager-l2tp) - and C(network-manager-l2tp-gnome)
+ if the host has a UI - are installed on the host.
+ type: dict
+ version_added: 5.1.0
+ suboptions:
+ permissions:
+ description: User that will have permission to use the connection.
+ type: str
+ required: true
+ service-type:
+ description: This defines the service type of the connection.
+ type: str
+ required: true
+ gateway:
+ description: The gateway to the connection. It can be an IP address (for example C(192.0.2.1))
+ or a FQDN address (for example C(vpn.example.com)).
+ type: str
+ required: true
+ password-flags:
+ description:
+ - NMSettingSecretFlags indicating how to handle the I(password) property.
+ - 'Following choices are allowed:
+ C(0) B(NONE): The system is responsible for providing and storing this secret (default);
+ C(1) B(AGENT_OWNED): A user secret agent is responsible for providing and storing this secret; when it is required agents will be
+ asked to retrieve it;
+ C(2) B(NOT_SAVED): This secret should not be saved, but should be requested from the user each time it is needed;
+ C(4) B(NOT_REQUIRED): In situations where it cannot be automatically determined that the secret is required
+ (some VPNs and PPP providers do not require all secrets) this flag indicates that the specific secret is not required.'
+ type: int
+ choices: [ 0, 1, 2, 4 ]
+ default: 0
+ user:
+ description: Username provided by VPN administrator.
+ type: str
+ required: true
+ ipsec-enabled:
+ description:
+ - Enable or disable IPSec tunnel to L2TP host.
+ - This option is needed when C(service-type) is C(org.freedesktop.NetworkManager.l2tp).
+ type: bool
+ ipsec-psk:
+ description:
+ - The pre-shared key in base64 encoding.
+ - >
+ You can encode using this Ansible jinja2 expression: C("0s{{ '[YOUR PRE-SHARED KEY]' | ansible.builtin.b64encode }}").
+ - This is only used when I(ipsec-enabled=true).
+ type: str
+'''
+
+EXAMPLES = r'''
+# These examples are using the following inventory:
+#
+# ## Directory layout:
+#
+# ```
+# |_/inventory/cloud-hosts
+# | /group_vars/openstack-stage.yml
+# | /host_vars/controller-01.openstack.host.com
+# | /host_vars/controller-02.openstack.host.com
+# |_/playbook/library/nmcli.py
+# | /playbook-add.yml
+# | /playbook-del.yml
+# ```
+#
+# ## inventory examples
+# ### group_vars
+# ```yml
+# ---
+# #devops_os_define_network
+# storage_gw: "192.0.2.254"
+# external_gw: "198.51.100.254"
+# tenant_gw: "203.0.113.254"
+#
+# #Team vars
+# nmcli_team:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# nmcli_team_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #bond vars
+# nmcli_bond:
+# - conn_name: tenant
+# ip4: '{{ tenant_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: external
+# ip4: '{{ external_ip }}'
+# gw4: ''
+# mode: balance-rr
+# - conn_name: storage
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# mode: balance-rr
+# nmcli_bond_slave:
+# - conn_name: em1
+# ifname: em1
+# master: tenant
+# - conn_name: em2
+# ifname: em2
+# master: tenant
+# - conn_name: p2p1
+# ifname: p2p1
+# master: storage
+# - conn_name: p2p2
+# ifname: p2p2
+# master: external
+#
+# #ethernet vars
+# nmcli_ethernet:
+# - conn_name: em1
+# ifname: em1
+# ip4:
+# - '{{ tenant_ip }}'
+# - '{{ second_tenant_ip }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: em2
+# ifname: em2
+# ip4: '{{ tenant_ip1 }}'
+# gw4: '{{ tenant_gw }}'
+# - conn_name: p2p1
+# ifname: p2p1
+# ip4: '{{ storage_ip }}'
+# gw4: '{{ storage_gw }}'
+# - conn_name: p2p2
+# ifname: p2p2
+# ip4: '{{ external_ip }}'
+# gw4: '{{ external_gw }}'
+# ```
+#
+# ### host_vars
+# ```yml
+# ---
+# storage_ip: "192.0.2.91/23"
+# external_ip: "198.51.100.23/21"
+# tenant_ip: "203.0.113.77/23"
+# second_tenant_ip: "204.0.113.77/23"
+# ```
+
+
+
+## playbook-add.yml example
+
+---
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Install needed network manager libs
+ ansible.builtin.package:
+ name:
+ - NetworkManager-libnm
+ - nm-connection-editor
+ - libsemanage-python
+ - policycoreutils-python
+ state: present
+
+##### Working with all cloud nodes - Teaming
+ - name: Try nmcli add team - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: team
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team }}'
+
+ - name: Try nmcli add team-slave
+ community.general.nmcli:
+ type: team-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_team_slave }}'
+
+##### Working with all cloud nodes - Bonding
+ - name: Try nmcli add bond - conn_name only & ip4 gw4 mode
+ community.general.nmcli:
+ type: bond
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ mode: '{{ item.mode }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond }}'
+
+ - name: Try nmcli add bond-slave
+ community.general.nmcli:
+ type: bond-slave
+ conn_name: '{{ item.conn_name }}'
+ ifname: '{{ item.ifname }}'
+ master: '{{ item.master }}'
+ state: present
+ with_items:
+ - '{{ nmcli_bond_slave }}'
+
+##### Working with all cloud nodes - Ethernet
+ - name: Try nmcli add Ethernet - conn_name only & ip4 gw4
+ community.general.nmcli:
+ type: ethernet
+ conn_name: '{{ item.conn_name }}'
+ ip4: '{{ item.ip4 }}'
+ gw4: '{{ item.gw4 }}'
+ state: present
+ with_items:
+ - '{{ nmcli_ethernet }}'
+
+## playbook-del.yml example
+- hosts: openstack-stage
+ remote_user: root
+ tasks:
+
+ - name: Try nmcli del team - multiple
+ community.general.nmcli:
+ conn_name: '{{ item.conn_name }}'
+ state: absent
+ with_items:
+ - conn_name: em1
+ - conn_name: em2
+ - conn_name: p1p1
+ - conn_name: p1p2
+ - conn_name: p2p1
+ - conn_name: p2p2
+ - conn_name: tenant
+ - conn_name: storage
+ - conn_name: external
+ - conn_name: team-em1
+ - conn_name: team-em2
+ - conn_name: team-p1p1
+ - conn_name: team-p1p2
+ - conn_name: team-p2p1
+ - conn_name: team-p2p2
+
+ - name: Add an Ethernet connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+
+ - name: Add a Team connection with static IP configuration
+ community.general.nmcli:
+ conn_name: my-team1
+ ifname: my-team1
+ type: team
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ state: present
+ autoconnect: true
+
+ - name: Optionally, at the same time specify IPv6 addresses for the device
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4: 192.0.2.100/24
+ gw4: 192.0.2.1
+ ip6: 2001:db8::cafe
+ gw6: 2001:db8::1
+ state: present
+
+ - name: Add two IPv4 DNS server addresses
+ community.general.nmcli:
+ conn_name: my-eth1
+ type: ethernet
+ dns4:
+ - 192.0.2.53
+ - 198.51.100.53
+ state: present
+
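+ # A minimal sketch of the structured routes4_extended form; the network values below are placeholders.
+ - name: Add a static route using routes4_extended
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ routes4_extended:
+ - ip: 198.51.100.0/24
+ next_hop: 192.0.2.1
+ metric: 42
+ onlink: true
+ state: present
+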
+ - name: Make a profile usable for all compatible Ethernet interfaces
+ community.general.nmcli:
+ ctype: ethernet
+ name: my-eth1
+ ifname: '*'
+ state: present
+
+ - name: Change the property of a setting, for example MTU
+ community.general.nmcli:
+ conn_name: my-eth1
+ mtu: 9000
+ type: ethernet
+ state: present
+
+ - name: Add second ip4 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip4:
+ - 192.0.2.100/24
+ - 192.0.3.100/24
+ state: present
+
+ - name: Add second ip6 address
+ community.general.nmcli:
+ conn_name: my-eth1
+ ifname: eth1
+ type: ethernet
+ ip6:
+ - 2001:db8::cafe
+ - 2002:db8::cafe
+ state: present
+
+ - name: Add VxLan
+ community.general.nmcli:
+ type: vxlan
+ conn_name: vxlan_test1
+ vxlan_id: 16
+ vxlan_local: 192.168.1.2
+ vxlan_remote: 192.168.1.5
+
+ - name: Add gre
+ community.general.nmcli:
+ type: gre
+ conn_name: gre_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
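+ # A minimal sketch of the GRE tunnel keys added in 3.6.0; the key values are placeholders.
+ - name: Add gre with tunnel keys
+ community.general.nmcli:
+ type: gre
+ conn_name: gre_test2
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+ ip_tunnel_input_key: '1234'
+ ip_tunnel_output_key: '5678'
+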
+ - name: Add ipip
+ community.general.nmcli:
+ type: ipip
+ conn_name: ipip_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add sit
+ community.general.nmcli:
+ type: sit
+ conn_name: sit_test1
+ ip_tunnel_dev: eth0
+ ip_tunnel_local: 192.168.1.2
+ ip_tunnel_remote: 192.168.1.5
+
+ - name: Add zone
+ community.general.nmcli:
+ type: ethernet
+ conn_name: my-eth1
+ zone: external
+ state: present
+
+# nmcli exits with status 0 if it succeeds and exits with a status greater
+# than zero when there is a failure. The following list of status codes may be
+# returned:
+#
+# - 0 Success - indicates the operation succeeded
+# - 1 Unknown or unspecified error
+# - 2 Invalid user input, wrong nmcli invocation
+# - 3 Timeout expired (see --wait option)
+# - 4 Connection activation failed
+# - 5 Connection deactivation failed
+# - 6 Disconnecting device failed
+# - 7 Connection deletion failed
+# - 8 NetworkManager is not running
+# - 9 nmcli and NetworkManager versions mismatch
+# - 10 Connection, device, or access point does not exist.
+
+- name: Create the wifi connection
+ community.general.nmcli:
+ type: wifi
+ conn_name: Brittany
+ ifname: wlp4s0
+ ssid: Brittany
+ wifi_sec:
+ key-mgmt: wpa-psk
+ psk: my_password
+ autoconnect: true
+ state: present
+
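+# A sketch of ignore_unsupported_suboptions (added in 3.6.0); with it set, wifi/wifi_sec attributes
+# the installed NetworkManager/nmcli does not know only produce a warning instead of a failure.
+- name: Create the wifi connection, ignoring unsupported suboptions
+ community.general.nmcli:
+ type: wifi
+ conn_name: Brittany
+ ifname: wlp4s0
+ ssid: Brittany
+ wifi_sec:
+ key-mgmt: wpa-psk
+ psk: my_password
+ ignore_unsupported_suboptions: true
+ state: present
+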
+- name: Create a hidden AP mode wifi connection
+ community.general.nmcli:
+ type: wifi
+ conn_name: ChocoMaster
+ ifname: wlo1
+ ssid: ChocoMaster
+ wifi:
+ hidden: true
+ mode: ap
+ autoconnect: true
+ state: present
+
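+# A sketch locking a wifi connection to the 5GHz band; note I(channel) also requires I(band) to be set.
+# The connection name, SSID, and channel are placeholders.
+- name: Create a wifi connection locked to the 5GHz band
+ community.general.nmcli:
+ type: wifi
+ conn_name: FiveGHzNet
+ ifname: wlo1
+ ssid: FiveGHzNet
+ wifi:
+ band: a
+ channel: 36
+ state: present
+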
+- name: Create a gsm connection
+ community.general.nmcli:
+ type: gsm
+ conn_name: my-gsm-provider
+ ifname: cdc-wdm0
+ gsm:
+ apn: my.provider.apn
+ username: my-provider-username
+ password: my-provider-password
+ pin: my-sim-pin
+ autoconnect: true
+ state: present
+
+- name: Create a macvlan connection
+ community.general.nmcli:
+ type: macvlan
+ conn_name: my-macvlan-connection
+ ifname: mymacvlan0
+ macvlan:
+ mode: 2
+ parent: eth1
+ autoconnect: true
+ state: present
+
+- name: Create a wireguard connection
+ community.general.nmcli:
+ type: wireguard
+ conn_name: my-wg-provider
+ ifname: mywg0
+ wireguard:
+ listen-port: 51820
+ private-key: my-private-key
+ autoconnect: true
+ state: present
+
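+# A sketch setting an explicit WireGuard fwmark for outgoing packets (C(0) disables it); the values are placeholders.
+- name: Create a wireguard connection with a fixed fwmark
+ community.general.nmcli:
+ type: wireguard
+ conn_name: my-wg-fwmark
+ ifname: mywg1
+ wireguard:
+ listen-port: 51820
+ fwmark: 51820
+ autoconnect: true
+ state: present
+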
+- name: >-
+ Create a VPN L2TP connection for ansible_user to connect to vpn.example.com,
+ authenticating as user 'brittany' with pre-shared key 'Brittany123'
+ community.general.nmcli:
+ type: vpn
+ conn_name: my-vpn-connection
+ vpn:
+ permissions: "{{ ansible_user }}"
+ service-type: org.freedesktop.NetworkManager.l2tp
+ gateway: vpn.example.com
+ password-flags: 2
+ user: brittany
+ ipsec-enabled: true
+ ipsec-psk: "0s{{ 'Brittany123' | ansible.builtin.b64encode }}"
+ autoconnect: false
+ state: present
+
+'''
+
+RETURN = r"""#
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+import re
+
+
+class NmcliModuleError(Exception):
+ pass
+
+
+class Nmcli(object):
+ """
+ This is the generic nmcli manipulation class that is subclassed based on platform.
+ A subclass may wish to override the following action methods:
+ - create_connection()
+ - delete_connection()
+ - edit_connection()
+ - modify_connection()
+ - show_connection()
+ - up_connection()
+ - down_connection()
+ All subclasses MUST define platform and distribution (which may be None).
+ """
+
+ platform = 'Generic'
+ distribution = None
+
+ SECRET_OPTIONS = (
+ '802-11-wireless-security.leap-password',
+ '802-11-wireless-security.psk',
+ '802-11-wireless-security.wep-key0',
+ '802-11-wireless-security.wep-key1',
+ '802-11-wireless-security.wep-key2',
+ '802-11-wireless-security.wep-key3'
+ )
+
+ def __init__(self, module):
+ self.module = module
+ self.state = module.params['state']
+ self.ignore_unsupported_suboptions = module.params['ignore_unsupported_suboptions']
+ self.autoconnect = module.params['autoconnect']
+ self.conn_name = module.params['conn_name']
+ self.master = module.params['master']
+ self.ifname = module.params['ifname']
+ self.type = module.params['type']
+ self.ip4 = module.params['ip4']
+ self.gw4 = module.params['gw4']
+ self.gw4_ignore_auto = module.params['gw4_ignore_auto']
+ self.routes4 = module.params['routes4']
+ self.routes4_extended = module.params['routes4_extended']
+ self.route_metric4 = module.params['route_metric4']
+ self.routing_rules4 = module.params['routing_rules4']
+ self.never_default4 = module.params['never_default4']
+ self.dns4 = module.params['dns4']
+ self.dns4_search = module.params['dns4_search']
+ self.dns4_ignore_auto = module.params['dns4_ignore_auto']
+ self.method4 = module.params['method4']
+ self.may_fail4 = module.params['may_fail4']
+ self.ip6 = module.params['ip6']
+ self.gw6 = module.params['gw6']
+ self.gw6_ignore_auto = module.params['gw6_ignore_auto']
+ self.routes6 = module.params['routes6']
+ self.routes6_extended = module.params['routes6_extended']
+ self.route_metric6 = module.params['route_metric6']
+ self.dns6 = module.params['dns6']
+ self.dns6_search = module.params['dns6_search']
+ self.dns6_ignore_auto = module.params['dns6_ignore_auto']
+ self.method6 = module.params['method6']
+ self.ip_privacy6 = module.params['ip_privacy6']
+ self.addr_gen_mode6 = module.params['addr_gen_mode6']
+ self.mtu = module.params['mtu']
+ self.stp = module.params['stp']
+ self.priority = module.params['priority']
+ self.mode = module.params['mode']
+ self.miimon = module.params['miimon']
+ self.primary = module.params['primary']
+ self.downdelay = module.params['downdelay']
+ self.updelay = module.params['updelay']
+ self.xmit_hash_policy = module.params['xmit_hash_policy']
+ self.arp_interval = module.params['arp_interval']
+ self.arp_ip_target = module.params['arp_ip_target']
+ self.slavepriority = module.params['slavepriority']
+ self.forwarddelay = module.params['forwarddelay']
+ self.hellotime = module.params['hellotime']
+ self.maxage = module.params['maxage']
+ self.ageingtime = module.params['ageingtime']
+ # hairpin should go back to being a normal parameter (without the deprecation shim below) in community.general 7.0.0
+ self._hairpin = module.params['hairpin']
+ self.path_cost = module.params['path_cost']
+ self.mac = module.params['mac']
+ self.runner = module.params['runner']
+ self.runner_hwaddr_policy = module.params['runner_hwaddr_policy']
+ self.runner_fast_rate = module.params['runner_fast_rate']
+ self.vlanid = module.params['vlanid']
+ self.vlandev = module.params['vlandev']
+ self.flags = module.params['flags']
+ self.ingress = module.params['ingress']
+ self.egress = module.params['egress']
+ self.vxlan_id = module.params['vxlan_id']
+ self.vxlan_local = module.params['vxlan_local']
+ self.vxlan_remote = module.params['vxlan_remote']
+ self.ip_tunnel_dev = module.params['ip_tunnel_dev']
+ self.ip_tunnel_local = module.params['ip_tunnel_local']
+ self.ip_tunnel_remote = module.params['ip_tunnel_remote']
+ self.ip_tunnel_input_key = module.params['ip_tunnel_input_key']
+ self.ip_tunnel_output_key = module.params['ip_tunnel_output_key']
+ self.nmcli_bin = self.module.get_bin_path('nmcli', True)
+ self.dhcp_client_id = module.params['dhcp_client_id']
+ self.zone = module.params['zone']
+ self.ssid = module.params['ssid']
+ self.wifi = module.params['wifi']
+ self.wifi_sec = module.params['wifi_sec']
+ self.gsm = module.params['gsm']
+ self.macvlan = module.params['macvlan']
+ self.wireguard = module.params['wireguard']
+ self.vpn = module.params['vpn']
+ self.transport_mode = module.params['transport_mode']
+
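+ # Pick the IPv4/IPv6 method: an explicit method4/method6 always wins; interface types that
+ # usually carry no address (dummy, macvlan, wireguard) default to 'disabled' when no address
+ # is given; a static address without an explicit method implies 'manual'.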
+ if self.method4:
+ self.ipv4_method = self.method4
+ elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip4:
+ self.ipv4_method = 'disabled'
+ elif self.ip4:
+ self.ipv4_method = 'manual'
+ else:
+ self.ipv4_method = None
+
+ if self.method6:
+ self.ipv6_method = self.method6
+ elif self.type in ('dummy', 'macvlan', 'wireguard') and not self.ip6:
+ self.ipv6_method = 'disabled'
+ elif self.ip6:
+ self.ipv6_method = 'manual'
+ else:
+ self.ipv6_method = None
+
+ self.edit_commands = []
+
+ @property
+ def hairpin(self):
+ if self._hairpin is None:
+ self.module.deprecate(
+ "Parameter 'hairpin' default value will change from true to false in community.general 7.0.0. "
+ "Set the value explicitly to suppress this warning.",
+ version='7.0.0', collection_name='community.general',
+ )
+ # The default should become False in community.general 7.0.0, at which point it belongs in the argument spec instead
+ self._hairpin = True
+ return self._hairpin
+
+ def execute_command(self, cmd, use_unsafe_shell=False, data=None):
+ if isinstance(cmd, list):
+ cmd = [to_text(item) for item in cmd]
+ else:
+ cmd = to_text(cmd)
+ return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
+
+ def execute_edit_commands(self, commands, arguments):
+ arguments = arguments or []
+ cmd = [self.nmcli_bin, 'con', 'edit'] + arguments
+ data = "\n".join(commands)
+ return self.execute_command(cmd, data=data)
+
+ def connection_options(self, detect_change=False):
+ # Options common to multiple connection types.
+ options = {
+ 'connection.autoconnect': self.autoconnect,
+ 'connection.zone': self.zone,
+ }
+
+ # IP address options.
+ if self.ip_conn_type and not self.master:
+ options.update({
+ 'ipv4.addresses': self.enforce_ipv4_cidr_notation(self.ip4),
+ 'ipv4.dhcp-client-id': self.dhcp_client_id,
+ 'ipv4.dns': self.dns4,
+ 'ipv4.dns-search': self.dns4_search,
+ 'ipv4.ignore-auto-dns': self.dns4_ignore_auto,
+ 'ipv4.gateway': self.gw4,
+ 'ipv4.ignore-auto-routes': self.gw4_ignore_auto,
+ 'ipv4.routes': self.enforce_routes_format(self.routes4, self.routes4_extended),
+ 'ipv4.route-metric': self.route_metric4,
+ 'ipv4.routing-rules': self.routing_rules4,
+ 'ipv4.never-default': self.never_default4,
+ 'ipv4.method': self.ipv4_method,
+ 'ipv6.addresses': self.enforce_ipv6_cidr_notation(self.ip6),
+ 'ipv6.dns': self.dns6,
+ 'ipv6.dns-search': self.dns6_search,
+ 'ipv6.ignore-auto-dns': self.dns6_ignore_auto,
+ 'ipv6.gateway': self.gw6,
+ 'ipv6.ignore-auto-routes': self.gw6_ignore_auto,
+ 'ipv6.routes': self.enforce_routes_format(self.routes6, self.routes6_extended),
+ 'ipv6.route-metric': self.route_metric6,
+ 'ipv6.method': self.ipv6_method,
+ 'ipv6.ip6-privacy': self.ip_privacy6,
+ 'ipv6.addr-gen-mode': self.addr_gen_mode6
+ })
+ # When 'method' is disabled, 'may-fail' makes no sense, but nmcli accepts it and keeps it at 'yes';
+ # force it to be ignored to preserve idempotency
+ if self.ipv4_method and self.ipv4_method != 'disabled':
+ options.update({'ipv4.may-fail': self.may_fail4})
+
+ # Layer 2 options.
+ if self.mac:
+ options.update({self.mac_setting: self.mac})
+
+ if self.mtu_conn_type:
+ options.update({self.mtu_setting: self.mtu})
+
+ # Connections that can have a master.
+ if self.slave_conn_type:
+ options.update({
+ 'connection.master': self.master,
+ })
+
+ # Options specific to a connection type.
+ if self.type == 'bond':
+ options.update({
+ 'arp-interval': self.arp_interval,
+ 'arp-ip-target': self.arp_ip_target,
+ 'downdelay': self.downdelay,
+ 'miimon': self.miimon,
+ 'mode': self.mode,
+ 'primary': self.primary,
+ 'updelay': self.updelay,
+ 'xmit_hash_policy': self.xmit_hash_policy,
+ })
+ elif self.type == 'bond-slave':
+ options.update({
+ 'connection.slave-type': 'bond',
+ })
+ elif self.type == 'bridge':
+ options.update({
+ 'bridge.ageing-time': self.ageingtime,
+ 'bridge.forward-delay': self.forwarddelay,
+ 'bridge.hello-time': self.hellotime,
+ 'bridge.max-age': self.maxage,
+ 'bridge.stp': self.stp,
+ })
+ # priority only makes sense when stp is enabled; otherwise nmcli keeps bridge-priority at 32768 regardless of input.
+ # force it to be ignored to preserve idempotency
+ if self.stp:
+ options.update({'bridge.priority': self.priority})
+ elif self.type == 'team':
+ options.update({
+ 'team.runner': self.runner,
+ 'team.runner-hwaddr-policy': self.runner_hwaddr_policy,
+ })
+ if self.runner_fast_rate is not None:
+ options.update({
+ 'team.runner-fast-rate': self.runner_fast_rate,
+ })
+ elif self.type == 'bridge-slave':
+ options.update({
+ 'connection.slave-type': 'bridge',
+ 'bridge-port.path-cost': self.path_cost,
+ 'bridge-port.hairpin-mode': self.hairpin,
+ 'bridge-port.priority': self.slavepriority,
+ })
+ elif self.type == 'team-slave':
+ options.update({
+ 'connection.slave-type': 'team',
+ })
+ elif self.tunnel_conn_type:
+ options.update({
+ 'ip-tunnel.local': self.ip_tunnel_local,
+ 'ip-tunnel.mode': self.type,
+ 'ip-tunnel.parent': self.ip_tunnel_dev,
+ 'ip-tunnel.remote': self.ip_tunnel_remote,
+ })
+ if self.type == 'gre':
+ options.update({
+ 'ip-tunnel.input-key': self.ip_tunnel_input_key,
+ 'ip-tunnel.output-key': self.ip_tunnel_output_key
+ })
+ elif self.type == 'vlan':
+ options.update({
+ 'vlan.id': self.vlanid,
+ 'vlan.parent': self.vlandev,
+ 'vlan.flags': self.flags,
+ 'vlan.ingress': self.ingress,
+ 'vlan.egress': self.egress,
+ })
+ elif self.type == 'vxlan':
+ options.update({
+ 'vxlan.id': self.vxlan_id,
+ 'vxlan.local': self.vxlan_local,
+ 'vxlan.remote': self.vxlan_remote,
+ })
+ elif self.type == 'wifi':
+ options.update({
+ '802-11-wireless.ssid': self.ssid,
+ 'connection.slave-type': 'bond' if self.master else None,
+ })
+ if self.wifi:
+ for name, value in self.wifi.items():
+ options.update({
+ '802-11-wireless.%s' % name: value
+ })
+ if self.wifi_sec:
+ for name, value in self.wifi_sec.items():
+ options.update({
+ '802-11-wireless-security.%s' % name: value
+ })
+ elif self.type == 'gsm':
+ if self.gsm:
+ for name, value in self.gsm.items():
+ options.update({
+ 'gsm.%s' % name: value,
+ })
+ elif self.type == 'macvlan':
+ if self.macvlan:
+ for name, value in self.macvlan.items():
+ options.update({
+ 'macvlan.%s' % name: value,
+ })
+ elif self.state == 'present':
+ raise NmcliModuleError('type is macvlan but all of the following are missing: macvlan')
+ elif self.type == 'wireguard':
+ if self.wireguard:
+ for name, value in self.wireguard.items():
+ options.update({
+ 'wireguard.%s' % name: value,
+ })
+ elif self.type == 'vpn':
+ if self.vpn:
+ vpn_data_values = ''
+ for name, value in self.vpn.items():
+ if name == 'service-type':
+ options.update({
+ 'vpn.service-type': value,
+ })
+ elif name == 'permissions':
+ options.update({
+ 'connection.permissions': value,
+ })
+ else:
+ if vpn_data_values != '':
+ vpn_data_values += ', '
+
+ if isinstance(value, bool):
+ value = self.bool_to_string(value)
+
+ vpn_data_values += '%s=%s' % (name, value)
+ options.update({
+ 'vpn.data': vpn_data_values,
+ })
+ elif self.type == 'infiniband':
+ options.update({
+ 'infiniband.transport-mode': self.transport_mode,
+ })
+
+ # Convert settings values based on the situation.
+ for setting, value in options.items():
+ setting_type = self.settings_type(setting)
+ convert_func = None
+ if setting_type is bool:
+ # Convert all bool options to yes/no.
+ convert_func = self.bool_to_string
+ if detect_change:
+ if setting in ('vlan.id', 'vxlan.id'):
+ # Convert VLAN/VXLAN IDs to text when detecting changes.
+ convert_func = to_text
+ elif setting == self.mtu_setting:
+ # MTU is 'auto' by default when detecting changes.
+ convert_func = self.mtu_to_string
+ elif setting == 'ipv6.ip6-privacy':
+ convert_func = self.ip6_privacy_to_num
+ elif setting_type is list:
+ # Convert lists to strings for nmcli create/modify commands.
+ convert_func = self.list_to_string
+
+ if callable(convert_func):
+ options[setting] = convert_func(options[setting])
+
+ return options
+
+ @property
+ def ip_conn_type(self):
+ return self.type in (
+ 'bond',
+ 'bridge',
+ 'dummy',
+ 'ethernet',
+ '802-3-ethernet',
+ 'generic',
+ 'gre',
+ 'infiniband',
+ 'ipip',
+ 'sit',
+ 'team',
+ 'vlan',
+ 'wifi',
+ '802-11-wireless',
+ 'gsm',
+ 'macvlan',
+ 'wireguard',
+ 'vpn',
+ )
+
+ @property
+ def mac_setting(self):
+ if self.type == 'bridge':
+ return 'bridge.mac-address'
+ else:
+ return '802-3-ethernet.cloned-mac-address'
+
+ @property
+ def mtu_conn_type(self):
+ return self.type in (
+ 'dummy',
+ 'ethernet',
+ 'team-slave',
+ 'vlan',
+ )
+
+ @property
+ def mtu_setting(self):
+ return '802-3-ethernet.mtu'
+
+ @staticmethod
+ def mtu_to_string(mtu):
+ if not mtu:
+ return 'auto'
+ else:
+ return to_text(mtu)
+
+ @staticmethod
+ def ip6_privacy_to_num(privacy):
+ ip6_privacy_values = {
+ 'disabled': '0',
+ 'prefer-public-addr': '1 (enabled, prefer public IP)',
+ 'prefer-temp-addr': '2 (enabled, prefer temporary IP)',
+ 'unknown': '-1',
+ }
+
+ if privacy is None:
+ return None
+
+ if privacy not in ip6_privacy_values:
+ raise AssertionError('{privacy} is invalid ip_privacy6 option'.format(privacy=privacy))
+
+ return ip6_privacy_values[privacy]
+
+ @property
+ def slave_conn_type(self):
+ return self.type in (
+ 'bond-slave',
+ 'bridge-slave',
+ 'team-slave',
+ 'wifi',
+ )
+
+ @property
+ def tunnel_conn_type(self):
+ return self.type in (
+ 'gre',
+ 'ipip',
+ 'sit',
+ )
+
+ @staticmethod
+ def enforce_ipv4_cidr_notation(ip4_addresses):
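+ # Addresses given without an explicit prefix get a /32 host mask, for example '192.0.2.1' becomes '192.0.2.1/32'.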
+ if ip4_addresses is None:
+ return None
+ return [address if '/' in address else address + '/32' for address in ip4_addresses]
+
+ @staticmethod
+ def enforce_ipv6_cidr_notation(ip6_addresses):
+ if ip6_addresses is None:
+ return None
+ return [address if '/' in address else address + '/128' for address in ip6_addresses]
+
+ def enforce_routes_format(self, routes, routes_extended):
+ if routes is not None:
+ return routes
+ elif routes_extended is not None:
+ return [self.route_to_string(route) for route in routes_extended]
+ else:
+ return None
+
+ @staticmethod
+ def route_to_string(route):
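+ # Render an extended route dict in nmcli's textual route format, for example
+ # {'ip': '198.51.100.0/24', 'next_hop': '192.0.2.1', 'metric': 42, 'onlink': True}
+ # becomes '198.51.100.0/24 192.0.2.1 42 onlink=true'.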
+ result_str = ''
+ result_str += route['ip']
+ if route.get('next_hop') is not None:
+ result_str += ' ' + route['next_hop']
+ if route.get('metric') is not None:
+ result_str += ' ' + str(route['metric'])
+
+ for attribute, value in sorted(route.items()):
+ if attribute not in ('ip', 'next_hop', 'metric') and value is not None:
+ result_str += ' {0}={1}'.format(attribute, str(value).lower())
+
+ return result_str
+
+ @staticmethod
+ def bool_to_string(boolean):
+ if boolean:
+ return "yes"
+ else:
+ return "no"
+
+ @staticmethod
+ def list_to_string(lst):
+ if lst is None:
+ return None
+ else:
+ return ",".join(lst)
+
+ @staticmethod
+ def settings_type(setting):
+ if setting in ('bridge.stp',
+ 'bridge-port.hairpin-mode',
+ 'connection.autoconnect',
+ 'ipv4.never-default',
+ 'ipv4.ignore-auto-dns',
+ 'ipv4.ignore-auto-routes',
+ 'ipv4.may-fail',
+ 'ipv6.ignore-auto-dns',
+ 'ipv6.ignore-auto-routes',
+ '802-11-wireless.hidden',
+ 'team.runner-fast-rate'):
+ return bool
+ elif setting in ('ipv4.addresses',
+ 'ipv6.addresses',
+ 'ipv4.dns',
+ 'ipv4.dns-search',
+ 'ipv4.routes',
+ 'ipv4.routing-rules',
+ 'ipv6.dns',
+ 'ipv6.dns-search',
+ 'ipv6.routes',
+ '802-11-wireless-security.group',
+ '802-11-wireless-security.leap-password-flags',
+ '802-11-wireless-security.pairwise',
+ '802-11-wireless-security.proto',
+ '802-11-wireless-security.psk-flags',
+ '802-11-wireless-security.wep-key-flags',
+ '802-11-wireless.mac-address-blacklist'):
+ return list
+ return str
+
+ def get_route_params(self, raw_values):
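+ # Normalize routes as nmcli displays them (for example '{ ip = 198.51.100.0/24, nh = 192.0.2.1, mt = 42 }')
+ # back into the canonical strings produced by route_to_string() so current and desired values compare cleanly.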
+ routes_params = []
+ for raw_value in raw_values:
+ route_params = {}
+ for parameter, value in re.findall(r'([\w-]*)\s?=\s?([^\s,}]*)', raw_value):
+ if parameter == 'nh':
+ route_params['next_hop'] = value
+ elif parameter == 'mt':
+ route_params['metric'] = value
+ else:
+ route_params[parameter] = value
+ routes_params.append(route_params)
+ return [self.route_to_string(route_params) for route_params in routes_params]
+
+ def list_connection_info(self):
+ cmd = [self.nmcli_bin, '--fields', 'name', '--terse', 'con', 'show']
+ (rc, out, err) = self.execute_command(cmd)
+ if rc != 0:
+ raise NmcliModuleError(err)
+ return out.splitlines()
+
+ def connection_exists(self):
+ return self.conn_name in self.list_connection_info()
+
+ def down_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'down', self.conn_name]
+ return self.execute_command(cmd)
+
+ def up_connection(self):
+ cmd = [self.nmcli_bin, 'con', 'up', self.conn_name]
+ return self.execute_command(cmd)
+
+ def connection_update(self, nmcli_command):
+ if nmcli_command == 'create':
+ cmd = [self.nmcli_bin, 'con', 'add', 'type']
+ if self.tunnel_conn_type:
+ cmd.append('ip-tunnel')
+ else:
+ cmd.append(self.type)
+ cmd.append('con-name')
+ elif nmcli_command == 'modify':
+ cmd = [self.nmcli_bin, 'con', 'modify']
+ else:
+ self.module.fail_json(msg="Invalid nmcli command.")
+ cmd.append(self.conn_name)
+
+ # Use connection name as default for interface name on creation.
+ if nmcli_command == 'create' and self.ifname is None:
+ ifname = self.conn_name
+ else:
+ ifname = self.ifname
+
+ options = {
+ 'connection.interface-name': ifname,
+ }
+
+ # A VPN connection does not need an interface, but if one is sent it must be a valid interface name.
+ if self.type == 'vpn' and self.ifname is None:
+ del options['connection.interface-name']
+
+ options.update(self.connection_options())
+
+ # Constructing the command.
+ for key, value in options.items():
+ if value is not None:
+ if key in self.SECRET_OPTIONS:
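+ # Secret values are not put on the 'nmcli con add/modify' command line; they are applied
+ # afterwards through 'nmcli con edit' on stdin, which also keeps them out of process listings.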
+ self.edit_commands += ['set %s %s' % (key, value)]
+ continue
+ if key == 'xmit_hash_policy':
+ cmd.extend(['+bond.options', 'xmit_hash_policy=%s' % value])
+ continue
+ cmd.extend([key, value])
+
+ return self.execute_command(cmd)
+
+ def create_connection(self):
+ status = self.connection_update('create')
+ if status[0] == 0 and self.edit_commands:
+ status = self.edit_connection()
+ if self.create_connection_up:
+ status = self.up_connection()
+ return status
+
+ @property
+ def create_connection_up(self):
+ if self.type in ('bond', 'dummy', 'ethernet', 'infiniband', 'wifi'):
+ if (self.mtu is not None) or (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ elif self.type == 'team':
+ if (self.dns4 is not None) or (self.dns6 is not None):
+ return True
+ return False
+
+ def remove_connection(self):
+ # self.down_connection()
+ cmd = [self.nmcli_bin, 'con', 'del', self.conn_name]
+ return self.execute_command(cmd)
+
+ def modify_connection(self):
+ status = self.connection_update('modify')
+ if status[0] == 0 and self.edit_commands:
+ status = self.edit_connection()
+ return status
+
+ def edit_connection(self):
+ commands = self.edit_commands + ['save', 'quit']
+ return self.execute_edit_commands(commands, arguments=[self.conn_name])
+
+ def show_connection(self):
+ cmd = [self.nmcli_bin, '--show-secrets', 'con', 'show', self.conn_name]
+
+ (rc, out, err) = self.execute_command(cmd)
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ p_enum_value = re.compile(r'^([-]?\d+) \((\w+)\)$')
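+ # nmcli renders enumerated properties as 'number (label)', for example '2 (enabled, prefer temporary IP)';
+ # only the leading number is kept for comparison.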
+
+ conn_info = dict()
+ for line in out.splitlines():
+ pair = line.split(':', 1)
+ key = pair[0].strip()
+ key_type = self.settings_type(key)
+ if key and len(pair) > 1:
+ raw_value = pair[1].lstrip()
+ if raw_value == '--':
+ conn_info[key] = None
+ elif key == 'bond.options':
+ # Aliases such as 'miimon', 'downdelay' are equivalent to the +bond.options 'option=value' syntax.
+ opts = raw_value.split(',')
+ for opt in opts:
+ alias_pair = opt.split('=', 1)
+ if len(alias_pair) > 1:
+ alias_key = alias_pair[0]
+ alias_value = alias_pair[1]
+ conn_info[alias_key] = alias_value
+ elif key in ('ipv4.routes', 'ipv6.routes'):
+ conn_info[key] = [s.strip() for s in raw_value.split(';')]
+ elif key_type == list:
+ conn_info[key] = [s.strip() for s in raw_value.split(',')]
+ else:
+ m_enum = p_enum_value.match(raw_value)
+ if m_enum is not None:
+ value = m_enum.group(1)
+ else:
+ value = raw_value
+ conn_info[key] = value
+
+ return conn_info
+
+ def get_supported_properties(self, setting):
+ properties = []
+
+ if setting == '802-11-wireless-security':
+ set_property = 'psk'
+ set_value = 'FAKEVALUE'
+ commands = ['set %s.%s %s' % (setting, set_property, set_value)]
+ else:
+ commands = []
+
+ commands += ['print %s' % setting, 'quit', 'yes']
+
+ (rc, out, err) = self.execute_edit_commands(commands, arguments=['type', self.type])
+
+ if rc != 0:
+ raise NmcliModuleError(err)
+
+ for line in out.splitlines():
+ prefix = '%s.' % setting
+ if line.startswith(prefix):
+ pair = line.split(':', 1)
+ property = pair[0].strip().replace(prefix, '')
+ properties.append(property)
+
+ return properties
+
+ def check_for_unsupported_properties(self, setting):
+ if setting == '802-11-wireless':
+ setting_key = 'wifi'
+ elif setting == '802-11-wireless-security':
+ setting_key = 'wifi_sec'
+ else:
+ setting_key = setting
+
+ supported_properties = self.get_supported_properties(setting)
+ unsupported_properties = []
+
+ for property, value in getattr(self, setting_key).items():
+ if property not in supported_properties:
+ unsupported_properties.append(property)
+
+ if unsupported_properties:
+ msg_options = []
+ for property in unsupported_properties:
+ msg_options.append('%s.%s' % (setting_key, property))
+
+ msg = 'Invalid or unsupported option(s): "%s"' % '", "'.join(msg_options)
+ if self.ignore_unsupported_suboptions:
+ self.module.warn(msg)
+ else:
+ self.module.fail_json(msg=msg)
+
+ return unsupported_properties
+
+ def _compare_conn_params(self, conn_info, options):
+ changed = False
+ diff_before = dict()
+ diff_after = dict()
+
+ for key, value in options.items():
+ # We can't just do `if not value` because then if there's a value
+ # of 0 specified as an integer it'll be interpreted as empty when
+ # it actually isn't.
+ if value != 0 and not value:
+ continue
+
+ if key in conn_info:
+ current_value = conn_info[key]
+ if key == '802-11-wireless.wake-on-wlan' and current_value is not None:
+ match = re.match('0x([0-9A-Fa-f]+)', current_value)
+ if match:
+ current_value = str(int(match.group(1), 16))
+ if key in ('ipv4.routes', 'ipv6.routes') and current_value is not None:
+ current_value = self.get_route_params(current_value)
+ if key == self.mac_setting:
+ # MAC addresses are case insensitive; nmcli always reports them in uppercase
+ value = value.upper()
+ # ensure current_value is also converted to uppercase in case nmcli changes behaviour
+ if current_value:
+ current_value = current_value.upper()
+ if key == 'gsm.apn':
+ # Depending on the version, nmcli adds double-quotes around gsm.apn;
+ # they need to be stripped in order to compare both values
+ if current_value:
+ current_value = current_value.strip('"')
+ if key == self.mtu_setting and self.mtu is None:
+ self.mtu = 0
+ if key == 'vpn.data':
+ if current_value:
+ current_value = sorted(re.sub(r'\s*=\s*', '=', part.strip(), count=1) for part in current_value.split(','))
+ value = sorted(part.strip() for part in value.split(','))
+ else:
+ # parameter does not exist
+ current_value = None
+
+ if isinstance(current_value, list) and isinstance(value, list):
+ # compare values between two lists
+ if key in ('ipv4.addresses', 'ipv6.addresses'):
+ # The order of IP addresses matters because the first one
+ # is the default source address for outbound connections.
+ changed |= current_value != value
+ else:
+ changed |= sorted(current_value) != sorted(value)
+ elif all([key == self.mtu_setting, self.type == 'dummy', current_value is None, value == 'auto', self.mtu is None]):
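+ # A dummy device with no requested MTU reports '--' (None) while change detection expects 'auto';
+ # treat that combination as unchanged.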
+ value = None
+ else:
+ value = to_text(value)
+ if current_value != value:
+ changed = True
+
+ diff_before[key] = current_value
+ diff_after[key] = value
+
+ diff = {
+ 'before': diff_before,
+ 'after': diff_after,
+ }
+ return (changed, diff)
+
+ def is_connection_changed(self):
+ options = {
+ 'connection.interface-name': self.ifname,
+ }
+
+        # A VPN connection does not need an interface, but if one is given it must be a valid interface.
+ if self.type == 'vpn' and self.ifname is None:
+ del options['connection.interface-name']
+
+ if not self.type:
+ current_con_type = self.show_connection().get('connection.type')
+ if current_con_type:
+ if current_con_type == '802-11-wireless':
+ current_con_type = 'wifi'
+ self.type = current_con_type
+
+ options.update(self.connection_options(detect_change=True))
+ return self._compare_conn_params(self.show_connection(), options)
+
+
+def main():
+    # Parse the module arguments
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_unsupported_suboptions=dict(type='bool', default=False),
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=[
+ 'bond',
+ 'bond-slave',
+ 'bridge',
+ 'bridge-slave',
+ 'dummy',
+ 'ethernet',
+ 'generic',
+ 'gre',
+ 'infiniband',
+ 'ipip',
+ 'sit',
+ 'team',
+ 'team-slave',
+ 'vlan',
+ 'vxlan',
+ 'wifi',
+ 'gsm',
+ 'macvlan',
+ 'wireguard',
+ 'vpn',
+ ]),
+ ip4=dict(type='list', elements='str'),
+ gw4=dict(type='str'),
+ gw4_ignore_auto=dict(type='bool', default=False),
+ routes4=dict(type='list', elements='str'),
+ routes4_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
+ tos=dict(type='int'),
+ cwnd=dict(type='int'),
+ mtu=dict(type='int'),
+ onlink=dict(type='bool')
+ )),
+ route_metric4=dict(type='int'),
+ routing_rules4=dict(type='list', elements='str'),
+ never_default4=dict(type='bool', default=False),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dns4_ignore_auto=dict(type='bool', default=False),
+ method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
+ may_fail4=dict(type='bool', default=True),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='list', elements='str'),
+ gw6=dict(type='str'),
+ gw6_ignore_auto=dict(type='bool', default=False),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ dns6_ignore_auto=dict(type='bool', default=False),
+ routes6=dict(type='list', elements='str'),
+ routes6_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
+ cwnd=dict(type='int'),
+ mtu=dict(type='int'),
+ onlink=dict(type='bool')
+ )),
+ route_metric6=dict(type='int'),
+ method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']),
+ ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']),
+ addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ xmit_hash_policy=dict(type='str'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ zone=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool'),
+ path_cost=dict(type='int', default=100),
+ # team specific vars
+ runner=dict(type='str', default='roundrobin',
+ choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']),
+ # team active-backup runner specific options
+ runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']),
+ # team lacp runner specific options
+ runner_fast_rate=dict(type='bool'),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ # ip-tunnel type gre specific vars
+ ip_tunnel_input_key=dict(type='str', no_log=True),
+ ip_tunnel_output_key=dict(type='str', no_log=True),
+ # 802-11-wireless* specific vars
+ ssid=dict(type='str'),
+ wifi=dict(type='dict'),
+ wifi_sec=dict(type='dict', no_log=True),
+ gsm=dict(type='dict'),
+ macvlan=dict(type='dict', options=dict(
+ mode=dict(type='int', choices=[1, 2, 3, 4, 5], required=True),
+ parent=dict(type='str', required=True),
+ promiscuous=dict(type='bool'),
+ tap=dict(type='bool'))),
+ wireguard=dict(type='dict'),
+ vpn=dict(type='dict'),
+ transport_mode=dict(type='str', choices=['datagram', 'connected']),
+ ),
+ mutually_exclusive=[['never_default4', 'gw4'],
+ ['routes4_extended', 'routes4'],
+ ['routes6_extended', 'routes6']],
+        required_if=[("type", "wifi", ["ssid"])],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ nmcli = Nmcli(module)
+
+ (rc, out, err) = (None, '', '')
+ result = {'conn_name': nmcli.conn_name, 'state': nmcli.state}
+
+ # check for issues
+ if nmcli.conn_name is None:
+ nmcli.module.fail_json(msg="Please specify a name for the connection")
+ # team checks
+ if nmcli.type == "team":
+        if nmcli.runner_hwaddr_policy and nmcli.runner != "activebackup":
+            nmcli.module.fail_json(msg="runner_hwaddr_policy is only allowed for runner activebackup")
+ if nmcli.runner_fast_rate is not None and nmcli.runner != "lacp":
+ nmcli.module.fail_json(msg="runner-fast-rate is only allowed for runner lacp")
+ # team-slave checks
+ if nmcli.type == 'team-slave':
+ if nmcli.master is None:
+ nmcli.module.fail_json(msg="Please specify a name for the master when type is %s" % nmcli.type)
+ if nmcli.ifname is None:
+ nmcli.module.fail_json(msg="Please specify an interface name for the connection when type is %s" % nmcli.type)
+ if nmcli.type == 'wifi':
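+        # Validate the free-form wifi/wifi_sec dicts against the properties this
+        # nmcli build actually supports; warn or fail per ignore_unsupported_suboptions.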
+ unsupported_properties = {}
+ if nmcli.wifi:
+ if 'ssid' in nmcli.wifi:
+ module.warn("Ignoring option 'wifi.ssid', it must be specified with option 'ssid'")
+ del nmcli.wifi['ssid']
+ unsupported_properties['wifi'] = nmcli.check_for_unsupported_properties('802-11-wireless')
+ if nmcli.wifi_sec:
+ unsupported_properties['wifi_sec'] = nmcli.check_for_unsupported_properties('802-11-wireless-security')
+ if nmcli.ignore_unsupported_suboptions and unsupported_properties:
+ for setting_key, properties in unsupported_properties.items():
+ for property in properties:
+ del getattr(nmcli, setting_key)[property]
+
+ try:
+ if nmcli.state == 'absent':
+ if nmcli.connection_exists():
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = nmcli.down_connection()
+ (rc, out, err) = nmcli.remove_connection()
+ if rc != 0:
+ module.fail_json(name=('No Connection named %s exists' % nmcli.conn_name), msg=err, rc=rc)
+
+ elif nmcli.state == 'present':
+ if nmcli.connection_exists():
+ changed, diff = nmcli.is_connection_changed()
+ if module._diff:
+ result['diff'] = diff
+
+ if changed:
+ # modify connection (note: this function is check mode aware)
+ # result['Connection']=('Connection %s of Type %s is not being added' % (nmcli.conn_name, nmcli.type))
+ result['Exists'] = 'Connections do exist so we are modifying them'
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.modify_connection()
+ else:
+ result['Exists'] = 'Connections already exist and no changes made'
+ if module.check_mode:
+ module.exit_json(changed=False, **result)
+ if not nmcli.connection_exists():
+ result['Connection'] = ('Connection %s of Type %s is being added' % (nmcli.conn_name, nmcli.type))
+ if module.check_mode:
+ module.exit_json(changed=True, **result)
+ (rc, out, err) = nmcli.create_connection()
+ if rc is not None and rc != 0:
+ module.fail_json(name=nmcli.conn_name, msg=err, rc=rc)
+ except NmcliModuleError as e:
+ module.fail_json(name=nmcli.conn_name, msg=str(e))
+
+ if rc is None:
+ result['changed'] = False
+ else:
+ result['changed'] = True
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nomad_job.py b/ansible_collections/community/general/plugins/modules/nomad_job.py
new file mode 100644
index 000000000..ca76536b4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nomad_job.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Launch a Nomad Job
+description:
+ - Launch a Nomad job.
+ - Stop a Nomad job.
+    - Force start a Nomad job.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+        - Name of the job used to delete, stop, or start a job without providing its source.
+        - Either this or I(content) must be specified.
+ type: str
+ state:
+ description:
+ - Deploy or remove job.
+ choices: ["present", "absent"]
+ required: true
+ type: str
+ force_start:
+ description:
+        - Force the job to start.
+ type: bool
+ default: false
+ content:
+ description:
+ - Content of Nomad job.
+ - Either this or I(name) must be specified.
+ type: str
+ content_format:
+ description:
+        - Format of the Nomad job content.
+ choices: ["hcl", "json"]
+ default: hcl
+ type: str
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Create job
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ content: "{{ lookup('ansible.builtin.file', 'job.hcl') }}"
+ timeout: 120
+
+- name: Stop job
+ community.general.nomad_job:
+ host: localhost
+ state: absent
+ name: api
+
+- name: Force job to start
+ community.general.nomad_job:
+ host: localhost
+ state: present
+ name: api
+ timeout: 120
+ force_start: true
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ state=dict(required=True, choices=['present', 'absent']),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ namespace=dict(type='str'),
+ name=dict(type='str'),
+ content_format=dict(choices=['hcl', 'json'], default='hcl'),
+ content=dict(type='str'),
+ force_start=dict(type='bool', default=False),
+ token=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ["name", "content"]
+ ],
+ required_one_of=[
+ ['name', 'content']
+ ]
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ if module.params.get('state') == "present":
+
+ if module.params.get('name') and not module.params.get('force_start'):
+            module.fail_json(msg='To start a job by name, force_start must be true')
+
+ changed = False
+ if module.params.get('content'):
+
+ if module.params.get('content_format') == 'json':
+
+ job_json = module.params.get('content')
+ try:
+ job_json = json.loads(job_json)
+ except ValueError as e:
+ module.fail_json(msg=to_native(e))
+ job = dict()
+ job['job'] = job_json
+ try:
+ job_id = job_json.get('ID')
+ if job_id is None:
+ module.fail_json(msg="Cannot retrieve job with ID None")
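+                    # Plan the job first: the reported diff tells us whether registering
+                    # would change anything, and keeps check mode free of side effects.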
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+                    if plan['Diff'].get('Type') != "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('content_format') == 'hcl':
+
+ try:
+ job_hcl = module.params.get('content')
+ job_json = nomad_client.jobs.parse(job_hcl)
+ job = dict()
+ job['job'] = job_json
+ except nomad.api.exceptions.BadRequestNomadException as err:
+ msg = str(err.nomad_resp.reason) + " " + str(err.nomad_resp.text)
+ module.fail_json(msg=to_native(msg))
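+                # As with JSON content, plan the parsed job to detect changes before registering.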
+ try:
+ job_id = job_json.get('ID')
+ plan = nomad_client.job.plan_job(job_id, job, diff=True)
+                    if plan['Diff'].get('Type') != "None":
+ changed = True
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = plan
+ else:
+ result = plan
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('force_start'):
+
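+            # Starting a stopped job means re-registering it with Stop set to false;
+            # in check mode the job is only validated instead of registered.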
+ try:
+ job = dict()
+ if module.params.get('name'):
+ job_name = module.params.get('name')
+ else:
+ job_name = job_json['Name']
+ job_json = nomad_client.job.get_job(job_name)
+ if job_json['Status'] == 'running':
+ result = job_json
+ else:
+ job_json['Status'] = 'running'
+ job_json['Stop'] = False
+ job['job'] = job_json
+ if not module.check_mode:
+ result = nomad_client.jobs.register_job(job)
+ else:
+ result = nomad_client.validate.validate_job(job)
+                        if result.status_code != 200:
+ module.fail_json(msg=to_native(result.text))
+ result = json.loads(result.text)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ if module.params.get('state') == "absent":
+
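+        # Resolve the job name from the 'name' option or from the supplied content,
+        # then deregister the job unless it is already dead.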
+ try:
+            if module.params.get('name') is not None:
+ job_name = module.params.get('name')
+ else:
+ if module.params.get('content_format') == 'hcl':
+ job_json = nomad_client.jobs.parse(module.params.get('content'))
+ job_name = job_json['Name']
+                if module.params.get('content_format') == 'json':
+                    job_json = json.loads(module.params.get('content'))
+                    job_name = job_json['Name']
+ job = nomad_client.job.get_job(job_name)
+ if job['Status'] == 'dead':
+ changed = False
+ result = job
+ else:
+ if not module.check_mode:
+ result = nomad_client.job.deregister_job(job_name)
+ else:
+ result = job
+ changed = True
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+    run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nomad_job_info.py b/ansible_collections/community/general/plugins/modules/nomad_job_info.py
new file mode 100644
index 000000000..5ee25a57a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nomad_job_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, FERREIRA Christophe <christophe.ferreira@cnaf.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: nomad_job_info
+author: FERREIRA Christophe (@chris93111)
+version_added: "1.3.0"
+short_description: Get Nomad Jobs info
+description:
+ - Get info for one Nomad job.
+ - List Nomad jobs.
+requirements:
+ - python-nomad
+extends_documentation_fragment:
+ - community.general.nomad
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ name:
+ description:
+        - Name of the job to retrieve info for.
+ - If not specified, lists all jobs.
+ type: str
+notes:
+ - C(check_mode) is supported.
+seealso:
+ - name: Nomad jobs documentation
+ description: Complete documentation for Nomad API jobs.
+ link: https://www.nomadproject.io/api-docs/jobs/
+'''
+
+EXAMPLES = '''
+- name: Get info for job awx
+ community.general.nomad_job_info:
+ host: localhost
+ name: awx
+ register: result
+
+- name: List Nomad jobs
+ community.general.nomad_job_info:
+ host: localhost
+ register: result
+
+'''
+
+RETURN = '''
+result:
+    description: List of dictionaries containing job information.
+ returned: success
+ type: list
+ sample: [
+ {
+ "Affinities": null,
+ "AllAtOnce": false,
+ "Constraints": null,
+ "ConsulToken": "",
+ "CreateIndex": 13,
+ "Datacenters": [
+ "dc1"
+ ],
+ "Dispatched": false,
+ "ID": "example",
+ "JobModifyIndex": 13,
+ "Meta": null,
+ "ModifyIndex": 13,
+ "Multiregion": null,
+ "Name": "example",
+ "Namespace": "default",
+ "NomadTokenID": "",
+ "ParameterizedJob": null,
+ "ParentID": "",
+ "Payload": null,
+ "Periodic": null,
+ "Priority": 50,
+ "Region": "global",
+ "Spreads": null,
+ "Stable": false,
+ "Status": "pending",
+ "StatusDescription": "",
+ "Stop": false,
+ "SubmitTime": 1602244370615307000,
+ "TaskGroups": [
+ {
+ "Affinities": null,
+ "Constraints": null,
+ "Count": 1,
+ "EphemeralDisk": {
+ "Migrate": false,
+ "SizeMB": 300,
+ "Sticky": false
+ },
+ "Meta": null,
+ "Migrate": {
+ "HealthCheck": "checks",
+ "HealthyDeadline": 300000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000
+ },
+ "Name": "cache",
+ "Networks": null,
+ "ReschedulePolicy": {
+ "Attempts": 0,
+ "Delay": 30000000000,
+ "DelayFunction": "exponential",
+ "Interval": 0,
+ "MaxDelay": 3600000000000,
+ "Unlimited": true
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Scaling": null,
+ "Services": null,
+ "ShutdownDelay": null,
+ "Spreads": null,
+ "StopAfterClientDisconnect": null,
+ "Tasks": [
+ {
+ "Affinities": null,
+ "Artifacts": null,
+ "CSIPluginConfig": null,
+ "Config": {
+ "image": "redis:3.2",
+ "port_map": [
+ {
+ "db": 6379.0
+ }
+ ]
+ },
+ "Constraints": null,
+ "DispatchPayload": null,
+ "Driver": "docker",
+ "Env": null,
+ "KillSignal": "",
+ "KillTimeout": 5000000000,
+ "Kind": "",
+ "Leader": false,
+ "Lifecycle": null,
+ "LogConfig": {
+ "MaxFileSizeMB": 10,
+ "MaxFiles": 10
+ },
+ "Meta": null,
+ "Name": "redis",
+ "Resources": {
+ "CPU": 500,
+ "Devices": null,
+ "DiskMB": 0,
+ "IOPS": 0,
+ "MemoryMB": 256,
+ "Networks": [
+ {
+ "CIDR": "",
+ "DNS": null,
+ "Device": "",
+ "DynamicPorts": [
+ {
+ "HostNetwork": "default",
+ "Label": "db",
+ "To": 0,
+ "Value": 0
+ }
+ ],
+ "IP": "",
+ "MBits": 10,
+ "Mode": "",
+ "ReservedPorts": null
+ }
+ ]
+ },
+ "RestartPolicy": {
+ "Attempts": 3,
+ "Delay": 15000000000,
+ "Interval": 1800000000000,
+ "Mode": "fail"
+ },
+ "Services": [
+ {
+ "AddressMode": "auto",
+ "CanaryMeta": null,
+ "CanaryTags": null,
+ "Checks": [
+ {
+ "AddressMode": "",
+ "Args": null,
+ "CheckRestart": null,
+ "Command": "",
+ "Expose": false,
+ "FailuresBeforeCritical": 0,
+ "GRPCService": "",
+ "GRPCUseTLS": false,
+ "Header": null,
+ "InitialStatus": "",
+ "Interval": 10000000000,
+ "Method": "",
+ "Name": "alive",
+ "Path": "",
+ "PortLabel": "",
+ "Protocol": "",
+ "SuccessBeforePassing": 0,
+ "TLSSkipVerify": false,
+ "TaskName": "",
+ "Timeout": 2000000000,
+ "Type": "tcp"
+ }
+ ],
+ "Connect": null,
+ "EnableTagOverride": false,
+ "Meta": null,
+ "Name": "redis-cache",
+ "PortLabel": "db",
+ "Tags": [
+ "global",
+ "cache"
+ ],
+ "TaskName": ""
+ }
+ ],
+ "ShutdownDelay": 0,
+ "Templates": null,
+ "User": "",
+ "Vault": null,
+ "VolumeMounts": null
+ }
+ ],
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "checks",
+ "HealthyDeadline": 180000000000,
+ "MaxParallel": 1,
+ "MinHealthyTime": 10000000000,
+ "ProgressDeadline": 600000000000,
+ "Stagger": 30000000000
+ },
+ "Volumes": null
+ }
+ ],
+ "Type": "service",
+ "Update": {
+ "AutoPromote": false,
+ "AutoRevert": false,
+ "Canary": 0,
+ "HealthCheck": "",
+ "HealthyDeadline": 0,
+ "MaxParallel": 1,
+ "MinHealthyTime": 0,
+ "ProgressDeadline": 0,
+ "Stagger": 30000000000
+ },
+ "VaultNamespace": "",
+ "VaultToken": "",
+ "Version": 0
+ }
+ ]
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+import_nomad = None
+try:
+ import nomad
+ import_nomad = True
+except ImportError:
+ import_nomad = False
+
+
+def run():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True, type='str'),
+ use_ssl=dict(type='bool', default=True),
+ timeout=dict(type='int', default=5),
+ validate_certs=dict(type='bool', default=True),
+ client_cert=dict(type='path'),
+ client_key=dict(type='path'),
+ namespace=dict(type='str'),
+ name=dict(type='str'),
+ token=dict(type='str', no_log=True)
+ ),
+ supports_check_mode=True
+ )
+
+ if not import_nomad:
+ module.fail_json(msg=missing_required_lib("python-nomad"))
+
+ certificate_ssl = (module.params.get('client_cert'), module.params.get('client_key'))
+
+ nomad_client = nomad.Nomad(
+ host=module.params.get('host'),
+ secure=module.params.get('use_ssl'),
+ timeout=module.params.get('timeout'),
+ verify=module.params.get('validate_certs'),
+ cert=certificate_ssl,
+ namespace=module.params.get('namespace'),
+ token=module.params.get('token')
+ )
+
+ changed = False
+ result = list()
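+    # The jobs list endpoint only returns summaries, so fetch the full
+    # specification of every job individually.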
+ try:
+ job_list = nomad_client.jobs.get_jobs()
+ for job in job_list:
+ result.append(nomad_client.job.get_job(job.get('ID')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
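+    # When a name is given, narrow the result to the matching job and fail if none matches.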
+    if module.params.get('name'):
+        filtered = list()
+        try:
+            for job in result:
+                if job.get('ID') == module.params.get('name'):
+                    filtered.append(job)
+            result = filtered
+            if not filtered:
+                module.fail_json(msg="Couldn't find Job with id " + str(module.params.get('name')))
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+ module.exit_json(changed=changed, result=result)
+
+
+def main():
+    run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nosh.py b/ansible_collections/community/general/plugins/modules/nosh.py
new file mode 100644
index 000000000..2dfb8d590
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nosh.py
@@ -0,0 +1,559 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Thomas Caravia <taca@kadisius.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nosh
+author:
+ - "Thomas Caravia (@tacatac)"
+short_description: Manage services with nosh
+description:
+ - Control running and enabled state for system-wide or user services.
+ - BSD and Linux systems are supported.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ required: true
+ description:
+ - Name of the service to manage.
+ state:
+ type: str
+ required: false
+ choices: [ started, stopped, reset, restarted, reloaded ]
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary.
+ C(restarted) will always bounce the service.
+ C(reloaded) will send a SIGHUP or start the service.
+ C(reset) will start or stop the service according to whether it is
+ enabled or not.
+ enabled:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service, independently of C(*.preset) file
+ preference or running state. Mutually exclusive with I(preset). Will take
+ effect prior to I(state=reset).
+ preset:
+ required: false
+ type: bool
+ description:
+ - Enable or disable the service according to local preferences in C(*.preset) files.
+ Mutually exclusive with I(enabled). Only has an effect if set to true. Will take
+ effect prior to I(state=reset).
+ user:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Run system-control talking to the calling user's service manager, rather than
+ the system-wide service manager.
+requirements:
+ - A system with an active nosh service manager, see Notes for further information.
+notes:
+ - Information on the nosh utilities suite may be found at U(https://jdebp.eu/Softwares/nosh/).
+'''
+
+EXAMPLES = '''
+- name: Start dnscache if not running
+ community.general.nosh:
+ name: dnscache
+ state: started
+
+- name: Stop mpd, if running
+ community.general.nosh:
+ name: mpd
+ state: stopped
+
+- name: Restart unbound or start it if not already running
+ community.general.nosh:
+ name: unbound
+ state: restarted
+
+- name: Reload fail2ban or start it if not already running
+ community.general.nosh:
+ name: fail2ban
+ state: reloaded
+
+- name: Disable nsd
+ community.general.nosh:
+ name: nsd
+ enabled: false
+
+- name: For package installers, set nginx running state according to local enable settings, preset and reset
+ community.general.nosh:
+ name: nginx
+ preset: true
+ state: reset
+
+- name: Reboot the host if nosh is the system manager, would need a "wait_for*" task at least, not recommended as-is
+ community.general.nosh:
+ name: reboot
+ state: started
+
+- name: Using conditionals with the module facts
+ tasks:
+ - name: Obtain information on tinydns service
+ community.general.nosh:
+ name: tinydns
+ register: result
+
+ - name: Fail if service not loaded
+ ansible.builtin.fail:
+ msg: "The {{ result.name }} service is not loaded"
+ when: not result.status
+
+ - name: Fail if service is running
+ ansible.builtin.fail:
+ msg: "The {{ result.name }} service is running"
+ when: result.status and result.status['DaemontoolsEncoreState'] == "running"
+'''
+
+RETURN = '''
+name:
+ description: name used to find the service
+ returned: success
+ type: str
+ sample: "sshd"
+service_path:
+ description: resolved path for the service
+ returned: success
+ type: str
+ sample: "/var/sv/sshd"
+enabled:
+ description: whether the service is enabled at system bootstrap
+ returned: success
+ type: bool
+ sample: true
+preset:
+ description: whether the enabled status reflects the one set in the relevant C(*.preset) file
+ returned: success
+ type: bool
+ sample: 'False'
+state:
+ description: service process run state, C(None) if the service is not loaded and will not be started
+ returned: if state option is used
+ type: str
+ sample: "reloaded"
+status:
+ description: A dictionary with the key=value pairs returned by C(system-control show-json) or C(None) if the service is not loaded
+ returned: success
+ type: complex
+ contains:
+ After:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys", "log"]
+ Before:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Conflicts:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: []
+ DaemontoolsEncoreState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "running"
+ DaemontoolsState:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "up"
+ Enabled:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: true
+ LogService:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "../cyclog@sshd"
+ MainPID:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 661
+ Paused:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ ReadyAfterRun:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ RemainAfterExit:
+ description: [] # FIXME
+ returned: success
+ type: bool
+ sample: 'False'
+ Required-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: []
+ RestartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RestartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RestartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ RunExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ RunTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ RunUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StartExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1
+ StartExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StartTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StartUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ StopExitStatusCode:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopExitStatusNumber:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: '0'
+ StopTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ StopUTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Stopped-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/shutdown"]
+ Timestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 4611686019935648081
+ UTCTimestamp:
+ description: [] # FIXME
+ returned: success
+ type: int
+ sample: 1508260140
+ Want:
+ description: [] # FIXME
+ returned: success
+ type: str
+ sample: "nothing"
+ Wanted-By:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/server","/etc/service-bundles/targets/sockets"]
+ Wants:
+ description: [] # FIXME
+ returned: success
+ type: list
+ sample: ["/etc/service-bundles/targets/basic","../sshdgenkeys"]
+user:
+ description: whether the user-level service manager is called
+ returned: success
+ type: bool
+ sample: false
+'''
+
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.service import fail_if_missing
+from ansible.module_utils.common.text.converters import to_native
+
+
+def run_sys_ctl(module, args):
+ sys_ctl = [module.get_bin_path('system-control', required=True)]
+ if module.params['user']:
+ sys_ctl = sys_ctl + ['--user']
+ return module.run_command(sys_ctl + args)
+
+
+def get_service_path(module, service):
+ (rc, out, err) = run_sys_ctl(module, ['find', service])
+ # fail if service not found
+ if rc != 0:
+ fail_if_missing(module, False, service, msg='host')
+ else:
+ return to_native(out).strip()
+
+
+def service_is_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-enabled', service_path])
+ return rc == 0
+
+
+def service_is_preset_enabled(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['preset', '--dry-run', service_path])
+ return to_native(out).strip().startswith("enable")
+
+
+def service_is_loaded(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['is-loaded', service_path])
+ return rc == 0
+
+
+def get_service_status(module, service_path):
+ (rc, out, err) = run_sys_ctl(module, ['show-json', service_path])
+    # will fail if the service is not loaded
+ if err is not None and err:
+ module.fail_json(msg=err)
+ else:
+ json_out = json.loads(to_native(out).strip())
+ status = json_out[service_path] # descend past service path header
+ return status
+
+
+def service_is_running(service_status):
+ return service_status['DaemontoolsEncoreState'] in set(['starting', 'started', 'running'])
+
+
+def handle_enabled(module, result, service_path):
+ """Enable or disable a service as needed.
+
+ - 'preset' will set the enabled state according to available preset file settings.
+ - 'enabled' will set the enabled state explicitly, independently of preset settings.
+
+ These options are set to "mutually exclusive" but the explicit 'enabled' option will
+ have priority if the check is bypassed.
+ """
+
+ # computed prior in control flow
+ preset = result['preset']
+ enabled = result['enabled']
+
+ # preset, effect only if option set to true (no reverse preset)
+ if module.params['preset']:
+ action = 'preset'
+
+ # run preset if needed
+ if preset != module.params['preset']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['preset'] = not preset
+ result['enabled'] = not enabled
+
+ # enabled/disabled state
+ if module.params['enabled'] is not None:
+ if module.params['enabled']:
+ action = 'enable'
+ else:
+ action = 'disable'
+
+ # change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, out + err))
+ result['enabled'] = not enabled
+ result['preset'] = not preset
+
+
+def handle_state(module, result, service_path):
+ """Set service running state as needed.
+
+ Takes into account the fact that a service may not be loaded (no supervise directory) in
+ which case it is 'stopped' as far as the service manager is concerned. No status information
+ can be obtained and the service can only be 'started'.
+ """
+ # default to desired state, no action
+ result['state'] = module.params['state']
+ state = module.params['state']
+ action = None
+
+ # computed prior in control flow, possibly modified by handle_enabled()
+ enabled = result['enabled']
+
+ # service not loaded -> not started by manager, no status information
+ if not service_is_loaded(module, service_path):
+ if state in ['started', 'restarted', 'reloaded']:
+ action = 'start'
+ result['state'] = 'started'
+ elif state == 'reset':
+ if enabled:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ result['state'] = None
+ else:
+ result['state'] = None
+
+ # service is loaded
+ else:
+ # get status information
+ result['status'] = get_service_status(module, service_path)
+ running = service_is_running(result['status'])
+
+ if state == 'started':
+ if not running:
+ action = 'start'
+ elif state == 'stopped':
+ if running:
+ action = 'stop'
+ # reset = start/stop according to enabled status
+ elif state == 'reset':
+ if enabled is not running:
+ if running:
+ action = 'stop'
+ result['state'] = 'stopped'
+ else:
+ action = 'start'
+ result['state'] = 'started'
+ # start if not running, 'service' module constraint
+ elif state == 'restarted':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'condrestart'
+ # start if not running, 'service' module constraint
+ elif state == 'reloaded':
+ if not running:
+ action = 'start'
+ result['state'] = 'started'
+ else:
+ action = 'hangup'
+
+ # change state as needed
+ if action:
+ result['changed'] = True
+ if not module.check_mode:
+ (rc, out, err) = run_sys_ctl(module, [action, service_path])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service_path, err))
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['started', 'stopped', 'reset', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ preset=dict(type='bool'),
+ user=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['enabled', 'preset']],
+ )
+
+ service = module.params['name']
+ rc = 0
+ out = err = ''
+ result = {
+ 'name': service,
+ 'changed': False,
+ 'status': None,
+ }
+
+ # check service can be found (or fail) and get path
+ service_path = get_service_path(module, service)
+
+ # get preliminary service facts
+ result['service_path'] = service_path
+ result['user'] = module.params['user']
+ result['enabled'] = service_is_enabled(module, service_path)
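+    # 'preset' records whether the current enabled state matches the *.preset file preference.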
+ result['preset'] = result['enabled'] is service_is_preset_enabled(module, service_path)
+
+ # set enabled state, service need not be loaded
+ if module.params['enabled'] is not None or module.params['preset']:
+ handle_enabled(module, result, service_path)
+
+ # set service running state
+ if module.params['state'] is not None:
+ handle_state(module, result, service_path)
+
+ # get final service status if possible
+ if service_is_loaded(module, service_path):
+ result['status'] = get_service_status(module, service_path)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/npm.py b/ansible_collections/community/general/plugins/modules/npm.py
new file mode 100644
index 000000000..013fd6e57
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/npm.py
@@ -0,0 +1,342 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: npm
+short_description: Manage node.js packages with npm
+description:
+ - Manage node.js packages with Node Package Manager (npm).
+author: "Chris Hoffman (@chrishoffman)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of a node.js library to install.
+ type: str
+ required: false
+ path:
+ description:
+ - The base path where to install the node.js libraries.
+ type: path
+ required: false
+ version:
+ description:
+ - The version to be installed.
+ type: str
+ required: false
+ global:
+ description:
+ - Install the node.js library globally.
+ required: false
+ default: false
+ type: bool
+ executable:
+ description:
+ - The executable location for npm.
+ - This is useful if you are using a version manager, such as nvm.
+ type: path
+ required: false
+ ignore_scripts:
+ description:
+ - Use the C(--ignore-scripts) flag when installing.
+ required: false
+ type: bool
+ default: false
+ unsafe_perm:
+ description:
+ - Use the C(--unsafe-perm) flag when installing.
+ type: bool
+ default: false
+ ci:
+ description:
+ - Install packages based on package-lock file, same as running C(npm ci).
+ type: bool
+ default: false
+ production:
+ description:
+ - Install dependencies in production mode, excluding devDependencies.
+ required: false
+ type: bool
+ default: false
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ type: str
+ state:
+ description:
+ - The state of the node.js library.
+ required: false
+ type: str
+ default: present
+ choices: [ "present", "absent", "latest" ]
+ no_optional:
+ description:
+ - Use the C(--no-optional) flag when installing.
+ type: bool
+ default: false
+ version_added: 2.0.0
+ no_bin_links:
+ description:
+ - Use the C(--no-bin-links) flag when installing.
+ type: bool
+ default: false
+ version_added: 2.5.0
+requirements:
+ - npm installed in bin path (recommended /usr/local/bin)
+'''
+
+EXAMPLES = r'''
+- name: Install "coffee-script" node.js package.
+ community.general.npm:
+ name: coffee-script
+ path: /app/location
+
+- name: Install "coffee-script" node.js package on version 1.6.1.
+ community.general.npm:
+ name: coffee-script
+ version: '1.6.1'
+ path: /app/location
+
+- name: Install "coffee-script" node.js package globally.
+ community.general.npm:
+ name: coffee-script
+ global: true
+
+- name: Remove the globally installed package "coffee-script".
+ community.general.npm:
+ name: coffee-script
+ global: true
+ state: absent
+
+- name: Install "coffee-script" node.js package from custom registry.
+ community.general.npm:
+ name: coffee-script
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.npm:
+ path: /app/location
+
+- name: Update packages based on package.json to their latest version.
+ community.general.npm:
+ path: /app/location
+ state: latest
+
+- name: Install packages based on package.json using the npm installed with nvm v0.10.1.
+ community.general.npm:
+ path: /app/location
+ executable: /opt/nvm/v0.10.1/bin/npm
+ state: present
+'''
+
+import json
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Npm(object):
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.glbl = kwargs['glbl']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.unsafe_perm = kwargs['unsafe_perm']
+ self.state = kwargs['state']
+ self.no_optional = kwargs['no_optional']
+ self.no_bin_links = kwargs['no_bin_links']
+
+ if kwargs['executable']:
+ self.executable = kwargs['executable'].split(' ')
+ else:
+ self.executable = [module.get_bin_path('npm', True)]
+
+ if kwargs['version'] and self.state != 'absent':
+ self.name_version = self.name + '@' + str(self.version)
+ else:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, add_package_name=True):
+        if not self.module.check_mode or run_in_check_mode:
+ cmd = self.executable + args
+
+ if self.glbl:
+ cmd.append('--global')
+ if self.production and ('install' in cmd or 'update' in cmd or 'ci' in cmd):
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.unsafe_perm:
+ cmd.append('--unsafe-perm')
+ if self.name_version and add_package_name:
+ cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+ if self.no_optional:
+ cmd.append('--no-optional')
+ if self.no_bin_links:
+ cmd.append('--no-bin-links')
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path:
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out
+ return ''
+
+ def list(self):
+ cmd = ['list', '--json', '--long']
+
+ installed = list()
+ missing = list()
+ data = {}
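+        # 'npm list' exits non-zero when packages are missing, so the return code
+        # is not checked; empty output is treated as an empty dependency tree.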
+ try:
+ data = json.loads(self._exec(cmd, True, False, False) or '{}')
+ except (getattr(json, 'JSONDecodeError', ValueError)) as e:
+ self.module.fail_json(msg="Failed to parse NPM output with error %s" % to_native(e))
+ if 'dependencies' in data:
+ for dep, props in data['dependencies'].items():
+
+ if 'missing' in props and props['missing']:
+ missing.append(dep)
+ elif 'invalid' in props and props['invalid']:
+ missing.append(dep)
+ else:
+ installed.append(dep)
+ if 'version' in props and props['version']:
+ dep_version = dep + '@' + str(props['version'])
+ installed.append(dep_version)
+ if self.name_version and self.name_version not in installed:
+ missing.append(self.name)
+ # Named dependency not installed
+ else:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ return self._exec(['install'])
+
+ def ci_install(self):
+ return self._exec(['ci'])
+
+ def update(self):
+ return self._exec(['update'])
+
+ def uninstall(self):
+ return self._exec(['uninstall'])
+
+ def list_outdated(self):
+ outdated = list()
+ data = self._exec(['outdated'], True, False)
+ for dep in data.splitlines():
+ if dep:
+ # node.js v0.10.22 changed the `npm outdated` module separator
+ # from "@" to " ". Split on both for backwards compatibility.
+ pkg, other = re.split(r'\s|@', dep, 1)
+ outdated.append(pkg)
+
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None, type='str'),
+ path=dict(default=None, type='path'),
+ version=dict(default=None, type='str'),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None, type='str'),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ unsafe_perm=dict(default=False, type='bool'),
+ ci=dict(default=False, type='bool'),
+ no_optional=dict(default=False, type='bool'),
+ no_bin_links=dict(default=False, type='bool'),
+ )
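+    # 'global' is a reserved word in Python, so it cannot be passed as a keyword
+    # argument to dict() above and is added to the spec separately.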
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ glbl = module.params['global']
+ production = module.params['production']
+ executable = module.params['executable']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+ unsafe_perm = module.params['unsafe_perm']
+ ci = module.params['ci']
+ no_optional = module.params['no_optional']
+ no_bin_links = module.params['no_bin_links']
+
+ if not path and not glbl:
+ module.fail_json(msg='path must be specified when not using global')
+ if state == 'absent' and not name:
+ module.fail_json(msg='uninstalling a package is only available for named packages')
+
+ npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production,
+ executable=executable, registry=registry, ignore_scripts=ignore_scripts,
+ unsafe_perm=unsafe_perm, state=state, no_optional=no_optional, no_bin_links=no_bin_links)
+
+ changed = False
+ if ci:
+ npm.ci_install()
+ changed = True
+ elif state == 'present':
+ installed, missing = npm.list()
+ if missing:
+ changed = True
+ npm.install()
+ elif state == 'latest':
+ installed, missing = npm.list()
+ outdated = npm.list_outdated()
+ if missing:
+ changed = True
+ npm.install()
+ if outdated:
+ changed = True
+ npm.update()
+ else: # absent
+ installed, missing = npm.list()
+ if name in installed:
+ changed = True
+ npm.uninstall()
+
+ module.exit_json(changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/nsupdate.py b/ansible_collections/community/general/plugins/modules/nsupdate.py
new file mode 100644
index 000000000..b2a84f76b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/nsupdate.py
@@ -0,0 +1,527 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Marcin Skarbek <github@skarbek.name>
+# Copyright (c) 2016, Andreas Olsson <andreas@arrakis.se>
+# Copyright (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+#
+# This module was ported from https://github.com/mskarbek/ansible-nsupdate
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: nsupdate
+
+short_description: Manage DNS records
+description:
+    - Create, update, and remove DNS records using DDNS updates.
+requirements:
+ - dnspython
+author: "Loic Blot (@nerzhul)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Manage DNS record.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ server:
+ description:
+ - Apply DNS modification on this server, specified by IPv4 or IPv6 address.
+ required: true
+ type: str
+ port:
+ description:
+ - Use this TCP port when connecting to C(server).
+ default: 53
+ type: int
+ key_name:
+ description:
+ - Use TSIG key name to authenticate against DNS C(server)
+ type: str
+ key_secret:
+ description:
+ - Use TSIG key secret, associated with C(key_name), to authenticate against C(server)
+ type: str
+ key_algorithm:
+ description:
+ - Specify key algorithm used by C(key_secret).
+ choices: ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224', 'hmac-sha256', 'hmac-sha384',
+ 'hmac-sha512']
+ default: 'hmac-md5'
+ type: str
+ zone:
+ description:
+ - DNS record will be modified on this C(zone).
+ - When omitted DNS will be queried to attempt finding the correct zone.
+ - Starting with Ansible 2.7 this parameter is optional.
+ type: str
+ record:
+ description:
+ - Sets the DNS record to modify. When zone is omitted this has to be absolute (ending with a dot).
+ required: true
+ type: str
+ type:
+ description:
+ - Sets the record type.
+ default: 'A'
+ type: str
+ ttl:
+ description:
+ - Sets the record TTL.
+ default: 3600
+ type: int
+ value:
+ description:
+ - Sets the record value.
+ type: list
+ elements: str
+ protocol:
+ description:
+            - Sets the transport protocol (TCP or UDP). TCP is the recommended and more robust option.
+ default: 'tcp'
+ choices: ['tcp', 'udp']
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Add or modify ansible.example.org A to 192.168.1.1
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: "192.168.1.1"
+
+- name: Add or modify ansible.example.org A to 192.168.1.1, 192.168.1.2 and 192.168.1.3
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "ansible"
+ value: ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
+
+- name: Remove puppet.example.org CNAME
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ zone: "example.org"
+ record: "puppet"
+ type: "CNAME"
+ state: absent
+
+- name: Add 1.1.168.192.in-addr.arpa. PTR for ansible.example.org
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ value: "ansible.example.org."
+ state: present
+
+- name: Remove 1.1.168.192.in-addr.arpa. PTR
+ community.general.nsupdate:
+ key_name: "nsupdate"
+ key_secret: "+bFQtBCta7j2vWkjPkAFtgA=="
+ server: "10.1.1.1"
+ record: "1.1.168.192.in-addr.arpa."
+ type: "PTR"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+    description: Whether the module has modified the record
+ returned: success
+ type: str
+record:
+ description: DNS record
+ returned: success
+ type: str
+ sample: 'ansible'
+ttl:
+ description: DNS record TTL
+ returned: success
+ type: int
+ sample: 86400
+type:
+ description: DNS record type
+ returned: success
+ type: str
+ sample: 'CNAME'
+value:
+ description: DNS record value(s)
+ returned: success
+ type: list
+ sample: '192.168.1.1'
+zone:
+ description: DNS record zone
+ returned: success
+ type: str
+ sample: 'example.org.'
+dns_rc:
+ description: dnspython return code
+ returned: always
+ type: int
+ sample: 4
+dns_rc_str:
+ description: dnspython return code (string representation)
+ returned: always
+ type: str
+ sample: 'REFUSED'
+'''
+
+import traceback
+
+from binascii import Error as binascii_error
+from socket import error as socket_error
+
+DNSPYTHON_IMP_ERR = None
+try:
+ import dns.update
+ import dns.query
+ import dns.tsigkeyring
+ import dns.message
+ import dns.resolver
+
+ HAVE_DNSPYTHON = True
+except ImportError:
+ DNSPYTHON_IMP_ERR = traceback.format_exc()
+ HAVE_DNSPYTHON = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class RecordManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ if module.params['key_name']:
+ try:
+ self.keyring = dns.tsigkeyring.from_text({
+ module.params['key_name']: module.params['key_secret']
+ })
+ except TypeError:
+ module.fail_json(msg='Missing key_secret')
+ except binascii_error as e:
+ module.fail_json(msg='TSIG key error: %s' % to_native(e))
+ else:
+ self.keyring = None
+
+ if module.params['key_algorithm'] == 'hmac-md5':
+ self.algorithm = 'HMAC-MD5.SIG-ALG.REG.INT'
+ else:
+ self.algorithm = module.params['key_algorithm']
+
+ if module.params['zone'] is None:
+ if module.params['record'][-1] != '.':
+ self.module.fail_json(msg='record must be absolute when omitting zone parameter')
+ self.zone = self.lookup_zone()
+ else:
+ self.zone = module.params['zone']
+
+ if self.zone[-1] != '.':
+ self.zone += '.'
+
+ if module.params['record'][-1] != '.':
+ self.fqdn = module.params['record'] + '.' + self.zone
+ else:
+ self.fqdn = module.params['record']
+
+ if self.module.params['type'].lower() == 'txt' and self.module.params['value'] is not None:
+ self.value = list(map(self.txt_helper, self.module.params['value']))
+ else:
+ self.value = self.module.params['value']
+
+ self.dns_rc = 0
+
+ def txt_helper(self, entry):
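+        # TXT record data must be quoted in presentation format; wrap bare values
+        # in double quotes and leave already-quoted values untouched.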
+ if entry[0] == '"' and entry[-1] == '"':
+ return entry
+ return '"{text}"'.format(text=entry)
+
+ def lookup_zone(self):
+ name = dns.name.from_text(self.module.params['record'])
+ while True:
+ query = dns.message.make_query(name, dns.rdatatype.SOA)
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+ self.module.fail_json(msg='DNS server error: (%s): %s' % (e.__class__.__name__, to_native(e)))
+ if lookup.rcode() in [dns.rcode.SERVFAIL, dns.rcode.REFUSED]:
+ self.module.fail_json(msg='Zone lookup failure: \'%s\' will not respond to queries regarding \'%s\'.' % (
+ self.module.params['server'], self.module.params['record']))
+ # If the response contains an Answer SOA RR whose name matches the queried name,
+ # this is the name of the zone in which the record needs to be inserted.
+ for rr in lookup.answer:
+ if rr.rdtype == dns.rdatatype.SOA and rr.name == name:
+ return rr.name.to_text()
+ # If the response contains an Authority SOA RR whose name is a subdomain of the queried name,
+ # this SOA name is the zone in which the record needs to be inserted.
+ for rr in lookup.authority:
+ if rr.rdtype == dns.rdatatype.SOA and name.fullcompare(rr.name)[0] == dns.name.NAMERELN_SUBDOMAIN:
+ return rr.name.to_text()
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ self.module.fail_json(msg='Zone lookup of \'%s\' failed for unknown reason.' % (self.module.params['record']))
+
+ def __do_update(self, update):
+ response = None
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ response = dns.query.tcp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ response = dns.query.udp(update, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+            self.module.fail_json(msg='DNS server error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ return response
+
+ def create_or_update_record(self):
+ result = {'changed': False, 'failed': False}
+
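+        # record_exists() returns 0 (absent), 1 (present and up to date),
+        # or 2 (present but needing an update).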
+ exists = self.record_exists()
+ if exists in [0, 2]:
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ if exists == 0:
+ self.dns_rc = self.create_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to create DNS record (rc: %d)" % self.dns_rc
+
+ elif exists == 2:
+ self.dns_rc = self.modify_record()
+ if self.dns_rc != 0:
+ result['msg'] = "Failed to update DNS record (rc: %d)" % self.dns_rc
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ else:
+ result['changed'] = True
+
+ else:
+ result['changed'] = False
+
+ return result
+
+ def create_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ response = self.__do_update(update)
+        return response.rcode()
+
+ def modify_record(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+
+ if self.module.params['type'].upper() == 'NS':
+ # When modifying a NS record, Bind9 silently refuses to delete all the NS entries for a zone:
+ # > 09-May-2022 18:00:50.352 client @0x7fe7dd1f9568 192.168.1.3#45458/key rndc_ddns_ansible:
+ # > updating zone 'lab/IN': attempt to delete all SOA or NS records ignored
+ # https://gitlab.isc.org/isc-projects/bind9/-/blob/v9_18/lib/ns/update.c#L3304
+            # Perform the DNS inserts and updates first, and the deletes afterwards.
+ query = dns.message.make_query(self.module.params['record'], self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+                self.module.fail_json(msg='DNS server error (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ entries_to_remove = [n.to_text() for n in lookup.answer[0].items if n.to_text() not in self.value]
+ else:
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ for entry in self.value:
+ try:
+ update.add(self.module.params['record'],
+ self.module.params['ttl'],
+ self.module.params['type'],
+ entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+
+ if self.module.params['type'].upper() == 'NS':
+ for entry in entries_to_remove:
+ update.delete(self.module.params['record'], self.module.params['type'], entry)
+
+ response = self.__do_update(update)
+
+        return response.rcode()
+
+ def remove_record(self):
+ result = {'changed': False, 'failed': False}
+
+ if self.record_exists() == 0:
+ return result
+
+        # The record exists; in check mode, report the change without applying it.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True)
+
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
+ update.delete(self.module.params['record'], self.module.params['type'])
+
+ response = self.__do_update(update)
+        self.dns_rc = response.rcode()
+
+ if self.dns_rc != 0:
+ result['failed'] = True
+ result['msg'] = "Failed to delete record (rc: %d)" % self.dns_rc
+ else:
+ result['changed'] = True
+
+ return result
+
+ def record_exists(self):
+ update = dns.update.Update(self.zone, keyring=self.keyring, keyalgorithm=self.algorithm)
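+        # An update message containing only prerequisites acts as a probe:
+        # the server returns NOERROR only if the prerequisites are satisfied.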
+ try:
+ update.present(self.module.params['record'], self.module.params['type'])
+ except dns.rdatatype.UnknownRdatatype as e:
+ self.module.fail_json(msg='Record error: {0}'.format(to_native(e)))
+
+ response = self.__do_update(update)
+        self.dns_rc = response.rcode()
+ if self.dns_rc == 0:
+ if self.module.params['state'] == 'absent':
+ return 1
+ for entry in self.value:
+ try:
+ update.present(self.module.params['record'], self.module.params['type'], entry)
+ except AttributeError:
+ self.module.fail_json(msg='value needed when state=present')
+ except dns.exception.SyntaxError:
+ self.module.fail_json(msg='Invalid/malformed value')
+ response = self.__do_update(update)
+            self.dns_rc = response.rcode()
+ if self.dns_rc == 0:
+ if self.ttl_changed():
+ return 2
+ else:
+ return 1
+ else:
+ return 2
+ else:
+ return 0
+
+ def ttl_changed(self):
+ query = dns.message.make_query(self.fqdn, self.module.params['type'])
+ if self.keyring:
+ query.use_tsig(keyring=self.keyring, algorithm=self.algorithm)
+
+ try:
+ if self.module.params['protocol'] == 'tcp':
+ lookup = dns.query.tcp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ else:
+ lookup = dns.query.udp(query, self.module.params['server'], timeout=10, port=self.module.params['port'])
+ except (dns.tsig.PeerBadKey, dns.tsig.PeerBadSignature) as e:
+ self.module.fail_json(msg='TSIG update error (%s): %s' % (e.__class__.__name__, to_native(e)))
+ except (socket_error, dns.exception.Timeout) as e:
+            self.module.fail_json(msg='DNS server error (%s): %s' % (e.__class__.__name__, to_native(e)))
+
+ if lookup.rcode() != dns.rcode.NOERROR:
+ self.module.fail_json(msg='Failed to lookup TTL of existing matching record.')
+
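+        # The queried server may not be authoritative for the child zone of an
+        # NS record, in which case the RRset appears in the authority section.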
+ if self.module.params['type'] == 'NS':
+ current_ttl = lookup.answer[0].ttl if lookup.answer else lookup.authority[0].ttl
+ else:
+ current_ttl = lookup.answer[0].ttl
+ return current_ttl != self.module.params['ttl']
+
+
+def main():
+ tsig_algs = ['HMAC-MD5.SIG-ALG.REG.INT', 'hmac-md5', 'hmac-sha1', 'hmac-sha224',
+ 'hmac-sha256', 'hmac-sha384', 'hmac-sha512']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
+ server=dict(required=True, type='str'),
+ port=dict(required=False, default=53, type='int'),
+ key_name=dict(required=False, type='str'),
+ key_secret=dict(required=False, type='str', no_log=True),
+ key_algorithm=dict(required=False, default='hmac-md5', choices=tsig_algs, type='str'),
+ zone=dict(required=False, default=None, type='str'),
+ record=dict(required=True, type='str'),
+ type=dict(required=False, default='A', type='str'),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False, default=None, type='list', elements='str'),
+ protocol=dict(required=False, default='tcp', choices=['tcp', 'udp'], type='str')
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAVE_DNSPYTHON:
+ module.fail_json(msg=missing_required_lib('dnspython'), exception=DNSPYTHON_IMP_ERR)
+
+ if len(module.params["record"]) == 0:
+ module.fail_json(msg='record cannot be empty.')
+
+ record = RecordManager(module)
+ result = {}
+ if module.params["state"] == 'absent':
+ result = record.remove_record()
+ elif module.params["state"] == 'present':
+ result = record.create_or_update_record()
+
+ result['dns_rc'] = record.dns_rc
+ result['dns_rc_str'] = dns.rcode.to_text(record.dns_rc)
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ result['record'] = dict(zone=record.zone,
+ record=module.params['record'],
+ type=module.params['type'],
+ ttl=module.params['ttl'],
+ value=record.value)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ocapi_command.py b/ansible_collections/community/general/plugins/modules/ocapi_command.py
new file mode 100644
index 000000000..ed2366736
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ocapi_command.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ocapi_command
+version_added: 6.3.0
+short_description: Manages Out-Of-Band controllers using Open Composable API (OCAPI)
+description:
+ - Builds OCAPI URIs locally and sends them to remote OOB controllers to
+ perform an action.
+  - Manages OOB controller functions such as Indicator LED, Reboot, Power Mode, and Firmware Update.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - Command to execute on OOB controller.
+ type: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ proxy_slot_number:
+ description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
+ type: int
+ update_image_path:
+ required: false
+ description:
+ - For C(FWUpload), the path on the local filesystem of the firmware update image.
+ type: str
+ job_name:
+ required: false
+ description:
+ - For C(DeleteJob) command, the name of the job to delete.
+ type: str
+ username:
+ required: true
+ description:
+ - Username for authenticating to OOB controller.
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authenticating to OOB controller.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller.
+ default: 10
+ type: int
+
+author: "Mike Moerk (@mikemoerk)"
+'''
+
+EXAMPLES = '''
+ - name: Set the power state to low
+ community.general.ocapi_command:
+ category: Chassis
+ command: PowerModeLow
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set the power state to normal
+ community.general.ocapi_command:
+ category: Chassis
+ command: PowerModeNormal
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set chassis indicator LED to on
+ community.general.ocapi_command:
+ category: Chassis
+ command: IndicatorLedOn
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set chassis indicator LED to off
+ community.general.ocapi_command:
+ category: Chassis
+ command: IndicatorLedOff
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Reset Enclosure
+ community.general.ocapi_command:
+ category: Systems
+ command: PowerGracefulRestart
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Firmware Upload
+ community.general.ocapi_command:
+ category: Update
+ command: FWUpload
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_path: "/path/to/firmware.tar.gz"
+
+ - name: Firmware Update
+ community.general.ocapi_command:
+ category: Update
+ command: FWUpdate
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Firmware Activate
+ community.general.ocapi_command:
+ category: Update
+ command: FWActivate
+ baseuri: "iom1.wdc.com"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Delete Job
+ community.general.ocapi_command:
+ category: Jobs
+ command: DeleteJob
+ job_name: FirmwareUpdate
+ baseuri: "{{ baseuri }}"
+ proxy_slot_number: 2
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
+
+jobUri:
+ description: URI to use to monitor status of the operation. Returned for async commands such as Firmware Update, Firmware Activate.
+ returned: when supported
+ type: str
+ sample: "https://ioma.wdc.com/Storage/Devices/openflex-data24-usalp03020qb0003/Jobs/FirmwareUpdate/"
+
+operationStatusId:
+ description: OCAPI State ID (see OCAPI documentation for possible values).
+ returned: when supported
+ type: int
+ sample: 2
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "PowerModeLow", "PowerModeNormal"],
+ "Systems": ["PowerGracefulRestart"],
+ "Update": ["FWUpload", "FWUpdate", "FWActivate"],
+ "Jobs": ["DeleteJob"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='str'),
+ job_name=dict(type='str'),
+ baseuri=dict(required=True, type='str'),
+ proxy_slot_number=dict(type='int'),
+ update_image_path=dict(type='str'),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=True
+ )
+
+ category = module.params['category']
+ command = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {
+ 'user': module.params['username'],
+ 'pswd': module.params['password']
+ }
+
+ # timeout
+ timeout = module.params['timeout']
+
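+    # baseuri is expected to be a bare host name; the scheme is prepended here.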
+ base_uri = "https://" + module.params["baseuri"]
+ proxy_slot_number = module.params.get("proxy_slot_number")
+ ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that the command is valid
+ if command not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Chassis":
+ if command.startswith("IndicatorLed"):
+ result = ocapi_utils.manage_chassis_indicator_led(command)
+ elif command.startswith("PowerMode"):
+ result = ocapi_utils.manage_system_power(command)
+ elif category == "Systems":
+ if command.startswith("Power"):
+ result = ocapi_utils.manage_system_power(command)
+ elif category == "Update":
+ if command == "FWUpload":
+ update_image_path = module.params.get("update_image_path")
+ if update_image_path is None:
+ module.fail_json(msg=to_native("Missing update_image_path."))
+ result = ocapi_utils.upload_firmware_image(update_image_path)
+ elif command == "FWUpdate":
+ result = ocapi_utils.update_firmware_image()
+ elif command == "FWActivate":
+ result = ocapi_utils.activate_firmware_image()
+ elif category == "Jobs":
+ if command == "DeleteJob":
+ job_name = module.params.get("job_name")
+ if job_name is None:
+                module.fail_json(msg=to_native("Missing job_name."))
+ job_uri = urljoin(base_uri, "Jobs/" + job_name)
+ result = ocapi_utils.delete_job(job_uri)
+
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ else:
+ del result['ret']
+ changed = result.get('changed', True)
+ session = result.get('session', dict())
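+        # Carry any extra keys from the OCAPI response (for example jobUri or
+        # operationStatusId) into the module result without overriding these.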
+ kwargs = {
+ "changed": changed,
+ "session": session,
+ "msg": "Action was successful." if not module.check_mode else result.get(
+ "msg", "No action performed in check mode."
+ )
+ }
+ result_keys = [result_key for result_key in result if result_key not in kwargs]
+ for result_key in result_keys:
+ kwargs[result_key] = result[result_key]
+ module.exit_json(**kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ocapi_info.py b/ansible_collections/community/general/plugins/modules/ocapi_info.py
new file mode 100644
index 000000000..d7dfdccc7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ocapi_info.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ocapi_info
+version_added: 6.3.0
+short_description: Queries Out-Of-Band controllers using Open Composable API (OCAPI)
+description:
+  - Builds OCAPI URIs locally and sends them to remote OOB controllers to
+    retrieve information.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - Command to execute on OOB controller.
+ type: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ proxy_slot_number:
+ description: For proxied inband requests, the slot number of the IOM. Only applies if I(baseuri) is a proxy server.
+ type: int
+ username:
+ required: true
+ description:
+ - Username for authenticating to OOB controller.
+ type: str
+ password:
+ required: true
+ description:
+ - Password for authenticating to OOB controller.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller.
+ default: 10
+ type: int
+ job_name:
+ description:
+ - Name of job for fetching status.
+ type: str
+
+
+author: "Mike Moerk (@mikemoerk)"
+'''
+
+EXAMPLES = '''
+ - name: Get job status
+ community.general.ocapi_info:
+ category: Status
+ command: JobStatus
+      baseuri: "iom1.wdc.com"
+      job_name: FirmwareUpdate
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
+
+percentComplete:
+ description: Percent complete of the relevant operation. Applies to C(JobStatus) command.
+ returned: when supported
+ type: int
+ sample: 99
+
+operationStatus:
+ description: Status of the relevant operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
+ returned: when supported
+ type: str
+ sample: "Activate needed"
+
+operationStatusId:
+ description: Integer value of status (corresponds to operationStatus). Applies to C(JobStatus) command. See OCAPI documentation for details.
+ returned: when supported
+ type: int
+ sample: 65540
+
+operationHealth:
+ description: Health of the operation. Applies to C(JobStatus) command. See OCAPI documentation for details.
+ returned: when supported
+ type: str
+ sample: "OK"
+
+operationHealthId:
+ description: >
+ Integer value for health of the operation (corresponds to C(operationHealth)). Applies to C(JobStatus) command.
+ See OCAPI documentation for details.
+ returned: when supported
+  type: int
+  sample: 5
+
+details:
+ description: Details of the relevant operation. Applies to C(JobStatus) command.
+ returned: when supported
+ type: list
+ elements: str
+
+status:
+ description: Dict containing status information. See OCAPI documentation for details.
+ returned: when supported
+ type: dict
+ sample: {
+ "Details": [
+ "None"
+ ],
+ "Health": [
+ {
+ "ID": 5,
+ "Name": "OK"
+ }
+ ],
+ "State": {
+ "ID": 16,
+ "Name": "In service"
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Jobs": ["JobStatus"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='str'),
+ job_name=dict(type='str'),
+ baseuri=dict(required=True, type='str'),
+ proxy_slot_number=dict(type='int'),
+ username=dict(required=True),
+ password=dict(required=True, no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ supports_check_mode=True
+ )
+
+ category = module.params['category']
+ command = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {
+ 'user': module.params['username'],
+ 'pswd': module.params['password']
+ }
+
+ # timeout
+ timeout = module.params['timeout']
+
+ base_uri = "https://" + module.params["baseuri"]
+ proxy_slot_number = module.params.get("proxy_slot_number")
+ ocapi_utils = OcapiUtils(creds, base_uri, proxy_slot_number, timeout, module)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that the command is valid
+ if command not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (command, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Jobs":
+ if command == "JobStatus":
+ if module.params.get("job_name") is None:
+ module.fail_json(msg=to_native(
+ "job_name required for JobStatus command."))
+ job_uri = urljoin(base_uri, 'Jobs/' + module.params["job_name"])
+ result = ocapi_utils.get_job_status(job_uri)
+
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ else:
+ del result['ret']
+ changed = False
+ session = result.get('session', dict())
+ kwargs = {
+ "changed": changed,
+ "session": session,
+ "msg": "Action was successful." if not module.check_mode else result.get(
+ "msg", "No action performed in check mode."
+ )
+ }
+ result_keys = [result_key for result_key in result if result_key not in kwargs]
+ for result_key in result_keys:
+ kwargs[result_key] = result[result_key]
+ module.exit_json(**kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oci_vcn.py b/ansible_collections/community/general/plugins/modules/oci_vcn.py
new file mode 100644
index 000000000..4e6487b8f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oci_vcn.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, 2018, Oracle and/or its affiliates.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oci_vcn
+short_description: Manage Virtual Cloud Networks (VCN) in OCI
+description:
+  - This module allows the user to create, delete and update virtual cloud networks (VCNs) in OCI.
+ The complete Oracle Cloud Infrastructure Ansible Modules can be downloaded from
+ U(https://github.com/oracle/oci-ansible-modules/releases).
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ cidr_block:
+ description: The CIDR IP address block of the VCN. Required when creating a VCN with I(state=present).
+ type: str
+ required: false
+ compartment_id:
+ description: The OCID of the compartment to contain the VCN. Required when creating a VCN with I(state=present).
+ This option is mutually exclusive with I(vcn_id).
+ type: str
+ display_name:
+ description: A user-friendly name. Does not have to be unique, and it's changeable.
+ type: str
+ aliases: [ 'name' ]
+ dns_label:
+ description: A DNS label for the VCN, used in conjunction with the VNIC's hostname and subnet's DNS label to
+ form a fully qualified domain name (FQDN) for each VNIC within this subnet (for example,
+ bminstance-1.subnet123.vcn1.oraclevcn.com). Not required to be unique, but it's a best practice
+ to set unique DNS labels for VCNs in your tenancy. Must be an alphanumeric string that begins
+ with a letter. The value cannot be changed.
+ type: str
+ state:
+ description: Create or update a VCN with I(state=present). Use I(state=absent) to delete a VCN.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ vcn_id:
+ description: The OCID of the VCN. Required when deleting a VCN with I(state=absent) or updating a VCN
+ with I(state=present). This option is mutually exclusive with I(compartment_id).
+ type: str
+ aliases: [ 'id' ]
+author: "Rohit Chaware (@rohitChaware)"
+extends_documentation_fragment:
+ - community.general.oracle
+ - community.general.oracle_creatable_resource
+ - community.general.oracle_wait_options
+ - community.general.oracle_tags
+ - community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create a VCN
+ community.general.oci_vcn:
+ cidr_block: '10.0.0.0/16'
+ compartment_id: 'ocid1.compartment.oc1..xxxxxEXAMPLExxxxx'
+ display_name: my_vcn
+ dns_label: ansiblevcn
+
+- name: Updates the specified VCN's display name
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ display_name: ansible_vcn
+
+- name: Delete the specified VCN
+ community.general.oci_vcn:
+ vcn_id: ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx
+ state: absent
+"""
+
+RETURN = """
+vcn:
+ description: Information about the VCN
+ returned: On successful create and update operation
+ type: dict
+ sample: {
+ "cidr_block": "10.0.0.0/16",
+        "compartment_id": "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx",
+ "default_dhcp_options_id": "ocid1.dhcpoptions.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_route_table_id": "ocid1.routetable.oc1.phx.xxxxxEXAMPLExxxxx",
+ "default_security_list_id": "ocid1.securitylist.oc1.phx.xxxxxEXAMPLExxxxx",
+ "display_name": "ansible_vcn",
+ "dns_label": "ansiblevcn",
+ "id": "ocid1.vcn.oc1.phx.xxxxxEXAMPLExxxxx",
+ "lifecycle_state": "AVAILABLE",
+ "time_created": "2017-11-13T20:22:40.626000+00:00",
+ "vcn_domain_name": "ansiblevcn.oraclevcn.com"
+ }
+"""
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible_collections.community.general.plugins.module_utils.oracle import oci_utils
+
+try:
+ from oci.core.virtual_network_client import VirtualNetworkClient
+ from oci.core.models import CreateVcnDetails
+ from oci.core.models import UpdateVcnDetails
+
+ HAS_OCI_PY_SDK = True
+except ImportError:
+ HAS_OCI_PY_SDK = False
+
+
+def delete_vcn(virtual_network_client, module):
+ result = oci_utils.delete_and_wait(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ delete_fn=virtual_network_client.delete_vcn,
+ kwargs_delete={"vcn_id": module.params["vcn_id"]},
+ module=module,
+ )
+ return result
+
+
+def update_vcn(virtual_network_client, module):
+ result = oci_utils.check_and_update_resource(
+ resource_type="vcn",
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ kwargs_get={"vcn_id": module.params["vcn_id"]},
+ update_fn=virtual_network_client.update_vcn,
+ primitive_params_update=["vcn_id"],
+ kwargs_non_primitive_update={UpdateVcnDetails: "update_vcn_details"},
+ module=module,
+ update_attributes=list(UpdateVcnDetails().attribute_map.keys()),
+ )
+ return result
+
+
+def create_vcn(virtual_network_client, module):
+ create_vcn_details = CreateVcnDetails()
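+    # Copy every module parameter that matches a CreateVcnDetails attribute onto the model.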
+ for attribute in create_vcn_details.attribute_map.keys():
+ if attribute in module.params:
+ setattr(create_vcn_details, attribute, module.params[attribute])
+
+ result = oci_utils.create_and_wait(
+ resource_type="vcn",
+ create_fn=virtual_network_client.create_vcn,
+ kwargs_create={"create_vcn_details": create_vcn_details},
+ client=virtual_network_client,
+ get_fn=virtual_network_client.get_vcn,
+ get_param="vcn_id",
+ module=module,
+ )
+ return result
+
+
+def main():
+ module_args = oci_utils.get_taggable_arg_spec(
+ supports_create=True, supports_wait=True
+ )
+ module_args.update(
+ dict(
+ cidr_block=dict(type="str", required=False),
+ compartment_id=dict(type="str", required=False),
+ display_name=dict(type="str", required=False, aliases=["name"]),
+ dns_label=dict(type="str", required=False),
+ state=dict(
+ type="str",
+ required=False,
+ default="present",
+ choices=["absent", "present"],
+ ),
+ vcn_id=dict(type="str", required=False, aliases=["id"]),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ mutually_exclusive=[["compartment_id", "vcn_id"]],
+ )
+
+ if not HAS_OCI_PY_SDK:
+ module.fail_json(msg=missing_required_lib("oci"))
+
+ virtual_network_client = oci_utils.create_service_client(
+ module, VirtualNetworkClient
+ )
+
+ exclude_attributes = {"display_name": True, "dns_label": True}
+ state = module.params["state"]
+ vcn_id = module.params["vcn_id"]
+
+ if state == "absent":
+ if vcn_id is not None:
+ result = delete_vcn(virtual_network_client, module)
+ else:
+ module.fail_json(
+ msg="Specify vcn_id with state as 'absent' to delete a VCN."
+ )
+
+ else:
+ if vcn_id is not None:
+ result = update_vcn(virtual_network_client, module)
+ else:
+ result = oci_utils.check_and_create_resource(
+ resource_type="vcn",
+ create_fn=create_vcn,
+ kwargs_create={
+ "virtual_network_client": virtual_network_client,
+ "module": module,
+ },
+ list_fn=virtual_network_client.list_vcns,
+ kwargs_list={"compartment_id": module.params["compartment_id"]},
+ module=module,
+ model=CreateVcnDetails(),
+ exclude_attributes=exclude_attributes,
+ )
+
+ module.exit_json(**result)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/odbc.py b/ansible_collections/community/general/plugins/modules/odbc.py
new file mode 100644
index 000000000..fbc4b63ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/odbc.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, John Westcott <john.westcott.iv@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: odbc
+author: "John Westcott IV (@john-westcott-iv)"
+version_added: "1.0.0"
+short_description: Execute SQL via ODBC
+description:
+ - Read/Write info via ODBC drivers.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ dsn:
+ description:
+ - The connection string passed into ODBC.
+ required: true
+ type: str
+ query:
+ description:
+ - The SQL query to perform.
+ required: true
+ type: str
+ params:
+ description:
+ - Parameters to pass to the SQL query.
+ type: list
+ elements: str
+ commit:
+ description:
+ - Perform a commit after the execution of the SQL query.
+ - Some databases allow a commit after a select whereas others raise an exception.
+ - Default is C(true) to support legacy module behavior.
+ type: bool
+ default: true
+ version_added: 1.3.0
+requirements:
+ - "python >= 2.6"
+ - "pyodbc"
+
+notes:
+  - "Like the command module, this module always returns C(changed = yes), whether or not the query would change the database."
+  - "To alter this behavior, use the C(changed_when) task directive."
+  - "For details about return values (description and row_count) see U(https://github.com/mkleehammer/pyodbc/wiki/Cursor)."
+'''
+
+EXAMPLES = '''
+- name: Set some values in the test db
+ community.general.odbc:
+ dsn: "DRIVER={ODBC Driver 13 for SQL Server};Server=db.ansible.com;Database=my_db;UID=admin;PWD=password;"
+ query: "Select * from table_a where column1 = ?"
+ params:
+ - "value1"
+ commit: false
+ changed_when: false
+'''
+
+RETURN = '''
+results:
+ description: List of lists of strings containing selected rows, likely empty for DDL statements.
+ returned: success
+ type: list
+ elements: list
+description:
+  description: "List of dicts describing the columns selected by the cursor, likely empty for DDL statements. See notes."
+ returned: success
+ type: list
+ elements: dict
+row_count:
+  description: "The number of rows selected or modified according to the cursor; defaults to -1. See notes."
+  returned: success
+  type: int
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+try:
+    import pyodbc
+    HAS_PYODBC = True
+except ImportError:
+    HAS_PYODBC = False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dsn=dict(type='str', required=True, no_log=True),
+ query=dict(type='str', required=True),
+ params=dict(type='list', elements='str'),
+ commit=dict(type='bool', default=True),
+ ),
+ )
+
+ dsn = module.params.get('dsn')
+ query = module.params.get('query')
+ params = module.params.get('params')
+ commit = module.params.get('commit')
+
+ if not HAS_PYODBC:
+ module.fail_json(msg=missing_required_lib('pyodbc'))
+
+ # Try to make a connection with the DSN
+ connection = None
+ try:
+ connection = pyodbc.connect(dsn)
+ except Exception as e:
+ module.fail_json(msg='Failed to connect to DSN: {0}'.format(to_native(e)))
+
+ result = dict(
+ changed=True,
+ description=[],
+ row_count=-1,
+ results=[],
+ )
+
+ try:
+ cursor = connection.cursor()
+
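+        # pyodbc uses qmark-style (?) placeholders; parameters are bound positionally.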
+ if params:
+ cursor.execute(query, params)
+ else:
+ cursor.execute(query)
+ if commit:
+ cursor.commit()
+ try:
+            # Gather the rows into a 2D array of strings.
+ for row in cursor.fetchall():
+ new_row = []
+ for column in row:
+ new_row.append("{0}".format(column))
+ result['results'].append(new_row)
+
+ # Return additional information from the cursor
+ for row_description in cursor.description:
+ description = {}
+ description['name'] = row_description[0]
+ description['type'] = row_description[1].__name__
+ description['display_size'] = row_description[2]
+ description['internal_size'] = row_description[3]
+ description['precision'] = row_description[4]
+ description['scale'] = row_description[5]
+ description['nullable'] = row_description[6]
+ result['description'].append(description)
+
+ result['row_count'] = cursor.rowcount
+        except pyodbc.ProgrammingError:
+            # Statements that return no result set (e.g. DDL) raise here; ignore it.
+            pass
+ except Exception as e:
+ module.fail_json(msg="Exception while reading rows: {0}".format(to_native(e)))
+
+ cursor.close()
+ except Exception as e:
+ module.fail_json(msg="Failed to execute query: {0}".format(to_native(e)))
+ finally:
+ connection.close()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/office_365_connector_card.py b/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
new file mode 100644
index 000000000..ed8ebd188
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/office_365_connector_card.py
@@ -0,0 +1,310 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Marc Sensenich <hello@marc-sensenich.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: office_365_connector_card
+short_description: Use webhooks to create Connector Card messages within an Office 365 group
+description:
+ - Creates Connector Card messages through
+ Office 365 Connectors
+ U(https://learn.microsoft.com/en-us/microsoftteams/platform/task-modules-and-cards/cards/cards-reference#connector-card-for-microsoft-365-groups).
+author: "Marc Sensenich (@marc-sensenich)"
+notes:
+  - This module is not idempotent; if the same task is run twice,
+    two Connector Cards will be created.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ webhook:
+ type: str
+ description:
+ - The webhook URL is given to you when you create a new Connector.
+ required: true
+ summary:
+ type: str
+ description:
+ - A string used for summarizing card content.
+ - This will be shown as the message subject.
+ - This is required if the text parameter isn't populated.
+ color:
+ type: str
+ description:
+ - Accent color used for branding or indicating status in the card.
+ title:
+ type: str
+ description:
+ - A title for the Connector message. Shown at the top of the message.
+ text:
+ type: str
+ description:
+ - The main text of the card.
+      - This will be rendered below the sender information and optional title,
+        and above any sections or actions present.
+ actions:
+ type: list
+ elements: dict
+ description:
+      - This array of objects will power the action links
+        found at the bottom of the card.
+ sections:
+ type: list
+ elements: dict
+ description:
+ - Contains a list of sections to display in the card.
+ - For more information see U(https://learn.microsoft.com/en-us/outlook/actionable-messages/message-card-reference#section-fields).
+'''
+
+EXAMPLES = """
+- name: Create a simple Connector Card
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ text: 'Hello, World!'
+
+- name: Create a Connector Card with the full format
+ community.general.office_365_connector_card:
+ webhook: https://outlook.office.com/webhook/GUID/IncomingWebhook/GUID/GUID
+ summary: This is the summary property
+ title: This is the **card's title** property
+ text: This is the **card's text** property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ color: E81123
+ sections:
+ - title: This is the **section's title** property
+ activity_image: http://connectorsdemo.azurewebsites.net/images/MSC12_Oscar_002.jpg
+ activity_title: This is the section's **activityTitle** property
+ activity_subtitle: This is the section's **activitySubtitle** property
+ activity_text: This is the section's **activityText** property.
+ hero_image:
+ image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ text: This is the section's text property. Lorem ipsum dolor sit amet, consectetur
+ adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
+ facts:
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ - name: This is a fact name
+ value: This is a fact value
+ images:
+ - image: http://connectorsdemo.azurewebsites.net/images/MicrosoftSurface_024_Cafe_OH-06315_VS_R1c.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Scene_01.jpg
+ title: This is the image's alternate text
+ - image: http://connectorsdemo.azurewebsites.net/images/WIN12_Anthony_02.jpg
+ title: This is the image's alternate text
+ actions:
+ - "@type": ActionCard
+ name: Comment
+ inputs:
+ - "@type": TextInput
+ id: comment
+ is_multiline: true
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": ActionCard
+ name: Due Date
+ inputs:
+ - "@type": DateInput
+ id: dueDate
+ title: Input's title property
+ actions:
+ - "@type": HttpPOST
+ name: Save
+ target: http://...
+ - "@type": HttpPOST
+ name: Action's name prop.
+ target: http://...
+ - "@type": OpenUri
+ name: Action's name prop
+ targets:
+ - os: default
+ uri: http://...
+ - start_group: true
+ title: This is the title of a **second section**
+ text: This second section is visually separated from the first one by setting its
+ **startGroup** property to true.
+"""
+
+RETURN = """
+"""
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+OFFICE_365_CARD_CONTEXT = "http://schema.org/extensions"
+OFFICE_365_CARD_TYPE = "MessageCard"
+OFFICE_365_CARD_EMPTY_PAYLOAD_MSG = "Summary or Text is required."
+OFFICE_365_INVALID_WEBHOOK_MSG = "The Incoming Webhook was not reachable."
+
+
+def build_actions(actions):
+ action_items = []
+
+ for action in actions:
+ action_item = snake_dict_to_camel_dict(action)
+ action_items.append(action_item)
+
+ return action_items
+
+
+def build_sections(sections):
+ sections_created = []
+
+ for section in sections:
+ sections_created.append(build_section(section))
+
+ return sections_created
+
+
+def build_section(section):
+ section_payload = dict()
+
+ if 'title' in section:
+ section_payload['title'] = section['title']
+
+ if 'start_group' in section:
+ section_payload['startGroup'] = section['start_group']
+
+ if 'activity_image' in section:
+ section_payload['activityImage'] = section['activity_image']
+
+ if 'activity_title' in section:
+ section_payload['activityTitle'] = section['activity_title']
+
+ if 'activity_subtitle' in section:
+ section_payload['activitySubtitle'] = section['activity_subtitle']
+
+ if 'activity_text' in section:
+ section_payload['activityText'] = section['activity_text']
+
+ if 'hero_image' in section:
+ section_payload['heroImage'] = section['hero_image']
+
+ if 'text' in section:
+ section_payload['text'] = section['text']
+
+ if 'facts' in section:
+ section_payload['facts'] = section['facts']
+
+ if 'images' in section:
+ section_payload['images'] = section['images']
+
+ if 'actions' in section:
+ section_payload['potentialAction'] = build_actions(section['actions'])
+
+ return section_payload
+
+
+def build_payload_for_connector_card(module, summary=None, color=None, title=None, text=None, actions=None, sections=None):
+ payload = dict()
+ payload['@context'] = OFFICE_365_CARD_CONTEXT
+ payload['@type'] = OFFICE_365_CARD_TYPE
+
+ if summary is not None:
+ payload['summary'] = summary
+
+ if color is not None:
+ payload['themeColor'] = color
+
+ if title is not None:
+ payload['title'] = title
+
+ if text is not None:
+ payload['text'] = text
+
+ if actions:
+ payload['potentialAction'] = build_actions(actions)
+
+ if sections:
+ payload['sections'] = build_sections(sections)
+
+ payload = module.jsonify(payload)
+ return payload
+
+
+def do_notify_connector_card_webhook(module, webhook, payload):
+ headers = {
+ 'Content-Type': 'application/json'
+ }
+
+ response, info = fetch_url(
+ module=module,
+ url=webhook,
+ headers=headers,
+ method='POST',
+ data=payload
+ )
+
+ if info['status'] == 200:
+ module.exit_json(changed=True)
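+    # In check mode an empty payload is sent; the service answering 400 with
+    # "Summary or Text is required." proves the webhook itself is reachable.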
+ elif info['status'] == 400 and module.check_mode:
+ if info['body'] == OFFICE_365_CARD_EMPTY_PAYLOAD_MSG:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg=OFFICE_365_INVALID_WEBHOOK_MSG)
+ else:
+ module.fail_json(
+ msg="failed to send %s as a connector card to Incoming Webhook: %s"
+ % (payload, info['msg'])
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ webhook=dict(required=True, no_log=True),
+ summary=dict(type='str'),
+ color=dict(type='str'),
+ title=dict(type='str'),
+ text=dict(type='str'),
+ actions=dict(type='list', elements='dict'),
+ sections=dict(type='list', elements='dict')
+ ),
+ supports_check_mode=True
+ )
+
+ webhook = module.params['webhook']
+ summary = module.params['summary']
+ color = module.params['color']
+ title = module.params['title']
+ text = module.params['text']
+ actions = module.params['actions']
+ sections = module.params['sections']
+
+ payload = build_payload_for_connector_card(
+ module,
+ summary,
+ color,
+ title,
+ text,
+ actions,
+ sections)
+
+ if module.check_mode:
+ # In check mode, send an empty payload to validate connection
+ check_mode_payload = build_payload_for_connector_card(module)
+ do_notify_connector_card_webhook(module, webhook, check_mode_payload)
+
+ do_notify_connector_card_webhook(module, webhook, payload)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ohai.py b/ansible_collections/community/general/plugins/modules/ohai.py
new file mode 100644
index 000000000..7fdab3bb7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ohai.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ohai
+short_description: Returns inventory data from I(Ohai)
+description:
+ - Similar to the M(community.general.facter) module, this runs the I(Ohai) discovery program
+ (U(https://docs.chef.io/ohai.html)) on the remote host and
+ returns JSON inventory data.
+ I(Ohai) data is a bit more verbose and nested than I(facter).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options: {}
+notes: []
+requirements: [ "ohai" ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+# Retrieve (ohai) data from all web servers and store in one file per host
+ansible webservers -m ohai --tree=/tmp/ohaidata
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict()
+ )
+ cmd = ["/usr/bin/env", "ohai"]
+ rc, out, err = module.run_command(cmd, check_rc=True)
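+    # Ohai emits a single JSON document on stdout; its top-level keys become the module result.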
+ module.exit_json(**json.loads(out))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/omapi_host.py b/ansible_collections/community/general/plugins/modules/omapi_host.py
new file mode 100644
index 000000000..c93c57853
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/omapi_host.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: omapi_host
+short_description: Setup OMAPI hosts
+description: Manage OMAPI host entries on compatible DHCPd servers.
+requirements:
+ - pypureomapi
+author:
+ - Loic Blot (@nerzhul)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Create or remove OMAPI host.
+ type: str
+ required: true
+ choices: [ absent, present ]
+ hostname:
+ description:
+ - Sets the host lease hostname (mandatory if state=present).
+ type: str
+ aliases: [ name ]
+ host:
+ description:
+ - Sets OMAPI server host to interact with.
+ type: str
+ default: localhost
+ port:
+ description:
+ - Sets the OMAPI server port to interact with.
+ type: int
+ default: 7911
+ key_name:
+ description:
+ - Sets the TSIG key name for authenticating against OMAPI server.
+ type: str
+ required: true
+ key:
+ description:
+ - Sets the TSIG key content for authenticating against OMAPI server.
+ type: str
+ required: true
+ macaddr:
+ description:
+ - Sets the lease host MAC address.
+ type: str
+ required: true
+ ip:
+ description:
+ - Sets the lease host IP address.
+ type: str
+ statements:
+ description:
+      - Attach a list of OMAPI DHCP statements to the host lease (without ending semicolon).
+ type: list
+ elements: str
+ default: []
+ ddns:
+ description:
+ - Enable dynamic DNS updates for this host.
+ type: bool
+ default: false
+
+'''
+EXAMPLES = r'''
+- name: Add a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.98.4.55
+ macaddr: 44:dd:ab:dd:11:44
+ name: server01
+ ip: 192.168.88.99
+ ddns: true
+ statements:
+ - filename "pxelinux.0"
+ - next-server 1.1.1.1
+ state: present
+
+- name: Remove a host using OMAPI
+ community.general.omapi_host:
+ key_name: defomapi
+ key: +bFQtBCta6j2vWkjPkNFtgA==
+ host: 10.1.1.1
+ macaddr: 00:66:ab:dd:11:44
+ state: absent
+'''
+
+RETURN = r'''
+lease:
+ description: dictionary containing host information
+ returned: success
+ type: complex
+ contains:
+ ip-address:
+      description: IP address, if one is assigned.
+ returned: success
+ type: str
+ sample: '192.168.1.5'
+ hardware-address:
+ description: MAC address
+ returned: success
+ type: str
+ sample: '00:11:22:33:44:55'
+ hardware-type:
+ description: hardware type, generally '1'
+ returned: success
+ type: int
+ sample: 1
+ name:
+ description: hostname
+ returned: success
+ type: str
+ sample: 'mydesktop'
+'''
+
+import binascii
+import socket
+import struct
+import traceback
+
+PUREOMAPI_IMP_ERR = None
+try:
+ from pypureomapi import Omapi, OmapiMessage, OmapiError, OmapiErrorNotFound
+ from pypureomapi import pack_ip, unpack_ip, pack_mac, unpack_mac
+ from pypureomapi import OMAPI_OP_STATUS, OMAPI_OP_UPDATE
+ pureomapi_found = True
+except ImportError:
+ PUREOMAPI_IMP_ERR = traceback.format_exc()
+ pureomapi_found = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+
+
+class OmapiHostManager:
+ def __init__(self, module):
+ self.module = module
+ self.omapi = None
+ self.connect()
+
+ def connect(self):
+ try:
+ self.omapi = Omapi(self.module.params['host'], self.module.params['port'], to_bytes(self.module.params['key_name']),
+ self.module.params['key'])
+ except binascii.Error:
+ self.module.fail_json(msg="Unable to open OMAPI connection. 'key' is not a valid base64 key.")
+ except OmapiError as e:
+ self.module.fail_json(msg="Unable to open OMAPI connection. Ensure 'host', 'port', 'key' and 'key_name' "
+ "are valid. Exception was: %s" % to_native(e))
+ except socket.error as e:
+ self.module.fail_json(msg="Unable to connect to OMAPI server: %s" % to_native(e))
+
+ def get_host(self, macaddr):
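+        # OMAPI answers with an UPDATE opcode when a matching host object exists.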
+ msg = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
+ msg.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
+ msg.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ return None
+ return response
+
+ @staticmethod
+ def unpack_facts(obj):
+ result = dict(obj)
+ if 'hardware-address' in result:
+ result['hardware-address'] = to_native(unpack_mac(result[to_bytes('hardware-address')]))
+
+ if 'ip-address' in result:
+ result['ip-address'] = to_native(unpack_ip(result[to_bytes('ip-address')]))
+
+ if 'hardware-type' in result:
+ result['hardware-type'] = struct.unpack("!I", result[to_bytes('hardware-type')])
+
+ return result
+
+ def setup_host(self):
+ if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
+            self.module.fail_json(msg="hostname attribute cannot be empty when adding or modifying a host.")
+
+ msg = None
+ host_response = self.get_host(self.module.params['macaddr'])
+ # If host was not found using macaddr, add create message
+ if host_response is None:
+ msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
+ msg.message.append((to_bytes('create'), struct.pack('!I', 1)))
+ msg.message.append((to_bytes('exclusive'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('hardware-address'), pack_mac(self.module.params['macaddr'])))
+ msg.obj.append((to_bytes('hardware-type'), struct.pack('!I', 1)))
+ msg.obj.append((to_bytes('name'), to_bytes(self.module.params['hostname'])))
+ if self.module.params['ip'] is not None:
+ msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))
+
+ stmt_join = ""
+ if self.module.params['ddns']:
+ stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])
+
+ try:
+ if len(self.module.params['statements']) > 0:
+ stmt_join += "; ".join(self.module.params['statements'])
+ stmt_join += "; "
+ except TypeError as e:
+ self.module.fail_json(msg="Invalid statements found: %s" % to_native(e))
+
+ if len(stmt_join) > 0:
+ msg.obj.append((to_bytes('statements'), to_bytes(stmt_join)))
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_UPDATE:
+ self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+        # The host already exists; forge an update message instead.
+ else:
+ response_obj = self.unpack_facts(host_response.obj)
+ fields_to_update = {}
+
+ if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
+ unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
+ fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])
+
+ # Name cannot be changed
+ if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
+ self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
+ "Please delete host and add new." %
+ (response_obj['name'], self.module.params['hostname']))
+
+ """
+ # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
+ if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
+ response_obj['statements'] != self.module.params['statements']:
+ with open('/tmp/omapi', 'w') as fb:
+ for (k,v) in iteritems(response_obj):
+ fb.writelines('statements: %s %s\n' % (k, v))
+ """
+ if len(fields_to_update) == 0:
+ self.module.exit_json(changed=False, lease=response_obj)
+ else:
+ msg = OmapiMessage.update(host_response.handle)
+ msg.update_object(fields_to_update)
+
+ try:
+ response = self.omapi.query_server(msg)
+ if response.opcode != OMAPI_OP_STATUS:
+ self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
+ "are valid.")
+ self.module.exit_json(changed=True)
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+ def remove_host(self):
+ try:
+ self.omapi.del_host(self.module.params['macaddr'])
+ self.module.exit_json(changed=True)
+ except OmapiErrorNotFound:
+ self.module.exit_json()
+ except OmapiError as e:
+ self.module.fail_json(msg="OMAPI error: %s" % to_native(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ host=dict(type='str', default="localhost"),
+ port=dict(type='int', default=7911),
+ key_name=dict(type='str', required=True),
+ key=dict(type='str', required=True, no_log=True),
+ macaddr=dict(type='str', required=True),
+ hostname=dict(type='str', aliases=['name']),
+ ip=dict(type='str'),
+ ddns=dict(type='bool', default=False),
+ statements=dict(type='list', elements='str', default=[]),
+ ),
+ supports_check_mode=False,
+ )
+
+ if not pureomapi_found:
+ module.fail_json(msg=missing_required_lib('pypureomapi'), exception=PUREOMAPI_IMP_ERR)
+
+ if module.params['key'] is None or len(module.params["key"]) == 0:
+ module.fail_json(msg="'key' parameter cannot be empty.")
+
+ if module.params['key_name'] is None or len(module.params["key_name"]) == 0:
+ module.fail_json(msg="'key_name' parameter cannot be empty.")
+
+ host_manager = OmapiHostManager(module)
+ try:
+ if module.params['state'] == 'present':
+ host_manager.setup_host()
+ elif module.params['state'] == 'absent':
+ host_manager.remove_host()
+ except ValueError as e:
+ module.fail_json(msg="OMAPI input value error: %s" % to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_host.py b/ansible_collections/community/general/plugins/modules/one_host.py
new file mode 100644
index 000000000..c4578f950
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_host.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 www.privaz.io Valletech AB
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: one_host
+
+short_description: Manages OpenNebula Hosts
+
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula Hosts"
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - Hostname of the machine to manage.
+ required: true
+ type: str
+ state:
+ description:
+ - Takes the host to the desired lifecycle state.
+ - If C(absent) the host will be deleted from the cluster.
+ - If C(present) the host will be created in the cluster (includes C(enabled), C(disabled) and C(offline) states).
+ - If C(enabled) the host is fully operational.
+ - If C(disabled) the host is disabled, for example to perform maintenance operations.
+ - If C(offline) the host is totally offline.
+ choices:
+ - absent
+ - present
+ - enabled
+ - disabled
+ - offline
+ default: present
+ type: str
+ im_mad_name:
+ description:
+ - The name of the information manager; these values are taken from oned.conf with the tag name IM_MAD (name).
+ default: kvm
+ type: str
+ vmm_mad_name:
+ description:
+ - The name of the virtual machine manager; these values are taken from oned.conf with the tag name VM_MAD (name).
+ default: kvm
+ type: str
+ cluster_id:
+ description:
+ - The cluster ID.
+ default: 0
+ type: int
+ cluster_name:
+ description:
+ - The cluster specified by name.
+ type: str
+ labels:
+ description:
+ - The labels for this host.
+ type: list
+ elements: str
+ template:
+ description:
+ - The template or attribute changes to merge into the host template.
+ aliases:
+ - attributes
+ type: dict
+
+extends_documentation_fragment:
+ - community.general.opennebula
+ - community.general.attributes
+
+author:
+ - Rafael del Valle (@rvalle)
+'''
+
+EXAMPLES = '''
+- name: Create a new host in OpenNebula
+ community.general.one_host:
+ name: host1
+ cluster_id: 1
+ api_url: http://127.0.0.1:2633/RPC2
+
+- name: Create a host and adjust its template
+ community.general.one_host:
+ name: host2
+ cluster_name: default
+ template:
+ LABELS:
+ - gold
+ - ssd
+ RESERVED_CPU: -100
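+
+# Illustrative extra example (hypothetical host name): disable an existing host for maintenance
+- name: Disable host1 for maintenance
+ community.general.one_host:
+ name: host1
+ state: disabled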
+'''
+
+# TODO: pending setting guidelines on returned values
+RETURN = '''
+'''
+
+# TODO: Documentation on valid state transitions is required to properly implement all valid cases
+# TODO: To be coherent with CLI this module should also provide "flush" functionality
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+try:
+ from pyone import HOST_STATES, HOST_STATUS
+except ImportError:
+ pass # handled at module utils
+
+
+# Pseudo definitions...
+
+HOST_ABSENT = -99 # the host is absent (special case defined by this module)
+
+
+class HostModule(OpenNebulaModule):
+
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'offline'], default='present'),
+ im_mad_name=dict(type='str', default="kvm"),
+ vmm_mad_name=dict(type='str', default="kvm"),
+ cluster_id=dict(type='int', default=0),
+ cluster_name=dict(type='str'),
+ labels=dict(type='list', elements='str'),
+ template=dict(type='dict', aliases=['attributes']),
+ )
+
+ mutually_exclusive = [
+ ['cluster_id', 'cluster_name']
+ ]
+
+ OpenNebulaModule.__init__(self, argument_spec, mutually_exclusive=mutually_exclusive)
+
+ def allocate_host(self):
+ """
+ Creates a host entry in OpenNebula
+ Returns: True on success, fails otherwise.
+
+ """
+ if not self.one.host.allocate(self.get_parameter('name'),
+ self.get_parameter('vmm_mad_name'),
+ self.get_parameter('im_mad_name'),
+ self.get_parameter('cluster_id')):
+ self.fail(msg="could not allocate host")
+ else:
+ self.result['changed'] = True
+ return True
+
+ def wait_for_host_state(self, host, target_states):
+ """
+ Utility method that waits until the host reaches one of the target states.
+ Args:
+ host: the host object to monitor.
+ target_states: list of HOST_STATES to wait for.
+
+ """
+ return self.wait_for_state('host',
+ lambda: self.one.host.info(host.ID).STATE,
+ lambda s: HOST_STATES(s).name, target_states,
+ invalid_states=[HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR])
+
+ def run(self, one, module, result):
+
+ # Get the list of hosts
+ host_name = self.get_parameter("name")
+ host = self.get_host_by_name(host_name)
+
+ # manage host state
+ desired_state = self.get_parameter('state')
+ if bool(host):
+ current_state = host.STATE
+ current_state_name = HOST_STATES(host.STATE).name
+ else:
+ current_state = HOST_ABSENT
+ current_state_name = "ABSENT"
+
+ # apply properties
+ if desired_state == 'present':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.ERROR, HOST_STATES.MONITORING_ERROR]:
+ self.fail(msg="invalid host state %s" % current_state_name)
+
+ elif desired_state == 'enabled':
+ if current_state == HOST_ABSENT:
+ self.allocate_host()
+ host = self.get_host_by_name(host_name)
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ elif current_state in [HOST_STATES.DISABLED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.ENABLED):
+ self.wait_for_host_state(host, [HOST_STATES.MONITORED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not enable host")
+ elif current_state in [HOST_STATES.MONITORED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to enable" % current_state_name)
+
+ elif desired_state == 'disabled':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be put in disabled state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.OFFLINE]:
+ if one.host.status(host.ID, HOST_STATUS.DISABLED):
+ self.wait_for_host_state(host, [HOST_STATES.DISABLED])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not disable host")
+ elif current_state in [HOST_STATES.DISABLED]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to disable" % current_state_name)
+
+ elif desired_state == 'offline':
+ if current_state == HOST_ABSENT:
+ self.fail(msg='absent host cannot be placed in offline state')
+ elif current_state in [HOST_STATES.MONITORED, HOST_STATES.DISABLED]:
+ if one.host.status(host.ID, HOST_STATUS.OFFLINE):
+ self.wait_for_host_state(host, [HOST_STATES.OFFLINE])
+ result['changed'] = True
+ else:
+ self.fail(msg="could not set host offline")
+ elif current_state in [HOST_STATES.OFFLINE]:
+ pass
+ else:
+ self.fail(msg="unknown host state %s, cowardly refusing to change state to offline" % current_state_name)
+
+ elif desired_state == 'absent':
+ if current_state != HOST_ABSENT:
+ if one.host.delete(host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="could not delete host from cluster")
+
+ # if we reach this point we can assume that the host was taken to the desired state
+
+ if desired_state != "absent":
+ # manipulate or modify the template
+ desired_template_changes = self.get_parameter('template')
+
+ if desired_template_changes is None:
+ desired_template_changes = dict()
+
+ # complete the template with specific ansible parameters
+ if self.is_parameter('labels'):
+ desired_template_changes['LABELS'] = self.get_parameter('labels')
+
+ if self.requires_template_update(host.TEMPLATE, desired_template_changes):
+ # setup the root element so that pyone will generate XML instead of attribute vector
+ desired_template_changes = {"TEMPLATE": desired_template_changes}
+ if one.host.update(host.ID, desired_template_changes, 1): # merge the template
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host template")
+
+ # the cluster
+ if host.CLUSTER_ID != self.get_parameter('cluster_id'):
+ if one.cluster.addhost(self.get_parameter('cluster_id'), host.ID):
+ result['changed'] = True
+ else:
+ self.fail(msg="failed to update the host cluster")
+
+ # return
+ self.exit()
+
+
+def main():
+ HostModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_image.py b/ansible_collections/community/general/plugins/modules/one_image.py
new file mode 100644
index 000000000..a50b33e93
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_image.py
@@ -0,0 +1,414 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Milan Ilic <milani@nordeus.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_image
+short_description: Manages OpenNebula images
+description:
+ - Manages OpenNebula images
+requirements:
+ - pyone
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ id:
+ description:
+ - The C(id) of the image you would like to manage.
+ type: int
+ name:
+ description:
+ - The C(name) of the image you would like to manage.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the image
+ - C(absent) - delete the image
+ - C(cloned) - clone the image
+ - C(renamed) - rename the image to the C(new_name)
+ choices: ["present", "absent", "cloned", "renamed"]
+ default: present
+ type: str
+ enabled:
+ description:
+ - Whether the image should be enabled or disabled.
+ type: bool
+ new_name:
+ description:
+ - A name that will be assigned to the existing or new image.
+ - In the case of cloning, by default C(new_name) will take the name of the original image with the prefix 'Copy of'.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the IMAGE by id
+ community.general.one_image:
+ id: 45
+ register: result
+
+- name: Print the IMAGE properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Rename existing IMAGE
+ community.general.one_image:
+ id: 34
+ state: renamed
+ new_name: bar-image
+
+- name: Disable the IMAGE by id
+ community.general.one_image:
+ id: 37
+ enabled: false
+
+- name: Enable the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ enabled: true
+
+- name: Clone the IMAGE by name
+ community.general.one_image:
+ name: bar-image
+ state: cloned
+ new_name: bar-image-clone
+ register: result
+
+- name: Delete the IMAGE by id
+ community.general.one_image:
+ id: '{{ result.id }}'
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: image id
+ type: int
+ returned: success
+ sample: 153
+name:
+ description: image name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: image's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: image's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: image's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: image's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of image instance
+ type: str
+ returned: success
+ sample: READY
+used:
+ description: is image in use
+ type: bool
+ returned: success
+ sample: true
+running_vms:
+ description: count of running vms that use this image
+ type: int
+ returned: success
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_image(module, client, predicate):
+ # Filter -2 means fetch all images user can Use
+ pool = client.imagepool.info(-2, -1, -1, -1)
+
+ for image in pool.IMAGE:
+ if predicate(image):
+ return image
+
+ return None
+
+
+def get_image_by_name(module, client, image_name):
+ return get_image(module, client, lambda image: (image.NAME == image_name))
+
+
+def get_image_by_id(module, client, image_id):
+ return get_image(module, client, lambda image: (image.ID == image_id))
+
+
+def get_image_instance(module, client, requested_id, requested_name):
+ if requested_id:
+ return get_image_by_id(module, client, requested_id)
+ else:
+ return get_image_by_name(module, client, requested_name)
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
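+# image.STATE is an integer index into this list, e.g. IMAGE_STATES.index('READY') == 1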
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+
+ return info
+
+
+def wait_for_state(module, client, image_id, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ image = client.image.info(image_id)
+ state = image.STATE
+
+ if state_predicate(state):
+ return image
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_ready(module, client, image_id, wait_timeout=60):
+ return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
+
+
+def wait_for_delete(module, client, image_id, wait_timeout=60):
+ return wait_for_state(module, client, image_id, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
+
+
+def enable_image(module, client, image, enable):
+ image = client.image.info(image.ID)
+ changed = False
+
+ state = image.STATE
+
+ if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
+ if enable:
+ module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
+ else:
+ module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
+
+ if ((enable and state != IMAGE_STATES.index('READY')) or
+ (not enable and state != IMAGE_STATES.index('DISABLED'))):
+ changed = True
+
+ if changed and not module.check_mode:
+ client.image.enable(image.ID, enable)
+
+ result = get_image_info(image)
+ result['changed'] = changed
+
+ return result
+
+
+def clone_image(module, client, image, new_name):
+ if new_name is None:
+ new_name = "Copy of " + image.NAME
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ result = get_image_info(tmp_image)
+ result['changed'] = False
+ return result
+
+ if image.STATE == IMAGE_STATES.index('DISABLED'):
+ module.fail_json(msg="Cannot clone DISABLED image")
+
+ if not module.check_mode:
+ new_id = client.image.clone(image.ID, new_name)
+ wait_for_ready(module, client, new_id)
+ image = client.image.info(new_id)
+
+ result = get_image_info(image)
+ result['changed'] = True
+
+ return result
+
+
+def rename_image(module, client, image, new_name):
+ if new_name is None:
+ module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
+
+ if new_name == image.NAME:
+ result = get_image_info(image)
+ result['changed'] = False
+ return result
+
+ tmp_image = get_image_by_name(module, client, new_name)
+ if tmp_image:
+ module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.ID))
+
+ if not module.check_mode:
+ client.image.rename(image.ID, new_name)
+
+ result = get_image_info(image)
+ result['changed'] = True
+ return result
+
+
+def delete_image(module, client, image):
+
+ if not image:
+ return {'changed': False}
+
+ if image.RUNNING_VMS > 0:
+ module.fail_json(msg="Cannot delete image. There are " + str(image.RUNNING_VMS) + " VMs using it.")
+
+ if not module.check_mode:
+ client.image.delete(image.ID)
+ wait_for_delete(module, client, image.ID)
+
+ return {'changed': True}
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "id": {"required": False, "type": "int"},
+ "name": {"required": False, "type": "str"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'cloned', 'renamed'],
+ "type": "str"
+ },
+ "enabled": {"required": False, "type": "bool"},
+ "new_name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['id', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ state = params.get('state')
+ enabled = params.get('enabled')
+ new_name = params.get('new_name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ result = {}
+
+ if not id and state == 'renamed':
+ module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
+
+ image = get_image_instance(module, client, id, name)
+ if not image and state != 'absent':
+ if id:
+ module.fail_json(msg="There is no image with id=" + str(id))
+ else:
+ module.fail_json(msg="There is no image with name=" + name)
+
+ if state == 'absent':
+ result = delete_image(module, client, image)
+ else:
+ result = get_image_info(image)
+ changed = False
+ result['changed'] = False
+
+ if enabled is not None:
+ result = enable_image(module, client, image, enabled)
+ if state == "cloned":
+ result = clone_image(module, client, image, new_name)
+ elif state == "renamed":
+ result = rename_image(module, client, image, new_name)
+
+ changed = changed or result['changed']
+ result['changed'] = changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_image_info.py b/ansible_collections/community/general/plugins/modules/one_image_info.py
new file mode 100644
index 000000000..938f0ef2a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_image_info.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Milan Ilic <milani@nordeus.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_image_info
+short_description: Gather information on OpenNebula images
+description:
+ - Gather information on OpenNebula images.
+ - This module was called C(one_image_facts) before Ansible 2.9. The usage did not change.
+requirements:
+ - pyone
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not
+ - transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula RPC server. If not set
+ - then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula RPC server. If not set
+ - then the value of the C(ONE_PASSWORD) environment variable is used.
+ type: str
+ ids:
+ description:
+ - A list of image IDs whose facts you want to gather.
+ aliases: ['id']
+ type: list
+ elements: str
+ name:
+ description:
+ - A C(name) of the image whose facts will be gathered.
+ - If the C(name) begins with '~' the C(name) is treated as a regex pattern
+ - that restricts the returned images to those whose names match the regex.
+ - If the C(name) begins with '~*' the matching is additionally case-insensitive.
+ - See examples for more details.
+ type: str
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+EXAMPLES = '''
+- name: Gather facts about all images
+ community.general.one_image_info:
+ register: result
+
+- name: Print all images facts
+ ansible.builtin.debug:
+ msg: result
+
+- name: Gather facts about an image using ID
+ community.general.one_image_info:
+ ids:
+ - 123
+
+- name: Gather facts about an image using the name
+ community.general.one_image_info:
+ name: 'foo-image'
+ register: foo_image
+
+- name: Gather facts about all IMAGEs whose name matches regex 'app-image-.*'
+ community.general.one_image_info:
+ name: '~app-image-.*'
+ register: app_images
+
+- name: Gather facts about all IMAGEs whose name matches regex 'foo-image-.*' ignoring cases
+ community.general.one_image_info:
+ name: '~*foo-image-.*'
+ register: foo_images
+'''
+
+RETURN = '''
+images:
+ description: A list of image info dictionaries
+ type: complex
+ returned: success
+ contains:
+ id:
+ description: image id
+ type: int
+ sample: 153
+ name:
+ description: image name
+ type: str
+ sample: app1
+ group_id:
+ description: image's group id
+ type: int
+ sample: 1
+ group_name:
+ description: image's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: image's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: image's owner name
+ type: str
+ sample: ansible-test
+ state:
+ description: state of image instance
+ type: str
+ sample: READY
+ used:
+ description: is image in use
+ type: bool
+ sample: true
+ running_vms:
+ description: count of running vms that use this image
+ type: int
+ sample: 7
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+
+
+def get_all_images(client):
+ pool = client.imagepool.info(-2, -1, -1, -1)
+ # Filter -2 means fetch all images user can Use
+
+ return pool
+
+
+IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
+
+
+def get_image_info(image):
+ info = {
+ 'id': image.ID,
+ 'name': image.NAME,
+ 'state': IMAGE_STATES[image.STATE],
+ 'running_vms': image.RUNNING_VMS,
+ 'used': bool(image.RUNNING_VMS),
+ 'user_name': image.UNAME,
+ 'user_id': image.UID,
+ 'group_name': image.GNAME,
+ 'group_id': image.GID,
+ }
+ return info
+
+
+def get_images_by_ids(module, client, ids):
+ images = []
+ pool = get_all_images(client)
+
+ for image in pool.IMAGE:
+ if str(image.ID) in ids:
+ images.append(image)
+ ids.remove(str(image.ID))
+ if len(ids) == 0:
+ break
+
+ if len(ids) > 0:
+ module.fail_json(msg='No IMAGE found with id(s)=' + ', '.join(str(image_id) for image_id in ids))
+
+ return images
+
+
+def get_images_by_name(module, client, name_pattern):
+
+ images = []
+ pattern = None
+
+ pool = get_all_images(client)
+
+ if name_pattern.startswith('~'):
+ import re
+ if name_pattern[1] == '*':
+ pattern = re.compile(name_pattern[2:], re.IGNORECASE)
+ else:
+ pattern = re.compile(name_pattern[1:])
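+ # e.g. (hypothetical names) '~app-image-.*' compiles the regex 'app-image-.*';
+ # '~*app-image-.*' compiles the same pattern with re.IGNORECASE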
+
+ for image in pool.IMAGE:
+ if pattern is not None:
+ if pattern.match(image.NAME):
+ images.append(image)
+ elif name_pattern == image.NAME:
+ images.append(image)
+ break
+
+ # an exact name (no regex pattern) was given but no image matched
+ if pattern is None and len(images) == 0:
+ module.fail_json(msg="There is no IMAGE with name=" + name_pattern)
+
+ return images
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "ids": {"required": False, "aliases": ['id'], "type": "list", "elements": "str"},
+ "name": {"required": False, "type": "str"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[['ids', 'name']],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ ids = params.get('ids')
+ name = params.get('name')
+ client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if ids:
+ images = get_images_by_ids(module, client, ids)
+ elif name:
+ images = get_images_by_name(module, client, name)
+ else:
+ images = get_all_images(client).IMAGE
+
+ result = {
+ 'images': [get_image_info(image) for image in images],
+ }
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_service.py b/ansible_collections/community/general/plugins/modules/one_service.py
new file mode 100644
index 000000000..4f5143887
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_service.py
@@ -0,0 +1,759 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Milan Ilic <milani@nordeus.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_service
+short_description: Deploy and manage OpenNebula services
+description:
+ - Manage OpenNebula services
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula OneFlow API server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONEFLOW_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to login into the OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to login into OpenNebula OneFlow API server. If not set then the value of the C(ONEFLOW_PASSWORD) environment variable is used.
+ type: str
+ template_name:
+ description:
+ - Name of service template to use to create a new instance of a service.
+ type: str
+ template_id:
+ description:
+ - ID of a service template to use to create a new instance of a service.
+ type: int
+ service_id:
+ description:
+ - ID of a service instance that you would like to manage.
+ type: int
+ service_name:
+ description:
+ - Name of a service instance that you would like to manage.
+ type: str
+ unique:
+ description:
+ - Setting I(unique=true) will make sure that there is only one service instance running with a name set with C(service_name) when
+ instantiating a service from a template specified with I(template_id) or I(template_name). Check examples below.
+ type: bool
+ default: false
+ state:
+ description:
+ - C(present) - instantiate a service from a template specified with I(template_id) or I(template_name).
+ - C(absent) - terminate an instance of a service specified with I(template_id) or I(template_name).
+ choices: ["present", "absent"]
+ default: present
+ type: str
+ mode:
+ description:
+ - Set permission mode of a service instance in octet format, e.g. C(600) to give owner C(use) and C(manage) and nothing to group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user which will be set as the owner of the service.
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the service.
+ type: int
+ wait:
+ description:
+ - Wait for the instance to reach RUNNING state after DEPLOYING or COOLDOWN state after SCALING.
+ type: bool
+ default: false
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ default: 300
+ type: int
+ custom_attrs:
+ description:
+ - Dictionary of key/value custom attributes which will be used when instantiating a new service.
+ default: {}
+ type: dict
+ role:
+ description:
+ - Name of the role whose cardinality should be changed.
+ type: str
+ cardinality:
+ description:
+ - Number of VMs for the specified role.
+ type: int
+ force:
+ description:
+ - Force the new cardinality even if it is outside the limits.
+ type: bool
+ default: false
+author:
+ - "Milan Ilic (@ilicmilan)"
+'''
+
+EXAMPLES = '''
+- name: Instantiate a new service
+ community.general.one_service:
+ template_id: 90
+ register: result
+
+- name: Print service properties
+ ansible.builtin.debug:
+ msg: result
+
+- name: Instantiate a new service with specified service_name, service group and mode
+ community.general.one_service:
+ template_name: 'app1_template'
+ service_name: 'app1'
+ group_id: 1
+ mode: '660'
+
+- name: Instantiate a new service with template_id and pass custom_attrs dict
+ community.general.one_service:
+ template_id: 90
+ custom_attrs:
+ public_network_id: 21
+ private_network_id: 26
+
+- name: Instantiate a new service 'foo' if the service doesn't already exist, otherwise do nothing
+ community.general.one_service:
+ template_id: 53
+ service_name: 'foo'
+ unique: true
+
+- name: Delete a service by ID
+ community.general.one_service:
+ service_id: 153
+ state: absent
+
+- name: Get service info
+ community.general.one_service:
+ service_id: 153
+ register: service_info
+
+- name: Change service owner, group and mode
+ community.general.one_service:
+ service_name: 'app2'
+ owner_id: 34
+ group_id: 113
+ mode: '600'
+
+- name: Instantiate service and wait for it to become RUNNING
+ community.general.one_service:
+ template_id: 43
+ service_name: 'foo1'
+
+- name: Wait service to become RUNNING
+ community.general.one_service:
+ service_id: 112
+ wait: true
+
+- name: Change role cardinality
+ community.general.one_service:
+ service_id: 153
+ role: bar
+ cardinality: 5
+
+- name: Change role cardinality and wait for it to be applied
+ community.general.one_service:
+ service_id: 112
+ role: foo
+ cardinality: 7
+ wait: true
+'''
+
+RETURN = '''
+service_id:
+ description: service id
+ type: int
+ returned: success
+ sample: 153
+service_name:
+ description: service name
+ type: str
+ returned: success
+ sample: app1
+group_id:
+ description: service's group id
+ type: int
+ returned: success
+ sample: 1
+group_name:
+ description: service's group name
+ type: str
+ returned: success
+ sample: one-users
+owner_id:
+ description: service's owner id
+ type: int
+ returned: success
+ sample: 143
+owner_name:
+ description: service's owner name
+ type: str
+ returned: success
+ sample: ansible-test
+state:
+ description: state of service instance
+ type: str
+ returned: success
+ sample: RUNNING
+mode:
+ description: service's mode
+ type: int
+ returned: success
+ sample: 660
+roles:
+ description: list of dictionaries of roles, each role is described by name, cardinality, state and nodes ids
+ type: list
+ returned: success
+ sample:
+ - {"cardinality": 1,"name": "foo","state": "RUNNING", "ids": [ 123, 456 ]}
+ - {"cardinality": 2,"name": "bar","state": "RUNNING", "ids": [ 452, 567, 746 ]}
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+
+STATES = ("PENDING", "DEPLOYING", "RUNNING", "UNDEPLOYING", "WARNING", "DONE",
+ "FAILED_UNDEPLOYING", "FAILED_DEPLOYING", "SCALING", "FAILED_SCALING", "COOLDOWN")
+
+
+def get_all_templates(module, auth):
+ try:
+ all_templates = open_url(url=(auth.url + "/service_template"), method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(all_templates.read())
+
+
+def get_template(module, auth, pred):
+ all_templates_dict = get_all_templates(module, auth)
+
+ found = 0
+ found_template = None
+ template_name = ''
+
+ if "DOCUMENT_POOL" in all_templates_dict and "DOCUMENT" in all_templates_dict["DOCUMENT_POOL"]:
+ for template in all_templates_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(template):
+ found = found + 1
+ found_template = template
+ template_name = template["NAME"]
+
+ if found <= 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg="There is no template with unique name: " + template_name)
+ else:
+ return found_template
+
+
+def get_all_services(module, auth):
+ try:
+ response = open_url(auth.url + "/service", method="GET", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ return module.from_json(response.read())
+
+
+def get_service(module, auth, pred):
+ all_services_dict = get_all_services(module, auth)
+
+ found = 0
+ found_service = None
+ service_name = ''
+
+ if "DOCUMENT_POOL" in all_services_dict and "DOCUMENT" in all_services_dict["DOCUMENT_POOL"]:
+ for service in all_services_dict["DOCUMENT_POOL"]["DOCUMENT"]:
+ if pred(service):
+ found = found + 1
+ found_service = service
+ service_name = service["NAME"]
+
+ # fail if there are more services with same name
+ if found > 1:
+ module.fail_json(msg="There are multiple services with a name: '" +
+ service_name + "'. You have to use a unique service name or use 'service_id' instead.")
+ elif found <= 0:
+ return None
+ else:
+ return found_service
+
+
+def get_service_by_id(module, auth, service_id):
+ return get_service(module, auth, lambda service: (int(service["ID"]) == int(service_id))) if service_id else None
+
+
+def get_service_by_name(module, auth, service_name):
+ return get_service(module, auth, lambda service: (service["NAME"] == service_name))
+
+
+def get_service_info(module, auth, service):
+
+ result = {
+ "service_id": int(service["ID"]),
+ "service_name": service["NAME"],
+ "group_id": int(service["GID"]),
+ "group_name": service["GNAME"],
+ "owner_id": int(service["UID"]),
+ "owner_name": service["UNAME"],
+ "state": STATES[service["TEMPLATE"]["BODY"]["state"]]
+ }
+
+ roles_status = service["TEMPLATE"]["BODY"]["roles"]
+ roles = []
+ for role in roles_status:
+ nodes_ids = []
+ if "nodes" in role:
+ for node in role["nodes"]:
+ nodes_ids.append(node["deploy_id"])
+ roles.append({"name": role["name"], "cardinality": role["cardinality"], "state": STATES[int(role["state"])], "ids": nodes_ids})
+
+ result["roles"] = roles
+ result["mode"] = int(parse_service_permissions(service))
+
+ return result
+
+
+def create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout):
+ # make sure that the values in custom_attrs dict are strings
+ custom_attrs_with_str = dict((k, str(v)) for k, v in custom_attrs.items())
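+ # e.g. (from the docs example) {"public_network_id": 21} becomes {"public_network_id": "21"}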
+
+ data = {
+ "action": {
+ "perform": "instantiate",
+ "params": {
+ "merge_template": {
+ "custom_attrs_values": custom_attrs_with_str,
+ "name": service_name
+ }
+ }
+ }
+ }
+
+ try:
+ response = open_url(auth.url + "/service_template/" + str(template_id) + "/action", method="POST",
+ data=module.jsonify(data), force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ service_result = module.from_json(response.read())["DOCUMENT"]
+
+ return service_result
+
+
+def wait_for_service_to_become_ready(module, auth, service_id, wait_timeout):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id), method="GET",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Request for service status has failed. Error message: " + str(e))
+
+ status_result = module.from_json(status_result.read())
+ service_state = status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["state"]
+
+ if service_state in [STATES.index("RUNNING"), STATES.index("COOLDOWN")]:
+ return status_result["DOCUMENT"]
+ elif service_state not in [STATES.index("PENDING"), STATES.index("DEPLOYING"), STATES.index("SCALING")]:
+ log_message = ''
+ for log_info in status_result["DOCUMENT"]["TEMPLATE"]["BODY"]["log"]:
+ if log_info["severity"] == "E":
+ log_message = log_message + log_info["message"]
+ break
+
+ module.fail_json(msg="Deploying is unsuccessful. Service state: " + STATES[service_state] + ". Error message: " + log_message)
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired")
+
+
+def change_service_permissions(module, auth, service_id, permissions):
+
+ data = {
+ "action": {
+ "perform": "chmod",
+ "params": {"octet": permissions}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_owner(module, auth, service_id, owner_id):
+ data = {
+ "action": {
+ "perform": "chown",
+ "params": {"owner_id": owner_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_service_group(module, auth, service_id, group_id):
+
+ data = {
+ "action": {
+ "perform": "chgrp",
+ "params": {"group_id": group_id}
+ }
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/action", method="POST", force_basic_auth=True,
+ url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def change_role_cardinality(module, auth, service_id, role, cardinality, force):
+
+ data = {
+ "cardinality": cardinality,
+ "force": force
+ }
+
+ try:
+ status_result = open_url(auth.url + "/service/" + str(service_id) + "/role/" + role, method="PUT",
+ force_basic_auth=True, url_username=auth.user, url_password=auth.password, data=module.jsonify(data))
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if status_result.getcode() != 204:
+ module.fail_json(msg="Failed to change cardinality for role: " + role + ". Return code: " + str(status_result.getcode()))
+
+
+def check_change_service_owner(module, service, owner_id):
+ old_owner_id = int(service["UID"])
+
+ return old_owner_id != owner_id
+
+
+def check_change_service_group(module, service, group_id):
+ old_group_id = int(service["GID"])
+
+ return old_group_id != group_id
+
+
+def parse_service_permissions(service):
+ perm_dict = service["PERMISSIONS"]
+ '''
+ This is the structure of the 'PERMISSIONS' dictionary:
+
+ "PERMISSIONS": {
+ "OWNER_U": "1",
+ "OWNER_M": "1",
+ "OWNER_A": "0",
+ "GROUP_U": "0",
+ "GROUP_M": "0",
+ "GROUP_A": "0",
+ "OTHER_U": "0",
+ "OTHER_M": "0",
+ "OTHER_A": "0"
+ }
+ '''
+
+ owner_octal = int(perm_dict["OWNER_U"]) * 4 + int(perm_dict["OWNER_M"]) * 2 + int(perm_dict["OWNER_A"])
+ group_octal = int(perm_dict["GROUP_U"]) * 4 + int(perm_dict["GROUP_M"]) * 2 + int(perm_dict["GROUP_A"])
+ other_octal = int(perm_dict["OTHER_U"]) * 4 + int(perm_dict["OTHER_M"]) * 2 + int(perm_dict["OTHER_A"])
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
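+ # worked example for the sample PERMISSIONS above:
+ # owner = 1*4 + 1*2 + 0 = 6, group = 0, other = 0 -> "600"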
+
+ return permissions
+
+
+def check_change_service_permissions(module, service, permissions):
+ old_permissions = parse_service_permissions(service)
+
+ return old_permissions != permissions
+
+
+def check_change_role_cardinality(module, service, role_name, cardinality):
+ roles_list = service["TEMPLATE"]["BODY"]["roles"]
+
+ for role in roles_list:
+ if role["name"] == role_name:
+ return int(role["cardinality"]) != cardinality
+
+ module.fail_json(msg="There is no role with name: " + role_name)
+
+
+def create_service_and_operation(module, auth, template_id, service_name, owner_id, group_id, permissions, custom_attrs, unique, wait, wait_timeout):
+ if not service_name:
+ service_name = ''
+ changed = False
+ service = None
+
+ if unique:
+ service = get_service_by_name(module, auth, service_name)
+
+ if not service:
+ if not module.check_mode:
+ service = create_service(module, auth, template_id, service_name, custom_attrs, unique, wait, wait_timeout)
+ changed = True
+
+ # if check_mode=true and there would be changes, the service does not exist yet and we cannot fetch it
+ if module.check_mode and changed:
+ return {"changed": True}
+
+ result = service_operation(module, auth, owner_id=owner_id, group_id=group_id, wait=wait,
+ wait_timeout=wait_timeout, permissions=permissions, service=service)
+
+ if result["changed"]:
+ changed = True
+
+ result["changed"] = changed
+
+ return result
+
+
+def service_operation(module, auth, service_id=None, owner_id=None, group_id=None, permissions=None,
+ role=None, cardinality=None, force=None, wait=False, wait_timeout=None, service=None):
+
+ changed = False
+
+ if not service:
+ service = get_service_by_id(module, auth, service_id)
+ else:
+ service_id = service["ID"]
+
+ if not service:
+ module.fail_json(msg="There is no service with id: " + str(service_id))
+
+ if owner_id:
+ if check_change_service_owner(module, service, owner_id):
+ if not module.check_mode:
+ change_service_owner(module, auth, service_id, owner_id)
+ changed = True
+ if group_id:
+ if check_change_service_group(module, service, group_id):
+ if not module.check_mode:
+ change_service_group(module, auth, service_id, group_id)
+ changed = True
+ if permissions:
+ if check_change_service_permissions(module, service, permissions):
+ if not module.check_mode:
+ change_service_permissions(module, auth, service_id, permissions)
+ changed = True
+
+ if role:
+ if check_change_role_cardinality(module, service, role, cardinality):
+ if not module.check_mode:
+ change_role_cardinality(module, auth, service_id, role, cardinality, force)
+ changed = True
+
+ if wait and not module.check_mode:
+ service = wait_for_service_to_become_ready(module, auth, service_id, wait_timeout)
+
+ # if something has changed, fetch service info again
+ if changed:
+ service = get_service_by_id(module, auth, service_id)
+
+ service_info = get_service_info(module, auth, service)
+ service_info["changed"] = changed
+
+ return service_info
+
+
+def delete_service(module, auth, service_id):
+ service = get_service_by_id(module, auth, service_id)
+ if not service:
+ return {"changed": False}
+
+ service_info = get_service_info(module, auth, service)
+
+ service_info["changed"] = True
+
+ if module.check_mode:
+ return service_info
+
+ try:
+ result = open_url(auth.url + '/service/' + str(service_id), method="DELETE", force_basic_auth=True, url_username=auth.user, url_password=auth.password)
+ except Exception as e:
+ module.fail_json(msg="Service deletion has failed. Error message: " + str(e))
+
+ return service_info
+
+
+def get_template_by_name(module, auth, template_name):
+ return get_template(module, auth, lambda template: (template["NAME"] == template_name))
+
+
+def get_template_by_id(module, auth, template_id):
+ return get_template(module, auth, lambda template: (int(template["ID"]) == int(template_id))) if template_id else None
+
+
+def get_template_id(module, auth, requested_id, requested_name):
+ template = get_template_by_id(module, auth, requested_id) if requested_id else get_template_by_name(module, auth, requested_name)
+
+ if template:
+ return template["ID"]
+
+ return None
+
+
+def get_service_id_by_name(module, auth, service_name):
+ service = get_service_by_name(module, auth, service_name)
+
+ if service:
+ return service["ID"]
+
+ return None
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONEFLOW_URL')
+
+ if not username:
+ username = os.environ.get('ONEFLOW_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONEFLOW_PASSWORD')
+
+ if not (url and username and password):
+ module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'user', 'password'))
+
+ return auth_params(url=url, user=username, password=password)
+
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "service_name": {"required": False, "type": "str"},
+ "service_id": {"required": False, "type": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "unique": {"default": False, "type": "bool"},
+ "wait": {"default": False, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "custom_attrs": {"default": {}, "type": "dict"},
+ "role": {"required": False, "type": "str"},
+ "cardinality": {"required": False, "type": "int"},
+ "force": {"default": False, "type": "bool"}
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'service_id'],
+ ['service_id', 'service_name'],
+ ['template_id', 'template_name', 'role'],
+ ['template_id', 'template_name', 'cardinality'],
+ ['service_id', 'custom_attrs']
+ ],
+ required_together=[['role', 'cardinality']],
+ supports_check_mode=True)
+
+ auth = get_connection_info(module)
+ params = module.params
+ service_name = params.get('service_name')
+ service_id = params.get('service_id')
+
+ requested_template_id = params.get('template_id')
+ requested_template_name = params.get('template_name')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ unique = params.get('unique')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ custom_attrs = params.get('custom_attrs')
+ role = params.get('role')
+ cardinality = params.get('cardinality')
+ force = params.get('force')
+
+ template_id = None
+
+ if requested_template_id or requested_template_name:
+ template_id = get_template_id(module, auth, requested_template_id, requested_template_name)
+ if not template_id:
+ if requested_template_id:
+ module.fail_json(msg="There is no template with template_id: " + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ if unique and not service_name:
+ module.fail_json(msg="You cannot use unique without passing service_name!")
+
+ if template_id and state == 'absent':
+ module.fail_json(msg="State absent is not valid for template")
+
+ if template_id and state == 'present': # Instantiate a service
+ result = create_service_and_operation(module, auth, template_id, service_name, owner_id,
+ group_id, permissions, custom_attrs, unique, wait, wait_timeout)
+ else:
+ if not (service_id or service_name):
+ module.fail_json(msg="To manage the service at least the service id or service name should be specified!")
+ if custom_attrs:
+ module.fail_json(msg="You can only set custom_attrs when instantiate service!")
+
+ if not service_id:
+ service_id = get_service_id_by_name(module, auth, service_name)
+ # The task should be failed when we want to manage a non-existent service identified by its name
+ if not service_id and state == 'present':
+ module.fail_json(msg="There is no service with name: " + service_name)
+
+ if state == 'absent':
+ result = delete_service(module, auth, service_id)
+ else:
+ result = service_operation(module, auth, service_id, owner_id, group_id, permissions, role, cardinality, force, wait, wait_timeout)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_template.py b/ansible_collections/community/general/plugins/modules/one_template.py
new file mode 100644
index 000000000..97d0f856e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_template.py
@@ -0,0 +1,284 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Georg Gadinger <nilsding@nilsding.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_template
+
+short_description: Manages OpenNebula templates
+
+version_added: 2.4.0
+
+requirements:
+ - pyone
+
+description:
+ - "Manages OpenNebula templates."
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - Note that check mode always returns C(changed=true) for existing templates, even if the template would not actually change.
+ diff_mode:
+ support: none
+
+options:
+ id:
+ description:
+ - The I(id) of the template you would like to manage. If not set then a
+ - new template will be created with the given I(name).
+ type: int
+ name:
+ description:
+ - The I(name) of the template you would like to manage. If a template with
+ - the given name does not exist it will be created, otherwise it will be
+ - managed by this module.
+ type: str
+ template:
+ description:
+ - A string containing the template contents.
+ type: str
+ state:
+ description:
+ - C(present) - state that is used to manage the template.
+ - C(absent) - delete the template.
+ choices: ["present", "absent"]
+ default: present
+ type: str
+
+extends_documentation_fragment:
+ - community.general.opennebula
+ - community.general.attributes
+
+author:
+ - "Georg Gadinger (@nilsding)"
+'''
+
+EXAMPLES = '''
+- name: Fetch the TEMPLATE by id
+ community.general.one_template:
+ id: 6459
+ register: result
+
+- name: Print the TEMPLATE properties
+ ansible.builtin.debug:
+ var: result
+
+- name: Fetch the TEMPLATE by name
+ community.general.one_template:
+ name: tf-prd-users-workerredis-p6379a
+ register: result
+
+- name: Create a new or update an existing TEMPLATE
+ community.general.one_template:
+ name: generic-opensuse
+ template: |
+ CONTEXT = [
+ HOSTNAME = "generic-opensuse"
+ ]
+ CPU = "1"
+ CUSTOM_ATTRIBUTE = ""
+ DISK = [
+ CACHE = "writeback",
+ DEV_PREFIX = "sd",
+ DISCARD = "unmap",
+ IMAGE = "opensuse-leap-15.2",
+ IMAGE_UNAME = "oneadmin",
+ IO = "threads",
+ SIZE = "" ]
+ MEMORY = "2048"
+ NIC = [
+ MODEL = "virtio",
+ NETWORK = "testnet",
+ NETWORK_UNAME = "oneadmin" ]
+ OS = [
+ ARCH = "x86_64",
+ BOOT = "disk0" ]
+ SCHED_REQUIREMENTS = "CLUSTER_ID=\\"100\\""
+ VCPU = "2"
+
+- name: Delete the TEMPLATE by id
+ community.general.one_template:
+ id: 6459
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: template id
+ type: int
+ returned: when I(state=present)
+ sample: 153
+name:
+ description: template name
+ type: str
+ returned: when I(state=present)
+ sample: app1
+template:
+ description: the parsed template
+ type: dict
+ returned: when I(state=present)
+group_id:
+ description: template's group id
+ type: int
+ returned: when I(state=present)
+ sample: 1
+group_name:
+ description: template's group name
+ type: str
+ returned: when I(state=present)
+ sample: one-users
+owner_id:
+ description: template's owner id
+ type: int
+ returned: when I(state=present)
+ sample: 143
+owner_name:
+ description: template's owner name
+ type: str
+ returned: when I(state=present)
+ sample: ansible-test
+'''
+
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import OpenNebulaModule
+
+
+class TemplateModule(OpenNebulaModule):
+ def __init__(self):
+ argument_spec = dict(
+ id=dict(type='int', required=False),
+ name=dict(type='str', required=False),
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ template=dict(type='str', required=False),
+ )
+
+ mutually_exclusive = [
+ ['id', 'name']
+ ]
+
+ required_one_of = [('id', 'name')]
+
+ required_if = [
+ ['state', 'present', ['template']]
+ ]
+
+ OpenNebulaModule.__init__(self,
+ argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of,
+ required_if=required_if)
+
+ def run(self, one, module, result):
+ params = module.params
+ id = params.get('id')
+ name = params.get('name')
+ desired_state = params.get('state')
+ template_data = params.get('template')
+
+ self.result = {}
+
+ template = self.get_template_instance(id, name)
+ needs_creation = False
+ if not template and desired_state != 'absent':
+ if id:
+ module.fail_json(msg="There is no template with id=" + str(id))
+ else:
+ needs_creation = True
+
+ if desired_state == 'absent':
+ self.result = self.delete_template(template)
+ else:
+ if needs_creation:
+ self.result = self.create_template(name, template_data)
+ else:
+ self.result = self.update_template(template, template_data)
+
+ self.exit()
+
+ def get_template(self, predicate):
+ # -3 means "Resources belonging to the user"
+ # the other two parameters are used for pagination, -1 for both essentially means "return all"
+ pool = self.one.templatepool.info(-3, -1, -1)
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ return template
+
+ return None
+
+ def get_template_by_id(self, template_id):
+ return self.get_template(lambda template: (template.ID == template_id))
+
+ def get_template_by_name(self, name):
+ return self.get_template(lambda template: (template.NAME == name))
+
+ def get_template_instance(self, requested_id, requested_name):
+ if requested_id:
+ return self.get_template_by_id(requested_id)
+ else:
+ return self.get_template_by_name(requested_name)
+
+ def get_template_info(self, template):
+ info = {
+ 'id': template.ID,
+ 'name': template.NAME,
+ 'template': template.TEMPLATE,
+ 'user_name': template.UNAME,
+ 'user_id': template.UID,
+ 'group_name': template.GNAME,
+ 'group_id': template.GID,
+ }
+
+ return info
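+
+ # For example, for a template named 'app1' owned by user 'oneadmin' this
+ # returns something like (illustrative values only):
+ # {'id': 6459, 'name': 'app1', 'template': {...}, 'user_name': 'oneadmin',
+ # 'user_id': 0, 'group_name': 'oneadmin', 'group_id': 0}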
+
+ def create_template(self, name, template_data):
+ if not self.module.check_mode:
+ self.one.template.allocate("NAME = \"" + name + "\"\n" + template_data)
+
+ result = self.get_template_info(self.get_template_by_name(name))
+ result['changed'] = True
+
+ return result
+
+ def update_template(self, template, template_data):
+ if not self.module.check_mode:
+ # 0 = replace the whole template
+ self.one.template.update(template.ID, template_data, 0)
+
+ result = self.get_template_info(self.get_template_by_id(template.ID))
+ if self.module.check_mode:
+ # Unfortunately it is not easy to detect if the template would have changed, therefore always report a change here.
+ result['changed'] = True
+ else:
+ # if the previous parsed template data is not equal to the updated one, this has changed
+ result['changed'] = template.TEMPLATE != result['template']
+
+ return result
+
+ def delete_template(self, template):
+ if not template:
+ return {'changed': False}
+
+ if not self.module.check_mode:
+ self.one.template.delete(template.ID)
+
+ return {'changed': True}
+
+
+def main():
+ TemplateModule().run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/one_vm.py b/ansible_collections/community/general/plugins/modules/one_vm.py
new file mode 100644
index 000000000..1bbf47466
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/one_vm.py
@@ -0,0 +1,1725 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Milan Ilic <milani@nordeus.com>
+# Copyright (c) 2019, Jan Meerkamp <meerkamp@dvv.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: one_vm
+short_description: Creates or terminates OpenNebula instances
+description:
+ - Manages OpenNebula instances
+requirements:
+ - pyone
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_url:
+ description:
+ - URL of the OpenNebula RPC server.
+ - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted.
+ - If not set then the value of the C(ONE_URL) environment variable is used.
+ type: str
+ api_username:
+ description:
+ - Name of the user to log in to the OpenNebula RPC server.
+ - If not set, then the value of the C(ONE_USERNAME) environment variable is used.
+ type: str
+ api_password:
+ description:
+ - Password of the user to log in to the OpenNebula RPC server.
+ - If not set, then the value of the C(ONE_PASSWORD) environment variable is used.
+ - If neither I(api_username) nor I(api_password) is set, the module tries to authenticate with the ONE auth file. The default path is C(~/.one/one_auth).
+ - Set the environment variable C(ONE_AUTH) to override this path.
+ type: str
+ template_name:
+ description:
+ - Name of the VM template to use to create a new instance.
+ type: str
+ template_id:
+ description:
+ - ID of a VM template to use to create a new instance
+ type: int
+ vm_start_on_hold:
+ description:
+ - Set to true to put the VM on hold while creating it.
+ default: false
+ type: bool
+ instance_ids:
+ description:
+ - A list of instance ids used for states':' C(absent), C(running), C(rebooted), C(poweredoff)
+ aliases: ['ids']
+ type: list
+ elements: int
+ state:
+ description:
+ - C(present) - create instances from a template specified with C(template_id)/C(template_name).
+ - C(running) - run instances
+ - C(poweredoff) - power-off instances
+ - C(rebooted) - reboot instances
+ - C(absent) - terminate instances
+ choices: ["present", "absent", "running", "rebooted", "poweredoff"]
+ default: present
+ type: str
+ hard:
+ description:
+ - Reboot, power-off or terminate instances C(hard)
+ default: false
+ type: bool
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ - Keep in mind that waiting for an instance to reach the running state does not mean you can SSH into the machine, only that the boot process has started on that instance. See the 'wait_for' example for details.
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds
+ default: 300
+ type: int
+ attributes:
+ description:
+ - A dictionary of key/value attributes to add to new instances, or for setting the C(state) of instances with these attributes.
+ - Keys are case insensitive and OpenNebula automatically converts them to upper case.
+ - Be aware that C(NAME) is a special attribute which sets the name of the VM when it is deployed.
+ - C(#) character(s) can be appended to C(NAME) and the module will automatically add indexes to the names of VMs.
+ - For example':' C(NAME':' foo-###) would create VMs with names C(foo-000), C(foo-001),...
+ - When used with C(count_attributes) and C(exact_count), the module matches the base name without the index part.
+ default: {}
+ type: dict
+ labels:
+ description:
+ - A list of labels to associate with new instances, or for setting C(state) of instances with these labels.
+ default: []
+ type: list
+ elements: str
+ count_attributes:
+ description:
+ - A dictionary of key/value attributes that can only be used with C(exact_count) to determine how many nodes matching specific attribute criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ type: dict
+ count_labels:
+ description:
+ - A list of labels that can only be used with C(exact_count) to determine how many nodes matching specific label criteria should be deployed.
+ - This can be expressed in multiple ways and is shown in the EXAMPLES section.
+ type: list
+ elements: str
+ count:
+ description:
+ - Number of instances to launch
+ default: 1
+ type: int
+ exact_count:
+ description:
+ - Indicates how many instances matching the C(count_attributes) and C(count_labels) parameters should be deployed.
+ - Instances are either created or terminated based on this value.
+ - NOTE':' Instances with the lowest IDs will be terminated first.
+ type: int
+ mode:
+ description:
+ - Set permission mode of the instance in octal format, e.g. C(600) to give the owner C(use) and C(manage) rights and nothing to the group and others.
+ type: str
+ owner_id:
+ description:
+ - ID of the user who will be set as the owner of the instance
+ type: int
+ group_id:
+ description:
+ - ID of the group which will be set as the group of the instance
+ type: int
+ memory:
+ description:
+ - The size of the memory for new instances (in MB, GB, ...)
+ type: str
+ disk_size:
+ description:
+ - The size of the disk created for new instances (in MB, GB, TB,...).
+ - NOTE':' If the template has multiple disks, the order of the sizes is matched against the order specified in C(template_id)/C(template_name).
+ type: list
+ elements: str
+ cpu:
+ description:
+ - Percentage of CPU divided by 100 required for the new instance. Half a processor is written as 0.5.
+ type: float
+ vcpu:
+ description:
+ - Number of CPUs (cores) new VM will have.
+ type: int
+ networks:
+ description:
+ - A list of dictionaries with network parameters. See examples for more details.
+ default: []
+ type: list
+ elements: dict
+ disk_saveas:
+ description:
+ - Creates an image from a VM disk.
+ - It is a dictionary where you have to specify the C(name) of the new image.
+ - Optionally you can specify the C(disk_id) of the disk you want to save. By default C(disk_id) is 0.
+ - I(NOTE)':' This operation will only be performed on the first VM (if more than one VM ID is passed) and the VM has to be in the C(poweredoff) state.
+ - Also, this operation will fail if an image with the specified C(name) already exists.
+ type: dict
+ persistent:
+ description:
+ - Create a private persistent copy of the template plus any image defined in DISK, and instantiate that copy.
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ datastore_id:
+ description:
+ - ID of the datastore to use to create a new instance.
+ version_added: '0.2.0'
+ type: int
+ datastore_name:
+ description:
+ - Name of the datastore to use to create a new instance.
+ version_added: '0.2.0'
+ type: str
+ updateconf:
+ description:
+ - When I(instance_ids) is provided, updates running VMs with the C(updateconf) API call.
+ - When new VMs are being created, emulates the C(updateconf) API call via direct template merge.
+ - Allows for complete modifications of the C(CONTEXT) attribute.
+ type: dict
+ version_added: 6.3.0
+author:
+ - "Milan Ilic (@ilicmilan)"
+ - "Jan Meerkamp (@meerkampdvv)"
+'''
+
+
+EXAMPLES = '''
+- name: Create a new instance
+ community.general.one_vm:
+ template_id: 90
+ register: result
+
+- name: Print VM properties
+ ansible.builtin.debug:
+ msg: "{{ result }}"
+
+- name: Deploy a new VM on hold
+ community.general.one_vm:
+ template_name: 'app1_template'
+ vm_start_on_hold: true
+
+- name: Deploy a new VM and set its name to 'foo'
+ community.general.one_vm:
+ template_name: 'app1_template'
+ attributes:
+ name: foo
+
+- name: Deploy a new VM and set its group_id and mode
+ community.general.one_vm:
+ template_id: 90
+ group_id: 16
+ mode: 660
+
+- name: Deploy a new VM as persistent
+ community.general.one_vm:
+ template_id: 90
+ persistent: true
+
+- name: Change VM's permissions to 640
+ community.general.one_vm:
+ instance_ids: 5
+ mode: 640
+
+- name: Deploy 2 new instances and set memory, vcpu, disk_size and 3 networks
+ community.general.one_vm:
+ template_id: 15
+ disk_size: 35.2 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 2
+ networks:
+ - NETWORK_ID: 27
+ - NETWORK: "default-network"
+ NETWORK_UNAME: "app-user"
+ SECURITY_GROUPS: "120,124"
+ - NETWORK_ID: 27
+ SECURITY_GROUPS: "10"
+
+- name: Deploy a new instance which uses a Template with two Disks
+ community.general.one_vm:
+ template_id: 42
+ disk_size:
+ - 35.2 GB
+ - 50 GB
+ memory: 4 GB
+ vcpu: 4
+ count: 1
+ networks:
+ - NETWORK_ID: 27
+
+- name: "Deploy an new instance with attribute 'bar: bar1' and set its name to 'foo'"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: foo
+ bar: bar1
+
+- name: "Enforce that 2 instances with attributes 'foo1: app1' and 'foo2: app2' are deployed"
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ foo1: app1
+ foo2: app2
+ exact_count: 2
+ count_attributes:
+ foo1: app1
+ foo2: app2
+
+- name: Enforce that 4 instances with an attribute 'bar' are deployed
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: app
+ bar: bar2
+ exact_count: 4
+ count_attributes:
+ bar:
+
+# Deploy 2 new instances with attribute 'foo: bar' and labels 'app1' and 'app2' and names in format 'fooapp-##'
+# Names will be: fooapp-00 and fooapp-01
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-##
+ foo: bar
+ labels:
+ - app1
+ - app2
+ count: 2
+
+# Deploy 2 new instances with attribute 'app: app1' and names in format 'fooapp-###'
+# Names will be: fooapp-002 and fooapp-003
+- name: Deploy 2 new instances
+ community.general.one_vm:
+ template_id: 53
+ attributes:
+ name: fooapp-###
+ app: app1
+ count: 2
+
+# Reboot all instances with name in format 'fooapp-#'
+# Instances 'fooapp-00', 'fooapp-01', 'fooapp-002' and 'fooapp-003' will be rebooted
+- name: Reboot all instances with names in a certain format
+ community.general.one_vm:
+ attributes:
+ name: fooapp-#
+ state: rebooted
+
+# Enforce that only 1 instance with name in format 'fooapp-#' is deployed
+# The task will delete the oldest instances, so only 'fooapp-003' will remain
+- name: Enforce that only 1 instance with name in a certain format is deployed
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 1
+ count_attributes:
+ name: fooapp-#
+
+- name: Deploy a new instance with a network
+ community.general.one_vm:
+ template_id: 53
+ networks:
+ - NETWORK_ID: 27
+ register: vm
+
+- name: Wait for SSH to come up
+ ansible.builtin.wait_for_connection:
+ delegate_to: '{{ vm.instances[0].networks[0].ip }}'
+
+- name: Terminate VMs by ids
+ community.general.one_vm:
+ instance_ids:
+ - 153
+ - 160
+ state: absent
+
+- name: Reboot all VMs that have labels 'foo' and 'app1'
+ community.general.one_vm:
+ labels:
+ - foo
+ - app1
+ state: rebooted
+
+- name: "Fetch all VMs that have name 'foo' and attribute 'app: bar'"
+ community.general.one_vm:
+ attributes:
+ name: foo
+ app: bar
+ register: results
+
+- name: Deploy 2 new instances with labels 'foo1' and 'foo2'
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ - foo2
+ count: 2
+
+- name: Enforce that only 1 instance with label 'foo1' will be running
+ community.general.one_vm:
+ template_name: app_template
+ labels:
+ - foo1
+ exact_count: 1
+ count_labels:
+ - foo1
+
+- name: Terminate all instances that have attribute foo
+ community.general.one_vm:
+ template_id: 53
+ exact_count: 0
+ count_attributes:
+ foo:
+
+- name: "Power-off the VM and save VM's disk with id=0 to the image with name 'foo-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ state: poweredoff
+ disk_saveas:
+ name: foo-image
+
+- name: "Save VM's disk with id=1 to the image with name 'bar-image'"
+ community.general.one_vm:
+ instance_ids: 351
+ disk_saveas:
+ name: bar-image
+ disk_id: 1
+
+- name: "Deploy 2 new instances with a custom 'start script'"
+ community.general.one_vm:
+ template_name: app_template
+ count: 2
+ updateconf:
+ CONTEXT:
+ START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Add a custom 'start script' to a running VM"
+ community.general.one_vm:
+ instance_ids: 351
+ updateconf:
+ CONTEXT:
+ START_SCRIPT: ip r r 169.254.16.86/32 dev eth0
+
+- name: "Update SSH public keys inside the VM's context"
+ community.general.one_vm:
+ instance_ids: 351
+ updateconf:
+ CONTEXT:
+ SSH_PUBLIC_KEY: |-
+ ssh-rsa ...
+ ssh-ed25519 ...
+'''
+
+RETURN = '''
+instances_ids:
+ description: a list of instance IDs whose state is changed or which are fetched with the C(instance_ids) option.
+ type: list
+ returned: success
+ sample: [ 1234, 1235 ]
+instances:
+ description: a list of instance info dictionaries whose state is changed or which are fetched with the C(instance_ids) option.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's owner id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's owner name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The sizes of the disks in MB
+ type: list
+ sample: [ "20480 MB" ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+ updateconf:
+ description: A dictionary of key/values attributes that are set with the updateconf API call.
+ type: dict
+ version_added: 6.3.0
+ sample: {
+ "OS": { "ARCH": "x86_64" },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
+tagged_instances:
+ description:
+ - A list of instance info dictionaries selected by the specific attributes and/or labels that are specified with the C(count_attributes) and C(count_labels) options.
+ type: complex
+ returned: success
+ contains:
+ vm_id:
+ description: vm id
+ type: int
+ sample: 153
+ vm_name:
+ description: vm name
+ type: str
+ sample: foo
+ template_id:
+ description: vm's template id
+ type: int
+ sample: 153
+ group_id:
+ description: vm's group id
+ type: int
+ sample: 1
+ group_name:
+ description: vm's group name
+ type: str
+ sample: one-users
+ owner_id:
+ description: vm's user id
+ type: int
+ sample: 143
+ owner_name:
+ description: vm's user name
+ type: str
+ sample: app-user
+ mode:
+ description: vm's mode
+ type: str
+ returned: success
+ sample: 660
+ state:
+ description: state of an instance
+ type: str
+ sample: ACTIVE
+ lcm_state:
+ description: lcm state of an instance that is only relevant when the state is ACTIVE
+ type: str
+ sample: RUNNING
+ cpu:
+ description: Percentage of CPU divided by 100
+ type: float
+ sample: 0.2
+ vcpu:
+ description: Number of CPUs (cores)
+ type: int
+ sample: 2
+ memory:
+ description: The size of the memory in MB
+ type: str
+ sample: 4096 MB
+ disk_size:
+ description: The size of the disk in MB
+ type: list
+ sample: [
+ "20480 MB",
+ "10240 MB"
+ ]
+ networks:
+ description: a list of dictionaries with info about IP, NAME, MAC, SECURITY_GROUPS for each NIC
+ type: list
+ sample: [
+ {
+ "ip": "10.120.5.33",
+ "mac": "02:00:0a:78:05:21",
+ "name": "default-test-private",
+ "security_groups": "0,10"
+ },
+ {
+ "ip": "10.120.5.34",
+ "mac": "02:00:0a:78:05:22",
+ "name": "default-test-private",
+ "security_groups": "0"
+ }
+ ]
+ uptime_h:
+ description: Uptime of the instance in hours
+ type: int
+ sample: 35
+ labels:
+ description: A list of string labels that are associated with the instance
+ type: list
+ sample: [
+ "foo",
+ "spec-label"
+ ]
+ attributes:
+ description: A dictionary of key/values attributes that are associated with the instance
+ type: dict
+ sample: {
+ "HYPERVISOR": "kvm",
+ "LOGO": "images/logos/centos.png",
+ "TE_GALAXY": "bar",
+ "USER_INPUTS": null
+ }
+ updateconf:
+ description: A dictionary of key/values attributes that are set with the updateconf API call
+ type: dict
+ version_added: 6.3.0
+ sample: {
+ "OS": { "ARCH": "x86_64" },
+ "CONTEXT": {
+ "START_SCRIPT": "ip r r 169.254.16.86/32 dev eth0",
+ "SSH_PUBLIC_KEY": "ssh-rsa ...\\nssh-ed25519 ..."
+ }
+ }
+'''
+
+try:
+ import pyone
+ HAS_PYONE = True
+except ImportError:
+ HAS_PYONE = False
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.dict_transformations import dict_merge
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
+
+
+UPDATECONF_ATTRIBUTES = {
+ "OS": ["ARCH", "MACHINE", "KERNEL", "INITRD", "BOOTLOADER", "BOOT", "SD_DISK_BUS", "UUID"],
+ "FEATURES": ["ACPI", "PAE", "APIC", "LOCALTIME", "HYPERV", "GUEST_AGENT"],
+ "INPUT": ["TYPE", "BUS"],
+ "GRAPHICS": ["TYPE", "LISTEN", "PASSWD", "KEYMAP"],
+ "RAW": ["DATA", "DATA_VMX", "TYPE"],
+ "CONTEXT": [],
+}
+
+
+def check_updateconf(module, to_check):
+ '''Checks if attributes are compatible with one.vm.updateconf API call.'''
+ for attr, subattributes in to_check.items():
+ if attr not in UPDATECONF_ATTRIBUTES:
+ module.fail_json(msg="'{0:}' is not a valid VM attribute.".format(attr))
+ if not UPDATECONF_ATTRIBUTES[attr]:
+ continue
+ for subattr in subattributes:
+ if subattr not in UPDATECONF_ATTRIBUTES[attr]:
+ module.fail_json(msg="'{0:}' is not a valid VM subattribute of '{1:}'".format(subattr, attr))
+
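+# A quick sketch of the validation above (hypothetical inputs, not part of the
+# module): 'CONTEXT' has an empty allow-list, so any subattribute is accepted,
+# while the other sections accept only the keys listed for them.
+#   check_updateconf(module, {'CONTEXT': {'START_SCRIPT': '...'}})  # passes
+#   check_updateconf(module, {'OS': {'ARCH': 'x86_64'}})            # passes
+#   check_updateconf(module, {'OS': {'FOO': 'bar'}})                # fails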
+
+def parse_updateconf(vm_template):
+ '''Extracts 'updateconf' attributes from a VM template.'''
+ updateconf = {}
+ for attr, subattributes in vm_template.items():
+ if attr not in UPDATECONF_ATTRIBUTES:
+ continue
+ tmp = {}
+ for subattr, value in subattributes.items():
+ if UPDATECONF_ATTRIBUTES[attr] and subattr not in UPDATECONF_ATTRIBUTES[attr]:
+ continue
+ tmp[subattr] = value
+ if tmp:
+ updateconf[attr] = tmp
+ return updateconf
+
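+# For example, a VM template of
+#   {'OS': {'ARCH': 'x86_64', 'DISK_ID': '0'}, 'MEMORY': '2048'}
+# yields {'OS': {'ARCH': 'x86_64'}}: 'MEMORY' is not an updateconf attribute
+# and 'DISK_ID' is not in the allow-list for 'OS'.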
+
+def get_template(module, client, predicate):
+
+ # Filter value -2 means "fetch all templates the user can use".
+ pool = client.templatepool.info(-2, -1, -1, -1)
+ found = 0
+ found_template = None
+ template_name = ''
+
+ for template in pool.VMTEMPLATE:
+ if predicate(template):
+ found = found + 1
+ found_template = template
+ template_name = template.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one template with name: ' + template_name)
+ return found_template
+
+
+def get_template_by_name(module, client, template_name):
+ return get_template(module, client, lambda template: (template.NAME == template_name))
+
+
+def get_template_by_id(module, client, template_id):
+ return get_template(module, client, lambda template: (template.ID == template_id))
+
+
+def get_template_id(module, client, requested_id, requested_name):
+ template = get_template_by_id(module, client, requested_id) if requested_id is not None else get_template_by_name(module, client, requested_name)
+ if template:
+ return template.ID
+ else:
+ return None
+
+
+def get_datastore(module, client, predicate):
+ pool = client.datastorepool.info()
+ found = 0
+ found_datastore = None
+ datastore_name = ''
+
+ for datastore in pool.DATASTORE:
+ if predicate(datastore):
+ found = found + 1
+ found_datastore = datastore
+ datastore_name = datastore.NAME
+
+ if found == 0:
+ return None
+ elif found > 1:
+ module.fail_json(msg='There is more than one datastore with name: ' + datastore_name)
+ return found_datastore
+
+
+def get_datastore_by_name(module, client, datastore_name):
+ return get_datastore(module, client, lambda datastore: (datastore.NAME == datastore_name))
+
+
+def get_datastore_by_id(module, client, datastore_id):
+ return get_datastore(module, client, lambda datastore: (datastore.ID == datastore_id))
+
+
+def get_datastore_id(module, client, requested_id, requested_name):
+ datastore = get_datastore_by_id(module, client, requested_id) if requested_id else get_datastore_by_name(module, client, requested_name)
+ if datastore:
+ return datastore.ID
+ else:
+ return None
+
+
+def get_vm_by_id(client, vm_id):
+ try:
+ vm = client.vm.info(int(vm_id))
+ except BaseException:
+ return None
+ return vm
+
+
+def get_vms_by_ids(module, client, state, ids):
+ vms = []
+
+ for vm_id in ids:
+ vm = get_vm_by_id(client, vm_id)
+ if vm is None and state != 'absent':
+ module.fail_json(msg='There is no VM with id=' + str(vm_id))
+ vms.append(vm)
+
+ return vms
+
+
+def get_vm_info(client, vm):
+
+ vm = client.vm.info(vm.ID)
+
+ networks_info = []
+
+ disk_size = []
+ if 'DISK' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['DISK'], list):
+ for disk in vm.TEMPLATE['DISK']:
+ disk_size.append(disk['SIZE'] + ' MB')
+ else:
+ disk_size.append(vm.TEMPLATE['DISK']['SIZE'] + ' MB')
+
+ if 'NIC' in vm.TEMPLATE:
+ if isinstance(vm.TEMPLATE['NIC'], list):
+ for nic in vm.TEMPLATE['NIC']:
+ networks_info.append({
+ 'ip': nic.get('IP', ''),
+ 'mac': nic.get('MAC', ''),
+ 'name': nic.get('NETWORK', ''),
+ 'security_groups': nic.get('SECURITY_GROUPS', '')
+ })
+ else:
+ networks_info.append({
+ 'ip': vm.TEMPLATE['NIC'].get('IP', ''),
+ 'mac': vm.TEMPLATE['NIC'].get('MAC', ''),
+ 'name': vm.TEMPLATE['NIC'].get('NETWORK', ''),
+ 'security_groups':
+ vm.TEMPLATE['NIC'].get('SECURITY_GROUPS', '')
+ })
+ import time
+
+ current_time = time.localtime()
+ vm_start_time = time.localtime(vm.STIME)
+
+ vm_uptime = time.mktime(current_time) - time.mktime(vm_start_time)
+ vm_uptime /= (60 * 60)
+
+ permissions_str = parse_vm_permissions(client, vm)
+
+ # LCM_STATE is VM's sub-state that is relevant only when STATE is ACTIVE
+ vm_lcm_state = None
+ if vm.STATE == VM_STATES.index('ACTIVE'):
+ vm_lcm_state = LCM_STATES[vm.LCM_STATE]
+
+ vm_labels, vm_attributes = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ updateconf = parse_updateconf(vm.TEMPLATE)
+
+ info = {
+ 'template_id': int(vm.TEMPLATE['TEMPLATE_ID']),
+ 'vm_id': vm.ID,
+ 'vm_name': vm.NAME,
+ 'state': VM_STATES[vm.STATE],
+ 'lcm_state': vm_lcm_state,
+ 'owner_name': vm.UNAME,
+ 'owner_id': vm.UID,
+ 'networks': networks_info,
+ 'disk_size': disk_size,
+ 'memory': vm.TEMPLATE['MEMORY'] + ' MB',
+ 'vcpu': vm.TEMPLATE['VCPU'],
+ 'cpu': vm.TEMPLATE['CPU'],
+ 'group_name': vm.GNAME,
+ 'group_id': vm.GID,
+ 'uptime_h': int(vm_uptime),
+ 'attributes': vm_attributes,
+ 'mode': permissions_str,
+ 'labels': vm_labels,
+ 'updateconf': updateconf,
+ }
+
+ return info
+
+
+def parse_vm_permissions(client, vm):
+ vm_PERMISSIONS = client.vm.info(vm.ID).PERMISSIONS
+
+ owner_octal = int(vm_PERMISSIONS.OWNER_U) * 4 + int(vm_PERMISSIONS.OWNER_M) * 2 + int(vm_PERMISSIONS.OWNER_A)
+ group_octal = int(vm_PERMISSIONS.GROUP_U) * 4 + int(vm_PERMISSIONS.GROUP_M) * 2 + int(vm_PERMISSIONS.GROUP_A)
+ other_octal = int(vm_PERMISSIONS.OTHER_U) * 4 + int(vm_PERMISSIONS.OTHER_M) * 2 + int(vm_PERMISSIONS.OTHER_A)
+
+ permissions = str(owner_octal) + str(group_octal) + str(other_octal)
+
+ return permissions
+
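+# Worked example: with OWNER_U=1, OWNER_M=1, OWNER_A=0, GROUP_U=1 and all the
+# remaining flags 0, the octal digits are 4+2+0=6, 4+0+0=4 and 0, so the
+# function returns '640'.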
+
+def set_vm_permissions(module, client, vms, permissions):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ old_permissions = parse_vm_permissions(client, vm)
+ changed = changed or old_permissions != permissions
+
+ if not module.check_mode and old_permissions != permissions:
+ permissions_str = bin(int(permissions, base=8))[2:] # 600 -> 110000000
+ mode_bits = [int(d) for d in permissions_str]
+ try:
+ client.vm.chmod(
+ vm.ID, mode_bits[0], mode_bits[1], mode_bits[2], mode_bits[3], mode_bits[4], mode_bits[5], mode_bits[6], mode_bits[7], mode_bits[8])
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Permissions changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
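+# Permission-bit sketch for the conversion above: mode '640' becomes the bit
+# string '110100000', i.e. the nine digits (1,1,0, 1,0,0, 0,0,0) that are
+# passed positionally to client.vm.chmod() as the owner/group/other
+# USE, MANAGE and ADMIN bits.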
+
+def set_vm_ownership(module, client, vms, owner_id, group_id):
+ changed = False
+
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ if owner_id is None:
+ owner_id = vm.UID
+ if group_id is None:
+ group_id = vm.GID
+
+ changed = changed or owner_id != vm.UID or group_id != vm.GID
+
+ if not module.check_mode and (owner_id != vm.UID or group_id != vm.GID):
+ try:
+ client.vm.chown(vm.ID, owner_id, group_id)
+ except pyone.OneAuthorizationException:
+ module.fail_json(msg="Ownership changing is unsuccessful, but instances are present if you deployed them.")
+
+ return changed
+
+
+def update_vm(module, client, vm, updateconf_dict):
+ changed = False
+ if not updateconf_dict:
+ return changed
+
+ before = client.vm.info(vm.ID).TEMPLATE
+
+ client.vm.updateconf(vm.ID, render(updateconf_dict), 1) # 1: Merge new template with the existing one.
+
+ after = client.vm.info(vm.ID).TEMPLATE
+
+ changed = before != after
+ return changed
+
+
+def update_vms(module, client, vms, *args):
+ changed = False
+ for vm in vms:
+ changed = update_vm(module, client, vm, *args) or changed
+ return changed
+
+
+def get_size_in_MB(module, size_str):
+
+ SYMBOLS = ['B', 'KB', 'MB', 'GB', 'TB']
+
+ s = size_str
+ init = size_str
+ num = ""
+ while s and s[0:1].isdigit() or s[0:1] == '.':
+ num += s[0]
+ s = s[1:]
+ num = float(num)
+ symbol = s.strip()
+
+ if symbol not in SYMBOLS:
+ module.fail_json(msg="Cannot interpret %r %r %d" % (init, symbol, num))
+
+ prefix = {'B': 1}
+
+ for i, s in enumerate(SYMBOLS[1:]):
+ prefix[s] = 1 << (i + 1) * 10
+
+ size_in_bytes = int(num * prefix[symbol])
+ size_in_MB = size_in_bytes / (1024 * 1024)
+
+ return size_in_MB
+
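+# Worked example: get_size_in_MB(module, '2 GB') parses num=2.0 and
+# symbol='GB', so size_in_bytes = 2 * 2**30 and the result is 2048.0.
+# A plain '512 MB' returns 512.0; an unknown suffix such as '2 GiB'
+# triggers module.fail_json().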
+
+def create_vm(module, client, template_id, attributes_dict, labels_list, disk_size, network_attrs_list, vm_start_on_hold, vm_persistent, updateconf_dict):
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ template = client.template.info(template_id).TEMPLATE
+
+ disk_count = len(flatten(template.get('DISK', [])))
+ if disk_size:
+ size_count = len(flatten(disk_size))
+ # check if the number of disks is correct
+ if disk_count != size_count:
+ module.fail_json(msg='This template has ' + str(disk_count) + ' disks but you defined ' + str(size_count))
+
+ vm_extra_template = dict_merge(template or {}, attributes_dict or {})
+ vm_extra_template = dict_merge(vm_extra_template, {
+ 'LABELS': ','.join(labels_list),
+ 'NIC': flatten(network_attrs_list, extract=True),
+ 'DISK': flatten([
+ disk if not size else dict_merge(disk, {
+ 'SIZE': str(int(get_size_in_MB(module, size))),
+ })
+ for disk, size in zip(
+ flatten(template.get('DISK', [])),
+ flatten(disk_size or [None] * disk_count),
+ )
+ if disk is not None
+ ], extract=True)
+ })
+ vm_extra_template = dict_merge(vm_extra_template, updateconf_dict or {})
+
+ try:
+ vm_id = client.template.instantiate(template_id,
+ vm_name,
+ vm_start_on_hold,
+ render(vm_extra_template),
+ vm_persistent)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+
+ vm = get_vm_by_id(client, vm_id)
+ return get_vm_info(client, vm)
+
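+# Disk-size merge sketch for create_vm() above (hypothetical values): for a
+# template with two disks and disk_size=['35.2 GB', '50 GB'], each DISK entry
+# is merged with {'SIZE': str(int(get_size_in_MB(module, size)))}, giving
+# SIZE='36044' and SIZE='51200' (in MB) before instantiation.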
+
+def generate_next_index(vm_filled_indexes_list, num_sign_cnt):
+ counter = 0
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ while cnt_str in vm_filled_indexes_list:
+ counter = counter + 1
+ cnt_str = str(counter).zfill(num_sign_cnt)
+
+ return cnt_str
+
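+# For example, generate_next_index(['000', '001', '003'], 3) returns '002',
+# the first zero-padded counter value that is not already taken.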
+
+def get_vm_labels_and_attributes_dict(client, vm_id):
+ vm_USER_TEMPLATE = client.vm.info(vm_id).USER_TEMPLATE
+
+ attrs_dict = {}
+ labels_list = []
+
+ for key, value in vm_USER_TEMPLATE.items():
+ if key != 'LABELS':
+ attrs_dict[key] = value
+ else:
+ if key is not None and value is not None:
+ labels_list = value.split(',')
+
+ return labels_list, attrs_dict
+
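+# For example, a USER_TEMPLATE of {'LABELS': 'foo,app1', 'BAR': 'bar1'}
+# yields (['foo', 'app1'], {'BAR': 'bar1'}).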
+
+def get_all_vms_by_attributes(client, attributes_dict, labels_list):
+ pool = client.vmpool.info(-2, -1, -1, -1).VM
+ vm_list = []
+ name = ''
+ if attributes_dict:
+ name = attributes_dict.pop('NAME', '')
+
+ if name != '':
+ base_name = name[:len(name) - name.count('#')]
+ # Check whether the name has an indexed format
+ with_hash = name.endswith('#')
+
+ for vm in pool:
+ if vm.NAME.startswith(base_name):
+ if with_hash and vm.NAME[len(base_name):].isdigit():
+ # If the name has indexed format and after base_name it has only digits it'll be matched
+ vm_list.append(vm)
+ elif not with_hash and vm.NAME == name:
+ # If the name is not indexed it has to be same
+ vm_list.append(vm)
+ pool = vm_list
+
+ import copy
+
+ vm_list = copy.copy(pool)
+
+ for vm in pool:
+ remove_list = []
+ vm_labels_list, vm_attributes_dict = get_vm_labels_and_attributes_dict(client, vm.ID)
+
+ if attributes_dict and len(attributes_dict) > 0:
+ for key, val in attributes_dict.items():
+ if key in vm_attributes_dict:
+ if val and vm_attributes_dict[key] != val:
+ remove_list.append(vm)
+ break
+ else:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ remove_list = []
+ if labels_list and len(labels_list) > 0:
+ for label in labels_list:
+ if label not in vm_labels_list:
+ remove_list.append(vm)
+ break
+ vm_list = list(set(vm_list).difference(set(remove_list)))
+
+ return vm_list
+
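+# Name-matching sketch for the function above: with attributes
+# {'NAME': 'fooapp-##'} the base name is 'fooapp-' and, because the requested
+# name ends with '#', any VM named base-plus-digits (e.g. 'fooapp-00' or
+# 'fooapp-123') matches; without a trailing '#' the VM name must match exactly.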
+
+def create_count_of_vms(module, client,
+ template_id, count,
+ attributes_dict, labels_list, disk_size, network_attrs_list,
+ wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict):
+ new_vms_list = []
+
+ vm_name = ''
+ if attributes_dict:
+ vm_name = attributes_dict.get('NAME', '')
+
+ if module.check_mode:
+ return True, [], []
+
+ # Create list of used indexes
+ vm_filled_indexes_list = None
+ num_sign_cnt = vm_name.count('#')
+ if vm_name != '' and num_sign_cnt > 0:
+ vm_list = get_all_vms_by_attributes(client, {'NAME': vm_name}, None)
+ base_name = vm_name[:len(vm_name) - num_sign_cnt]
+ vm_name = base_name
+ # Make list which contains used indexes in format ['000', '001',...]
+ vm_filled_indexes_list = list((vm.NAME[len(base_name):].zfill(num_sign_cnt)) for vm in vm_list)
+
+ while count > 0:
+ new_vm_name = vm_name
+ # Create indexed name
+ if vm_filled_indexes_list is not None:
+ next_index = generate_next_index(vm_filled_indexes_list, num_sign_cnt)
+ vm_filled_indexes_list.append(next_index)
+ new_vm_name += next_index
+ # Update NAME value in the attributes in case there is index
+ attributes_dict['NAME'] = new_vm_name
+ new_vm_dict = create_vm(module, client,
+ template_id, attributes_dict, labels_list, disk_size, network_attrs_list,
+ vm_start_on_hold, vm_persistent, updateconf_dict)
+ new_vm_id = new_vm_dict.get('vm_id')
+ new_vm = get_vm_by_id(client, new_vm_id)
+ new_vms_list.append(new_vm)
+ count -= 1
+
+ if vm_start_on_hold:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_hold(module, client, vm, wait_timeout)
+ else:
+ if wait:
+ for vm in new_vms_list:
+ wait_for_running(module, client, vm, wait_timeout)
+
+ return True, new_vms_list, []
+
+
+def create_exact_count_of_vms(module, client,
+ template_id, exact_count, attributes_dict, count_attributes_dict,
+ labels_list, count_labels_list, disk_size, network_attrs_list,
+ hard, wait, wait_timeout, vm_start_on_hold, vm_persistent, updateconf_dict):
+ vm_list = get_all_vms_by_attributes(client, count_attributes_dict, count_labels_list)
+
+ vm_count_diff = exact_count - len(vm_list)
+ changed = vm_count_diff != 0
+
+ new_vms_list = []
+ instances_list = []
+ tagged_instances_list = vm_list
+
+ if module.check_mode:
+ return changed, instances_list, tagged_instances_list
+
+ if vm_count_diff > 0:
+ # Add more VMs
+ changed, instances_list, tagged_instances = create_count_of_vms(module, client, template_id, vm_count_diff, attributes_dict,
+ labels_list, disk_size, network_attrs_list, wait, wait_timeout,
+ vm_start_on_hold, vm_persistent, updateconf_dict)
+
+ tagged_instances_list += instances_list
+ elif vm_count_diff < 0:
+ # Delete surplus VMs
+ old_vms_list = []
+
+ while vm_count_diff < 0:
+ old_vm = vm_list.pop(0)
+ old_vms_list.append(old_vm)
+ terminate_vm(module, client, old_vm, hard)
+ vm_count_diff += 1
+
+ if wait:
+ for vm in old_vms_list:
+ wait_for_done(module, client, vm, wait_timeout)
+
+ instances_list = old_vms_list
+ # store only the remaining instances
+ old_vms_set = set(old_vms_list)
+ tagged_instances_list = [vm for vm in vm_list if vm not in old_vms_set]
+
+ return changed, instances_list, tagged_instances_list
+
+
+VM_STATES = ['INIT', 'PENDING', 'HOLD', 'ACTIVE', 'STOPPED', 'SUSPENDED', 'DONE', '', 'POWEROFF', 'UNDEPLOYED', 'CLONING', 'CLONING_FAILURE']
+LCM_STATES = ['LCM_INIT', 'PROLOG', 'BOOT', 'RUNNING', 'MIGRATE', 'SAVE_STOP',
+ 'SAVE_SUSPEND', 'SAVE_MIGRATE', 'PROLOG_MIGRATE', 'PROLOG_RESUME',
+ 'EPILOG_STOP', 'EPILOG', 'SHUTDOWN', 'STATE13', 'STATE14', 'CLEANUP_RESUBMIT', 'UNKNOWN', 'HOTPLUG', 'SHUTDOWN_POWEROFF',
+ 'BOOT_UNKNOWN', 'BOOT_POWEROFF', 'BOOT_SUSPENDED', 'BOOT_STOPPED', 'CLEANUP_DELETE', 'HOTPLUG_SNAPSHOT', 'HOTPLUG_NIC',
+ 'HOTPLUG_SAVEAS', 'HOTPLUG_SAVEAS_POWEROFF', 'HOTPLUG_SAVEAS_SUSPENDED', 'SHUTDOWN_UNDEPLOY']
+
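+# Both lists mirror OpenNebula's numeric state codes, so list position matters:
+# for example VM_STATES.index('ACTIVE') == 3 and LCM_STATES.index('RUNNING') == 3,
+# which is why the code converts between names and codes with .index() and [].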
+
+def wait_for_state(module, client, vm, wait_timeout, state_predicate):
+ import time
+ start_time = time.time()
+
+ while (time.time() - start_time) < wait_timeout:
+ vm = client.vm.info(vm.ID)
+ state = vm.STATE
+ lcm_state = vm.LCM_STATE
+
+ if state_predicate(state, lcm_state):
+ return vm
+ elif state not in [VM_STATES.index('INIT'), VM_STATES.index('PENDING'), VM_STATES.index('HOLD'),
+ VM_STATES.index('ACTIVE'), VM_STATES.index('CLONING'), VM_STATES.index('POWEROFF')]:
+ module.fail_json(msg='Action is unsuccessful. VM state: ' + VM_STATES[state])
+
+ time.sleep(1)
+
+ module.fail_json(msg="Wait timeout has expired!")
+
+
+def wait_for_running(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state,
+ lcm_state: (state in [VM_STATES.index('ACTIVE')] and lcm_state in [LCM_STATES.index('RUNNING')]))
+
+
+def wait_for_done(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('DONE')]))
+
+
+def wait_for_hold(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('HOLD')]))
+
+
+def wait_for_poweroff(module, client, vm, wait_timeout):
+ return wait_for_state(module, client, vm, wait_timeout, lambda state, lcm_state: (state in [VM_STATES.index('POWEROFF')]))
+
+
+def terminate_vm(module, client, vm, hard=False):
+ changed = False
+
+ if not vm:
+ return changed
+
+ changed = True
+
+ if not module.check_mode:
+ if hard:
+ client.vm.action('terminate-hard', vm.ID)
+ else:
+ client.vm.action('terminate', vm.ID)
+
+ return changed
+
+
+def terminate_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = terminate_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def poweroff_vm(module, client, vm, hard):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN'), LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ if not hard:
+ client.vm.action('poweroff', vm.ID)
+ else:
+ client.vm.action('poweroff-hard', vm.ID)
+
+ return changed
+
+
+def poweroff_vms(module, client, vms, hard):
+ changed = False
+
+ for vm in vms:
+ changed = poweroff_vm(module, client, vm, hard) or changed
+
+ return changed
+
+
+def reboot_vms(module, client, vms, wait_timeout, hard):
+
+ if not module.check_mode:
+ # Firstly, power-off all instances
+ for vm in vms:
+ vm = client.vm.info(vm.ID)
+ lcm_state = vm.LCM_STATE
+ state = vm.STATE
+ if lcm_state not in [LCM_STATES.index('SHUTDOWN_POWEROFF')] and state not in [VM_STATES.index('POWEROFF')]:
+ poweroff_vm(module, client, vm, hard)
+
+ # Wait for all to be power-off
+ for vm in vms:
+ wait_for_poweroff(module, client, vm, wait_timeout)
+
+ for vm in vms:
+ resume_vm(module, client, vm)
+
+ return True
+
+
+def resume_vm(module, client, vm):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ state = vm.STATE
+ if state in [VM_STATES.index('HOLD')]:
+ changed = release_vm(module, client, vm)
+ return changed
+
+ lcm_state = vm.LCM_STATE
+ if lcm_state == LCM_STATES.index('SHUTDOWN_POWEROFF'):
+ module.fail_json(msg="Cannot perform action 'resume' because this action is not available " +
+ "for LCM_STATE: 'SHUTDOWN_POWEROFF'. Wait for the VM to shutdown properly")
+ if lcm_state not in [LCM_STATES.index('RUNNING')]:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('resume', vm.ID)
+
+ return changed
+
+
+def resume_vms(module, client, vms):
+ changed = False
+
+ for vm in vms:
+ changed = resume_vm(module, client, vm) or changed
+
+ return changed
+
+
+def release_vm(module, client, vm):
+ vm = client.vm.info(vm.ID)
+ changed = False
+
+ state = vm.STATE
+ if state != VM_STATES.index('HOLD'):
+ module.fail_json(msg="Cannot perform action 'release' because this action is not available " +
+ "because VM is not in state 'HOLD'.")
+ else:
+ changed = True
+
+ if changed and not module.check_mode:
+ client.vm.action('release', vm.ID)
+
+ return changed
+
+
+def check_name_attribute(module, attributes):
+ if attributes.get("NAME"):
+ import re
+ if re.match(r'^[^#]+#*$', attributes.get("NAME")) is None:
+ module.fail_json(msg="Ilegal 'NAME' attribute: '" + attributes.get("NAME") +
+ "' .Signs '#' are allowed only at the end of the name and the name cannot contain only '#'.")
+
+
+TEMPLATE_RESTRICTED_ATTRIBUTES = ["CPU", "VCPU", "OS", "FEATURES", "MEMORY", "DISK", "NIC", "INPUT", "GRAPHICS",
+ "CONTEXT", "CREATED_BY", "CPU_COST", "DISK_COST", "MEMORY_COST",
+ "TEMPLATE_ID", "VMID", "AUTOMATIC_DS_REQUIREMENTS", "DEPLOY_FOLDER", "LABELS"]
+
+
+def check_attributes(module, attributes):
+ for key in attributes.keys():
+ if key in TEMPLATE_RESTRICTED_ATTRIBUTES:
+ module.fail_json(msg='Restricted attribute `' + key + '` cannot be used when filtering VMs.')
+ # Check the format of the name attribute
+ check_name_attribute(module, attributes)
+
+
+def disk_save_as(module, client, vm, disk_saveas, wait_timeout):
+ if not disk_saveas.get('name'):
+ module.fail_json(msg="Key 'name' is required for 'disk_saveas' option")
+
+ image_name = disk_saveas.get('name')
+ disk_id = disk_saveas.get('disk_id', 0)
+
+ if not module.check_mode:
+ if vm.STATE != VM_STATES.index('POWEROFF'):
+ module.fail_json(msg="'disksaveas' option can be used only when the VM is in 'POWEROFF' state")
+ try:
+ client.vm.disksaveas(vm.ID, disk_id, image_name, 'OS', -1)
+ except pyone.OneException as e:
+ module.fail_json(msg=str(e))
+ wait_for_poweroff(module, client, vm, wait_timeout) # wait for VM to leave the hotplug_saveas_poweroff state
+
+
+def get_connection_info(module):
+
+ url = module.params.get('api_url')
+ username = module.params.get('api_username')
+ password = module.params.get('api_password')
+
+ if not url:
+ url = os.environ.get('ONE_URL')
+
+ if not username:
+ username = os.environ.get('ONE_USERNAME')
+
+ if not password:
+ password = os.environ.get('ONE_PASSWORD')
+
+ if not username:
+ if not password:
+ authfile = os.environ.get('ONE_AUTH')
+ if authfile is None:
+ authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth")
+ try:
+ with open(authfile, "r") as fp:
+ authstring = fp.read().rstrip()
+ username = authstring.split(":")[0]
+ password = authstring.split(":")[1]
+ except (OSError, IOError):
+ module.fail_json(msg=("Could not find or read ONE_AUTH file at '%s'" % authfile))
+ except Exception:
+ module.fail_json(msg=("Error occurs when read ONE_AUTH file at '%s'" % authfile))
+ if not url:
+ module.fail_json(msg="Opennebula API url (api_url) is not specified")
+ from collections import namedtuple
+
+ auth_params = namedtuple('auth', ('url', 'username', 'password'))
+
+ return auth_params(url=url, username=username, password=password)
+
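+# The ONE auth file read above is expected to hold a single 'username:password'
+# line, for example (illustrative values only):
+#   $ cat ~/.one/one_auth
+#   oneadmin:opennebula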
+
+def main():
+ fields = {
+ "api_url": {"required": False, "type": "str"},
+ "api_username": {"required": False, "type": "str"},
+ "api_password": {"required": False, "type": "str", "no_log": True},
+ "instance_ids": {"required": False, "aliases": ['ids'], "type": "list", "elements": "int"},
+ "template_name": {"required": False, "type": "str"},
+ "template_id": {"required": False, "type": "int"},
+ "vm_start_on_hold": {"default": False, "type": "bool"},
+ "state": {
+ "default": "present",
+ "choices": ['present', 'absent', 'rebooted', 'poweredoff', 'running'],
+ "type": "str"
+ },
+ "mode": {"required": False, "type": "str"},
+ "owner_id": {"required": False, "type": "int"},
+ "group_id": {"required": False, "type": "int"},
+ "wait": {"default": True, "type": "bool"},
+ "wait_timeout": {"default": 300, "type": "int"},
+ "hard": {"default": False, "type": "bool"},
+ "memory": {"required": False, "type": "str"},
+ "cpu": {"required": False, "type": "float"},
+ "vcpu": {"required": False, "type": "int"},
+ "disk_size": {"required": False, "type": "list", "elements": "str"},
+ "datastore_name": {"required": False, "type": "str"},
+ "datastore_id": {"required": False, "type": "int"},
+ "networks": {"default": [], "type": "list", "elements": "dict"},
+ "count": {"default": 1, "type": "int"},
+ "exact_count": {"required": False, "type": "int"},
+ "attributes": {"default": {}, "type": "dict"},
+ "count_attributes": {"required": False, "type": "dict"},
+ "labels": {"default": [], "type": "list", "elements": "str"},
+ "count_labels": {"required": False, "type": "list", "elements": "str"},
+ "disk_saveas": {"type": "dict"},
+ "persistent": {"default": False, "type": "bool"},
+ "updateconf": {"type": "dict"},
+ }
+
+ module = AnsibleModule(argument_spec=fields,
+ mutually_exclusive=[
+ ['template_id', 'template_name', 'instance_ids'],
+ ['template_id', 'template_name', 'disk_saveas'],
+ ['instance_ids', 'count_attributes', 'count'],
+ ['instance_ids', 'count_labels', 'count'],
+ ['instance_ids', 'exact_count'],
+ ['instance_ids', 'attributes'],
+ ['instance_ids', 'labels'],
+ ['disk_saveas', 'attributes'],
+ ['disk_saveas', 'labels'],
+ ['exact_count', 'count'],
+ ['count', 'hard'],
+ ['instance_ids', 'cpu'], ['instance_ids', 'vcpu'],
+ ['instance_ids', 'memory'], ['instance_ids', 'disk_size'],
+ ['instance_ids', 'networks'],
+ ['persistent', 'disk_size']
+ ],
+ supports_check_mode=True)
+
+ if not HAS_PYONE:
+ module.fail_json(msg='This module requires pyone to work!')
+
+ auth = get_connection_info(module)
+ params = module.params
+ instance_ids = params.get('instance_ids')
+ requested_template_name = params.get('template_name')
+ requested_template_id = params.get('template_id')
+ put_vm_on_hold = params.get('vm_start_on_hold')
+ state = params.get('state')
+ permissions = params.get('mode')
+ owner_id = params.get('owner_id')
+ group_id = params.get('group_id')
+ wait = params.get('wait')
+ wait_timeout = params.get('wait_timeout')
+ hard = params.get('hard')
+ memory = params.get('memory')
+ cpu = params.get('cpu')
+ vcpu = params.get('vcpu')
+ disk_size = params.get('disk_size')
+ requested_datastore_id = params.get('datastore_id')
+ requested_datastore_name = params.get('datastore_name')
+ networks = params.get('networks')
+ count = params.get('count')
+ exact_count = params.get('exact_count')
+ attributes = params.get('attributes')
+ count_attributes = params.get('count_attributes')
+ labels = params.get('labels')
+ count_labels = params.get('count_labels')
+ disk_saveas = params.get('disk_saveas')
+ persistent = params.get('persistent')
+ updateconf = params.get('updateconf')
+
+ if not (auth.username and auth.password):
+ # fail early: otherwise one_client would be referenced below without being defined
+ module.fail_json(msg="Authentication credentials are missing")
+ else:
+ one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password)
+
+ if attributes:
+ attributes = dict((key.upper(), value) for key, value in attributes.items())
+ check_attributes(module, attributes)
+
+ if count_attributes:
+ count_attributes = dict((key.upper(), value) for key, value in count_attributes.items())
+ if not attributes:
+ import copy
+ module.warn('When you pass `count_attributes` without the `attributes` option when deploying, the `attributes` option implicitly gets the same values.')
+ attributes = copy.copy(count_attributes)
+ check_attributes(module, count_attributes)
+
+ if updateconf:
+ check_updateconf(module, updateconf)
+
+ if count_labels and not labels:
+ module.warn('When you pass `count_labels` without the `labels` option when deploying, the `labels` option implicitly gets the same values.')
+ labels = count_labels
+
+ # Fetch template
+ template_id = None
+ if requested_template_id is not None or requested_template_name:
+ template_id = get_template_id(module, one_client, requested_template_id, requested_template_name)
+ if template_id is None:
+ if requested_template_id is not None:
+ module.fail_json(msg='There is no template with template_id: ' + str(requested_template_id))
+ elif requested_template_name:
+ module.fail_json(msg="There is no template with name: " + requested_template_name)
+
+ # Fetch datastore
+ datastore_id = None
+ if requested_datastore_id or requested_datastore_name:
+ datastore_id = get_datastore_id(module, one_client, requested_datastore_id, requested_datastore_name)
+ if datastore_id is None:
+ if requested_datastore_id:
+ module.fail_json(msg='There is no datastore with datastore_id: ' + str(requested_datastore_id))
+ elif requested_datastore_name:
+ module.fail_json(msg="There is no datastore with name: " + requested_datastore_name)
+ else:
+ attributes['SCHED_DS_REQUIREMENTS'] = 'ID=' + str(datastore_id)
+
+ if exact_count and template_id is None:
+ module.fail_json(msg='Option `exact_count` needs template_id or template_name')
+
+ if exact_count is not None and not (count_attributes or count_labels):
+ module.fail_json(msg='Either `count_attributes` or `count_labels` has to be specified with option `exact_count`.')
+ if (count_attributes or count_labels) and exact_count is None:
+ module.fail_json(msg='Option `exact_count` has to be specified when either `count_attributes` or `count_labels` is used.')
+ if template_id is not None and state != 'present':
+ module.fail_json(msg="Only state 'present' is valid for the template")
+
+ if memory:
+ attributes['MEMORY'] = str(int(get_size_in_MB(module, memory)))
+ if cpu:
+ attributes['CPU'] = str(cpu)
+ if vcpu:
+ attributes['VCPU'] = str(vcpu)
+
+ if exact_count is not None and state != 'present':
+ module.fail_json(msg='The `exact_count` option is valid only for the `present` state')
+ if exact_count is not None and exact_count < 0:
+ module.fail_json(msg='`exact_count` cannot be less than 0')
+ if count <= 0:
+ module.fail_json(msg='`count` has to be greater than 0')
+
+ if permissions is not None:
+ import re
+ if re.match("^[0-7]{3}$", permissions) is None:
+ module.fail_json(msg="Option `mode` has to have exactly 3 digits and be in the octet format e.g. 600")
+
+ if exact_count is not None:
+ # Deploy an exact count of VMs
+ changed, instances_list, tagged_instances_list = create_exact_count_of_vms(module, one_client, template_id, exact_count, attributes,
+ count_attributes, labels, count_labels, disk_size,
+ networks, hard, wait, wait_timeout, put_vm_on_hold, persistent, updateconf)
+ vms = tagged_instances_list
+ elif template_id is not None and state == 'present':
+ # Deploy count VMs
+ changed, instances_list, tagged_instances_list = create_count_of_vms(module, one_client, template_id, count,
+ attributes, labels, disk_size, networks, wait, wait_timeout,
+ put_vm_on_hold, persistent, updateconf)
+ # instances_list - new instances
+ # tagged_instances_list - all instances with specified `count_attributes` and `count_labels`
+ vms = instances_list
+ else:
+ # Fetch data of instances, or change their state
+ if not (instance_ids or attributes or labels):
+ module.fail_json(msg="At least one of `instance_ids`,`attributes`,`labels` must be passed!")
+
+ if memory or cpu or vcpu or disk_size or networks:
+ module.fail_json(msg="Parameters as `memory`, `cpu`, `vcpu`, `disk_size` and `networks` you can only set when deploying a VM!")
+
+ if hard and state not in ['rebooted', 'poweredoff', 'absent', 'present']:
+ module.fail_json(msg="The 'hard' option can be used only for one of these states: 'rebooted', 'poweredoff', 'absent' and 'present'")
+
+ vms = []
+ tagged = False
+ changed = False
+
+ if instance_ids:
+ vms = get_vms_by_ids(module, one_client, state, instance_ids)
+ else:
+ tagged = True
+ vms = get_all_vms_by_attributes(one_client, attributes, labels)
+
+ if len(vms) == 0 and state != 'absent' and state != 'present':
+ module.fail_json(msg='There are no instances with specified `instance_ids`, `attributes` and/or `labels`')
+
+ if len(vms) == 0 and state == 'present' and not tagged:
+ module.fail_json(msg='There are no instances with specified `instance_ids`.')
+
+ if tagged and state == 'absent':
+ module.fail_json(msg='Option `instance_ids` is required when state is `absent`.')
+
+ if state == 'absent':
+ changed = terminate_vms(module, one_client, vms, hard)
+ elif state == 'rebooted':
+ changed = reboot_vms(module, one_client, vms, wait_timeout, hard)
+ elif state == 'poweredoff':
+ changed = poweroff_vms(module, one_client, vms, hard)
+ elif state == 'running':
+ changed = resume_vms(module, one_client, vms)
+
+ instances_list = vms
+ tagged_instances_list = []
+
+ if permissions is not None:
+ changed = set_vm_permissions(module, one_client, vms, permissions) or changed
+
+ if owner_id is not None or group_id is not None:
+ changed = set_vm_ownership(module, one_client, vms, owner_id, group_id) or changed
+
+ if template_id is None and updateconf is not None:
+ changed = update_vms(module, one_client, vms, updateconf) or changed
+
+ if wait and not module.check_mode and state != 'present':
+ wait_for = {
+ 'absent': wait_for_done,
+ 'rebooted': wait_for_running,
+ 'poweredoff': wait_for_poweroff,
+ 'running': wait_for_running
+ }
+ for vm in vms:
+ if vm is not None:
+ wait_for[state](module, one_client, vm, wait_timeout)
+
+ if disk_saveas is not None:
+ if len(vms) == 0:
+ module.fail_json(msg="There is no VM whose disk will be saved.")
+ disk_save_as(module, one_client, vms[0], disk_saveas, wait_timeout)
+ changed = True
+
+ # instances - a list of instances info whose state is changed or which are fetched with C(instance_ids) option
+ instances = list(get_vm_info(one_client, vm) for vm in instances_list if vm is not None)
+ instances_ids = list(vm.ID for vm in instances_list if vm is not None)
+ # tagged_instances - A list of instances info based on a specific attributes and/or labels that are specified with C(count_attributes) and C(count_labels)
+ tagged_instances = list(get_vm_info(one_client, vm) for vm in tagged_instances_list if vm is not None)
+
+ result = {'changed': changed, 'instances': instances, 'instances_ids': instances_ids, 'tagged_instances': tagged_instances}
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
new file mode 100644
index 000000000..37dca74f2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_firewall_policy.py
@@ -0,0 +1,580 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_firewall_policy
+short_description: Configure 1&1 firewall policy
+description:
+ - Create, remove, reconfigure, update firewall policies.
+ This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a firewall policy state to create, remove, or update.
+ required: false
+ type: str
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
+ - Maximum length is 128 characters.
+ type: str
+ firewall_policy:
+ description:
+ - The identifier (id or name) of the firewall policy used with update state.
+ type: str
+ rules:
+ description:
+ - A list of rules that will be set for the firewall policy.
+ Each rule must contain the protocol parameter, in addition to three optional parameters
+ (port_from, port_to, and source).
+ type: list
+ elements: dict
+ default: []
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a firewall policy.
+ Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ remove_server_ips:
+ description:
+ - A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing firewall policy.
+ Its syntax is the same as that of the rules parameter. Used in combination with update state.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ remove_rules:
+ description:
+ - A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ description:
+ description:
+ - Firewall policy description. Maximum length is 256 characters.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible-firewall-policy
+ description: Testing creation of firewall policies with ansible
+ rules:
+ -
+ protocol: TCP
+ port_from: 80
+ port_to: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible-firewall-policy
+
+- name: Update a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ state: update
+ firewall_policy: ansible-firewall-policy
+ name: ansible-firewall-policy-updated
+ description: Testing creation of firewall policies with ansible - updated
+
+- name: Add server to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ add_server_ips:
+ - server_identifier (id or name)
+ - server_identifier #2 (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP ID)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ description: Adding rules to an existing firewall policy
+ add_rules:
+ -
+ protocol: TCP
+ port_from: 70
+ port_to: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_from: 60
+ port_to: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a firewall policy
+ community.general.oneandone_firewall_policy:
+ auth_token: oneandone_private_api_key
+ firewall_policy: ansible-firewall-policy-updated
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+firewall_policy:
+ description: Information about the firewall policy that was processed.
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_firewall_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
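+# A note on the import guard below: the optional '1and1' SDK is probed at
+# import time so a missing dependency is reported via fail_json in main()
+# rather than as an ImportError traceback.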
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
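+# Note: in check mode this helper exits the module immediately with the
+# predicted 'changed' value, so code following a _check_mode() call only
+# runs during a real (non-check) execution.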
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
+ """
+ Assigns servers to a firewall policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in server_ids:
+ server = get_server(oneandone_conn, _server_id, True)
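+ # Attach via the server's first IP; this assumes the server has at
+ # least one IP assigned.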
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.attach_server_firewall_policy(
+ firewall_id=firewall_id,
+ server_ips=attach_servers)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
+ """
+ Unassigns a server/IP from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ firewall_server = oneandone_conn.get_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ if firewall_server:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_server(
+ firewall_id=firewall_id,
+ server_ip_id=server_ip_id)
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
+ """
+ Adds new rules to a firewall policy.
+ """
+ try:
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ if module.check_mode:
+ firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
+ if (firewall_rules and firewall_policy_id):
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.add_firewall_policy_rule(
+ firewall_id=firewall_id,
+ firewall_policy_rules=firewall_rules
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
+ """
+ Removes a rule from a firewall policy.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_firewall_policy_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ firewall_policy = oneandone_conn.remove_firewall_rule(
+ firewall_id=firewall_id,
+ rule_id=rule_id
+ )
+ return firewall_policy
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_firewall_policy(module, oneandone_conn):
+ """
+ Updates a firewall policy based on input arguments.
+ Firewall rules and server ips can be added/removed to/from
+ firewall policy. Firewall policy name and description can be
+ updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ firewall_policy_id = module.params.get('firewall_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
+ if firewall_policy is None:
+ _check_mode(module, False)
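+ # Outside check mode a missing policy is not handled here; the later
+ # subscript on None raises and is reported via the surrounding
+ # except/fail_json.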
+
+ if name or description:
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.modify_firewall(
+ firewall_id=firewall_policy['id'],
+ name=name,
+ description=description)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_server_ips))
+
+ firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+
+ _remove_firewall_server(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ if add_rules:
+ firewall_policy = _add_firewall_rules(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ add_rules)
+ _check_mode(module, firewall_policy)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+
+ _remove_firewall_rule(module,
+ oneandone_conn,
+ firewall_policy['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
+ changed = True
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def create_firewall_policy(module, oneandone_conn):
+ """
+ Create a new firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ firewall_rules = []
+
+ for rule in rules:
+ firewall_rule = oneandone.client.FirewallPolicyRule(
+ protocol=rule['protocol'],
+ port_from=rule['port_from'],
+ port_to=rule['port_to'],
+ source=rule['source'])
+ firewall_rules.append(firewall_rule)
+
+ firewall_policy_obj = oneandone.client.FirewallPolicy(
+ name=name,
+ description=description
+ )
+
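+ # In check mode, creating a policy is always reported as a change and
+ # the module exits here, before any API call is made.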
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.create_firewall_policy(
+ firewall_policy=firewall_policy_obj,
+ firewall_policy_rules=firewall_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.firewall_policy,
+ firewall_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True) # refresh
+ changed = bool(firewall_policy)
+
+ _check_mode(module, False)
+
+ return (changed, firewall_policy)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def remove_firewall_policy(module, oneandone_conn):
+ """
+ Removes a firewall policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ fp_id = module.params.get('name')
+ firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
+ if module.check_mode:
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)
+
+ changed = bool(firewall_policy)
+
+ return (changed, {
+ 'id': firewall_policy['id'],
+ 'name': firewall_policy['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
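+ # auth_token and api_url fall back to environment variables so
+ # playbooks need not hard-code credentials.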
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ description=dict(type='str'),
+ rules=dict(type='list', elements="dict", default=[]),
+ add_server_ips=dict(type='list', elements="str", default=[]),
+ remove_server_ips=dict(type='list', elements="str", default=[]),
+ add_rules=dict(type='list', elements="dict", default=[]),
+ remove_rules=dict(type='list', elements="str", default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a firewall policy.")
+ try:
+ (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'update':
+ if not module.params.get('firewall_policy'):
+ module.fail_json(
+ msg="'firewall_policy' parameter is required to update a firewall policy.")
+ try:
+ (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ for param in ('name', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new firewall policies." % param)
+ try:
+ (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, firewall_policy=firewall_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
new file mode 100644
index 000000000..7f7af9c4f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_load_balancer.py
@@ -0,0 +1,684 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_load_balancer
+short_description: Configure 1&1 load balancer
+description:
+ - Create, remove, or update load balancers.
+ This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a load balancer state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ load_balancer:
+ description:
+ - The identifier (id or name) of the load balancer used with update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Load balancer name used with present state. Used as identifier (id or name) when used with absent state.
+ maxLength=128
+ type: str
+ health_check_test:
+ description:
+ - Type of the health check. At the moment, HTTP is not allowed.
+ type: str
+ choices: [ "NONE", "TCP", "HTTP", "ICMP" ]
+ health_check_interval:
+ description:
+ - Health check period in seconds. minimum=5, maximum=300, multipleOf=1
+ type: str
+ health_check_path:
+ description:
+ - URL to call for checking. Required for HTTP health check. maxLength=1000
+ type: str
+ required: false
+ health_check_parse:
+ description:
+ - Regular expression to check. Required for HTTP health check. maxLength=64
+ type: str
+ required: false
+ persistence:
+ description:
+ - Persistence.
+ type: bool
+ persistence_time:
+ description:
+ - Persistence time in seconds. Required if persistence is enabled. minimum=30, maximum=1200, multipleOf=1
+ type: str
+ method:
+ description:
+ - Balancing procedure.
+ type: str
+ choices: [ "ROUND_ROBIN", "LEAST_CONNECTIONS" ]
+ datacenter:
+ description:
+ - ID or country code of the datacenter where the load balancer will be created.
+ - If not specified, it defaults to I(US).
+ type: str
+ choices: [ "US", "ES", "DE", "GB" ]
+ required: false
+ rules:
+ description:
+ - A list of rule objects that will be set for the load balancer. Each rule must contain the protocol,
+ port_balancer, and port_server parameters, in addition to the optional source parameter.
+ type: list
+ elements: dict
+ default: []
+ description:
+ description:
+ - Description of the load balancer. maxLength=256
+ type: str
+ required: false
+ add_server_ips:
+ description:
+ - A list of server identifiers (id or name) to be assigned to a load balancer.
+ Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ remove_server_ips:
+ description:
+ - A list of server IP IDs to be unassigned from a load balancer. Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ add_rules:
+ description:
+ - A list of rules that will be added to an existing load balancer.
+ Its syntax is the same as the one used for the rules parameter. Used in combination with update state.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ remove_rules:
+ description:
+ - A list of rule IDs that will be removed from an existing load balancer. Used in combination with update state.
+ type: list
+ elements: str
+ required: false
+ default: []
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ description: Testing creation of load balancer with ansible
+ health_check_test: TCP
+ health_check_interval: 40
+ persistence: true
+ persistence_time: 1200
+ method: ROUND_ROBIN
+ datacenter: US
+ rules:
+ -
+ protocol: TCP
+ port_balancer: 80
+ port_server: 80
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+
+- name: Destroy a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ name: ansible load balancer
+ wait: true
+ wait_timeout: 500
+ state: absent
+
+- name: Update a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer
+ name: ansible load balancer updated
+ description: Testing the update of a load balancer with ansible
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add server to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding server to a load balancer with ansible
+ add_server_ips:
+ - server identifier (id or name)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove server from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing server from a load balancer with ansible
+ remove_server_ips:
+ - B2504878540DBC5F7634EB00A07C1EBD (server's IP ID)
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Add rules to a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Adding rules to a load balancer with ansible
+ add_rules:
+ -
+ protocol: TCP
+ port_balancer: 70
+ port_server: 70
+ source: 0.0.0.0
+ -
+ protocol: TCP
+ port_balancer: 60
+ port_server: 60
+ source: 0.0.0.0
+ wait: true
+ wait_timeout: 500
+ state: update
+
+- name: Remove rules from a load balancer
+ community.general.oneandone_load_balancer:
+ auth_token: oneandone_private_api_key
+ load_balancer: ansible load balancer updated
+ description: Removing rules from a load balancer with ansible
+ remove_rules:
+ - rule_id #1
+ - rule_id #2
+ - ...
+ wait: true
+ wait_timeout: 500
+ state: update
+'''
+
+RETURN = '''
+load_balancer:
+ description: Information about the load balancer that was processed.
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Balancer"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_load_balancer,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+HEALTH_CHECK_TESTS = ['NONE', 'TCP', 'HTTP', 'ICMP']
+METHODS = ['ROUND_ROBIN', 'LEAST_CONNECTIONS']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_server_ips(module, oneandone_conn, load_balancer_id, server_ids):
+ """
+ Assigns servers to a load balancer.
+ """
+ try:
+ attach_servers = []
+
+ for server_id in server_ids:
+ server = get_server(oneandone_conn, server_id, True)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server['id'],
+ server_ip_id=next(iter(server['ips'] or []), None)['id']
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.attach_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ips=attach_servers)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_server(module, oneandone_conn, load_balancer_id, server_ip_id):
+ """
+ Unassigns a server/IP from a load balancer.
+ """
+ try:
+ if module.check_mode:
+ lb_server = oneandone_conn.get_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ if lb_server:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_server(
+ load_balancer_id=load_balancer_id,
+ server_ip_id=server_ip_id)
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_load_balancer_rules(module, oneandone_conn, load_balancer_id, rules):
+ """
+ Adds new rules to a load_balancer.
+ """
+ try:
+ load_balancer_rules = []
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ if module.check_mode:
+ lb_id = get_load_balancer(oneandone_conn, load_balancer_id)
+ if (load_balancer_rules and lb_id):
+ return True
+ return False
+
+ load_balancer = oneandone_conn.add_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _remove_load_balancer_rule(module, oneandone_conn, load_balancer_id, rule_id):
+ """
+ Removes a rule from a load_balancer.
+ """
+ try:
+ if module.check_mode:
+ rule = oneandone_conn.get_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id)
+ if rule:
+ return True
+ return False
+
+ load_balancer = oneandone_conn.remove_load_balancer_rule(
+ load_balancer_id=load_balancer_id,
+ rule_id=rule_id
+ )
+ return load_balancer
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_load_balancer(module, oneandone_conn):
+ """
+ Updates a load_balancer based on input arguments.
+ Load balancer rules and server ips can be added/removed to/from
+ load balancer. Load balancer name, description, health_check_test,
+ health_check_interval, persistence, persistence_time, and method
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ load_balancer_id = module.params.get('load_balancer')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ add_server_ips = module.params.get('add_server_ips')
+ remove_server_ips = module.params.get('remove_server_ips')
+ add_rules = module.params.get('add_rules')
+ remove_rules = module.params.get('remove_rules')
+
+ changed = False
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer_id, True)
+ if load_balancer is None:
+ _check_mode(module, False)
+
+ if (name or description or health_check_test or health_check_interval or health_check_path or
+ health_check_parse or persistence or persistence_time or method):
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.modify_load_balancer(
+ load_balancer_id=load_balancer['id'],
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method)
+ changed = True
+
+ if add_server_ips:
+ if module.check_mode:
+ _check_mode(module, _add_server_ips(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_server_ips))
+
+ load_balancer = _add_server_ips(module, oneandone_conn, load_balancer['id'], add_server_ips)
+ changed = True
+
+ if remove_server_ips:
+ chk_changed = False
+ for server_ip_id in remove_server_ips:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+
+ _remove_load_balancer_server(module,
+ oneandone_conn,
+ load_balancer['id'],
+ server_ip_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ if add_rules:
+ load_balancer = _add_load_balancer_rules(module,
+ oneandone_conn,
+ load_balancer['id'],
+ add_rules)
+ _check_mode(module, load_balancer)
+ changed = True
+
+ if remove_rules:
+ chk_changed = False
+ for rule_id in remove_rules:
+ if module.check_mode:
+ chk_changed |= _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+
+ _remove_load_balancer_rule(module,
+ oneandone_conn,
+ load_balancer['id'],
+ rule_id)
+ _check_mode(module, chk_changed)
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True)
+ changed = True
+
+ # Unlike the create/remove paths, this function is not wrapped in its own
+ # try/except; exceptions raised above propagate to main(), which catches
+ # them and reports the failure via fail_json.
+ return (changed, load_balancer)
+
+
+def create_load_balancer(module, oneandone_conn):
+ """
+ Create a new load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ health_check_test = module.params.get('health_check_test')
+ health_check_interval = module.params.get('health_check_interval')
+ health_check_path = module.params.get('health_check_path')
+ health_check_parse = module.params.get('health_check_parse')
+ persistence = module.params.get('persistence')
+ persistence_time = module.params.get('persistence_time')
+ method = module.params.get('method')
+ datacenter = module.params.get('datacenter')
+ rules = module.params.get('rules')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ load_balancer_rules = []
+
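+ # Resolve the optional datacenter (ID or country code) to its ID and
+ # fail early if it cannot be found.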
+ datacenter_id = None
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ for rule in rules:
+ load_balancer_rule = oneandone.client.LoadBalancerRule(
+ protocol=rule['protocol'],
+ port_balancer=rule['port_balancer'],
+ port_server=rule['port_server'],
+ source=rule['source'])
+ load_balancer_rules.append(load_balancer_rule)
+
+ _check_mode(module, True)
+ load_balancer_obj = oneandone.client.LoadBalancer(
+ health_check_path=health_check_path,
+ health_check_parse=health_check_parse,
+ name=name,
+ description=description,
+ health_check_test=health_check_test,
+ health_check_interval=health_check_interval,
+ persistence=persistence,
+ persistence_time=persistence_time,
+ method=method,
+ datacenter_id=datacenter_id
+ )
+
+ load_balancer = oneandone_conn.create_load_balancer(
+ load_balancer=load_balancer_obj,
+ load_balancer_rules=load_balancer_rules
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.load_balancer,
+ load_balancer['id'],
+ wait_timeout,
+ wait_interval)
+
+ load_balancer = get_load_balancer(oneandone_conn, load_balancer['id'], True) # refresh
+ changed = bool(load_balancer)
+
+ _check_mode(module, False)
+
+ return (changed, load_balancer)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_load_balancer(module, oneandone_conn):
+ """
+ Removes a load_balancer.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ lb_id = module.params.get('name')
+ load_balancer_id = get_load_balancer(oneandone_conn, lb_id)
+ if module.check_mode:
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ load_balancer = oneandone_conn.delete_load_balancer(load_balancer_id)
+
+ changed = bool(load_balancer)
+
+ return (changed, {
+ 'id': load_balancer['id'],
+ 'name': load_balancer['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ load_balancer=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ health_check_test=dict(
+ choices=HEALTH_CHECK_TESTS),
+ health_check_interval=dict(type='str'),
+ health_check_path=dict(type='str'),
+ health_check_parse=dict(type='str'),
+ persistence=dict(type='bool'),
+ persistence_time=dict(type='str'),
+ method=dict(
+ choices=METHODS),
+ datacenter=dict(
+ choices=DATACENTERS),
+ rules=dict(type='list', elements="dict", default=[]),
+ add_server_ips=dict(type='list', elements="str", default=[]),
+ remove_server_ips=dict(type='list', elements="str", default=[]),
+ add_rules=dict(type='list', elements="dict", default=[]),
+ remove_rules=dict(type='list', elements="str", default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+ module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The auth_token parameter or ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a load balancer.")
+ try:
+ (changed, load_balancer) = remove_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('load_balancer'):
+ module.fail_json(
+ msg="'load_balancer' parameter is required for updating a load balancer.")
+ try:
+ (changed, load_balancer) = update_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'health_check_test', 'health_check_interval', 'persistence',
+ 'persistence_time', 'method', 'rules'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new load balancers." % param)
+ try:
+ (changed, load_balancer) = create_load_balancer(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, load_balancer=load_balancer)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
new file mode 100644
index 000000000..6118645bf
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_monitoring_policy.py
@@ -0,0 +1,1045 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_monitoring_policy
+short_description: Configure 1&1 monitoring policy
+description:
+ - Create, remove, or update monitoring policies
+ (and add/remove ports, processes, and servers).
+ This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a monitoring policy's state to create, remove, or update.
+ type: str
+ required: false
+ default: present
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+ - Monitoring policy name used with present state. Used as identifier (id or name) when used with absent state. maxLength=128
+ type: str
+ monitoring_policy:
+ description:
+ - The identifier (id or name) of the monitoring policy used with update state.
+ type: str
+ agent:
+ description:
+ - Set true to use the monitoring agent.
+ type: str
+ email:
+ description:
+ - User's email. maxLength=128
+ type: str
+ description:
+ description:
+ - Monitoring policy description. maxLength=256
+ type: str
+ required: false
+ thresholds:
+ description:
+ - Monitoring policy thresholds. Each of the suboptions has warning and critical,
+ which both have alert and value suboptions. warning sets the limits for
+ warning alerts, critical sets the limits for critical alerts; alert enables
+ the alert, and value is the threshold at which the alert is raised.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ cpu:
+ description:
+ - Consumption limits of CPU.
+ required: true
+ ram:
+ description:
+ - Consumption limits of RAM.
+ required: true
+ disk:
+ description:
+ - Consumption limits of hard disk.
+ required: true
+ internal_ping:
+ description:
+ - Response limits of internal ping.
+ required: true
+ transfer:
+ description:
+ - Consumption limits for transfer.
+ required: true
+ ports:
+ description:
+ - Array of ports that will be monitored.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ protocol:
+ description:
+ - Internet protocol.
+ choices: [ "TCP", "UDP" ]
+ required: true
+ port:
+ description:
+ - Port number. minimum=1, maximum=65535
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RESPONDING", "NOT_RESPONDING" ]
+ required: true
+ email_notification:
+ description:
+ - Set true to send e-mail notifications.
+ required: true
+ processes:
+ description:
+ - Array of processes that will be monitored.
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ process:
+ description:
+ - Name of the process. maxLength=50
+ required: true
+ alert_if:
+ description:
+ - Case of alert.
+ choices: [ "RUNNING", "NOT_RUNNING" ]
+ required: true
+ add_ports:
+ description:
+ - Ports to add to the monitoring policy.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ add_processes:
+ description:
+ - Processes to add to the monitoring policy.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ add_servers:
+ description:
+ - Servers to add to the monitoring policy.
+ type: list
+ elements: str
+ required: false
+ default: []
+ remove_ports:
+ description:
+ - Ports to remove from the monitoring policy.
+ type: list
+ elements: str
+ required: false
+ default: []
+ remove_processes:
+ description:
+ - Processes to remove from the monitoring policy.
+ type: list
+ elements: str
+ required: false
+ default: []
+ remove_servers:
+ description:
+ - Servers to remove from the monitoring policy.
+ type: list
+ elements: str
+ required: false
+ default: []
+ update_ports:
+ description:
+ - Ports to be updated on the monitoring policy.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ update_processes:
+ description:
+ - Processes to be updated on the monitoring policy.
+ type: list
+ elements: dict
+ required: false
+ default: []
+ wait:
+ description:
+ - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+ - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ name: ansible monitoring policy
+ description: Testing creation of a monitoring policy with ansible
+ email: your@emailaddress.com
+ agent: true
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 92
+ alert: false
+ -
+ ram:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ disk:
+ warning:
+ value: 80
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 50
+ alert: false
+ critical:
+ value: 100
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 1000
+ alert: false
+ critical:
+ value: 2000
+ alert: false
+ ports:
+ -
+ protocol: TCP
+ port: 22
+ alert_if: RESPONDING
+ email_notification: false
+ processes:
+ -
+ process: test
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+
+- name: Destroy a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: ansible monitoring policy
+
+- name: Update a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy
+ name: ansible monitoring policy updated
+ description: Testing creation of a monitoring policy with ansible updated
+ email: another@emailaddress.com
+ thresholds:
+ -
+ cpu:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ ram:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ disk:
+ warning:
+ value: 70
+ alert: false
+ critical:
+ value: 80
+ alert: false
+ -
+ internal_ping:
+ warning:
+ value: 60
+ alert: false
+ critical:
+ value: 90
+ alert: false
+ -
+ transfer:
+ warning:
+ value: 900
+ alert: false
+ critical:
+ value: 1900
+ alert: false
+ wait: true
+ state: update
+
+- name: Add a port to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_ports:
+ -
+ protocol: TCP
+ port: 33
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing ports of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_ports:
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 34
+ alert_if: RESPONDING
+ email_notification: false
+ -
+ id: existing_port_id
+ protocol: TCP
+ port: 23
+ alert_if: RESPONDING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a port from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_ports:
+ - port_id
+ state: update
+
+- name: Add a process to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_processes:
+ -
+ process: test_2
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Update existing processes of a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ update_processes:
+ -
+ id: process_id
+ process: test_1
+ alert_if: NOT_RUNNING
+ email_notification: false
+ -
+ id: process_id
+ process: test_3
+ alert_if: NOT_RUNNING
+ email_notification: false
+ wait: true
+ state: update
+
+- name: Remove a process from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_processes:
+ - process_id
+ wait: true
+ state: update
+
+- name: Add server to a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ add_servers:
+ - server id or name
+ wait: true
+ state: update
+
+- name: Remove server from a monitoring policy
+ community.general.oneandone_monitoring_policy:
+ auth_token: oneandone_private_api_key
+ monitoring_policy: ansible monitoring policy updated
+ remove_servers:
+ - server01
+ wait: true
+ state: update
+'''
+
+RETURN = '''
+monitoring_policy:
+ description: Information about the monitoring policy that was processed.
+ type: dict
+ sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_monitoring_policy,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_ports(module, oneandone_conn, monitoring_policy_id, ports):
+ """
+ Adds new ports to a monitoring policy.
+ """
+ try:
+ monitoring_policy_ports = []
+
+ for _port in ports:
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=_port['protocol'],
+ port=_port['port'],
+ alert_if=_port['alert_if'],
+ email_notification=_port['email_notification']
+ )
+ monitoring_policy_ports.append(monitoring_policy_port)
+
+ if module.check_mode:
+ if monitoring_policy_ports:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_port(
+ monitoring_policy_id=monitoring_policy_id,
+ ports=monitoring_policy_ports)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_port(module, oneandone_conn, monitoring_policy_id, port_id):
+ """
+ Removes a port from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if monitoring_policy:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_port(module, oneandone_conn, monitoring_policy_id, port_id, port):
+ """
+ Modifies a monitoring policy port.
+ """
+ try:
+ if module.check_mode:
+ cm_port = oneandone_conn.get_monitoring_policy_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id)
+ if cm_port:
+ return True
+ return False
+
+ monitoring_policy_port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=port['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_port(
+ monitoring_policy_id=monitoring_policy_id,
+ port_id=port_id,
+ port=monitoring_policy_port)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _add_processes(module, oneandone_conn, monitoring_policy_id, processes):
+ """
+ Adds new processes to a monitoring policy.
+ """
+ try:
+ monitoring_policy_processes = []
+
+ for _process in processes:
+ monitoring_policy_process = oneandone.client.Process(
+ process=_process['process'],
+ alert_if=_process['alert_if'],
+ email_notification=_process['email_notification']
+ )
+ monitoring_policy_processes.append(monitoring_policy_process)
+
+ if module.check_mode:
+ mp_id = get_monitoring_policy(oneandone_conn, monitoring_policy_id)
+ if (monitoring_policy_processes and mp_id):
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.add_process(
+ monitoring_policy_id=monitoring_policy_id,
+ processes=monitoring_policy_processes)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _delete_monitoring_policy_process(module, oneandone_conn, monitoring_policy_id, process_id):
+ """
+ Removes a process from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id
+ )
+ if process:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.delete_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _modify_process(module, oneandone_conn, monitoring_policy_id, process_id, process):
+ """
+ Modifies a monitoring policy process.
+ """
+ try:
+ if module.check_mode:
+ cm_process = oneandone_conn.get_monitoring_policy_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id)
+ if cm_process:
+ return True
+ return False
+
+ monitoring_policy_process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=process['email_notification']
+ )
+
+ monitoring_policy = oneandone_conn.modify_process(
+ monitoring_policy_id=monitoring_policy_id,
+ process_id=process_id,
+ process=monitoring_policy_process)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _attach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, servers):
+ """
+ Attaches servers to a monitoring policy.
+ """
+ try:
+ attach_servers = []
+
+ for _server_id in servers:
+ server_id = get_server(oneandone_conn, _server_id)
+ attach_server = oneandone.client.AttachServer(
+ server_id=server_id
+ )
+ attach_servers.append(attach_server)
+
+ if module.check_mode:
+ if attach_servers:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.attach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ servers=attach_servers)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _detach_monitoring_policy_server(module, oneandone_conn, monitoring_policy_id, server_id):
+ """
+ Detaches a server from a monitoring policy.
+ """
+ try:
+ if module.check_mode:
+ mp_server = oneandone_conn.get_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ if mp_server:
+ return True
+ return False
+
+ monitoring_policy = oneandone_conn.detach_monitoring_policy_server(
+ monitoring_policy_id=monitoring_policy_id,
+ server_id=server_id)
+ return monitoring_policy
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def update_monitoring_policy(module, oneandone_conn):
+ """
+ Updates a monitoring_policy based on input arguments.
+ Monitoring policy ports, processes and servers can be added/removed to/from
+ a monitoring policy. Monitoring policy name, description, email,
+ thresholds for cpu, ram, disk, transfer and internal_ping
+ can be updated as well.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ monitoring_policy_id = module.params.get('monitoring_policy')
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ thresholds = module.params.get('thresholds')
+ add_ports = module.params.get('add_ports')
+ update_ports = module.params.get('update_ports')
+ remove_ports = module.params.get('remove_ports')
+ add_processes = module.params.get('add_processes')
+ update_processes = module.params.get('update_processes')
+ remove_processes = module.params.get('remove_processes')
+ add_servers = module.params.get('add_servers')
+ remove_servers = module.params.get('remove_servers')
+
+ changed = False
+
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy_id, True)
+ if monitoring_policy is None:
+ _check_mode(module, False)
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(
+ name=name,
+ description=description,
+ email=email
+ )
+
+ _thresholds = None
+
+ if thresholds:
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
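+ # Each item in 'thresholds' is expected to be a single-key dict mapping
+ # an entity name to its warning/critical settings; keys outside
+ # threshold_entities are silently skipped.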
+
+ _thresholds = []
+ for threshold in thresholds:
+ key = list(threshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=threshold[key]['warning']['value'],
+ warning_alert=str(threshold[key]['warning']['alert']).lower(),
+ critical_value=threshold[key]['critical']['value'],
+ critical_alert=str(threshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ if name or description or email or thresholds:
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.modify_monitoring_policy(
+ monitoring_policy_id=monitoring_policy['id'],
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds)
+ changed = True
+
+ if add_ports:
+ if module.check_mode:
+ _check_mode(module, _add_ports(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_ports))
+
+ monitoring_policy = _add_ports(module, oneandone_conn, monitoring_policy['id'], add_ports)
+ changed = True
+
+ if update_ports:
+ chk_changed = False
+ for update_port in update_ports:
+ if module.check_mode:
+ chk_changed |= _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+
+ _modify_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_port['id'],
+ update_port)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_ports:
+ chk_changed = False
+ for port_id in remove_ports:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+
+ _delete_monitoring_policy_port(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ port_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_processes:
+ monitoring_policy = _add_processes(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_processes)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if update_processes:
+ chk_changed = False
+ for update_process in update_processes:
+ if module.check_mode:
+ chk_changed |= _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+
+ _modify_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ update_process['id'],
+ update_process)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if remove_processes:
+ chk_changed = False
+ for process_id in remove_processes:
+ if module.check_mode:
+ chk_changed |= _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+
+ _delete_monitoring_policy_process(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ process_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ if add_servers:
+ monitoring_policy = _attach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ add_servers)
+ _check_mode(module, monitoring_policy)
+ changed = True
+
+ if remove_servers:
+ chk_changed = False
+ for _server_id in remove_servers:
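+ # Accept either a server name or ID and resolve it to the ID first.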
+ server_id = get_server(oneandone_conn, _server_id)
+
+ if module.check_mode:
+ chk_changed |= _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+
+ _detach_monitoring_policy_server(module,
+ oneandone_conn,
+ monitoring_policy['id'],
+ server_id)
+ _check_mode(module, chk_changed)
+ monitoring_policy = get_monitoring_policy(oneandone_conn, monitoring_policy['id'], True)
+ changed = True
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_monitoring_policy(module, oneandone_conn):
+ """
+ Creates a new monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ name = module.params.get('name')
+ description = module.params.get('description')
+ email = module.params.get('email')
+ agent = module.params.get('agent')
+ thresholds = module.params.get('thresholds')
+ ports = module.params.get('ports')
+ processes = module.params.get('processes')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ _monitoring_policy = oneandone.client.MonitoringPolicy(name,
+ description,
+ email,
+ agent)
+
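+ # Normalize the boolean-like agent flag to a lowercase string
+ # ('true'/'false'), matching what this module sends elsewhere.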
+ _monitoring_policy.specs['agent'] = str(_monitoring_policy.specs['agent']).lower()
+
+ threshold_entities = ['cpu', 'ram', 'disk', 'internal_ping', 'transfer']
+
+ _thresholds = []
+ for threshold in thresholds:
+ key = list(threshold.keys())[0]
+ if key in threshold_entities:
+ _threshold = oneandone.client.Threshold(
+ entity=key,
+ warning_value=threshold[key]['warning']['value'],
+ warning_alert=str(threshold[key]['warning']['alert']).lower(),
+ critical_value=threshold[key]['critical']['value'],
+ critical_alert=str(threshold[key]['critical']['alert']).lower())
+ _thresholds.append(_threshold)
+
+ _ports = []
+ for port in ports:
+ _port = oneandone.client.Port(
+ protocol=port['protocol'],
+ port=port['port'],
+ alert_if=port['alert_if'],
+ email_notification=str(port['email_notification']).lower())
+ _ports.append(_port)
+
+ _processes = []
+ for process in processes:
+ _process = oneandone.client.Process(
+ process=process['process'],
+ alert_if=process['alert_if'],
+ email_notification=str(process['email_notification']).lower())
+ _processes.append(_process)
+
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.create_monitoring_policy(
+ monitoring_policy=_monitoring_policy,
+ thresholds=_thresholds,
+ ports=_ports,
+ processes=_processes
+ )
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.monitoring_policy,
+ monitoring_policy['id'],
+ wait_timeout,
+ wait_interval)
+
+ changed = bool(monitoring_policy)
+
+ _check_mode(module, False)
+
+ return (changed, monitoring_policy)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_monitoring_policy(module, oneandone_conn):
+ """
+ Removes a monitoring policy.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ mp_id = module.params.get('name')
+ monitoring_policy_id = get_monitoring_policy(oneandone_conn, mp_id)
+ if module.check_mode:
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ monitoring_policy = oneandone_conn.delete_monitoring_policy(monitoring_policy_id)
+
+ changed = bool(monitoring_policy)
+
+ return (changed, {
+ 'id': monitoring_policy['id'],
+ 'name': monitoring_policy['name']
+ })
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ name=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ agent=dict(type='str'),
+ email=dict(type='str'),
+ description=dict(type='str'),
+ thresholds=dict(type='list', elements="dict", default=[]),
+ ports=dict(type='list', elements="dict", default=[]),
+ processes=dict(type='list', elements="dict", default=[]),
+ add_ports=dict(type='list', elements="dict", default=[]),
+ update_ports=dict(type='list', elements="dict", default=[]),
+ remove_ports=dict(type='list', elements="str", default=[]),
+ add_processes=dict(type='list', elements="dict", default=[]),
+ update_processes=dict(type='list', elements="dict", default=[]),
+ remove_processes=dict(type='list', elements="str", default=[]),
+ add_servers=dict(type='list', elements="str", default=[]),
+ remove_servers=dict(type='list', elements="str", default=[]),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required to delete a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = remove_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+ elif state == 'update':
+ if not module.params.get('monitoring_policy'):
+ module.fail_json(
+ msg="'monitoring_policy' parameter is required to update a monitoring policy.")
+ try:
+ (changed, monitoring_policy) = update_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('name', 'agent', 'email', 'thresholds', 'ports', 'processes'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for a new monitoring policy." % param)
+ try:
+ (changed, monitoring_policy) = create_monitoring_policy(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, monitoring_policy=monitoring_policy)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_private_network.py b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
new file mode 100644
index 000000000..114bf2f22
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_private_network.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_private_network
+short_description: Configure 1&1 private networking
+description:
+    - Create, remove, reconfigure, or update a private network.
+ This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a network's state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ private_network:
+ description:
+      - The identifier (id or name) of the network used with the update state.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ name:
+ description:
+      - Private network name used with the present state. Used as an identifier (id or name) with the absent state.
+ type: str
+ description:
+ description:
+ - Set a description for the network.
+ type: str
+ datacenter:
+ description:
+      - The identifier of the datacenter where the private network will be created.
+ type: str
+ choices: [US, ES, DE, GB]
+ network_address:
+ description:
+      - Set a private network address space, for example 192.168.1.0.
+ type: str
+ subnet_mask:
+ description:
+      - Set the netmask for the private network, for example 255.255.255.0.
+ type: str
+ add_members:
+ description:
+ - List of server identifiers (name or id) to be added to the private network.
+ type: list
+ elements: str
+ default: []
+ remove_members:
+ description:
+ - List of server identifiers (name or id) to be removed from the private network.
+ type: list
+ elements: str
+ default: []
+ wait:
+ description:
+      - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+      - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+      - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ name: backup_network
+ description: Testing creation of a private network with ansible
+ network_address: 70.35.193.100
+ subnet_mask: 255.0.0.0
+ datacenter: US
+
+- name: Destroy a private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: absent
+ name: backup_network
+
+- name: Modify the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ network_address: 192.168.2.0
+ subnet_mask: 255.255.255.0
+
+- name: Add members to the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ add_members:
+ - server identifier (id or name)
+
+- name: Remove members from the private network
+ community.general.oneandone_private_network:
+ auth_token: oneandone_private_api_key
+ state: update
+ private_network: backup_network
+ remove_members:
+ - server identifier (id or name)
+'''
+
+RETURN = '''
+private_network:
+ description: Information about the private network.
+ type: dict
+ sample: '{"name": "backup_network", "id": "55726DEDA20C99CF6F2AF8F18CAC9963"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_private_network,
+ get_server,
+ get_datacenter,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+
+def _check_mode(module, result):
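+    # In check mode, exit immediately and report whether a change would have
+    # been made; outside check mode this helper is a no-op.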
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _add_servers(module, oneandone_conn, name, members):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id and members:
+ return True
+ return False
+
+ network = oneandone_conn.attach_private_network_servers(
+ private_network_id=private_network_id,
+ server_ids=members)
+
+ return network
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def _remove_member(module, oneandone_conn, name, member_id):
+ try:
+ private_network_id = get_private_network(oneandone_conn, name)
+
+ if module.check_mode:
+ if private_network_id:
+ network_member = oneandone_conn.get_private_network_server(
+ private_network_id=private_network_id,
+ server_id=member_id)
+ if network_member:
+ return True
+ return False
+
+ network = oneandone_conn.remove_private_network_server(
+            private_network_id=private_network_id,
+ server_id=member_id)
+
+ return network
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def create_network(module, oneandone_conn):
+ """
+ Create new private network
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+    Returns a (changed, private_network) tuple. 'changed' indicates whether
+ any network was added.
+ """
+ name = module.params.get('name')
+ description = module.params.get('description')
+ network_address = module.params.get('network_address')
+ subnet_mask = module.params.get('subnet_mask')
+ datacenter = module.params.get('datacenter')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ network = oneandone_conn.create_private_network(
+ private_network=oneandone.client.PrivateNetwork(
+ name=name,
+ description=description,
+ network_address=network_address,
+ subnet_mask=subnet_mask,
+ datacenter_id=datacenter_id
+ ))
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.private_network,
+ network['id'],
+ wait_timeout,
+ wait_interval)
+ network = get_private_network(oneandone_conn,
+ network['id'],
+ True)
+
+        changed = bool(network)
+
+ _check_mode(module, False)
+
+ return (changed, network)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_network(module, oneandone_conn):
+ """
+ Modifies a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+ """
+ try:
+ _private_network_id = module.params.get('private_network')
+ _name = module.params.get('name')
+ _description = module.params.get('description')
+ _network_address = module.params.get('network_address')
+ _subnet_mask = module.params.get('subnet_mask')
+ _add_members = module.params.get('add_members')
+ _remove_members = module.params.get('remove_members')
+
+ changed = False
+
+ private_network = get_private_network(oneandone_conn,
+ _private_network_id,
+ True)
+ if private_network is None:
+ _check_mode(module, False)
+
+ if _name or _description or _network_address or _subnet_mask:
+ _check_mode(module, True)
+ private_network = oneandone_conn.modify_private_network(
+ private_network_id=private_network['id'],
+ name=_name,
+ description=_description,
+ network_address=_network_address,
+ subnet_mask=_subnet_mask)
+ changed = True
+
+ if _add_members:
+ instances = []
+
+ for member in _add_members:
+ instance_id = get_server(oneandone_conn, member)
+ instance_obj = oneandone.client.AttachServer(server_id=instance_id)
+
+ instances.extend([instance_obj])
+ private_network = _add_servers(module, oneandone_conn, private_network['id'], instances)
+ _check_mode(module, private_network)
+ changed = True
+
+ if _remove_members:
+ chk_changed = False
+ for member in _remove_members:
+ instance = get_server(oneandone_conn, member, True)
+
+ if module.check_mode:
+ chk_changed |= _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ _check_mode(module, instance and chk_changed)
+
+ _remove_member(module,
+ oneandone_conn,
+ private_network['id'],
+ instance['id'])
+ private_network = get_private_network(oneandone_conn,
+ private_network['id'],
+ True)
+ changed = True
+
+ return (changed, private_network)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def remove_network(module, oneandone_conn):
+ """
+ Removes a private network.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+ """
+ try:
+ pn_id = module.params.get('name')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ private_network_id = get_private_network(oneandone_conn, pn_id)
+ if module.check_mode:
+ if private_network_id is None:
+ _check_mode(module, False)
+ _check_mode(module, True)
+ private_network = oneandone_conn.delete_private_network(private_network_id)
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.private_network,
+ private_network['id'],
+ wait_timeout,
+ wait_interval)
+
+        changed = bool(private_network)
+
+ return (changed, {
+ 'id': private_network['id'],
+ 'name': private_network['name']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ private_network=dict(type='str'),
+ name=dict(type='str'),
+ description=dict(type='str'),
+ network_address=dict(type='str'),
+ subnet_mask=dict(type='str'),
+ add_members=dict(type='list', elements="str", default=[]),
+ remove_members=dict(type='list', elements="str", default=[]),
+ datacenter=dict(
+ choices=DATACENTERS),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for deleting a network.")
+ try:
+ (changed, private_network) = remove_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('private_network'):
+ module.fail_json(
+ msg="'private_network' parameter is required for updating a network.")
+ try:
+ (changed, private_network) = update_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(
+ msg="'name' parameter is required for new networks.")
+ try:
+ (changed, private_network) = create_network(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, private_network=private_network)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
new file mode 100644
index 000000000..df5476feb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_public_ip.py
@@ -0,0 +1,338 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_public_ip
+short_description: Configure 1&1 public IPs
+description:
+ - Create, update, and remove public IPs.
+ This module has a dependency on 1and1 >= 1.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+      - Define a public IP state to create, remove, or update.
+ type: str
+ required: false
+ default: 'present'
+ choices: [ "present", "absent", "update" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ required: false
+ reverse_dns:
+ description:
+      - Reverse DNS name. The maximum length is 256 characters.
+ type: str
+ required: false
+ datacenter:
+ description:
+ - ID of the datacenter where the IP will be created (only for unassigned IPs).
+ type: str
+ choices: [US, ES, DE, GB]
+ default: US
+ required: false
+ type:
+ description:
+ - Type of IP. Currently, only IPV4 is available.
+ type: str
+ choices: ["IPV4", "IPV6"]
+ default: 'IPV4'
+ required: false
+ public_ip_id:
+ description:
+ - The ID of the public IP used with update and delete states.
+ type: str
+ wait:
+ description:
+      - Wait for the instance to be in state 'running' before returning.
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+      - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+      - Defines the number of seconds to wait when using the _wait_for methods.
+ type: int
+ default: 5
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - Amel Ajdinovic (@aajdinov)
+ - Ethan Devenport (@edevenport)
+'''
+
+EXAMPLES = '''
+- name: Create a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ reverse_dns: example.com
+ datacenter: US
+ type: IPV4
+
+- name: Update a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ reverse_dns: secondexample.com
+ state: update
+
+- name: Delete a public IP
+ community.general.oneandone_public_ip:
+ auth_token: oneandone_private_api_key
+ public_ip_id: public ip id
+ state: absent
+'''
+
+RETURN = '''
+public_ip:
+  description: Information about the public IP that was processed.
+ type: dict
+ sample: '{"id": "F77CC589EBC120905B4F4719217BFF6D", "ip": "10.5.132.106"}'
+ returned: always
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_public_ip,
+ OneAndOneResources,
+ wait_for_resource_creation_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+TYPES = ['IPV4', 'IPV6']
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def create_public_ip(module, oneandone_conn):
+ """
+ Create new public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+    Returns a (changed, public_ip) tuple. 'changed' indicates whether
+ any public IP was added.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ datacenter = module.params.get('datacenter')
+ ip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
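+    # Resolve the datacenter identifier up front so that a bad name fails
+    # fast (and reports 'no change' when running in check mode).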
+ if datacenter is not None:
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.create_public_ip(
+ reverse_dns=reverse_dns,
+ ip_type=ip_type,
+ datacenter_id=datacenter_id)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+        changed = bool(public_ip)
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def update_public_ip(module, oneandone_conn):
+ """
+ Update a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+    Returns a (changed, public_ip) tuple. 'changed' indicates whether
+ any public IP was changed.
+ """
+ reverse_dns = module.params.get('reverse_dns')
+ public_ip_id = module.params.get('public_ip_id')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ public_ip = oneandone_conn.modify_public_ip(
+ ip_id=public_ip['id'],
+ reverse_dns=reverse_dns)
+
+ if wait:
+ wait_for_resource_creation_completion(oneandone_conn,
+ OneAndOneResources.public_ip,
+ public_ip['id'],
+ wait_timeout,
+ wait_interval)
+ public_ip = oneandone_conn.get_public_ip(public_ip['id'])
+
+        changed = bool(public_ip)
+
+ return (changed, public_ip)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def delete_public_ip(module, oneandone_conn):
+ """
+ Delete a public IP
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+    Returns a (changed, public_ip) tuple. 'changed' indicates whether
+ any public IP was deleted.
+ """
+ public_ip_id = module.params.get('public_ip_id')
+
+ public_ip = get_public_ip(oneandone_conn, public_ip_id, True)
+ if public_ip is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='public IP %s not found.' % public_ip_id)
+
+ try:
+ _check_mode(module, True)
+ deleted_public_ip = oneandone_conn.delete_public_ip(
+ ip_id=public_ip['id'])
+
+        changed = bool(deleted_public_ip)
+
+ return (changed, {
+ 'id': public_ip['id']
+ })
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str', no_log=True,
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ public_ip_id=dict(type='str'),
+ reverse_dns=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ type=dict(
+ choices=TYPES,
+ default='IPV4'),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
+ ),
+ supports_check_mode=True
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='auth_token parameter is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to delete a public ip.")
+ try:
+ (changed, public_ip) = delete_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ elif state == 'update':
+ if not module.params.get('public_ip_id'):
+ module.fail_json(
+ msg="'public_ip_id' parameter is required to update a public ip.")
+ try:
+ (changed, public_ip) = update_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ elif state == 'present':
+ try:
+ (changed, public_ip) = create_public_ip(module, oneandone_conn)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(changed=changed, public_ip=public_ip)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneandone_server.py b/ansible_collections/community/general/plugins/modules/oneandone_server.py
new file mode 100644
index 000000000..59f504178
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneandone_server.py
@@ -0,0 +1,704 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneandone_server
+short_description: Create, destroy, start, stop, and reboot a 1&1 Host server
+description:
+ - Create, destroy, update, start, stop, and reboot a 1&1 Host server.
+ When the server is created it can optionally wait for it to be 'running' before returning.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Define a server's state to create, remove, start or stop it.
+ type: str
+ default: present
+ choices: [ "present", "absent", "running", "stopped" ]
+ auth_token:
+ description:
+ - Authenticating API token provided by 1&1. Overrides the
+ ONEANDONE_AUTH_TOKEN environment variable.
+ type: str
+ api_url:
+ description:
+ - Custom API URL. Overrides the
+ ONEANDONE_API_URL environment variable.
+ type: str
+ datacenter:
+ description:
+ - The datacenter location.
+ type: str
+ default: US
+ choices: [ "US", "ES", "DE", "GB" ]
+ hostname:
+ description:
+ - The hostname or ID of the server. Only used when state is 'present'.
+ type: str
+ description:
+ description:
+ - The description of the server.
+ type: str
+ appliance:
+ description:
+ - The operating system name or ID for the server.
+ It is required only for 'present' state.
+ type: str
+ fixed_instance_size:
+ description:
+ - The instance size name or ID of the server.
+ It is required only for 'present' state, and it is mutually exclusive with
+ vcore, cores_per_processor, ram, and hdds parameters.
+ - 'The available choices are: C(S), C(M), C(L), C(XL), C(XXL), C(3XL), C(4XL), C(5XL)'
+ type: str
+ vcore:
+ description:
+ - The total number of processors.
+ It must be provided with cores_per_processor, ram, and hdds parameters.
+ type: int
+ cores_per_processor:
+ description:
+ - The number of cores per processor.
+ It must be provided with vcore, ram, and hdds parameters.
+ type: int
+ ram:
+ description:
+      - The amount of RAM.
+        It must be provided with the vcore, cores_per_processor, and hdds parameters.
+ type: float
+ hdds:
+ description:
+ - A list of hard disks with nested "size" and "is_main" properties.
+ It must be provided with vcore, cores_per_processor, and ram parameters.
+ type: list
+ elements: dict
+ private_network:
+ description:
+ - The private network name or ID.
+ type: str
+ firewall_policy:
+ description:
+ - The firewall policy name or ID.
+ type: str
+ load_balancer:
+ description:
+ - The load balancer name or ID.
+ type: str
+ monitoring_policy:
+ description:
+ - The monitoring policy name or ID.
+ type: str
+ server:
+ description:
+ - Server identifier (ID or hostname). It is required for all states except 'running' and 'present'.
+ type: str
+ count:
+ description:
+ - The number of servers to create.
+ type: int
+ default: 1
+ ssh_key:
+ description:
+ - User's public SSH key (contents, not path).
+ type: raw
+ server_type:
+ description:
+ - The type of server to be built.
+ type: str
+ default: "cloud"
+ choices: [ "cloud", "baremetal", "k8s_node" ]
+ wait:
+ description:
+ - Wait for the server to be in state 'running' before returning.
+        Also used for the delete operation (set to 'false' if you don't want to wait
+        for each individual server to be deleted before moving on with
+        other tasks).
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+      - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ wait_interval:
+ description:
+      - Defines the number of seconds to wait when using the wait_for methods.
+ type: int
+ default: 5
+ auto_increment:
+ description:
+ - When creating multiple servers at once, whether to differentiate
+ hostnames by appending a count after them or substituting the count
+ where there is a %02d or %03d in the hostname string.
+ type: bool
+ default: true
+
+requirements:
+ - "1and1"
+ - "python >= 2.6"
+
+author:
+ - "Amel Ajdinovic (@aajdinov)"
+ - "Ethan Devenport (@edevenport)"
+
+'''
+
+EXAMPLES = '''
+- name: Create three servers and enumerate their names
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ fixed_instance_size: XL
+ datacenter: US
+ appliance: C5A349786169F140BCBC335675014C08
+ auto_increment: true
+ count: 3
+
+- name: Create three servers, passing in an ssh_key
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ hostname: node%02d
+ vcore: 2
+ cores_per_processor: 4
+ ram: 8.0
+ hdds:
+ - size: 50
+ is_main: false
+ datacenter: ES
+ appliance: C5A349786169F140BCBC335675014C08
+ count: 3
+ wait: true
+ wait_timeout: 600
+ wait_interval: 10
+ ssh_key: SSH_PUBLIC_KEY
+
+- name: Removing server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: absent
+ server: 'node01'
+
+- name: Starting server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: running
+ server: 'node01'
+
+- name: Stopping server
+ community.general.oneandone_server:
+ auth_token: oneandone_private_api_key
+ state: stopped
+ server: 'node01'
+'''
+
+RETURN = '''
+servers:
+  description: Information about each server that was processed.
+ type: list
+ sample:
+ - {"hostname": "my-server", "id": "server-id"}
+ returned: always
+'''
+
+import os
+import time
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.oneandone import (
+ get_datacenter,
+ get_fixed_instance_size,
+ get_appliance,
+ get_private_network,
+ get_monitoring_policy,
+ get_firewall_policy,
+ get_load_balancer,
+ get_server,
+ OneAndOneResources,
+ wait_for_resource_creation_completion,
+ wait_for_resource_deletion_completion
+)
+
+HAS_ONEANDONE_SDK = True
+
+try:
+ import oneandone.client
+except ImportError:
+ HAS_ONEANDONE_SDK = False
+
+DATACENTERS = ['US', 'ES', 'DE', 'GB']
+
+ONEANDONE_SERVER_STATES = (
+ 'DEPLOYING',
+ 'POWERED_OFF',
+ 'POWERED_ON',
+ 'POWERING_ON',
+ 'POWERING_OFF',
+)
+
+
+def _check_mode(module, result):
+ if module.check_mode:
+ module.exit_json(
+ changed=result
+ )
+
+
+def _create_server(module, oneandone_conn, hostname, description,
+ fixed_instance_size_id, vcore, cores_per_processor, ram,
+ hdds, datacenter_id, appliance_id, ssh_key,
+ private_network_id, firewall_policy_id, load_balancer_id,
+ monitoring_policy_id, server_type, wait, wait_timeout,
+ wait_interval):
+
+ try:
+ existing_server = get_server(oneandone_conn, hostname)
+
+ if existing_server:
+ if module.check_mode:
+ return False
+ return None
+
+ if module.check_mode:
+ return True
+
+ server = oneandone_conn.create_server(
+ oneandone.client.Server(
+ name=hostname,
+ description=description,
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ appliance_id=appliance_id,
+ datacenter_id=datacenter_id,
+ rsa_key=ssh_key,
+ private_network_id=private_network_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ monitoring_policy_id=monitoring_policy_id,
+ server_type=server_type,), hdds)
+
+ if wait:
+ wait_for_resource_creation_completion(
+ oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+
+ return server
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+
+def _insert_network_data(server):
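+    # Surface the server's public IPv4/IPv6 addresses as top-level keys so
+    # callers do not have to walk the 'ips' list themselves.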
+ for addr_data in server['ips']:
+ if addr_data['type'] == 'IPV6':
+ server['public_ipv6'] = addr_data['ip']
+ elif addr_data['type'] == 'IPV4':
+ server['public_ipv4'] = addr_data['ip']
+ return server
+
+
+def create_server(module, oneandone_conn):
+ """
+ Create new server
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object
+
+    Returns a (changed, servers) tuple. 'changed' indicates whether any
+    server was added, and 'servers' is the list of the created servers'
+    hostnames, ids, and IP addresses.
+ """
+ hostname = module.params.get('hostname')
+ description = module.params.get('description')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ fixed_instance_size = module.params.get('fixed_instance_size')
+ vcore = module.params.get('vcore')
+ cores_per_processor = module.params.get('cores_per_processor')
+ ram = module.params.get('ram')
+ hdds = module.params.get('hdds')
+ datacenter = module.params.get('datacenter')
+ appliance = module.params.get('appliance')
+ ssh_key = module.params.get('ssh_key')
+ private_network = module.params.get('private_network')
+ monitoring_policy = module.params.get('monitoring_policy')
+ firewall_policy = module.params.get('firewall_policy')
+ load_balancer = module.params.get('load_balancer')
+ server_type = module.params.get('server_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ datacenter_id = get_datacenter(oneandone_conn, datacenter)
+ if datacenter_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='datacenter %s not found.' % datacenter)
+
+ fixed_instance_size_id = None
+ if fixed_instance_size:
+ fixed_instance_size_id = get_fixed_instance_size(
+ oneandone_conn,
+ fixed_instance_size)
+ if fixed_instance_size_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='fixed_instance_size %s not found.' % fixed_instance_size)
+
+ appliance_id = get_appliance(oneandone_conn, appliance)
+ if appliance_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='appliance %s not found.' % appliance)
+
+ private_network_id = None
+ if private_network:
+ private_network_id = get_private_network(
+ oneandone_conn,
+ private_network)
+ if private_network_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='private network %s not found.' % private_network)
+
+ monitoring_policy_id = None
+ if monitoring_policy:
+ monitoring_policy_id = get_monitoring_policy(
+ oneandone_conn,
+ monitoring_policy)
+ if monitoring_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='monitoring policy %s not found.' % monitoring_policy)
+
+ firewall_policy_id = None
+ if firewall_policy:
+ firewall_policy_id = get_firewall_policy(
+ oneandone_conn,
+ firewall_policy)
+ if firewall_policy_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='firewall policy %s not found.' % firewall_policy)
+
+ load_balancer_id = None
+ if load_balancer:
+ load_balancer_id = get_load_balancer(
+ oneandone_conn,
+ load_balancer)
+ if load_balancer_id is None:
+ _check_mode(module, False)
+ module.fail_json(
+ msg='load balancer %s not found.' % load_balancer)
+
+ if auto_increment:
+ hostnames = _auto_increment_hostname(count, hostname)
+ descriptions = _auto_increment_description(count, description)
+ else:
+ hostnames = [hostname] * count
+ descriptions = [description] * count
+
+ hdd_objs = []
+ if hdds:
+ for hdd in hdds:
+ hdd_objs.append(oneandone.client.Hdd(
+ size=hdd['size'],
+ is_main=hdd['is_main']
+ ))
+
+ servers = []
+ for index, name in enumerate(hostnames):
+ server = _create_server(
+ module=module,
+ oneandone_conn=oneandone_conn,
+ hostname=name,
+ description=descriptions[index],
+ fixed_instance_size_id=fixed_instance_size_id,
+ vcore=vcore,
+ cores_per_processor=cores_per_processor,
+ ram=ram,
+ hdds=hdd_objs,
+ datacenter_id=datacenter_id,
+ appliance_id=appliance_id,
+ ssh_key=ssh_key,
+ private_network_id=private_network_id,
+ monitoring_policy_id=monitoring_policy_id,
+ firewall_policy_id=firewall_policy_id,
+ load_balancer_id=load_balancer_id,
+ server_type=server_type,
+ wait=wait,
+ wait_timeout=wait_timeout,
+ wait_interval=wait_interval)
+ if server:
+ servers.append(server)
+
+ changed = False
+
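+    # In check mode _create_server returns plain booleans rather than server
+    # dicts, so report the aggregated would-be result here and exit.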
+ if servers:
+ for server in servers:
+ if server:
+ _check_mode(module, True)
+ _check_mode(module, False)
+ servers = [_insert_network_data(_server) for _server in servers]
+ changed = True
+
+ _check_mode(module, False)
+
+ return (changed, servers)
+
+
+def remove_server(module, oneandone_conn):
+ """
+ Removes a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+    Returns a (changed, removed_server) tuple. 'changed' indicates whether
+    the server was removed, and 'removed_server' holds
+    the removed server's hostname and id.
+ """
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+ removed_server = None
+
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ _check_mode(module, True)
+ try:
+ oneandone_conn.delete_server(server_id=server['id'])
+ if wait:
+ wait_for_resource_deletion_completion(oneandone_conn,
+ OneAndOneResources.server,
+ server['id'],
+ wait_timeout,
+ wait_interval)
+ changed = True
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to terminate the server: %s" % str(ex))
+
+ removed_server = {
+ 'id': server['id'],
+ 'hostname': server['name']
+ }
+ _check_mode(module, False)
+
+ return (changed, removed_server)
+
+
+def startstop_server(module, oneandone_conn):
+ """
+ Starts or Stops a server.
+
+ module : AnsibleModule object
+ oneandone_conn: authenticated oneandone object.
+
+    Returns a (changed, server) tuple. 'changed' indicates whether
+    anything has changed for the server as a result of this function
+    being run, and 'server' holds basic information for
+    the server.
+ """
+ state = module.params.get('state')
+ server_id = module.params.get('server')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ wait_interval = module.params.get('wait_interval')
+
+ changed = False
+
+ # Resolve server
+ server = get_server(oneandone_conn, server_id, True)
+ if server:
+ # Attempt to change the server state, only if it's not already there
+ # or on its way.
+ try:
+ if state == 'stopped' and server['status']['state'] == 'POWERED_ON':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_OFF',
+ method='SOFTWARE')
+ elif state == 'running' and server['status']['state'] == 'POWERED_OFF':
+ _check_mode(module, True)
+ oneandone_conn.modify_server_status(
+ server_id=server['id'],
+ action='POWER_ON',
+ method='SOFTWARE')
+ except Exception as ex:
+ module.fail_json(
+ msg="failed to set server %s to state %s: %s" % (
+ server_id, state, str(ex)))
+
+ _check_mode(module, False)
+
+ # Make sure the server has reached the desired state
+ if wait:
+ operation_completed = False
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(wait_interval)
+ server = oneandone_conn.get_server(server['id']) # refresh
+ server_state = server['status']['state']
+ if state == 'stopped' and server_state == 'POWERED_OFF':
+ operation_completed = True
+ break
+ if state == 'running' and server_state == 'POWERED_ON':
+ operation_completed = True
+ break
+ if not operation_completed:
+ module.fail_json(
+ msg="Timeout waiting for server %s to get to state %s" % (
+ server_id, state))
+
+ changed = True
+ server = _insert_network_data(server)
+
+ _check_mode(module, False)
+
+ return (changed, server)
+
+
+def _auto_increment_hostname(count, hostname):
+ """
+ Allow a custom incremental count in the hostname when defined with the
+    string formatting (%) operator. Otherwise, increment using name-1,
+    name-2, name-3, and so forth.
+ """
+ if '%' not in hostname:
+ hostname = "%s-%%01d" % hostname
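+    # For example, count=3 with hostname 'node' yields ['node-1', 'node-2',
+    # 'node-3'], while hostname 'node%02d' yields ['node01', 'node02', 'node03'].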
+
+ return [
+ hostname % i
+ for i in xrange(1, count + 1)
+ ]
+
+
+def _auto_increment_description(count, description):
+ """
+ Allow the incremental count in the description when defined with the
+ string formatting (%) operator. Otherwise, repeat the same description.
+ """
+ if '%' in description:
+ return [
+ description % i
+ for i in xrange(1, count + 1)
+ ]
+ else:
+ return [description] * count
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_AUTH_TOKEN'),
+ no_log=True),
+ api_url=dict(
+ type='str',
+ default=os.environ.get('ONEANDONE_API_URL')),
+ hostname=dict(type='str'),
+ description=dict(type='str'),
+ appliance=dict(type='str'),
+ fixed_instance_size=dict(type='str'),
+ vcore=dict(type='int'),
+ cores_per_processor=dict(type='int'),
+ ram=dict(type='float'),
+ hdds=dict(type='list', elements='dict'),
+ count=dict(type='int', default=1),
+ ssh_key=dict(type='raw', no_log=False),
+ auto_increment=dict(type='bool', default=True),
+ server=dict(type='str'),
+ datacenter=dict(
+ choices=DATACENTERS,
+ default='US'),
+ private_network=dict(type='str'),
+ firewall_policy=dict(type='str'),
+ load_balancer=dict(type='str'),
+ monitoring_policy=dict(type='str'),
+ server_type=dict(type='str', default='cloud', choices=['cloud', 'baremetal', 'k8s_node']),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ wait_interval=dict(type='int', default=5),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'running', 'stopped']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(['fixed_instance_size', 'vcore'], ['fixed_instance_size', 'cores_per_processor'],
+ ['fixed_instance_size', 'ram'], ['fixed_instance_size', 'hdds'],),
+ required_together=(['vcore', 'cores_per_processor', 'ram', 'hdds'],)
+ )
+
+ if not HAS_ONEANDONE_SDK:
+        module.fail_json(msg='The 1and1 Python SDK is required for this module.')
+
+ if not module.params.get('auth_token'):
+ module.fail_json(
+ msg='The "auth_token" parameter or ' +
+ 'ONEANDONE_AUTH_TOKEN environment variable is required.')
+
+ if not module.params.get('api_url'):
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'))
+ else:
+ oneandone_conn = oneandone.client.OneAndOneService(
+ api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for deleting a server.")
+ try:
+ (changed, servers) = remove_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('server'):
+ module.fail_json(
+ msg="'server' parameter is required for starting/stopping a server.")
+ try:
+ (changed, servers) = startstop_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ elif state == 'present':
+ for param in ('hostname',
+ 'appliance',
+ 'datacenter'):
+ if not module.params.get(param):
+ module.fail_json(
+ msg="%s parameter is required for new server." % param)
+ try:
+ (changed, servers) = create_server(module, oneandone_conn)
+ except Exception as ex:
+ module.fail_json(msg=str(ex))
+
+ module.exit_json(changed=changed, servers=servers)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/onepassword_info.py b/ansible_collections/community/general/plugins/modules/onepassword_info.py
new file mode 100644
index 000000000..bb814c443
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/onepassword_info.py
@@ -0,0 +1,390 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Ryan Conway (@rylon)
+# Copyright (c) 2018, Scott Buchanan <sbuchanan@ri.pn> (onepassword.py used as starting point)
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: onepassword_info
+author:
+ - Ryan Conway (@Rylon)
+requirements:
+ - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/)
+notes:
+ - Tested with C(op) version 0.5.5
+ - "Based on the C(onepassword) lookup plugin by Scott Buchanan <sbuchanan@ri.pn>."
+short_description: Gather items from 1Password
+description:
+ - M(community.general.onepassword_info) wraps the C(op) command line utility to fetch data about one or more 1Password items.
+  - A fatal error occurs if any of the items being searched for cannot be found.
+  - Using the C(no_log) option is recommended to avoid logging the values of the secrets being retrieved.
+ - This module was called C(onepassword_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.onepassword_info) module no longer returns C(ansible_facts)!
+ You must now use the C(register) option to use the facts in other tasks.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ search_terms:
+ type: list
+ elements: dict
+ description:
+ - A list of one or more search terms.
+ - Each search term can either be a simple string or it can be a dictionary for more control.
+ - When passing a simple string, I(field) is assumed to be C(password).
+ - When passing a dictionary, the following fields are available.
+ suboptions:
+ name:
+ type: str
+ description:
+ - The name of the 1Password item to search for (required).
+ field:
+ type: str
+ description:
+          - The name of the field to search for within this item (optional, defaults to "password", or "document" if the item has an attachment).
+ section:
+ type: str
+ description:
+ - The name of a section within this item containing the specified field (optional, will search all sections if not specified).
+ vault:
+ type: str
+ description:
+ - The name of the particular 1Password vault to search, useful if your 1Password user has access to multiple vaults (optional).
+ required: true
+ auto_login:
+ type: dict
+ description:
+ - A dictionary containing authentication details. If this is set, M(community.general.onepassword_info)
+ will attempt to sign in to 1Password automatically.
+ - Without this option, you must have already logged in via the 1Password CLI before running Ansible.
+ - It is B(highly) recommended to store 1Password credentials in an Ansible Vault. Ensure that the key used to encrypt
+ the Ansible Vault is equal to or greater in strength than the 1Password master password.
+ suboptions:
+ subdomain:
+ type: str
+ description:
+ - 1Password subdomain name (<subdomain>.1password.com).
+ - If this is not specified, the most recent subdomain will be used.
+ username:
+ type: str
+ description:
+ - 1Password username.
+ - Only required for initial sign in.
+ master_password:
+ type: str
+ description:
+ - The master password for your subdomain.
+ - This is always required when specifying C(auto_login).
+ required: true
+ secret_key:
+ type: str
+ description:
+ - The secret key for your subdomain.
+ - Only required for initial sign in.
+ required: false
+ cli_path:
+ type: path
+    description: Used to specify the exact path to the C(op) command line interface.
+ required: false
+ default: 'op'
+'''
+
+EXAMPLES = '''
+# Gather secrets from 1Password, assuming there is a 'password' field:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms: My 1Password item
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets from 1Password, with more advanced search terms:
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
+
+# Gather secrets combining simple and advanced search terms. 'My 1Password item' is
+# fetched twice: first 'password' is retrieved, as no field name is specified (default
+# behaviour), then 'Custom field name' is retrieved, as it is specified explicitly.
+- name: Get a password
+ community.general.onepassword_info:
+ search_terms:
+ - My 1Password item # 'name' is optional when passing a simple string...
+ - name: My Other 1Password item # ...but it can also be set for consistency
+ - name: My 1Password item
+ field: Custom field name # optional, defaults to 'password'
+ section: Custom section name # optional, defaults to 'None'
+ vault: Name of the vault # optional, only necessary if there is more than 1 Vault available
+ - name: A 1Password item with document attachment
+ delegate_to: localhost
+ register: my_1password_item
+ no_log: true # Don't want to log the secrets to the console!
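+
+# Gather secrets while signing in to 1Password automatically. The 'auto_login'
+# values below are placeholders; keep the real credentials in an Ansible Vault,
+# as recommended in the option documentation.
+- name: Get a password with automatic sign in
+  community.general.onepassword_info:
+    search_terms: My 1Password item
+    auto_login:
+      subdomain: my-subdomain
+      username: user@example.com
+      master_password: "{{ vaulted_master_password }}"
+      secret_key: "{{ vaulted_secret_key }}"
+  delegate_to: localhost
+  register: my_1password_item
+  no_log: true # Don't want to log the secrets to the console!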
+
+- name: Debug a password (for example)
+ ansible.builtin.debug:
+ msg: "{{ my_1password_item['onepassword']['My 1Password item'] }}"
+'''
+
+RETURN = '''
+---
+# One or more dictionaries for each matching item from 1Password, along with the appropriate fields.
+# This shows the response you would expect to receive from the third example documented above.
+onepassword:
+  description: Dictionary of each 1Password item matching the given search terms, showing what would be returned from the third example above.
+ returned: success
+ type: dict
+ sample:
+ "My 1Password item":
+ password: the value of this field
+ Custom field name: the value of this field
+ "My Other 1Password item":
+ password: the value of this field
+ "A 1Password item with document attachment":
+ document: the contents of the document attached to this item
+'''
+
+
+import errno
+import json
+import os
+import re
+
+from subprocess import Popen, PIPE
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig
+
+
+class AnsibleModuleError(Exception):
+ def __init__(self, results):
+ self.results = results
+
+ def __repr__(self):
+ return self.results
+
+
+class OnePasswordInfo(object):
+
+ def __init__(self):
+ self.cli_path = module.params.get('cli_path')
+ self.auto_login = module.params.get('auto_login')
+ self.logged_in = False
+ self.token = None
+
+ terms = module.params.get('search_terms')
+ self.terms = self.parse_search_terms(terms)
+
+ self._config = OnePasswordConfig()
+
+ def _run(self, args, expected_rc=0, command_input=None, ignore_errors=False):
+ if self.token:
+ # Adds the session token to all commands if we're logged in.
+ args += [to_bytes('--session=') + self.token]
+
+ command = [self.cli_path] + args
+ p = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
+ out, err = p.communicate(input=command_input)
+ rc = p.wait()
+ if not ignore_errors and rc != expected_rc:
+ raise AnsibleModuleError(to_native(err))
+ return rc, out, err
+
+ def _parse_field(self, data_json, item_id, field_name, section_title=None):
+ data = json.loads(data_json)
+
+        if 'documentAttributes' in data['details']:
+ # This is actually a document, let's fetch the document data instead!
+ document = self._run(["get", "document", data['overview']['title']])
+ return {'document': document[1].strip()}
+
+ else:
+ # This is not a document, let's try to find the requested field
+
+ # Some types of 1Password items have a 'password' field directly alongside the 'fields' attribute,
+ # not inside it, so we need to check there first.
+            if field_name in data['details']:
+ return {field_name: data['details'][field_name]}
+
+ # Otherwise we continue looking inside the 'fields' attribute for the specified field.
+ else:
+ if section_title is None:
+ for field_data in data['details'].get('fields', []):
+ if field_data.get('name', '').lower() == field_name.lower():
+ return {field_name: field_data.get('value', '')}
+
+                # We have not found it yet, so now let's see if there are any sections defined
+                # and search through those for the field. If a section was given, we skip
+                # any non-matching sections, otherwise we search them all until we find the field.
+ for section_data in data['details'].get('sections', []):
+ if section_title is not None and section_title.lower() != section_data['title'].lower():
+ continue
+ for field_data in section_data.get('fields', []):
+ if field_data.get('t', '').lower() == field_name.lower():
+ return {field_name: field_data.get('v', '')}
+
+ # We will get here if the field could not be found in any section and the item wasn't a document to be downloaded.
+ optional_section_title = '' if section_title is None else " in the section '%s'" % section_title
+ module.fail_json(msg="Unable to find an item in 1Password named '%s' with the field '%s'%s." % (item_id, field_name, optional_section_title))
+
+ def parse_search_terms(self, terms):
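+        # Normalise each term to a dict; for example, the bare string
+        # 'My item' becomes {'name': 'My item', 'field': 'password',
+        # 'section': None, 'vault': None}.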
+ processed_terms = []
+
+ for term in terms:
+ if not isinstance(term, dict):
+ term = {'name': term}
+
+ if 'name' not in term:
+ module.fail_json(msg="Missing required 'name' field from search term, got: '%s'" % to_native(term))
+
+ term['field'] = term.get('field', 'password')
+ term['section'] = term.get('section', None)
+ term['vault'] = term.get('vault', None)
+
+ processed_terms.append(term)
+
+ return processed_terms
+
+ def get_raw(self, item_id, vault=None):
+ try:
+ args = ["get", "item", item_id]
+ if vault is not None:
+ args += ['--vault={0}'.format(vault)]
+ rc, output, dummy = self._run(args)
+ return output
+
+ except Exception as e:
+ if re.search(".*not found.*", to_native(e)):
+ module.fail_json(msg="Unable to find an item in 1Password named '%s'." % item_id)
+ else:
+ module.fail_json(msg="Unexpected error attempting to find an item in 1Password named '%s': %s" % (item_id, to_native(e)))
+
+ def get_field(self, item_id, field, section=None, vault=None):
+ output = self.get_raw(item_id, vault)
+ return self._parse_field(output, item_id, field, section) if output != '' else ''
+
+ def full_login(self):
+ if self.auto_login is not None:
+ if None in [self.auto_login.get('subdomain'), self.auto_login.get('username'),
+ self.auto_login.get('secret_key'), self.auto_login.get('master_password')]:
+ module.fail_json(msg='Unable to perform initial sign in to 1Password. '
+ 'subdomain, username, secret_key, and master_password are required to perform initial sign in.')
+
+ args = [
+ 'signin',
+ '{0}.1password.com'.format(self.auto_login['subdomain']),
+ to_bytes(self.auto_login['username']),
+ to_bytes(self.auto_login['secret_key']),
+ '--output=raw',
+ ]
+
+ try:
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+ except AnsibleModuleError as e:
+ module.fail_json(msg="Failed to perform initial sign in to 1Password: %s" % to_native(e))
+ else:
+ module.fail_json(msg="Unable to perform an initial sign in to 1Password. Please run '%s signin' "
+ "or define credentials in 'auto_login'. See the module documentation for details." % self.cli_path)
+
+ def get_token(self):
+        # If the config file exists, assume an initial sign in has taken place and try a basic sign in
+ if os.path.isfile(self._config.config_file_path):
+
+ if self.auto_login is not None:
+
+ # Since we are not currently signed in, master_password is required at a minimum
+ if not self.auto_login.get('master_password'):
+ module.fail_json(msg="Unable to sign in to 1Password. 'auto_login.master_password' is required.")
+
+ # Try signing in using the master_password and a subdomain if one is provided
+ try:
+ args = ['signin', '--output=raw']
+
+ if self.auto_login.get('subdomain'):
+ args = ['signin', self.auto_login['subdomain'], '--output=raw']
+
+ rc, out, err = self._run(args, command_input=to_bytes(self.auto_login['master_password']))
+ self.token = out.strip()
+
+ except AnsibleModuleError:
+ self.full_login()
+
+ else:
+ self.full_login()
+
+ else:
+ # Attempt a full sign in since there appears to be no existing sign in
+ self.full_login()
+
+ def assert_logged_in(self):
+ try:
+ rc, out, err = self._run(['get', 'account'], ignore_errors=True)
+ if rc == 0:
+ self.logged_in = True
+ if not self.logged_in:
+ self.get_token()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ module.fail_json(msg="1Password CLI tool '%s' not installed in path on control machine" % self.cli_path)
+ raise e
+
+ def run(self):
+ result = {}
+
+ self.assert_logged_in()
+
+ for term in self.terms:
+ value = self.get_field(term['name'], term['field'], term['section'], term['vault'])
+
+ if term['name'] in result:
+ # If we already have a result for this key, merge this result dictionary
+ # into the existing one. This happens when a single item in 1Password
+ # has two different fields and we want to retrieve both of them.
+ result[term['name']].update(value)
+ else:
+ # If this is the first result for this key, simply set it.
+ result[term['name']] = value
+
+ return result
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ cli_path=dict(type='path', default='op'),
+ auto_login=dict(type='dict', options=dict(
+ subdomain=dict(type='str'),
+ username=dict(type='str'),
+ master_password=dict(required=True, type='str', no_log=True),
+ secret_key=dict(type='str', no_log=True),
+ ), default=None),
+ search_terms=dict(required=True, type='list', elements='dict'),
+ ),
+ supports_check_mode=True
+ )
+
+ results = {'onepassword': OnePasswordInfo().run()}
+
+ module.exit_json(changed=False, **results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
new file mode 100644
index 000000000..541f3d669
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_datacenter_info.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_datacenter_info
+short_description: Retrieve information about the OneView Data Centers
+description:
+ - Retrieve information about the OneView Data Centers.
+ - This module was called C(oneview_datacenter_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_datacenter_info) module no longer returns C(ansible_facts)!
+requirements:
+ - "hpOneView >= 2.0.1"
+author:
+ - Alex Monteiro (@aalexmonteiro)
+ - Madhav Bharadwaj (@madhav-bharadwaj)
+ - Priyanka Sood (@soodpr)
+ - Ricardo Galeno (@ricardogpsf)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Data Center name.
+ type: str
+ options:
+ description:
+ - "Retrieve additional information. Options available: 'visualContent'."
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Data Centers
+ ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather paginated, filtered and sorted information about Data Centers
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'state=Unmanaged'
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Data Centers
+ ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about a Data Center by name
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Data Center found by name
+ ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Gather information about the Data Center Visual Content
+ community.general.oneview_datacenter_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: "My Data Center"
+ options:
+ - visualContent
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Data Center found by name
+ ansible.builtin.debug:
+ msg: "{{ result.datacenters }}"
+
+- name: Print fetched information about Data Center Visual Content
+ ansible.builtin.debug:
+ msg: "{{ result.datacenter_visual_content }}"
+'''
+
+RETURN = '''
+datacenters:
+ description: Has all the OneView information about the Data Centers.
+ returned: Always, but can be null.
+ type: dict
+
+datacenter_visual_content:
+ description: Has information about the Data Center Visual Content.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class DatacenterInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list', elements='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(DatacenterInfoModule, self).__init__(
+ additional_arg_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+
+ client = self.oneview_client.datacenters
+ info = {}
+
+ if self.module.params.get('name'):
+ datacenters = client.get_by('name', self.module.params['name'])
+
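+ # get_by returns a list of matches; visual content is fetched only for the first match's URI.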
+ if self.options and 'visualContent' in self.options:
+ if datacenters:
+ info['datacenter_visual_content'] = client.get_visual_content(datacenters[0]['uri'])
+ else:
+ info['datacenter_visual_content'] = None
+
+ info['datacenters'] = datacenters
+ else:
+ info['datacenters'] = client.get_all(**self.facts_params)
+
+ return dict(changed=False, **info)
+
+
+def main():
+ DatacenterInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
new file mode 100644
index 000000000..3e593b7ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_enclosure_info.py
@@ -0,0 +1,252 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_enclosure_info
+short_description: Retrieve information about one or more Enclosures
+description:
+ - Retrieve information about one or more of the Enclosures from OneView.
+ - This module was called C(oneview_enclosure_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_enclosure_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Enclosure name.
+ type: str
+ options:
+ description:
+ - "List with options to gather additional information about an Enclosure and related resources.
+ Options allowed: C(script), C(environmentalConfiguration), and C(utilization). For the option C(utilization),
+ you can provide specific parameters."
+ type: list
+ elements: raw
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Enclosures
+ community.general.oneview_enclosure_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Enclosures
+ ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather paginated, filtered and sorted information about Enclosures
+ community.general.oneview_enclosure_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: status=OK
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Enclosures
+ ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name
+ community.general.oneview_enclosure_info:
+ name: Enclosure-Name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Enclosure found by name
+ ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Gather information about an Enclosure by name with options
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - script # optional
+ - environmentalConfiguration # optional
+ - utilization # optional
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Enclosure found by name
+ ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Print fetched information about Enclosure Script
+ ansible.builtin.debug:
+ msg: "{{ result.enclosure_script }}"
+
+- name: Print fetched information about Enclosure Environmental Configuration
+ ansible.builtin.debug:
+ msg: "{{ result.enclosure_environmental_configuration }}"
+
+- name: Print fetched information about Enclosure Utilization
+ ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+
+- name: "Gather information about an Enclosure with temperature data at a resolution of one sample per day, between two
+ specified dates"
+ community.general.oneview_enclosure_info:
+ name: Test-Enclosure
+ options:
+ - utilization: # optional
+ fields: AmbientTemperature
+ filter:
+ - startDate=2016-07-01T14:29:42.000Z
+ - endDate=2017-07-01T03:29:42.000Z
+ view: day
+ refresh: false
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Enclosure found by name
+ ansible.builtin.debug:
+ msg: "{{ result.enclosures }}"
+
+- name: Print fetched information about Enclosure Utilization
+ ansible.builtin.debug:
+ msg: "{{ result.enclosure_utilization }}"
+'''
+
+RETURN = '''
+enclosures:
+ description: Has all the OneView information about the Enclosures.
+ returned: Always, but can be null.
+ type: dict
+
+enclosure_script:
+ description: Has all the OneView information about the script of an Enclosure.
+ returned: When requested, but can be null.
+ type: str
+
+enclosure_environmental_configuration:
+ description: Has all the OneView information about the environmental configuration of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+
+enclosure_utilization:
+ description: Has all the OneView information about the utilization of an Enclosure.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EnclosureInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list', elements='raw'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EnclosureInfoModule, self).__init__(
+ additional_arg_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+
+ info = {}
+
+ if self.module.params['name']:
+ enclosures = self._get_by_name(self.module.params['name'])
+
+ if self.options and enclosures:
+ info = self._gather_optional_info(self.options, enclosures[0])
+ else:
+ enclosures = self.oneview_client.enclosures.get_all(**self.facts_params)
+
+ info['enclosures'] = enclosures
+
+ return dict(changed=False, **info)
+
+ def _gather_optional_info(self, options, enclosure):
+
+ enclosure_client = self.oneview_client.enclosures
+ info = {}
+
+ if options.get('script'):
+ info['enclosure_script'] = enclosure_client.get_script(enclosure['uri'])
+ if options.get('environmentalConfiguration'):
+ env_config = enclosure_client.get_environmental_configuration(enclosure['uri'])
+ info['enclosure_environmental_configuration'] = env_config
+ if options.get('utilization'):
+ info['enclosure_utilization'] = self._get_utilization(enclosure, options['utilization'])
+
+ return info
+
+ def _get_utilization(self, enclosure, params):
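+ # 'params' is a dict only when the playbook supplied utilization sub-options (fields, view, refresh, filter).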
+ fields = view = refresh = filter = ''
+
+ if isinstance(params, dict):
+ fields = params.get('fields')
+ view = params.get('view')
+ refresh = params.get('refresh')
+ filter = params.get('filter')
+
+ return self.oneview_client.enclosures.get_utilization(enclosure['uri'],
+ fields=fields,
+ filter=filter,
+ refresh=refresh,
+ view=view)
+
+ def _get_by_name(self, name):
+ return self.oneview_client.enclosures.get_by('name', name)
+
+
+def main():
+ EnclosureInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
new file mode 100644
index 000000000..8eb63db5a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network
+short_description: Manage OneView Ethernet Network resources
+description:
+ - Provides an interface to manage Ethernet Network resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicates the desired state for the Ethernet Network resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ - C(default_bandwidth_reset) will reset the network connection template to the default.
+ type: str
+ default: present
+ choices: [present, absent, default_bandwidth_reset]
+ data:
+ description:
+ - List with Ethernet Network properties.
+ type: dict
+ required: true
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Ethernet Network is present using the default configuration
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ vlanId: '201'
+ delegate_to: localhost
+
+- name: Update the Ethernet Network changing bandwidth and purpose
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is present with name 'Renamed Ethernet Network'
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ newName: 'Renamed Ethernet Network'
+ delegate_to: localhost
+
+- name: Ensure that the Ethernet Network is absent
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: 'New Ethernet Network'
+ delegate_to: localhost
+
+- name: Create Ethernet networks in bulk
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ vlanIdRange: '1-10,15,17'
+ purpose: General
+ namePrefix: TestNetwork
+ smartLink: false
+ privateNetwork: false
+ bandwidth:
+ maximumBandwidth: 10000
+ typicalBandwidth: 2000
+ delegate_to: localhost
+
+- name: Reset to the default network connection template
+ community.general.oneview_ethernet_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: default_bandwidth_reset
+ data:
+ name: 'Test Ethernet Network'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+ethernet_network:
+ description: Has the facts about the Ethernet Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+
+ethernet_network_bulk:
+ description: Has the facts about the Ethernet Networks affected by the bulk insert.
+ returned: When 'vlanIdRange' attribute is in data argument. Can be null.
+ type: dict
+
+ethernet_network_connection_template:
+ description: Has the facts about the Ethernet Network Connection Template.
+ returned: On state 'default_bandwidth_reset'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class EthernetNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'Ethernet Network created successfully.'
+ MSG_UPDATED = 'Ethernet Network updated successfully.'
+ MSG_DELETED = 'Ethernet Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Ethernet Network is already present.'
+ MSG_ALREADY_ABSENT = 'Ethernet Network is already absent.'
+
+ MSG_BULK_CREATED = 'Ethernet Networks created successfully.'
+ MSG_MISSING_BULK_CREATED = 'Some missing Ethernet Networks were created successfully.'
+ MSG_BULK_ALREADY_EXIST = 'The specified Ethernet Networks already exist.'
+ MSG_CONNECTION_TEMPLATE_RESET = 'Ethernet Network connection template was reset to the default.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network was not found.'
+
+ RESOURCE_FACT_NAME = 'ethernet_network'
+
+ def __init__(self):
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'default_bandwidth_reset', 'present']),
+ data=dict(type='dict', required=True),
+ )
+
+ super(EthernetNetworkModule, self).__init__(additional_arg_spec=argument_spec, validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+
+ changed, msg, ansible_facts, resource = False, '', {}, None
+
+ if self.data.get('name'):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ if self.data.get('vlanIdRange'):
+ return self._bulk_present()
+ else:
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+ elif self.state == 'default_bandwidth_reset':
+ changed, msg, ansible_facts = self._default_bandwidth_reset(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=ansible_facts)
+
+ def _present(self, resource):
+
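+ # bandwidth and scopeUris belong to separate resources, so strip them out and apply them after the base update.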
+ bandwidth = self.data.pop('bandwidth', None)
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if bandwidth:
+ if self._update_connection_template(result['ansible_facts']['ethernet_network'], bandwidth)[0]:
+ result['changed'] = True
+ result['msg'] = self.MSG_UPDATED
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'ethernet_network', scope_uris)
+
+ return result
+
+ def _bulk_present(self):
+ vlan_id_range = self.data['vlanIdRange']
+ result = dict(ansible_facts={})
+ ethernet_networks = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ if not ethernet_networks:
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_BULK_CREATED
+
+ else:
+ vlan_ids = self.resource_client.dissociate_values_or_ranges(vlan_id_range)
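+ # Drop the VLAN ids that already have a network so that only the missing ones are created below.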
+ for net in ethernet_networks[:]:
+ vlan_ids.remove(net['vlanId'])
+
+ if len(vlan_ids) == 0:
+ result['msg'] = self.MSG_BULK_ALREADY_EXIST
+ result['changed'] = False
+ else:
+ if len(vlan_ids) == 1:
+ self.data['vlanIdRange'] = '{0}-{1}'.format(vlan_ids[0], vlan_ids[0])
+ else:
+ self.data['vlanIdRange'] = ','.join(map(str, vlan_ids))
+
+ self.resource_client.create_bulk(self.data)
+ result['changed'] = True
+ result['msg'] = self.MSG_MISSING_BULK_CREATED
+ result['ansible_facts']['ethernet_network_bulk'] = self.resource_client.get_range(self.data['namePrefix'], vlan_id_range)
+
+ return result
+
+ def _update_connection_template(self, ethernet_network, bandwidth):
+
+ if 'connectionTemplateUri' not in ethernet_network:
+ return False, None
+
+ connection_template = self.oneview_client.connection_templates.get(ethernet_network['connectionTemplateUri'])
+
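+ # Merge the requested bandwidth into a copy and push the update only when it differs from the current template.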
+ merged_data = connection_template.copy()
+ merged_data.update({'bandwidth': bandwidth})
+
+ if not self.compare(connection_template, merged_data):
+ connection_template = self.oneview_client.connection_templates.update(merged_data)
+ return True, connection_template
+ else:
+ return False, None
+
+ def _default_bandwidth_reset(self, resource):
+
+ if not resource:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND)
+
+ default_connection_template = self.oneview_client.connection_templates.get_default()
+
+ changed, connection_template = self._update_connection_template(resource, default_connection_template['bandwidth'])
+
+ return changed, self.MSG_CONNECTION_TEMPLATE_RESET, dict(
+ ethernet_network_connection_template=connection_template)
+
+
+def main():
+ EthernetNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
new file mode 100644
index 000000000..e107f3b47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_ethernet_network_info.py
@@ -0,0 +1,177 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_ethernet_network_info
+short_description: Retrieve the information about one or more of the OneView Ethernet Networks
+description:
+ - Retrieve the information about one or more of the Ethernet Networks from OneView.
+ - This module was called C(oneview_ethernet_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_ethernet_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Ethernet Network name.
+ type: str
+ options:
+ description:
+ - "List with options to gather additional information about an Ethernet Network and related resources.
+ Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
+ type: list
+ elements: str
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Ethernet Networks
+ ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Ethernet Networks
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'purpose=General'
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Ethernet Networks
+ ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Ethernet network name
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Ethernet Network found by name
+ ansible.builtin.debug:
+ msg: "{{ result.ethernet_networks }}"
+
+- name: Gather information about an Ethernet Network by name with options
+ community.general.oneview_ethernet_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: eth1
+ options:
+ - associatedProfiles
+ - associatedUplinkGroups
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Ethernet Network Associated Profiles
+ ansible.builtin.debug:
+ msg: "{{ result.enet_associated_profiles }}"
+
+- name: Print fetched information about Ethernet Network Associated Uplink Groups
+ ansible.builtin.debug:
+ msg: "{{ result.enet_associated_uplink_groups }}"
+'''
+
+RETURN = '''
+ethernet_networks:
+ description: Has all the OneView information about the Ethernet Networks.
+ returned: Always, but can be null.
+ type: dict
+
+enet_associated_profiles:
+ description: Has all the OneView information about the profiles which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+
+enet_associated_uplink_groups:
+ description: Has all the OneView information about the uplink sets which are using the Ethernet network.
+ returned: When requested, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class EthernetNetworkInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list', elements='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(EthernetNetworkInfoModule, self).__init__(
+ additional_arg_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ self.resource_client = self.oneview_client.ethernet_networks
+
+ def execute_module(self):
+ info = {}
+ if self.module.params['name']:
+ ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
+
+ if self.module.params.get('options') and ethernet_networks:
+ info = self.__gather_optional_info(ethernet_networks[0])
+ else:
+ ethernet_networks = self.resource_client.get_all(**self.facts_params)
+
+ info['ethernet_networks'] = ethernet_networks
+
+ return dict(changed=False, **info)
+
+ def __gather_optional_info(self, ethernet_network):
+
+ info = {}
+
+ if self.options.get('associatedProfiles'):
+ info['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
+ if self.options.get('associatedUplinkGroups'):
+ info['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
+
+ return info
+
+ def __get_associated_profiles(self, ethernet_network):
+ associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
+ return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
+
+ def __get_associated_uplink_groups(self, ethernet_network):
+ uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
+ return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
+
+
+def main():
+ EthernetNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
new file mode 100644
index 000000000..4c5f867e2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network
+short_description: Manage OneView Fibre Channel Network resources
+description:
+ - Provides an interface to manage Fibre Channel Network resources. Can create, update, and delete.
+requirements:
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicates the desired state for the Fibre Channel Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ type: str
+ choices: ['present', 'absent']
+ required: true
+ data:
+ description:
+ - List with the Fibre Channel Network properties.
+ type: dict
+ required: true
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Fibre Channel Network is present using the default configuration
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+
+- name: Ensure that the Fibre Channel Network is present with fabricType 'DirectAttach'
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ fabricType: 'DirectAttach'
+
+- name: Ensure that the Fibre Channel Network is present and is inserted in the desired scopes
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: present
+ data:
+ name: 'New FC Network'
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+
+- name: Ensure that the Fibre Channel Network is absent
+ community.general.oneview_fc_network:
+ config: "{{ config_file_path }}"
+ state: absent
+ data:
+ name: 'New FC Network'
+'''
+
+RETURN = '''
+fc_network:
+ description: Has the facts about the managed OneView FC Network.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FC Network created successfully.'
+ MSG_UPDATED = 'FC Network updated successfully.'
+ MSG_DELETED = 'FC Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FC Network is already present.'
+ MSG_ALREADY_ABSENT = 'FC Network is already absent.'
+ RESOURCE_FACT_NAME = 'fc_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(
+ required=True,
+ choices=['present', 'absent']))
+
+ super(FcNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fc_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self._present(resource)
+ else:
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fc_network', scope_uris)
+ return result
+
+
+def main():
+ FcNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
new file mode 100644
index 000000000..d4044b08b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_fc_network_info.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fc_network_info
+short_description: Retrieve the information about one or more of the OneView Fibre Channel Networks
+description:
+ - Retrieve the information about one or more of the Fibre Channel Networks from OneView.
+ - This module was called C(oneview_fc_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fc_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Fibre Channel Network name.
+ type: str
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Fibre Channel Networks
+ ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather paginated, filtered and sorted information about Fibre Channel Networks
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 1
+ count: 3
+ sort: 'name:descending'
+ filter: 'fabricType=FabricAttach'
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Fibre Channel Networks
+ ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+
+- name: Gather information about a Fibre Channel Network by name
+ community.general.oneview_fc_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: network name
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Fibre Channel Network found by name
+ ansible.builtin.debug:
+ msg: "{{ result.fc_networks }}"
+'''
+
+RETURN = '''
+fc_networks:
+ description: Has all the OneView information about the Fibre Channel Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(required=False, type='str'),
+ params=dict(required=False, type='dict')
+ )
+
+ super(FcNetworkInfoModule, self).__init__(
+ additional_arg_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fc_networks = self.oneview_client.fc_networks.get_by('name', self.module.params['name'])
+ else:
+ fc_networks = self.oneview_client.fc_networks.get_all(**self.facts_params)
+
+ return dict(changed=False, fc_networks=fc_networks)
+
+
+def main():
+ FcNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
new file mode 100644
index 000000000..73eef5af0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network
+short_description: Manage OneView FCoE Network resources
+description:
+ - Provides an interface to manage FCoE Network resources. Can create, update, or delete.
+requirements:
+ - "python >= 2.7.9"
+ - "hpOneView >= 4.0.0"
+author: "Felipe Bulsoni (@fgbulsoni)"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicates the desired state for the FCoE Network resource.
+ C(present) will ensure data properties are compliant with OneView.
+ C(absent) will remove the resource from OneView, if it exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+ - List with FCoE Network properties.
+ type: dict
+ required: true
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that FCoE Network is present using the default configuration
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: Test FCoE Network
+ vlanId: 201
+ delegate_to: localhost
+
+- name: Update the FCOE network scopes
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: present
+ data:
+ name: New FCoE Network
+ scopeUris:
+ - '/rest/scopes/00SC123456'
+ - '/rest/scopes/01SC123456'
+ delegate_to: localhost
+
+- name: Ensure that FCoE Network is absent
+ community.general.oneview_fcoe_network:
+ config: '/etc/oneview/oneview_config.json'
+ state: absent
+ data:
+ name: New FCoE Network
+ delegate_to: localhost
+'''
+
+RETURN = '''
+fcoe_network:
+ description: Has the facts about the OneView FCoE Networks.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkModule(OneViewModuleBase):
+ MSG_CREATED = 'FCoE Network created successfully.'
+ MSG_UPDATED = 'FCoE Network updated successfully.'
+ MSG_DELETED = 'FCoE Network deleted successfully.'
+ MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
+ MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
+ RESOURCE_FACT_NAME = 'fcoe_network'
+
+ def __init__(self):
+
+ additional_arg_spec = dict(data=dict(required=True, type='dict'),
+ state=dict(default='present',
+ choices=['present', 'absent']))
+
+ super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
+ validate_etag_support=True)
+
+ self.resource_client = self.oneview_client.fcoe_networks
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
+ return result
+
+
+def main():
+ FcoeNetworkModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
new file mode 100644
index 000000000..d9ee1b379
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_fcoe_network_info.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_fcoe_network_info
+short_description: Retrieve the information about one or more of the OneView FCoE Networks
+description:
+ - Retrieve the information about one or more of the FCoE Networks from OneView.
+ - This module was called C(oneview_fcoe_network_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_fcoe_network_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - FCoE Network name.
+ type: str
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about FCoE Networks
+ ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather paginated, filtered and sorted information about FCoE Networks
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: 'vlanId=2'
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of FCoE Networks
+ ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+
+- name: Gather information about a FCoE Network by name
+ community.general.oneview_fcoe_network_info:
+ config: /etc/oneview/oneview_config.json
+ name: Test FCoE Network Information
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about FCoE Network found by name
+ ansible.builtin.debug:
+ msg: "{{ result.fcoe_networks }}"
+'''
+
+RETURN = '''
+fcoe_networks:
+ description: Has all the OneView information about the FCoE Networks.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class FcoeNetworkInfoModule(OneViewModuleBase):
+ def __init__(self):
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(FcoeNetworkInfoModule, self).__init__(
+ additional_arg_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+
+ if self.module.params['name']:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_by('name', self.module.params['name'])
+ else:
+ fcoe_networks = self.oneview_client.fcoe_networks.get_all(**self.facts_params)
+
+ return dict(changed=False, fcoe_networks=fcoe_networks)
+
+
+def main():
+ FcoeNetworkInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
new file mode 100644
index 000000000..cd8e87528
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group
+short_description: Manage OneView Logical Interconnect Group resources
+description:
+ - Provides an interface to manage Logical Interconnect Group resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicates the desired state for the Logical Interconnect Group resource.
+ C(absent) will remove the resource from OneView, if it exists.
+ C(present) will ensure data properties are compliant with OneView.
+ type: str
+ choices: [absent, present]
+ default: present
+ data:
+ description:
+ - List with the Logical Interconnect Group properties.
+ type: dict
+ required: true
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Ensure that the Logical Interconnect Group is present
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ uplinkSets: []
+ enclosureType: C7000
+ interconnectMapTemplate:
+ interconnectMapEntryTemplates:
+ - logicalDownlinkUri: ~
+ logicalLocation:
+ locationEntries:
+ - relativeValue: 1
+ type: Bay
+ - relativeValue: 1
+ type: Enclosure
+ permittedInterconnectTypeName: HP VC Flex-10/10D Module
+ # Alternatively you can inform permittedInterconnectTypeUri
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group has the specified scopes
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: Test Logical Interconnect Group
+ scopeUris:
+ - /rest/scopes/00SC123456
+ - /rest/scopes/01SC123456
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is present with name 'Test'
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: New Logical Interconnect Group
+ newName: Test
+ delegate_to: localhost
+
+- name: Ensure that the Logical Interconnect Group is absent
+ community.general.oneview_logical_interconnect_group:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: New Logical Interconnect Group
+ delegate_to: localhost
+'''
+
+RETURN = '''
+logical_interconnect_group:
+ description: Has the facts about the OneView Logical Interconnect Group.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class LogicalInterconnectGroupModule(OneViewModuleBase):
+ MSG_CREATED = 'Logical Interconnect Group created successfully.'
+ MSG_UPDATED = 'Logical Interconnect Group updated successfully.'
+ MSG_DELETED = 'Logical Interconnect Group deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Logical Interconnect Group is already present.'
+ MSG_ALREADY_ABSENT = 'Logical Interconnect Group is already absent.'
+ MSG_INTERCONNECT_TYPE_NOT_FOUND = 'Interconnect Type was not found.'
+
+ RESOURCE_FACT_NAME = 'logical_interconnect_group'
+
+ def __init__(self):
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict')
+ )
+
+ super(LogicalInterconnectGroupModule, self).__init__(additional_arg_spec=argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.logical_interconnect_groups
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data['name'])
+
+ if self.state == 'present':
+ return self.__present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def __present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+
+ self.__replace_name_by_uris(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, 'logical_interconnect_group', scope_uris)
+
+ return result
+
+ def __replace_name_by_uris(self, data):
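+ # The REST API only accepts URIs, so resolve each permittedInterconnectTypeName to its matching URI.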
+ map_template = data.get('interconnectMapTemplate')
+
+ if map_template:
+ map_entry_templates = map_template.get('interconnectMapEntryTemplates')
+ if map_entry_templates:
+ for value in map_entry_templates:
+ permitted_interconnect_type_name = value.pop('permittedInterconnectTypeName', None)
+ if permitted_interconnect_type_name:
+ value['permittedInterconnectTypeUri'] = self.__get_interconnect_type_by_name(
+ permitted_interconnect_type_name).get('uri')
+
+ def __get_interconnect_type_by_name(self, name):
+ i_type = self.oneview_client.interconnect_types.get_by('name', name)
+ if i_type:
+ return i_type[0]
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+
+def main():
+ LogicalInterconnectGroupModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
new file mode 100644
index 000000000..0111bf2c1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_logical_interconnect_group_info.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_logical_interconnect_group_info
+short_description: Retrieve information about one or more of the OneView Logical Interconnect Groups
+description:
+ - Retrieve information about one or more of the Logical Interconnect Groups from OneView.
+ - This module was called C(oneview_logical_interconnect_group_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_logical_interconnect_group_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Logical Interconnect Group name.
+ type: str
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Logical Interconnect Groups
+ ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather paginated, filtered and sorted information about Logical Interconnect Groups
+ community.general.oneview_logical_interconnect_group_info:
+ params:
+ start: 0
+ count: 3
+ sort: name:descending
+ filter: name=LIGName
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Logical Interconnect Groups
+ ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+
+- name: Gather information about a Logical Interconnect Group by name
+ community.general.oneview_logical_interconnect_group_info:
+ name: logical interconnect group name
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Logical Interconnect Group found by name
+ ansible.builtin.debug:
+ msg: "{{ result.logical_interconnect_groups }}"
+'''
+
+RETURN = '''
+logical_interconnect_groups:
+ description: Has all the OneView information about the Logical Interconnect Groups.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class LogicalInterconnectGroupInfoModule(OneViewModuleBase):
+ def __init__(self):
+
+ argument_spec = dict(
+ name=dict(type='str'),
+ params=dict(type='dict'),
+ )
+
+ super(LogicalInterconnectGroupInfoModule, self).__init__(
+ additional_arg_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+ if self.module.params.get('name'):
+ ligs = self.oneview_client.logical_interconnect_groups.get_by('name', self.module.params['name'])
+ else:
+ ligs = self.oneview_client.logical_interconnect_groups.get_all(**self.facts_params)
+
+ return dict(changed=False, logical_interconnect_groups=ligs)
+
+
+def main():
+ LogicalInterconnectGroupInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set.py b/ansible_collections/community/general/plugins/modules/oneview_network_set.py
new file mode 100644
index 000000000..a6a62a05c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_network_set.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set
+short_description: Manage HPE OneView Network Set resources
+description:
+ - Provides an interface to manage Network Set resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 4.0.0
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicates the desired state for the Network Set resource.
+ - C(present) will ensure data properties are compliant with OneView.
+ - C(absent) will remove the resource from OneView, if it exists.
+ type: str
+ default: present
+ choices: ['present', 'absent']
+ data:
+ description:
+      - Dict with the Network Set properties.
+ type: dict
+ required: true
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create a Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ networkUris:
+ - Test Ethernet Network_1 # can be a name
+ - /rest/ethernet-networks/e4360c9d-051d-4931-b2aa-7de846450dd8 # or a URI
+ delegate_to: localhost
+
+- name: Update the Network Set name to 'OneViewSDK Test Network Set - Renamed' and change the associated networks
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ newName: OneViewSDK Test Network Set - Renamed
+ networkUris:
+ - Test Ethernet Network_1
+ delegate_to: localhost
+
+- name: Delete the Network Set
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: OneViewSDK Test Network Set - Renamed
+ delegate_to: localhost
+
+- name: Update the Network set with two scopes
+ community.general.oneview_network_set:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: OneViewSDK Test Network Set
+ scopeUris:
+ - /rest/scopes/01SC123456
+ - /rest/scopes/02SC123456
+ delegate_to: localhost
+'''
+
+RETURN = '''
+network_set:
+ description: Has the facts about the Network Set.
+ returned: On state 'present', but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleResourceNotFound
+
+
+class NetworkSetModule(OneViewModuleBase):
+ MSG_CREATED = 'Network Set created successfully.'
+ MSG_UPDATED = 'Network Set updated successfully.'
+ MSG_DELETED = 'Network Set deleted successfully.'
+ MSG_ALREADY_PRESENT = 'Network Set is already present.'
+ MSG_ALREADY_ABSENT = 'Network Set is already absent.'
+ MSG_ETHERNET_NETWORK_NOT_FOUND = 'Ethernet Network not found: '
+ RESOURCE_FACT_NAME = 'network_set'
+
+ argument_spec = dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ data=dict(required=True, type='dict'))
+
+ def __init__(self):
+ super(NetworkSetModule, self).__init__(additional_arg_spec=self.argument_spec,
+ validate_etag_support=True)
+ self.resource_client = self.oneview_client.network_sets
+
+ def execute_module(self):
+ resource = self.get_by_name(self.data.get('name'))
+
+ if self.state == 'present':
+ return self._present(resource)
+ elif self.state == 'absent':
+ return self.resource_absent(resource)
+
+ def _present(self, resource):
+ scope_uris = self.data.pop('scopeUris', None)
+ self._replace_network_name_by_uri(self.data)
+ result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
+ if scope_uris is not None:
+ result = self.resource_scopes_set(result, self.RESOURCE_FACT_NAME, scope_uris)
+ return result
+
+ def _get_ethernet_network_by_name(self, name):
+ result = self.oneview_client.ethernet_networks.get_by('name', name)
+ return result[0] if result else None
+
+ def _get_network_uri(self, network_name_or_uri):
+ if network_name_or_uri.startswith('/rest/ethernet-networks'):
+ return network_name_or_uri
+ else:
+ enet_network = self._get_ethernet_network_by_name(network_name_or_uri)
+ if enet_network:
+ return enet_network['uri']
+ else:
+ raise OneViewModuleResourceNotFound(self.MSG_ETHERNET_NETWORK_NOT_FOUND + network_name_or_uri)
+
+ def _replace_network_name_by_uri(self, data):
+ if 'networkUris' in data:
+ data['networkUris'] = [self._get_network_uri(x) for x in data['networkUris']]
+
+
+def main():
+ NetworkSetModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
new file mode 100644
index 000000000..d1a1f2913
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_network_set_info.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_network_set_info
+short_description: Retrieve information about the OneView Network Sets
+description:
+ - Retrieve information about the Network Sets from OneView.
+ - This module was called C(oneview_network_set_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_network_set_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ name:
+ description:
+ - Network Set name.
+ type: str
+
+ options:
+ description:
+      - "List with options to gather information about the Network Set.
+        Option allowed: C(withoutEthernet).
+        The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.factsparams
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Network Sets
+ ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather paginated, filtered and sorted information about Network Sets
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ params:
+ start: 0
+ count: 3
+ sort: 'name:descending'
+ filter: name='netset001'
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of Network Sets
+ ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about all Network Sets, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Network Sets, excluding Ethernet networks
+ ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Network Set found by name
+ ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+
+- name: Gather information about a Network Set by name, excluding Ethernet networks
+ community.general.oneview_network_set_info:
+ hostname: 172.16.101.48
+ username: administrator
+ password: my_password
+ api_version: 500
+ name: Name of the Network Set
+ options:
+ - withoutEthernet
+ no_log: true
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about Network Set found by name, excluding Ethernet networks
+ ansible.builtin.debug:
+ msg: "{{ result.network_sets }}"
+'''
+
+RETURN = '''
+network_sets:
+ description: Has all the OneView information about the Network Sets.
+ returned: Always, but can be empty.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class NetworkSetInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ name=dict(type='str'),
+ options=dict(type='list', elements='str'),
+ params=dict(type='dict'),
+ )
+
+ def __init__(self):
+ super(NetworkSetInfoModule, self).__init__(
+ additional_arg_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+
+ def execute_module(self):
+
+ name = self.module.params.get('name')
+
+ if 'withoutEthernet' in self.options:
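+            # Build the OneView filter expression (for example "'name'='myset'"); empty when no name was given.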
+ filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
+ network_sets = self.oneview_client.network_sets.get_all_without_ethernet(filter=filter_by_name)
+ elif name:
+ network_sets = self.oneview_client.network_sets.get_by('name', name)
+ else:
+ network_sets = self.oneview_client.network_sets.get_all(**self.facts_params)
+
+ return dict(changed=False, network_sets=network_sets)
+
+
+def main():
+ NetworkSetInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
new file mode 100644
index 000000000..65a016b1c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager
+short_description: Manage OneView SAN Manager resources
+description:
+ - Provides an interface to manage SAN Manager resources. Can create, update, or delete.
+requirements:
+ - hpOneView >= 3.1.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+      - Indicates the desired state for the SAN Manager resource.
+ - C(present) ensures data properties are compliant with OneView.
+ - C(absent) removes the resource from OneView, if it exists.
+ - C(connection_information_set) updates the connection information for the SAN Manager. This operation is non-idempotent.
+ type: str
+ default: present
+ choices: [present, absent, connection_information_set]
+ data:
+ description:
+      - Dict with SAN Manager properties.
+ type: dict
+ required: true
+
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.oneview.validateetag
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Creates a Device Manager for the Brocade SAN provider with the given hostname and credentials
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ providerDisplayName: Brocade Network Advisor
+ connectionInfo:
+ - name: Host
+ value: 172.18.15.1
+ - name: Port
+ value: 5989
+ - name: Username
+ value: username
+ - name: Password
+ value: password
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Ensure a Device Manager for the Cisco SAN Provider is present
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.20.1
+ providerDisplayName: Cisco
+ connectionInfo:
+ - name: Host
+ value: 172.18.20.1
+ - name: SnmpPort
+ value: 161
+ - name: SnmpUserName
+ value: admin
+ - name: SnmpAuthLevel
+ value: authnopriv
+ - name: SnmpAuthProtocol
+ value: sha
+ - name: SnmpAuthString
+ value: password
+ delegate_to: localhost
+
+- name: Sets the SAN Manager connection information
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: connection_information_set
+ data:
+ connectionInfo:
+ - name: Host
+ value: '172.18.15.1'
+ - name: Port
+ value: '5989'
+ - name: Username
+ value: 'username'
+ - name: Password
+ value: 'password'
+ - name: UseSsl
+ value: true
+ delegate_to: localhost
+
+- name: Refreshes the SAN Manager
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: present
+ data:
+ name: 172.18.15.1
+ refreshState: RefreshPending
+ delegate_to: localhost
+
+- name: Delete the SAN Manager recently created
+ community.general.oneview_san_manager:
+ config: /etc/oneview/oneview_config.json
+ state: absent
+ data:
+ name: '172.18.15.1'
+ delegate_to: localhost
+'''
+
+RETURN = '''
+san_manager:
+ description: Has the OneView facts about the SAN Manager.
+ returned: On state 'present'. Can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase, OneViewModuleValueError
+
+
+class SanManagerModule(OneViewModuleBase):
+ MSG_CREATED = 'SAN Manager created successfully.'
+ MSG_UPDATED = 'SAN Manager updated successfully.'
+ MSG_DELETED = 'SAN Manager deleted successfully.'
+ MSG_ALREADY_PRESENT = 'SAN Manager is already present.'
+ MSG_ALREADY_ABSENT = 'SAN Manager is already absent.'
+ MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND = "The provider '{0}' was not found."
+
+ argument_spec = dict(
+ state=dict(type='str', default='present', choices=['absent', 'present', 'connection_information_set']),
+ data=dict(type='dict', required=True)
+ )
+
+ def __init__(self):
+ super(SanManagerModule, self).__init__(additional_arg_spec=self.argument_spec, validate_etag_support=True)
+ self.resource_client = self.oneview_client.san_managers
+
+ def execute_module(self):
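+        # Resolve the resource name: prefer the "Host" value inside connectionInfo,
+        # otherwise fall back to data["name"]; fail if neither is available.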
+ if self.data.get('connectionInfo'):
+ for connection_hash in self.data.get('connectionInfo'):
+ if connection_hash.get('name') == 'Host':
+ resource_name = connection_hash.get('value')
+ elif self.data.get('name'):
+ resource_name = self.data.get('name')
+ else:
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+ msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+            raise OneViewModuleValueError(msg)
+
+ resource = self.resource_client.get_by_name(resource_name)
+
+ if self.state == 'present':
+ changed, msg, san_manager = self._present(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ elif self.state == 'absent':
+ return self.resource_absent(resource, method='remove')
+
+ elif self.state == 'connection_information_set':
+ changed, msg, san_manager = self._connection_information_set(resource)
+ return dict(changed=changed, msg=msg, ansible_facts=dict(san_manager=san_manager))
+
+ def _present(self, resource):
+ if not resource:
+ provider_uri = self.data.get('providerUri', self._get_provider_uri_by_display_name(self.data))
+ return True, self.MSG_CREATED, self.resource_client.add(self.data, provider_uri)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+
+ # Remove 'connectionInfo' from comparison, since it is not possible to validate it.
+ resource.pop('connectionInfo', None)
+ merged_data.pop('connectionInfo', None)
+
+ if self.compare(resource, merged_data):
+ return False, self.MSG_ALREADY_PRESENT, resource
+ else:
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _connection_information_set(self, resource):
+ if not resource:
+ return self._present(resource)
+ else:
+ merged_data = resource.copy()
+ merged_data.update(self.data)
+ merged_data.pop('refreshState', None)
+ if not self.data.get('connectionInfo', None):
+ raise OneViewModuleValueError('A connectionInfo field is required for this operation.')
+ updated_san_manager = self.resource_client.update(resource=merged_data, id_or_uri=resource['uri'])
+ return True, self.MSG_UPDATED, updated_san_manager
+
+ def _get_provider_uri_by_display_name(self, data):
+ display_name = data.get('providerDisplayName')
+ provider_uri = self.resource_client.get_provider_uri(display_name)
+
+ if not provider_uri:
+ raise OneViewModuleValueError(self.MSG_SAN_MANAGER_PROVIDER_DISPLAY_NAME_NOT_FOUND.format(display_name))
+
+ return provider_uri
+
+
+def main():
+ SanManagerModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
new file mode 100644
index 000000000..9b00a6bb5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/oneview_san_manager_info.py
@@ -0,0 +1,129 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: oneview_san_manager_info
+short_description: Retrieve information about one or more of the OneView SAN Managers
+description:
+  - Retrieve information about one or more of the SAN Managers from OneView.
+ - This module was called C(oneview_san_manager_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.oneview_san_manager_info) module no longer returns C(ansible_facts)!
+requirements:
+ - hpOneView >= 2.0.1
+author:
+ - Felipe Bulsoni (@fgbulsoni)
+ - Thiago Miotto (@tmiotto)
+ - Adriane Cardozo (@adriane-cardozo)
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ provider_display_name:
+ description:
+ - Provider Display Name.
+ type: str
+ params:
+ description:
+ - List of params to delimit, filter and sort the list of resources.
+ - "params allowed:
+ - C(start): The first item to return, using 0-based indexing.
+ - C(count): The number of resources to return.
+ - C(query): A general query string to narrow the list of resources returned.
+ - C(sort): The sort order of the returned data set."
+ type: dict
+extends_documentation_fragment:
+ - community.general.oneview
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather information about all SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about SAN Managers
+ ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather paginated, filtered and sorted information about SAN Managers
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ params:
+ start: 0
+ count: 3
+ sort: name:ascending
+ query: isInternal eq false
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about paginated, filtered and sorted list of SAN Managers
+ ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+
+- name: Gather information about a SAN Manager by provider display name
+ community.general.oneview_san_manager_info:
+ config: /etc/oneview/oneview_config.json
+ provider_display_name: Brocade Network Advisor
+ delegate_to: localhost
+ register: result
+
+- name: Print fetched information about SAN Manager found by provider display name
+ ansible.builtin.debug:
+ msg: "{{ result.san_managers }}"
+'''
+
+RETURN = '''
+san_managers:
+ description: Has all the OneView information about the SAN Managers.
+ returned: Always, but can be null.
+ type: dict
+'''
+
+from ansible_collections.community.general.plugins.module_utils.oneview import OneViewModuleBase
+
+
+class SanManagerInfoModule(OneViewModuleBase):
+ argument_spec = dict(
+ provider_display_name=dict(type='str'),
+ params=dict(type='dict')
+ )
+
+ def __init__(self):
+ super(SanManagerInfoModule, self).__init__(
+ additional_arg_spec=self.argument_spec,
+ supports_check_mode=True,
+ )
+ self.resource_client = self.oneview_client.san_managers
+
+ def execute_module(self):
+ if self.module.params.get('provider_display_name'):
+ provider_display_name = self.module.params['provider_display_name']
+ san_manager = self.oneview_client.san_managers.get_by_provider_display_name(provider_display_name)
+ if san_manager:
+ resources = [san_manager]
+ else:
+ resources = []
+ else:
+ resources = self.oneview_client.san_managers.get_all(**self.facts_params)
+
+ return dict(changed=False, san_managers=resources)
+
+
+def main():
+ SanManagerInfoModule().run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/online_server_info.py b/ansible_collections/community/general/plugins/modules/online_server_info.py
new file mode 100644
index 000000000..f6d03cb27
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/online_server_info.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: online_server_info
+short_description: Gather information about Online servers
+description:
+ - Gather information about the servers.
+ - U(https://www.online.net/en/dedicated-server)
+author:
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.online
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Online server information
+ community.general.online_server_info:
+ api_token: '0d1627e8-bbf0-44c5-a46f-5c4d3aef033f'
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_server_info }}"
+'''
+
+RETURN = r'''
+online_server_info:
+ description:
+ - Response from Online API.
+ - "For more details please refer to: U(https://console.online.net/en/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "online_server_info": [
+ {
+ "abuse": "abuse@example.com",
+ "anti_ddos": false,
+ "bmc": {
+ "session_key": null
+ },
+ "boot_mode": "normal",
+ "contacts": {
+ "owner": "foobar",
+ "tech": "foobar"
+ },
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "drive_arrays": [
+ {
+ "disks": [
+ {
+ "$ref": "/api/v1/server/hardware/disk/68452"
+ },
+ {
+ "$ref": "/api/v1/server/hardware/disk/68453"
+ }
+ ],
+ "raid_controller": {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ },
+ "raid_level": "RAID1"
+ }
+ ],
+ "hardware_watch": true,
+ "hostname": "sd-42",
+ "id": 42,
+ "ip": [
+ {
+ "address": "195.154.172.149",
+ "mac": "28:92:4a:33:5e:c6",
+ "reverse": "195-154-172-149.rev.poneytelecom.eu.",
+ "switch_port_state": "up",
+ "type": "public"
+ },
+ {
+ "address": "10.90.53.212",
+ "mac": "28:92:4a:33:5e:c7",
+ "reverse": null,
+ "switch_port_state": "up",
+ "type": "private"
+ }
+ ],
+ "last_reboot": "2018-08-23T08:32:03.000Z",
+ "location": {
+ "block": "A",
+ "datacenter": "DC3",
+ "position": 19,
+ "rack": "A23",
+ "room": "4 4-4"
+ },
+ "network": {
+ "ip": [
+ "195.154.172.149"
+ ],
+ "ipfo": [],
+ "private": [
+ "10.90.53.212"
+ ]
+ },
+ "offer": "Pro-1-S-SATA",
+ "os": {
+ "name": "FreeBSD",
+ "version": "11.1-RELEASE"
+ },
+ "power": "ON",
+ "proactive_monitoring": false,
+ "raid_controllers": [
+ {
+ "$ref": "/api/v1/server/hardware/raidController/9910"
+ }
+ ],
+ "support": "Basic service level"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineServerInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineServerInfo, self).__init__(module)
+ self.name = 'api/v1/server'
+
+ def _get_server_detail(self, server_path):
+ try:
+ return self.get(path=server_path).json
+ except OnlineException as exc:
+ self.module.fail_json(msg="A problem occurred while fetching: %s (%s)" % (server_path, exc))
+
+ def all_detailed_servers(self):
+ servers_api_path = self.get_resources()
+
+ server_data = (
+ self._get_server_detail(server_api_path)
+ for server_api_path in servers_api_path
+ )
+
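+        # _get_server_detail() calls fail_json() (which exits) on API errors, so
+        # only servers whose detail payload came back empty are dropped here.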
+ return [s for s in server_data if s is not None]
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ servers_info = OnlineServerInfo(module).all_detailed_servers()
+ module.exit_json(
+ online_server_info=servers_info
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/online_user_info.py b/ansible_collections/community/general/plugins/modules/online_user_info.py
new file mode 100644
index 000000000..1d91418ca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/online_user_info.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: online_user_info
+short_description: Gather information about Online user
+description:
+ - Gather information about the user.
+author:
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.online
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = r'''
+- name: Gather Online user info
+ community.general.online_user_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.online_user_info }}"
+'''
+
+RETURN = r'''
+online_user_info:
+ description:
+ - Response from Online API.
+ - "For more details please refer to: U(https://console.online.net/en/api/)."
+ returned: success
+ type: dict
+ sample:
+ "online_user_info": {
+ "company": "foobar LLC",
+ "email": "foobar@example.com",
+ "first_name": "foo",
+ "id": 42,
+ "last_name": "bar",
+ "login": "foobar"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.online import (
+ Online, OnlineException, online_argument_spec
+)
+
+
+class OnlineUserInfo(Online):
+
+ def __init__(self, module):
+ super(OnlineUserInfo, self).__init__(module)
+ self.name = 'api/v1/user'
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=online_argument_spec(),
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ online_user_info=OnlineUserInfo(module).get_resources()
+ )
+ except OnlineException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/open_iscsi.py b/ansible_collections/community/general/plugins/modules/open_iscsi.py
new file mode 100644
index 000000000..af08d1c54
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/open_iscsi.py
@@ -0,0 +1,464 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: open_iscsi
+author:
+ - Serge van Ginderachter (@srvg)
+short_description: Manage iSCSI targets with Open-iSCSI
+description:
+  - Discover targets on a given portal, (dis)connect targets, mark targets for
+    manual or automatic startup, and return device nodes of connected targets.
+requirements:
+ - open_iscsi library and tools (iscsiadm)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ portal:
+ description:
+ - The domain name or IP address of the iSCSI target.
+ type: str
+ aliases: [ ip ]
+ port:
+ description:
+ - The port on which the iSCSI target process listens.
+ type: str
+ default: '3260'
+ target:
+ description:
+ - The iSCSI target name.
+ type: str
+ aliases: [ name, targetname ]
+ login:
+ description:
+ - Whether the target node should be connected.
+ type: bool
+ aliases: [ state ]
+ node_auth:
+ description:
+ - The value for C(node.session.auth.authmethod).
+ type: str
+ default: CHAP
+ node_user:
+ description:
+ - The value for C(node.session.auth.username).
+ type: str
+ node_pass:
+ description:
+ - The value for C(node.session.auth.password).
+ type: str
+ node_user_in:
+ description:
+ - The value for C(node.session.auth.username_in).
+ type: str
+ version_added: 3.8.0
+ node_pass_in:
+ description:
+ - The value for C(node.session.auth.password_in).
+ type: str
+ version_added: 3.8.0
+ auto_node_startup:
+ description:
+ - Whether the target node should be automatically connected at startup.
+ type: bool
+ aliases: [ automatic ]
+ auto_portal_startup:
+ description:
+ - Whether the target node portal should be automatically connected at startup.
+ type: bool
+ version_added: 3.2.0
+ discover:
+ description:
+ - Whether the list of target nodes on the portal should be
+ (re)discovered and added to the persistent iSCSI database.
+      - Keep in mind that C(iscsiadm) discovery resets configuration, such as C(node.startup)
+        back to manual; when combined with I(auto_node_startup=true) this therefore always
+        reports a changed state.
+ type: bool
+ default: false
+ show_nodes:
+ description:
+ - Whether the list of nodes in the persistent iSCSI database should be returned by the module.
+ type: bool
+ default: false
+ rescan:
+ description:
+ - Rescan an established session for discovering new targets.
+ - When I(target) is omitted, will rescan all sessions.
+ type: bool
+ default: false
+ version_added: 4.1.0
+
+'''
+
+EXAMPLES = r'''
+- name: Perform a discovery on sun.com and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: true
+ discover: true
+ portal: sun.com
+
+- name: Perform a discovery on 10.1.2.3 and show available target nodes
+ community.general.open_iscsi:
+ show_nodes: true
+ discover: true
+ ip: 10.1.2.3
+
+# NOTE: Only works if exactly one target is exported to the initiator
+- name: Discover targets on portal and login to the one available
+ community.general.open_iscsi:
+ portal: '{{ iscsi_target }}'
+ login: true
+ discover: true
+
+- name: Connect to the named target, after updating the local persistent database (cache)
+ community.general.open_iscsi:
+ login: true
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Disconnect from the cached named target
+ community.general.open_iscsi:
+ login: false
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Override and disable automatic portal login on specific portal
+ community.general.open_iscsi:
+ login: false
+ portal: 10.1.1.250
+ auto_portal_startup: false
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+
+- name: Rescan one or all established sessions to discover new targets (omit target for all sessions)
+ community.general.open_iscsi:
+ rescan: true
+ target: iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d
+'''
+
+import glob
+import os
+import re
+import socket
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+ISCSIADM = 'iscsiadm'
+iscsiadm_cmd = None
+
+
+def compare_nodelists(l1, l2):
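+    # Note: sorts both input lists in place as a side effect.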
+ l1.sort()
+ l2.sort()
+ return l1 == l2
+
+
+def iscsi_get_cached_nodes(module, portal=None):
+ cmd = [iscsiadm_cmd, '--mode', 'node']
+ rc, out, err = module.run_command(cmd)
+
+ nodes = []
+ if rc == 0:
+ lines = out.splitlines()
+ for line in lines:
+ # line format is "ip:port,target_portal_group_tag targetname"
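+            # e.g. "10.1.2.3:3260,1 iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d"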
+ parts = line.split()
+ if len(parts) > 2:
+ module.fail_json(msg='error parsing output', cmd=cmd)
+ target = parts[1]
+ parts = parts[0].split(':')
+ target_portal = parts[0]
+
+ if portal is None or portal == target_portal:
+ nodes.append(target)
+
+    # older versions of iscsiadm don't have nice return codes
+ # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
+ # err can contain [N|n]o records...
+ elif rc == 21 or (rc == 255 and "o records found" in err):
+ pass
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+ return nodes
+
+
+def iscsi_discover(module, portal, port):
+ cmd = [iscsiadm_cmd, '--mode', 'discovery', '--type', 'sendtargets', '--portal', '%s:%s' % (portal, port)]
+ module.run_command(cmd, check_rc=True)
+
+
+def iscsi_rescan(module, target=None):
+ if target is None:
+ cmd = [iscsiadm_cmd, '--mode', 'session', '--rescan']
+ else:
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--rescan', '-T', target]
+ rc, out, err = module.run_command(cmd)
+ return out
+
+
+def target_loggedon(module, target, portal=None, port=None):
+ cmd = [iscsiadm_cmd, '--mode', 'session']
+ rc, out, err = module.run_command(cmd)
+
+ if portal is None:
+ portal = ""
+ if port is None:
+ port = ""
+
+ if rc == 0:
+ search_re = "%s:%s.*%s" % (re.escape(portal), port, re.escape(target))
+ return re.search(search_re, out) is not None
+ elif rc == 21:
+ return False
+ else:
+ module.fail_json(cmd=cmd, rc=rc, msg=err)
+
+
+def target_login(module, target, portal=None, port=None):
+ node_auth = module.params['node_auth']
+ node_user = module.params['node_user']
+ node_pass = module.params['node_pass']
+ node_user_in = module.params['node_user_in']
+ node_pass_in = module.params['node_pass_in']
+
+ if node_user:
+ params = [('node.session.auth.authmethod', node_auth),
+ ('node.session.auth.username', node_user),
+ ('node.session.auth.password', node_pass)]
+ for (name, value) in params:
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
+ module.run_command(cmd, check_rc=True)
+
+ if node_user_in:
+ params = [('node.session.auth.username_in', node_user_in),
+ ('node.session.auth.password_in', node_pass_in)]
+ for (name, value) in params:
+            cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', name, '--value', value]
+ module.run_command(cmd, check_rc=True)
+
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--login']
+ if portal is not None and port is not None:
+ cmd.append('--portal')
+ cmd.append('%s:%s' % (portal, port))
+
+ module.run_command(cmd, check_rc=True)
+
+
+def target_logout(module, target):
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--logout']
+ module.run_command(cmd, check_rc=True)
+
+
+def target_device_node(target):
+    # if anyone knows a better way to find out which devicenodes get created for
+ # a given target...
+
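+    # A by-path entry typically looks like
+    # "ip-<portal>:<port>-iscsi-<target_iqn>-lun-<n>".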
+ devices = glob.glob('/dev/disk/by-path/*%s*' % target)
+ devdisks = []
+ for dev in devices:
+ # exclude partitions
+ if "-part" not in dev:
+ devdisk = os.path.realpath(dev)
+ # only add once (multi-path?)
+ if devdisk not in devdisks:
+ devdisks.append(devdisk)
+ return devdisks
+
+
+def target_isauto(module, target, portal=None, port=None):
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target]
+
+ if portal is not None and port is not None:
+ cmd.append('--portal')
+ cmd.append('%s:%s' % (portal, port))
+
+ dummy, out, dummy = module.run_command(cmd, check_rc=True)
+
+ lines = out.splitlines()
+ for line in lines:
+ if 'node.startup' in line:
+ return 'automatic' in line
+ return False
+
+
+def target_setauto(module, target, portal=None, port=None):
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'automatic']
+
+ if portal is not None and port is not None:
+ cmd.append('--portal')
+ cmd.append('%s:%s' % (portal, port))
+
+ module.run_command(cmd, check_rc=True)
+
+
+def target_setmanual(module, target, portal=None, port=None):
+ cmd = [iscsiadm_cmd, '--mode', 'node', '--targetname', target, '--op=update', '--name', 'node.startup', '--value', 'manual']
+
+ if portal is not None and port is not None:
+ cmd.append('--portal')
+ cmd.append('%s:%s' % (portal, port))
+
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+ # load ansible module object
+ module = AnsibleModule(
+ argument_spec=dict(
+
+ # target
+ portal=dict(type='str', aliases=['ip']),
+ port=dict(type='str', default='3260'),
+ target=dict(type='str', aliases=['name', 'targetname']),
+ node_auth=dict(type='str', default='CHAP'),
+ node_user=dict(type='str'),
+ node_pass=dict(type='str', no_log=True),
+ node_user_in=dict(type='str'),
+ node_pass_in=dict(type='str', no_log=True),
+
+ # actions
+ login=dict(type='bool', aliases=['state']),
+ auto_node_startup=dict(type='bool', aliases=['automatic']),
+ auto_portal_startup=dict(type='bool'),
+ discover=dict(type='bool', default=False),
+ show_nodes=dict(type='bool', default=False),
+ rescan=dict(type='bool', default=False),
+ ),
+
+ required_together=[['node_user', 'node_pass'], ['node_user_in', 'node_pass_in']],
+ required_if=[('discover', True, ['portal'])],
+ supports_check_mode=True,
+ )
+
+ global iscsiadm_cmd
+ iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True)
+
+ # parameters
+ portal = module.params['portal']
+ if portal:
+ try:
+ portal = socket.getaddrinfo(portal, None)[0][4][0]
+ except socket.gaierror:
+ module.fail_json(msg="Portal address is incorrect")
+
+ target = module.params['target']
+ port = module.params['port']
+ login = module.params['login']
+ automatic = module.params['auto_node_startup']
+ automatic_portal = module.params['auto_portal_startup']
+ discover = module.params['discover']
+ show_nodes = module.params['show_nodes']
+ rescan = module.params['rescan']
+
+ check = module.check_mode
+
+ cached = iscsi_get_cached_nodes(module, portal)
+
+ # return json dict
+ result = {'changed': False}
+
+ if discover:
+ if check:
+ nodes = cached
+ else:
+ iscsi_discover(module, portal, port)
+ nodes = iscsi_get_cached_nodes(module, portal)
+ if not compare_nodelists(cached, nodes):
+ result['changed'] |= True
+ result['cache_updated'] = True
+ else:
+ nodes = cached
+
+ if login is not None or automatic is not None:
+ if target is None:
+ if len(nodes) > 1:
+ module.fail_json(msg="Need to specify a target")
+ else:
+ target = nodes[0]
+ else:
+ # check given target is in cache
+ check_target = False
+ for node in nodes:
+ if node == target:
+ check_target = True
+ break
+ if not check_target:
+ module.fail_json(msg="Specified target not found")
+
+ if show_nodes:
+ result['nodes'] = nodes
+
+ if login is not None:
+ loggedon = target_loggedon(module, target, portal, port)
+ if (login and loggedon) or (not login and not loggedon):
+ result['changed'] |= False
+ if login:
+ result['devicenodes'] = target_device_node(target)
+ elif not check:
+ if login:
+ target_login(module, target, portal, port)
+ # give udev some time
+ time.sleep(1)
+ result['devicenodes'] = target_device_node(target)
+ else:
+ target_logout(module, target)
+ result['changed'] |= True
+ result['connection_changed'] = True
+ else:
+ result['changed'] |= True
+ result['connection_changed'] = True
+
+ if automatic is not None:
+ isauto = target_isauto(module, target)
+ if (automatic and isauto) or (not automatic and not isauto):
+ result['changed'] |= False
+ result['automatic_changed'] = False
+ elif not check:
+ if automatic:
+ target_setauto(module, target)
+ else:
+ target_setmanual(module, target)
+ result['changed'] |= True
+ result['automatic_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_changed'] = True
+
+ if automatic_portal is not None:
+ isauto = target_isauto(module, target, portal, port)
+ if (automatic_portal and isauto) or (not automatic_portal and not isauto):
+ result['changed'] |= False
+ result['automatic_portal_changed'] = False
+ elif not check:
+ if automatic_portal:
+ target_setauto(module, target, portal, port)
+ else:
+ target_setmanual(module, target, portal, port)
+ result['changed'] |= True
+ result['automatic_portal_changed'] = True
+ else:
+ result['changed'] |= True
+ result['automatic_portal_changed'] = True
+
+ if rescan is not False:
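+        # The rescan output is not parsed for differences, so a rescan is always reported as changed.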
+ result['changed'] = True
+ result['sessions'] = iscsi_rescan(module, target)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/openbsd_pkg.py b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
new file mode 100644
index 000000000..2baea828a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/openbsd_pkg.py
@@ -0,0 +1,664 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Patrik Lundin <patrik@sigterm.se>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: openbsd_pkg
+author:
+ - Patrik Lundin (@eest)
+short_description: Manage packages on OpenBSD
+description:
+ - Manage packages on OpenBSD using the pkg tools.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - A name or a list of names of the packages.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ choices: [ absent, latest, present, installed, removed ]
+ default: present
+ type: str
+ build:
+ description:
+ - Build the package from source instead of downloading and installing
+ a binary. Requires that the port source tree is already installed.
+ Automatically builds and installs the 'sqlports' package, if it is
+ not already installed.
+ - Mutually exclusive with I(snapshot).
+ type: bool
+ default: false
+ snapshot:
+ description:
+ - Force C(%c) and C(%m) to expand to C(snapshots), even on a release kernel.
+ - Mutually exclusive with I(build).
+ type: bool
+ default: false
+ version_added: 1.3.0
+ ports_dir:
+ description:
+ - When used in combination with the C(build) option, allows overriding
+ the default ports source directory.
+ default: /usr/ports
+ type: path
+ clean:
+ description:
+ - When updating or removing packages, delete the extra configuration
+ file(s) in the old packages which are annotated with @extra in
+ the packaging-list.
+ type: bool
+ default: false
+ quick:
+ description:
+ - Replace or delete packages quickly; do not bother with checksums
+ before removing normal files.
+ type: bool
+ default: false
+notes:
+  - When used with a C(loop:) each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option
+    (see the last example below).
+'''
+
+EXAMPLES = '''
+- name: Make sure nmap is installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+
+- name: Make sure nmap is the latest version
+ community.general.openbsd_pkg:
+ name: nmap
+ state: latest
+
+- name: Make sure nmap is not installed
+ community.general.openbsd_pkg:
+ name: nmap
+ state: absent
+
+- name: Make sure nmap is installed, build it from source if it is not
+ community.general.openbsd_pkg:
+ name: nmap
+ state: present
+ build: true
+
+- name: Specify a pkg flavour with '--'
+ community.general.openbsd_pkg:
+ name: vim--no_x11
+ state: present
+
+- name: Specify the default flavour to avoid ambiguity errors
+ community.general.openbsd_pkg:
+ name: vim--
+ state: present
+
+- name: Specify a package branch (requires at least OpenBSD 6.0)
+ community.general.openbsd_pkg:
+ name: python%3.5
+ state: present
+
+- name: Update all packages on the system
+ community.general.openbsd_pkg:
+ name: '*'
+ state: latest
+
+- name: Purge a package and its configuration files
+ community.general.openbsd_pkg:
+ name: mpd
+ clean: true
+ state: absent
+
+- name: Quickly remove a package without checking checksums
+ community.general.openbsd_pkg:
+ name: qt5
+ quick: true
+ state: absent
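+
+# A minimal sketch: I(name) also accepts a list of packages, which is much
+# more efficient than looping over them one at a time (see the notes).
+- name: Install several packages in a single pkg_add run
+  community.general.openbsd_pkg:
+    name:
+      - nmap
+      - vim--
+    state: present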
+'''
+
+import os
+import platform
+import re
+import shlex
+import sqlite3
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+
+# Function used for executing commands.
+def execute_command(cmd, module):
+ # Break command line into arguments.
+ # This makes run_command() use shell=False which we need to not cause shell
+ # expansion of special characters like '*'.
+ cmd_args = shlex.split(cmd)
+
+ # We set TERM to 'dumb' to keep pkg_add happy if the machine running
+ # ansible is using a TERM that the managed machine does not know about,
+ # e.g.: "No progress meter: failed termcap lookup on xterm-kitty".
+ return module.run_command(cmd_args, environ_update={'TERM': 'dumb'})
+
+
+# Function used to find out if a package is currently installed.
+def get_package_state(names, pkg_spec, module):
+ info_cmd = 'pkg_info -Iq'
+
+ for name in names:
+ command = "%s inst:%s" % (info_cmd, name)
+
+ rc, stdout, stderr = execute_command(command, module)
+
+ if stderr:
+ module.fail_json(msg="failed in get_package_state(): " + stderr)
+
+ if stdout:
+ # If the requested package name is just a stem, like "python", we may
+ # find multiple packages with that name.
+ pkg_spec[name]['installed_names'] = stdout.splitlines()
+ module.debug("get_package_state(): installed_names = %s" % pkg_spec[name]['installed_names'])
+ pkg_spec[name]['installed_state'] = True
+ else:
+ pkg_spec[name]['installed_state'] = False
+
+
+# Function used to make sure a package is present.
+def package_present(names, pkg_spec, module):
+ build = module.params['build']
+
+ for name in names:
+ # It is possible package_present() has been called from package_latest().
+ # In that case we do not want to operate on the whole list of names,
+ # only the leftovers.
+ if pkg_spec['package_latest_leftovers']:
+ if name not in pkg_spec['package_latest_leftovers']:
+ module.debug("package_present(): ignoring '%s' which is not a package_latest() leftover" % name)
+ continue
+ else:
+ module.debug("package_present(): handling package_latest() leftovers, installing '%s'" % name)
+
+ if module.check_mode:
+ install_cmd = 'pkg_add -Imn'
+ else:
+ if build is True:
+ port_dir = "%s/%s" % (module.params['ports_dir'], get_package_source_path(name, pkg_spec, module))
+ if os.path.isdir(port_dir):
+ if pkg_spec[name]['flavor']:
+ flavors = pkg_spec[name]['flavor'].replace('-', ' ')
+ install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
+ elif pkg_spec[name]['subpackage']:
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
+ else:
+ install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
+ else:
+ module.fail_json(msg="the port source directory %s does not exist" % (port_dir))
+ else:
+ install_cmd = 'pkg_add -Im'
+
+ if module.params['snapshot'] is True:
+ install_cmd += ' -Dsnap'
+
+ if pkg_spec[name]['installed_state'] is False:
+
+ # Attempt to install the package
+ if build is True and not module.check_mode:
+                    (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = module.run_command(install_cmd, use_unsafe_shell=True)
+ else:
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (install_cmd, name), module)
+
+ # The behaviour of pkg_add is a bit different depending on if a
+ # specific version is supplied or not.
+ #
+ # When a specific version is supplied the return code will be 0 when
+ # a package is found and 1 when it is not. If a version is not
+ # supplied the tool will exit 0 in both cases.
+ #
+ # It is important to note that "version" relates to the
+ # packages-specs(7) notion of a version. If using the branch syntax
+ # (like "python%3.5") even though a branch name may look like a
+            # version string it is not used as one by pkg_add.
+ if pkg_spec[name]['version'] or build is True:
+ # Depend on the return code.
+ module.debug("package_present(): depending on return code for name '%s'" % name)
+ if pkg_spec[name]['rc']:
+ pkg_spec[name]['changed'] = False
+ else:
+ # Depend on stderr instead.
+ module.debug("package_present(): depending on stderr for name '%s'" % name)
+ if pkg_spec[name]['stderr']:
+ # There is a corner case where having an empty directory in
+ # installpath prior to the right location will result in a
+ # "file:/local/package/directory/ is empty" message on stderr
+                    # while still installing the package, so we need to look
+                    # for a message like "packagename-1.0: ok" just in case.
+ match = re.search(r"\W%s-[^:]+: ok\W" % re.escape(pkg_spec[name]['stem']), pkg_spec[name]['stdout'])
+
+ if match:
+ # It turns out we were able to install the package.
+ module.debug("package_present(): we were able to install package for name '%s'" % name)
+ pkg_spec[name]['changed'] = True
+ else:
+ # We really did fail, fake the return code.
+ module.debug("package_present(): we really did fail for name '%s'" % name)
+ pkg_spec[name]['rc'] = 1
+ pkg_spec[name]['changed'] = False
+ else:
+ module.debug("package_present(): stderr was not set for name '%s'" % name)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to make sure a package is the latest available version.
+def package_latest(names, pkg_spec, module):
+ if module.params['build'] is True:
+ module.fail_json(msg="the combination of build=%s and state=latest is not supported" % module.params['build'])
+
+ upgrade_cmd = 'pkg_add -um'
+
+ if module.check_mode:
+ upgrade_cmd += 'n'
+
+ if module.params['clean']:
+ upgrade_cmd += 'c'
+
+ if module.params['quick']:
+ upgrade_cmd += 'q'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+
+ # Attempt to upgrade the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (upgrade_cmd, name), module)
+
+ # Look for output looking something like "nmap-6.01->6.25: ok" to see if
+ # something changed (or would have changed). Use \W to delimit the match
+ # from progress meter output.
+ pkg_spec[name]['changed'] = False
+ for installed_name in pkg_spec[name]['installed_names']:
+ module.debug("package_latest(): checking for pre-upgrade package name: %s" % installed_name)
+ match = re.search(r"\W%s->.+: ok\W" % re.escape(installed_name), pkg_spec[name]['stdout'])
+ if match:
+ module.debug("package_latest(): pre-upgrade package name match: %s" % installed_name)
+
+ pkg_spec[name]['changed'] = True
+ break
+
+ # FIXME: This part is problematic. Based on the issues mentioned (and
+ # handled) in package_present() it is not safe to blindly trust stderr
+ # as an indicator that the command failed, and in the case with
+ # empty installpath directories this will break.
+ #
+ # For now keep this safeguard here, but ignore it if we managed to
+ # parse out a successful update above. This way we will report a
+ # successful run when we actually modify something but fail
+ # otherwise.
+ if pkg_spec[name]['changed'] is not True:
+ if pkg_spec[name]['stderr']:
+ pkg_spec[name]['rc'] = 1
+
+ else:
+ # Note packages that need to be handled by package_present
+ module.debug("package_latest(): package '%s' is not installed, will be handled by package_present()" % name)
+ pkg_spec['package_latest_leftovers'].append(name)
+
+ # If there were any packages that were not installed we call
+ # package_present() which will handle those.
+ if pkg_spec['package_latest_leftovers']:
+ module.debug("package_latest(): calling package_present() to handle leftovers")
+ package_present(names, pkg_spec, module)
+
+
+# Function used to make sure a package is not installed.
+def package_absent(names, pkg_spec, module):
+ remove_cmd = 'pkg_delete -I'
+
+ if module.check_mode:
+ remove_cmd += 'n'
+
+ if module.params['clean']:
+ remove_cmd += 'c'
+
+ if module.params['quick']:
+ remove_cmd += 'q'
+
+ for name in names:
+ if pkg_spec[name]['installed_state'] is True:
+ # Attempt to remove the package.
+ (pkg_spec[name]['rc'], pkg_spec[name]['stdout'], pkg_spec[name]['stderr']) = execute_command("%s %s" % (remove_cmd, name), module)
+
+ if pkg_spec[name]['rc'] == 0:
+ pkg_spec[name]['changed'] = True
+ else:
+ pkg_spec[name]['changed'] = False
+
+ else:
+ pkg_spec[name]['rc'] = 0
+ pkg_spec[name]['stdout'] = ''
+ pkg_spec[name]['stderr'] = ''
+ pkg_spec[name]['changed'] = False
+
+
+# Function used to parse the package name based on packages-specs(7).
+# The general name structure is "stem-version[-flavors]".
+#
+# Names containing "%" are a special variation not part of the
+# packages-specs(7) syntax. See pkg_add(1) on OpenBSD 6.0 or later for a
+# description.
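+#
+# A few illustrative parses, based on the regexes below:
+#   "nmap-6.25"   -> stem "nmap", version "6.25"             (style: version)
+#   "vim--no_x11" -> stem "vim", no version, flavor "no_x11" (style: versionless)
+#   "python%3.5"  -> stem "python", branch "3.5"             (style: stem)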
+def parse_package_name(names, pkg_spec, module):
+
+ # Initialize empty list of package_latest() leftovers.
+ pkg_spec['package_latest_leftovers'] = []
+
+ for name in names:
+ module.debug("parse_package_name(): parsing name: %s" % name)
+ # Do some initial matches so we can base the more advanced regex on that.
+ version_match = re.search("-[0-9]", name)
+ versionless_match = re.search("--", name)
+
+ # Stop if someone is giving us a name that both has a version and is
+ # version-less at the same time.
+ if version_match and versionless_match:
+ module.fail_json(msg="package name both has a version and is version-less: " + name)
+
+ # All information for a given name is kept in the pkg_spec keyed by that name.
+ pkg_spec[name] = {}
+
+ # If name includes a version.
+ if version_match:
+ match = re.search("^(?P<stem>[^%]+)-(?P<version>[0-9][^-]*)(?P<flavor_separator>-)?(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = match.group('version')
+ pkg_spec[name]['flavor_separator'] = match.group('flavor_separator')
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'version'
+ module.debug("version_match: stem: %(stem)s, version: %(version)s, flavor_separator: %(flavor_separator)s, "
+ "flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at version_match: " + name)
+
+ # If name includes no version but is version-less ("--").
+ elif versionless_match:
+ match = re.search("^(?P<stem>[^%]+)--(?P<flavor>[a-z].*)?(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = '-'
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = '-'
+ pkg_spec[name]['flavor'] = match.group('flavor')
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'versionless'
+ module.debug("versionless_match: stem: %(stem)s, flavor: %(flavor)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at versionless_match: " + name)
+
+ # If name includes no version, and is not version-less, it is all a
+ # stem, possibly with a branch (%branchname) tacked on at the
+ # end.
+ else:
+ match = re.search("^(?P<stem>[^%]+)(%(?P<branch>.+))?$", name)
+ if match:
+ pkg_spec[name]['stem'] = match.group('stem')
+ pkg_spec[name]['version_separator'] = None
+ pkg_spec[name]['version'] = None
+ pkg_spec[name]['flavor_separator'] = None
+ pkg_spec[name]['flavor'] = None
+ pkg_spec[name]['branch'] = match.group('branch')
+ pkg_spec[name]['style'] = 'stem'
+ module.debug("stem_match: stem: %(stem)s, branch: %(branch)s, style: %(style)s" % pkg_spec[name])
+ else:
+ module.fail_json(msg="unable to parse package name at else: " + name)
+
+ # Verify that the managed host is new enough to support branch syntax.
+ if pkg_spec[name]['branch']:
+ branch_release = "6.0"
+
+ if LooseVersion(platform.release()) < LooseVersion(branch_release):
+ module.fail_json(msg="package name using 'branch' syntax requires at least OpenBSD %s: %s" % (branch_release, name))
+
+ # Sanity check that there are no trailing dashes in flavor.
+ # Try to stop strange stuff early so we can be strict later.
+ if pkg_spec[name]['flavor']:
+ match = re.search("-$", pkg_spec[name]['flavor'])
+ if match:
+ module.fail_json(msg="trailing dash in flavor: " + pkg_spec[name]['flavor'])
+
+
+# Function used for figuring out the port path.
+def get_package_source_path(name, pkg_spec, module):
+ pkg_spec[name]['subpackage'] = None
+ if pkg_spec[name]['stem'] == 'sqlports':
+ return 'databases/sqlports'
+ else:
+ # try for an exact match first
+ sqlports_db_file = '/usr/local/share/sqlports'
+ if not os.path.isfile(sqlports_db_file):
+ module.fail_json(msg="sqlports file '%s' is missing" % sqlports_db_file)
+
+ conn = sqlite3.connect(sqlports_db_file)
+ first_part_of_query = 'SELECT fullpkgpath, fullpkgname FROM ports WHERE fullpkgname'
+ query = first_part_of_query + ' = ?'
+ module.debug("package_package_source_path(): exact query: %s" % query)
+ cursor = conn.execute(query, (name,))
+ results = cursor.fetchall()
+
+ # next, try for a fuzzier match
+ if len(results) < 1:
+ looking_for = pkg_spec[name]['stem'] + (pkg_spec[name]['version_separator'] or '-') + (pkg_spec[name]['version'] or '%')
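+            # For example, a stem-style name "vim" yields the LIKE pattern
+            # "vim-%", while a versioned name like "vim-8.2" is matched as-is.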
+ query = first_part_of_query + ' LIKE ?'
+ if pkg_spec[name]['flavor']:
+ looking_for += pkg_spec[name]['flavor_separator'] + pkg_spec[name]['flavor']
+ module.debug("package_package_source_path(): fuzzy flavor query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ elif pkg_spec[name]['style'] == 'versionless':
+ query += ' AND fullpkgname NOT LIKE ?'
+ module.debug("package_package_source_path(): fuzzy versionless query: %s" % query)
+ cursor = conn.execute(query, (looking_for, "%s-%%" % looking_for,))
+ else:
+ module.debug("package_package_source_path(): fuzzy query: %s" % query)
+ cursor = conn.execute(query, (looking_for,))
+ results = cursor.fetchall()
+
+ # error if we don't find exactly 1 match
+ conn.close()
+ if len(results) < 1:
+ module.fail_json(msg="could not find a port by the name '%s'" % name)
+ if len(results) > 1:
+ matches = map(lambda x: x[1], results)
+ module.fail_json(msg="too many matches, unsure which to build: %s" % ' OR '.join(matches))
+
+ # there's exactly 1 match, so figure out the subpackage, if any, then return
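+        # For example, a fullpkgpath of "databases/postgresql,-server" (a
+        # hypothetical entry) yields the port path "databases/postgresql" and
+        # the subpackage "-server".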
+ fullpkgpath = results[0][0]
+ parts = fullpkgpath.split(',')
+ if len(parts) > 1 and parts[1][0] == '-':
+ pkg_spec[name]['subpackage'] = parts[1]
+ return parts[0]
+
+
+# Function used for upgrading all installed packages.
+def upgrade_packages(pkg_spec, module):
+ if module.check_mode:
+ upgrade_cmd = 'pkg_add -Imnu'
+ else:
+ upgrade_cmd = 'pkg_add -Imu'
+
+ if module.params['snapshot']:
+ upgrade_cmd += ' -Dsnap'
+
+ # Create a minimal pkg_spec entry for '*' to store return values.
+ pkg_spec['*'] = {}
+
+ # Attempt to upgrade all packages.
+ pkg_spec['*']['rc'], pkg_spec['*']['stdout'], pkg_spec['*']['stderr'] = execute_command("%s" % upgrade_cmd, module)
+
+ # Try to find any occurrence of a package changing version like:
+ # "bzip2-1.0.6->1.0.6p0: ok".
+ match = re.search(r"\W\w.+->.+: ok\W", pkg_spec['*']['stdout'])
+ if match:
+ pkg_spec['*']['changed'] = True
+
+ else:
+ pkg_spec['*']['changed'] = False
+
+ # It seems we can not trust the return value, so depend on the presence of
+ # stderr to know if something failed.
+ if pkg_spec['*']['stderr']:
+ pkg_spec['*']['rc'] = 1
+ else:
+ pkg_spec['*']['rc'] = 0
+
+
+# ===========================================
+# Main control flow.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ build=dict(type='bool', default=False),
+ snapshot=dict(type='bool', default=False),
+ ports_dir=dict(type='path', default='/usr/ports'),
+ quick=dict(type='bool', default=False),
+ clean=dict(type='bool', default=False),
+ ),
+ mutually_exclusive=[['snapshot', 'build']],
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ build = module.params['build']
+ ports_dir = module.params['ports_dir']
+
+ rc = 0
+ stdout = ''
+ stderr = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+ result['build'] = build
+
+ # The data structure used to keep track of package information.
+ pkg_spec = {}
+
+ if build is True:
+ if not os.path.isdir(ports_dir):
+ module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))
+
+        # Build sqlports if it's not installed yet.
+ parse_package_name(['sqlports'], pkg_spec, module)
+ get_package_state(['sqlports'], pkg_spec, module)
+ if not pkg_spec['sqlports']['installed_state']:
+ module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
+ package_present(['sqlports'], pkg_spec, module)
+
+ asterisk_name = False
+ for n in name:
+ if n == '*':
+ if len(name) != 1:
+ module.fail_json(msg="the package name '*' can not be mixed with other names")
+
+ asterisk_name = True
+
+ if asterisk_name:
+ if state != 'latest':
+ module.fail_json(msg="the package name '*' is only valid when using state=latest")
+ else:
+ # Perform an upgrade of all installed packages.
+ upgrade_packages(pkg_spec, module)
+ else:
+ # Parse package names and put results in the pkg_spec dictionary.
+ parse_package_name(name, pkg_spec, module)
+
+ # Not sure how the branch syntax is supposed to play together
+ # with build mode. Disable it for now.
+ for n in name:
+ if pkg_spec[n]['branch'] and module.params['build'] is True:
+ module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))
+
+ # Get state for all package names.
+ get_package_state(name, pkg_spec, module)
+
+ # Perform requested action.
+ if state in ['installed', 'present']:
+ package_present(name, pkg_spec, module)
+ elif state in ['absent', 'removed']:
+ package_absent(name, pkg_spec, module)
+ elif state == 'latest':
+ package_latest(name, pkg_spec, module)
+
+ # The combined changed status for all requested packages. If anything
+ # is changed this is set to True.
+ combined_changed = False
+
+ # The combined failed status for all requested packages. If anything
+ # failed this is set to True.
+ combined_failed = False
+
+    # We combine all error messages in this comma-separated string, for example:
+ # "msg": "Can't find nmapp\n, Can't find nmappp\n"
+ combined_error_message = ''
+
+ # Loop over all requested package names and check if anything failed or
+ # changed.
+ for n in name:
+ if pkg_spec[n]['rc'] != 0:
+ combined_failed = True
+ if pkg_spec[n]['stderr']:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stderr']
+ else:
+ combined_error_message = pkg_spec[n]['stderr']
+ else:
+ if combined_error_message:
+ combined_error_message += ", %s" % pkg_spec[n]['stdout']
+ else:
+ combined_error_message = pkg_spec[n]['stdout']
+
+ if pkg_spec[n]['changed'] is True:
+ combined_changed = True
+
+    # If combined_failed is set, at least some part of the list of requested
+    # package names failed, so report the combined error message.
+ if combined_failed:
+ module.fail_json(msg=combined_error_message, **result)
+
+ result['changed'] = combined_changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/opendj_backendprop.py b/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
new file mode 100644
index 000000000..fed53532d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/opendj_backendprop.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Werner Dijkerman (ikben@werner-dijkerman.nl)
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: opendj_backendprop
+short_description: Update the backend configuration of OpenDJ via the dsconfig set-backend-prop command
+description:
+ - This module will update settings for OpenDJ with the command set-backend-prop.
+  - It will first check via the get-backend-prop command whether the configuration needs to be applied.
+author:
+ - Werner Dijkerman (@dj-wasabi)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ opendj_bindir:
+ description:
+ - The path to the bin directory of OpenDJ.
+ required: false
+ default: /opt/opendj/bin
+ type: path
+ hostname:
+ description:
+ - The hostname of the OpenDJ server.
+ required: true
+ type: str
+ port:
+ description:
+ - The Admin port on which the OpenDJ instance is available.
+ required: true
+ type: str
+ username:
+ description:
+      - The username to connect as.
+ required: false
+ default: cn=Directory Manager
+ type: str
+ password:
+ description:
+ - The password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ type: str
+ passwordfile:
+ description:
+ - Location to the password file which holds the password for the cn=Directory Manager user.
+ - Either password or passwordfile is needed.
+ required: false
+ type: path
+ backend:
+ description:
+ - The name of the backend on which the property needs to be updated.
+ required: true
+ type: str
+ name:
+ description:
+ - The configuration setting to update.
+ required: true
+ type: str
+ value:
+ description:
+ - The value for the configuration item.
+ required: true
+ type: str
+ state:
+ description:
+      - Whether the configuration needs to be added or updated.
+ required: false
+ default: "present"
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Add or update OpenDJ backend properties
+  community.general.opendj_backendprop:
+    hostname: localhost
+    port: 4444
+    username: "cn=Directory Manager"
+    password: password
+    backend: userRoot
+    name: index-entry-limit
+    value: 5000
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class BackendProp(object):
+
+ def __init__(self, module):
+ self._module = module
+
+ def get_property(self, opendj_bindir, hostname, port, username, password_method, backend_name):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'get-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '-n', '-X', '-s'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return stdout
+ else:
+ self._module.fail_json(msg="Error message: " + str(stderr))
+
+ def set_property(self, opendj_bindir, hostname, port, username, password_method, backend_name, name, value):
+ my_command = [
+ opendj_bindir + '/dsconfig',
+ 'set-backend-prop',
+ '-h', hostname,
+ '--port', str(port),
+ '--bindDN', username,
+ '--backend-name', backend_name,
+ '--set', name + ":" + value,
+ '-n', '-X'
+ ] + password_method
+ rc, stdout, stderr = self._module.run_command(my_command)
+ if rc == 0:
+ return True
+ else:
+ self._module.fail_json(msg="Error message: " + stderr)
+
+ def validate_data(self, data=None, name=None, value=None):
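+        # The data is assumed to be the script-friendly output of
+        # "dsconfig get-backend-prop ... -s": one "property<whitespace>value"
+        # pair per line, for example:
+        #     index-entry-limit  4000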
+ for config_line in data.split('\n'):
+ if config_line:
+ split_line = config_line.split()
+ if split_line[0] == name:
+ if split_line[1] == value:
+ return True
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ opendj_bindir=dict(default="/opt/opendj/bin", type="path"),
+ hostname=dict(required=True),
+ port=dict(required=True),
+ username=dict(default="cn=Directory Manager", required=False),
+ password=dict(required=False, no_log=True),
+ passwordfile=dict(required=False, type="path"),
+ backend=dict(required=True),
+ name=dict(required=True),
+ value=dict(required=True),
+ state=dict(default="present"),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[['password', 'passwordfile']],
+ required_one_of=[['password', 'passwordfile']]
+ )
+
+ opendj_bindir = module.params['opendj_bindir']
+ hostname = module.params['hostname']
+ port = module.params['port']
+ username = module.params['username']
+ password = module.params['password']
+ passwordfile = module.params['passwordfile']
+ backend_name = module.params['backend']
+ name = module.params['name']
+ value = module.params['value']
+ state = module.params['state']
+
+ if module.params["password"] is not None:
+ password_method = ['-w', password]
+ elif module.params["passwordfile"] is not None:
+ password_method = ['-j', passwordfile]
+
+ opendj = BackendProp(module)
+ validate = opendj.get_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name)
+
+ if validate:
+ if not opendj.validate_data(data=validate, name=name, value=value):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ if opendj.set_property(opendj_bindir=opendj_bindir,
+ hostname=hostname,
+ port=port,
+ username=username,
+ password_method=password_method,
+ backend_name=backend_name,
+ name=name,
+ value=value):
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/openwrt_init.py b/ansible_collections/community/general/plugins/modules/openwrt_init.py
new file mode 100644
index 000000000..a0e156b33
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/openwrt_init.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Andrew Gaffney <andrew@agaffney.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: openwrt_init
+author:
+ - "Andrew Gaffney (@agaffney)"
+short_description: Manage services on OpenWrt
+description:
+ - Controls OpenWrt services on remote hosts.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of the service.
+ required: true
+ aliases: ['service']
+ state:
+ type: str
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
+ C(restarted) will always bounce the service. C(reloaded) will always reload.
+ choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
+ enabled:
+ description:
+      - Whether the service should start on boot. B(At least one of state and enabled is required.)
+ type: bool
+ pattern:
+ type: str
+ description:
+      - If the service does not respond to the 'running' command, name a
+        substring to look for in the output of the I(ps) command as a
+        stand-in for a 'running' result. If the string is found, the
+        service will be assumed to be running.
+notes:
+ - One option other than name is required.
+requirements:
+ - An OpenWrt system (with python)
+'''
+
+EXAMPLES = '''
+- name: Start service httpd, if not running
+ community.general.openwrt_init:
+ state: started
+ name: httpd
+
+- name: Stop service cron, if running
+ community.general.openwrt_init:
+ name: cron
+ state: stopped
+
+- name: Reload service httpd, in all cases
+ community.general.openwrt_init:
+ name: httpd
+ state: reloaded
+
+- name: Enable service httpd
+ community.general.openwrt_init:
+ name: httpd
+ enabled: true
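+
+# A sketch with placeholder values: "myservice" stands for a service whose
+# init script does not implement the 'running' command, so a ps pattern is
+# used to detect whether it is running.
+- name: Start service myservice, detecting it via a ps pattern
+  community.general.openwrt_init:
+    name: myservice
+    state: started
+    pattern: /usr/bin/myservice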
+'''
+
+RETURN = '''
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+module = None
+init_script = None
+
+
+# ===============================
+# Check if service is enabled
+def is_enabled():
+ rc, dummy, dummy = module.run_command([init_script, 'enabled'])
+ return rc == 0
+
+
+# ===========================================
+# Main control flow
+def main():
+ global module, init_script
+ # init
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, type='str', aliases=['service']),
+ state=dict(type='str', choices=['started', 'stopped', 'restarted', 'reloaded']),
+ enabled=dict(type='bool'),
+ pattern=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[('state', 'enabled')],
+ )
+
+ # initialize
+ service = module.params['name']
+ init_script = '/etc/init.d/' + service
+ result = {
+ 'name': service,
+ 'changed': False,
+ }
+ # check if service exists
+ if not os.path.exists(init_script):
+ module.fail_json(msg='service %s does not exist' % service)
+
+ # Enable/disable service startup at boot if requested
+ if module.params['enabled'] is not None:
+ # do we need to enable the service?
+ enabled = is_enabled()
+
+ # default to current state
+ result['enabled'] = enabled
+
+ # Change enable/disable if needed
+ if enabled != module.params['enabled']:
+ result['changed'] = True
+ action = 'enable' if module.params['enabled'] else 'disable'
+
+ if not module.check_mode:
+ rc, dummy, err = module.run_command([init_script, action])
+ # openwrt init scripts can return a non-zero exit code on a successful 'enable'
+ # command if the init script doesn't contain a STOP value, so we ignore the exit
+ # code and explicitly check if the service is now in the desired state
+ if is_enabled() != module.params['enabled']:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ result['enabled'] = not enabled
+
+ if module.params['state'] is not None:
+ running = False
+
+ # check if service is currently running
+ if module.params['pattern']:
+ # Find ps binary
+ psbin = module.get_bin_path('ps', True)
+
+            # this should be busybox ps, so we only want/need the 'w' option
+ rc, psout, dummy = module.run_command([psbin, 'w'])
+ # If rc is 0, set running as appropriate
+ if rc == 0:
+ lines = psout.split("\n")
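+                # Exclude lines containing "pattern=", so the module does not
+                # match its own invocation (whose arguments include the
+                # pattern= parameter) in the ps output.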
+ running = any((module.params['pattern'] in line and "pattern=" not in line) for line in lines)
+ else:
+ rc, dummy, dummy = module.run_command([init_script, 'running'])
+ if rc == 0:
+ running = True
+
+ # default to desired state
+ result['state'] = module.params['state']
+
+ # determine action, if any
+ action = None
+ if module.params['state'] == 'started':
+ if not running:
+ action = 'start'
+ result['changed'] = True
+ elif module.params['state'] == 'stopped':
+ if running:
+ action = 'stop'
+ result['changed'] = True
+ else:
+ action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
+ result['state'] = 'started'
+ result['changed'] = True
+
+ if action:
+ if not module.check_mode:
+ rc, dummy, err = module.run_command([init_script, action])
+ if rc != 0:
+ module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/opkg.py b/ansible_collections/community/general/plugins/modules/opkg.py
new file mode 100644
index 000000000..d2ac314d0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/opkg.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Patrick Pelletier <pp.pelletier@gmail.com>
+# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: opkg
+author: "Patrick Pelletier (@skinp)"
+short_description: Package manager for OpenWrt and Openembedded/Yocto based Linux distributions
+description:
+  - Manages ipk packages for OpenWrt and Openembedded/Yocto based Linux distributions.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of package(s) to install/remove.
+ - C(NAME=VERSION) syntax is also supported to install a package
+ in a certain version. See the examples. This only works on Yocto based
+ Linux distributions (opkg>=0.3.2) and not for OpenWrt. This is
+ supported since community.general 6.2.0.
+ aliases: [pkg]
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ choices: [ 'present', 'absent', 'installed', 'removed' ]
+ default: present
+ type: str
+ force:
+ description:
+ - The C(opkg --force) parameter used.
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
+ default: ""
+ type: str
+ update_cache:
+ description:
+ - Update the package DB first.
+ default: false
+ type: bool
+requirements:
+ - opkg
+ - python
+'''
+EXAMPLES = '''
+- name: Install foo
+ community.general.opkg:
+ name: foo
+ state: present
+
+- name: Install foo in version 1.2 (opkg>=0.3.2 on Yocto based Linux distributions)
+ community.general.opkg:
+ name: foo=1.2
+ state: present
+
+- name: Update cache and install foo
+ community.general.opkg:
+ name: foo
+ state: present
+ update_cache: true
+
+- name: Remove foo
+ community.general.opkg:
+ name: foo
+ state: absent
+
+- name: Remove foo and bar
+ community.general.opkg:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Install foo using overwrite option forcibly
+ community.general.opkg:
+ name: foo
+ state: present
+ force: overwrite
+'''
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+
+class Opkg(StateModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True, type="list", elements="str"),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default=False, type='bool'),
+ ),
+ )
+
+ def __init_module__(self):
+ self.vars.set("install_c", 0, output=False, change=True)
+ self.vars.set("remove_c", 0, output=False, change=True)
+
+ state_map = dict(
+ query="list-installed",
+ present="install",
+ installed="install",
+ absent="remove",
+ removed="remove",
+ )
+
+ def _force(value):
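+            # An empty string means "no --force option"; any other value is
+            # appended to "--force-", so e.g. force="depends" is expected to
+            # render as "--force-depends".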
+ if value == "":
+ value = None
+ return cmd_runner_fmt.as_optval("--force-")(value, ctx_ignore_none=True)
+
+ self.runner = CmdRunner(
+ self.module,
+ command="opkg",
+ arg_formats=dict(
+ package=cmd_runner_fmt.as_list(),
+ state=cmd_runner_fmt.as_map(state_map),
+ force=cmd_runner_fmt.as_func(_force),
+ update_cache=cmd_runner_fmt.as_bool("update")
+ ),
+ )
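+        # Sketch of the resulting composition (assuming the CmdRunner semantics
+        # above): self.runner("state force package").run(state="present",
+        # force="reinstall", package=["vim"]) runs roughly
+        # "opkg install --force-reinstall vim".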
+
+ if self.vars.update_cache:
+ rc, dummy, dummy = self.runner("update_cache").run()
+ if rc != 0:
+ self.do_raise("could not update package db")
+
+ @staticmethod
+ def split_name_and_version(package):
+ """ Split the name and the version when using the NAME=VERSION syntax """
+        parts = package.split('=', 1)
+        if len(parts) == 1:
+            return parts[0], None
+        else:
+            return parts[0], parts[1]
+
+ def _package_in_desired_state(self, name, want_installed, version=None):
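+        # "opkg list-installed <name>" is expected to print lines like
+        # "vim - 8.2-r0"; a line starting with "<name> - " (plus
+        # "<version> " when a version was requested) means it is installed.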
+ dummy, out, dummy = self.runner("state package").run(state="query", package=name)
+
+ has_package = out.startswith(name + " - %s" % ("" if not version else (version + " ")))
+ return want_installed == has_package
+
+ def state_present(self):
+ with self.runner("state force package") as ctx:
+ for package in self.vars.name:
+ pkg_name, pkg_version = self.split_name_and_version(package)
+ if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version) or self.vars.force == "reinstall":
+ ctx.run(package=package)
+ if not self._package_in_desired_state(pkg_name, want_installed=True, version=pkg_version):
+ self.do_raise("failed to install %s" % package)
+ self.vars.install_c += 1
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+ if self.vars.install_c > 0:
+ self.vars.msg = "installed %s package(s)" % (self.vars.install_c)
+ else:
+ self.vars.msg = "package(s) already present"
+
+ def state_absent(self):
+ with self.runner("state force package") as ctx:
+ for package in self.vars.name:
+ package, dummy = self.split_name_and_version(package)
+ if not self._package_in_desired_state(package, want_installed=False):
+ ctx.run(package=package)
+ if not self._package_in_desired_state(package, want_installed=False):
+ self.do_raise("failed to remove %s" % package)
+ self.vars.remove_c += 1
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+ if self.vars.remove_c > 0:
+ self.vars.msg = "removed %s package(s)" % (self.vars.remove_c)
+ else:
+ self.vars.msg = "package(s) already absent"
+
+ state_installed = state_present
+ state_removed = state_absent
+
+
+def main():
+ Opkg.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/osx_defaults.py b/ansible_collections/community/general/plugins/modules/osx_defaults.py
new file mode 100644
index 000000000..161584373
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/osx_defaults.py
@@ -0,0 +1,409 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, GeekChimp - Franck Nijhof <franck@geekchimp.com> (DO NOT CONTACT!)
+# Copyright (c) 2019, Ansible project
+# Copyright (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: osx_defaults
+author:
+# DO NOT RE-ADD GITHUB HANDLE!
+- Franck Nijhof (!UNKNOWN)
+short_description: Manage macOS user defaults
+description:
+ - osx_defaults allows users to read, write, and delete macOS user defaults from Ansible scripts.
+ - macOS applications and other programs use the defaults system to record user preferences and other
+ information that must be maintained when the applications are not running (such as default font for new
+ documents, or the position of an Info panel).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ domain:
+ description:
+ - The domain is a domain name of the form C(com.companyname.appname).
+ type: str
+ default: NSGlobalDomain
+ host:
+ description:
+ - The host on which the preference should apply.
+ - The special value C(currentHost) corresponds to the C(-currentHost) switch of the defaults commandline tool.
+ type: str
+ key:
+ description:
+ - The key of the user preference.
+ type: str
+ type:
+ description:
+ - The type of value to write.
+ type: str
+ choices: [ array, bool, boolean, date, float, int, integer, string ]
+ default: string
+ array_add:
+ description:
+ - Add new elements to the array for a key which has an array as its value.
+ type: bool
+ default: false
+ value:
+ description:
+ - The value to write.
+ - Only required when I(state=present).
+ type: raw
+ state:
+ description:
+ - The state of the user defaults.
+    - If set to C(list) will query the given parameter specified by C(key). Returns 'null' if nothing is found or the key is misspelled.
+ - C(list) added in version 2.8.
+ type: str
+ choices: [ absent, list, present ]
+ default: present
+ path:
+ description:
+ - The path in which to search for C(defaults).
+ type: str
+ default: /usr/bin:/usr/local/bin
+notes:
+  - Apple Mac caches defaults. You may need to log out and log in to apply the changes.
+'''
+
+EXAMPLES = r'''
+- name: Set boolean valued key for application domain
+ community.general.osx_defaults:
+ domain: com.apple.Safari
+ key: IncludeInternalDebugMenu
+ type: bool
+ value: true
+ state: present
+
+- name: Set string valued key for global domain
+ community.general.osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+ state: present
+
+- name: Set int valued key for arbitrary plist
+ community.general.osx_defaults:
+ domain: /Library/Preferences/com.apple.SoftwareUpdate
+ key: AutomaticCheckEnabled
+ type: int
+ value: 1
+ become: true
+
+- name: Set int valued key only for the current host
+ community.general.osx_defaults:
+ domain: com.apple.screensaver
+ host: currentHost
+ key: showClock
+ type: int
+ value: 1
+
+- name: Defaults to global domain and setting value
+ community.general.osx_defaults:
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeters
+
+- name: Setting an array valued key
+ community.general.osx_defaults:
+ key: AppleLanguages
+ type: array
+ value:
+ - en
+ - nl
+
+- name: Removing a key
+ community.general.osx_defaults:
+ domain: com.geekchimp.macable
+ key: ExampleKeyToRemove
+ state: absent
+'''
+
+from datetime import datetime
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import binary_type, text_type
+
+
+# exceptions --------------------------------------------------------------- {{{
+class OSXDefaultsException(Exception):
+ def __init__(self, msg):
+ self.message = msg
+
+
+# /exceptions -------------------------------------------------------------- }}}
+
+# class MacDefaults -------------------------------------------------------- {{{
+class OSXDefaults(object):
+ """ Class to manage Mac OS user defaults """
+
+ # init ---------------------------------------------------------------- {{{
+ def __init__(self, module):
+ """ Initialize this module. Finds 'defaults' executable and preps the parameters """
+ # Initial var for storing current defaults value
+ self.current_value = None
+ self.module = module
+ self.domain = module.params['domain']
+ self.host = module.params['host']
+ self.key = module.params['key']
+ self.type = module.params['type']
+ self.array_add = module.params['array_add']
+ self.value = module.params['value']
+ self.state = module.params['state']
+ self.path = module.params['path']
+
+ # Try to find the defaults executable
+ self.executable = self.module.get_bin_path(
+ 'defaults',
+ required=False,
+ opt_dirs=self.path.split(':'),
+ )
+
+ if not self.executable:
+ raise OSXDefaultsException("Unable to locate defaults executable.")
+
+ # Ensure the value is the correct type
+ if self.state != 'absent':
+ self.value = self._convert_type(self.type, self.value)
+
+ # /init --------------------------------------------------------------- }}}
+
+ # tools --------------------------------------------------------------- {{{
+ @staticmethod
+ def is_int(value):
+ as_str = str(value)
+ if (as_str.startswith("-")):
+ return as_str[1:].isdigit()
+ else:
+ return as_str.isdigit()
+
+ @staticmethod
+ def _convert_type(data_type, value):
+ """ Converts value to given type """
+ if data_type == "string":
+ return str(value)
+ elif data_type in ["bool", "boolean"]:
+ if isinstance(value, (binary_type, text_type)):
+ value = value.lower()
+ if value in [True, 1, "true", "1", "yes"]:
+ return True
+ elif value in [False, 0, "false", "0", "no"]:
+ return False
+ raise OSXDefaultsException("Invalid boolean value: {0}".format(repr(value)))
+ elif data_type == "date":
+ try:
+ return datetime.strptime(value.split("+")[0].strip(), "%Y-%m-%d %H:%M:%S")
+ except ValueError:
+ raise OSXDefaultsException(
+ "Invalid date value: {0}. Required format yyy-mm-dd hh:mm:ss.".format(repr(value))
+ )
+ elif data_type in ["int", "integer"]:
+ if not OSXDefaults.is_int(value):
+ raise OSXDefaultsException("Invalid integer value: {0}".format(repr(value)))
+ return int(value)
+ elif data_type == "float":
+ try:
+ value = float(value)
+ except ValueError:
+ raise OSXDefaultsException("Invalid float value: {0}".format(repr(value)))
+ return value
+ elif data_type == "array":
+ if not isinstance(value, list):
+ raise OSXDefaultsException("Invalid value. Expected value to be an array")
+ return value
+
+ raise OSXDefaultsException('Type is not supported: {0}'.format(data_type))
+
+ def _host_args(self):
+ """ Returns a normalized list of commandline arguments based on the "host" attribute """
+ if self.host is None:
+ return []
+ elif self.host == 'currentHost':
+ return ['-currentHost']
+ else:
+ return ['-host', self.host]
+
+ def _base_command(self):
+ """ Returns a list containing the "defaults" executable and any common base arguments """
+ return [self.executable] + self._host_args()
+
+ @staticmethod
+ def _convert_defaults_str_to_list(value):
+ """ Converts array output from defaults to an list """
+ # Split output of defaults. Every line contains a value
+ value = value.splitlines()
+
+ # Remove first and last item, those are not actual values
+ value.pop(0)
+ value.pop(-1)
+
+ # Remove spaces at beginning and comma (,) at the end, unquote and unescape double quotes
+ value = [re.sub('^ *"?|"?,? *$', '', x.replace('\\"', '"')) for x in value]
+
+ return value
+
+ # /tools -------------------------------------------------------------- }}}
+
+ # commands ------------------------------------------------------------ {{{
+ def read(self):
+ """ Reads value of this domain & key from defaults """
+ # First try to find out the type
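+        # For example, "defaults read-type NSGlobalDomain AppleMeasurementUnits"
+        # is expected to print "Type is string"; the prefix is stripped below.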
+ rc, out, err = self.module.run_command(self._base_command() + ["read-type", self.domain, self.key])
+
+ # If RC is 1, the key does not exist
+ if rc == 1:
+ return None
+
+        # If the RC is not 0, then something terrible happened!
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key type from defaults: %s" % err)
+
+ # Ok, lets parse the type from output
+ data_type = out.strip().replace('Type is ', '')
+
+ # Now get the current value
+ rc, out, err = self.module.run_command(self._base_command() + ["read", self.domain, self.key])
+
+ # Strip output
+ out = out.strip()
+
+        # A non-zero RC at this point is rather strange...
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while reading key value from defaults: %s" % err)
+
+ # Convert string to list when type is array
+ if data_type == "array":
+ out = self._convert_defaults_str_to_list(out)
+
+ # Store the current_value
+ self.current_value = self._convert_type(data_type, out)
+
+ def write(self):
+ """ Writes value to this domain & key to defaults """
+ # We need to convert some values so the defaults commandline understands it
+ if isinstance(self.value, bool):
+ if self.value:
+ value = "TRUE"
+ else:
+ value = "FALSE"
+ elif isinstance(self.value, (int, float)):
+ value = str(self.value)
+ elif self.array_add and self.current_value is not None:
+ value = list(set(self.value) - set(self.current_value))
+ elif isinstance(self.value, datetime):
+ value = self.value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ value = self.value
+
+ # When the type is array and array_add is enabled, morph the type :)
+ if self.type == "array" and self.array_add:
+ self.type = "array-add"
+
+ # All values should be a list, for easy passing it to the command
+ if not isinstance(value, list):
+ value = [value]
+
+ rc, out, err = self.module.run_command(self._base_command() + ['write', self.domain, self.key, '-' + self.type] + value,
+ expand_user_and_vars=False)
+
+ if rc != 0:
+ raise OSXDefaultsException('An error occurred while writing value to defaults: %s' % err)
+
+ def delete(self):
+ """ Deletes defaults key from domain """
+ rc, out, err = self.module.run_command(self._base_command() + ['delete', self.domain, self.key])
+ if rc != 0:
+ raise OSXDefaultsException("An error occurred while deleting key from defaults: %s" % err)
+
+ # /commands ----------------------------------------------------------- }}}
+
+    # run ----------------------------------------------------------------- {{{
+    def run(self):
+        """ Does the magic: reads the current value and applies the requested state """
+
+ # Get the current value from defaults
+ self.read()
+
+ if self.state == 'list':
+ self.module.exit_json(key=self.key, value=self.current_value)
+
+ # Handle absent state
+ if self.state == "absent":
+ if self.current_value is None:
+ return False
+ if self.module.check_mode:
+ return True
+ self.delete()
+ return True
+
+ # There is a type mismatch! Given type does not match the type in defaults
+ value_type = type(self.value)
+ if self.current_value is not None and not isinstance(self.current_value, value_type):
+ raise OSXDefaultsException("Type mismatch. Type in defaults: %s" % type(self.current_value).__name__)
+
+        # Current value matches the given value. Nothing needs to be done. Arrays need extra care
+ if self.type == "array" and self.current_value is not None and not self.array_add and \
+ set(self.current_value) == set(self.value):
+ return False
+ elif self.type == "array" and self.current_value is not None and self.array_add and len(list(set(self.value) - set(self.current_value))) == 0:
+ return False
+ elif self.current_value == self.value:
+ return False
+
+ if self.module.check_mode:
+ return True
+
+ # Change/Create/Set given key/value for domain in defaults
+ self.write()
+ return True
+
+ # /run ---------------------------------------------------------------- }}}
+
+
+# /class MacDefaults ------------------------------------------------------ }}}
+
+
+# main -------------------------------------------------------------------- {{{
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', default='NSGlobalDomain'),
+ host=dict(type='str'),
+ key=dict(type='str', no_log=False),
+ type=dict(type='str', default='string', choices=['array', 'bool', 'boolean', 'date', 'float', 'int', 'integer', 'string']),
+ array_add=dict(type='bool', default=False),
+ value=dict(type='raw'),
+ state=dict(type='str', default='present', choices=['absent', 'list', 'present']),
+ path=dict(type='str', default='/usr/bin:/usr/local/bin'),
+ ),
+ supports_check_mode=True,
+ required_if=(
+ ('state', 'present', ['value']),
+ ),
+ )
+
+ try:
+ defaults = OSXDefaults(module=module)
+ module.exit_json(changed=defaults.run())
+ except OSXDefaultsException as e:
+ module.fail_json(msg=e.message)
+
+
+# /main ------------------------------------------------------------------- }}}
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
new file mode 100644
index 000000000..cd3639a4c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_ip_failover.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_failover
+short_description: Manage OVH IP failover address
+description:
+  - Manage OVH (French European hosting provider) IP failover addresses. For now, this module can only be used to move
+    an IP failover (or failover block) between services.
+author: "Pascal HERAUD (@pascalheraud)"
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+    You have to create an application (a key and secret) with a consumer
+    key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh >= 0.4.8
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ required: true
+ description:
+      - The IP address to manage (can be a single IP like 1.1.1.1
+        or a block like 1.1.1.1/28).
+ type: str
+ service:
+ required: true
+ description:
+      - The name of the OVH service this IP address should be routed to.
+ type: str
+ endpoint:
+ required: true
+ description:
+      - The endpoint to use (for instance ovh-eu).
+ type: str
+ wait_completion:
+ required: false
+ default: true
+ type: bool
+ description:
+      - If true, the module will wait for the IP address to be moved.
+        If false, exit without waiting. The taskId will be returned
+        in the module output.
+ wait_task_completion:
+ required: false
+ default: 0
+ description:
+ - If not 0, the module will wait for this task id to be
+ completed. Use wait_task_completion if you want to wait for
+ completion of a previously executed task with
+ wait_completion=false. You can execute this module repeatedly on
+ a list of failover IPs using wait_completion=false (see examples)
+ type: int
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ required: false
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+ type: int
+
+'''
+
+EXAMPLES = '''
+# Route an IP address 1.1.1.1 to the service ns666.ovh.net
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_completion: false
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+ register: moved
+- community.general.ovh_ip_failover:
+ name: 1.1.1.1
+ service: ns666.ovh.net
+ endpoint: ovh-eu
+ wait_task_completion: "{{moved.taskId}}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
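+
+# Sketch of moving several failover IPs in parallel ("failover_ips" is a
+# hypothetical list variable): start every move without waiting, then wait
+# for each returned taskId.
+- community.general.ovh_ip_failover:
+    name: "{{ item }}"
+    service: ns666.ovh.net
+    endpoint: ovh-eu
+    wait_completion: false
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+  loop: "{{ failover_ips }}"
+  register: moves
+- community.general.ovh_ip_failover:
+    name: "{{ item.item }}"
+    service: ns666.ovh.net
+    endpoint: ovh-eu
+    wait_task_completion: "{{ item.taskId }}"
+    application_key: yourkey
+    application_secret: yoursecret
+    consumer_key: yourconsumerkey
+  loop: "{{ moves.results }}"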
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote_plus
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while client.get('/ip/{0}/task'.format(quote_plus(name)),
+ function='genericMoveFloatingIp',
+ status='todo'):
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def waitForTaskDone(client, name, taskId, timeout):
+ currentTimeout = timeout
+ while True:
+ task = client.get('/ip/{0}/task/{1}'.format(quote_plus(name), taskId))
+ if task['status'] == 'done':
+ return True
+ time.sleep(5) # Delay for 5 sec because it's long to wait completion, do not harass the API
+ currentTimeout -= 5
+ if currentTimeout < 0:
+ return False
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ service=dict(required=True),
+ endpoint=dict(required=True),
+ wait_completion=dict(default=True, type='bool'),
+ wait_task_completion=dict(default=0, type='int'),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ result = dict(
+ changed=False
+ )
+
+ if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ service = module.params.get('service')
+ timeout = module.params.get('timeout')
+ wait_completion = module.params.get('wait_completion')
+ wait_task_completion = module.params.get('wait_task_completion')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ ips = client.get('/ip', ip=name, type='failover')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of ips, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in ips and '{0}/32'.format(name) not in ips:
+ module.fail_json(msg='IP {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ ipproperties = client.get('/ip/{0}'.format(quote_plus(name)))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the properties '
+ 'of the ip, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if ipproperties['routedTo']['serviceName'] != service:
+ if not module.check_mode:
+ if wait_task_completion == 0:
+ # Move the IP and get the created taskId
+ task = client.post('/ip/{0}/move'.format(quote_plus(name)), to=service)
+ taskId = task['taskId']
+ result['moved'] = True
+ else:
+ # Just wait for the given taskId to be completed
+ taskId = wait_task_completion
+ result['moved'] = False
+ result['taskId'] = taskId
+ if wait_completion or wait_task_completion != 0:
+ if not waitForTaskDone(client, name, taskId, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of move ip to service'.format(timeout))
+ result['waited'] = True
+ else:
+ result['waited'] = False
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py b/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 000000000..f70b5804a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,320 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+author: Pascal Heraud (@pascalheraud)
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+ You have to create an application (a key and secret) with a consumer
+    key as described in U(https://docs.ovh.com/gb/en/customer/first-steps-with-ovh-api/)
+requirements:
+ - ovh > 0.3.5
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ required: true
+ description:
+      - The internal name of the LoadBalancing IP (ip-X.X.X.X).
+ type: str
+ backend:
+ required: true
+ description:
+      - The IP address of the backend to update/modify/delete.
+ type: str
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - Determines whether the backend is to be created/modified
+ or deleted
+ type: str
+ probe:
+ default: 'none'
+    choices: ['none', 'http', 'icmp', 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ type: str
+ weight:
+ default: 8
+ description:
+ - Determines the weight for this backend
+ type: int
+ endpoint:
+ required: true
+ description:
+      - The endpoint to use (for instance ovh-eu).
+ type: str
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ type: str
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ type: str
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ type: str
+ timeout:
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed.
+ type: int
+
+'''
+
+EXAMPLES = '''
+- name: Add or modify the backend '212.1.1.1' on the load balancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+- name: Remove the backend '212.1.1.1' from the load balancing 'ip-1.1.1.1'
+  community.general.ovh_ip_loadbalancing_backend:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def getOvhClient(ansibleModule):
+ endpoint = ansibleModule.params.get('endpoint')
+ application_key = ansibleModule.params.get('application_key')
+ application_secret = ansibleModule.params.get('application_secret')
+ consumer_key = ansibleModule.params.get('consumer_key')
+
+ return ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+
+def waitForNoTask(client, name, timeout):
+ currentTimeout = timeout
+ while len(client.get('/ip/loadBalancing/{0}/task'.format(name))) > 0:
+ time.sleep(1) # Delay for 1 sec
+ currentTimeout -= 1
+ if currentTimeout < 0:
+ return False
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ backend=dict(required=True),
+ weight=dict(default=8, type='int'),
+ probe=dict(default='none',
+ choices=['none', 'http', 'icmp', 'oco']),
+ state=dict(default='present', choices=['present', 'absent']),
+ endpoint=dict(required=True),
+ application_key=dict(required=True, no_log=True),
+ application_secret=dict(required=True, no_log=True),
+ consumer_key=dict(required=True, no_log=True),
+ timeout=dict(default=120, type='int')
+ )
+ )
+
+ if not HAS_OVH:
+        module.fail_json(msg='ovh-api python module '
+                             'is required to run this module')
+
+ # Get parameters
+ name = module.params.get('name')
+ state = module.params.get('state')
+ backend = module.params.get('backend')
+ weight = module.params.get('weight')
+ probe = module.params.get('probe')
+ timeout = module.params.get('timeout')
+
+ # Connect to OVH API
+ client = getOvhClient(module)
+
+ # Check that the load balancing exists
+ try:
+ loadBalancings = client.get('/ip/loadBalancing')
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of loadBalancing, '
+ 'check application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+
+ if name not in loadBalancings:
+ module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))
+
+ # Check that no task is pending before going on
+ try:
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for no pending '
+ 'tasks before executing the module '.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of pending tasks '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ try:
+ backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the list of backends '
+ 'of the loadBalancing, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ backendExists = backend in backends
+ moduleChanged = False
+ if state == "absent":
+ if backendExists:
+ # Remove backend
+ try:
+ client.delete(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of removing backend task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for deleting the backend, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+ else:
+ if backendExists:
+ # Get properties
+ try:
+ backendProperties = client.get(
+ '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for getting the backend properties, '
+ 'check application key, secret, consumerkey and '
+ 'parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if (backendProperties['weight'] != weight):
+ # Change weight
+ try:
+ client.post(
+ '/ip/loadBalancing/{0}/backend/{1}/setWeight'
+ .format(name, backend), weight=weight)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion '
+ 'of setWeight to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the weight of the '
+ 'backend, check application key, secret, consumerkey '
+ 'and parameters. Error returned by OVH api was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ if (backendProperties['probe'] != probe):
+ # Change probe
+ backendProperties['probe'] = probe
+ try:
+ client.put(
+ '/ip/loadBalancing/{0}/backend/{1}'
+ .format(name, backend), probe=probe)
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'setProbe to backend task'
+ .format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for updating the probe of '
+ 'the backend, check application key, secret, '
+ 'consumerkey and parameters. Error returned by OVH api '
+ 'was : {0}'
+ .format(apiError))
+ moduleChanged = True
+
+ else:
+ # Creates backend
+ try:
+ try:
+ client.post('/ip/loadBalancing/{0}/backend'.format(name),
+ ipBackend=backend, probe=probe, weight=weight)
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'
+ .format(apiError))
+
+ if not waitForNoTask(client, name, timeout):
+ module.fail_json(
+ msg='Timeout of {0} seconds while waiting for completion of '
+ 'backend creation task'.format(timeout))
+ except APIError as apiError:
+ module.fail_json(
+ msg='Unable to call OVH api for creating the backend, check '
+ 'application key, secret, consumerkey and parameters. '
+ 'Error returned by OVH api was : {0}'.format(apiError))
+ moduleChanged = True
+
+ module.exit_json(changed=moduleChanged)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
new file mode 100644
index 000000000..43d64e618
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ovh_monthly_billing.py
@@ -0,0 +1,163 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Francois Lallart (@fraff)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: ovh_monthly_billing
+author: Francois Lallart (@fraff)
+version_added: '0.2.0'
+short_description: Manage OVH monthly billing
+description:
+  - Enable monthly billing on OVH cloud instances (be aware that OVH does not allow disabling it).
+requirements: [ "ovh" ]
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ project_id:
+ required: true
+ type: str
+ description:
+      - ID of the project; get it with U(https://api.ovh.com/console/#/cloud/project#GET).
+ instance_id:
+ required: true
+ type: str
+ description:
+      - ID of the instance; get it with U(https://api.ovh.com/console/#/cloud/project/%7BserviceName%7D/instance#GET).
+  endpoint:
+    type: str
+    description:
+      - The endpoint to use (for instance C(ovh-eu)).
+  application_key:
+    type: str
+    description:
+      - The application key to use.
+  application_secret:
+    type: str
+    description:
+      - The application secret to use.
+  consumer_key:
+    type: str
+    description:
+      - The consumer key to use.
+'''
+
+EXAMPLES = '''
+- name: Basic usage, using auth from /etc/ovh.conf
+ community.general.ovh_monthly_billing:
+ project_id: 0c727a20aa144485b70c44dee9123b46
+ instance_id: 8fa89ad2-8f08-4220-9fa4-9695ea23e948
+
+# Get the OpenStack cloud ID and instance ID; OVH uses them in its API
+- name: Get openstack cloud ID and instance ID
+ os_server_info:
+ cloud: myProjectName
+ region_name: myRegionName
+ server: myServerName
+ register: openstack_servers
+
+- name: Use IDs
+ community.general.ovh_monthly_billing:
+ project_id: "{{ openstack_servers.0.tenant_id }}"
+ instance_id: "{{ openstack_servers.0.id }}"
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import traceback
+
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+ OVH_IMPORT_ERROR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_id=dict(required=True),
+ instance_id=dict(required=True),
+ endpoint=dict(required=False),
+ application_key=dict(required=False, no_log=True),
+ application_secret=dict(required=False, no_log=True),
+ consumer_key=dict(required=False, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ # Get parameters
+ project_id = module.params.get('project_id')
+ instance_id = module.params.get('instance_id')
+ endpoint = module.params.get('endpoint')
+ application_key = module.params.get('application_key')
+ application_secret = module.params.get('application_secret')
+ consumer_key = module.params.get('consumer_key')
+ project = ""
+ instance = ""
+ ovh_billing_status = ""
+
+ if not HAS_OVH:
+ module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
+
+ # Connect to OVH API
+ client = ovh.Client(
+ endpoint=endpoint,
+ application_key=application_key,
+ application_secret=application_secret,
+ consumer_key=consumer_key
+ )
+
+    # Check that the project exists
+ try:
+ project = client.get('/cloud/project/{0}'.format(project_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='project {0} does not exist'.format(project_id))
+
+ # Check that the instance exists
+ try:
+ instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
+ except ovh.exceptions.ResourceNotFoundError:
+ module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
+
+    # Is monthlyBilling already enabled or pending?
+ if instance['monthlyBilling'] is not None:
+ if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
+ module.exit_json(changed=False, ovh_billing_status=instance['monthlyBilling'])
+
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Dry Run!")
+
+ try:
+ ovh_billing_status = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
+ module.exit_json(changed=True, ovh_billing_status=ovh_billing_status['monthlyBilling'])
+ except APIError as apiError:
+ module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
+
+ # We should never reach here
+ module.fail_json(msg='Internal ovh_monthly_billing module error')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
new file mode 100644
index 000000000..47b827908
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pacemaker_cluster.py
@@ -0,0 +1,230 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacemaker_cluster
+short_description: Manage pacemaker clusters
+author:
+ - Mathieu Bultel (@matbu)
+description:
+  - This module can manage a pacemaker cluster and nodes from Ansible using
+    the pacemaker CLI.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+      - Indicate desired state of the cluster.
+ choices: [ cleanup, offline, online, restart ]
+ type: str
+ node:
+ description:
+      - Specify which node of the cluster you want to manage. When unset, the
+        module manages the cluster status itself; C(all) checks the status of all nodes.
+ type: str
+ timeout:
+ description:
+      - Timeout, in seconds, after which the module considers that the action has failed.
+ default: 300
+ type: int
+ force:
+ description:
+      - Force the change of the cluster state.
+ type: bool
+ default: true
+'''
+EXAMPLES = '''
+---
+- name: Set cluster Online
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Get cluster state
+ community.general.pacemaker_cluster:
+ state: online
+'''
+
+RETURN = '''
+changed:
+  description: True if the cluster state has changed.
+  type: bool
+  returned: always
+out:
+  description: The output of the current state of the cluster. It returns a
+    list of the node states.
+  type: str
+  sample: '[[" overcloud-controller-0", " Online"]]'
+  returned: always
+rc:
+  description: Exit code of the module.
+  type: int
+ returned: always
+'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+_PCS_CLUSTER_DOWN = "Error: cluster is not currently running on this node"
+
+
+def get_cluster_status(module):
+ cmd = "pcs cluster status"
+ rc, out, err = module.run_command(cmd)
+    if _PCS_CLUSTER_DOWN in out:
+ return 'offline'
+ else:
+ return 'online'
+
+
+def get_node_status(module, node='all'):
+    if node == 'all':
+        cmd = "pcs cluster pcsd-status"
+    else:
+        cmd = "pcs cluster pcsd-status %s" % node
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+ status = []
+ for o in out.splitlines():
+ status.append(o.split(':'))
+ return status
+
+
+def clean_cluster(module, timeout):
+ cmd = "pcs resource cleanup"
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+
+def set_cluster(module, state, timeout, force):
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+    while time.time() < t + timeout:
+        cluster_state = get_cluster_status(module)
+        if cluster_state == state:
+            ready = True
+            break
+        # avoid busy-polling pcs while waiting for the state change
+        time.sleep(2)
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def set_node(module, state, timeout, force, node='all'):
+ # map states
+ if state == 'online':
+ cmd = "pcs cluster start"
+ if state == 'offline':
+ cmd = "pcs cluster stop"
+ if force:
+ cmd = "%s --force" % cmd
+
+ nodes_state = get_node_status(module, node)
+ for node in nodes_state:
+ if node[1].strip().lower() != state:
+ cmd = "%s %s" % (cmd, node[0].strip())
+ rc, out, err = module.run_command(cmd)
+ if rc == 1:
+ module.fail_json(msg="Command execution failed.\nCommand: `%s`\nError: %s" % (cmd, err))
+
+ t = time.time()
+ ready = False
+    while time.time() < t + timeout:
+        nodes_state = get_node_status(module)
+        for node in nodes_state:
+            if node[1].strip().lower() == state:
+                ready = True
+                break
+        if ready:
+            break
+        # avoid busy-polling pcs while waiting for the node state change
+        time.sleep(2)
+ if not ready:
+ module.fail_json(msg="Failed to set the state `%s` on the cluster\n" % (state))
+
+
+def main():
+ argument_spec = dict(
+ state=dict(type='str', choices=['online', 'offline', 'restart', 'cleanup']),
+ node=dict(type='str'),
+ timeout=dict(type='int', default=300),
+ force=dict(type='bool', default=True),
+ )
+
+ module = AnsibleModule(
+ argument_spec,
+ supports_check_mode=True,
+ )
+ changed = False
+ state = module.params['state']
+ node = module.params['node']
+ force = module.params['force']
+ timeout = module.params['timeout']
+
+ if state in ['online', 'offline']:
+ # Get cluster status
+ if node is None:
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == state:
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Fail to bring the cluster %s" % state)
+ else:
+ cluster_state = get_node_status(module, node)
+ # Check cluster state
+ for node_state in cluster_state:
+ if node_state[1].strip().lower() == state:
+ module.exit_json(changed=changed, out=cluster_state)
+ else:
+ # Set cluster status if needed
+ set_cluster(module, state, timeout, force)
+ cluster_state = get_node_status(module, node)
+ module.exit_json(changed=True, out=cluster_state)
+
+ if state in ['restart']:
+ set_cluster(module, 'offline', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'offline':
+ set_cluster(module, 'online', timeout, force)
+ cluster_state = get_cluster_status(module)
+ if cluster_state == 'online':
+ module.exit_json(changed=True, out=cluster_state)
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be started")
+ else:
+ module.fail_json(msg="Failed during the restart of the cluster, the cluster can't be stopped")
+
+ if state in ['cleanup']:
+ clean_cluster(module, timeout)
+ cluster_state = get_cluster_status(module)
+ module.exit_json(changed=True,
+ out=cluster_state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_device.py b/ansible_collections/community/general/plugins/modules/packet_device.py
new file mode 100644
index 000000000..d220c5f8f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_device.py
@@ -0,0 +1,682 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2016, Tomas Karasek <tom.to.the.k@gmail.com>
+# Copyright (c) 2016, Matt Baldwin <baldwin@stackpointcloud.com>
+# Copyright (c) 2016, Thibaud Morel l'Horset <teebes@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_device
+
+short_description: Manage a bare metal server in the Packet Host
+
+description:
+  - Manage a bare metal server in the Packet Host (a "device" in API terms).
+  - When the machine is created, the module can optionally wait for it to get a public IP address, or to reach the C(active) state.
+ - This module has a dependency on packet >= 1.0.
+ - API is documented at U(https://www.packet.net/developers/api/devices).
+
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+ - Thibaud Morel l'Horset (@teebes) <teebes@gmail.com>
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ count:
+ description:
+      - The number of devices to create. The count number can be included in the hostname via the %d string formatter.
+ default: 1
+ type: int
+
+ count_offset:
+ description:
+ - From which number to start the count.
+ default: 1
+ type: int
+
+ device_ids:
+ description:
+ - List of device IDs on which to operate.
+ type: list
+ elements: str
+
+ tags:
+ description:
+ - List of device tags.
+ - Currently implemented only for device creation.
+ type: list
+ elements: str
+ version_added: '0.2.0'
+
+ facility:
+ description:
+ - Facility slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/facilities/).
+ type: str
+
+ features:
+ description:
+ - Dict with "features" for device creation. See Packet API docs for details.
+ type: dict
+
+ hostnames:
+ description:
+ - A hostname of a device, or a list of hostnames.
+      - If given a string or a one-item list, you can use the C("%d") Python string format to expand numbers from I(count).
+      - If only one hostname is given, it may be expanded to a list if I(count)>1.
+ aliases: [name]
+ type: list
+ elements: str
+
+ locked:
+ description:
+ - Whether to lock a created device.
+ default: false
+ aliases: [lock]
+ type: bool
+
+ operating_system:
+ description:
+ - OS slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/operatingsystems/).
+ type: str
+
+ plan:
+ description:
+ - Plan slug for device creation. See Packet API for current list - U(https://www.packet.net/developers/api/plans/).
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ state:
+ description:
+ - Desired state of the device.
+ - If set to C(present) (the default), the module call will return immediately after the device-creating HTTP request successfully returns.
+      - If set to C(active), the module call will block until all the specified devices are in state active according to the Packet API, or until I(wait_timeout) elapses.
+ choices: [present, absent, active, inactive, rebooted]
+ default: present
+ type: str
+
+ user_data:
+ description:
+      - Userdata blob made available to the machine.
+ type: str
+
+ wait_for_public_IPv:
+ description:
+ - Whether to wait for the instance to be assigned a public IPv4/IPv6 address.
+      - If set to 4, it will wait until a public IPv4 address is assigned to the instance.
+      - If set to 6, it will wait until a public IPv6 address is assigned to the instance.
+ choices: [4,6]
+ type: int
+
+ wait_timeout:
+ description:
+ - How long (seconds) to wait either for automatic IP address assignment, or for the device to reach the C(active) I(state).
+      - If I(wait_for_public_IPv) is set and I(state) is C(active), the module will wait for both events consecutively, applying the timeout twice.
+ default: 900
+ type: int
+
+ ipxe_script_url:
+ description:
+ - URL of custom iPXE script for provisioning.
+ - More about custom iPXE for Packet devices at U(https://help.packet.net/technical/infrastructure/custom-ipxe).
+ type: str
+ default: ''
+
+ always_pxe:
+ description:
+ - Persist PXE as the first boot option.
+ - Normally, the PXE process happens only on the first boot. Set this arg to have your device continuously boot to iPXE.
+ default: false
+ type: bool
+
+
+requirements:
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+# Creating devices
+
+- name: Create 1 device
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ tags: ci-xyz
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+# Create the same device and wait until it is in state "active", (when it's
+# ready for other API operations). Fail if the device is not "active" in
+# 10 minutes.
+
+- name: Create device and wait up to 10 minutes for active state
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+ wait_timeout: 600
+
+- name: Create 3 ubuntu devices called server-01, server-02 and server-03
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: server-%02d
+ count: 3
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+
+- name: Create 3 coreos devices with userdata, wait until they get IPs and then wait for SSH
+ hosts: localhost
+ tasks:
+ - name: Create 3 devices and register their facts
+ community.general.packet_device:
+ hostnames: [coreos-one, coreos-two, coreos-three]
+ operating_system: coreos_stable
+ plan: baremetal_0
+ facility: ewr1
+ locked: true
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ wait_for_public_IPv: 4
+ user_data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - {{ lookup('file', 'my_packet_sshkey') }}
+ coreos:
+ etcd:
+ discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
+ addr: $private_ipv4:4001
+ peer-addr: $private_ipv4:7001
+ fleet:
+ public-ip: $private_ipv4
+ units:
+ - name: etcd.service
+ command: start
+ - name: fleet.service
+ command: start
+ register: newhosts
+
+ - name: Wait for ssh
+ ansible.builtin.wait_for:
+ delay: 1
+ host: "{{ item.public_ipv4 }}"
+ port: 22
+ state: started
+ timeout: 500
+ with_items: "{{ newhosts.devices }}"
+
+
+# Other states of devices
+
+- name: Remove 3 devices by uuid
+ hosts: localhost
+ tasks:
+ - community.general.packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ state: absent
+ device_ids:
+ - 1fb4faf8-a638-4ac7-8f47-86fe514c30d8
+ - 2eb4faf8-a638-4ac7-8f47-86fe514c3043
+ - 6bb4faf8-a638-4ac7-8f47-86fe514c301f
+'''
+
+RETURN = '''
+changed:
+  description: True if a device was altered in any way (created, modified or removed).
+ type: bool
+ sample: true
+ returned: success
+
+devices:
+  description: Information about each device that was processed.
+ type: list
+ sample:
+ - {
+ "hostname": "my-server.com",
+ "id": "2a5122b9-c323-4d5c-b53c-9ad3f54273e7",
+ "public_ipv4": "147.229.15.12",
+ "private-ipv4": "10.0.15.12",
+ "tags": [],
+ "locked": false,
+ "state": "provisioning",
+ "public_ipv6": "2604:1380:2:5200::3"
+ }
+ returned: success
+''' # NOQA
+
+
+import os
+import re
+import time
+import uuid
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
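+# For illustration: HOSTNAME_RE accepts dot-separated labels of alphanumerics
+# with inner hyphens, so 'server-01.example.com' matches while '-bad' and
+# 'a..b' do not.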
+MAX_DEVICES = 100
+
+PACKET_DEVICE_STATES = (
+ 'queued',
+ 'provisioning',
+ 'failed',
+ 'powering_on',
+ 'active',
+ 'powering_off',
+ 'inactive',
+ 'rebooting',
+)
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'active', 'inactive', 'rebooted', 'present']
+
+
+def serialize_device(device):
+ """
+ Standard representation for a device as returned by various tasks::
+
+ {
+ 'id': 'device_id'
+ 'hostname': 'device_hostname',
+ 'tags': [],
+ 'locked': false,
+ 'state': 'provisioning',
+ 'ip_addresses': [
+ {
+ "address": "147.75.194.227",
+ "address_family": 4,
+ "public": true
+ },
+ {
+ "address": "2604:1380:2:5200::3",
+ "address_family": 6,
+ "public": true
+ },
+ {
+ "address": "10.100.11.129",
+ "address_family": 4,
+ "public": false
+ }
+ ],
+ "private_ipv4": "10.100.11.129",
+ "public_ipv4": "147.75.194.227",
+ "public_ipv6": "2604:1380:2:5200::3",
+ }
+
+ """
+ device_data = {}
+ device_data['id'] = device.id
+ device_data['hostname'] = device.hostname
+ device_data['tags'] = device.tags
+ device_data['locked'] = device.locked
+ device_data['state'] = device.state
+ device_data['ip_addresses'] = [
+ {
+ 'address': addr_data['address'],
+ 'address_family': addr_data['address_family'],
+ 'public': addr_data['public'],
+ }
+ for addr_data in device.ip_addresses
+ ]
+    # Also include each IP as a key for easier lookup in roles.
+ # Key names:
+ # - public_ipv4
+ # - public_ipv6
+ # - private_ipv4
+ # - private_ipv6 (if there is one)
+ for ipdata in device_data['ip_addresses']:
+ if ipdata['public']:
+ if ipdata['address_family'] == 6:
+ device_data['public_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['public_ipv4'] = ipdata['address']
+ elif not ipdata['public']:
+ if ipdata['address_family'] == 6:
+                # Packet doesn't assign private IPv6 yet, but maybe one
+                # day it will
+ device_data['private_ipv6'] = ipdata['address']
+ elif ipdata['address_family'] == 4:
+ device_data['private_ipv4'] = ipdata['address']
+ return device_data
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
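+# Note: uuid.UUID(..., version=4) normalizes the variant and version bits, so
+# the round-trip comparison above only accepts canonical lower-case v4 UUIDs
+# such as '2a5122b9-c323-4d5c-b53c-9ad3f54273e7'.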
+
+
+def listify_string_name_or_id(s):
+ if ',' in s:
+ return s.split(',')
+ else:
+ return [s]
+
+
+def get_hostname_list(module):
+    # hostnames is a list-typed param, so it should already arrive as a list
+    # (and it does, as of Ansible 2.2.1), but to be defensive we keep the
+    # code that converts an eventual string to a list
+ hostnames = module.params.get('hostnames')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ if isinstance(hostnames, str):
+ hostnames = listify_string_name_or_id(hostnames)
+ if not isinstance(hostnames, list):
+ raise Exception("name %s is not convertible to list" % hostnames)
+
+ # at this point, hostnames is a list
+ hostnames = [h.strip() for h in hostnames]
+
+ if (len(hostnames) > 1) and (count > 1):
+ _msg = ("If you set count>1, you should only specify one hostname "
+ "with the %d formatter, not a list of hostnames.")
+ raise Exception(_msg)
+
+ if (len(hostnames) == 1) and (count > 0):
+ hostname_spec = hostnames[0]
+ count_range = range(count_offset, count_offset + count)
+ if re.search(r"%\d{0,2}d", hostname_spec):
+ hostnames = [hostname_spec % i for i in count_range]
+ elif count > 1:
+ hostname_spec = '%s%%02d' % hostname_spec
+ hostnames = [hostname_spec % i for i in count_range]
+
+ for hn in hostnames:
+ if not is_valid_hostname(hn):
+ raise Exception("Hostname '%s' does not seem to be valid" % hn)
+
+ if len(hostnames) > MAX_DEVICES:
+ raise Exception("You specified too many hostnames, max is %d" %
+ MAX_DEVICES)
+ return hostnames
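+# For illustration: hostnames=['server-%02d'] with count=3 and count_offset=1
+# expands to ['server-01', 'server-02', 'server-03']; a bare spec such as
+# 'server' with count>1 is first rewritten to 'server%02d' and expanded the
+# same way.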
+
+
+def get_device_id_list(module):
+ device_ids = module.params.get('device_ids')
+
+ if isinstance(device_ids, str):
+ device_ids = listify_string_name_or_id(device_ids)
+
+ device_ids = [di.strip() for di in device_ids]
+
+ for di in device_ids:
+ if not is_valid_uuid(di):
+ raise Exception("Device ID '%s' does not seem to be valid" % di)
+
+ if len(device_ids) > MAX_DEVICES:
+ raise Exception("You specified too many devices, max is %d" %
+ MAX_DEVICES)
+ return device_ids
+
+
+def create_single_device(module, packet_conn, hostname):
+
+ for param in ('hostnames', 'operating_system', 'plan'):
+ if not module.params.get(param):
+ raise Exception("%s parameter is required for new device."
+ % param)
+ project_id = module.params.get('project_id')
+ plan = module.params.get('plan')
+ tags = module.params.get('tags')
+ user_data = module.params.get('user_data')
+ facility = module.params.get('facility')
+ operating_system = module.params.get('operating_system')
+ locked = module.params.get('locked')
+ ipxe_script_url = module.params.get('ipxe_script_url')
+ always_pxe = module.params.get('always_pxe')
+ if operating_system != 'custom_ipxe':
+ for param in ('ipxe_script_url', 'always_pxe'):
+ if module.params.get(param):
+ raise Exception('%s parameter is not valid for non custom_ipxe operating_system.' % param)
+
+ device = packet_conn.create_device(
+ project_id=project_id,
+ hostname=hostname,
+ tags=tags,
+ plan=plan,
+ facility=facility,
+ operating_system=operating_system,
+ userdata=user_data,
+ locked=locked,
+ ipxe_script_url=ipxe_script_url,
+ always_pxe=always_pxe)
+ return device
+
+
+def refresh_device_list(module, packet_conn, devices):
+ device_ids = [d.id for d in devices]
+ new_device_list = get_existing_devices(module, packet_conn)
+ return [d for d in new_device_list if d.id in device_ids]
+
+
+def wait_for_devices_active(module, packet_conn, watched_devices):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ refreshed = watched_devices
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, watched_devices)
+ if all(d.state == 'active' for d in refreshed):
+ return refreshed
+ time.sleep(5)
+ raise Exception("Waiting for state \"active\" timed out for devices: %s"
+ % [d.hostname for d in refreshed if d.state != "active"])
+
+
+def wait_for_public_IPv(module, packet_conn, created_devices):
+
+ def has_public_ip(addr_list, ip_v):
+ return any(a['public'] and a['address_family'] == ip_v and a['address'] for a in addr_list)
+
+ def all_have_public_ip(ds, ip_v):
+ return all(has_public_ip(d.ip_addresses, ip_v) for d in ds)
+
+ address_family = module.params.get('wait_for_public_IPv')
+
+ wait_timeout = module.params.get('wait_timeout')
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ refreshed = refresh_device_list(module, packet_conn, created_devices)
+ if all_have_public_ip(refreshed, address_family):
+ return refreshed
+ time.sleep(5)
+
+ raise Exception("Waiting for IPv%d address timed out. Hostnames: %s"
+ % (address_family, [d.hostname for d in created_devices]))
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ return packet_conn.list_devices(
+ project_id, params={
+ 'per_page': MAX_DEVICES})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_ids'):
+ device_id_list = get_device_id_list(module)
+ return {'ids': device_id_list, 'hostnames': []}
+ elif module.params.get('hostnames'):
+ hostname_list = get_hostname_list(module)
+ return {'hostnames': hostname_list, 'ids': []}
+
+
+def act_on_devices(module, packet_conn, target_state):
+ specified_identifiers = get_specified_device_identifiers(module)
+ existing_devices = get_existing_devices(module, packet_conn)
+ changed = False
+ create_hostnames = []
+ if target_state in ['present', 'active', 'rebooted']:
+ # states where we might create non-existing specified devices
+ existing_devices_names = [ed.hostname for ed in existing_devices]
+ create_hostnames = [hn for hn in specified_identifiers['hostnames']
+ if hn not in existing_devices_names]
+
+ process_devices = [d for d in existing_devices
+ if (d.id in specified_identifiers['ids']) or
+ (d.hostname in specified_identifiers['hostnames'])]
+
+ if target_state != 'present':
+ _absent_state_map = {}
+ for s in PACKET_DEVICE_STATES:
+ _absent_state_map[s] = packet.Device.delete
+
+ state_map = {
+ 'absent': _absent_state_map,
+ 'active': {'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ 'inactive': {'active': packet.Device.power_off},
+ 'rebooted': {'active': packet.Device.reboot,
+ 'inactive': packet.Device.power_on,
+ 'provisioning': None, 'rebooting': None
+ },
+ }
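+    # For example, with target_state='rebooted' a device currently 'active'
+    # maps to packet.Device.reboot, an 'inactive' one to power_on, and a
+    # 'provisioning' one to None, meaning no API call is attempted for it.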
+
+ # First do non-creation actions, it might be faster
+ for d in process_devices:
+ if d.state == target_state:
+ continue
+ if d.state in state_map[target_state]:
+ api_operation = state_map[target_state].get(d.state)
+ if api_operation is not None:
+ api_operation(d)
+ changed = True
+ else:
+ _msg = (
+ "I don't know how to process existing device %s from state %s "
+ "to state %s" %
+ (d.hostname, d.state, target_state))
+ raise Exception(_msg)
+
+ # At last create missing devices
+ created_devices = []
+ if create_hostnames:
+ created_devices = [create_single_device(module, packet_conn, n)
+ for n in create_hostnames]
+ if module.params.get('wait_for_public_IPv'):
+ created_devices = wait_for_public_IPv(
+ module, packet_conn, created_devices)
+ changed = True
+
+ processed_devices = created_devices + process_devices
+ if target_state == 'active':
+ processed_devices = wait_for_devices_active(
+ module, packet_conn, processed_devices)
+
+ return {
+ 'changed': changed,
+ 'devices': [serialize_device(d) for d in processed_devices]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ count=dict(type='int', default=1),
+ count_offset=dict(type='int', default=1),
+ device_ids=dict(type='list', elements='str'),
+ facility=dict(),
+ features=dict(type='dict'),
+ hostnames=dict(type='list', elements='str', aliases=['name']),
+ tags=dict(type='list', elements='str'),
+ locked=dict(type='bool', default=False, aliases=['lock']),
+ operating_system=dict(),
+ plan=dict(),
+ project_id=dict(required=True),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ user_data=dict(),
+ wait_for_public_IPv=dict(type='int', choices=[4, 6]),
+ wait_timeout=dict(type='int', default=900),
+ ipxe_script_url=dict(default=''),
+ always_pxe=dict(type='bool', default=False),
+ ),
+ required_one_of=[('device_ids', 'hostnames',)],
+ mutually_exclusive=[
+ ('hostnames', 'device_ids'),
+ ('count', 'device_ids'),
+ ('count_offset', 'device_ids'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+        module.fail_json(msg='packet-python is required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_devices(module, packet_conn, state))
+ except Exception as e:
+ module.fail_json(msg='failed to set device state %s, error: %s' %
+ (state, to_native(e)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
new file mode 100644
index 000000000..afeb7ea04
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_ip_subnet.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_ip_subnet
+
+short_description: Assign IP subnet to a bare metal server
+
+description:
+ - Assign or unassign IPv4 or IPv6 subnets to or from a device in the Packet host.
+ - IPv4 subnets must come from already reserved block.
+ - IPv6 subnets must come from publicly routable /56 block from your project.
+ - See U(https://support.packet.com/kb/articles/elastic-ips) for more info on IP block reservation.
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ hostname:
+ description:
+ - A hostname of a device to/from which to assign/remove a subnet.
+ required: false
+ type: str
+
+ device_id:
+ description:
+ - UUID of a device to/from which to assign/remove a subnet.
+ required: false
+ type: str
+
+ project_id:
+ description:
+ - UUID of a project of the device to/from which to assign/remove a subnet.
+ type: str
+
+ device_count:
+ description:
+ - The number of devices to retrieve from the project. The max allowed value is 1000.
+ - See U(https://www.packet.com/developers/api/#retrieve-all-devices-of-a-project) for more info.
+ default: 100
+ type: int
+
+ cidr:
+ description:
+ - IPv4 or IPv6 subnet which you want to manage. It must come from a reserved block for your project in the Packet Host.
+ aliases: [name]
+ type: str
+ required: true
+
+ state:
+ description:
+ - Desired state of the IP subnet on the specified device.
+      - With state == C(present), you must specify either hostname or device_id. The subnet with the given CIDR will then be assigned to the specified device.
+      - With state == C(absent), you can specify either hostname or device_id. The subnet will be removed from the specified device.
+      - If you leave both hostname and device_id empty, the subnet will be removed from any device it is assigned to.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+
+requirements:
+ - "packet-python >= 1.35"
+ - "python >= 2.6"
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass it to the auth_token parameter of the module instead.
+
+- name: Create 1 device and assign an arbitrary public IPv4 subnet to it
+ hosts: localhost
+ tasks:
+
+ - packet_device:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostnames: myserver
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: sjc1
+ state: active
+
+# Pick an IPv4 address from a block allocated to your project.
+
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ hostname: myserver
+ cidr: "147.75.201.78/32"
+
+# Release IP address 147.75.201.78
+
+- name: Unassign IP address from any device in your project
+ hosts: localhost
+ tasks:
+ - community.general.packet_ip_subnet:
+ project_id: 89b497ee-5afc-420a-8fb5-56984898f4df
+ cidr: "147.75.201.78/32"
+ state: absent
+'''
+
+RETURN = '''
+changed:
+  description: True if IP address assignments were altered in any way (created or removed).
+ type: bool
+ sample: true
+ returned: success
+
+device_id:
+ type: str
+ description: UUID of the device associated with the specified IP address.
+ returned: success
+
+subnet:
+ description: Dict with data about the handled IP subnet.
+ type: dict
+ sample:
+ address: 147.75.90.241
+ address_family: 4
+    assigned_to: { href: /devices/61f9aa5e-0530-47f5-97c2-113828e61ed0 }
+ cidr: 31
+ created_at: '2017-08-07T15:15:30Z'
+ enabled: true
+ gateway: 147.75.90.240
+ href: /ips/31eda960-0a16-4c0f-b196-f3dc4928529f
+    id: 31eda960-0a16-4c0f-b196-f3dc4928529f
+ manageable: true
+ management: true
+ netmask: 255.255.255.254
+ network: 147.75.90.240
+ public: true
+ returned: success
+'''
+
+
+import uuid
+import re
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+NAME_RE = r'({0}|{0}{1}*{0})'.format(r'[a-zA-Z0-9]', r'[a-zA-Z0-9\-]')
+HOSTNAME_RE = r'({0}\.)*{0}$'.format(NAME_RE)
+PROJECT_MAX_DEVICES = 100
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+ALLOWED_STATES = ['absent', 'present']
+
+
+def is_valid_hostname(hostname):
+ return re.match(HOSTNAME_RE, hostname) is not None
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_existing_devices(module, packet_conn):
+ project_id = module.params.get('project_id')
+ if not is_valid_uuid(project_id):
+ raise Exception("Project ID {0} does not seem to be valid".format(project_id))
+
+ per_page = module.params.get('device_count')
+ return packet_conn.list_devices(
+ project_id, params={'per_page': per_page})
+
+
+def get_specified_device_identifiers(module):
+ if module.params.get('device_id'):
+ _d_id = module.params.get('device_id')
+ if not is_valid_uuid(_d_id):
+ raise Exception("Device ID '{0}' does not seem to be valid".format(_d_id))
+ return {'device_id': _d_id, 'hostname': None}
+ elif module.params.get('hostname'):
+ _hn = module.params.get('hostname')
+ if not is_valid_hostname(_hn):
+ raise Exception("Hostname '{0}' does not seem to be valid".format(_hn))
+ return {'hostname': _hn, 'device_id': None}
+ else:
+ return {'hostname': None, 'device_id': None}
+
+
+def parse_subnet_cidr(cidr):
+ if "/" not in cidr:
+ raise Exception("CIDR expression in wrong format, must be address/prefix_len")
+ addr, prefixlen = cidr.split("/")
+ try:
+ prefixlen = int(prefixlen)
+ except ValueError:
+ raise Exception("Wrong prefix length in CIDR expression {0}".format(cidr))
+ return addr, prefixlen
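+# For illustration: parse_subnet_cidr('147.75.201.78/32') returns
+# ('147.75.201.78', 32); a missing '/' or a non-integer prefix length raises
+# an exception.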
+
+
+def act_on_assignment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ specified_cidr = module.params.get("cidr")
+ address, prefixlen = parse_subnet_cidr(specified_cidr)
+
+ specified_identifier = get_specified_device_identifiers(module)
+
+ if module.check_mode:
+ return return_dict
+
+ if (specified_identifier['hostname'] is None) and (
+ specified_identifier['device_id'] is None):
+ if target_state == 'absent':
+ # The special case to release the IP from any assignment
+ for d in get_existing_devices(module, packet_conn):
+ for ia in d.ip_addresses:
+ if address == ia['address'] and prefixlen == ia['cidr']:
+ packet_conn.call_api(ia['href'], "DELETE")
+ return_dict['changed'] = True
+ return_dict['subnet'] = ia
+ return_dict['device_id'] = d.id
+ return return_dict
+ raise Exception("If you assign an address, you must specify either "
+ "target device ID or target unique hostname.")
+
+ if specified_identifier['device_id'] is not None:
+ device = packet_conn.get_device(specified_identifier['device_id'])
+ else:
+ all_devices = get_existing_devices(module, packet_conn)
+ hn = specified_identifier['hostname']
+ matching_devices = [d for d in all_devices if d.hostname == hn]
+ if len(matching_devices) > 1:
+ raise Exception("There are more than one devices matching given hostname {0}".format(hn))
+ if len(matching_devices) == 0:
+ raise Exception("There is no device matching given hostname {0}".format(hn))
+ device = matching_devices[0]
+
+ return_dict['device_id'] = device.id
+ assignment_dicts = [i for i in device.ip_addresses
+ if i['address'] == address and i['cidr'] == prefixlen]
+ if len(assignment_dicts) > 1:
+ raise Exception("IP address {0} is assigned more than once for device {1}".format(
+ specified_cidr, device.hostname))
+
+ if target_state == "absent":
+ if len(assignment_dicts) == 1:
+ packet_conn.call_api(assignment_dicts[0]['href'], "DELETE")
+ return_dict['subnet'] = assignment_dicts[0]
+ return_dict['changed'] = True
+ elif target_state == "present":
+ if len(assignment_dicts) == 0:
+ new_assignment = packet_conn.call_api(
+ "devices/{0}/ips".format(device.id), "POST", {"address": "{0}".format(specified_cidr)})
+ return_dict['changed'] = True
+ return_dict['subnet'] = new_assignment
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ device_id=dict(type='str'),
+ hostname=dict(type='str'),
+ project_id=dict(type='str'),
+ device_count=dict(type='int', default=PROJECT_MAX_DEVICES),
+ cidr=dict(type='str', required=True, aliases=['name']),
+ state=dict(choices=ALLOWED_STATES, default='present'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[('hostname', 'device_id')],
+ required_one_of=[['hostname', 'device_id', 'project_id']],
+ required_by=dict(
+ hostname=('project_id',),
+ ),
+ )
+
+ if not HAS_PACKET_SDK:
+        module.fail_json(msg='packet-python is required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ try:
+ module.exit_json(**act_on_assignment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set IP subnet to state {0}, error: {1}".format(state, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_project.py b/ansible_collections/community/general/plugins/modules/packet_project.py
new file mode 100644
index 000000000..da4a2bb89
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_project.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright (c) 2019, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_project
+
+short_description: Create/delete a project in Packet host
+
+description:
+ - Create/delete a project in Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#projects).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ payment_method:
+ description:
+      - Payment method is the name of one of the payment methods available to your user.
+ - When blank, the API assumes the default payment method.
+ type: str
+
+ auth_token:
+ description:
+      - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+ - Name for/of the project.
+ type: str
+
+ org_id:
+ description:
+ - UUID of the organization to create a project for.
+ - When blank, the API assumes the default organization.
+ type: str
+
+ id:
+ description:
+ - UUID of the project which you want to remove.
+ type: str
+
+ custom_data:
+ description:
+ - Custom data about the project to create.
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.40"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create new project
+ hosts: localhost
+ tasks:
+    - community.general.packet_project:
+        name: "new project"
+
+- name: Create new project within non-default organization
+ hosts: localhost
+ tasks:
+    - community.general.packet_project:
+        name: "my org project"
+        org_id: a4cc87f9-e00f-48c2-9460-74aa60beb6b0
+
+- name: Remove project by id
+ hosts: localhost
+ tasks:
+    - community.general.packet_project:
+        state: absent
+        id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+
+- name: Create new project with non-default billing method
+ hosts: localhost
+ tasks:
+    - community.general.packet_project:
+        name: "newer project"
+        payment_method: "the other visa"
+'''
+
+RETURN = '''
+changed:
+ description: True if a project was created or removed.
+ type: bool
+ sample: true
+ returned: success
+
+name:
+ description: Name of addressed project.
+ type: str
+ returned: success
+
+id:
+ description: UUID of addressed project.
+ type: str
+ returned: success
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def act_on_project(target_state, module, packet_conn):
+ result_dict = {'changed': False}
+ given_id = module.params.get('id')
+ given_name = module.params.get('name')
+ if given_id:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_id == p.id]
+ else:
+ matching_projects = [
+ p for p in packet_conn.list_projects() if given_name == p.name]
+
+ if target_state == 'present':
+ if len(matching_projects) == 0:
+ org_id = module.params.get('org_id')
+ custom_data = module.params.get('custom_data')
+ payment_method = module.params.get('payment_method')
+
+ if not org_id:
+ params = {
+ "name": given_name,
+ "payment_method_id": payment_method,
+ "customdata": custom_data
+ }
+ new_project_data = packet_conn.call_api("projects", "POST", params)
+ new_project = packet.Project(new_project_data, packet_conn)
+ else:
+ new_project = packet_conn.create_organization_project(
+ org_id=org_id,
+ name=given_name,
+ payment_method_id=payment_method,
+ customdata=custom_data
+ )
+
+ result_dict['changed'] = True
+ matching_projects.append(new_project)
+
+ result_dict['name'] = matching_projects[0].name
+ result_dict['id'] = matching_projects[0].id
+ else:
+ if len(matching_projects) > 1:
+ _msg = ("More than projects matched for module call with state = absent: "
+ "{0}".format(to_native(matching_projects)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_projects) == 1:
+ p = matching_projects[0]
+ result_dict['name'] = p.name
+ result_dict['id'] = p.id
+ result_dict['changed'] = True
+ try:
+ p.delete()
+ except Exception as e:
+ _msg = ("while trying to remove project {0}, id {1}, got error: {2}".format(
+ p.name, p.id, to_native(e)))
+ module.fail_json(msg=_msg)
+ return result_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ name=dict(type='str'),
+ id=dict(type='str'),
+ org_id=dict(type='str'),
+ payment_method=dict(type='str'),
+ custom_data=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id",)],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ]
+ )
+ if not HAS_PACKET_SDK:
+        module.fail_json(msg='packet-python is required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_project(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set project state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_sshkey.py b/ansible_collections/community/general/plugins/modules/packet_sshkey.py
new file mode 100644
index 000000000..97f55ba23
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_sshkey.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2016 Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_sshkey
+short_description: Create/delete an SSH key in Packet host
+description:
+ - Create/delete an SSH key in Packet host.
+ - API is documented at U(https://www.packet.net/help/api/#page:ssh-keys,header:ssh-keys-ssh-keys-post).
+author: "Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+ label:
+ description:
+      - Label for the key. If you keep it empty, it will be read from the key string.
+ type: str
+ aliases: [name]
+ id:
+ description:
+ - UUID of the key which you want to remove.
+ type: str
+ fingerprint:
+ description:
+ - Fingerprint of the key which you want to remove.
+ type: str
+ key:
+ description:
+      - Public key string ({type} {base64 encoded key} {description}).
+ type: str
+ key_file:
+ description:
+ - File with the public key.
+ type: path
+
+requirements:
+ - "python >= 2.6"
+ - packet-python
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- name: Create sshkey from string
+ hosts: localhost
+ tasks:
+    - community.general.packet_sshkey:
+        key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
+
+- name: Create sshkey from file
+ hosts: localhost
+ tasks:
+    - community.general.packet_sshkey:
+        label: key from file
+        key_file: ~/ff.pub
+
+- name: Remove sshkey by id
+ hosts: localhost
+ tasks:
+    - community.general.packet_sshkey:
+        state: absent
+        id: eef49903-7a09-4ca1-af67-4087c29ab5b6
+'''
+
+RETURN = '''
+changed:
+  description: True if an SSH key was created or removed.
+ type: bool
+ sample: true
+ returned: always
+sshkeys:
+ description: Information about sshkeys that were created/removed.
+ type: list
+ sample: [
+ {
+ "fingerprint": "5c:93:74:7c:ed:07:17:62:28:75:79:23:d6:08:93:46",
+ "id": "41d61bd8-3342-428b-a09c-e67bdd18a9b7",
+ "key": "ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2",
+ "label": "mynewkey33"
+ }
+ ]
+ returned: always
+''' # NOQA
+
+import os
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule
+
+HAS_PACKET_SDK = True
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+
+def serialize_sshkey(sshkey):
+ sshkey_data = {}
+ copy_keys = ['id', 'key', 'label', 'fingerprint']
+ for name in copy_keys:
+ sshkey_data[name] = getattr(sshkey, name)
+ return sshkey_data
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def load_key_string(key_str):
+ ret_dict = {}
+ key_str = key_str.strip()
+ ret_dict['key'] = key_str
+ cut_key = key_str.split()
+ if len(cut_key) in [2, 3]:
+ if len(cut_key) == 3:
+ ret_dict['label'] = cut_key[2]
+ else:
+ raise Exception("Public key %s is in wrong format" % key_str)
+ return ret_dict
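+# For illustration: load_key_string('ssh-rsa AAAA... user@host') returns
+# {'key': 'ssh-rsa AAAA... user@host', 'label': 'user@host'}, while a
+# two-field key string yields only the 'key' entry.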
+
+
+def get_sshkey_selector(module):
+ key_id = module.params.get('id')
+ if key_id:
+ if not is_valid_uuid(key_id):
+ raise Exception("sshkey ID %s is not valid UUID" % key_id)
+ selecting_fields = ['label', 'fingerprint', 'id', 'key']
+ select_dict = {}
+ for f in selecting_fields:
+ if module.params.get(f) is not None:
+ select_dict[f] = module.params.get(f)
+
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as _file:
+ loaded_key = load_key_string(_file.read())
+ select_dict['key'] = loaded_key['key']
+ if module.params.get('label') is None:
+ if loaded_key.get('label'):
+ select_dict['label'] = loaded_key['label']
+
+ def selector(k):
+ if 'key' in select_dict:
+ # if key string is specified, compare only the key strings
+ return k.key == select_dict['key']
+ else:
+ # if key string not specified, all the fields must match
+ return all(select_dict[f] == getattr(k, f) for f in select_dict)
+ return selector
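+# For illustration: if the module call supplies a key string, the selector
+# above matches existing keys on the key material alone; otherwise every
+# supplied field (label, fingerprint, id) must match the existing key exactly.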
+
+
+def act_on_sshkeys(target_state, module, packet_conn):
+ selector = get_sshkey_selector(module)
+ existing_sshkeys = packet_conn.list_ssh_keys()
+    # list() so that emptiness checks and append() work on Python 3,
+    # where filter() returns an iterator
+    matching_sshkeys = list(filter(selector, existing_sshkeys))
+ changed = False
+ if target_state == 'present':
+        if not matching_sshkeys:
+            # there is no key matching the fields from the module call
+            # => create the key from the given label and key string
+ newkey = {}
+ if module.params.get('key_file'):
+ with open(module.params.get('key_file')) as f:
+ newkey = load_key_string(f.read())
+ if module.params.get('key'):
+ newkey = load_key_string(module.params.get('key'))
+ if module.params.get('label'):
+ newkey['label'] = module.params.get('label')
+ for param in ('label', 'key'):
+ if param not in newkey:
+ _msg = ("If you want to ensure a key is present, you must "
+ "supply both a label and a key string, either in "
+ "module params, or in a key file. %s is missing"
+ % param)
+ raise Exception(_msg)
+ matching_sshkeys = []
+ new_key_response = packet_conn.create_ssh_key(
+ newkey['label'], newkey['key'])
+ changed = True
+
+ matching_sshkeys.append(new_key_response)
+ else:
+ # state is 'absent' => delete matching keys
+ for k in matching_sshkeys:
+ try:
+ k.delete()
+ changed = True
+ except Exception as e:
+ _msg = ("while trying to remove sshkey %s, id %s %s, "
+ "got error: %s" %
+ (k.label, k.id, target_state, e))
+ raise Exception(_msg)
+
+ return {
+ 'changed': changed,
+ 'sshkeys': [serialize_sshkey(k) for k in matching_sshkeys]
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=['present', 'absent'], default='present'),
+ auth_token=dict(default=os.environ.get(PACKET_API_TOKEN_ENV_VAR),
+ no_log=True),
+ label=dict(type='str', aliases=['name']),
+ id=dict(type='str'),
+ fingerprint=dict(type='str'),
+ key=dict(type='str', no_log=True),
+ key_file=dict(type='path'),
+ ),
+ mutually_exclusive=[
+ ('label', 'id'),
+ ('label', 'fingerprint'),
+ ('id', 'fingerprint'),
+ ('key', 'fingerprint'),
+ ('key', 'id'),
+ ('key_file', 'key'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+        module.fail_json(msg='packet-python is required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable %s, "
+ "the auth_token parameter is required" %
+ PACKET_API_TOKEN_ENV_VAR)
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in ['present', 'absent']:
+ try:
+ module.exit_json(**act_on_sshkeys(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(msg='failed to set sshkey state: %s' % str(e))
+ else:
+ module.fail_json(msg='%s is not a valid state for this module' % state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_volume.py b/ansible_collections/community/general/plugins/modules/packet_volume.py
new file mode 100644
index 000000000..910d64b55
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_volume.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: packet_volume
+
+short_description: Create/delete a volume in a Packet host
+
+description:
+  - Create/delete a volume in a Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/#volumes).
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Desired state of the volume.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ project_id:
+ description:
+ - ID of project of the device.
+ required: true
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ name:
+ description:
+      - Selector for the API-generated name of the volume.
+ type: str
+
+ description:
+ description:
+ - User-defined description attribute for Packet volume.
+      - "It is used as an idempotent identifier - if a volume with the given
+        description exists, a new one is not created."
+ type: str
+
+ id:
+ description:
+ - UUID of a volume.
+ type: str
+
+ plan:
+ description:
+ - storage_1 for standard tier, storage_2 for premium (performance) tier.
+ - Tiers are described at U(https://www.packet.com/cloud/storage/).
+ choices: ['storage_1', 'storage_2']
+ default: 'storage_1'
+ type: str
+
+ facility:
+ description:
+ - Location of the volume.
+      - Volumes can only be attached to devices in the same location.
+ type: str
+
+ size:
+ description:
+ - Size of the volume in gigabytes.
+ type: int
+
+ locked:
+ description:
+ - Create new volume locked.
+ type: bool
+ default: false
+
+ billing_cycle:
+ description:
+ - Billing cycle for new volume.
+ choices: ['hourly', 'monthly']
+ default: 'hourly'
+ type: str
+
+ snapshot_policy:
+ description:
+ - Snapshot policy for new volume.
+ type: dict
+
+ suboptions:
+ snapshot_count:
+ description:
+ - How many snapshots to keep, a positive integer.
+ required: true
+ type: int
+
+ snapshot_frequency:
+ description:
+ - Frequency of snapshots.
+ required: true
+ choices: ["15min", "1hour", "1day", "1week", "1month", "1year"]
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+ vars:
+ volname: testvol123
+ project_id: 53000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ community.general.packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: 'ewr1'
+ plan: 'storage_1'
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+ register: result_create
+
+ - name: Delete volume
+ community.general.packet_volume:
+ id: "{{ result_create.id }}"
+ project_id: "{{ project_id }}"
+ state: absent
+'''
+
+RETURN = '''
+id:
+  description: UUID of the specified volume.
+ type: str
+ returned: success
+ sample: 53000fb2-ee46-4673-93a8-de2c2bdba33c
+name:
+ description: The API-generated name of the volume resource.
+ type: str
+ returned: if volume is attached/detached to/from some device
+ sample: "volume-a91dc506"
+description:
+ description: The user-defined description of the volume resource.
+ type: str
+ returned: success
+ sample: "Just another volume"
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+VOLUME_PLANS = ["storage_1", "storage_2"]
+VOLUME_STATES = ["present", "absent"]
+BILLING = ["hourly", "monthly"]
+
+
+def is_valid_uuid(myuuid):
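+    # Round-trip check: uuid.UUID() parses the string and str() renders the
+    # canonical (lowercase, hyphenated) form, so only canonically formatted
+    # UUIDs compare equal to the input.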
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(module):
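+    # main() declares required_one_of=("name", "id", "description"), so
+    # exactly one of the branches below is taken.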
+ if module.params.get('id'):
+ i = module.params.get('id')
+ if not is_valid_uuid(i):
+ raise Exception("Volume ID '{0}' is not a valid UUID".format(i))
+ return lambda v: v['id'] == i
+ elif module.params.get('name'):
+ n = module.params.get('name')
+ return lambda v: v['name'] == n
+ elif module.params.get('description'):
+ d = module.params.get('description')
+ return lambda v: v['description'] == d
+
+
+def get_or_fail(params, key):
+ item = params.get(key)
+ if item is None:
+ raise Exception("{0} must be specified for new volume".format(key))
+ return item
+
+
+def act_on_volume(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ s = get_volume_selector(module)
+ project_id = module.params.get("project_id")
+ api_method = "projects/{0}/storage".format(project_id)
+ all_volumes = packet_conn.call_api(api_method, "GET")['volumes']
+ matching_volumes = [v for v in all_volumes if s(v)]
+
+ if target_state == "present":
+ if len(matching_volumes) == 0:
+ params = {
+ "description": get_or_fail(module.params, "description"),
+ "size": get_or_fail(module.params, "size"),
+ "plan": get_or_fail(module.params, "plan"),
+ "facility": get_or_fail(module.params, "facility"),
+ "locked": get_or_fail(module.params, "locked"),
+ "billing_cycle": get_or_fail(module.params, "billing_cycle"),
+ "snapshot_policies": module.params.get("snapshot_policy"),
+ }
+
+ new_volume_data = packet_conn.call_api(api_method, "POST", params)
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = new_volume_data[k]
+
+ else:
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = matching_volumes[0][k]
+
+ else:
+ if len(matching_volumes) > 1:
+ _msg = ("More than one volume matches in module call for absent state: {0}".format(
+ to_native(matching_volumes)))
+ module.fail_json(msg=_msg)
+
+ if len(matching_volumes) == 1:
+ volume = matching_volumes[0]
+ packet_conn.call_api("storage/{0}".format(volume['id']), "DELETE")
+ return_dict['changed'] = True
+ for k in ['id', 'name', 'description']:
+ return_dict[k] = volume[k]
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str'),
+ description=dict(type="str"),
+ name=dict(type='str'),
+ state=dict(choices=VOLUME_STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ project_id=dict(required=True),
+ plan=dict(choices=VOLUME_PLANS, default="storage_1"),
+ facility=dict(type="str"),
+ size=dict(type="int"),
+ locked=dict(type="bool", default=False),
+ snapshot_policy=dict(type='dict'),
+ billing_cycle=dict(type='str', choices=BILLING, default="hourly"),
+ ),
+ supports_check_mode=True,
+ required_one_of=[("name", "id", "description")],
+ mutually_exclusive=[
+ ('name', 'id'),
+ ('id', 'description'),
+ ('name', 'description'),
+ ]
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in VOLUME_STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(**act_on_volume(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume state {0}: {1}".format(
+ state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
new file mode 100644
index 000000000..7f6c68e05
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/packet_volume_attachment.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Nurfet Becirevic <nurfet.becirevic@gmail.com>
+# Copyright (c) 2017, Tomas Karasek <tom.to.the.k@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: packet_volume_attachment
+
+short_description: Attach/detach a volume to a device in the Packet host
+
+description:
+ - Attach/detach a volume to a device in the Packet host.
+ - API is documented at U(https://www.packet.com/developers/api/volumes/).
+ - "This module creates the attachment route in the Packet API. In order to discover
+ the block devices on the server, you have to run the Attach Scripts,
+ as documented at U(https://help.packet.net/technical/storage/packet-block-storage-linux)."
+
+version_added: '0.2.0'
+
+author:
+ - Tomas Karasek (@t0mk) <tom.to.the.k@gmail.com>
+ - Nurfet Becirevic (@nurfet-becirevic) <nurfet.becirevic@gmail.com>
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Indicate desired state of the attachment.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+
+ auth_token:
+ description:
+ - Packet API token. You can also supply it in env var C(PACKET_API_TOKEN).
+ type: str
+
+ project_id:
+ description:
+ - UUID of the project to which the device and volume belong.
+ type: str
+ required: true
+
+ volume:
+ description:
+ - Selector for the volume.
+ - It can be a UUID, an API-generated volume name, or user-defined description string.
+ - 'Example values: 4a347482-b546-4f67-8300-fb5018ef0c5, volume-4a347482, "my volume"'
+ type: str
+ required: true
+
+ device:
+ description:
+ - Selector for the device.
+ - It can be a UUID of the device, or a hostname.
+ - 'Example values: 98a14f7a-3d27-4478-b7cf-35b5670523f3, "my device"'
+ type: str
+
+requirements:
+ - "python >= 2.6"
+ - "packet-python >= 1.35"
+
+'''
+
+EXAMPLES = '''
+# All the examples assume that you have your Packet API token in env var PACKET_API_TOKEN.
+# You can also pass the api token in module param auth_token.
+
+- hosts: localhost
+
+ vars:
+ volname: testvol
+ devname: testdev
+ project_id: 52000fb2-ee46-4673-93a8-de2c2bdba33b
+
+ tasks:
+ - name: Create volume
+ packet_volume:
+ description: "{{ volname }}"
+ project_id: "{{ project_id }}"
+ facility: ewr1
+ plan: storage_1
+ state: present
+ size: 10
+ snapshot_policy:
+ snapshot_count: 10
+ snapshot_frequency: 1day
+
+ - name: Create a device
+ packet_device:
+ project_id: "{{ project_id }}"
+ hostnames: "{{ devname }}"
+ operating_system: ubuntu_16_04
+ plan: baremetal_0
+ facility: ewr1
+ state: present
+
+ - name: Attach testvol to testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+
+ - name: Detach testvol from testdev
+ community.general.packet_volume_attachment:
+ project_id: "{{ project_id }}"
+ volume: "{{ volname }}"
+ device: "{{ devname }}"
+ state: absent
+'''
+
+RETURN = '''
+volume_id:
+ description: UUID of volume addressed by the module call.
+ type: str
+ returned: success
+
+device_id:
+ description: UUID of device addressed by the module call.
+ type: str
+ returned: success
+'''
+
+import uuid
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.common.text.converters import to_native
+
+HAS_PACKET_SDK = True
+
+
+try:
+ import packet
+except ImportError:
+ HAS_PACKET_SDK = False
+
+
+PACKET_API_TOKEN_ENV_VAR = "PACKET_API_TOKEN"
+
+STATES = ["present", "absent"]
+
+
+def is_valid_uuid(myuuid):
+ try:
+ val = uuid.UUID(myuuid, version=4)
+ except ValueError:
+ return False
+ return str(val) == myuuid
+
+
+def get_volume_selector(spec):
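+    # A spec that parses as a UUID selects by ID; anything else is matched
+    # against the API-generated name or the user-supplied description.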
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['name'] == spec or v['description'] == spec
+
+
+def get_device_selector(spec):
+ if is_valid_uuid(spec):
+ return lambda v: v['id'] == spec
+ else:
+ return lambda v: v['hostname'] == spec
+
+
+def do_attach(packet_conn, vol_id, dev_id):
+ api_method = "storage/{0}/attachments".format(vol_id)
+ packet_conn.call_api(
+ api_method,
+ params={"device_id": dev_id},
+ type="POST")
+
+
+def do_detach(packet_conn, vol, dev_id=None):
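+    # With dev_id=None every attachment of the volume is removed; otherwise
+    # only the attachment to the given device is deleted.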
+ def dev_match(a):
+ return (dev_id is None) or (a['device']['id'] == dev_id)
+ for a in vol['attachments']:
+ if dev_match(a):
+ packet_conn.call_api(a['href'], type="DELETE")
+
+
+def validate_selected(l, resource_type, spec):
+ if len(l) > 1:
+ _msg = ("more than one {0} matches specification {1}: {2}".format(
+ resource_type, spec, l))
+ raise Exception(_msg)
+ if len(l) == 0:
+ _msg = "no {0} matches specification: {1}".format(resource_type, spec)
+ raise Exception(_msg)
+
+
+def get_attached_dev_ids(volume_dict):
+ if len(volume_dict['attachments']) == 0:
+ return []
+ else:
+ return [a['device']['id'] for a in volume_dict['attachments']]
+
+
+def act_on_volume_attachment(target_state, module, packet_conn):
+ return_dict = {'changed': False}
+ volspec = module.params.get("volume")
+ devspec = module.params.get("device")
+ if devspec is None and target_state == 'present':
+ raise Exception("If you want to attach a volume, you must specify a device.")
+ project_id = module.params.get("project_id")
+ volumes_api_method = "projects/{0}/storage".format(project_id)
+ volumes = packet_conn.call_api(volumes_api_method,
+ params={'include': 'facility,attachments.device'})['volumes']
+ v_match = get_volume_selector(volspec)
+ matching_volumes = [v for v in volumes if v_match(v)]
+ validate_selected(matching_volumes, "volume", volspec)
+ volume = matching_volumes[0]
+ return_dict['volume_id'] = volume['id']
+
+ device = None
+ if devspec is not None:
+ devices_api_method = "projects/{0}/devices".format(project_id)
+ devices = packet_conn.call_api(devices_api_method)['devices']
+ d_match = get_device_selector(devspec)
+ matching_devices = [d for d in devices if d_match(d)]
+ validate_selected(matching_devices, "device", devspec)
+ device = matching_devices[0]
+ return_dict['device_id'] = device['id']
+
+ attached_device_ids = get_attached_dev_ids(volume)
+
+ if target_state == "present":
+ if len(attached_device_ids) == 0:
+ do_attach(packet_conn, volume['id'], device['id'])
+ return_dict['changed'] = True
+ elif device['id'] not in attached_device_ids:
+ # Don't reattach volume which is attached to a different device.
+ # Rather fail than force remove a device on state == 'present'.
+            raise Exception("volume {0} is already attached to device(s) {1}".format(
+                volume['id'], attached_device_ids))
+ else:
+ if device is None:
+ if len(attached_device_ids) > 0:
+ do_detach(packet_conn, volume)
+ return_dict['changed'] = True
+ elif device['id'] in attached_device_ids:
+ do_detach(packet_conn, volume, device['id'])
+ return_dict['changed'] = True
+
+ return return_dict
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(choices=STATES, default="present"),
+ auth_token=dict(
+ type='str',
+ fallback=(env_fallback, [PACKET_API_TOKEN_ENV_VAR]),
+ no_log=True
+ ),
+ volume=dict(type="str", required=True),
+ project_id=dict(type="str", required=True),
+ device=dict(type="str"),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_PACKET_SDK:
+ module.fail_json(msg='packet required for this module')
+
+ if not module.params.get('auth_token'):
+ _fail_msg = ("if Packet API token is not in environment variable {0}, "
+ "the auth_token parameter is required".format(PACKET_API_TOKEN_ENV_VAR))
+ module.fail_json(msg=_fail_msg)
+
+ auth_token = module.params.get('auth_token')
+
+ packet_conn = packet.Manager(auth_token=auth_token)
+
+ state = module.params.get('state')
+
+ if state in STATES:
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ try:
+ module.exit_json(
+ **act_on_volume_attachment(state, module, packet_conn))
+ except Exception as e:
+ module.fail_json(
+ msg="failed to set volume_attachment state {0}: {1}".format(state, to_native(e)))
+ else:
+ module.fail_json(msg="{0} is not a valid state for this module".format(state))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pacman.py b/ansible_collections/community/general/plugins/modules/pacman.py
new file mode 100644
index 000000000..66f58155d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pacman.py
@@ -0,0 +1,859 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright (c) 2015, Indrajit Raychaudhuri <irc+code@indrajit.com>
+# Copyright (c) 2022, Jean Raby <jean@raby.sh>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pacman
+short_description: Manage packages with I(pacman)
+description:
+ - Manage packages with the I(pacman) package manager, which is used by Arch Linux and its variants.
+author:
+ - Indrajit Raychaudhuri (@indrajitr)
+ - Aaron Bull Schaefer (@elasticdog) <aaron@elasticdog.com>
+ - Maxime de Roucy (@tchernomax)
+ - Jean Raby (@jraby)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ name:
+ description:
+ - Name or list of names of the package(s) or file(s) to install, upgrade, or remove.
+ Can't be used in combination with C(upgrade).
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
+ - C(present) and C(installed) will simply ensure that a desired package is installed.
+ - C(latest) will update the specified package if it is not of the latest available version.
+ - C(absent) and C(removed) will remove the specified package.
+ default: present
+ choices: [ absent, installed, latest, present, removed ]
+ type: str
+
+ force:
+ description:
+ - When removing packages, forcefully remove them, without any checks.
+ Same as I(extra_args="--nodeps --nodeps").
+ When combined with I(update_cache), force a refresh of all package databases.
+ Same as I(update_cache_extra_args="--refresh --refresh").
+ default: false
+ type: bool
+
+ remove_nosave:
+ description:
+ - When removing packages, do not save modified configuration files as C(.pacsave) files.
+ (passes C(--nosave) to pacman)
+ version_added: 4.6.0
+ default: false
+ type: bool
+
+ executable:
+ description:
+ - Path of the binary to use. This can either be C(pacman) or a pacman compatible AUR helper.
+      - Pacman compatibility is unfortunately ill-defined; in particular, this module makes
+        extensive use of the C(--print-format) directive, which is known not to be implemented by
+        some AUR helpers (notably C(yay)).
+ - Beware that AUR helpers might behave unexpectedly and are therefore not recommended.
+ default: pacman
+ type: str
+ version_added: 3.1.0
+
+ extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(state).
+ default: ''
+ type: str
+
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists.
+ - This can be run as part of a package installation or as a separate step.
+ - If not specified, it defaults to C(false).
+      - Please note that before community.general 5.0.0 this option only influenced the
+        module's C(changed) state if I(name) and I(upgrade) were not specified.
+ See the examples for how to keep the old behavior.
+ type: bool
+
+ update_cache_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(update_cache).
+ default: ''
+ type: str
+
+ upgrade:
+ description:
+ - Whether or not to upgrade the whole system.
+ Can't be used in combination with C(name).
+ - If not specified, it defaults to C(false).
+ type: bool
+
+ upgrade_extra_args:
+ description:
+ - Additional option to pass to pacman when enforcing C(upgrade).
+ default: ''
+ type: str
+
+ reason:
+ description:
+ - The install reason to set for the packages.
+ choices: [ dependency, explicit ]
+ type: str
+ version_added: 5.4.0
+
+ reason_for:
+ description:
+ - Set the install reason for C(all) packages or only for C(new) packages.
+      - In case of I(state=latest), already-installed packages that will be updated to a newer version are not counted as C(new).
+ default: new
+ choices: [ all, new ]
+ type: str
+ version_added: 5.4.0
+
+notes:
+  - When used with a C(loop:) each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+ - To use an AUR helper (I(executable) option), a few extra setup steps might be required beforehand.
+ For example, a dedicated build user with permissions to install packages could be necessary.
+"""
+
+RETURN = """
+packages:
+ description:
+ - A list of packages that have been changed.
+ - Before community.general 4.5.0 this was only returned when I(upgrade=true).
+ In community.general 4.5.0, it was sometimes omitted when the package list is empty,
+ but since community.general 4.6.0 it is always returned when I(name) is specified or
+ I(upgrade=true).
+ returned: success and I(name) is specified or I(upgrade=true)
+ type: list
+ elements: str
+ sample: [ package, other-package ]
+
+cache_updated:
+ description:
+ - The changed status of C(pacman -Sy).
+ - Useful when I(name) or I(upgrade=true) are specified next to I(update_cache=true).
+ returned: success, when I(update_cache=true)
+ type: bool
+ sample: false
+ version_added: 4.6.0
+
+stdout:
+ description:
+ - Output from pacman.
+ returned: success, when needed
+ type: str
+ sample: ":: Synchronizing package databases... core is up to date :: Starting full system upgrade..."
+ version_added: 4.1.0
+
+stderr:
+ description:
+ - Error output from pacman.
+ returned: success, when needed
+ type: str
+ sample: "warning: libtool: local (2.4.6+44+gb9b44533-14) is newer than core (2.4.6+42+gb88cebd5-15)\nwarning ..."
+ version_added: 4.1.0
+"""
+
+EXAMPLES = """
+- name: Install package foo from repo
+ community.general.pacman:
+ name: foo
+ state: present
+
+- name: Install package bar from file
+ community.general.pacman:
+ name: ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package foo from repo and bar from file
+ community.general.pacman:
+ name:
+ - foo
+ - ~/bar-1.0-1-any.pkg.tar.xz
+ state: present
+
+- name: Install package from AUR using a Pacman compatible AUR helper
+ community.general.pacman:
+ name: foo
+ state: present
+ executable: yay
+ extra_args: --builddir /var/cache/yay
+
+- name: Upgrade package foo
+ # The 'changed' state of this call will indicate whether the cache was
+ # updated *or* whether foo was installed/upgraded.
+ community.general.pacman:
+ name: foo
+ state: latest
+ update_cache: true
+
+- name: Remove packages foo and bar
+ community.general.pacman:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ extra_args: --recursive
+
+- name: Run the equivalent of "pacman -Sy" as a separate step
+ community.general.pacman:
+ update_cache: true
+
+- name: Run the equivalent of "pacman -Su" as a separate step
+ community.general.pacman:
+ upgrade: true
+
+- name: Run the equivalent of "pacman -Syu" as a separate step
+ # Since community.general 5.0.0 the 'changed' state of this call
+ # will be 'true' in case the cache was updated, or when a package
+ # was updated.
+ #
+ # The previous behavior was to only indicate whether something was
+ # upgraded. To keep the old behavior, add the following to the task:
+ #
+ # register: result
+ # changed_when: result.packages | length > 0
+ community.general.pacman:
+ update_cache: true
+ upgrade: true
+
+- name: Run the equivalent of "pacman -Rdd", force remove package baz
+ community.general.pacman:
+ name: baz
+ state: absent
+ force: true
+
+- name: Install foo as dependency and leave reason untouched if already installed
+ community.general.pacman:
+ name: foo
+ state: present
+ reason: dependency
+ reason_for: new
+
+- name: Run the equivalent of "pacman -S --asexplicit", mark foo as explicit and install it if not present
+ community.general.pacman:
+ name: foo
+ state: present
+ reason: explicit
+ reason_for: all
+"""
+
+import shlex
+from ansible.module_utils.basic import AnsibleModule
+from collections import defaultdict, namedtuple
+
+
+class Package(object):
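+    # A requested package: its resolved name plus the source string handed
+    # to pacman (a repository package name, a local file path, or a URL).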
+ def __init__(self, name, source, source_is_URL=False):
+ self.name = name
+ self.source = source
+ self.source_is_URL = source_is_URL
+
+ def __eq__(self, o):
+ return self.name == o.name and self.source == o.source and self.source_is_URL == o.source_is_URL
+
+ def __lt__(self, o):
+ return self.name < o.name
+
+ def __repr__(self):
+ return 'Package("%s", "%s", %s)' % (self.name, self.source, self.source_is_URL)
+
+
+VersionTuple = namedtuple("VersionTuple", ["current", "latest"])
+
+
+class Pacman(object):
+ def __init__(self, module):
+ self.m = module
+
+ self.m.run_command_environ_update = dict(LC_ALL="C")
+ p = self.m.params
+
+ self._msgs = []
+ self._stdouts = []
+ self._stderrs = []
+ self.changed = False
+ self.exit_params = {}
+
+ self.pacman_path = self.m.get_bin_path(p["executable"], True)
+
+ self._cached_database = None
+
+ # Normalize for old configs
+ if p["state"] == "installed":
+ self.target_state = "present"
+ elif p["state"] == "removed":
+ self.target_state = "absent"
+ else:
+ self.target_state = p["state"]
+
+ def add_exit_infos(self, msg=None, stdout=None, stderr=None):
+ if msg:
+ self._msgs.append(msg)
+ if stdout:
+ self._stdouts.append(stdout)
+ if stderr:
+ self._stderrs.append(stderr)
+
+ def _set_mandatory_exit_params(self):
+ msg = "\n".join(self._msgs)
+ stdouts = "\n".join(self._stdouts)
+ stderrs = "\n".join(self._stderrs)
+ if stdouts:
+ self.exit_params["stdout"] = stdouts
+ if stderrs:
+ self.exit_params["stderr"] = stderrs
+ self.exit_params["msg"] = msg # mandatory, but might be empty
+
+ def fail(self, msg=None, stdout=None, stderr=None, **kwargs):
+ self.add_exit_infos(msg, stdout, stderr)
+ self._set_mandatory_exit_params()
+ if kwargs:
+ self.exit_params.update(**kwargs)
+ self.m.fail_json(**self.exit_params)
+
+ def success(self):
+ self._set_mandatory_exit_params()
+ self.m.exit_json(changed=self.changed, **self.exit_params)
+
+ def run(self):
+ if self.m.params["update_cache"]:
+ self.update_package_db()
+
+ if not (self.m.params["name"] or self.m.params["upgrade"]):
+ self.success()
+
+ self.inventory = self._build_inventory()
+ if self.m.params["upgrade"]:
+ self.upgrade()
+ self.success()
+
+ if self.m.params["name"]:
+ pkgs = self.package_list()
+
+ if self.target_state == "absent":
+ self.remove_packages(pkgs)
+ self.success()
+ else:
+ self.install_packages(pkgs)
+ self.success()
+
+ # This shouldn't happen...
+ self.fail("This is a bug")
+
+ def install_packages(self, pkgs):
+ pkgs_to_install = []
+ pkgs_to_install_from_url = []
+ pkgs_to_set_reason = []
+ for p in pkgs:
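+            # Set the install reason for packages that are new to the system,
+            # or, with reason_for=all, for any package whose recorded reason
+            # differs from the requested one.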
+ if self.m.params["reason"] and (
+ p.name not in self.inventory["pkg_reasons"]
+ or self.m.params["reason_for"] == "all"
+ and self.inventory["pkg_reasons"][p.name] != self.m.params["reason"]
+ ):
+ pkgs_to_set_reason.append(p.name)
+ if p.source_is_URL:
+ # URL packages bypass the latest / upgradable_pkgs test
+ # They go through the dry-run to let pacman decide if they will be installed
+ pkgs_to_install_from_url.append(p)
+ continue
+ if (
+ p.name not in self.inventory["installed_pkgs"]
+ or self.target_state == "latest"
+ and p.name in self.inventory["upgradable_pkgs"]
+ ):
+ pkgs_to_install.append(p)
+
+ if len(pkgs_to_install) == 0 and len(pkgs_to_install_from_url) == 0 and len(pkgs_to_set_reason) == 0:
+ self.exit_params["packages"] = []
+ self.add_exit_infos("package(s) already installed")
+ return
+
+ cmd_base = [
+ self.pacman_path,
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ ]
+ if self.m.params["extra_args"]:
+ cmd_base.extend(self.m.params["extra_args"])
+
+ def _build_install_diff(pacman_verb, pkglist):
+ # Dry run to build the installation diff
+
+ cmd = cmd_base + [pacman_verb, "--print-format", "%n %v"] + [p.source for p in pkglist]
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ self.fail("Failed to list package(s) to install", cmd=cmd, stdout=stdout, stderr=stderr)
+
+ name_ver = [l.strip() for l in stdout.splitlines()]
+ before = []
+ after = []
+ to_be_installed = []
+ for p in name_ver:
+ # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs "loading packages..." on stdout. strip that.
+ # When installing from URLs, pacman can also output a 'nothing to do' message. strip that too.
+ if "loading packages" in p or "there is nothing to do" in p:
+ continue
+ name, version = p.split()
+ if name in self.inventory["installed_pkgs"]:
+ before.append("%s-%s-%s" % (name, self.inventory["installed_pkgs"][name], self.inventory["pkg_reasons"][name]))
+ if name in pkgs_to_set_reason:
+ after.append("%s-%s-%s" % (name, version, self.m.params["reason"]))
+ elif name in self.inventory["pkg_reasons"]:
+ after.append("%s-%s-%s" % (name, version, self.inventory["pkg_reasons"][name]))
+ else:
+ after.append("%s-%s" % (name, version))
+ to_be_installed.append(name)
+
+ return (to_be_installed, before, after)
+
+ before = []
+ after = []
+ installed_pkgs = []
+
+ if pkgs_to_install:
+ p, b, a = _build_install_diff("--sync", pkgs_to_install)
+ installed_pkgs.extend(p)
+ before.extend(b)
+ after.extend(a)
+ if pkgs_to_install_from_url:
+ p, b, a = _build_install_diff("--upgrade", pkgs_to_install_from_url)
+ installed_pkgs.extend(p)
+ before.extend(b)
+ after.extend(a)
+
+ if len(installed_pkgs) == 0 and len(pkgs_to_set_reason) == 0:
+ # This can happen with URL packages if pacman decides there's nothing to do
+ self.exit_params["packages"] = []
+ self.add_exit_infos("package(s) already installed")
+ return
+
+ self.changed = True
+
+ self.exit_params["diff"] = {
+ "before": "\n".join(sorted(before)) + "\n" if before else "",
+ "after": "\n".join(sorted(after)) + "\n" if after else "",
+ }
+
+ changed_reason_pkgs = [p for p in pkgs_to_set_reason if p not in installed_pkgs]
+
+ if self.m.check_mode:
+ self.add_exit_infos("Would have installed %d packages" % (len(installed_pkgs) + len(changed_reason_pkgs)))
+ self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs)
+ return
+
+ # actually do it
+ def _install_packages_for_real(pacman_verb, pkglist):
+ cmd = cmd_base + [pacman_verb] + [p.source for p in pkglist]
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr)
+ self.add_exit_infos(stdout=stdout, stderr=stderr)
+ self._invalidate_database()
+
+ if pkgs_to_install:
+ _install_packages_for_real("--sync", pkgs_to_install)
+ if pkgs_to_install_from_url:
+ _install_packages_for_real("--upgrade", pkgs_to_install_from_url)
+
+ # set reason
+ if pkgs_to_set_reason:
+ cmd = [self.pacman_path, "--noconfirm", "--database"]
+ if self.m.params["reason"] == "dependency":
+ cmd.append("--asdeps")
+ else:
+ cmd.append("--asexplicit")
+ cmd.extend(pkgs_to_set_reason)
+
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ self.fail("Failed to install package(s)", cmd=cmd, stdout=stdout, stderr=stderr)
+ self.add_exit_infos(stdout=stdout, stderr=stderr)
+
+ self.exit_params["packages"] = sorted(installed_pkgs + changed_reason_pkgs)
+ self.add_exit_infos("Installed %d package(s)" % (len(installed_pkgs) + len(changed_reason_pkgs)))
+
+ def remove_packages(self, pkgs):
+ # filter out pkgs that are already absent
+ pkg_names_to_remove = [p.name for p in pkgs if p.name in self.inventory["installed_pkgs"]]
+
+ if len(pkg_names_to_remove) == 0:
+ self.exit_params["packages"] = []
+ self.add_exit_infos("package(s) already absent")
+ return
+
+ # There's something to do, set this in advance
+ self.changed = True
+
+ cmd_base = [self.pacman_path, "--remove", "--noconfirm", "--noprogressbar"]
+ cmd_base += self.m.params["extra_args"]
+ cmd_base += ["--nodeps", "--nodeps"] if self.m.params["force"] else []
+ # nosave_args conflicts with --print-format. Added later.
+ # https://github.com/ansible-collections/community.general/issues/4315
+
+ # This is a bit of a TOCTOU but it is better than parsing the output of
+ # pacman -R, which is different depending on the user config (VerbosePkgLists)
+ # Start by gathering what would be removed
+ cmd = cmd_base + ["--print-format", "%n-%v"] + pkg_names_to_remove
+
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ self.fail("failed to list package(s) to remove", cmd=cmd, stdout=stdout, stderr=stderr)
+
+ removed_pkgs = stdout.split()
+ self.exit_params["packages"] = removed_pkgs
+ self.exit_params["diff"] = {
+ "before": "\n".join(removed_pkgs) + "\n", # trailing \n to avoid diff complaints
+ "after": "",
+ }
+
+ if self.m.check_mode:
+ self.exit_params["packages"] = removed_pkgs
+ self.add_exit_infos("Would have removed %d packages" % len(removed_pkgs))
+ return
+
+ nosave_args = ["--nosave"] if self.m.params["remove_nosave"] else []
+ cmd = cmd_base + nosave_args + pkg_names_to_remove
+
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ self.fail("failed to remove package(s)", cmd=cmd, stdout=stdout, stderr=stderr)
+ self._invalidate_database()
+ self.exit_params["packages"] = removed_pkgs
+ self.add_exit_infos("Removed %d package(s)" % len(removed_pkgs), stdout=stdout, stderr=stderr)
+
+ def upgrade(self):
+ """Runs pacman --sync --sysupgrade if there are upgradable packages"""
+
+ if len(self.inventory["upgradable_pkgs"]) == 0:
+ self.add_exit_infos("Nothing to upgrade")
+ return
+
+ self.changed = True # there are upgrades, so there will be changes
+
+ # Build diff based on inventory first.
+ diff = {"before": "", "after": ""}
+ for pkg, versions in self.inventory["upgradable_pkgs"].items():
+ diff["before"] += "%s-%s\n" % (pkg, versions.current)
+ diff["after"] += "%s-%s\n" % (pkg, versions.latest)
+ self.exit_params["diff"] = diff
+ self.exit_params["packages"] = self.inventory["upgradable_pkgs"].keys()
+
+ if self.m.check_mode:
+ self.add_exit_infos(
+ "%d packages would have been upgraded" % (len(self.inventory["upgradable_pkgs"]))
+ )
+ else:
+ cmd = [
+ self.pacman_path,
+ "--sync",
+ "--sysupgrade",
+ "--quiet",
+ "--noconfirm",
+ ]
+ if self.m.params["upgrade_extra_args"]:
+ cmd += self.m.params["upgrade_extra_args"]
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ self._invalidate_database()
+ if rc == 0:
+ self.add_exit_infos("System upgraded", stdout=stdout, stderr=stderr)
+ else:
+ self.fail("Could not upgrade", cmd=cmd, stdout=stdout, stderr=stderr)
+
+ def _list_database(self):
+ """runs pacman --sync --list with some caching"""
+ if self._cached_database is None:
+ dummy, packages, dummy = self.m.run_command([self.pacman_path, '--sync', '--list'], check_rc=True)
+ self._cached_database = packages.splitlines()
+ return self._cached_database
+
+ def _invalidate_database(self):
+ """invalidates the pacman --sync --list cache"""
+ self._cached_database = None
+
+ def update_package_db(self):
+ """runs pacman --sync --refresh"""
+ if self.m.check_mode:
+ self.add_exit_infos("Would have updated the package db")
+ self.changed = True
+ self.exit_params["cache_updated"] = True
+ return
+
+ cmd = [
+ self.pacman_path,
+ "--sync",
+ "--refresh",
+ ]
+ if self.m.params["update_cache_extra_args"]:
+ cmd += self.m.params["update_cache_extra_args"]
+ if self.m.params["force"]:
+ cmd += ["--refresh"]
+ else:
+ # Dump package database to get contents before update
+ pre_state = sorted(self._list_database())
+
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ self._invalidate_database()
+
+ if self.m.params["force"]:
+ # Always changed when force=true
+ self.exit_params["cache_updated"] = True
+ else:
+ # Dump package database to get contents after update
+ post_state = sorted(self._list_database())
+ # If contents changed, set changed=true
+ self.exit_params["cache_updated"] = pre_state != post_state
+ if self.exit_params["cache_updated"]:
+ self.changed = True
+
+ if rc == 0:
+ self.add_exit_infos("Updated package db", stdout=stdout, stderr=stderr)
+ else:
+ self.fail("could not update package db", cmd=cmd, stdout=stdout, stderr=stderr)
+
+ def package_list(self):
+ """Takes the input package list and resolves packages groups to their package list using the inventory,
+ extracts package names from packages given as files or URLs using calls to pacman
+
+ Returns the expanded/resolved list as a list of Package
+ """
+ pkg_list = []
+ for pkg in self.m.params["name"]:
+ if not pkg:
+ continue
+
+ is_URL = False
+ if pkg in self.inventory["available_groups"]:
+ # Expand group members
+ for group_member in self.inventory["available_groups"][pkg]:
+ pkg_list.append(Package(name=group_member, source=group_member))
+ elif pkg in self.inventory["available_pkgs"] or pkg in self.inventory["installed_pkgs"]:
+ # Just a regular pkg, either available in the repositories,
+ # or locally installed, which we need to know for absent state
+ pkg_list.append(Package(name=pkg, source=pkg))
+ else:
+ # Last resort, call out to pacman to extract the info,
+ # pkg is possibly in the <repo>/<pkgname> format, or a filename or a URL
+
+ # Start with <repo>/<pkgname> case
+ cmd = [self.pacman_path, "--sync", "--print-format", "%n", pkg]
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ # fallback to filename / URL
+ cmd = [self.pacman_path, "--upgrade", "--print-format", "%n", pkg]
+ rc, stdout, stderr = self.m.run_command(cmd, check_rc=False)
+ if rc != 0:
+ if self.target_state == "absent":
+ continue # Don't bark for unavailable packages when trying to remove them
+ else:
+ self.fail(
+ msg="Failed to list package %s" % (pkg),
+ cmd=cmd,
+ stdout=stdout,
+ stderr=stderr,
+ rc=rc,
+ )
+ # With Pacman v6.0.1 - libalpm v13.0.1, --upgrade outputs " filename_without_extension downloading..." if the URL is unseen.
+ # In all cases, pacman outputs "loading packages..." on stdout. strip both
+ stdout = stdout.splitlines()[-1]
+ is_URL = True
+ pkg_name = stdout.strip()
+ pkg_list.append(Package(name=pkg_name, source=pkg, source_is_URL=is_URL))
+
+ return pkg_list
+
+ def _build_inventory(self):
+ """Build a cache datastructure used for all pkg lookups
+ Returns a dict:
+ {
+ "installed_pkgs": {pkgname: version},
+ "installed_groups": {groupname: set(pkgnames)},
+ "available_pkgs": {pkgname: version},
+ "available_groups": {groupname: set(pkgnames)},
+ "upgradable_pkgs": {pkgname: (current_version,latest_version)},
+ "pkg_reasons": {pkgname: reason},
+ }
+
+ Fails the module if a package requested for install cannot be found
+ """
+
+ installed_pkgs = {}
+ dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query"], check_rc=True)
+ # Format of a line: "pacman 6.0.1-2"
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ pkg, ver = l.split()
+ installed_pkgs[pkg] = ver
+
+ installed_groups = defaultdict(set)
+ dummy, stdout, dummy = self.m.run_command(
+ [self.pacman_path, "--query", "--groups"], check_rc=True
+ )
+ # Format of lines:
+ # base-devel file
+ # base-devel findutils
+ # ...
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ group, pkgname = l.split()
+ installed_groups[group].add(pkgname)
+
+ available_pkgs = {}
+ database = self._list_database()
+ # Format of a line: "core pacman 6.0.1-2"
+ for l in database:
+ l = l.strip()
+ if not l:
+ continue
+ repo, pkg, ver = l.split()[:3]
+ available_pkgs[pkg] = ver
+
+ available_groups = defaultdict(set)
+ dummy, stdout, dummy = self.m.run_command(
+ [self.pacman_path, "--sync", "--groups", "--groups"], check_rc=True
+ )
+ # Format of lines:
+ # vim-plugins vim-airline
+ # vim-plugins vim-airline-themes
+ # vim-plugins vim-ale
+ # ...
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ group, pkg = l.split()
+ available_groups[group].add(pkg)
+
+ upgradable_pkgs = {}
+ rc, stdout, stderr = self.m.run_command(
+ [self.pacman_path, "--query", "--upgrades"], check_rc=False
+ )
+
+ # non-zero exit with nothing in stdout -> nothing to upgrade, all good
+ # stderr can have warnings, so not checked here
+ if rc == 1 and stdout == "":
+ pass # nothing to upgrade
+ elif rc == 0:
+ # Format of lines:
+ # strace 5.14-1 -> 5.15-1
+ # systemd 249.7-1 -> 249.7-2 [ignored]
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ if "[ignored]" in l:
+ continue
+ s = l.split()
+ if len(s) != 4:
+ self.fail(msg="Invalid line: %s" % l)
+
+ pkg = s[0]
+ current = s[1]
+ latest = s[3]
+ upgradable_pkgs[pkg] = VersionTuple(current=current, latest=latest)
+ else:
+ # stuff in stdout but rc!=0, abort
+ self.fail(
+ "Couldn't get list of packages available for upgrade",
+ stdout=stdout,
+ stderr=stderr,
+ rc=rc,
+ )
+
+ pkg_reasons = {}
+ dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--explicit"], check_rc=True)
+ # Format of a line: "pacman 6.0.1-2"
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ pkg = l.split()[0]
+ pkg_reasons[pkg] = "explicit"
+ dummy, stdout, dummy = self.m.run_command([self.pacman_path, "--query", "--deps"], check_rc=True)
+ # Format of a line: "pacman 6.0.1-2"
+ for l in stdout.splitlines():
+ l = l.strip()
+ if not l:
+ continue
+ pkg = l.split()[0]
+ pkg_reasons[pkg] = "dependency"
+
+ return dict(
+ installed_pkgs=installed_pkgs,
+ installed_groups=installed_groups,
+ available_pkgs=available_pkgs,
+ available_groups=available_groups,
+ upgradable_pkgs=upgradable_pkgs,
+ pkg_reasons=pkg_reasons,
+ )
+
+
+def setup_module():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type="list", elements="str", aliases=["pkg", "package"]),
+ state=dict(
+ type="str",
+ default="present",
+ choices=["present", "installed", "latest", "absent", "removed"],
+ ),
+ force=dict(type="bool", default=False),
+ remove_nosave=dict(type="bool", default=False),
+ executable=dict(type="str", default="pacman"),
+ extra_args=dict(type="str", default=""),
+ upgrade=dict(type="bool"),
+ upgrade_extra_args=dict(type="str", default=""),
+ update_cache=dict(type="bool"),
+ update_cache_extra_args=dict(type="str", default=""),
+ reason=dict(type="str", choices=["explicit", "dependency"]),
+ reason_for=dict(type="str", default="new", choices=["new", "all"]),
+ ),
+ required_one_of=[["name", "update_cache", "upgrade"]],
+ mutually_exclusive=[["name", "upgrade"]],
+ supports_check_mode=True,
+ )
+
+ # Split extra_args as the shell would for easier handling later
+ for str_args in ["extra_args", "upgrade_extra_args", "update_cache_extra_args"]:
+ module.params[str_args] = shlex.split(module.params[str_args])
+
+ return module
+
+
+def main():
+
+ Pacman(setup_module()).run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pacman_key.py b/ansible_collections/community/general/plugins/modules/pacman_key.py
new file mode 100644
index 000000000..4d4c4afac
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pacman_key.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, George Rawlinson <george@rawlinson.net.nz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pacman_key
+author:
+ - George Rawlinson (@grawlinson)
+version_added: "3.2.0"
+short_description: Manage pacman's list of trusted keys
+description:
+ - Add or remove gpg keys from the pacman keyring.
+notes:
+ - Use full-length key ID (40 characters).
+ - Keys will be verified when using I(data), I(file), or I(url) unless I(verify) is overridden.
+ - Keys will be locally signed after being imported into the keyring.
+ - If the key ID exists in the keyring, the key will not be added unless I(force_update) is specified.
+ - I(data), I(file), I(url), and I(keyserver) are mutually exclusive.
+requirements:
+ - gpg
+ - pacman-key
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ id:
+ description:
+      - The 40-character identifier of the key.
+ - Including this allows check mode to correctly report the changed state.
+      - Do not specify a subkey ID; specify the primary key ID instead.
+ required: true
+ type: str
+ data:
+ description:
+ - The keyfile contents to add to the keyring.
+ - Must be of C(PGP PUBLIC KEY BLOCK) type.
+ type: str
+ file:
+ description:
+ - The path to a keyfile on the remote server to add to the keyring.
+ - Remote file must be of C(PGP PUBLIC KEY BLOCK) type.
+ type: path
+ url:
+ description:
+      - The URL to retrieve the keyfile from.
+ - Remote file must be of C(PGP PUBLIC KEY BLOCK) type.
+ type: str
+ keyserver:
+ description:
+      - The keyserver used to retrieve the key from.
+ type: str
+ verify:
+ description:
+      - Whether or not to verify the keyfile's key ID against the specified key ID.
+ type: bool
+ default: true
+ force_update:
+ description:
+ - This forces the key to be updated if it already exists in the keyring.
+ type: bool
+ default: false
+ keyring:
+ description:
+ - The full path to the keyring folder on the remote server.
+      - If not specified, the module will use pacman's default (C(/etc/pacman.d/gnupg)).
+ - Useful if the remote system requires an alternative gnupg directory.
+ type: path
+ default: /etc/pacman.d/gnupg
+ state:
+ description:
+ - Ensures that the key is present (added) or absent (revoked).
+ default: present
+ choices: [ absent, present ]
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Import a key via local file
+ community.general.pacman_key:
+ data: "{{ lookup('file', 'keyfile.asc') }}"
+ state: present
+
+- name: Import a key via remote file
+ community.general.pacman_key:
+ file: /tmp/keyfile.asc
+ state: present
+
+- name: Import a key via url
+ community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
+ url: https://domain.tld/keys/keyfile.asc
+ state: present
+
+- name: Import a key via keyserver
+ community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
+ keyserver: keyserver.domain.tld
+
+- name: Import a key into an alternative keyring
+ community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
+ file: /tmp/keyfile.asc
+ keyring: /etc/pacman.d/gnupg-alternative
+
+- name: Remove a key from the keyring
+ community.general.pacman_key:
+ id: 01234567890ABCDE01234567890ABCDE12345678
+ state: absent
+'''
+
+RETURN = r''' # '''
+
+import os.path
+import tempfile
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+class PacmanKey(object):
+ def __init__(self, module):
+ self.module = module
+ # obtain binary paths for gpg & pacman-key
+ self.gpg = module.get_bin_path('gpg', required=True)
+ self.pacman_key = module.get_bin_path('pacman-key', required=True)
+
+ # obtain module parameters
+ keyid = module.params['id']
+ url = module.params['url']
+ data = module.params['data']
+ file = module.params['file']
+ keyserver = module.params['keyserver']
+ verify = module.params['verify']
+ force_update = module.params['force_update']
+ keyring = module.params['keyring']
+ state = module.params['state']
+ self.keylength = 40
+
+ # sanitise key ID & check if key exists in the keyring
+ keyid = self.sanitise_keyid(keyid)
+ key_present = self.key_in_keyring(keyring, keyid)
+
+ # check mode
+ if module.check_mode:
+ if state == "present":
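+                # Report a change when the key is missing, or when it is
+                # present and force_update would re-import it.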
+ changed = (key_present and force_update) or not key_present
+ module.exit_json(changed=changed)
+ elif state == "absent":
+ if key_present:
+ module.exit_json(changed=True)
+ module.exit_json(changed=False)
+
+ if state == "present":
+ if key_present and not force_update:
+ module.exit_json(changed=False)
+
+ if data:
+ file = self.save_key(data)
+ self.add_key(keyring, file, keyid, verify)
+ module.exit_json(changed=True)
+ elif file:
+ self.add_key(keyring, file, keyid, verify)
+ module.exit_json(changed=True)
+ elif url:
+ data = self.fetch_key(url)
+ file = self.save_key(data)
+ self.add_key(keyring, file, keyid, verify)
+ module.exit_json(changed=True)
+ elif keyserver:
+ self.recv_key(keyring, keyid, keyserver)
+ module.exit_json(changed=True)
+ elif state == "absent":
+ if key_present:
+ self.remove_key(keyring, keyid)
+ module.exit_json(changed=True)
+ module.exit_json(changed=False)
+
+ def is_hexadecimal(self, string):
+ """Check if a given string is valid hexadecimal"""
+ try:
+ int(string, 16)
+ except ValueError:
+ return False
+ return True
+
+ def sanitise_keyid(self, keyid):
+ """Sanitise given key ID.
+
+        Strips surrounding whitespace, removes internal spaces, uppercases all characters, and removes any `0X` sequences.
+ """
+ sanitised_keyid = keyid.strip().upper().replace(' ', '').replace('0X', '')
+ if len(sanitised_keyid) != self.keylength:
+ self.module.fail_json(msg="key ID is not full-length: %s" % sanitised_keyid)
+ if not self.is_hexadecimal(sanitised_keyid):
+ self.module.fail_json(msg="key ID is not hexadecimal: %s" % sanitised_keyid)
+ return sanitised_keyid
+
+ def fetch_key(self, url):
+ """Downloads a key from url"""
+ response, info = fetch_url(self.module, url)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to fetch key at %s, error was %s" % (url, info['msg']))
+ return to_native(response.read())
+
+ def recv_key(self, keyring, keyid, keyserver):
+ """Receives key via keyserver"""
+ cmd = [self.pacman_key, '--gpgdir', keyring, '--keyserver', keyserver, '--recv-keys', keyid]
+ self.module.run_command(cmd, check_rc=True)
+ self.lsign_key(keyring, keyid)
+
+ def lsign_key(self, keyring, keyid):
+ """Locally sign key"""
+ cmd = [self.pacman_key, '--gpgdir', keyring]
+ self.module.run_command(cmd + ['--lsign-key', keyid], check_rc=True)
+
+ def save_key(self, data):
+ "Saves key data to a temporary file"
+ tmpfd, tmpname = tempfile.mkstemp()
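+        # Register the temporary file so AnsibleModule removes it on exit.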
+ self.module.add_cleanup_file(tmpname)
+ tmpfile = os.fdopen(tmpfd, "w")
+ tmpfile.write(data)
+ tmpfile.close()
+ return tmpname
+
+ def add_key(self, keyring, keyfile, keyid, verify):
+ """Add key to pacman's keyring"""
+ if verify:
+ self.verify_keyfile(keyfile, keyid)
+ cmd = [self.pacman_key, '--gpgdir', keyring, '--add', keyfile]
+ self.module.run_command(cmd, check_rc=True)
+ self.lsign_key(keyring, keyid)
+
+ def remove_key(self, keyring, keyid):
+ """Remove key from pacman's keyring"""
+ cmd = [self.pacman_key, '--gpgdir', keyring, '--delete', keyid]
+ self.module.run_command(cmd, check_rc=True)
+
+ def verify_keyfile(self, keyfile, keyid):
+ """Verify that keyfile matches the specified key ID"""
+ if keyfile is None:
+ self.module.fail_json(msg="expected a key, got none")
+ elif keyid is None:
+ self.module.fail_json(msg="expected a key ID, got none")
+
+ rc, stdout, stderr = self.module.run_command(
+ [
+ self.gpg,
+ '--with-colons',
+ '--with-fingerprint',
+ '--batch',
+ '--no-tty',
+ '--show-keys',
+ keyfile
+ ],
+ check_rc=True,
+ )
+
+ extracted_keyid = None
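+        # In gpg --with-colons output, "fpr" records carry the full
+        # fingerprint in field 10 (index 9).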
+ for line in stdout.splitlines():
+ if line.startswith('fpr:'):
+ extracted_keyid = line.split(':')[9]
+ break
+
+ if extracted_keyid != keyid:
+ self.module.fail_json(msg="key ID does not match. expected %s, got %s" % (keyid, extracted_keyid))
+
+ def key_in_keyring(self, keyring, keyid):
+ "Check if the key ID is in pacman's keyring"
+ rc, stdout, stderr = self.module.run_command(
+ [
+ self.gpg,
+ '--with-colons',
+ '--batch',
+ '--no-tty',
+ '--no-default-keyring',
+ '--keyring=%s/pubring.gpg' % keyring,
+ '--list-keys', keyid
+ ],
+ check_rc=False,
+ )
+ if rc != 0:
+ if stderr.find("No public key") >= 0:
+ return False
+ else:
+ self.module.fail_json(msg="gpg returned an error: %s" % stderr)
+ return True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ id=dict(type='str', required=True),
+ data=dict(type='str'),
+ file=dict(type='path'),
+ url=dict(type='str'),
+ keyserver=dict(type='str'),
+ verify=dict(type='bool', default=True),
+ force_update=dict(type='bool', default=False),
+ keyring=dict(type='path', default='/etc/pacman.d/gnupg'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=(('data', 'file', 'url', 'keyserver'),),
+ required_if=[('state', 'present', ('data', 'file', 'url', 'keyserver'), True)],
+ )
+ PacmanKey(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty.py b/ansible_collections/community/general/plugins/modules/pagerduty.py
new file mode 100644
index 000000000..bed3629be
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pagerduty.py
@@ -0,0 +1,288 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty
+short_description: Create PagerDuty maintenance windows
+description:
+  - This module will let you create PagerDuty maintenance windows.
+author:
+ - "Andrew Newdigate (@suprememoocow)"
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+ - "Bruce Pennypacker (@bpennypacker)"
+requirements:
+ - PagerDuty API access
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Create a maintenance window or get a list of ongoing windows.
+ required: true
+ choices: [ "running", "started", "ongoing", "absent" ]
+ name:
+ type: str
+ description:
+      - PagerDuty unique subdomain. Obsolete. It is not used with the PagerDuty REST v2 API.
+ user:
+ type: str
+ description:
+      - PagerDuty user ID. Obsolete. Please use I(token) for authorization.
+ token:
+ type: str
+ description:
+      - A PagerDuty token, generated on the PagerDuty site. It is used for authorization.
+ required: true
+ requester_id:
+ type: str
+ description:
+      - ID of the user making the request. Only needed when creating a maintenance window.
+ service:
+ type: list
+ elements: str
+ description:
+ - A comma separated list of PagerDuty service IDs.
+ aliases: [ services ]
+ window_id:
+ type: str
+ description:
+      - ID of the maintenance window. Only needed when removing (I(state=absent)) a maintenance window.
+ hours:
+ type: str
+ description:
+ - Length of maintenance window in hours.
+ default: '1'
+ minutes:
+ type: str
+ description:
+ - Maintenance window in minutes (this is added to the hours).
+ default: '0'
+ desc:
+ type: str
+ description:
+ - Short description of maintenance window.
+ default: Created by Ansible
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+- name: List ongoing maintenance windows using a token
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ state: ongoing
+
+- name: Create a 1 hour maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ token: yourtoken
+ state: running
+ service: FOO123
+
+- name: Create a 5 minute maintenance window for service FOO123
+ community.general.pagerduty:
+ name: companyabc
+ token: xxxxxxxxxxxxxx
+ hours: 0
+ minutes: 5
+ state: running
+ service: FOO123
+
+- name: Create a 4 hour maintenance window for service FOO123 with the description "deployment"
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: running
+ service: FOO123
+ hours: 4
+ desc: deployment
+ register: pd_window
+
+- name: Delete the previous maintenance window
+ community.general.pagerduty:
+ name: companyabc
+ user: example@example.com
+ state: absent
+ window_id: '{{ pd_window.result.maintenance_window.id }}'
+
+# Delete a maintenance window from a playbook other than the one that created it,
+# assuming it is the only existing maintenance window
+- name: Check
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: ongoing
+ register: pd_window
+
+- name: Delete
+ community.general.pagerduty:
+ requester_id: XXXXXXX
+ token: yourtoken
+ state: absent
+ window_id: "{{ pd_window.result.maintenance_windows[0].id }}"
+'''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+class PagerDutyRequest(object):
+ def __init__(self, module, name, user, token):
+ self.module = module
+ self.name = name
+ self.user = user
+ self.token = token
+ self.headers = {
+ 'Content-Type': 'application/json',
+ "Authorization": self._auth_header(),
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ def ongoing(self, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows?filter=ongoing"
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers)
+ if info['status'] != 200:
+ self.module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, False
+
+ def create(self, requester_id, service, hours, minutes, desc, http_call=fetch_url):
+ if not requester_id:
+ self.module.fail_json(msg="requester_id is required when maintenance window should be created")
+
+ url = 'https://api.pagerduty.com/maintenance_windows'
+
+ headers = dict(self.headers)
+ headers.update({'From': requester_id})
+
+ start, end = self._compute_start_end_time(hours, minutes)
+ services = self._create_services_payload(service)
+
+ request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'services': services}}
+
+ data = json.dumps(request_data)
+ response, info = http_call(self.module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 201:
+ self.module.fail_json(msg="failed to create the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _create_services_payload(self, service):
+ if (isinstance(service, list)):
+ return [{'id': s, 'type': 'service_reference'} for s in service]
+ else:
+ return [{'id': service, 'type': 'service_reference'}]
+
+ def _compute_start_end_time(self, hours, minutes):
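+        # Illustrative only (example values, not from the module's docs): with
+        # hours='1' and minutes='30' at 2023-01-01 12:00:00 UTC, this returns
+        # start='2023-01-01T12:00:00Z' and end='2023-01-01T13:30:00Z'.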
+ now = datetime.datetime.utcnow()
+ later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
+ start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
+ end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
+ return start, end
+
+ def absent(self, window_id, http_call=fetch_url):
+ url = "https://api.pagerduty.com/maintenance_windows/" + window_id
+ headers = dict(self.headers)
+
+ response, info = http_call(self.module, url, headers=headers, method='DELETE')
+ if info['status'] != 204:
+ self.module.fail_json(msg="failed to delete the window: %s" % info['msg'])
+
+ json_out = self._read_response(response)
+
+ return False, json_out, True
+
+ def _auth_header(self):
+ return "Token token=%s" % self.token
+
+ def _read_response(self, response):
+ try:
+ return json.loads(response.read())
+ except Exception:
+ return ""
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'started', 'ongoing', 'absent']),
+ name=dict(required=False),
+ user=dict(required=False),
+ token=dict(required=True, no_log=True),
+ service=dict(required=False, type='list', elements='str', aliases=["services"]),
+ window_id=dict(required=False),
+ requester_id=dict(required=False),
+ hours=dict(default='1', required=False), # @TODO change to int?
+ minutes=dict(default='0', required=False), # @TODO change to int?
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs=dict(default=True, type='bool'),
+ )
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ user = module.params['user']
+ service = module.params['service']
+ window_id = module.params['window_id']
+ hours = module.params['hours']
+ minutes = module.params['minutes']
+ token = module.params['token']
+ desc = module.params['desc']
+ requester_id = module.params['requester_id']
+
+ pd = PagerDutyRequest(module, name, user, token)
+
+ if state == "running" or state == "started":
+ if not service:
+ module.fail_json(msg="service not specified")
+ (rc, out, changed) = pd.create(requester_id, service, hours, minutes, desc)
+ if rc == 0:
+ changed = True
+
+ if state == "ongoing":
+ (rc, out, changed) = pd.ongoing()
+
+ if state == "absent":
+ (rc, out, changed) = pd.absent(window_id)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_alert.py b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
new file mode 100644
index 000000000..45bec92c6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_alert.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pagerduty_alert
+short_description: Trigger, acknowledge or resolve PagerDuty incidents
+description:
+ - This module will let you trigger, acknowledge, or resolve a PagerDuty incident by sending events.
+author:
+ - "Amanpreet Singh (@ApsOps)"
+requirements:
+ - PagerDuty API access
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - PagerDuty unique subdomain. Obsolete. It is not used with PagerDuty REST v2 API.
+ service_id:
+ type: str
+ description:
+ - ID of the PagerDuty service where incidents will be triggered, acknowledged, or resolved.
+ required: true
+ service_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services. Obsolete. Please use I(integration_key).
+ integration_key:
+ type: str
+ description:
+ - The GUID of one of your "Generic API" services.
+ - This is the "integration key" listed on a "Integrations" tab of PagerDuty service.
+ state:
+ type: str
+ description:
+ - Type of event to be sent.
+ required: true
+ choices:
+ - 'triggered'
+ - 'acknowledged'
+ - 'resolved'
+ api_key:
+ type: str
+ description:
+ - The PagerDuty API key (read-only access), generated on the PagerDuty site.
+ required: true
+ desc:
+ type: str
+ description:
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
+ - For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
+ required: false
+ default: Created via Ansible
+ incident_key:
+ type: str
+ description:
+ - Identifies the incident to which this I(state) should be applied.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ required: false
+ client:
+ type: str
+ description:
+ - The name of the monitoring client that is triggering this event.
+ required: false
+ client_url:
+ type: str
+ description:
+ - The URL of the monitoring client that is triggering this event.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Trigger an incident with just the basic options
+ community.general.pagerduty_alert:
+ name: companyabc
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+
+- name: Trigger an incident with more options
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: triggered
+ desc: problem that led to this trigger
+ incident_key: somekey
+ client: Sample Monitoring Service
+ client_url: http://service.example.com
+
+- name: Acknowledge an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: acknowledged
+ incident_key: somekey
+ desc: "some text for incident's log"
+
+- name: Resolve an incident based on incident_key
+ community.general.pagerduty_alert:
+ integration_key: xxx
+ api_key: yourapikey
+ service_id: PDservice
+ state: resolved
+ incident_key: somekey
+ desc: "some text for incident's log"
+'''
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import urlparse, urlencode, urlunparse
+
+
+def check(module, name, state, service_id, integration_key, api_key, incident_key=None, http_call=fetch_url):
+ url = 'https://api.pagerduty.com/incidents'
+ headers = {
+ "Content-type": "application/json",
+ "Authorization": "Token token=%s" % api_key,
+ 'Accept': 'application/vnd.pagerduty+json;version=2'
+ }
+
+ params = {
+ 'service_ids[]': service_id,
+ 'sort_by': 'incident_number:desc',
+ 'time_zone': 'UTC'
+ }
+ if incident_key:
+ params['incident_key'] = incident_key
+
+ url_parts = list(urlparse(url))
+ url_parts[4] = urlencode(params, True)
+
+ url = urlunparse(url_parts)
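+    # Index 4 of the urlparse() result is the query string, so the request URL
+    # ends up looking like (illustrative, shown unescaped for readability):
+    # https://api.pagerduty.com/incidents?service_ids[]=PDservice&sort_by=incident_number:desc&time_zone=UTC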
+
+ response, info = http_call(module, url, method='get', headers=headers)
+
+ if info['status'] != 200:
+ module.fail_json(msg="failed to check current incident status."
+ "Reason: %s" % info['msg'])
+
+ incidents = json.loads(response.read())["incidents"]
+ msg = "No corresponding incident"
+
+ if len(incidents) == 0:
+ if state in ('acknowledged', 'resolved'):
+ return msg, False
+ return msg, True
+ elif state != incidents[0]["status"]:
+ return incidents[0], True
+
+ return incidents[0], False
+
+
+def send_event(module, service_key, event_type, desc,
+ incident_key=None, client=None, client_url=None):
+ url = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
+ headers = {
+ "Content-type": "application/json"
+ }
+
+ data = {
+ "service_key": service_key,
+ "event_type": event_type,
+ "incident_key": incident_key,
+ "description": desc,
+ "client": client,
+ "client_url": client_url
+ }
+
+ response, info = fetch_url(module, url, method='post',
+ headers=headers, data=json.dumps(data))
+ if info['status'] != 200:
+ module.fail_json(msg="failed to %s. Reason: %s" %
+ (event_type, info['msg']))
+ json_out = json.loads(response.read())
+ return json_out
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ service_id=dict(required=True),
+ service_key=dict(required=False, no_log=True),
+ integration_key=dict(required=False, no_log=True),
+ api_key=dict(required=True, no_log=True),
+ state=dict(required=True,
+ choices=['triggered', 'acknowledged', 'resolved']),
+ client=dict(required=False, default=None),
+ client_url=dict(required=False, default=None),
+ desc=dict(required=False, default='Created via Ansible'),
+ incident_key=dict(required=False, default=None, no_log=False)
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ service_id = module.params['service_id']
+ integration_key = module.params['integration_key']
+ service_key = module.params['service_key']
+ api_key = module.params['api_key']
+ state = module.params['state']
+ client = module.params['client']
+ client_url = module.params['client_url']
+ desc = module.params['desc']
+ incident_key = module.params['incident_key']
+
+ if integration_key is None:
+ if service_key is not None:
+ integration_key = service_key
+ module.warn('"service_key" is obsolete parameter and will be removed.'
+ ' Please, use "integration_key" instead')
+ else:
+ module.fail_json(msg="'integration_key' is required parameter")
+
+ state_event_dict = {
+ 'triggered': 'trigger',
+ 'acknowledged': 'acknowledge',
+ 'resolved': 'resolve'
+ }
+
+ event_type = state_event_dict[state]
+
+ if event_type != 'trigger' and incident_key is None:
+ module.fail_json(msg="incident_key is required for "
+ "acknowledge or resolve events")
+
+ out, changed = check(module, name, state, service_id,
+ integration_key, api_key, incident_key)
+
+ if not module.check_mode and changed is True:
+ out = send_event(module, integration_key, event_type, desc,
+ incident_key, client, client_url)
+
+ module.exit_json(result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_change.py b/ansible_collections/community/general/plugins/modules/pagerduty_change.py
new file mode 100644
index 000000000..6af5d58ea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_change.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Adam Vaughan (@adamvaughan) avaughan@pagerduty.com
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: pagerduty_change
+short_description: Track a code or infrastructure change as a PagerDuty change event
+version_added: 1.3.0
+description:
+ - This module will let you create a PagerDuty change event each time the module is run.
+ - This is not an idempotent action and a new change event will be created each time it is run.
+author:
+ - Adam Vaughan (@adamvaughan)
+requirements:
+ - PagerDuty integration key
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ details:
+ - Check mode simply does nothing except return C(changed=true) when the I(url) seems to be correct.
+ diff_mode:
+ support: none
+options:
+ integration_key:
+ description:
+ - The integration key that identifies the service the change was made to.
+ This can be found by adding an integration to a service in PagerDuty.
+ required: true
+ type: str
+ summary:
+ description:
+ - A short description of the change that occurred.
+ required: true
+ type: str
+ source:
+ description:
+ - The source of the change event.
+ default: Ansible
+ type: str
+ user:
+ description:
+ - The name of the user or process that triggered this deployment.
+ type: str
+ repo:
+ description:
+ - The URL of the project repository.
+ required: false
+ type: str
+ revision:
+ description:
+ - An identifier of the revision being deployed, typically a number or SHA from a version control system.
+ required: false
+ type: str
+ environment:
+ description:
+ - The environment name, typically C(production), C(staging), etc.
+ required: false
+ type: str
+ link_url:
+ description:
+ - A URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ link_text:
+ description:
+ - Descriptive text for a URL where more information about the deployment can be obtained.
+ required: false
+ type: str
+ url:
+ description:
+ - URL to submit the change event to.
+ required: false
+ default: https://events.pagerduty.com/v2/change/enqueue
+ type: str
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target URL will not be validated.
+ This should only be used on personally controlled sites using self-signed certificates.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Track the deployment as a PagerDuty change event
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+
+- name: Track the deployment as a PagerDuty change event with more details
+ community.general.pagerduty_change:
+ integration_key: abc123abc123abc123abc123abc123ab
+ summary: The application was deployed
+ source: Ansible Deploy
+ user: ansible
+ repo: github.com/ansible/ansible
+ revision: '4.2'
+ environment: production
+ link_url: https://github.com/ansible-collections/community.general/pull/1269
+ link_text: View changes on GitHub
+'''
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.basic import AnsibleModule
+from datetime import datetime
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ integration_key=dict(required=True, type='str', no_log=True),
+ summary=dict(required=True, type='str'),
+ source=dict(required=False, default='Ansible', type='str'),
+ user=dict(required=False, type='str'),
+ repo=dict(required=False, type='str'),
+ revision=dict(required=False, type='str'),
+ environment=dict(required=False, type='str'),
+ link_url=dict(required=False, type='str'),
+ link_text=dict(required=False, type='str'),
+ url=dict(required=False,
+ default='https://events.pagerduty.com/v2/change/enqueue', type='str'),
+ validate_certs=dict(default=True, type='bool')
+ ),
+ supports_check_mode=True
+ )
+
+ # API documented at https://developer.pagerduty.com/docs/events-api-v2/send-change-events/
+
+ url = module.params['url']
+ headers = {'Content-Type': 'application/json'}
+
+ if module.check_mode:
+ _response, info = fetch_url(
+ module, url, headers=headers, method='POST')
+
+ if info['status'] == 400:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Checking the PagerDuty change event API returned an unexpected response: %d' % (info['status']))
+
+ custom_details = {}
+
+ if module.params['user']:
+ custom_details['user'] = module.params['user']
+
+ if module.params['repo']:
+ custom_details['repo'] = module.params['repo']
+
+ if module.params['revision']:
+ custom_details['revision'] = module.params['revision']
+
+ if module.params['environment']:
+ custom_details['environment'] = module.params['environment']
+
+ now = datetime.utcnow()
+ timestamp = now.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+ payload = {
+ 'summary': module.params['summary'],
+ 'source': module.params['source'],
+ 'timestamp': timestamp,
+ 'custom_details': custom_details
+ }
+
+ event = {
+ 'routing_key': module.params['integration_key'],
+ 'payload': payload
+ }
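+    # The resulting request body mirrors the fields assembled above; an
+    # illustrative payload (example values only) looks like:
+    # {"routing_key": "abc123...", "payload": {"summary": "...", "source": "Ansible",
+    #  "timestamp": "2023-01-01T12:00:00.000000Z", "custom_details": {"user": "ansible"}}}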
+
+ if module.params['link_url']:
+ link = {
+ 'href': module.params['link_url']
+ }
+
+ if module.params['link_text']:
+ link['text'] = module.params['link_text']
+
+ event['links'] = [link]
+
+ _response, info = fetch_url(
+ module, url, data=module.jsonify(event), headers=headers, method='POST')
+
+ if info['status'] == 202:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(
+ msg='Creating PagerDuty change event failed with %d' % (info['status']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pagerduty_user.py b/ansible_collections/community/general/plugins/modules/pagerduty_user.py
new file mode 100644
index 000000000..9c9805bff
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pagerduty_user.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pagerduty_user
+short_description: Manage a user account on PagerDuty
+description:
+ - This module manages the creation/removal of a user account on PagerDuty.
+version_added: '1.3.0'
+author: Zainab Alsaffar (@zanssa)
+requirements:
+ - pdpyras python module = 4.1.1
+ - PagerDuty API Access
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ access_token:
+ description:
+ - An API access token to authenticate with the PagerDuty REST API.
+ required: true
+ type: str
+ pd_user:
+ description:
+ - Name of the user in PagerDuty.
+ required: true
+ type: str
+ pd_email:
+ description:
+ - The user's email address.
+ - I(pd_email) is the unique identifier used and cannot be updated using this module.
+ required: true
+ type: str
+ pd_role:
+ description:
+ - The user's role.
+ choices: ['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']
+ default: 'responder'
+ type: str
+ state:
+ description:
+ - State of the user.
+ - On C(present), it creates a user if the user doesn't exist.
+ - On C(absent), it removes a user if the account exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+ pd_teams:
+ description:
+ - The teams to which the user belongs.
+ - Required if I(state=present).
+ type: list
+ elements: str
+'''
+
+EXAMPLES = r'''
+- name: Create a user account on PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ pd_role: user_pd_role
+ pd_teams: user_pd_teams
+ state: "present"
+
+- name: Remove a user account from PagerDuty
+ community.general.pagerduty_user:
+ access_token: 'Your_Access_token'
+ pd_user: user_full_name
+ pd_email: user_email
+ state: "absent"
+'''
+
+RETURN = r''' # '''
+
+from os import path
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
+
+with deps.declare("pdpyras", url="https://github.com/PagerDuty/pdpyras"):
+ from pdpyras import APISession, PDClientError
+
+
+class PagerDutyUser(object):
+ def __init__(self, module, session):
+ self._module = module
+ self._apisession = session
+
+ # check if the user exists
+ def does_user_exist(self, pd_email):
+ for user in self._apisession.iter_all('users'):
+ if user['email'] == pd_email:
+ return user['id']
+
+ # create a user account on PD
+ def add_pd_user(self, pd_name, pd_email, pd_role):
+ try:
+ user = self._apisession.persist('users', 'email', {
+ "name": pd_name,
+ "email": pd_email,
+ "type": "user",
+ "role": pd_role,
+ })
+ return user
+
+ except PDClientError as e:
+ if e.response.status_code == 400:
+ self._module.fail_json(
+ msg="Failed to add %s due to invalid argument" % (pd_name))
+ if e.response.status_code == 401:
+ self._module.fail_json(msg="Failed to add %s due to invalid API key" % (pd_name))
+ if e.response.status_code == 402:
+ self._module.fail_json(
+ msg="Failed to add %s because the API token does not permit this action" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to add %s because the API token cannot access the requested resource" % (pd_name))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to add %s because the API rate limit was reached" % (pd_name))
+
+ # delete a user account from PD
+ def delete_user(self, pd_user_id, pd_name):
+ try:
+ user_path = path.join('/users/', pd_user_id)
+ self._apisession.rdelete(user_path)
+
+ except PDClientError as e:
+ if e.response.status_code == 404:
+ self._module.fail_json(
+ msg="Failed to remove %s as user was not found" % (pd_name))
+ if e.response.status_code == 403:
+ self._module.fail_json(
+ msg="Failed to remove %s because the API token cannot access the requested resource" % (pd_name))
+ if e.response.status_code == 401:
+ # print out the list of incidents
+ pd_incidents = self.get_incidents_assigned_to_user(pd_user_id)
+ self._module.fail_json(msg="Failed to remove %s as user has assigned incidents %s" % (pd_name, pd_incidents))
+ if e.response.status_code == 429:
+ self._module.fail_json(
+ msg="Failed to remove %s because the API rate limit was reached" % (pd_name))
+
+ # get incidents assigned to a user
+ def get_incidents_assigned_to_user(self, pd_user_id):
+ incident_info = {}
+ incidents = self._apisession.list_all('incidents', params={'user_ids[]': [pd_user_id]})
+
+ for incident in incidents:
+ incident_info = {
+ 'title': incident['title'],
+ 'key': incident['incident_key'],
+ 'status': incident['status']
+ }
+ return incident_info
+
+ # add a user to a team/teams
+ def add_user_to_teams(self, pd_user_id, pd_teams, pd_role):
+ updated_team = None
+ for team in pd_teams:
+ team_info = self._apisession.find('teams', team, attribute='name')
+ if team_info is not None:
+ try:
+ updated_team = self._apisession.rput('/teams/' + team_info['id'] + '/users/' + pd_user_id, json={
+ 'role': pd_role
+ })
+ except PDClientError:
+ updated_team = None
+ return updated_team
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ access_token=dict(type='str', required=True, no_log=True),
+ pd_user=dict(type='str', required=True),
+ pd_email=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ pd_role=dict(type='str', default='responder',
+ choices=['global_admin', 'manager', 'responder', 'observer', 'stakeholder', 'limited_stakeholder', 'restricted_access']),
+ pd_teams=dict(type='list', elements='str', required=False)),
+ required_if=[['state', 'present', ['pd_teams']], ],
+ supports_check_mode=True,
+ )
+
+ deps.validate(module)
+
+ access_token = module.params['access_token']
+ pd_user = module.params['pd_user']
+ pd_email = module.params['pd_email']
+ state = module.params['state']
+ pd_role = module.params['pd_role']
+ pd_teams = module.params['pd_teams']
+
+ if pd_role:
+ pd_role_gui_value = {
+ 'global_admin': 'admin',
+ 'manager': 'user',
+ 'responder': 'limited_user',
+ 'observer': 'observer',
+ 'stakeholder': 'read_only_user',
+ 'limited_stakeholder': 'read_only_limited_user',
+ 'restricted_access': 'restricted_access'
+ }
+ pd_role = pd_role_gui_value[pd_role]
+
+ # authenticate with PD API
+ try:
+ session = APISession(access_token)
+ except PDClientError as e:
+ module.fail_json(msg="Failed to authenticate with PagerDuty: %s" % e)
+
+ user = PagerDutyUser(module, session)
+
+ user_exists = user.does_user_exist(pd_email)
+
+ if user_exists:
+ if state == "absent":
+ # remove user
+ if not module.check_mode:
+ user.delete_user(user_exists, pd_user)
+ module.exit_json(changed=True, result="Successfully deleted user %s" % pd_user)
+ else:
+ module.exit_json(changed=False, result="User %s already exists." % pd_user)
+
+ # the user does not exist
+ else:
+ if state == "absent":
+ module.exit_json(changed=False, result="User %s was not found." % pd_user)
+
+ else:
+ # add the user with the default notification rule and contact info (email)
+ if not module.check_mode:
+ user.add_pd_user(pd_user, pd_email, pd_role)
+ # get user's id
+ pd_user_id = user.does_user_exist(pd_email)
+ # add the user to the team(s)
+ user.add_user_to_teams(pd_user_id, pd_teams, pd_role)
+ module.exit_json(changed=True, result="Successfully created & added user %s to team %s" % (pd_user, pd_teams))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pam_limits.py b/ansible_collections/community/general/plugins/modules/pam_limits.py
new file mode 100644
index 000000000..dbb70045d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pam_limits.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Sebastien Rohaut <sebastien.rohaut@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: pam_limits
+author:
+- "Sebastien Rohaut (@usawa)"
+short_description: Modify Linux PAM limits
+description:
+ - The C(pam_limits) module modifies PAM limits.
+ - The default file is C(/etc/security/limits.conf).
+ - For the full documentation, see C(man 5 limits.conf).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ version_added: 2.0.0
+ diff_mode:
+ support: full
+ version_added: 2.0.0
+options:
+ domain:
+ type: str
+ description:
+ - A username, @groupname, wildcard, UID/GID range.
+ required: true
+ limit_type:
+ type: str
+ description:
+ - Limit type, see C(man 5 limits.conf) for an explanation.
+ required: true
+ choices: [ "hard", "soft", "-" ]
+ limit_item:
+ type: str
+ description:
+ - The limit to be set.
+ required: true
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
+ value:
+ type: str
+ description:
+ - The value of the limit.
+ - Value must either be C(unlimited), C(infinity) or C(-1), all of which indicate no limit, or a limit of 0 or larger.
+ - Value must be a number in the range -20 to 19 inclusive, if I(limit_item) is set to C(nice) or C(priority).
+ - Refer to the C(man 5 limits.conf) manual pages for more details.
+ required: true
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ required: false
+ type: bool
+ default: false
+ use_min:
+ description:
+ - If set to C(true), the minimal value will be used or conserved.
+ - If the specified value is lower than the value in the file,
+ the file content is replaced with the new value; otherwise the content is not modified.
+ required: false
+ type: bool
+ default: false
+ use_max:
+ description:
+ - If set to C(true), the maximal value will be used or conserved.
+ - If the specified value is higher than the value in the file,
+ the file content is replaced with the new value; otherwise the content is not modified.
+ required: false
+ type: bool
+ default: false
+ dest:
+ type: str
+ description:
+ - Path of the limits.conf file to modify.
+ required: false
+ default: "/etc/security/limits.conf"
+ comment:
+ type: str
+ description:
+ - Comment associated with the limit.
+ required: false
+ default: ''
+notes:
+ - If I(dest) file does not exist, it is created.
+'''
+
+EXAMPLES = r'''
+- name: Add or modify nofile soft limit for the user joe
+ community.general.pam_limits:
+ domain: joe
+ limit_type: soft
+ limit_item: nofile
+ value: 64000
+
+- name: Add or modify fsize hard limit for the user smith. Keep or set the maximal value
+ community.general.pam_limits:
+ domain: smith
+ limit_type: hard
+ limit_item: fsize
+ value: 1000000
+ use_max: true
+
+- name: Add or modify memlock, both soft and hard, limit for the user james with a comment
+ community.general.pam_limits:
+ domain: james
+ limit_type: '-'
+ limit_item: memlock
+ value: unlimited
+ comment: unlimited memory lock for james
+
+- name: Add or modify hard nofile limits for wildcard domain
+ community.general.pam_limits:
+ domain: '*'
+ limit_type: hard
+ limit_item: nofile
+ value: 39693561
+'''
+
+import os
+import re
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def _assert_is_valid_value(module, item, value, prefix=''):
+ if item in ['nice', 'priority']:
+ try:
+ valid = -20 <= int(value) <= 19
+ except ValueError:
+ valid = False
+ if not valid:
+ module.fail_json(msg="%s Value of %r for item %r is invalid. Value must be a number in the range -20 to 19 inclusive. "
+ "Refer to the limits.conf(5) manual pages for more details." % (prefix, value, item))
+ elif not (value in ['unlimited', 'infinity', '-1'] or value.isdigit()):
+ module.fail_json(msg="%s Value of %r for item %r is invalid. Value must either be 'unlimited', 'infinity' or -1, all of "
+ "which indicate no limit, or a limit of 0 or larger. Refer to the limits.conf(5) manual pages for "
+ "more details." % (prefix, value, item))
+
+
+def main():
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
+
+ pam_types = ['soft', 'hard', '-']
+
+ limits_conf = '/etc/security/limits.conf'
+
+ module = AnsibleModule(
+ # not checking because of daisy chain to file module
+ argument_spec=dict(
+ domain=dict(required=True, type='str'),
+ limit_type=dict(required=True, type='str', choices=pam_types),
+ limit_item=dict(required=True, type='str', choices=pam_items),
+ value=dict(required=True, type='str'),
+ use_max=dict(default=False, type='bool'),
+ use_min=dict(default=False, type='bool'),
+ backup=dict(default=False, type='bool'),
+ dest=dict(default=limits_conf, type='str'),
+ comment=dict(required=False, default='', type='str')
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ limit_type = module.params['limit_type']
+ limit_item = module.params['limit_item']
+ value = module.params['value']
+ use_max = module.params['use_max']
+ use_min = module.params['use_min']
+ backup = module.params['backup']
+ limits_conf = module.params['dest']
+ new_comment = module.params['comment']
+
+ changed = False
+
+ if os.path.isfile(limits_conf):
+ if not os.access(limits_conf, os.W_OK):
+ module.fail_json(msg="%s is not writable. Use sudo" % limits_conf)
+ else:
+ limits_conf_dir = os.path.dirname(limits_conf)
+ if os.path.isdir(limits_conf_dir) and os.access(limits_conf_dir, os.W_OK):
+ open(limits_conf, 'a').close()
+ changed = True
+ else:
+ module.fail_json(msg="directory %s is not writable (check presence, access rights, use sudo)" % limits_conf_dir)
+
+ if use_max and use_min:
+ module.fail_json(msg="Cannot use use_min and use_max at the same time.")
+
+ _assert_is_valid_value(module, limit_item, value)
+
+ # Backup
+ if backup:
+ backup_file = module.backup_local(limits_conf)
+
+ space_pattern = re.compile(r'\s+')
+
+ message = ''
+ f = open(limits_conf, 'rb')
+ # Tempfile
+ nf = tempfile.NamedTemporaryFile(mode='w+')
+
+ found = False
+ new_value = value
+
+ for line in f:
+ line = to_native(line, errors='surrogate_or_strict')
+ if line.startswith('#'):
+ nf.write(line)
+ continue
+
+ newline = re.sub(space_pattern, ' ', line).strip()
+ if not newline:
+ nf.write(line)
+ continue
+
+ # Remove comment in line
+ newline = newline.split('#', 1)[0]
+ try:
+ old_comment = line.split('#', 1)[1]
+ except Exception:
+ old_comment = ''
+
+ newline = newline.rstrip()
+
+ if not new_comment:
+ new_comment = old_comment
+
+ line_fields = newline.split(' ')
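+        # A well-formed limits.conf entry splits into exactly four fields; for
+        # example (illustrative), 'joe soft nofile 64000' becomes
+        # ['joe', 'soft', 'nofile', '64000']. Any other line is copied through as-is.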
+
+ if len(line_fields) != 4:
+ nf.write(line)
+ continue
+
+ line_domain = line_fields[0]
+ line_type = line_fields[1]
+ line_item = line_fields[2]
+ actual_value = line_fields[3]
+
+ _assert_is_valid_value(module, line_item, actual_value,
+ prefix="Invalid configuration found in '%s'." % limits_conf)
+
+ # Found the line
+ if line_domain == domain and line_type == limit_type and line_item == limit_item:
+ found = True
+ if value == actual_value:
+ message = line
+ nf.write(line)
+ continue
+
+ if line_item not in ['nice', 'priority']:  # these items take signed values, never 'unlimited'
+ actual_value_unlimited = actual_value in ['unlimited', 'infinity', '-1']
+ value_unlimited = value in ['unlimited', 'infinity', '-1']
+ else:
+ actual_value_unlimited = value_unlimited = False
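+            # Illustrative example: with value='64000' and actual_value='32000',
+            # use_max keeps '64000' while use_min keeps '32000'; 'unlimited',
+            # 'infinity' and '-1' win under use_max and lose under use_min.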
+
+ if use_max:
+ if actual_value_unlimited:
+ new_value = actual_value
+ elif value_unlimited:
+ new_value = value
+ else:
+ new_value = str(max(int(value), int(actual_value)))
+
+ if use_min:
+ if actual_value_unlimited and value_unlimited:
+ new_value = actual_value
+ elif actual_value_unlimited:
+ new_value = value
+ elif value_unlimited:
+ new_value = actual_value
+ else:
+ new_value = str(min(int(value), int(actual_value)))
+
+ # Change line only if value has changed
+ if new_value != actual_value:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+ else:
+ message = line
+ nf.write(line)
+ else:
+ nf.write(line)
+
+ if not found:
+ changed = True
+ if new_comment:
+ new_comment = "\t#" + new_comment
+ new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + new_value + new_comment + "\n"
+ message = new_limit
+ nf.write(new_limit)
+
+ f.close()
+ nf.flush()
+
+ with open(limits_conf, 'r') as content:
+ content_current = content.read()
+
+ with open(nf.name, 'r') as content:
+ content_new = content.read()
+
+ if not module.check_mode:
+ # Copy tempfile to newfile
+ module.atomic_move(nf.name, limits_conf)
+
+ try:
+ nf.close()
+ except Exception:
+ pass
+
+ res_args = dict(
+ changed=changed,
+ msg=message,
+ diff=dict(before=content_current, after=content_new),
+ )
+
+ if backup:
+ res_args['backup_file'] = backup_file
+
+ module.exit_json(**res_args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pamd.py b/ansible_collections/community/general/plugins/modules/pamd.py
new file mode 100644
index 000000000..6ffc8624e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pamd.py
@@ -0,0 +1,853 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Kenneth D. Evensen <kdevensen@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: pamd
+author:
+ - Kenneth D. Evensen (@kevensen)
+short_description: Manage PAM Modules
+description:
+ - Edit PAM service's type, control, module path and module arguments.
+ - In order for a PAM rule to be modified, the type, control and
+ module_path must match an existing rule. See C(man 5 pam.d) for details.
+notes:
+ - This module does not handle authselect profiles.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name generally refers to the PAM service file to
+ change, for example system-auth.
+ type: str
+ required: true
+ type:
+ description:
+ - The type of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ control:
+ description:
+ - The control of the PAM rule being modified.
+ - This may be a complicated control with brackets. If this is the case, be
+ sure to put "[bracketed controls]" in quotes.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ module_path:
+ description:
+ - The module path of the PAM rule being modified.
+ - The C(type), C(control) and C(module_path) all must match a rule to be modified.
+ type: str
+ required: true
+ new_type:
+ description:
+ - The new type to assign to the new rule.
+ type: str
+ choices: [ account, -account, auth, -auth, password, -password, session, -session ]
+ new_control:
+ description:
+ - The new control to assign to the new rule.
+ type: str
+ new_module_path:
+ description:
+ - The new module path to be assigned to the new rule.
+ type: str
+ module_arguments:
+ description:
+ - When state is C(updated), the module_arguments will replace existing module_arguments.
+ - When state is C(args_absent) args matching those listed in module_arguments will be removed.
+ - When state is C(args_present) any args listed in module_arguments are added if
+ missing from the existing rule.
+ - Furthermore, if the module argument takes a value denoted by C(=),
+ the value will be changed to that specified in module_arguments.
+ type: list
+ elements: str
+ state:
+ description:
+ - The default of C(updated) will modify an existing rule if type,
+ control and module_path all match an existing rule.
+ - With C(before), the new rule will be inserted before a rule matching type,
+ control and module_path.
+ - Similarly, with C(after), the new rule will be inserted after an existing rule matching type,
+ control and module_path.
+ - With either C(before) or C(after), new_type, new_control, and new_module_path must all be specified.
+ - If state is C(args_absent) or C(args_present), new_type, new_control, and new_module_path will be ignored.
+ - State C(absent) will remove the rule. The 'absent' state was added in Ansible 2.4.
+ type: str
+ choices: [ absent, before, after, args_absent, args_present, updated ]
+ default: updated
+ path:
+ description:
+ - This is the path to the PAM service files.
+ type: path
+ default: /etc/pam.d
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Update pamd rule's control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_control: sufficient
+
+- name: Update pamd rule's complex control in /etc/pam.d/system-auth
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ new_control: '[success=2 default=ignore]'
+
+- name: Insert a new rule before an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ new_type: auth
+ new_control: sufficient
+ new_module_path: pam_faillock.so
+ state: before
+
+- name: Insert a new rule pam_wheel.so with argument 'use_uid' after an \
+ existing rule pam_rootok.so
+ community.general.pamd:
+ name: su
+ type: auth
+ control: sufficient
+ module_path: pam_rootok.so
+ new_type: auth
+ new_control: required
+ new_module_path: pam_wheel.so
+ module_arguments: 'use_uid'
+ state: after
+
+- name: Remove module arguments from an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: ''
+ state: updated
+
+- name: Replace all module arguments in an existing rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'preauth
+ silent
+ deny=3
+ unlock_time=604800
+ fail_interval=900'
+ state: updated
+
+- name: Remove specific arguments from a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_absent
+
+- name: Ensure specific arguments are present in a rule
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments: crond,quiet
+ state: args_present
+
+- name: Ensure specific arguments are present in a rule (alternative)
+ community.general.pamd:
+ name: system-auth
+ type: session
+ control: '[success=1 default=ignore]'
+ module_path: pam_succeed_if.so
+ module_arguments:
+ - crond
+ - quiet
+ state: args_present
+
+- name: Module arguments requiring commas must be listed as a Yaml list
+ community.general.pamd:
+ name: special-module
+ type: account
+ control: required
+ module_path: pam_access.so
+ module_arguments:
+ - listsep=,
+ state: args_present
+
+- name: Update specific argument value in a rule
+ community.general.pamd:
+ name: system-auth
+ type: auth
+ control: required
+ module_path: pam_faillock.so
+ module_arguments: 'fail_interval=300'
+ state: args_present
+
+- name: Add pam common-auth rule for duo
+ community.general.pamd:
+ name: common-auth
+ new_type: auth
+ new_control: '[success=1 default=ignore]'
+ new_module_path: '/lib64/security/pam_duo.so'
+ state: after
+ type: auth
+ module_path: pam_sss.so
+ control: 'requisite'
+'''
+
+RETURN = r'''
+change_count:
+ description: How many rules were changed.
+ type: int
+ sample: 1
+ returned: success
+backupdest:
+ description:
+ - "The file name of the backup file, if created."
+ returned: success
+ type: str
+...
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import os
+import re
+from tempfile import NamedTemporaryFile
+from datetime import datetime
+
+
+RULE_REGEX = re.compile(r"""(?P<rule_type>-?(?:auth|account|session|password))\s+
+ (?P<control>\[.*\]|\S*)\s+
+ (?P<path>\S*)\s*
+ (?P<args>.*)\s*""", re.X)
+RULE_ARG_REGEX = re.compile(r"(\[.*\]|\S*)")
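+# Illustrative parse (example line, not from the module): the rule
+#   'auth        required    pam_faillock.so preauth silent'
+# yields rule_type='auth', control='required', path='pam_faillock.so' and
+# args='preauth silent'.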
+
+VALID_TYPES = ['account', '-account', 'auth', '-auth', 'password', '-password', 'session', '-session']
+
+
+class PamdLine(object):
+
+ def __init__(self, line):
+ self.line = line
+ self.prev = None
+ self.next = None
+
+ @property
+ def is_valid(self):
+ if self.line.strip() == '':
+ return True
+ return False
+
+ def validate(self):
+ if not self.is_valid:
+ return False, "Rule is not valid " + self.line
+ return True, "Rule is valid " + self.line
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return False
+
+ def __str__(self):
+ return str(self.line)
+
+
+class PamdEmptyLine(PamdLine):
+ pass
+
+
+class PamdComment(PamdLine):
+
+ def __init__(self, line):
+ super(PamdComment, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('#'):
+ return True
+ return False
+
+
+class PamdInclude(PamdLine):
+ def __init__(self, line):
+ super(PamdInclude, self).__init__(line)
+
+ @property
+ def is_valid(self):
+ if self.line.startswith('@include'):
+ return True
+ return False
+
+
+class PamdRule(PamdLine):
+
+ valid_simple_controls = ['required', 'requisite', 'sufficient', 'optional', 'include', 'substack', 'definitive']
+ valid_control_values = ['success', 'open_err', 'symbol_err', 'service_err', 'system_err', 'buf_err',
+ 'perm_denied', 'auth_err', 'cred_insufficient', 'authinfo_unavail', 'user_unknown',
+ 'maxtries', 'new_authtok_reqd', 'acct_expired', 'session_err', 'cred_unavail',
+ 'cred_expired', 'cred_err', 'no_module_data', 'conv_err', 'authtok_err',
+ 'authtok_recover_err', 'authtok_lock_busy', 'authtok_disable_aging', 'try_again',
+ 'ignore', 'abort', 'authtok_expired', 'module_unknown', 'bad_item', 'conv_again',
+ 'incomplete', 'default']
+ valid_control_actions = ['ignore', 'bad', 'die', 'ok', 'done', 'reset']
+
+ def __init__(self, rule_type, rule_control, rule_path, rule_args=None):
+ self.prev = None
+ self.next = None
+ self._control = None
+ self._args = None
+ self.rule_type = rule_type
+ self.rule_control = rule_control
+
+ self.rule_path = rule_path
+ self.rule_args = rule_args
+
+ # Method to check if a rule matches the type, control and path.
+ def matches(self, rule_type, rule_control, rule_path, rule_args=None):
+ return (rule_type == self.rule_type and
+ rule_control == self.rule_control and
+ rule_path == self.rule_path)
+
+ @classmethod
+ def rule_from_string(cls, line):
+ rule_match = RULE_REGEX.search(line)
+ rule_args = parse_module_arguments(rule_match.group('args'))
+ return cls(rule_match.group('rule_type'), rule_match.group('control'), rule_match.group('path'), rule_args)
+
+ def __str__(self):
+ if self.rule_args:
+ return '{0: <11}{1} {2} {3}'.format(self.rule_type, self.rule_control, self.rule_path, ' '.join(self.rule_args))
+ return '{0: <11}{1} {2}'.format(self.rule_type, self.rule_control, self.rule_path)
+
+ @property
+ def rule_control(self):
+ if isinstance(self._control, list):
+ return '[' + ' '.join(self._control) + ']'
+ return self._control
+
+ @rule_control.setter
+ def rule_control(self, control):
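+        # Bracketed controls are stored as a list; for example (illustrative),
+        # '[success=1 default=ignore]' becomes ['success=1', 'default=ignore'],
+        # while simple controls such as 'required' stay plain strings.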
+ if control.startswith('['):
+ control = control.replace(' = ', '=').replace('[', '').replace(']', '')
+ self._control = control.split(' ')
+ else:
+ self._control = control
+
+ @property
+ def rule_args(self):
+ if not self._args:
+ return []
+ return self._args
+
+ @rule_args.setter
+ def rule_args(self, args):
+ self._args = parse_module_arguments(args)
+
+ @property
+ def line(self):
+ return str(self)
+
+ @classmethod
+ def is_action_unsigned_int(cls, string_num):
+ number = 0
+ try:
+ number = int(string_num)
+ except ValueError:
+ return False
+
+ if number >= 0:
+ return True
+ return False
+
+ @property
+ def is_valid(self):
+ return self.validate()[0]
+
+ def validate(self):
+ # Validate the rule type
+ if self.rule_type not in VALID_TYPES:
+ return False, "Rule type, " + self.rule_type + ", is not valid in rule " + self.line
+ # Validate the rule control
+ if isinstance(self._control, str) and self.rule_control not in PamdRule.valid_simple_controls:
+ return False, "Rule control, " + self.rule_control + ", is not valid in rule " + self.line
+ elif isinstance(self._control, list):
+ for control in self._control:
+ value, action = control.split("=")
+ if value not in PamdRule.valid_control_values:
+ return False, "Rule control value, " + value + ", is not valid in rule " + self.line
+ if action not in PamdRule.valid_control_actions and not PamdRule.is_action_unsigned_int(action):
+ return False, "Rule control action, " + action + ", is not valid in rule " + self.line
+
+ # TODO: Validate path
+
+ return True, "Rule is valid " + self.line
+
+
+# PamdService encapsulates an entire service and contains one or more rules.
+# It is implemented as a doubly linked list.
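+# Each parsed line keeps 'prev' and 'next' references, so inserting or
+# removing a rule only rewires the neighbouring pointers instead of
+# re-parsing the whole file.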
+class PamdService(object):
+
+ def __init__(self, content):
+ self._head = None
+ self._tail = None
+ for line in content.splitlines():
+ if line.lstrip().startswith('#'):
+ pamd_line = PamdComment(line)
+ elif line.lstrip().startswith('@include'):
+ pamd_line = PamdInclude(line)
+ elif line.strip() == '':
+ pamd_line = PamdEmptyLine(line)
+ else:
+ pamd_line = PamdRule.rule_from_string(line)
+
+ self.append(pamd_line)
+
+ def append(self, pamd_line):
+ if self._head is None:
+ self._head = self._tail = pamd_line
+ else:
+ pamd_line.prev = self._tail
+ pamd_line.next = None
+ self._tail.next = pamd_line
+ self._tail = pamd_line
+
+ def remove(self, rule_type, rule_control, rule_path):
+ current_line = self._head
+ changed = 0
+
+ while current_line is not None:
+            if current_line.matches(rule_type, rule_control, rule_path):
+                # Unlink the matching line, updating the head and tail
+                # references when the removed line sits at either end.
+                if current_line.prev is not None:
+                    current_line.prev.next = current_line.next
+                else:
+                    self._head = current_line.next
+                if current_line.next is not None:
+                    current_line.next.prev = current_line.prev
+                else:
+                    self._tail = current_line.prev
+                changed += 1
+
+ current_line = current_line.next
+ return changed
+
+ def get(self, rule_type, rule_control, rule_path):
+ lines = []
+ current_line = self._head
+ while current_line is not None:
+
+ if isinstance(current_line, PamdRule) and current_line.matches(rule_type, rule_control, rule_path):
+ lines.append(current_line)
+
+ current_line = current_line.next
+
+ return lines
+
+ def has_rule(self, rule_type, rule_control, rule_path):
+ if self.get(rule_type, rule_control, rule_path):
+ return True
+ return False
+
+ def update_rule(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ new_args = parse_module_arguments(new_args, return_none=True)
+
+ changes = 0
+ for current_rule in rules_to_find:
+ rule_changed = False
+ if new_type:
+ if current_rule.rule_type != new_type:
+ rule_changed = True
+ current_rule.rule_type = new_type
+ if new_control:
+ if current_rule.rule_control != new_control:
+ rule_changed = True
+ current_rule.rule_control = new_control
+ if new_path:
+ if current_rule.rule_path != new_path:
+ rule_changed = True
+ current_rule.rule_path = new_path
+ if new_args is not None:
+ if current_rule.rule_args != new_args:
+ rule_changed = True
+ current_rule.rule_args = new_args
+
+ if rule_changed:
+ changes += 1
+
+ return changes
+
+ def insert_before(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist before the existing rule
+ # 2. The new rule exists
+
+ for current_rule in rules_to_find:
+ # Create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ # First we'll get the previous rule.
+ previous_rule = current_rule.prev
+
+ # Next we may have to loop backwards if the previous line is a comment. If it
+ # is, we'll get the previous "rule's" previous.
+ while previous_rule is not None and isinstance(previous_rule, (PamdComment, PamdEmptyLine)):
+ previous_rule = previous_rule.prev
+ # Next we'll see if the previous rule matches what we are trying to insert.
+ if previous_rule is not None and not previous_rule.matches(new_type, new_control, new_path):
+ # First set the original previous rule's next to the new_rule
+ previous_rule.next = new_rule
+ # Second, set the new_rule's previous to the original previous
+ new_rule.prev = previous_rule
+ # Third, set the new rule's next to the current rule
+ new_rule.next = current_rule
+ # Fourth, set the current rule's previous to the new_rule
+ current_rule.prev = new_rule
+
+ changes += 1
+
+ # Handle the case where it is the first rule in the list.
+ elif previous_rule is None:
+ # This is the case where the current rule is not only the first rule
+ # but the first line as well. So we set the head to the new rule
+ if current_rule.prev is None:
+ self._head = new_rule
+ # This case would occur if the previous line was a comment.
+ else:
+ current_rule.prev.next = new_rule
+ new_rule.prev = current_rule.prev
+ new_rule.next = current_rule
+ current_rule.prev = new_rule
+ changes += 1
+
+ return changes
+
+ def insert_after(self, rule_type, rule_control, rule_path,
+ new_type=None, new_control=None, new_path=None, new_args=None):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+ changes = 0
+ # There are two cases to consider.
+ # 1. The new rule doesn't exist after the existing rule
+ # 2. The new rule exists
+ for current_rule in rules_to_find:
+ # First we'll get the next rule.
+ next_rule = current_rule.next
+ # Next we may have to loop forwards if the next line is a comment. If it
+ # is, we'll get the next "rule's" next.
+ while next_rule is not None and isinstance(next_rule, (PamdComment, PamdEmptyLine)):
+ next_rule = next_rule.next
+
+ # First we create a new rule
+ new_rule = PamdRule(new_type, new_control, new_path, new_args)
+ if next_rule is not None and not next_rule.matches(new_type, new_control, new_path):
+ # If the previous rule doesn't match we'll insert our new rule.
+
+ # Second set the original next rule's previous to the new_rule
+ next_rule.prev = new_rule
+ # Third, set the new_rule's next to the original next rule
+ new_rule.next = next_rule
+ # Fourth, set the new rule's previous to the current rule
+ new_rule.prev = current_rule
+ # Fifth, set the current rule's next to the new_rule
+ current_rule.next = new_rule
+
+ changes += 1
+
+ # This is the case where the current_rule is the last in the list
+ elif next_rule is None:
+ new_rule.prev = self._tail
+ new_rule.next = None
+ self._tail.next = new_rule
+ self._tail = new_rule
+
+ current_rule.next = new_rule
+ changes += 1
+
+ return changes
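+
+    # Illustrative sketch (hypothetical rules), not part of the module logic:
+    # inserting
+    #     session required pam_limits.so
+    # before an existing
+    #     session required pam_unix.so
+    # rewires the doubly linked list from
+    #     ... <-> pam_unix.so    to    ... <-> pam_limits.so <-> pam_unix.so
+    # Both insert_before() and insert_after() are idempotent: if the adjacent
+    # rule already matches the new (type, control, path) triple, nothing is
+    # inserted and the change count is not incremented.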
+
+ def add_module_arguments(self, rule_type, rule_control, rule_path, args_to_add):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_add = parse_module_arguments(args_to_add)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ rule_changed = False
+
+ # create some structures to evaluate the situation
+ simple_new_args = set()
+ key_value_new_args = dict()
+
+ for arg in args_to_add:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+                    # Split only on the first '=' so values may contain '='
+                    key, value = arg.split("=", 1)
+ key_value_new_args[key] = value
+ else:
+ simple_new_args.add(arg)
+
+ key_value_new_args_set = set(key_value_new_args)
+
+ simple_current_args = set()
+ key_value_current_args = dict()
+
+ for arg in current_rule.rule_args:
+ if arg.startswith("["):
+ continue
+ elif "=" in arg:
+                    # Split only on the first '=' so values may contain '='
+                    key, value = arg.split("=", 1)
+ key_value_current_args[key] = value
+ else:
+ simple_current_args.add(arg)
+
+ key_value_current_args_set = set(key_value_current_args)
+
+ new_args_to_add = list()
+
+ # Handle new simple arguments
+ if simple_new_args.difference(simple_current_args):
+ for arg in simple_new_args.difference(simple_current_args):
+ new_args_to_add.append(arg)
+
+ # Handle new key value arguments
+ if key_value_new_args_set.difference(key_value_current_args_set):
+ for key in key_value_new_args_set.difference(key_value_current_args_set):
+ new_args_to_add.append(key + '=' + key_value_new_args[key])
+
+ if new_args_to_add:
+ current_rule.rule_args += new_args_to_add
+ rule_changed = True
+
+ # Handle existing key value arguments when value is not equal
+ if key_value_new_args_set.intersection(key_value_current_args_set):
+ for key in key_value_new_args_set.intersection(key_value_current_args_set):
+ if key_value_current_args[key] != key_value_new_args[key]:
+ arg_index = current_rule.rule_args.index(key + '=' + key_value_current_args[key])
+ current_rule.rule_args[arg_index] = str(key + '=' + key_value_new_args[key])
+ rule_changed = True
+
+ if rule_changed:
+ changes += 1
+
+ return changes
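+
+    # Illustrative sketch (hypothetical rule), not part of the module logic:
+    # with current arguments ['sha512', 'rounds=100000'], calling
+    #     service.add_module_arguments('password', 'sufficient', 'pam_unix.so',
+    #                                  ['rounds=65536', 'use_authtok'])
+    # appends the new simple argument and updates the differing key=value pair,
+    # leaving ['sha512', 'rounds=65536', 'use_authtok'] and reporting 1 change.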
+
+ def remove_module_arguments(self, rule_type, rule_control, rule_path, args_to_remove):
+ # Get a list of rules we want to change
+ rules_to_find = self.get(rule_type, rule_control, rule_path)
+
+ args_to_remove = parse_module_arguments(args_to_remove)
+
+ changes = 0
+
+ for current_rule in rules_to_find:
+ # Let's check to see if there are any args to remove by finding the intersection
+ # of the rule's current args and the args_to_remove lists
+ if not list(set(current_rule.rule_args) & set(args_to_remove)):
+ continue
+
+ # There are args to remove, so we create a list of new_args absent the args
+ # to remove.
+ current_rule.rule_args = [arg for arg in current_rule.rule_args if arg not in args_to_remove]
+
+ changes += 1
+
+ return changes
+
+ def validate(self):
+ current_line = self._head
+
+ while current_line is not None:
+ curr_validate = current_line.validate()
+ if not curr_validate[0]:
+ return curr_validate
+ current_line = current_line.next
+ return True, "Module is valid"
+
+ def __str__(self):
+ lines = []
+ current_line = self._head
+
+ mark = "# Updated by Ansible - %s" % datetime.now().isoformat()
+ while current_line is not None:
+ lines.append(str(current_line))
+ current_line = current_line.next
+
+ if len(lines) <= 1:
+ lines.insert(0, "")
+ lines.insert(1, mark)
+ else:
+ if lines[1].startswith("# Updated by Ansible"):
+ lines[1] = mark
+ else:
+ lines.insert(1, mark)
+
+ return '\n'.join(lines) + '\n'
+
+
+def parse_module_arguments(module_arguments, return_none=False):
+ # If args is None, return empty list by default.
+ # But if return_none is True, then return None
+ if module_arguments is None:
+ return None if return_none else []
+ if isinstance(module_arguments, list) and len(module_arguments) == 1 and not module_arguments[0]:
+ return []
+
+ if not isinstance(module_arguments, list):
+ module_arguments = [module_arguments]
+
+ # From this point on, module_arguments is guaranteed to be a list, empty or not
+ parsed_args = []
+
+ re_clear_spaces = re.compile(r"\s*=\s*")
+ for arg in module_arguments:
+ for item in filter(None, RULE_ARG_REGEX.findall(arg)):
+ if not item.startswith("["):
+                item = re_clear_spaces.sub("=", item)
+ parsed_args.append(item)
+
+ return parsed_args
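+
+# Illustrative sketch, not part of the module logic. Assuming RULE_ARG_REGEX
+# tokenizes on whitespace while keeping bracketed arguments intact (as its
+# uses in this module imply):
+#     parse_module_arguments(None)                     -> []
+#     parse_module_arguments(None, return_none=True)   -> None
+#     parse_module_arguments('nullok try_first_pass')  -> ['nullok', 'try_first_pass']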
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True, choices=VALID_TYPES),
+ control=dict(type='str', required=True),
+ module_path=dict(type='str', required=True),
+ new_type=dict(type='str', choices=VALID_TYPES),
+ new_control=dict(type='str'),
+ new_module_path=dict(type='str'),
+ module_arguments=dict(type='list', elements='str'),
+ state=dict(type='str', default='updated', choices=['absent', 'after', 'args_absent', 'args_present', 'before', 'updated']),
+ path=dict(type='path', default='/etc/pam.d'),
+ backup=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_if=[
+ ("state", "args_present", ["module_arguments"]),
+ ("state", "args_absent", ["module_arguments"]),
+ ("state", "before", ["new_control", "new_type", "new_module_path"]),
+ ("state", "after", ["new_control", "new_type", "new_module_path"]),
+ ],
+ )
+ content = str()
+ fname = os.path.join(module.params["path"], module.params["name"])
+
+ # Open the file and read the content or fail
+ try:
+ with open(fname, 'r') as service_file_obj:
+ content = service_file_obj.read()
+ except IOError as e:
+ # If unable to read the file, fail out
+ module.fail_json(msg='Unable to open/read PAM module file %s with error %s.' % (fname, str(e)))
+
+ # Assuming we didn't fail, create the service
+ service = PamdService(content)
+ # Set the action
+ action = module.params['state']
+
+ changes = 0
+
+ # Take action
+ if action == 'updated':
+ changes = service.update_rule(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'before':
+ changes = service.insert_before(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'after':
+ changes = service.insert_after(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['new_type'], module.params['new_control'], module.params['new_module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_absent':
+ changes = service.remove_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'args_present':
+ if [arg for arg in parse_module_arguments(module.params['module_arguments']) if arg.startswith("[")]:
+ module.fail_json(msg="Unable to process bracketed '[' complex arguments with 'args_present'. Please use 'updated'.")
+
+ changes = service.add_module_arguments(module.params['type'], module.params['control'], module.params['module_path'],
+ module.params['module_arguments'])
+ elif action == 'absent':
+ changes = service.remove(module.params['type'], module.params['control'], module.params['module_path'])
+
+ valid, msg = service.validate()
+
+ # If the module is not valid (meaning one of the rules is invalid), we will fail
+ if not valid:
+ module.fail_json(msg=msg)
+
+ result = dict(
+ changed=(changes > 0),
+ change_count=changes,
+ backupdest='',
+ )
+
+ # If not check mode and something changed, backup the original if necessary then write out the file or fail
+ if not module.check_mode and result['changed']:
+ # First, create a backup if desired.
+ if module.params['backup']:
+ result['backupdest'] = module.backup_local(fname)
+        try:
+            temp_file = NamedTemporaryFile(mode='w', dir=module.tmpdir, delete=False)
+            with open(temp_file.name, 'w') as fd:
+                fd.write(str(service))
+        except IOError:
+            # temp_file may not exist yet if NamedTemporaryFile itself failed
+            module.fail_json(msg='Unable to create temporary file in %s' % module.tmpdir)
+
+ module.atomic_move(temp_file.name, os.path.realpath(fname))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/parted.py b/ansible_collections/community/general/plugins/modules/parted.py
new file mode 100644
index 000000000..8e6038180
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/parted.py
@@ -0,0 +1,810 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Fabrizio Colonna <colofabrix@tin.it>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+author:
+ - Fabrizio Colonna (@ColOfAbRiX)
+module: parted
+short_description: Configure block device partitions
+description:
+  - This module allows configuring block device partitions using the C(parted)
+ command line tool. For a full description of the fields and the options
+ check the GNU parted manual.
+requirements:
+ - This module requires C(parted) version 1.8.3 and above.
+ - Option I(align) (except C(undefined)) requires C(parted) 2.1 or above.
+ - If the version of C(parted) is below 3.1, it requires a Linux version running
+ the C(sysfs) file system C(/sys/).
+ - Requires the C(resizepart) command when using the I(resize) parameter.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ device:
+ description:
+ - The block device (disk) where to operate.
+ - Regular files can also be partitioned, but it is recommended to create a
+ loopback device using C(losetup) to easily access its partitions.
+ type: str
+ required: true
+ align:
+ description:
+      - Set alignment for newly created partitions. Use C(undefined) for parted default alignment.
+ type: str
+ choices: [ cylinder, minimal, none, optimal, undefined ]
+ default: optimal
+ number:
+ description:
+ - The partition number being affected.
+ - Required when performing any action on the disk, except fetching information.
+ type: int
+ unit:
+ description:
+ - Selects the current default unit that Parted will use to display
+ locations and capacities on the disk and to interpret those given by the
+        user if they are not suffixed by a unit.
+ - When fetching information about a disk, it is recommended to always specify a unit.
+ type: str
+ choices: [ s, B, KB, KiB, MB, MiB, GB, GiB, TB, TiB, '%', cyl, chs, compact ]
+ default: KiB
+ label:
+ description:
+ - Disk label type or partition table to use.
+ - If I(device) already contains a different label, it will be changed to I(label)
+ and any previous partitions will be lost.
+ - A I(name) must be specified for a C(gpt) partition table.
+ type: str
+ choices: [ aix, amiga, bsd, dvh, gpt, loop, mac, msdos, pc98, sun ]
+ default: msdos
+ part_type:
+ description:
+ - May be specified only with I(label=msdos) or I(label=dvh).
+ - Neither I(part_type) nor I(name) may be used with I(label=sun).
+ type: str
+ choices: [ extended, logical, primary ]
+ default: primary
+ part_start:
+ description:
+ - Where the partition will start as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+        (except C(compact)) and it is case sensitive, e.g. C(10GiB), C(15%).
+ - Using negative values may require setting of I(fs_type) (see notes).
+ type: str
+ default: 0%
+ part_end:
+ description:
+ - Where the partition will end as offset from the beginning of the disk,
+ that is, the "distance" from the start of the disk. Negative numbers
+ specify distance from the end of the disk.
+ - The distance can be specified with all the units supported by parted
+        (except C(compact)) and it is case sensitive, e.g. C(10GiB), C(15%).
+ type: str
+ default: 100%
+ name:
+ description:
+ - Sets the name for the partition number (GPT, Mac, MIPS and PC98 only).
+ type: str
+ flags:
+    description: A list of the flags that have to be set on the partition.
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to create or delete a partition.
+ - If set to C(info) the module will only return the device information.
+ type: str
+ choices: [ absent, present, info ]
+ default: info
+ fs_type:
+ description:
+      - If specified and the partition does not exist, sets the filesystem type for the given partition.
+ - Parameter optional, but see notes below about negative I(part_start) values.
+ type: str
+ version_added: '0.2.0'
+ resize:
+ description:
+ - Call C(resizepart) on existing partitions to match the size specified by I(part_end).
+ type: bool
+ default: false
+ version_added: '1.3.0'
+
+notes:
+ - When fetching information about a new disk and when the version of parted
+ installed on the system is before version 3.1, the module queries the kernel
+ through C(/sys/) to obtain disk information. In this case the units CHS and
+ CYL are not supported.
+  - Negative I(part_start) values were rejected if I(fs_type) was not given.
+    This bug was fixed in parted 3.2.153. If you want to use negative I(part_start),
+    specify I(fs_type) as well or make sure your system contains a newer parted.
+'''
+
+RETURN = r'''
+partition_info:
+ description: Current partition information
+ returned: success
+ type: complex
+ contains:
+ disk:
+ description: Generic device information.
+ type: dict
+ partitions:
+ description: List of device partitions.
+ type: list
+ script:
+ description: parted script executed by module
+ type: str
+ sample: {
+ "disk": {
+ "dev": "/dev/sdb",
+ "logical_block": 512,
+ "model": "VMware Virtual disk",
+ "physical_block": 512,
+ "size": 5.0,
+ "table": "msdos",
+ "unit": "gib"
+ },
+ "partitions": [{
+ "begin": 0.0,
+ "end": 1.0,
+ "flags": ["boot", "lvm"],
+ "fstype": "",
+ "name": "",
+ "num": 1,
+ "size": 1.0
+ }, {
+ "begin": 1.0,
+ "end": 5.0,
+ "flags": [],
+ "fstype": "",
+ "name": "",
+ "num": 2,
+ "size": 4.0
+ }],
+ "script": "unit KiB print "
+ }
+'''
+
+EXAMPLES = r'''
+- name: Create a new ext4 primary partition
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ fs_type: ext4
+
+- name: Remove partition number 1
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: absent
+
+- name: Create a new primary partition with a size of 1GiB
+ community.general.parted:
+ device: /dev/sdb
+ number: 1
+ state: present
+ part_end: 1GiB
+
+- name: Create a new primary partition for LVM
+ community.general.parted:
+ device: /dev/sdb
+ number: 2
+ flags: [ lvm ]
+ state: present
+ part_start: 1GiB
+
+- name: Create a new primary partition with a size of 1GiB at disk's end
+ community.general.parted:
+ device: /dev/sdb
+ number: 3
+ state: present
+ fs_type: ext3
+ part_start: -1GiB
+
+# Example on how to read info and reuse it in subsequent task
+- name: Read device information (always use unit when probing)
+ community.general.parted: device=/dev/sdb unit=MiB
+ register: sdb_info
+
+- name: Remove all partitions from disk
+ community.general.parted:
+ device: /dev/sdb
+ number: '{{ item.num }}'
+ state: absent
+ loop: '{{ sdb_info.partitions }}'
+
+- name: Extend an existing partition to fill all available space
+ community.general.parted:
+ device: /dev/sdb
+ number: "{{ sdb_info.partitions | length }}"
+ part_end: "100%"
+ resize: true
+ state: present
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+import math
+import re
+import os
+
+
+# Reference prefixes (International System of Units and IEC)
+units_si = ['B', 'KB', 'MB', 'GB', 'TB']
+units_iec = ['KiB', 'MiB', 'GiB', 'TiB']
+parted_units = units_si + units_iec + ['s', '%', 'cyl', 'chs', 'compact']
+
+
+def parse_unit(size_str, unit=''):
+ """
+ Parses a string containing a size or boundary information
+ """
+ matches = re.search(r'^(-?[\d.]+) *([\w%]+)?$', size_str)
+ if matches is None:
+ # "<cylinder>,<head>,<sector>" format
+ matches = re.search(r'^(\d+),(\d+),(\d+)$', size_str)
+ if matches is None:
+ module.fail_json(
+ msg="Error interpreting parted size output: '%s'" % size_str
+ )
+
+ size = {
+ 'cylinder': int(matches.group(1)),
+ 'head': int(matches.group(2)),
+ 'sector': int(matches.group(3))
+ }
+ unit = 'chs'
+
+ else:
+ # Normal format: "<number>[<unit>]"
+ if matches.group(2) is not None:
+ unit = matches.group(2)
+
+ size = float(matches.group(1))
+
+ return size, unit
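+
+# Illustrative sketch, not part of the module logic:
+#     parse_unit('10.5GiB')    -> (10.5, 'GiB')
+#     parse_unit('15%')        -> (15.0, '%')
+#     parse_unit('32', 'MiB')  -> (32.0, 'MiB')   # default unit kept
+#     parse_unit('100,50,3')   -> ({'cylinder': 100, 'head': 50, 'sector': 3}, 'chs')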
+
+
+def parse_partition_info(parted_output, unit):
+ """
+ Parses the output of parted and transforms the data into
+ a dictionary.
+
+ Parted Machine Parseable Output:
+    See: https://lists.alioth.debian.org/pipermail/parted-devel/2006-December/000573.html
+ - All lines end with a semicolon (;)
+ - The first line indicates the units in which the output is expressed.
+      CHS, CYL and BYT stand for CHS, Cylinder and Bytes respectively.
+ - The second line is made of disk information in the following format:
+      "path":"size":"transport-type":"logical-sector-size":"physical-sector-size":"partition-table-type":"model-name";
+ - If the first line was either CYL or CHS, the next line will contain
+ information on no. of cylinders, heads, sectors and cylinder size.
+ - Partition information begins from the next line. This is of the format:
+ (for BYT)
+      "number":"begin":"end":"size":"filesystem-type":"partition-name":"flags-set";
+ (for CHS/CYL)
+ "number":"begin":"end":"filesystem-type":"partition-name":"flags-set";
+ """
+ lines = [x for x in parted_output.split('\n') if x.strip() != '']
+
+ # Generic device info
+ generic_params = lines[1].rstrip(';').split(':')
+
+ # The unit is read once, because parted always returns the same unit
+ size, unit = parse_unit(generic_params[1], unit)
+
+ generic = {
+ 'dev': generic_params[0],
+ 'size': size,
+ 'unit': unit.lower(),
+ 'table': generic_params[5],
+ 'model': generic_params[6],
+ 'logical_block': int(generic_params[3]),
+ 'physical_block': int(generic_params[4])
+ }
+
+ # CYL and CHS have an additional line in the output
+ if unit in ['cyl', 'chs']:
+ chs_info = lines[2].rstrip(';').split(':')
+ cyl_size, cyl_unit = parse_unit(chs_info[3])
+ generic['chs_info'] = {
+ 'cylinders': int(chs_info[0]),
+ 'heads': int(chs_info[1]),
+ 'sectors': int(chs_info[2]),
+ 'cyl_size': cyl_size,
+ 'cyl_size_unit': cyl_unit.lower()
+ }
+ lines = lines[1:]
+
+ parts = []
+ for line in lines[2:]:
+ part_params = line.rstrip(';').split(':')
+
+        # CHS uses a different format than BYT, but contrary to what is stated
+        # by the author, CYL is the same as BYT. I've tested this undocumented
+ # behaviour down to parted version 1.8.3, which is the first version
+ # that supports the machine parseable output.
+ if unit != 'chs':
+ size = parse_unit(part_params[3])[0]
+ fstype = part_params[4]
+ name = part_params[5]
+ flags = part_params[6]
+
+ else:
+ size = ""
+ fstype = part_params[3]
+ name = part_params[4]
+ flags = part_params[5]
+
+ parts.append({
+ 'num': int(part_params[0]),
+ 'begin': parse_unit(part_params[1])[0],
+ 'end': parse_unit(part_params[2])[0],
+ 'size': size,
+ 'fstype': fstype,
+ 'name': name,
+ 'flags': [f.strip() for f in flags.split(', ') if f != ''],
+ 'unit': unit.lower(),
+ })
+
+ return {'generic': generic, 'partitions': parts}
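+
+# Illustrative sketch, not part of the module logic. Given machine-parseable
+# output such as (hypothetical disk):
+#     BYT;
+#     /dev/sdb:5.0GiB:scsi:512:512:msdos:VMware Virtual disk;
+#     1:0.00GiB:1.00GiB:1.00GiB:::boot;
+# parse_partition_info() yields generic info like
+#     {'dev': '/dev/sdb', 'size': 5.0, 'unit': 'gib', 'table': 'msdos', ...}
+# and one partition entry like
+#     {'num': 1, 'begin': 0.0, 'end': 1.0, 'size': 1.0, 'flags': ['boot'], ...}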
+
+
+def format_disk_size(size_bytes, unit):
+ """
+ Formats a size in bytes into a different unit, like parted does. It doesn't
+ manage CYL and CHS formats, though.
+    This function has been adapted from
+    https://github.com/Distrotech/parted/blob/279d9d869ff472c52b9ec2e180d568f0c99e30b0/libparted/unit.c
+ """
+ global units_si, units_iec # pylint: disable=global-variable-not-assigned
+
+ unit = unit.lower()
+
+ # Shortcut
+ if size_bytes == 0:
+ return 0.0, 'b'
+
+ # Cases where we default to 'compact'
+ if unit in ['', 'compact', 'cyl', 'chs']:
+ index = max(0, int(
+ (math.log10(size_bytes) - 1.0) / 3.0
+ ))
+ unit = 'b'
+ if index < len(units_si):
+ unit = units_si[index]
+
+    # Find the appropriate multiplier. The lookup must be case insensitive,
+    # since the unit was lowercased above; and because the IEC list has no
+    # 'B' entry, the exponent needs '+ 1' (KiB is 1024**1), matching
+    # convert_to_bytes() below.
+    multiplier = 1.0
+    units_si_lower = [u.lower() for u in units_si]
+    units_iec_lower = [u.lower() for u in units_iec]
+    if unit.lower() in units_si_lower:
+        multiplier = 1000.0 ** units_si_lower.index(unit.lower())
+    elif unit.lower() in units_iec_lower:
+        multiplier = 1024.0 ** (units_iec_lower.index(unit.lower()) + 1)
+
+ output = size_bytes // multiplier * (1 + 1E-16)
+
+ # Corrections to round up as per IEEE754 standard
+ if output < 10:
+ w = output + 0.005
+ elif output < 100:
+ w = output + 0.05
+ else:
+ w = output + 0.5
+
+ if w < 10:
+ precision = 2
+ elif w < 100:
+ precision = 1
+ else:
+ precision = 0
+
+ # Round and return
+ return round(output, precision), unit
+
+
+def convert_to_bytes(size_str, unit):
+ size = float(size_str)
+ multiplier = 1.0
+ if unit in units_si:
+ multiplier = 1000.0 ** units_si.index(unit)
+ elif unit in units_iec:
+ multiplier = 1024.0 ** (units_iec.index(unit) + 1)
+ elif unit in ['', 'compact', 'cyl', 'chs']:
+ # As per format_disk_size, default to compact, which defaults to megabytes
+ multiplier = 1000.0 ** units_si.index("MB")
+
+ output = size * multiplier
+ return int(output)
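+
+# Illustrative sketch, not part of the module logic (values assume the
+# case-insensitive multiplier lookup in format_disk_size above):
+#     format_disk_size(5368709120, 'GiB') -> (5.0, 'gib')
+#     format_disk_size(5368709120, '')    -> (5368.0, 'MB')   # 'compact' default
+#     convert_to_bytes('5.0', 'GiB')      -> 5368709120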
+
+
+def get_unlabeled_device_info(device, unit):
+ """
+ Fetches device information directly from the kernel and it is used when
+ parted cannot work because of a missing label. It always returns a 'unknown'
+ label.
+ """
+ device_name = os.path.basename(device)
+ base = "/sys/block/%s" % device_name
+
+ vendor = read_record(base + "/device/vendor", "Unknown")
+ model = read_record(base + "/device/model", "model")
+ logic_block = int(read_record(base + "/queue/logical_block_size", 0))
+ phys_block = int(read_record(base + "/queue/physical_block_size", 0))
+ size_bytes = int(read_record(base + "/size", 0)) * logic_block
+
+ size, unit = format_disk_size(size_bytes, unit)
+
+ return {
+ 'generic': {
+ 'dev': device,
+ 'table': "unknown",
+ 'size': size,
+ 'unit': unit,
+ 'logical_block': logic_block,
+ 'physical_block': phys_block,
+ 'model': "%s %s" % (vendor, model),
+ },
+ 'partitions': []
+ }
+
+
+def get_device_info(device, unit):
+ """
+ Fetches information about a disk and its partitions and it returns a
+ dictionary.
+ """
+ global module, parted_exec # pylint: disable=global-variable-not-assigned
+
+ # If parted complains about missing labels, it means there are no partitions.
+ # In this case only, use a custom function to fetch information and emulate
+ # parted formats for the unit.
+ label_needed = check_parted_label(device)
+ if label_needed:
+ return get_unlabeled_device_info(device, unit)
+
+ command = "%s -s -m %s -- unit '%s' print" % (parted_exec, device, unit)
+ rc, out, err = module.run_command(command)
+ if rc != 0 and 'unrecognised disk label' not in err:
+ module.fail_json(msg=(
+ "Error while getting device information with parted "
+ "script: '%s'" % command),
+ rc=rc, out=out, err=err
+ )
+
+ return parse_partition_info(out, unit)
+
+
+def check_parted_label(device):
+ """
+ Determines if parted needs a label to complete its duties. Versions prior
+ to 3.1 don't return data when there is no label. For more information see:
+ http://upstream.rosalinux.ru/changelogs/libparted/3.1/changelog.html
+ """
+ global parted_exec # pylint: disable=global-variable-not-assigned
+
+ # Check the version
+ parted_major, parted_minor, dummy = parted_version()
+ if (parted_major == 3 and parted_minor >= 1) or parted_major > 3:
+ return False
+
+ # Older parted versions return a message in the stdout and RC > 0.
+ rc, out, err = module.run_command("%s -s -m %s print" % (parted_exec, device))
+ if rc != 0 and 'unrecognised disk label' in out.lower():
+ return True
+
+ return False
+
+
+def parse_parted_version(out):
+ """
+ Returns version tuple from the output of "parted --version" command
+ """
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ return None, None, None
+
+ # Sample parted versions (see as well test unit):
+ # parted (GNU parted) 3.3
+ # parted (GNU parted) 3.4.5
+ # parted (GNU parted) 3.3.14-dfc61
+ matches = re.search(r'^parted.+\s(\d+)\.(\d+)(?:\.(\d+))?', lines[0].strip())
+
+ if matches is None:
+ return None, None, None
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
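+
+# Illustrative sketch, not part of the module logic:
+#     parse_parted_version('parted (GNU parted) 3.3')          -> (3, 3, 0)
+#     parse_parted_version('parted (GNU parted) 3.3.14-dfc61')  -> (3, 3, 14)
+#     parse_parted_version('garbage')                           -> (None, None, None)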
+
+
+def parted_version():
+ """
+ Returns the major and minor version of parted installed on the system.
+ """
+ global module, parted_exec # pylint: disable=global-variable-not-assigned
+
+ rc, out, err = module.run_command("%s --version" % parted_exec)
+ if rc != 0:
+ module.fail_json(
+ msg="Failed to get parted version.", rc=rc, out=out, err=err
+ )
+
+ (major, minor, rev) = parse_parted_version(out)
+ if major is None:
+ module.fail_json(msg="Failed to get parted version.", rc=0, out=out)
+
+ return major, minor, rev
+
+
+def parted(script, device, align):
+ """
+ Runs a parted script.
+ """
+ global module, parted_exec # pylint: disable=global-variable-not-assigned
+
+ align_option = '-a %s' % align
+ if align == 'undefined':
+ align_option = ''
+
+ if script and not module.check_mode:
+ command = "%s -s -m %s %s -- %s" % (parted_exec, align_option, device, script)
+ rc, out, err = module.run_command(command)
+
+ if rc != 0:
+ module.fail_json(
+ msg="Error while running parted script: %s" % command.strip(),
+ rc=rc, out=out, err=err
+ )
+
+
+def read_record(file_path, default=None):
+ """
+ Reads the first line of a file and returns it.
+ """
+ try:
+ f = open(file_path, 'r')
+ try:
+ return f.readline().strip()
+ finally:
+ f.close()
+ except IOError:
+ return default
+
+
+def part_exists(partitions, attribute, number):
+ """
+ Looks if a partition that has a specific value for a specific attribute
+ actually exists.
+ """
+ return any(
+ part[attribute] and
+ part[attribute] == number for part in partitions
+ )
+
+
+def check_size_format(size_str):
+ """
+ Checks if the input string is an allowed size
+ """
+ size, unit = parse_unit(size_str)
+ return unit in parted_units
+
+
+def main():
+ global module, units_si, units_iec, parted_exec # pylint: disable=global-variable-not-assigned
+
+ changed = False
+ output_script = ""
+ script = ""
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(type='str', required=True),
+ align=dict(type='str', default='optimal', choices=['cylinder', 'minimal', 'none', 'optimal', 'undefined']),
+ number=dict(type='int'),
+
+ # unit <unit> command
+ unit=dict(type='str', default='KiB', choices=parted_units),
+
+ # mklabel <label-type> command
+ label=dict(type='str', default='msdos', choices=['aix', 'amiga', 'bsd', 'dvh', 'gpt', 'loop', 'mac', 'msdos', 'pc98', 'sun']),
+
+ # mkpart <part-type> [<fs-type>] <start> <end> command
+ part_type=dict(type='str', default='primary', choices=['extended', 'logical', 'primary']),
+ part_start=dict(type='str', default='0%'),
+ part_end=dict(type='str', default='100%'),
+ fs_type=dict(type='str'),
+
+ # name <partition> <name> command
+ name=dict(type='str'),
+
+ # set <partition> <flag> <state> command
+ flags=dict(type='list', elements='str'),
+
+ # rm/mkpart command
+ state=dict(type='str', default='info', choices=['absent', 'info', 'present']),
+
+ # resize part
+ resize=dict(type='bool', default=False),
+ ),
+ required_if=[
+ ['state', 'present', ['number']],
+ ['state', 'absent', ['number']],
+ ],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C', 'LC_CTYPE': 'C'}
+
+ # Data extraction
+ device = module.params['device']
+ align = module.params['align']
+ number = module.params['number']
+ unit = module.params['unit']
+ label = module.params['label']
+ part_type = module.params['part_type']
+ part_start = module.params['part_start']
+ part_end = module.params['part_end']
+ name = module.params['name']
+ state = module.params['state']
+ flags = module.params['flags']
+ fs_type = module.params['fs_type']
+ resize = module.params['resize']
+
+ # Parted executable
+ parted_exec = module.get_bin_path('parted', True)
+
+ # Conditioning
+ if number is not None and number < 1:
+        module.fail_json(msg="The partition number must be greater than 0.")
+ if not check_size_format(part_start):
+ module.fail_json(
+            msg="The argument 'part_start' doesn't respect the required format. "
+                "The size unit is case sensitive.",
+ err=parse_unit(part_start)
+ )
+ if not check_size_format(part_end):
+ module.fail_json(
+            msg="The argument 'part_end' doesn't respect the required format. "
+                "The size unit is case sensitive.",
+ err=parse_unit(part_end)
+ )
+
+ # Read the current disk information
+ current_device = get_device_info(device, unit)
+ current_parts = current_device['partitions']
+
+ if state == 'present':
+
+ # Assign label if required
+ mklabel_needed = current_device['generic'].get('table', None) != label
+ if mklabel_needed:
+ script += "mklabel %s " % label
+
+ # Create partition if required
+ if part_type and (mklabel_needed or not part_exists(current_parts, 'num', number)):
+ script += "mkpart %s %s%s %s " % (
+ part_type,
+ '%s ' % fs_type if fs_type is not None else '',
+ part_start,
+ part_end
+ )
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # If partition exists, try to resize
+ if resize and part_exists(current_parts, 'num', number):
+ # Ensure new end is different to current
+ partition = [p for p in current_parts if p['num'] == number][0]
+ current_part_end = convert_to_bytes(partition['end'], unit)
+
+ size, parsed_unit = parse_unit(part_end, unit)
+ if parsed_unit == "%":
+ size = int((int(current_device['generic']['size']) * size) / 100)
+ parsed_unit = unit
+
+ desired_part_end = convert_to_bytes(size, parsed_unit)
+
+ if current_part_end != desired_part_end:
+ script += "resizepart %s %s " % (
+ number,
+ part_end
+ )
+
+ # Execute the script and update the data structure.
+ # This will create the partition for the next steps
+ if script:
+ output_script += script
+ parted(script, device, align)
+ changed = True
+ script = ""
+
+ if not module.check_mode:
+ current_parts = get_device_info(device, unit)['partitions']
+
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ if changed and module.check_mode:
+ partition = {'flags': []} # Empty structure for the check-mode
+ else:
+ partition = [p for p in current_parts if p['num'] == number][0]
+
+ # Assign name to the partition
+ if name is not None and partition.get('name', None) != name:
+ # Wrap double quotes in single quotes so the shell doesn't strip
+ # the double quotes as those need to be included in the arg
+ # passed to parted
+ script += 'name %s \'"%s"\' ' % (number, name)
+
+ # Manage flags
+ if flags:
+ # Parted infers boot with esp, if you assign esp, boot is set
+ # and if boot is unset, esp is also unset.
+ if 'esp' in flags and 'boot' not in flags:
+ flags.append('boot')
+
+ # Compute only the changes in flags status
+ flags_off = list(set(partition['flags']) - set(flags))
+ flags_on = list(set(flags) - set(partition['flags']))
+
+ for f in flags_on:
+ script += "set %s %s on " % (number, f)
+
+ for f in flags_off:
+ script += "set %s %s off " % (number, f)
+
+ # Set the unit of the run
+ if unit and script:
+ script = "unit %s %s" % (unit, script)
+
+ # Execute the script
+ if script:
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'absent':
+ # Remove the partition
+ if part_exists(current_parts, 'num', number) or module.check_mode:
+ script = "rm %s " % number
+ output_script += script
+ changed = True
+ parted(script, device, align)
+
+ elif state == 'info':
+ output_script = "unit '%s' print " % unit
+
+ # Final status of the device
+ final_device_status = get_device_info(device, unit)
+ module.exit_json(
+ changed=changed,
+ disk=final_device_status['generic'],
+ partitions=final_device_status['partitions'],
+ script=output_script.strip()
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pear.py b/ansible_collections/community/general/plugins/modules/pear.py
new file mode 100644
index 000000000..d7cb01b92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pear.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Afterburn <https://github.com/afterburn>
+# Copyright (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
+# Copyright (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pear
+short_description: Manage pear/pecl packages
+description:
+ - Manage PHP packages with the pear package manager.
+author:
+ - Jonathan Lestrelin (@jle64) <jonathan.lestrelin@gmail.com>
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of the package to install, upgrade, or remove.
+ required: true
+ aliases: [pkg]
+ state:
+ type: str
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "installed", "latest", "absent", "removed"]
+ executable:
+ type: path
+ description:
+ - Path to the pear executable.
+ prompts:
+ description:
+ - List of regular expressions that can be used to detect prompts during pear package installation to answer the expected question.
+ - Prompts will be processed in the same order as the packages list.
+      - You can optionally specify an answer to any question in the list.
+ - If no answer is provided, the list item will only contain the regular expression.
+ - "To specify an answer, the item will be a dict with the regular expression as key and the answer as value C(my_regular_expression: 'an_answer')."
+ - You can provide a list containing items with or without answer.
+      - A prompt list can be shorter or longer than the packages list, but a length mismatch will issue a warning.
+      - If you want to specify that a package in the middle of a list will not need prompts, use C(null) for it.
+ type: list
+ elements: raw
+ version_added: 0.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install pear package
+ community.general.pear:
+ name: Net_URL2
+ state: present
+
+- name: Install pecl package
+ community.general.pear:
+ name: pecl/json_post
+ state: present
+
+- name: Install pecl package with expected prompt
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]
+
+- name: Install pecl package with expected prompt and an answer
+ community.general.pear:
+ name: pecl/apcu
+ state: present
+ prompts:
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once with prompts.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, additional prompts will be ignored.
+    If there are more packages than prompts, the extra packages will be installed without any expected prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+      - I am a test prompt because gnupg doesn't ask anything
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Install multiple pear/pecl packages at once skipping the first prompt.
+    Prompts will be processed in the same order as the packages.
+    If there are more prompts than packages, additional prompts will be ignored.
+    If there are more packages than prompts, the extra packages will be installed without any expected prompt.
+ community.general.pear:
+ name: pecl/gnupg, pecl/apcu
+ state: present
+ prompts:
+ - null
+ - (.*)Enable internal debugging in APCu \[no\]: "yes"
+
+- name: Upgrade package
+ community.general.pear:
+ name: Net_URL2
+ state: latest
+
+- name: Remove packages
+ community.general.pear:
+ name: Net_URL2,pecl/json_post
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_local_version(pear_output):
+    """Take pear remote-info output and get the installed version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Installed ' in line:
+ installed = line.rsplit(None, 1)[-1].strip()
+ if installed == '-':
+ continue
+ return installed
+ return None
+
+
+def _get_pear_path(module):
+ if module.params['executable'] and os.path.isfile(module.params['executable']):
+ result = module.params['executable']
+ else:
+ result = module.get_bin_path('pear', True, [module.params['executable']])
+ return result
+
+
+def get_repository_version(pear_output):
+ """Take pear remote-info output and get the latest version"""
+ lines = pear_output.split('\n')
+ for line in lines:
+ if 'Latest ' in line:
+ return line.rsplit(None, 1)[-1].strip()
+ return None
+
+
+def query_package(module, name, state="present"):
+ """Query the package status in both the local system and the repository.
+ Returns a boolean to indicate if the package is installed,
+ and a second boolean to indicate if the package is up-to-date."""
+ if state == "present":
+ lcmd = "%s info %s" % (_get_pear_path(module), name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if lrc != 0:
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s remote-info %s" % (_get_pear_path(module), name)
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
+
+ # get the version installed locally (if any)
+ lversion = get_local_version(rstdout)
+
+ # get the version in the repository
+ rversion = get_repository_version(rstdout)
+
+ if rrc == 0:
+ # Return True to indicate that the package is installed locally,
+ # and the result of the version number comparison
+ # to determine if the package is up-to-date.
+ return True, (lversion == rversion)
+
+ return False, False
+
+
+def remove_packages(module, packages):
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, package)
+ if not installed:
+ continue
+
+ cmd = "%s uninstall %s" % (_get_pear_path(module), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s: %s" % (package, to_text(stdout + stderr)))
+
+ remove_c += 1
+
+    if remove_c > 0:
+        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, state, packages, prompts):
+ install_c = 0
+ has_prompt = bool(prompts)
+ default_stdin = "\n"
+
+ if has_prompt:
+ nb_prompts = len(prompts)
+ nb_packages = len(packages)
+
+ if nb_prompts > 0 and (nb_prompts != nb_packages):
+ if nb_prompts > nb_packages:
+ diff = nb_prompts - nb_packages
+ msg = "%s packages to install but %s prompts to expect. %s prompts will be ignored" % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ else:
+ diff = nb_packages - nb_prompts
+ msg = "%s packages to install but only %s prompts to expect. %s packages won't be expected to have a prompt" \
+ % (to_text(nb_packages), to_text(nb_prompts), to_text(diff))
+ module.warn(msg)
+
+ # Preparing prompts answer according to item type
+ tmp_prompts = []
+ for _item in prompts:
+        # If the current item is a dict then we expect its key to be the prompt regex and its value to be the answer
+        # We also expect here that the dict only has ONE key and the first key will be taken
+ if isinstance(_item, dict):
+ key = list(_item.keys())[0]
+ answer = _item[key] + "\n"
+
+ tmp_prompts.append((key, answer))
+ elif not _item:
+ tmp_prompts.append((None, default_stdin))
+ else:
+ tmp_prompts.append((_item, default_stdin))
+ prompts = tmp_prompts
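+
+    # Illustrative sketch, not part of the module logic: a prompts list such as
+    #     [{'(.*)\\[no\\]': 'yes'}, None, 'Enable feature']
+    # is normalized above to
+    #     [('(.*)\\[no\\]', 'yes\n'), (None, '\n'), ('Enable feature', '\n')]
+    # i.e. dict items become (regex, answer + newline), null items expect no
+    # prompt, and bare strings get the default newline answer.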
+ for i, package in enumerate(packages):
+ # if the package is installed and state == present
+ # or state == latest and is up-to-date then skip
+ installed, updated = query_package(module, package)
+ if installed and (state == 'present' or (state == 'latest' and updated)):
+ continue
+
+ if state == 'present':
+ command = 'install'
+
+ if state == 'latest':
+ command = 'upgrade'
+
+ if has_prompt and i < len(prompts):
+ prompt_regex = prompts[i][0]
+ data = prompts[i][1]
+ else:
+ prompt_regex = None
+ data = default_stdin
+
+ cmd = "%s %s %s" % (_get_pear_path(module), command, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False, prompt_regex=prompt_regex, data=data, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="failed to install %s: %s" % (package, to_text(stdout + stderr)))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already installed")
+
+
+def check_packages(module, packages, state):
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state))
+ else:
+        module.exit_json(changed=False, msg="package(s) already %s" % state)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
+ executable=dict(default=None, required=False, type='path'),
+ prompts=dict(default=None, required=False, type='list', elements='raw'),
+ ),
+ supports_check_mode=True)
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, p['state'], pkgs, p["prompts"])
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pids.py b/ansible_collections/community/general/plugins/modules/pids.py
new file mode 100644
index 000000000..665adb142
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pids.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pids
+description: "Retrieves a list of PIDs of a given process name in Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
+short_description: Retrieves a list of process IDs if the process is running, otherwise returns an empty list
+author:
+ - Saranya Sridharan (@saranyasridharan)
+requirements:
+  - psutil (Python module)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description: The name of the process(es) you want to get PID(s) for.
+ type: str
+ pattern:
+ description: The pattern (regular expression) to match the process(es) you want to get PID(s) for.
+ type: str
+ version_added: 3.0.0
+ ignore_case:
+ description: Ignore case in pattern if using the I(pattern) option.
+ type: bool
+ default: false
+ version_added: 3.0.0
+'''
+
+EXAMPLES = r'''
+# Pass the process name
+- name: Getting process IDs of the process
+ community.general.pids:
+ name: python
+ register: pids_of_python
+
+- name: Printing the process IDs obtained
+ ansible.builtin.debug:
+    msg: "PIDs of python: {{ pids_of_python.pids | join(',') }}"
+
+- name: Getting process IDs of processes matching pattern
+ community.general.pids:
+ pattern: python(2(\.7)?|3(\.6)?)?\s+myapp\.py
+ register: myapp_pids
+'''
+
+RETURN = '''
+pids:
+ description: Process IDs of the given process
+ returned: list of none, one, or more process IDs
+ type: list
+ sample: [100,200]
+'''
+
+import abc
+import re
+from os.path import basename
+
+from ansible.module_utils import six
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+with deps.declare("psutil"):
+ import psutil
+
+
+class PSAdapterError(Exception):
+ pass
+
+
+@six.add_metaclass(abc.ABCMeta)
+class PSAdapter(object):
+ NAME_ATTRS = ('name', 'cmdline')
+ PATTERN_ATTRS = ('name', 'exe', 'cmdline')
+
+ def __init__(self, psutil):
+ self._psutil = psutil
+
+ @staticmethod
+ def from_package(psutil):
+ version = LooseVersion(psutil.__version__)
+ if version < LooseVersion('2.0.0'):
+ return PSAdapter100(psutil)
+ elif version < LooseVersion('5.3.0'):
+ return PSAdapter200(psutil)
+ else:
+ return PSAdapter530(psutil)
+
+ def get_pids_by_name(self, name):
+ return [p.pid for p in self._process_iter(*self.NAME_ATTRS) if self._has_name(p, name)]
+
+ def _process_iter(self, *attrs):
+ return self._psutil.process_iter()
+
+ def _has_name(self, proc, name):
+ attributes = self._get_proc_attributes(proc, *self.NAME_ATTRS)
+ return (compare_lower(attributes['name'], name) or
+ attributes['cmdline'] and compare_lower(attributes['cmdline'][0], name))
+
+ def _get_proc_attributes(self, proc, *attributes):
+ return dict((attribute, self._get_attribute_from_proc(proc, attribute)) for attribute in attributes)
+
+ @staticmethod
+ @abc.abstractmethod
+ def _get_attribute_from_proc(proc, attribute):
+ pass
+
+ def get_pids_by_pattern(self, pattern, ignore_case):
+ flags = 0
+ if ignore_case:
+ flags |= re.I
+
+ try:
+ regex = re.compile(pattern, flags)
+ except re.error as e:
+ raise PSAdapterError("'%s' is not a valid regular expression: %s" % (pattern, to_native(e)))
+
+ return [p.pid for p in self._process_iter(*self.PATTERN_ATTRS) if self._matches_regex(p, regex)]
+
+ def _matches_regex(self, proc, regex):
+ # See https://psutil.readthedocs.io/en/latest/#find-process-by-name for more information
+ attributes = self._get_proc_attributes(proc, *self.PATTERN_ATTRS)
+ matches_name = regex.search(to_native(attributes['name']))
+ matches_exe = attributes['exe'] and regex.search(basename(to_native(attributes['exe'])))
+ matches_cmd = attributes['cmdline'] and regex.search(to_native(' '.join(attributes['cmdline'])))
+
+ return any([matches_name, matches_exe, matches_cmd])
+
+
+class PSAdapter100(PSAdapter):
+ def __init__(self, psutil):
+ super(PSAdapter100, self).__init__(psutil)
+
+ @staticmethod
+ def _get_attribute_from_proc(proc, attribute):
+ return getattr(proc, attribute)
+
+
+class PSAdapter200(PSAdapter):
+ def __init__(self, psutil):
+ super(PSAdapter200, self).__init__(psutil)
+
+ @staticmethod
+ def _get_attribute_from_proc(proc, attribute):
+ method = getattr(proc, attribute)
+ return method()
+
+
+class PSAdapter530(PSAdapter):
+ def __init__(self, psutil):
+ super(PSAdapter530, self).__init__(psutil)
+
+ def _process_iter(self, *attrs):
+ return self._psutil.process_iter(attrs=attrs)
+
+ @staticmethod
+ def _get_attribute_from_proc(proc, attribute):
+ return proc.info[attribute]
+
+
+def compare_lower(a, b):
+ if a is None or b is None:
+ # this could just be "return False" but would lead to surprising behavior if both a and b are None
+ return a == b
+
+ return a.lower() == b.lower()
+
+
+class Pids(object):
+ def __init__(self, module):
+
+ deps.validate(module)
+
+ self._ps = PSAdapter.from_package(psutil)
+
+ self._module = module
+ self._name = module.params['name']
+ self._pattern = module.params['pattern']
+ self._ignore_case = module.params['ignore_case']
+
+ self._pids = []
+
+ def execute(self):
+ if self._name:
+ self._pids = self._ps.get_pids_by_name(self._name)
+ else:
+ try:
+ self._pids = self._ps.get_pids_by_pattern(self._pattern, self._ignore_case)
+ except PSAdapterError as e:
+ self._module.fail_json(msg=to_native(e))
+
+ return self._module.exit_json(**self.result)
+
+ @property
+ def result(self):
+ return {
+ 'pids': self._pids,
+ }
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type="str"),
+ pattern=dict(type="str"),
+ ignore_case=dict(type="bool", default=False),
+ ),
+ required_one_of=[
+ ('name', 'pattern')
+ ],
+ mutually_exclusive=[
+ ('name', 'pattern')
+ ],
+ supports_check_mode=True,
+ )
+
+ Pids(module).execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pingdom.py b/ansible_collections/community/general/plugins/modules/pingdom.py
new file mode 100644
index 000000000..bd4826a78
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pingdom.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: pingdom
+short_description: Pause/unpause Pingdom alerts
+description:
+  - This module will let you pause/unpause Pingdom alerts.
+author:
+ - "Dylan Silva (@thaumos)"
+ - "Justin Johns (!UNKNOWN)"
+requirements:
+ - "This pingdom python library: https://github.com/mbabineau/pingdom-python"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Define whether or not the check should be running or paused.
+ required: true
+ choices: [ "running", "paused", "started", "stopped" ]
+ checkid:
+ type: str
+ description:
+ - Pingdom ID of the check.
+ required: true
+ uid:
+ type: str
+ description:
+ - Pingdom user ID.
+ required: true
+ passwd:
+ type: str
+ description:
+ - Pingdom user password.
+ required: true
+ key:
+ type: str
+ description:
+ - Pingdom API key.
+ required: true
+notes:
+ - This module does not yet have support to add/remove checks.
+'''
+
+EXAMPLES = '''
+- name: Pause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: paused
+
+- name: Unpause the check with the ID of 12345
+ community.general.pingdom:
+ uid: example@example.com
+ passwd: password123
+ key: apipassword123
+ checkid: 12345
+ state: running
+'''
+
+import traceback
+
+PINGDOM_IMP_ERR = None
+try:
+ import pingdom
+ HAS_PINGDOM = True
+except Exception:
+ PINGDOM_IMP_ERR = traceback.format_exc()
+ HAS_PINGDOM = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+def pause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=True)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "paused": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def unpause(checkid, uid, passwd, key):
+
+ c = pingdom.PingdomConnection(uid, passwd, key)
+ c.modify_check(checkid, paused=False)
+ check = c.get_check(checkid)
+ name = check.name
+ result = check.status
+ # if result != "up": # api output buggy - accept raw exception for now
+ # return (True, name, result)
+ return (False, name, result)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']),
+ checkid=dict(required=True),
+ uid=dict(required=True),
+ passwd=dict(required=True, no_log=True),
+ key=dict(required=True, no_log=True),
+ )
+ )
+
+ if not HAS_PINGDOM:
+ module.fail_json(msg=missing_required_lib("pingdom"), exception=PINGDOM_IMP_ERR)
+
+ checkid = module.params['checkid']
+ state = module.params['state']
+ uid = module.params['uid']
+ passwd = module.params['passwd']
+ key = module.params['key']
+
+ if (state == "paused" or state == "stopped"):
+ (rc, name, result) = pause(checkid, uid, passwd, key)
+
+ if (state == "running" or state == "started"):
+ (rc, name, result) = unpause(checkid, uid, passwd, key)
+
+ if rc != 0:
+ module.fail_json(checkid=checkid, name=name, status=result)
+
+ module.exit_json(checkid=checkid, name=name, status=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pip_package_info.py b/ansible_collections/community/general/plugins/modules/pip_package_info.py
new file mode 100644
index 000000000..2cde7218d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pip_package_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# started out with AWX's scan_packages module
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: pip_package_info
+short_description: Pip package information
+description:
+ - Return information about installed pip packages
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ clients:
+ description:
+ - A list of the pip executables that will be used to get the packages.
+ They can be supplied with the full path or just the executable name, for example C(pip3.7).
+ default: ['pip']
+ required: false
+ type: list
+ elements: path
+requirements:
+ - The requested pip executables must be installed on the target.
+author:
+ - Matthew Jones (@matburt)
+ - Brian Coca (@bcoca)
+ - Adam Miller (@maxamillion)
+'''
+
+EXAMPLES = '''
+- name: Just get the list from default pip
+ community.general.pip_package_info:
+
+- name: Get the facts for default pip, pip2 and pip3.6
+ community.general.pip_package_info:
+ clients: ['pip', 'pip2', 'pip3.6']
+
+- name: Get from specific paths (virtualenvs?)
+ community.general.pip_package_info:
+ clients: '/home/me/projec42/python/pip3.5'
+'''
+
+RETURN = '''
+packages:
+  description: A dictionary of installed package data.
+ returned: always
+ type: dict
+ contains:
+ python:
+ description: A dictionary with each pip client which then contains a list of dicts with python package information
+ returned: always
+ type: dict
+ sample:
+ "packages": {
+ "pip": {
+ "Babel": [
+ {
+ "name": "Babel",
+ "source": "pip",
+ "version": "2.6.0"
+ }
+ ],
+ "Flask": [
+ {
+ "name": "Flask",
+ "source": "pip",
+ "version": "1.0.2"
+ }
+ ],
+ "Flask-SQLAlchemy": [
+ {
+ "name": "Flask-SQLAlchemy",
+ "source": "pip",
+ "version": "2.3.2"
+ }
+ ],
+ "Jinja2": [
+ {
+ "name": "Jinja2",
+ "source": "pip",
+ "version": "2.10"
+ }
+ ],
+ },
+ }
+'''
+import json
+import os
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.facts.packages import CLIMgr
+
+
+class PIP(CLIMgr):
+
+ def __init__(self, pip, module):
+
+ self.CLI = pip
+ self.module = module
+
+ def list_installed(self):
+ rc, out, err = self.module.run_command([self._cli, 'list', '-l', '--format=json'])
+ if rc != 0:
+ raise Exception("Unable to list packages rc=%s : %s" % (rc, err))
+ return json.loads(out)
+
+ def get_package_details(self, package):
+ package['source'] = self.CLI
+ return package
+
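+
+    # Illustrative sketch, not part of the module logic: 'pip list -l
+    # --format=json' emits JSON such as
+    #     [{"name": "Babel", "version": "2.6.0"}]
+    # which list_installed() parses into a list of dicts, and
+    # get_package_details() then tags each entry with its client, e.g.
+    #     {"name": "Babel", "version": "2.6.0", "source": "pip"}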
+
+def main():
+
+ # start work
+ module = AnsibleModule(
+ argument_spec=dict(
+ clients=dict(type='list', elements='path', default=['pip']),
+ ),
+ supports_check_mode=True)
+ packages = {}
+ results = {'packages': {}}
+ clients = module.params['clients']
+
+ found = 0
+ for pip in clients:
+
+ if not os.path.basename(pip).startswith('pip'):
+ module.warn('Skipping invalid pip client: %s' % (pip))
+ continue
+ try:
+ pip_mgr = PIP(pip, module)
+ if pip_mgr.is_available():
+ found += 1
+ packages[pip] = pip_mgr.get_packages()
+ except Exception as e:
+ module.warn('Failed to retrieve packages with %s: %s' % (pip, to_text(e)))
+ continue
+
+ if found == 0:
+ module.fail_json(msg='Unable to use any of the supplied pip clients: %s' % clients)
+
+ # return info
+ results['packages'] = packages
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
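
The module shells out to each client with C(pip list -l --format=json) and keys the results by client name. A sketch of the same collection step, assuming only that a modern C(pip) emits a JSON array of objects with C(name) and C(version) fields, as the RETURN sample above shows:

    #!/usr/bin/env python3
    # Sketch: gather installed packages the way pip_package_info does,
    # via `pip list -l --format=json`. Requires a pip client on PATH.
    import json
    import subprocess

    def list_installed(cli="pip"):
        out = subprocess.run(
            [cli, "list", "-l", "--format=json"],
            capture_output=True, text=True, check=True,
        ).stdout
        packages = {}
        for pkg in json.loads(out):
            pkg["source"] = cli  # mirrors get_package_details()
            packages.setdefault(pkg["name"], []).append(pkg)
        return packages

    if __name__ == "__main__":
        for name, entries in sorted(list_installed().items()):
            print(name, entries[0]["version"])
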
diff --git a/ansible_collections/community/general/plugins/modules/pipx.py b/ansible_collections/community/general/plugins/modules/pipx.py
new file mode 100644
index 000000000..dfa2f4300
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pipx.py
@@ -0,0 +1,330 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pipx
+short_description: Manages applications installed with pipx
+version_added: 3.8.0
+description:
+ - Manage Python applications installed in isolated virtualenvs using pipx.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ state:
+ type: str
+ choices: [present, absent, install, uninstall, uninstall_all, inject, upgrade, upgrade_all, reinstall, reinstall_all, latest]
+ default: install
+ description:
+ - Desired state for the application.
+ - The states C(present) and C(absent) are aliases to C(install) and C(uninstall), respectively.
+ - The state C(latest) is equivalent to executing the task twice, with state C(install) and then C(upgrade).
+ It was added in community.general 5.5.0.
+ name:
+ type: str
+ description:
+ - >
+      The name of the application to be installed. It must be a simple package name.
+ For passing package specifications or installing from URLs or directories,
+ please use the I(source) option.
+ source:
+ type: str
+ description:
+ - >
+      The application source, such as a package with a version specifier, a URL,
+      a directory, or any other specification accepted by C(pipx). See the C(pipx) documentation for more details.
+ - When specified, the C(pipx) command will use I(source) instead of I(name).
+ install_apps:
+ description:
+ - Add apps from the injected packages.
+ - Only used when I(state=inject).
+ type: bool
+ default: false
+ version_added: 6.5.0
+ install_deps:
+ description:
+ - Include applications of dependent packages.
+ - Only used when I(state=install), I(state=latest), or I(state=inject).
+ type: bool
+ default: false
+ inject_packages:
+ description:
+ - Packages to be injected into an existing virtual environment.
+ - Only used when I(state=inject).
+ type: list
+ elements: str
+ force:
+ description:
+ - Force modification of the application's virtual environment. See C(pipx) for details.
+ - Only used when I(state=install), I(state=upgrade), I(state=upgrade_all), I(state=latest), or I(state=inject).
+ type: bool
+ default: false
+ include_injected:
+ description:
+ - Upgrade the injected packages along with the application.
+ - Only used when I(state=upgrade), I(state=upgrade_all), or I(state=latest).
+ - This is used with I(state=upgrade) and I(state=latest) since community.general 6.6.0.
+ type: bool
+ default: false
+ index_url:
+ description:
+ - Base URL of Python Package Index.
+ - Only used when I(state=install), I(state=upgrade), I(state=latest), or I(state=inject).
+ type: str
+ python:
+ description:
+ - Python version to be used when creating the application virtual environment. Must be 3.6+.
+ - Only used when I(state=install), I(state=latest), I(state=reinstall), or I(state=reinstall_all).
+ type: str
+ system_site_packages:
+ description:
+ - Give application virtual environment access to the system site-packages directory.
+ - Only used when I(state=install) or I(state=latest).
+ type: bool
+ default: false
+ version_added: 6.6.0
+ executable:
+ description:
+ - Path to the C(pipx) installed in the system.
+ - >
+ If not specified, the module will use C(python -m pipx) to run the tool,
+      using the same Python interpreter as Ansible itself.
+ type: path
+ editable:
+ description:
+ - Install the project in editable mode.
+ type: bool
+ default: false
+ version_added: 4.6.0
+ pip_args:
+ description:
+ - Arbitrary arguments to pass directly to C(pip).
+ type: str
+ version_added: 4.6.0
+notes:
+  - This module does not install the C(pipx) Python package; however, that can easily be done with the module M(ansible.builtin.pip).
+ - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
+ - >
+ This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
+ passed using the R(environment Ansible keyword, playbooks_environment).
+ - This module requires C(pipx) version 0.16.2.1 or above.
+ - Please note that C(pipx) requires Python 3.6 or above.
+ - >
+ This first implementation does not verify whether a specified version constraint has been installed or not.
+    Hence, when using version operators, the C(pipx) module will always attempt the operation,
+ even when the application was previously installed.
+ This feature will be added in the future.
+ - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
+author:
+ - "Alexei Znamensky (@russoz)"
+'''
+
+EXAMPLES = '''
+- name: Install tox
+ community.general.pipx:
+ name: tox
+
+- name: Install tox from git repository
+ community.general.pipx:
+ name: tox
+ source: git+https://github.com/tox-dev/tox.git
+
+- name: Upgrade tox
+ community.general.pipx:
+ name: tox
+ state: upgrade
+
+- name: Reinstall black with specific Python version
+ community.general.pipx:
+ name: black
+ state: reinstall
+ python: 3.7
+
+- name: Uninstall pycowsay
+ community.general.pipx:
+ name: pycowsay
+ state: absent
+'''
+
+
+import json
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner
+
+from ansible.module_utils.facts.compat import ansible_facts
+
+
+class PipX(StateModuleHelper):
+    output_params = ['name', 'source', 'index_url', 'force', 'install_deps']
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', default='install',
+ choices=['present', 'absent', 'install', 'uninstall', 'uninstall_all',
+ 'inject', 'upgrade', 'upgrade_all', 'reinstall', 'reinstall_all', 'latest']),
+ name=dict(type='str'),
+ source=dict(type='str'),
+ install_apps=dict(type='bool', default=False),
+ install_deps=dict(type='bool', default=False),
+ inject_packages=dict(type='list', elements='str'),
+ force=dict(type='bool', default=False),
+ include_injected=dict(type='bool', default=False),
+ index_url=dict(type='str'),
+ python=dict(type='str'),
+ system_site_packages=dict(type='bool', default=False),
+ executable=dict(type='path'),
+ editable=dict(type='bool', default=False),
+ pip_args=dict(type='str'),
+ ),
+ required_if=[
+ ('state', 'present', ['name']),
+ ('state', 'install', ['name']),
+ ('state', 'absent', ['name']),
+ ('state', 'uninstall', ['name']),
+ ('state', 'upgrade', ['name']),
+ ('state', 'reinstall', ['name']),
+ ('state', 'latest', ['name']),
+ ('state', 'inject', ['name', 'inject_packages']),
+ ],
+ supports_check_mode=True,
+ )
+
+ def _retrieve_installed(self):
+ def process_list(rc, out, err):
+ if not out:
+ return {}
+
+ results = {}
+ raw_data = json.loads(out)
+ for venv_name, venv in raw_data['venvs'].items():
+ results[venv_name] = {
+ 'version': venv['metadata']['main_package']['package_version'],
+ 'injected': dict(
+ (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items()
+ ),
+ }
+ return results
+
+ installed = self.runner('_list', output_process=process_list).run(_list=1)
+
+ if self.vars.name is not None:
+ app_list = installed.get(self.vars.name)
+ if app_list:
+ return {self.vars.name: app_list}
+ else:
+ return {}
+
+ return installed
+
+ def __init_module__(self):
+ if self.vars.executable:
+ self.command = [self.vars.executable]
+ else:
+ facts = ansible_facts(self.module, gather_subset=['python'])
+ self.command = [facts['python']['executable'], '-m', 'pipx']
+ self.runner = pipx_runner(self.module, self.command)
+
+ self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
+
+ def __quit_module__(self):
+ self.vars.application = self._retrieve_installed()
+
+ def _capture_results(self, ctx):
+ self.vars.stdout = ctx.results_out
+ self.vars.stderr = ctx.results_err
+ self.vars.cmd = ctx.cmd
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+ def state_install(self):
+ if not self.vars.application or self.vars.force:
+ self.changed = True
+ with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
+ ctx.run(name_source=[self.vars.name, self.vars.source])
+ self._capture_results(ctx)
+
+ state_present = state_install
+
+ def state_upgrade(self):
+ if not self.vars.application:
+ self.do_raise("Trying to upgrade a non-existent application: {0}".format(self.vars.name))
+ if self.vars.force:
+ self.changed = True
+
+ with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_uninstall(self):
+ if self.vars.application:
+ with self.runner('state name', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ state_absent = state_uninstall
+
+ def state_reinstall(self):
+ if not self.vars.application:
+ self.do_raise("Trying to reinstall a non-existent application: {0}".format(self.vars.name))
+ self.changed = True
+ with self.runner('state name python', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_inject(self):
+ if not self.vars.application:
+ self.do_raise("Trying to inject packages into a non-existent application: {0}".format(self.vars.name))
+ if self.vars.force:
+ self.changed = True
+ with self.runner('state index_url install_apps install_deps force editable pip_args name inject_packages', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_uninstall_all(self):
+ with self.runner('state', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_reinstall_all(self):
+ with self.runner('state python', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_upgrade_all(self):
+ if self.vars.force:
+ self.changed = True
+ with self.runner('state include_injected force', check_mode_skip=True) as ctx:
+ ctx.run()
+ self._capture_results(ctx)
+
+ def state_latest(self):
+ if not self.vars.application or self.vars.force:
+ self.changed = True
+ with self.runner('state index_url install_deps force python system_site_packages editable pip_args name_source', check_mode_skip=True) as ctx:
+ ctx.run(state='install', name_source=[self.vars.name, self.vars.source])
+ self._capture_results(ctx)
+
+ with self.runner('state include_injected index_url force editable pip_args name', check_mode_skip=True) as ctx:
+ ctx.run(state='upgrade')
+ self._capture_results(ctx)
+
+
+def main():
+ PipX.execute()
+
+
+if __name__ == '__main__':
+ main()
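
Both this module and M(community.general.pipx_info) build their view of installed applications from C(pipx list --json), whose payload nests each virtualenv under C(venvs.<name>.metadata). A sketch of the traversal performed by _retrieve_installed(), run against a hard-coded sample payload instead of a live C(pipx):

    #!/usr/bin/env python3
    # Sketch of the `pipx list --json` traversal in _retrieve_installed().
    import json

    SAMPLE = json.dumps({
        "venvs": {
            "tox": {
                "metadata": {
                    "main_package": {"package_version": "3.24.0"},
                    "injected_packages": {
                        "virtualenv": {"package_version": "20.4.7"},
                    },
                },
            },
        },
    })

    def process_list(out):
        if not out:
            return {}
        results = {}
        for venv_name, venv in json.loads(out)["venvs"].items():
            meta = venv["metadata"]
            results[venv_name] = {
                "version": meta["main_package"]["package_version"],
                "injected": {k: v["package_version"]
                             for k, v in meta["injected_packages"].items()},
            }
        return results

    if __name__ == "__main__":
        print(process_list(SAMPLE))
        # {'tox': {'version': '3.24.0', 'injected': {'virtualenv': '20.4.7'}}}
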
diff --git a/ansible_collections/community/general/plugins/modules/pipx_info.py b/ansible_collections/community/general/plugins/modules/pipx_info.py
new file mode 100644
index 000000000..e2bb7fdae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pipx_info.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pipx_info
+short_description: Retrieves information about applications installed with pipx
+version_added: 5.6.0
+description:
+ - Retrieve details about Python applications installed in isolated virtualenvs using pipx.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ name:
+ description:
+ - Name of an application installed with C(pipx).
+ type: str
+ include_deps:
+ description:
+ - Include dependent packages in the output.
+ type: bool
+ default: false
+ include_injected:
+ description:
+ - Include injected packages in the output.
+ type: bool
+ default: false
+ include_raw:
+ description:
+      - Return the raw output of C(pipx list --json).
+ - The raw output is not affected by I(include_deps) or I(include_injected).
+ type: bool
+ default: false
+ executable:
+ description:
+ - Path to the C(pipx) installed in the system.
+ - >
+ If not specified, the module will use C(python -m pipx) to run the tool,
+      using the same Python interpreter as Ansible itself.
+ type: path
+notes:
+  - This module does not install the C(pipx) Python package; however, that can easily be done with the module M(ansible.builtin.pip).
+ - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module.
+ - >
+ This module will honor C(pipx) environment variables such as but not limited to C(PIPX_HOME) and C(PIPX_BIN_DIR)
+ passed using the R(environment Ansible keyword, playbooks_environment).
+ - This module requires C(pipx) version 0.16.2.1 or above.
+ - Please note that C(pipx) requires Python 3.6 or above.
+ - See also the C(pipx) documentation at U(https://pypa.github.io/pipx/).
+author:
+ - "Alexei Znamensky (@russoz)"
+'''
+
+EXAMPLES = '''
+- name: Retrieve all installed applications
+ community.general.pipx_info: {}
+
+- name: Retrieve all installed applications, including dependencies and injected packages
+ community.general.pipx_info:
+ include_deps: true
+ include_injected: true
+
+- name: Retrieve application tox
+ community.general.pipx_info:
+ name: tox
+ include_deps: true
+
+- name: Retrieve application ansible-lint, including dependencies
+ community.general.pipx_info:
+ name: ansible-lint
+ include_deps: true
+'''
+
+RETURN = '''
+application:
+  description: The list of installed applications.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ name:
+ description: The name of the installed application.
+ returned: success
+ type: str
+ sample: "tox"
+ version:
+ description: The version of the installed application.
+ returned: success
+ type: str
+ sample: "3.24.0"
+ dependencies:
+ description: The dependencies of the installed application, when I(include_deps=true).
+ returned: success
+ type: list
+ elements: str
+ sample: ["virtualenv"]
+ injected:
+ description: The injected packages for the installed application, when I(include_injected=true).
+ returned: success
+ type: dict
+ sample:
+ licenses: "0.6.1"
+
+raw_output:
+ description: The raw output of the C(pipx list) command, when I(include_raw=true). Used for debugging.
+ returned: success
+ type: dict
+
+cmd:
+ description: Command executed to obtain the list of installed applications.
+ returned: success
+ type: list
+ elements: str
+ sample: [
+ "/usr/bin/python3.10",
+ "-m",
+ "pipx",
+ "list",
+ "--include-injected",
+ "--json"
+ ]
+'''
+
+import json
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.pipx import pipx_runner
+
+from ansible.module_utils.facts.compat import ansible_facts
+
+
+class PipXInfo(ModuleHelper):
+ output_params = ['name']
+ module = dict(
+ argument_spec=dict(
+ name=dict(type='str'),
+ include_deps=dict(type='bool', default=False),
+ include_injected=dict(type='bool', default=False),
+ include_raw=dict(type='bool', default=False),
+ executable=dict(type='path'),
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ if self.vars.executable:
+ self.command = [self.vars.executable]
+ else:
+ facts = ansible_facts(self.module, gather_subset=['python'])
+ self.command = [facts['python']['executable'], '-m', 'pipx']
+ self.runner = pipx_runner(self.module, self.command)
+
+ # self.vars.set('application', self._retrieve_installed(), change=True, diff=True)
+
+ def __run__(self):
+ def process_list(rc, out, err):
+ if not out:
+ return []
+
+ results = []
+ raw_data = json.loads(out)
+ if self.vars.include_raw:
+ self.vars.raw_output = raw_data
+
+ if self.vars.name:
+ if self.vars.name in raw_data['venvs']:
+ data = {self.vars.name: raw_data['venvs'][self.vars.name]}
+ else:
+ data = {}
+ else:
+ data = raw_data['venvs']
+
+ for venv_name, venv in data.items():
+ entry = {
+ 'name': venv_name,
+ 'version': venv['metadata']['main_package']['package_version']
+ }
+ if self.vars.include_injected:
+ entry['injected'] = dict(
+ (k, v['package_version']) for k, v in venv['metadata']['injected_packages'].items()
+ )
+ if self.vars.include_deps:
+ entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies'])
+ results.append(entry)
+
+ return results
+
+ with self.runner('_list', output_process=process_list) as ctx:
+ self.vars.application = ctx.run(_list=1)
+ self._capture_results(ctx)
+
+ def _capture_results(self, ctx):
+ self.vars.cmd = ctx.cmd
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+
+def main():
+ PipXInfo.execute()
+
+
+if __name__ == '__main__':
+ main()
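
Both pipx modules pick their command line the same way: use I(executable) verbatim when given, otherwise run C(pipx) as a module under the discovered Python interpreter. A sketch of that selection step; C(sys.executable) here is a stand-in for the interpreter that C(ansible_facts) would resolve on the target:

    #!/usr/bin/env python3
    # Sketch of the command selection shared by pipx and pipx_info.
    import sys

    def build_pipx_command(executable=None):
        if executable:
            return [executable]
        # Fall back to `python -m pipx` under the current interpreter.
        return [sys.executable, "-m", "pipx"]

    if __name__ == "__main__":
        print(build_pipx_command())
        print(build_pipx_command("/usr/local/bin/pipx"))
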
diff --git a/ansible_collections/community/general/plugins/modules/pkg5.py b/ansible_collections/community/general/plugins/modules/pkg5.py
new file mode 100644
index 000000000..f6bc77a71
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pkg5.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Peter Oliver <ansible@mavit.org.uk>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: pkg5
+author:
+- Peter Oliver (@mavit)
+short_description: Manages packages with the Solaris 11 Image Packaging System
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+notes:
+ - The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+    - An FMRI of the package(s) to be installed/removed/updated.
+ - Multiple packages may be specified, separated by C(,).
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Whether to install (I(present), I(latest)), or remove (I(absent)) a package.
+ choices: [ absent, latest, present, installed, removed, uninstalled ]
+ default: present
+ type: str
+ accept_licenses:
+ description:
+ - Accept any licences.
+ type: bool
+ default: false
+ aliases: [ accept, accept_licences ]
+ be_name:
+ description:
+ - Creates a new boot environment with the given name.
+ type: str
+ refresh:
+ description:
+ - Refresh publishers before execution.
+ type: bool
+ default: true
+'''
+EXAMPLES = '''
+- name: Install Vim
+ community.general.pkg5:
+ name: editor/vim
+
+- name: Install Vim without refreshing publishers
+ community.general.pkg5:
+ name: editor/vim
+ refresh: false
+
+- name: Remove finger daemon
+ community.general.pkg5:
+ name: service/network/finger
+ state: absent
+
+- name: Install several packages at once
+ community.general.pkg5:
+ name:
+ - /file/gnu-findutils
+ - /text/gnu-grep
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled']),
+ accept_licenses=dict(type='bool', default=False, aliases=['accept', 'accept_licences']),
+ be_name=dict(type='str'),
+ refresh=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ params = module.params
+ packages = []
+
+    # pkg(5) FMRIs include a comma before the release number, but
+ # AnsibleModule will have split this into multiple items for us.
+ # Try to spot where this has happened and fix it.
+ for fragment in params['name']:
+ if re.search(r'^\d+(?:\.\d+)*', fragment) and packages and re.search(r'@[^,]*$', packages[-1]):
+ packages[-1] += ',' + fragment
+ else:
+ packages.append(fragment)
+
+ if params['state'] in ['present', 'installed']:
+ ensure(module, 'present', packages, params)
+ elif params['state'] in ['latest']:
+ ensure(module, 'latest', packages, params)
+ elif params['state'] in ['absent', 'uninstalled', 'removed']:
+ ensure(module, 'absent', packages, params)
+
+
+def ensure(module, state, packages, params):
+ response = {
+ 'results': [],
+ 'msg': '',
+ }
+ behaviour = {
+ 'present': {
+ 'filter': lambda p: not is_installed(module, p),
+ 'subcommand': 'install',
+ },
+ 'latest': {
+ 'filter': lambda p: (
+ not is_installed(module, p) or not is_latest(module, p)
+ ),
+ 'subcommand': 'install',
+ },
+ 'absent': {
+ 'filter': lambda p: is_installed(module, p),
+ 'subcommand': 'uninstall',
+ },
+ }
+
+ if module.check_mode:
+ dry_run = ['-n']
+ else:
+ dry_run = []
+
+ if params['accept_licenses']:
+ accept_licenses = ['--accept']
+ else:
+ accept_licenses = []
+
+ if params['be_name']:
+ beadm = ['--be-name=' + module.params['be_name']]
+ else:
+ beadm = []
+
+ if params['refresh']:
+ no_refresh = []
+ else:
+ no_refresh = ['--no-refresh']
+
+ to_modify = list(filter(behaviour[state]['filter'], packages))
+ if to_modify:
+ rc, out, err = module.run_command(['pkg', behaviour[state]['subcommand']] + dry_run + accept_licenses + beadm + no_refresh + ['-q', '--'] + to_modify)
+ response['rc'] = rc
+ response['results'].append(out)
+ response['msg'] += err
+ response['changed'] = True
+ if rc == 4:
+ response['changed'] = False
+ response['failed'] = False
+ elif rc != 0:
+ module.fail_json(**response)
+
+ module.exit_json(**response)
+
+
+def is_installed(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '--', package])
+ return not bool(int(rc))
+
+
+def is_latest(module, package):
+ rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
+ return bool(int(rc))
+
+
+if __name__ == '__main__':
+ main()
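
Because AnsibleModule splits comma-separated list input, an FMRI such as C(editor/vim@7.3,5.11-0.175) arrives as two fragments; the loop in main() re-joins any fragment that starts with a dotted number onto a predecessor ending in a C(@) version spec. The same logic as a standalone sketch:

    #!/usr/bin/env python3
    # Sketch of pkg5's FMRI re-join after Ansible's comma splitting.
    import re

    def rejoin_fmris(fragments):
        packages = []
        for fragment in fragments:
            # A leading dotted number continues a previous '@version' part.
            if (re.search(r'^\d+(?:\.\d+)*', fragment)
                    and packages and re.search(r'@[^,]*$', packages[-1])):
                packages[-1] += ',' + fragment
            else:
                packages.append(fragment)
        return packages

    if __name__ == "__main__":
        # What list parsing makes of 'editor/vim@7.3,5.11-0.175':
        print(rejoin_fmris(['editor/vim@7.3', '5.11-0.175']))
        # -> ['editor/vim@7.3,5.11-0.175']
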
diff --git a/ansible_collections/community/general/plugins/modules/pkg5_publisher.py b/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
new file mode 100644
index 000000000..9d1b38138
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pkg5_publisher.py
@@ -0,0 +1,210 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkg5_publisher
+author: "Peter Oliver (@mavit)"
+short_description: Manages Solaris 11 Image Packaging System publishers
+description:
+ - IPS packages are the native packages in Solaris 11 and higher.
+  - This module configures which publishers a client will download IPS
+    packages from.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The publisher's name.
+ required: true
+ aliases: [ publisher ]
+ type: str
+ state:
+ description:
+ - Whether to ensure that a publisher is present or absent.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ sticky:
+ description:
+ - Packages installed from a sticky repository can only receive updates
+ from that repository.
+ type: bool
+ enabled:
+ description:
+      - Whether the repository is enabled or disabled.
+ type: bool
+ origin:
+ description:
+ - A path or URL to the repository.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+ mirror:
+ description:
+ - A path or URL to the repository mirror.
+ - Multiple values may be provided.
+ type: list
+ elements: str
+'''
+EXAMPLES = '''
+- name: Fetch packages for the solaris publisher direct from Oracle
+ community.general.pkg5_publisher:
+ name: solaris
+ sticky: true
+ origin: https://pkg.oracle.com/solaris/support/
+
+- name: Configure a publisher for locally-produced packages
+ community.general.pkg5_publisher:
+ name: site
+ origin: 'https://pkg.example.com/site/'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['publisher']),
+ state=dict(default='present', choices=['present', 'absent']),
+ sticky=dict(type='bool'),
+ enabled=dict(type='bool'),
+ # search_after=dict(),
+ # search_before=dict(),
+ origin=dict(type='list', elements='str'),
+ mirror=dict(type='list', elements='str'),
+ )
+ )
+
+ for option in ['origin', 'mirror']:
+ if module.params[option] == ['']:
+ module.params[option] = []
+
+ if module.params['state'] == 'present':
+ modify_publisher(module, module.params)
+ else:
+ unset_publisher(module, module.params['name'])
+
+
+def modify_publisher(module, params):
+ name = params['name']
+ existing = get_publishers(module)
+
+ if name in existing:
+ for option in ['origin', 'mirror', 'sticky', 'enabled']:
+ if params[option] is not None:
+ if params[option] != existing[name][option]:
+ return set_publisher(module, params)
+ else:
+ return set_publisher(module, params)
+
+ module.exit_json()
+
+
+def set_publisher(module, params):
+ name = params['name']
+ args = []
+
+ if params['origin'] is not None:
+ args.append('--remove-origin=*')
+ args.extend(['--add-origin=' + u for u in params['origin']])
+ if params['mirror'] is not None:
+ args.append('--remove-mirror=*')
+ args.extend(['--add-mirror=' + u for u in params['mirror']])
+
+ if params['sticky'] is not None and params['sticky']:
+ args.append('--sticky')
+ elif params['sticky'] is not None:
+ args.append('--non-sticky')
+
+ if params['enabled'] is not None and params['enabled']:
+ args.append('--enable')
+ elif params['enabled'] is not None:
+ args.append('--disable')
+
+ rc, out, err = module.run_command(
+ ["pkg", "set-publisher"] + args + [name],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def unset_publisher(module, publisher):
+ if publisher not in get_publishers(module):
+ module.exit_json()
+
+ rc, out, err = module.run_command(
+ ["pkg", "unset-publisher", publisher],
+ check_rc=True
+ )
+ response = {
+ 'rc': rc,
+ 'results': [out],
+ 'msg': err,
+ 'changed': True,
+ }
+ if rc != 0:
+ module.fail_json(**response)
+ module.exit_json(**response)
+
+
+def get_publishers(module):
+ rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
+
+ lines = out.splitlines()
+ keys = lines.pop(0).lower().split("\t")
+
+ publishers = {}
+ for line in lines:
+ values = dict(zip(keys, map(unstringify, line.split("\t"))))
+ name = values['publisher']
+
+ if name not in publishers:
+ publishers[name] = dict(
+ (k, values[k]) for k in ['sticky', 'enabled']
+ )
+ publishers[name]['origin'] = []
+ publishers[name]['mirror'] = []
+
+ if values['type'] is not None:
+ publishers[name][values['type']].append(values['uri'])
+
+ return publishers
+
+
+def unstringify(val):
+ if val == "-" or val == '':
+ return None
+ elif val == "true":
+ return True
+ elif val == "false":
+ return False
+ else:
+ return val
+
+
+if __name__ == '__main__':
+ main()
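
get_publishers() turns the tab-separated output of C(pkg publisher -Ftsv) into a dictionary keyed by publisher name, folding each origin/mirror row into per-publisher lists. A sketch of the same parsing against an approximated sample of that TSV layout (the exact column set may vary by pkg version):

    #!/usr/bin/env python3
    # Sketch of get_publishers(); SAMPLE approximates the -Ftsv layout.
    SAMPLE = (
        "PUBLISHER\tSTICKY\tSYSPUB\tENABLED\tTYPE\tSTATUS\tURI\tPROXY\n"
        "solaris\ttrue\tfalse\ttrue\torigin\tonline\t"
        "https://pkg.oracle.com/solaris/support/\t-\n"
    )

    def unstringify(val):
        if val in ("-", ""):
            return None
        return {"true": True, "false": False}.get(val, val)

    def get_publishers(out):
        lines = out.splitlines()
        keys = lines.pop(0).lower().split("\t")
        publishers = {}
        for line in lines:
            values = dict(zip(keys, map(unstringify, line.split("\t"))))
            name = values["publisher"]
            if name not in publishers:
                publishers[name] = {k: values[k] for k in ("sticky", "enabled")}
                publishers[name]["origin"] = []
                publishers[name]["mirror"] = []
            if values["type"] is not None:
                publishers[name][values["type"]].append(values["uri"])
        return publishers

    if __name__ == "__main__":
        print(get_publishers(SAMPLE))
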
diff --git a/ansible_collections/community/general/plugins/modules/pkgin.py b/ansible_collections/community/general/plugins/modules/pkgin.py
new file mode 100644
index 000000000..c08b25218
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pkgin.py
@@ -0,0 +1,396 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
+# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
+# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
+#
+# Written by Shaun Zinck
+# Based on pacman module written by Afterburn <http://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgin
+short_description: Package manager for SmartOS, NetBSD, et al
+description:
+ - "The standard package manager for SmartOS, but also usable on NetBSD
+ or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
+author:
+ - "Larry Gilbert (@L2G)"
+ - "Shaun Zinck (@szinck)"
+ - "Jasper Lievisse Adriaanse (@jasperla)"
+notes:
+ - "Known bug with pkgin < 0.8.0: if a package is removed and another
+ package depends on it, the other package will be silently removed as
+ well. New to Ansible 1.9: check-mode support."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - Name of the package to install/remove.
+      - Multiple names may be given, separated by commas.
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+      - Intended state of the package.
+ choices: [ 'present', 'absent' ]
+ default: present
+ type: str
+ update_cache:
+ description:
+      - Update repository database. Can be run with other steps or on its own.
+ type: bool
+ default: false
+ upgrade:
+ description:
+      - Upgrade main packages to their newer versions.
+ type: bool
+ default: false
+ full_upgrade:
+ description:
+      - Upgrade all packages to their newer versions.
+ type: bool
+ default: false
+ clean:
+ description:
+      - Clean the package cache.
+ type: bool
+ default: false
+ force:
+ description:
+      - Force package reinstall.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgin:
+ name: foo
+ state: present
+
+- name: Install specific version of foo package
+ community.general.pkgin:
+ name: foo-2.0.1
+ state: present
+
+- name: Update cache and install foo package
+ community.general.pkgin:
+ name: foo
+ update_cache: true
+
+- name: Remove package foo
+ community.general.pkgin:
+ name: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.pkgin:
+ name: foo,bar
+ state: absent
+
+- name: Update repositories as a separate step
+ community.general.pkgin:
+ update_cache: true
+
+- name: Upgrade main packages (equivalent to pkgin upgrade)
+ community.general.pkgin:
+ upgrade: true
+
+- name: Upgrade all packages (equivalent to pkgin full-upgrade)
+ community.general.pkgin:
+ full_upgrade: true
+
+- name: Force-upgrade all packages (equivalent to pkgin -F full-upgrade)
+ community.general.pkgin:
+ full_upgrade: true
+ force: true
+
+- name: Clean packages cache (equivalent to pkgin clean)
+ community.general.pkgin:
+ clean: true
+'''
+
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class PackageState(object):
+ PRESENT = 1
+ NOT_INSTALLED = 2
+ OUTDATED = 4
+ NOT_FOUND = 8
+
+
+def query_package(module, name):
+ """Search for the package by name and return state of the package.
+ """
+
+ # test whether '-p' (parsable) flag is supported.
+ rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
+
+ if rc == 0:
+ pflag = '-p'
+ splitchar = ';'
+ else:
+ pflag = ''
+ splitchar = ' '
+
+ # Use "pkgin search" to find the package. The regular expression will
+ # only match on the complete name.
+ rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
+
+ # rc will not be 0 unless the search was a success
+ if rc == 0:
+
+ # Search results may contain more than one line (e.g., 'emacs'), so iterate
+ # through each line to see if we have a match.
+ packages = out.split('\n')
+
+ for package in packages:
+
+ # Break up line at spaces. The first part will be the package with its
+ # version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
+ # of the package:
+ # '' - not installed
+ # '<' - installed but out of date
+ # '=' - installed and up to date
+ # '>' - installed but newer than the repository version
+ pkgname_with_version, raw_state = package.split(splitchar)[0:2]
+
+ # Search for package, stripping version
+ # (results in sth like 'gcc47-libs' or 'emacs24-nox11')
+ pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
+
+ # Do not proceed unless we have a match
+ if not pkg_search_obj:
+ continue
+
+ # Grab matched string
+ pkgname_without_version = pkg_search_obj.group(1)
+
+ if name not in (pkgname_with_version, pkgname_without_version):
+ continue
+
+ # The package was found; now return its state
+ if raw_state == '<':
+ return PackageState.OUTDATED
+ elif raw_state == '=' or raw_state == '>':
+ return PackageState.PRESENT
+ else:
+ # Package found but not installed
+ return PackageState.NOT_INSTALLED
+ # no fall-through
+
+ # No packages were matched
+ return PackageState.NOT_FOUND
+
+ # Search failed
+ return PackageState.NOT_FOUND
+
+
+def format_action_message(module, action, count):
+ vars = {"actioned": action,
+ "count": count}
+
+ if module.check_mode:
+ message = "would have %(actioned)s %(count)d package" % vars
+ else:
+ message = "%(actioned)s %(count)d package" % vars
+
+ if count == 1:
+ return message
+ else:
+ return message + "s"
+
+
+def format_pkgin_command(module, command, package=None):
+ # Not all commands take a package argument, so cover this up by passing
+ # an empty string. Some commands (e.g. 'update') will ignore extra
+ # arguments, however this behaviour cannot be relied on for others.
+ if package is None:
+ package = ""
+
+ if module.params["force"]:
+ force = "-F"
+ else:
+ force = ""
+
+ vars = {"pkgin": PKGIN_PATH,
+ "command": command,
+ "package": package,
+ "force": force}
+
+ if module.check_mode:
+ return "%(pkgin)s -n %(command)s %(package)s" % vars
+ else:
+ return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if query_package(module, package) in [PackageState.NOT_INSTALLED, PackageState.NOT_FOUND]:
+ continue
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "remove", package))
+
+ if not module.check_mode and query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=out, stderr=err)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages):
+
+ install_c = 0
+
+ for package in packages:
+ query_result = query_package(module, package)
+ if query_result in [PackageState.PRESENT, PackageState.OUTDATED]:
+ continue
+ elif query_result is PackageState.NOT_FOUND:
+ module.fail_json(msg="failed to find package %s for installation" % package)
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "install", package))
+
+ if not module.check_mode and not query_package(module, package) in [PackageState.PRESENT, PackageState.OUTDATED]:
+ module.fail_json(msg="failed to install %s: %s" % (package, out), stdout=out, stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c), stdout=out, stderr=err)
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_package_db(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "update"))
+
+ if rc == 0:
+ if re.search('database for.*is up-to-date\n$', out):
+ return False, "database is up-to-date"
+ else:
+ return True, "updated repository database"
+ else:
+ module.fail_json(msg="could not update package db", stdout=out, stderr=err)
+
+
+def do_upgrade_packages(module, full=False):
+ if full:
+ cmd = "full-upgrade"
+ else:
+ cmd = "upgrade"
+
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, cmd))
+
+ if rc == 0:
+ if re.search('^nothing to do.\n$', out):
+ module.exit_json(changed=False, msg="nothing left to upgrade")
+ else:
+ module.fail_json(msg="could not %s packages" % cmd, stdout=out, stderr=err)
+
+
+def upgrade_packages(module):
+ do_upgrade_packages(module)
+
+
+def full_upgrade_packages(module):
+ do_upgrade_packages(module, True)
+
+
+def clean_cache(module):
+ rc, out, err = module.run_command(
+ format_pkgin_command(module, "clean"))
+
+ if rc == 0:
+ # There's no indication if 'clean' actually removed anything,
+ # so assume it did.
+ module.exit_json(changed=True, msg="cleaned caches")
+ else:
+ module.fail_json(msg="could not clean package cache", stdout=out, stderr=err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ full_upgrade=dict(default=False, type='bool'),
+ clean=dict(default=False, type='bool'),
+ force=dict(default=False, type='bool')),
+ required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
+ supports_check_mode=True)
+
+ global PKGIN_PATH
+ PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ p = module.params
+
+ if p["update_cache"]:
+ c, msg = update_package_db(module)
+ if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
+ module.exit_json(changed=c, msg=msg)
+
+ if p["upgrade"]:
+ upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded packages')
+
+ if p["full_upgrade"]:
+ full_upgrade_packages(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='upgraded all packages')
+
+ if p["clean"]:
+ clean_cache(module)
+ if not p['name']:
+ module.exit_json(changed=True, msg='cleaned caches')
+
+ pkgs = p["name"]
+
+ if p["state"] == "present":
+ install_packages(module, pkgs)
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
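
query_package() classifies each C(pkgin search) line by the state marker in its second field: empty means not installed, C(<) outdated, C(=) up to date, and C(>) newer than the repository. A sketch of that classification for a single parsable (C(-p), C(;)-separated) output line:

    #!/usr/bin/env python3
    # Sketch of pkgin's per-line package classification.
    import re

    def classify(line, name, splitchar=";"):
        pkgname_with_version, raw_state = line.split(splitchar)[0:2]
        # Strip the version, e.g. 'gcc47-libs-4.7.2nb4' -> 'gcc47-libs'.
        m = re.search(r'^(.*?)-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version)
        if not m or name not in (pkgname_with_version, m.group(1)):
            return None  # line is for a different package
        if raw_state == '<':
            return "OUTDATED"
        if raw_state in ('=', '>'):
            return "PRESENT"
        return "NOT_INSTALLED"

    if __name__ == "__main__":
        print(classify("gcc47-libs-4.7.2nb4;=;GNU Compiler Collection",
                       "gcc47-libs"))
        # -> PRESENT
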
diff --git a/ansible_collections/community/general/plugins/modules/pkgng.py b/ansible_collections/community/general/plugins/modules/pkgng.py
new file mode 100644
index 000000000..b9d4422c0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pkgng.py
@@ -0,0 +1,540 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, bleader
+# Written by bleader <bleader@ratonland.org>
+# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pkgng
+short_description: Package manager for FreeBSD >= 9.0
+description:
+  - Manage binary packages for FreeBSD using 'pkgng', which is available in versions after 9.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name or list of names of packages to install/remove.
+ - "With I(name=*), I(state=latest) will operate, but I(state=present) and I(state=absent) will be noops."
+ - >
+ Warning: In Ansible 2.9 and earlier this module had a misfeature
+ where I(name=*) with I(state=latest) or I(state=present) would
+      install every package from every package repository, filling up
+      the machine's disk. Avoid those combinations unless you are certain that
+      your role will only be used with newer Ansible versions.
+ required: true
+ aliases: [pkg]
+ type: list
+ elements: str
+ state:
+ description:
+ - State of the package.
+ - 'Note: C(latest) added in 2.7.'
+ choices: [ 'present', 'latest', 'absent' ]
+ required: false
+ default: present
+ type: str
+ cached:
+ description:
+ - Use local package base instead of fetching an updated one.
+ type: bool
+ required: false
+ default: false
+ annotation:
+ description:
+      - A list of key-value pairs of the form
+ C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
+ C(-) denotes removing an annotation, and C(:) denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ type: list
+ elements: str
+ pkgsite:
+ description:
+      - For pkgng versions before 1.1.4, specify the packagesite to use
+ for downloading packages. If not specified, use settings from
+ C(/usr/local/etc/pkg.conf).
+      - For newer pkgng versions, specify the name of a repository
+ configured in C(/usr/local/etc/pkg/repos).
+ required: false
+ type: str
+ rootdir:
+ description:
+ - For pkgng versions 1.5 and later, pkg will install all packages
+ within the specified root directory.
+ - Can not be used together with I(chroot) or I(jail) options.
+ required: false
+ type: path
+ chroot:
+ description:
+ - Pkg will chroot in the specified environment.
+ - Can not be used together with I(rootdir) or I(jail) options.
+ required: false
+ type: path
+ jail:
+ description:
+ - Pkg will execute in the given jail name or id.
+ - Can not be used together with I(chroot) or I(rootdir) options.
+ type: str
+ autoremove:
+ description:
+ - Remove automatically installed packages which are no longer needed.
+ required: false
+ type: bool
+ default: false
+ ignore_osver:
+ description:
+ - Ignore FreeBSD OS version check, useful on -STABLE and -CURRENT branches.
+ - Defines the C(IGNORE_OSVERSION) environment variable.
+ required: false
+ type: bool
+ default: false
+ version_added: 1.3.0
+author: "bleader (@bleader)"
+notes:
+  - When using pkgsite, be aware that packages already in the cache will not be downloaded again.
+  - When used with a C(loop:), each package is processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.pkgng:
+ name: foo
+ state: present
+
+- name: Annotate package foo and bar
+ community.general.pkgng:
+ name:
+ - foo
+ - bar
+ annotation: '+test1=baz,-test2,:test3=foobar'
+
+- name: Remove packages foo and bar
+ community.general.pkgng:
+ name:
+ - foo
+ - bar
+ state: absent
+
+# "latest" support added in 2.7
+- name: Upgrade package baz
+ community.general.pkgng:
+ name: baz
+ state: latest
+
+- name: Upgrade all installed packages (see warning for the name option first!)
+ community.general.pkgng:
+ name: "*"
+ state: latest
+'''
+
+
+from collections import defaultdict
+import re
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, run_pkgng, name):
+
+ rc, out, err = run_pkgng('info', '-g', '-e', name)
+
+ return rc == 0
+
+
+def query_update(module, run_pkgng, name):
+
+ # Check to see if a package upgrade is available.
+ # rc = 0, no updates available or package not installed
+ # rc = 1, updates available
+ rc, out, err = run_pkgng('upgrade', '-g', '-n', name)
+
+ return rc == 1
+
+
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command([pkgng_path, '-v'])
+ version = [int(x) for x in re.split(r'[\._]', out)]
+
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def upgrade_packages(module, run_pkgng):
+ # Run a 'pkg upgrade', updating all packages.
+ upgraded_c = 0
+
+ pkgng_args = ['upgrade']
+ pkgng_args.append('-n' if module.check_mode else '-y')
+ rc, out, err = run_pkgng(*pkgng_args, check_rc=(not module.check_mode))
+
+ matches = re.findall('^Number of packages to be (?:upgraded|reinstalled): ([0-9]+)', out, re.MULTILINE)
+ for match in matches:
+ upgraded_c += int(match)
+
+ if upgraded_c > 0:
+ return (True, "updated %s package(s)" % upgraded_c, out, err)
+ return (False, "no packages need upgrades", out, err)
+
+
+def remove_packages(module, run_pkgng, packages):
+ remove_c = 0
+ stdout = ""
+ stderr = ""
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, run_pkgng, package):
+ continue
+
+ if not module.check_mode:
+ rc, out, err = run_pkgng('delete', '-y', package)
+ stdout += out
+ stderr += err
+
+ if not module.check_mode and query_package(module, run_pkgng, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out), stdout=stdout, stderr=stderr)
+
+ remove_c += 1
+
+ if remove_c > 0:
+ return (True, "removed %s package(s)" % remove_c, stdout, stderr)
+
+ return (False, "package(s) already absent", stdout, stderr)
+
+
+def install_packages(module, run_pkgng, packages, cached, state):
+ action_queue = defaultdict(list)
+ action_count = defaultdict(int)
+ stdout = ""
+ stderr = ""
+
+ if not module.check_mode and not cached:
+ rc, out, err = run_pkgng('update')
+ stdout += out
+ stderr += err
+ if rc != 0:
+ module.fail_json(msg="Could not update catalogue [%d]: %s %s" % (rc, out, err), stdout=stdout, stderr=stderr)
+
+ for package in packages:
+ already_installed = query_package(module, run_pkgng, package)
+ if already_installed and state == "present":
+ continue
+
+ if (
+ already_installed and state == "latest"
+ and not query_update(module, run_pkgng, package)
+ ):
+ continue
+
+ if already_installed:
+ action_queue["upgrade"].append(package)
+ else:
+ action_queue["install"].append(package)
+
+ # install/upgrade all named packages with one pkg command
+ for (action, package_list) in action_queue.items():
+ if module.check_mode:
+ # Do nothing, but count up how many actions
+ # would be performed so that the changed/msg
+ # is correct.
+ action_count[action] += len(package_list)
+ continue
+
+ pkgng_args = [action, '-g', '-U', '-y'] + package_list
+ rc, out, err = run_pkgng(*pkgng_args)
+ stdout += out
+ stderr += err
+
+ # individually verify packages are in requested state
+ for package in package_list:
+ verified = False
+ if action == 'install':
+ verified = query_package(module, run_pkgng, package)
+ elif action == 'upgrade':
+ verified = not query_update(module, run_pkgng, package)
+
+ if verified:
+ action_count[action] += 1
+ else:
+ module.fail_json(msg="failed to %s %s" % (action, package), stdout=stdout, stderr=stderr)
+
+ if sum(action_count.values()) > 0:
+ past_tense = {'install': 'installed', 'upgrade': 'upgraded'}
+ messages = []
+ for (action, count) in action_count.items():
+ messages.append("%s %s package%s" % (past_tense.get(action, action), count, "s" if count != 1 else ""))
+
+ return (True, '; '.join(messages), stdout, stderr)
+
+ return (False, "package(s) already %s" % (state), stdout, stderr)
+
+
+def annotation_query(module, run_pkgng, package, tag):
+ rc, out, err = run_pkgng('info', '-g', '-A', package)
+ match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, run_pkgng, package, tag, value):
+ _value = annotation_query(module, run_pkgng, package, tag)
+ if not _value:
+ # Annotation does not exist, add it.
+ if not module.check_mode:
+ rc, out, err = run_pkgng('annotate', '-y', '-A', package, tag, data=value, binary_data=True)
+ if rc != 0:
+ module.fail_json(msg="could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+
+def annotation_delete(module, run_pkgng, package, tag, value):
+ _value = annotation_query(module, run_pkgng, package, tag)
+ if _value:
+ if not module.check_mode:
+ rc, out, err = run_pkgng('annotate', '-y', '-D', package, tag)
+ if rc != 0:
+ module.fail_json(msg="could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+
+def annotation_modify(module, run_pkgng, package, tag, value):
+ _value = annotation_query(module, run_pkgng, package, tag)
+ if not _value:
+ # No such tag
+ module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+ if not module.check_mode:
+ rc, out, err = run_pkgng('annotate', '-y', '-M', package, tag, data=value, binary_data=True)
+
+ # pkg sometimes exits with rc == 1, even though the modification succeeded
+ # Check the output for a success message
+ if (
+ rc != 0
+ and re.search(r'^%s-[^:]+: Modified annotation tagged: %s' % (package, tag), out, flags=re.MULTILINE) is None
+ ):
+ module.fail_json(msg="failed to annotate %s, could not change annotation %s to %s: %s"
+ % (package, tag, value, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, run_pkgng, packages, annotations):
+ annotate_c = 0
+ if len(annotations) == 1:
+ # Split on commas with optional trailing whitespace,
+ # to support the old style of multiple annotations
+ # on a single line, rather than YAML list syntax
+ annotations = re.split(r'\s*,\s*', annotations[0])
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for annotation_string in annotations:
+ # Note to future maintainers: A dash (-) in a regex character class ([-+:] below)
+ # must appear as the first character in the class, or it will be interpreted
+ # as a range of characters.
+ annotation = \
+ re.match(r'(?P<operation>[-+:])(?P<tag>[^=]+)(=(?P<value>.+))?', annotation_string)
+
+ if annotation is None:
+ module.fail_json(
+ msg="failed to annotate %s, invalid annotate string: %s"
+ % (package, annotation_string)
+ )
+
+ annotation = annotation.groupdict()
+ if operation[annotation['operation']](module, run_pkgng, package, annotation['tag'], annotation['value']):
+ annotate_c += 1
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
+
+
+def autoremove_packages(module, run_pkgng):
+ stdout = ""
+ stderr = ""
+ rc, out, err = run_pkgng('autoremove', '-n')
+
+ autoremove_c = 0
+
+ match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
+ if match:
+ autoremove_c = int(match.group(1))
+
+ if autoremove_c == 0:
+ return (False, "no package(s) to autoremove", stdout, stderr)
+
+ if not module.check_mode:
+ rc, out, err = run_pkgng('autoremove', '-y')
+ stdout += out
+ stderr += err
+
+ return (True, "autoremoved %d package(s)" % (autoremove_c), stdout, stderr)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "latest", "absent"], required=False),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ cached=dict(default=False, type='bool'),
+ ignore_osver=dict(default=False, required=False, type='bool'),
+ annotation=dict(required=False, type='list', elements='str'),
+ pkgsite=dict(required=False),
+ rootdir=dict(required=False, type='path'),
+ chroot=dict(required=False, type='path'),
+ jail=dict(required=False, type='str'),
+ autoremove=dict(default=False, type='bool')),
+ supports_check_mode=True,
+ mutually_exclusive=[["rootdir", "chroot", "jail"]])
+
+ pkgng_path = module.get_bin_path('pkg', True)
+
+ p = module.params
+
+ pkgs = p["name"]
+
+ changed = False
+ msgs = []
+ stdout = ""
+ stderr = ""
+ dir_arg = None
+
+ if p["rootdir"] is not None:
+ rootdir_not_supported = pkgng_older_than(module, pkgng_path, [1, 5, 0])
+ if rootdir_not_supported:
+ module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
+ else:
+ dir_arg = "--rootdir=%s" % (p["rootdir"])
+
+ if p["ignore_osver"]:
+ ignore_osver_not_supported = pkgng_older_than(module, pkgng_path, [1, 11, 0])
+ if ignore_osver_not_supported:
+ module.fail_json(msg="To use option 'ignore_osver' pkg version must be 1.11 or greater")
+
+ if p["chroot"] is not None:
+ dir_arg = '--chroot=%s' % (p["chroot"])
+
+ if p["jail"] is not None:
+ dir_arg = '--jail=%s' % (p["jail"])
+
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ repo_flag_not_supported = pkgng_older_than(module, pkgng_path, [1, 1, 4])
+
+ def run_pkgng(action, *args, **kwargs):
+ cmd = [pkgng_path, dir_arg, action]
+
+ pkgng_env = {'BATCH': 'yes'}
+
+ if p["ignore_osver"]:
+ pkgng_env['IGNORE_OSVERSION'] = 'yes'
+
+ if p['pkgsite'] is not None and action in ('update', 'install', 'upgrade',):
+ if repo_flag_not_supported:
+ pkgng_env['PACKAGESITE'] = p['pkgsite']
+ else:
+ cmd.append('--repository=%s' % (p['pkgsite'],))
+
+ # If environ_update is specified to be "passed through"
+ # to module.run_command, then merge its values into pkgng_env
+ pkgng_env.update(kwargs.pop('environ_update', dict()))
+
+ return module.run_command(cmd + list(args), environ_update=pkgng_env, **kwargs)
+
+ if pkgs == ['*'] and p["state"] == 'latest':
+ # Operate on all installed packages. Only state: latest makes sense here.
+ _changed, _msg, _stdout, _stderr = upgrade_packages(module, run_pkgng)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ # Operate on named packages
+ if len(pkgs) == 1:
+ # The documentation used to show multiple packages specified in one line
+ # with comma or space delimiters. That doesn't result in a YAML list, and
+ # wrong actions (install vs upgrade) can be reported if those
+ # comma- or space-delimited strings make it to the pkg command line.
+ pkgs = re.split(r'[,\s]', pkgs[0])
+ named_packages = [pkg for pkg in pkgs if pkg != '*']
+ if p["state"] in ("present", "latest") and named_packages:
+ _changed, _msg, _out, _err = install_packages(module, run_pkgng, named_packages,
+ p["cached"], p["state"])
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ elif p["state"] == "absent" and named_packages:
+ _changed, _msg, _out, _err = remove_packages(module, run_pkgng, named_packages)
+ stdout += _out
+ stderr += _err
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["autoremove"]:
+ _changed, _msg, _stdout, _stderr = autoremove_packages(module, run_pkgng)
+ changed = changed or _changed
+ stdout += _stdout
+ stderr += _stderr
+ msgs.append(_msg)
+
+ if p["annotation"] is not None:
+ _changed, _msg = annotate_packages(module, run_pkgng, pkgs, p["annotation"])
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs), stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
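
annotate_packages() accepts either a YAML list or a single comma-separated string, then splits every entry into an operation (C(+) add, C(-) delete, C(:) modify), a tag, and an optional value. A sketch of just that parsing step:

    #!/usr/bin/env python3
    # Sketch of pkgng's annotation-string parsing.
    import re

    def parse_annotations(annotations):
        if len(annotations) == 1:
            # Old style: several annotations in one comma-separated string.
            annotations = re.split(r'\s*,\s*', annotations[0])
        parsed = []
        for s in annotations:
            m = re.match(r'(?P<operation>[-+:])(?P<tag>[^=]+)(=(?P<value>.+))?', s)
            if m is None:
                raise ValueError("invalid annotate string: %s" % s)
            parsed.append(m.groupdict())
        return parsed

    if __name__ == "__main__":
        for item in parse_annotations(['+test1=baz,-test2,:test3=foobar']):
            print(item)
        # {'operation': '+', 'tag': 'test1', 'value': 'baz'} ...
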
diff --git a/ansible_collections/community/general/plugins/modules/pkgutil.py b/ansible_collections/community/general/plugins/modules/pkgutil.py
new file mode 100644
index 000000000..5af74c1f3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pkgutil.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Alexander Winkler <mail () winkler-alexander.de>
+# based on svr4pkg by
+# Boyd Adamson <boyd () boydadamson.com> (2012)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: pkgutil
+short_description: OpenCSW package management on Solaris
+description:
+- This module installs, updates and removes packages from the OpenCSW project for Solaris.
+- Unlike the M(community.general.svr4pkg) module, it will resolve and download dependencies.
+- See U(https://www.opencsw.org/) for more information about the project.
+author:
+- Alexander Winkler (@dermute)
+- David Ponessa (@scathatheworm)
+extends_documentation_fragment:
+- community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ details:
+ - In order to check the availability of packages, the catalog cache under C(/var/opt/csw/pkgutil) may be refreshed even in check mode.
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the package.
+ - When using I(state=latest), this can be C('*'), which updates all installed packages managed by pkgutil.
+ type: list
+ required: true
+ elements: str
+ aliases: [ pkg ]
+ site:
+ description:
+ - The repository path to install the package from.
+ - Its global definition is in C(/etc/opt/csw/pkgutil.conf).
+ required: false
+ type: str
+ state:
+ description:
+    - Whether to install (C(present)/C(installed)) or remove (C(absent)/C(removed)) packages.
+ - The upgrade (C(latest)) operation will update/install the packages to the latest version available.
+ type: str
+ required: true
+ choices: [ absent, installed, latest, present, removed ]
+ update_catalog:
+ description:
+ - If you always want to refresh your catalog from the mirror, even when it's not stale, set this to C(true).
+ type: bool
+ default: false
+ force:
+ description:
+ - To allow the update process to downgrade packages to match what is present in the repository, set this to C(true).
+ - This is useful for rolling back to stable from testing, or similar operations.
+ type: bool
+ default: false
+ version_added: 1.2.0
+'''
+
+EXAMPLES = r'''
+- name: Install a package
+ community.general.pkgutil:
+ name: CSWcommon
+ state: present
+
+- name: Install a package from a specific repository
+ community.general.pkgutil:
+ name: CSWnrpe
+ site: ftp://myinternal.repo/opencsw/kiel
+ state: latest
+
+- name: Remove a package
+ community.general.pkgutil:
+ name: CSWtop
+ state: absent
+
+- name: Install several packages
+ community.general.pkgutil:
+ name:
+ - CSWsudo
+ - CSWtop
+ state: present
+
+- name: Update all packages
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+
+- name: Update all packages and force versions to match latest in catalog
+ community.general.pkgutil:
+ name: '*'
+ state: latest
+ force: true
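+
+# A hypothetical extra task (not part of the original examples), using the
+# documented update_catalog option; the package name CSWrsync is illustrative.
+- name: Install a package after forcing a catalog refresh from the mirror
+  community.general.pkgutil:
+    name: CSWrsync
+    state: present
+    update_catalog: true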
+'''
+
+RETURN = r''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def packages_not_installed(module, names):
+ ''' Check if each package is installed and return list of the ones absent '''
+ pkgs = []
+ for pkg in names:
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc != 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_installed(module, names):
+ ''' Check if each package is installed and return list of the ones present '''
+ pkgs = []
+ for pkg in names:
+ if not pkg.startswith('CSW'):
+ continue
+ rc, out, err = run_command(module, ['pkginfo', '-q', pkg])
+ if rc == 0:
+ pkgs.append(pkg)
+ return pkgs
+
+
+def packages_not_latest(module, names, site, update_catalog):
+ ''' Check status of each package and return list of the ones with an upgrade available '''
+ cmd = ['pkgutil']
+ if update_catalog:
+ cmd.append('-U')
+ cmd.append('-c')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if names != ['*']:
+ cmd.extend(names)
+ rc, out, err = run_command(module, cmd)
+
+ # Find packages in the catalog which are not up to date
+ packages = []
+ for line in out.split('\n')[1:-1]:
+ if 'catalog' not in line and 'SAME' not in line:
+ packages.append(line.split(' ')[0])
+
+ # Remove duplicates
+ return list(set(packages))
+
+
+def run_command(module, cmd, **kwargs):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True, ['/opt/csw/bin'])
+ return module.run_command(cmd, **kwargs)
+
+
+def package_install(module, state, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-iy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def package_upgrade(module, pkgs, site, update_catalog, force):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-uy')
+ if update_catalog:
+ cmd.append('-U')
+ if site is not None:
+ cmd.extend(['-t', site])
+ if force:
+ cmd.append('-f')
+ cmd += pkgs
+ return run_command(module, cmd)
+
+
+def package_uninstall(module, pkgs):
+ cmd = ['pkgutil']
+ if module.check_mode:
+ cmd.append('-n')
+ cmd.append('-ry')
+ cmd.extend(pkgs)
+ return run_command(module, cmd)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True, aliases=['pkg']),
+ state=dict(type='str', required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']),
+ site=dict(type='str'),
+ update_catalog=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ name = module.params['name']
+ state = module.params['state']
+ site = module.params['site']
+ update_catalog = module.params['update_catalog']
+ force = module.params['force']
+
+ rc = None
+ out = ''
+ err = ''
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ if state in ['installed', 'present']:
+ # Fail with an explicit error when trying to "install" '*'
+ if name == ['*']:
+            module.fail_json(msg="Cannot use 'state: present' with name '*'")
+
+ # Build list of packages that are actually not installed from the ones requested
+ pkgs = packages_not_installed(module, name)
+
+ # If the package list is empty then all packages are already present
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_install(module, state, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['latest']:
+ # When using latest for *
+ if name == ['*']:
+ # Check for packages that are actually outdated
+ pkgs = packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list comes up empty, everything is already up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+            # If there are packages to update, empty the list and run the
+            # command without it: pkgutil updates everything when run
+            # without package names.
+ pkgs = []
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+ else:
+ # Build list of packages that are either outdated or not installed
+ pkgs = packages_not_installed(module, name)
+ pkgs += packages_not_latest(module, name, site, update_catalog)
+
+ # If the package list is empty that means all packages are installed and up to date
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_upgrade(module, pkgs, site, update_catalog, force)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ elif state in ['absent', 'removed']:
+ # Build list of packages requested for removal that are actually present
+ pkgs = packages_installed(module, name)
+
+ # If the list is empty, no packages need to be removed
+ if pkgs == []:
+ module.exit_json(changed=False)
+
+ (rc, out, err) = package_uninstall(module, pkgs)
+ if rc != 0:
+ module.fail_json(msg=(err or out))
+
+ if rc is None:
+ # pkgutil was not executed because the package was already present/absent/up to date
+ result['changed'] = False
+ elif rc == 0:
+ result['changed'] = True
+ else:
+ result['changed'] = False
+ result['failed'] = True
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pmem.py b/ansible_collections/community/general/plugins/modules/pmem.py
new file mode 100644
index 000000000..d7fcb8e01
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pmem.py
@@ -0,0 +1,637 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Masayoshi Mizuma <msys.mizuma@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Masayoshi Mizuma (@mizumm)
+module: pmem
+short_description: Configure Intel Optane Persistent Memory modules
+version_added: 4.5.0
+description:
+  - This module allows configuring Intel Optane Persistent Memory modules
+    (PMem) using the ipmctl and ndctl command line tools.
+requirements:
+ - ipmctl and ndctl command line tools
+ - xmltodict
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ appdirect:
+ description:
+ - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)).
+ - Create AppDirect capacity utilizing hardware interleaving across the
+ requested PMem modules if applicable given the specified target.
+    - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100).
+ type: int
+ appdirect_interleaved:
+ description:
+    - Create AppDirect capacity that is interleaved with any other PMem modules.
+ type: bool
+ required: false
+ default: true
+ memorymode:
+ description:
+ - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)).
+ type: int
+ reserved:
+ description:
+ - Percentage of the capacity to reserve (C(0)-C(100)). I(reserved) will not be mapped
+ into the system physical address space and will be presented as reserved
+ capacity with Show Device and Show Memory Resources Commands.
+ - I(reserved) will be set automatically if this is not configured.
+ type: int
+ required: false
+ socket:
+ description:
+    - This allows setting the configuration for each socket by using the socket ID.
+ - Total of I(appdirect), I(memorymode) and I(reserved) must be C(100) within one socket.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description: The socket ID of the PMem module.
+ type: int
+ required: true
+ appdirect:
+ description:
+ - Percentage of the total capacity to use in AppDirect Mode (C(0)-C(100)) within the socket ID.
+ type: int
+ required: true
+ appdirect_interleaved:
+ description:
+        - Create AppDirect capacity that is interleaved with any other PMem modules within the socket ID.
+ type: bool
+ required: false
+ default: true
+ memorymode:
+ description:
+ - Percentage of the total capacity to use in Memory Mode (C(0)-C(100)) within the socket ID.
+ type: int
+ required: true
+ reserved:
+ description:
+ - Percentage of the capacity to reserve (C(0)-C(100)) within the socket ID.
+ type: int
+ namespace:
+ description:
+    - This allows setting the namespace configuration for the PMem.
+ type: list
+ elements: dict
+ suboptions:
+ mode:
+ description:
+        - The mode of the namespace. Details of the modes are described in the man page of ndctl-create-namespace.
+ type: str
+ required: true
+ choices: ['raw', 'sector', 'fsdax', 'devdax']
+ type:
+ description:
+        - The type of the namespace. Details of the types are described in the man page of ndctl-create-namespace.
+ type: str
+ required: false
+ choices: ['pmem', 'blk']
+ size:
+ description:
+        - The size of the namespace. This option supports the suffixes C(k) or C(K) or C(KB) for KiB,
+ C(m) or C(M) or C(MB) for MiB, C(g) or C(G) or C(GB) for GiB and C(t) or C(T) or C(TB) for TiB.
+ - This option is required if multiple namespaces are configured.
+ - If this option is not set, all of the available space of a region is configured.
+ type: str
+ required: false
+ namespace_append:
+ description:
+    - Whether to append the new namespaces to the system.
+    - The default is C(false), so all existing namespaces not listed in I(namespace) are removed.
+ type: bool
+ default: false
+ required: false
+'''
+
+RETURN = r'''
+reboot_required:
+  description: Indicates whether a system reboot is required to complete the PMem configuration.
+ returned: success
+ type: bool
+ sample: true
+result:
+ description:
+ - Shows the value of AppDirect, Memory Mode and Reserved size in bytes.
+    - If the I(socket) argument is provided, shows the values for each socket, with C(socket) containing the socket ID.
+    - If the I(namespace) argument is provided, shows the details of each namespace.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ appdirect:
+ description: AppDirect size in bytes.
+ type: int
+ memorymode:
+ description: Memory Mode size in bytes.
+ type: int
+ reserved:
+ description: Reserved size in bytes.
+ type: int
+ socket:
+ description: The socket ID to be configured.
+ type: int
+ namespace:
+      description: The list of namespace details.
+ type: list
+ sample: [
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 0
+ },
+ {
+ "appdirect": 111669149696,
+ "memorymode": 970662608896,
+ "reserved": 3626500096,
+ "socket": 1
+ }
+ ]
+'''
+
+EXAMPLES = r'''
+- name: Configure the PMem as AppDirect 10, Memory Mode 70, and Reserved 20 percent.
+ community.general.pmem:
+ appdirect: 10
+ memorymode: 70
+
+- name: Configure the PMem as AppDirect 10, Memory Mode 80, and Reserved 10 percent.
+ community.general.pmem:
+ appdirect: 10
+ memorymode: 80
+ reserved: 10
+
+- name: Configure the PMem as non-interleaved AppDirect 10, Memory Mode 70, and Reserved 20 percent.
+ community.general.pmem:
+ appdirect: 10
+ appdirect_interleaved: false
+ memorymode: 70
+
+- name: Configure the PMem per socket.
+ community.general.pmem:
+ socket:
+ - id: 0
+ appdirect: 10
+ appdirect_interleaved: false
+ memorymode: 70
+ reserved: 20
+ - id: 1
+ appdirect: 10
+ memorymode: 80
+ reserved: 10
+
+- name: Configure two namespaces.
+ community.general.pmem:
+ namespace:
+ - size: 1GB
+ type: pmem
+ mode: raw
+ - size: 320MB
+ type: pmem
+ mode: sector
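+
+# A hypothetical extra sketch (not part of the original examples): with the
+# documented namespace_append option, existing namespaces are kept and the
+# new one is appended instead of replacing everything not listed here.
+- name: Append one more namespace, keeping the existing ones.
+  community.general.pmem:
+    namespace:
+      - size: 2GB
+        type: pmem
+        mode: fsdax
+    namespace_append: true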
+'''
+
+import json
+import re
+import traceback
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib, human_to_bytes
+
+try:
+ import xmltodict
+except ImportError:
+ HAS_XMLTODICT_LIBRARY = False
+ XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc()
+else:
+ HAS_XMLTODICT_LIBRARY = True
+ XMLTODICT_LIBRARY_IMPORT_ERROR = None
+
+
+class PersistentMemory(object):
+ def __init__(self):
+ module = AnsibleModule(
+ argument_spec=dict(
+ appdirect=dict(type='int'),
+ appdirect_interleaved=dict(type='bool', default=True),
+ memorymode=dict(type='int'),
+ reserved=dict(type='int'),
+ socket=dict(
+ type='list', elements='dict',
+ options=dict(
+ id=dict(required=True, type='int'),
+ appdirect=dict(required=True, type='int'),
+ appdirect_interleaved=dict(type='bool', default=True),
+ memorymode=dict(required=True, type='int'),
+ reserved=dict(type='int'),
+ ),
+ ),
+ namespace=dict(
+ type='list', elements='dict',
+ options=dict(
+ mode=dict(required=True, type='str', choices=['raw', 'sector', 'fsdax', 'devdax']),
+ type=dict(type='str', choices=['pmem', 'blk']),
+ size=dict(type='str'),
+ ),
+ ),
+ namespace_append=dict(type='bool', default=False),
+ ),
+ required_together=(
+ ['appdirect', 'memorymode'],
+ ),
+ required_one_of=(
+ ['appdirect', 'memorymode', 'socket', 'namespace'],
+ ),
+ mutually_exclusive=(
+ ['appdirect', 'socket'],
+ ['memorymode', 'socket'],
+ ['appdirect', 'namespace'],
+ ['memorymode', 'namespace'],
+ ['socket', 'namespace'],
+ ['appdirect', 'namespace_append'],
+ ['memorymode', 'namespace_append'],
+ ['socket', 'namespace_append'],
+ ),
+ )
+
+ if not HAS_XMLTODICT_LIBRARY:
+ module.fail_json(
+ msg=missing_required_lib('xmltodict'),
+ exception=XMLTODICT_LIBRARY_IMPORT_ERROR)
+
+ self.ipmctl_exec = module.get_bin_path('ipmctl', True)
+ self.ndctl_exec = module.get_bin_path('ndctl', True)
+
+ self.appdirect = module.params['appdirect']
+ self.interleaved = module.params['appdirect_interleaved']
+ self.memmode = module.params['memorymode']
+ self.reserved = module.params['reserved']
+ self.socket = module.params['socket']
+ self.namespace = module.params['namespace']
+ self.namespace_append = module.params['namespace_append']
+
+ self.module = module
+ self.changed = False
+ self.result = []
+
+ def pmem_run_command(self, command, returnCheck=True):
+        # command may contain non-string elements (e.g. numbers); stringify them
+        cmd = [str(part) for part in command]
+
+ self.module.log(msg='pmem_run_command: execute: %s' % cmd)
+
+ rc, out, err = self.module.run_command(cmd)
+
+ self.module.log(msg='pmem_run_command: result: %s' % out)
+
+ if returnCheck and rc != 0:
+ self.module.fail_json(msg='Error while running: %s' %
+ cmd, rc=rc, out=out, err=err)
+
+ return out
+
+ def pmem_run_ipmctl(self, command, returnCheck=True):
+
+ command = [self.ipmctl_exec] + command
+
+ return self.pmem_run_command(command, returnCheck)
+
+ def pmem_run_ndctl(self, command, returnCheck=True):
+
+ command = [self.ndctl_exec] + command
+
+ return self.pmem_run_command(command, returnCheck)
+
+ def pmem_is_dcpmm_installed(self):
+ # To check this system has dcpmm
+ command = ['show', '-system', '-capabilities']
+ return self.pmem_run_ipmctl(command)
+
+ def pmem_get_region_align_size(self, region):
+ aligns = []
+ for rg in region:
+ if rg['align'] not in aligns:
+ aligns.append(rg['align'])
+
+ return aligns
+
+ def pmem_get_available_region_size(self, region):
+ available_size = []
+ for rg in region:
+ available_size.append(rg['available_size'])
+
+ return available_size
+
+ def pmem_get_available_region_type(self, region):
+ types = []
+ for rg in region:
+ if rg['type'] not in types:
+ types.append(rg['type'])
+
+ return types
+
+ def pmem_argument_check(self):
+ def namespace_check(self):
+ command = ['list', '-R']
+ out = self.pmem_run_ndctl(command)
+ if not out:
+                return 'No available region(s) on this system.'
+ region = json.loads(out)
+
+ aligns = self.pmem_get_region_align_size(region)
+ if len(aligns) != 1:
+                return 'Regions whose alignment sizes differ are not supported.'
+
+ available_size = self.pmem_get_available_region_size(region)
+ types = self.pmem_get_available_region_type(region)
+ for ns in self.namespace:
+ if ns['size']:
+ try:
+ size_byte = human_to_bytes(ns['size'])
+ except ValueError:
+                        return 'size must be in the format: NNN TB|GB|MB|KB|T|G|M|K|B'
+
+ if size_byte % aligns[0] != 0:
+                        return 'size: %s should be aligned with %d' % (ns['size'], aligns[0])
+
+ is_space_enough = False
+ for i, avail in enumerate(available_size):
+ if avail > size_byte:
+ available_size[i] -= size_byte
+ is_space_enough = True
+ break
+
+ if is_space_enough is False:
+                        return 'There is no available region for size: %s' % ns['size']
+
+ ns['size_byte'] = size_byte
+
+ elif len(self.namespace) != 1:
+ return 'size option is required to configure multiple namespaces'
+
+ if ns['type'] not in types:
+ return 'type %s is not supported in this system. Supported type: %s' % (ns['type'], types)
+
+ return None
+
+ def percent_check(self, appdirect, memmode, reserved=None):
+ if appdirect is None or (appdirect < 0 or appdirect > 100):
+ return 'appdirect percent should be from 0 to 100.'
+ if memmode is None or (memmode < 0 or memmode > 100):
+ return 'memorymode percent should be from 0 to 100.'
+
+ if reserved is None:
+ if appdirect + memmode > 100:
+                    return 'Total percent should be less than or equal to 100.'
+ else:
+ if reserved < 0 or reserved > 100:
+ return 'reserved percent should be from 0 to 100.'
+ if appdirect + memmode + reserved != 100:
+ return 'Total percent should be 100.'
+
+ def socket_id_check(self):
+ command = ['show', '-o', 'nvmxml', '-socket']
+ out = self.pmem_run_ipmctl(command)
+ sockets_dict = xmltodict.parse(out, dict_constructor=dict)['SocketList']['Socket']
+ socket_ids = []
+ for sl in sockets_dict:
+ socket_ids.append(int(sl['SocketID'], 16))
+
+ for skt in self.socket:
+ if skt['id'] not in socket_ids:
+ return 'Invalid socket number: %d' % skt['id']
+
+ return None
+
+ if self.namespace:
+ return namespace_check(self)
+ elif self.socket is None:
+ return percent_check(self, self.appdirect, self.memmode, self.reserved)
+ else:
+ ret = socket_id_check(self)
+ if ret is not None:
+ return ret
+
+ for skt in self.socket:
+ ret = percent_check(
+ self, skt['appdirect'], skt['memorymode'], skt['reserved'])
+ if ret is not None:
+ return ret
+
+ return None
+
+ def pmem_remove_namespaces(self):
+ command = ['list', '-N']
+ out = self.pmem_run_ndctl(command)
+
+        # There are no namespaces on this system. Nothing to do.
+ if not out:
+ return
+
+ namespaces = json.loads(out)
+
+ # Disable and destroy all namespaces
+ for ns in namespaces:
+ command = ['disable-namespace', ns['dev']]
+ self.pmem_run_ndctl(command)
+
+ command = ['destroy-namespace', ns['dev']]
+ self.pmem_run_ndctl(command)
+
+ return
+
+ def pmem_delete_goal(self):
+ # delete the goal request
+ command = ['delete', '-goal']
+ self.pmem_run_ipmctl(command)
+
+ def pmem_init_env(self):
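+        # Existing namespaces are wiped unless the user asked to append to
+        # them; the region goal request is only reset when reconfiguring
+        # regions (that is, when no namespace configuration was given).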
+ if self.namespace is None or (self.namespace and self.namespace_append is False):
+ self.pmem_remove_namespaces()
+ if self.namespace is None:
+ self.pmem_delete_goal()
+
+ def pmem_get_capacity(self, skt=None):
+ command = ['show', '-d', 'Capacity', '-u', 'B', '-o', 'nvmxml', '-dimm']
+ if skt:
+ command += ['-socket', skt['id']]
+ out = self.pmem_run_ipmctl(command)
+
+ dimm_list = xmltodict.parse(out, dict_constructor=dict)['DimmList']['Dimm']
+ capacity = 0
+ for entry in dimm_list:
+ for key, v in entry.items():
+ if key == 'Capacity':
+ capacity += int(v.split()[0])
+
+ return capacity
+
+ def pmem_create_memory_allocation(self, skt=None):
+ def build_ipmctl_creation_opts(self, skt=None):
+ ipmctl_opts = []
+
+ if skt:
+ appdirect = skt['appdirect']
+ memmode = skt['memorymode']
+ reserved = skt['reserved']
+ socket_id = skt['id']
+ ipmctl_opts += ['-socket', socket_id]
+ else:
+ appdirect = self.appdirect
+ memmode = self.memmode
+ reserved = self.reserved
+
+ if reserved is None:
+ res = 100 - memmode - appdirect
+ ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % res]
+ else:
+ ipmctl_opts += ['memorymode=%d' % memmode, 'reserved=%d' % reserved]
+
+ if self.interleaved:
+ ipmctl_opts += ['PersistentMemoryType=AppDirect']
+ else:
+ ipmctl_opts += ['PersistentMemoryType=AppDirectNotInterleaved']
+
+ return ipmctl_opts
+
+ def is_allocation_good(self, ipmctl_out, command):
+ warning = re.compile('WARNING')
+ error = re.compile('.*Error.*')
+            # The dry-run confirmation prompt surfaces as a spurious
+            # "Error: Invalid data input." line; it is noise, not a failure.
+            ignore_error = re.compile(
+                r'Do you want to continue\? \[y/n\] Error: Invalid data input\.')
+
+ errmsg = ''
+ rc = True
+ for line in ipmctl_out.splitlines():
+ if warning.match(line):
+ errmsg = '%s (command: %s)' % (line, command)
+ rc = False
+ break
+ elif error.match(line):
+                    if not ignore_error.match(line):
+ errmsg = '%s (command: %s)' % (line, command)
+ rc = False
+ break
+
+ return rc, errmsg
+
+ def get_allocation_result(self, goal, skt=None):
+ ret = {'appdirect': 0, 'memorymode': 0}
+
+ if skt:
+ ret['socket'] = skt['id']
+
+ out = xmltodict.parse(goal, dict_constructor=dict)['ConfigGoalList']['ConfigGoal']
+ for entry in out:
+
+            # It is probably an ipmctl bug that it also shows goals for
+            # sockets other than the one given with the -socket option.
+            # Either way, filter the noise out here:
+ if skt and skt['id'] != int(entry['SocketID'], 16):
+ continue
+
+ for key, v in entry.items():
+ if key == 'MemorySize':
+ ret['memorymode'] += int(v.split()[0])
+                elif key == 'AppDirect1Size' or key == 'AppDirect2Size':
+ ret['appdirect'] += int(v.split()[0])
+
+ capacity = self.pmem_get_capacity(skt)
+ ret['reserved'] = capacity - ret['appdirect'] - ret['memorymode']
+
+ return ret
+
+ reboot_required = False
+
+ ipmctl_opts = build_ipmctl_creation_opts(self, skt)
+
+        # First, do a dry run of the ipmctl create command to check for errors and warnings.
+ command = ['create', '-goal'] + ipmctl_opts
+ out = self.pmem_run_ipmctl(command, returnCheck=False)
+ rc, errmsg = is_allocation_good(self, out, command)
+ if rc is False:
+ return reboot_required, {}, errmsg
+
+ # Run actual creation here
+ command = ['create', '-u', 'B', '-o', 'nvmxml', '-force', '-goal'] + ipmctl_opts
+ goal = self.pmem_run_ipmctl(command)
+ ret = get_allocation_result(self, goal, skt)
+ reboot_required = True
+
+ return reboot_required, ret, ''
+
+ def pmem_config_namespaces(self, namespace):
+ command = ['create-namespace', '-m', namespace['mode']]
+ if namespace['type']:
+ command += ['-t', namespace['type']]
+ if 'size_byte' in namespace:
+ command += ['-s', namespace['size_byte']]
+
+ self.pmem_run_ndctl(command)
+
+ return None
+
+
+def main():
+
+ pmem = PersistentMemory()
+
+ pmem.pmem_is_dcpmm_installed()
+
+ error = pmem.pmem_argument_check()
+ if error:
+ pmem.module.fail_json(msg=error)
+
+ pmem.pmem_init_env()
+ pmem.changed = True
+
+ if pmem.namespace:
+ for ns in pmem.namespace:
+ pmem.pmem_config_namespaces(ns)
+
+ command = ['list', '-N']
+ out = pmem.pmem_run_ndctl(command)
+ all_ns = json.loads(out)
+
+ pmem.result = all_ns
+ reboot_required = False
+ elif pmem.socket is None:
+ reboot_required, ret, errmsg = pmem.pmem_create_memory_allocation()
+ if errmsg:
+ pmem.module.fail_json(msg=errmsg)
+ pmem.result.append(ret)
+ else:
+ for skt in pmem.socket:
+ skt_reboot_required, skt_ret, skt_errmsg = pmem.pmem_create_memory_allocation(skt)
+
+ if skt_errmsg:
+ pmem.module.fail_json(msg=skt_errmsg)
+
+ if skt_reboot_required:
+ reboot_required = True
+
+ pmem.result.append(skt_ret)
+
+ pmem.module.exit_json(
+ changed=pmem.changed,
+ reboot_required=reboot_required,
+ result=pmem.result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/portage.py b/ansible_collections/community/general/plugins/modules/portage.py
new file mode 100644
index 000000000..1c6b36537
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/portage.py
@@ -0,0 +1,587 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, William L Thomson Jr
+# Copyright (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann <sokann@gmail.com>
+# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ aliases: [name]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - State of the package atom
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
+ type: str
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ type: bool
+ default: false
+
+ backtrack:
+ description:
+ - Set backtrack value (C(--backtrack)).
+ type: int
+ version_added: 5.8.0
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ type: bool
+ default: false
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ type: bool
+ default: false
+
+ changed_use:
+ description:
+      - Include installed packages where USE flags have changed, except when
+        flags that the user has not enabled are added or removed
+        (--changed-use).
+ type: bool
+ default: false
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ type: bool
+ default: false
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ type: bool
+ default: true
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ type: bool
+ default: false
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ type: bool
+ default: false
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ type: bool
+ default: false
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ type: bool
+ default: false
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ type: bool
+ default: false
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If C(yes), perform "emerge --sync"
+ - If C(web), perform "emerge-webrsync"
+ choices: [ "web", "yes", "no" ]
+ type: str
+
+ getbinpkgonly:
+ description:
+ - Merge only packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: false
+ version_added: 1.3.0
+
+ getbinpkg:
+ description:
+ - Prefer packages specified at C(PORTAGE_BINHOST) in C(make.conf).
+ type: bool
+ default: false
+
+ usepkgonly:
+ description:
+ - Merge only binaries (no compiling).
+ type: bool
+ default: false
+
+ usepkg:
+ description:
+ - Tries to use the binary package(s) in the locally available packages directory.
+ type: bool
+ default: false
+
+ keepgoing:
+ description:
+ - Continue as much as possible after an error.
+ type: bool
+ default: false
+
+ jobs:
+ description:
+ - Specifies the number of packages to build simultaneously.
+ - "Since version 2.6: Value of 0 or False resets any previously added"
+ - --jobs setting values
+ type: int
+
+ loadavg:
+ description:
+      - Specifies that no new builds should be started if there are other
+        builds running and the load average is at least LOAD.
+      - "Since version 2.6: Value of 0 or False resets any previously added --load-average setting values."
+ type: float
+
+ withbdeps:
+ description:
+ - Specifies that build time dependencies should be installed.
+ type: bool
+ version_added: 5.8.0
+
+ quietbuild:
+ description:
+      - Redirect all build output to logs alone, and do not display it on
+        stdout (--quiet-build).
+ type: bool
+ default: false
+
+ quietfail:
+ description:
+      - Suppresses display of the build log on stdout (--quiet-fail).
+      - Only the die message and the path of the build log will be
+        displayed on stdout.
+ type: bool
+ default: false
+
+author:
+ - "William L Thomson Jr (@wltjr)"
+ - "Yap Sok Ann (@sayap)"
+ - "Andrew Udvare (@Tatsh)"
+'''
+
+EXAMPLES = '''
+- name: Make sure package foo is installed
+ community.general.portage:
+ package: foo
+ state: present
+
+- name: Make sure package foo is not installed
+ community.general.portage:
+ package: foo
+ state: absent
+
+- name: Update package foo to the latest version (os specific alternative to latest)
+ community.general.portage:
+ package: foo
+ update: true
+
+- name: Install package foo using PORTAGE_BINHOST setup
+ community.general.portage:
+ package: foo
+ getbinpkg: true
+
+- name: Re-install world from binary packages only and do not allow any compiling
+ community.general.portage:
+ package: '@world'
+ usepkgonly: true
+
+- name: Sync repositories and update world
+ community.general.portage:
+ package: '@world'
+ update: true
+ deep: true
+ sync: true
+
+- name: Remove unneeded packages
+ community.general.portage:
+ depclean: true
+
+- name: Remove package foo if it is not explicitly needed
+ community.general.portage:
+ package: foo
+ state: absent
+ depclean: true
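+
+# A hypothetical sketch (not part of the original examples) combining the
+# documented build-control options; the values 2 and 4.0 are illustrative.
+- name: Update world, building at most two packages at a time
+  community.general.portage:
+    package: '@world'
+    update: true
+    deep: true
+    jobs: 2
+    loadavg: 4.0
+    keepgoing: true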
+'''
+
+import os
+import re
+import sys
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.respawn import has_respawned, respawn_module
+from ansible.module_utils.common.text.converters import to_native
+
+
+try:
+ from portage.dbapi import vartree
+ from portage.exception import InvalidAtom
+ HAS_PORTAGE = True
+ PORTAGE_IMPORT_ERROR = None
+except ImportError:
+ HAS_PORTAGE = False
+ PORTAGE_IMPORT_ERROR = traceback.format_exc()
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ vdb = vartree.vardbapi()
+ try:
+ exists = vdb.match(atom)
+ except InvalidAtom:
+ return False
+ return bool(exists)
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.exit_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet --ask=n' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, package querying is done one-by-one,
+# but emerge is done in one go. If that is not desirable, split the
+# packages into multiple tasks instead of joining them together with
+# comma.
+
+
+def emerge_packages(module, packages):
+ """Run emerge command against given list of atoms."""
+ p = module.params
+
+    # Shortcut when emerge would not re-install anything: the for/else falls
+    # through to "already present" only when no break occurred, that is, when
+    # every requested package is already installed.
+    if p['noreplace'] and not p['changed_use'] and not p['newuse'] and not (p['update'] or p['state'] == 'latest'):
+        for package in packages:
+            if not query_package(module, package, 'emerge'):
+                break
+        else:
+            module.exit_json(changed=False, msg='Packages already present.')
+ if module.check_mode:
+ module.exit_json(changed=True, msg='Packages would be installed.')
+
+ args = []
+ emerge_flags = {
+ 'update': '--update',
+ 'deep': '--deep',
+ 'newuse': '--newuse',
+ 'changed_use': '--changed-use',
+ 'oneshot': '--oneshot',
+ 'noreplace': '--noreplace',
+ 'nodeps': '--nodeps',
+ 'onlydeps': '--onlydeps',
+ 'quiet': '--quiet',
+ 'verbose': '--verbose',
+ 'getbinpkgonly': '--getbinpkgonly',
+ 'getbinpkg': '--getbinpkg',
+ 'usepkgonly': '--usepkgonly',
+ 'usepkg': '--usepkg',
+ 'keepgoing': '--keep-going',
+ 'quietbuild': '--quiet-build',
+ 'quietfail': '--quiet-fail',
+ }
+ for flag, arg in emerge_flags.items():
+ if p[flag]:
+ args.append(arg)
+
+    if p['state'] == 'latest':
+ args.append("--update")
+
+ emerge_flags = {
+ 'jobs': '--jobs',
+ 'loadavg': '--load-average',
+ 'backtrack': '--backtrack',
+ 'withbdeps': '--with-bdeps',
+ }
+
+ for flag, arg in emerge_flags.items():
+ flag_val = p[flag]
+
+ if flag_val is None:
+ """Fallback to default: don't use this argument at all."""
+ continue
+
+ """Add the --flag=value pair."""
+ if isinstance(flag_val, bool):
+ args.extend((arg, to_native('y' if flag_val else 'n')))
+ elif not flag_val:
+ """If the value is 0 or 0.0: add the flag, but not the value."""
+ args.append(arg)
+ else:
+ args.extend((arg, to_native(flag_val)))
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
+ # this error
+ if (p['usepkgonly'] or p['getbinpkg'] or p['getbinpkgonly']) \
+ and 'Permission denied (publickey).' in err:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Please check your PORTAGE_BINHOST configuration in make.conf '
+ 'and your SSH authorized_keys file',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
+ msg = 'Packages installed.'
+ break
+ elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
+ msg = 'Packages would be installed.'
+ break
+ else:
+ changed = False
+ msg = 'No packages installed.'
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg=msg,
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+    changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ args.append('--ask=n')
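+    # In check mode, --pretend makes emerge only report what it would do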
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed', 'latest']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(type='list', elements='str', default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=False, type='bool'),
+ backtrack=dict(default=None, type='int'),
+ deep=dict(default=False, type='bool'),
+ newuse=dict(default=False, type='bool'),
+ changed_use=dict(default=False, type='bool'),
+ oneshot=dict(default=False, type='bool'),
+ noreplace=dict(default=True, type='bool'),
+ nodeps=dict(default=False, type='bool'),
+ onlydeps=dict(default=False, type='bool'),
+ depclean=dict(default=False, type='bool'),
+ quiet=dict(default=False, type='bool'),
+ verbose=dict(default=False, type='bool'),
+ sync=dict(default=None, choices=['yes', 'web', 'no']),
+ getbinpkgonly=dict(default=False, type='bool'),
+ getbinpkg=dict(default=False, type='bool'),
+ usepkgonly=dict(default=False, type='bool'),
+ usepkg=dict(default=False, type='bool'),
+ keepgoing=dict(default=False, type='bool'),
+ jobs=dict(default=None, type='int'),
+ loadavg=dict(default=None, type='float'),
+ withbdeps=dict(default=None, type='bool'),
+ quietbuild=dict(default=False, type='bool'),
+ quietfail=dict(default=False, type='bool'),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[
+ ['nodeps', 'onlydeps'],
+ ['quiet', 'verbose'],
+ ['quietbuild', 'verbose'],
+ ['quietfail', 'verbose'],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PORTAGE:
+ if sys.executable != '/usr/bin/python' and not has_respawned():
+ respawn_module('/usr/bin/python')
+ else:
+ module.fail_json(msg=missing_required_lib('portage'),
+ exception=PORTAGE_IMPORT_ERROR)
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+
+ p = module.params
+
+ if p['sync'] and p['sync'].strip() != 'no':
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ module.exit_json(msg='Sync successfully finished.')
+
+ packages = []
+ if p['package']:
+ packages.extend(p['package'])
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/portinstall.py b/ansible_collections/community/general/plugins/modules/portinstall.py
new file mode 100644
index 000000000..e263b7181
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/portinstall.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, berenddeboer
+# Written by berenddeboer <berend@pobox.com>
+# Based on pkgng module written by bleader <bleader at ratonland.org>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: portinstall
+short_description: Installing packages from FreeBSD's ports system
+description:
+ - Manage packages for FreeBSD using 'portinstall'.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+    - Name of the package to install or remove.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+    - State of the package.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ type: str
+ use_packages:
+ description:
+    - Use packages instead of ports whenever available.
+ type: bool
+ required: false
+ default: true
+author: "berenddeboer (@berenddeboer)"
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.portinstall:
+ name: foo
+ state: present
+
+- name: Install package security/cyrus-sasl2-saslauthd
+ community.general.portinstall:
+ name: security/cyrus-sasl2-saslauthd
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.portinstall:
+ name: foo,bar
+ state: absent
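+
+# A hypothetical extra task (not part of the original examples): disable the
+# documented use_packages option to force a build from the ports tree.
+- name: Install package bar from ports even if a prebuilt package exists
+  community.general.portinstall:
+    name: bar
+    use_packages: false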
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def query_package(module, name):
+
+ pkg_info_path = module.get_bin_path('pkg_info', False)
+
+ # Assume that if we have pkg_info, we haven't upgraded to pkgng
+ if pkg_info_path:
+ pkgng = False
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+        rc, out, err = module.run_command("%s -e `%s %s`" % (pkg_info_path, pkg_glob_path, shlex_quote(name)), use_unsafe_shell=True)
+ else:
+ pkgng = True
+ pkg_info_path = module.get_bin_path('pkg', True)
+ pkg_info_path = pkg_info_path + " info"
+ rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))
+
+ found = rc == 0
+
+ if not found:
+        # databases/mysql55-client installs as mysql-client, so try solving
+        # that the ugly way. Pity FreeBSD doesn't have a foolproof way of
+        # checking whether some package is installed.
+        # The query is identical for pkgng and legacy pkg_info, since
+        # pkg_info_path already points at the right command.
+        name_without_digits = re.sub('[0-9]', '', name)
+        if name != name_without_digits:
+            rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
+
+            found = rc == 0
+
+ return found
+
+
+def matching_packages(module, name):
+
+ ports_glob_path = module.get_bin_path('ports_glob', True)
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
+ # counts the number of packages found
+ occurrences = out.count('\n')
+ if occurrences == 0:
+ name_without_digits = re.sub('[0-9]', '', name)
+ if name != name_without_digits:
+ rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits))
+ occurrences = out.count('\n')
+ return occurrences
+
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+ pkg_glob_path = module.get_bin_path('pkg_glob', True)
+
+ # If pkg_delete not found, we assume pkgng
+ pkg_delete_path = module.get_bin_path('pkg_delete', False)
+ if not pkg_delete_path:
+ pkg_delete_path = module.get_bin_path('pkg', True)
+ pkg_delete_path = pkg_delete_path + " delete -y"
+
+    # Use a for loop so that, in case of error, we can report which package failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, shlex_quote(package)), use_unsafe_shell=True)
+
+ if query_package(module, package):
+ name_without_digits = re.sub('[0-9]', '', package)
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path,
+ shlex_quote(name_without_digits)),
+ use_unsafe_shell=True)
+ if query_package(module, package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, packages, use_packages):
+
+ install_c = 0
+
+    # If portinstall is not found, install it automatically via pkg
+    portinstall_path = module.get_bin_path('portinstall', False)
+    if not portinstall_path:
+        pkg_path = module.get_bin_path('pkg', False)
+        if pkg_path:
+            module.run_command("%s install -y portupgrade" % pkg_path)
+ portinstall_path = module.get_bin_path('portinstall', True)
+
+ if use_packages:
+ portinstall_params = "--use-packages"
+ else:
+ portinstall_params = ""
+
+ for package in packages:
+ if query_package(module, package):
+ continue
+
+ # TODO: check how many match
+ matches = matching_packages(module, package)
+ if matches == 1:
+ rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
+ if not query_package(module, package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out))
+ elif matches == 0:
+ module.fail_json(msg="no matches for package %s" % (package))
+ else:
+ module.fail_json(msg="%s matches found for package name %s" % (matches, package))
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=["present", "absent"]),
+ name=dict(aliases=["pkg"], required=True),
+ use_packages=dict(type='bool', default=True)))
+
+ p = module.params
+
+ pkgs = p["name"].split(",")
+
+ if p["state"] == "present":
+ install_packages(module, pkgs, p["use_packages"])
+
+ elif p["state"] == "absent":
+ remove_packages(module, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org.py b/ansible_collections/community/general/plugins/modules/pritunl_org.py
new file mode 100644
index 000000000..df2df4494
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pritunl_org.py
@@ -0,0 +1,206 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pritunl_org
+author: Florian Dambrine (@Lowess)
+version_added: 2.5.0
+short_description: Manages Pritunl Organizations using the Pritunl API
+description:
+ - A module to manage Pritunl organizations using the Pritunl API.
+extends_documentation_fragment:
+ - community.general.pritunl
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization to manage in Pritunl.
+
+ force:
+ type: bool
+ default: false
+ description:
+      - If I(force) is C(true) and I(state) is C(absent), the module
+        deletes the organization whether or not it contains users. By
+        default I(force) is C(false), which causes the module to fail
+        the deletion of an organization that still contains users.
+
+ state:
+ type: str
+ default: 'present'
+ choices:
+ - present
+ - absent
+ description:
+ - If C(present), the module adds organization I(name) to
+        Pritunl. If C(absent), it attempts to delete the organization
+ from Pritunl (please read about I(force) usage).
+"""
+
+EXAMPLES = """
+- name: Ensure the organization named MyOrg exists
+ community.general.pritunl_org:
+ state: present
+ name: MyOrg
+
+- name: Ensure the organization named MyOrg does not exist
+ community.general.pritunl_org:
+ state: absent
+ name: MyOrg
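+
+# A hypothetical extra task (not part of the original examples): with the
+# documented force option, the organization is removed even if it still
+# contains users.
+- name: Forcibly remove the organization named MyOrg, even with users attached
+  community.general.pritunl_org:
+    state: absent
+    name: MyOrg
+    force: true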
+"""
+
+RETURN = """
+response:
+ description: JSON representation of a Pritunl Organization.
+ returned: success
+ type: dict
+ sample:
+ {
+ "auth_api": false,
+ "name": "Foo",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3",
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
+ PritunlException,
+ delete_pritunl_organization,
+ post_pritunl_organization,
+ list_pritunl_organizations,
+ get_pritunl_settings,
+ pritunl_argument_spec,
+)
+
+
+def add_pritunl_organization(module):
+ result = {}
+
+ org_name = module.params.get("name")
+
+ org_obj_list = list_pritunl_organizations(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {"filters": {"name": org_name}},
+ )
+ )
+
+ # If the organization already exists
+ if len(org_obj_list) > 0:
+ result["changed"] = False
+ result["response"] = org_obj_list[0]
+ else:
+ # Otherwise create it
+ response = post_pritunl_organization(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {"organization_name": org_name},
+ )
+ )
+ result["changed"] = True
+ result["response"] = response
+
+ module.exit_json(**result)
+
+
+def remove_pritunl_organization(module):
+ result = {}
+
+ org_name = module.params.get("name")
+ force = module.params.get("force")
+
+ org_obj_list = list_pritunl_organizations(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "filters": {"name": org_name},
+ },
+ )
+ )
+
+ # No organization found
+ if len(org_obj_list) == 0:
+ result["changed"] = False
+ result["response"] = {}
+
+ else:
+ # Otherwise attempt to delete it
+ org = org_obj_list[0]
+
+ # Only accept deletion under specific conditions
+ if force or org["user_count"] == 0:
+ response = delete_pritunl_organization(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {"organization_id": org["id"]},
+ )
+ )
+ result["changed"] = True
+ result["response"] = response
+ else:
+ module.fail_json(
+ msg=(
+ "Can not remove organization '%s' with %d attached users. "
+ "Either set 'force' option to true or remove active users "
+ "from the organization"
+ )
+ % (org_name, org["user_count"])
+ )
+
+ module.exit_json(**result)
+
+
+def main():
+ argument_spec = pritunl_argument_spec()
+
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type="str", aliases=["org"]),
+ force=dict(required=False, type="bool", default=False),
+ state=dict(
+ required=False, choices=["present", "absent"], default="present"
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params.get("state")
+
+ try:
+ if state == "present":
+ add_pritunl_organization(module)
+ elif state == "absent":
+ remove_pritunl_organization(module)
+ except PritunlException as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_org_info.py b/ansible_collections/community/general/plugins/modules/pritunl_org_info.py
new file mode 100644
index 000000000..979e29b5a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pritunl_org_info.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pritunl_org_info
+author: Florian Dambrine (@Lowess)
+version_added: 2.5.0
+short_description: List Pritunl Organizations using the Pritunl API
+description:
+ - A module to list Pritunl organizations using the Pritunl API.
+extends_documentation_fragment:
+ - community.general.pritunl
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ organization:
+ type: str
+ required: false
+ aliases:
+ - org
+ default: null
+ description:
+      - Name of the Pritunl organization to search for.
+        If none is provided, the module will return all
+        Pritunl organizations.
+"""
+
+EXAMPLES = """
+- name: List all existing Pritunl organizations
+ community.general.pritunl_org_info:
+
+- name: Search for an organization named MyOrg
+  community.general.pritunl_org_info:
+ organization: MyOrg
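+
+# A hypothetical follow-up (not part of the original examples) showing how
+# the returned organizations list can be consumed from a registered result.
+- name: List all organizations and capture the result
+  community.general.pritunl_org_info:
+  register: pritunl_orgs
+
+- name: Show the user count of each organization
+  ansible.builtin.debug:
+    msg: "{{ item.name }}: {{ item.user_count }} user(s)"
+  loop: "{{ pritunl_orgs.organizations }}"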
+"""
+
+RETURN = """
+organizations:
+ description: List of Pritunl organizations.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "auth_api": false,
+ "name": "FooOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "csftwlu6uhralzi2dpmhekz3",
+ },
+ {
+ "auth_api": false,
+ "name": "MyOrg",
+ "auth_token": null,
+ "user_count": 3,
+ "auth_secret": null,
+ "id": "58070daee63f3b2e6e472c36",
+ },
+ {
+ "auth_api": false,
+ "name": "BarOrg",
+ "auth_token": null,
+ "user_count": 0,
+ "auth_secret": null,
+ "id": "v1sncsxxybnsylc8gpqg85pg",
+ }
+ ]
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
+ PritunlException,
+ get_pritunl_settings,
+ list_pritunl_organizations,
+ pritunl_argument_spec,
+)
+
+
+def get_pritunl_organizations(module):
+ org_name = module.params.get("organization")
+
+ organizations = list_pritunl_organizations(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {"filters": {"name": org_name} if org_name else None},
+ )
+ )
+
+ if org_name and len(organizations) == 0:
+ # When an org_name is provided but no organization match return an error
+ module.fail_json(msg="Organization '%s' does not exist" % org_name)
+
+ result = {}
+ result["changed"] = False
+ result["organizations"] = organizations
+
+ module.exit_json(**result)
+
+
+def main():
+ argument_spec = pritunl_argument_spec()
+
+ argument_spec.update(
+ dict(
+ organization=dict(required=False, type="str", default=None, aliases=["org"])
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ get_pritunl_organizations(module)
+ except PritunlException as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user.py b/ansible_collections/community/general/plugins/modules/pritunl_user.py
new file mode 100644
index 000000000..5aac23393
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pritunl_user.py
@@ -0,0 +1,361 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pritunl_user
+author: "Florian Dambrine (@Lowess)"
+version_added: 2.3.0
+short_description: Manage Pritunl Users using the Pritunl API
+description:
+ - A module to manage Pritunl users using the Pritunl API.
+extends_documentation_fragment:
+ - community.general.pritunl
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ organization:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization the user is part of.
+
+ state:
+ type: str
+ default: 'present'
+ choices:
+ - present
+ - absent
+ description:
+ - If C(present), the module adds user I(user_name) to
+ the Pritunl I(organization). If C(absent), removes the user
+ I(user_name) from the Pritunl I(organization).
+
+ user_name:
+ type: str
+ required: true
+ description:
+ - Name of the user to create or delete from Pritunl.
+
+ user_email:
+ type: str
+ required: false
+ default: null
+ description:
+ - Email address associated with the user I(user_name).
+
+ user_type:
+ type: str
+ required: false
+ default: client
+ choices:
+ - client
+ - server
+ description:
+ - Type of the user I(user_name).
+
+ user_groups:
+ type: list
+ elements: str
+ required: false
+ default: null
+ description:
+ - List of groups associated with the user I(user_name).
+
+ user_disabled:
+ type: bool
+ required: false
+ default: null
+ description:
+ - Enable/Disable the user I(user_name).
+
+ user_gravatar:
+ type: bool
+ required: false
+ default: null
+ description:
+ - Enable/Disable Gravatar usage for the user I(user_name).
+
+ user_mac_addresses:
+ type: list
+ elements: str
+ description:
+ - Allowed MAC addresses for the user I(user_name).
+ version_added: 5.0.0
+"""
+
+EXAMPLES = """
+- name: Create the user Foo with email address foo@bar.com in MyOrg
+ community.general.pritunl_user:
+ state: present
+ organization: MyOrg
+ user_name: Foo
+ user_email: foo@bar.com
+ user_mac_addresses:
+ - "00:00:00:00:00:99"
+
+- name: Disable the user Foo but keep it in Pritunl
+ community.general.pritunl_user:
+ state: present
+ organization: MyOrg
+ user_name: Foo
+ user_email: foo@bar.com
+ user_disabled: true
+
+- name: Make sure the user Foo is not part of MyOrg anymore
+ community.general.pritunl_user:
+ state: absent
+ organization: MyOrg
+ user_name: Foo
+"""
+
+RETURN = """
+response:
+ description: JSON representation of Pritunl Users.
+ returned: success
+ type: dict
+ sample:
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo", "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": [],
+ }
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
+ PritunlException,
+ delete_pritunl_user,
+ get_pritunl_settings,
+ list_pritunl_organizations,
+ list_pritunl_users,
+ post_pritunl_user,
+ pritunl_argument_spec,
+)
+
+
+def add_or_update_pritunl_user(module):
+ result = {}
+
+ org_name = module.params.get("organization")
+ user_name = module.params.get("user_name")
+
+ user_params = {
+ "name": user_name,
+ "email": module.params.get("user_email"),
+ "groups": module.params.get("user_groups"),
+ "disabled": module.params.get("user_disabled"),
+ "gravatar": module.params.get("user_gravatar"),
+ "mac_addresses": module.params.get("user_mac_addresses"),
+ "type": module.params.get("user_type"),
+ }
+
+ org_obj_list = list_pritunl_organizations(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {"filters": {"name": org_name}},
+ )
+ )
+
+ if len(org_obj_list) == 0:
+ module.fail_json(
+ msg="Can not add user to organization '%s' which does not exist" % org_name
+ )
+
+ org_id = org_obj_list[0]["id"]
+
+ # Grab existing users from this org
+ users = list_pritunl_users(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "filters": {"name": user_name},
+ },
+ )
+ )
+
+ # Check if the pritunl user already exists
+ if len(users) > 0:
+ # Compare remote user params with local user_params and trigger update if needed
+ user_params_changed = False
+ for key in user_params.keys():
+ # When a param is not specified grab existing ones to prevent from changing it with the PUT request
+ if user_params[key] is None:
+ user_params[key] = users[0][key]
+
+ # 'groups' and 'mac_addresses' are list comparison
+ if key == "groups" or key == "mac_addresses":
+ if set(users[0][key]) != set(user_params[key]):
+ user_params_changed = True
+
+ # otherwise it is either a boolean or a string
+ else:
+ if users[0][key] != user_params[key]:
+ user_params_changed = True
+
+ # Trigger a PUT on the API to update the current user if settings have changed
+ if user_params_changed:
+ response = post_pritunl_user(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "user_id": users[0]["id"],
+ "user_data": user_params,
+ },
+ )
+ )
+
+ result["changed"] = True
+ result["response"] = response
+ else:
+ result["changed"] = False
+ result["response"] = users
+ else:
+ response = post_pritunl_user(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "user_data": user_params,
+ },
+ )
+ )
+ result["changed"] = True
+ result["response"] = response
+
+ module.exit_json(**result)
+
+
+def remove_pritunl_user(module):
+ result = {}
+
+ org_name = module.params.get("organization")
+ user_name = module.params.get("user_name")
+
+
+ org_obj_list = list_pritunl_organizations(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "filters": {"name": org_name},
+ },
+ )
+ )
+
+ if len(org_obj_list) == 0:
+ module.fail_json(
+ msg="Can not remove user '%s' from a non existing organization '%s'"
+ % (user_name, org_name)
+ )
+
+ org_id = org_obj_list[0]["id"]
+
+ # Grab existing users from this org
+ users = list_pritunl_users(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "filters": {"name": user_name},
+ },
+ )
+ )
+
+ # Check if the pritunl user exists, if not, do nothing
+ if len(users) == 0:
+ result["changed"] = False
+ result["response"] = {}
+
+ # Otherwise remove the org from Pritunl
+ else:
+ response = delete_pritunl_user(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "user_id": users[0]["id"],
+ },
+ )
+ )
+ result["changed"] = True
+ result["response"] = response
+
+ module.exit_json(**result)
+
+
+def main():
+ argument_spec = pritunl_argument_spec()
+
+ argument_spec.update(
+ dict(
+ organization=dict(required=True, type="str", aliases=["org"]),
+ state=dict(
+ required=False, choices=["present", "absent"], default="present"
+ ),
+ user_name=dict(required=True, type="str"),
+ user_type=dict(
+ required=False, choices=["client", "server"], default="client"
+ ),
+ user_email=dict(required=False, type="str", default=None),
+ user_groups=dict(required=False, type="list", elements="str", default=None),
+ user_disabled=dict(required=False, type="bool", default=None),
+ user_gravatar=dict(required=False, type="bool", default=None),
+ user_mac_addresses=dict(required=False, type="list", elements="str", default=None),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params.get("state")
+
+ try:
+ if state == "present":
+ add_or_update_pritunl_user(module)
+ elif state == "absent":
+ remove_pritunl_user(module)
+ except PritunlException as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pritunl_user_info.py b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
new file mode 100644
index 000000000..7b0399061
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pritunl_user_info.py
@@ -0,0 +1,174 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: pritunl_user_info
+author: "Florian Dambrine (@Lowess)"
+version_added: 2.3.0
+short_description: List Pritunl Users using the Pritunl API
+description:
+ - A module to list Pritunl users using the Pritunl API.
+extends_documentation_fragment:
+ - community.general.pritunl
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ organization:
+ type: str
+ required: true
+ aliases:
+ - org
+ description:
+ - The name of the organization the user is part of.
+
+ user_name:
+ type: str
+ required: false
+ description:
+ - Name of the user to search for in Pritunl.
+
+ user_type:
+ type: str
+ required: false
+ default: client
+ choices:
+ - client
+ - server
+ description:
+ - Type of the user I(user_name).
+"""
+
+EXAMPLES = """
+- name: List all existing users part of the organization MyOrg
+ community.general.pritunl_user_info:
+ organization: MyOrg
+
+- name: Search for the user named Florian part of the organization MyOrg
+ community.general.pritunl_user_info:
+ organization: MyOrg
+ user_name: Florian
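+
+# An extra illustrative example (not part of the original module docs):
+# register the result so the returned user list can be reused.
+- name: Look up Florian in MyOrg and keep the result
+ community.general.pritunl_user_info:
+ organization: MyOrg
+ user_name: Florian
+ register: pritunl_users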
+"""
+
+RETURN = """
+users:
+ description: List of Pritunl users.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "audit": false,
+ "auth_type": "google",
+ "bypass_secondary": false,
+ "client_to_client": false,
+ "disabled": false,
+ "dns_mapping": null,
+ "dns_servers": null,
+ "dns_suffix": null,
+ "email": "foo@bar.com",
+ "gravatar": true,
+ "groups": [
+ "foo", "bar"
+ ],
+ "id": "5d070dafe63q3b2e6s472c3b",
+ "name": "foo@acme.com",
+ "network_links": [],
+ "organization": "58070daee6sf342e6e4s2c36",
+ "organization_name": "Acme",
+ "otp_auth": true,
+ "otp_secret": "35H5EJA3XB2$4CWG",
+ "pin": false,
+ "port_forwarding": [],
+ "servers": [],
+ }
+ ]
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api import (
+ PritunlException,
+ get_pritunl_settings,
+ list_pritunl_organizations,
+ list_pritunl_users,
+ pritunl_argument_spec,
+)
+
+
+def get_pritunl_user(module):
+ user_name = module.params.get("user_name")
+ user_type = module.params.get("user_type")
+ org_name = module.params.get("organization")
+
+
+ org_obj_list = list_pritunl_organizations(
+ **dict_merge(get_pritunl_settings(module), {"filters": {"name": org_name}})
+ )
+
+ if len(org_obj_list) == 0:
+ module.fail_json(
+ msg="Can not list users from the organization '%s' which does not exist"
+ % org_name
+ )
+
+ org_id = org_obj_list[0]["id"]
+
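+ # Filter on type only when no user_name was given; otherwise narrow
+ # the search to the requested name and type.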
+ users = list_pritunl_users(
+ **dict_merge(
+ get_pritunl_settings(module),
+ {
+ "organization_id": org_id,
+ "filters": (
+ {"type": user_type}
+ if user_name is None
+ else {"name": user_name, "type": user_type}
+ ),
+ },
+ )
+ )
+
+ result = {}
+ result["changed"] = False
+ result["users"] = users
+
+ module.exit_json(**result)
+
+
+def main():
+ argument_spec = pritunl_argument_spec()
+
+ argument_spec.update(
+ dict(
+ organization=dict(required=True, type="str", aliases=["org"]),
+ user_name=dict(required=False, type="str", default=None),
+ user_type=dict(
+ required=False,
+ choices=["client", "server"],
+ default="client",
+ ),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ try:
+ get_pritunl_user(module)
+ except PritunlException as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks.py b/ansible_collections/community/general/plugins/modules/profitbricks.py
new file mode 100644
index 000000000..c8bcceb93
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks.py
@@ -0,0 +1,667 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine
+description:
+ - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created, the module can optionally
+ wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ type: bool
+ default: true
+ name:
+ description:
+ - The name of the virtual machine.
+ type: str
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ elements: str
+ default: []
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ type: str
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ default: 2
+ type: int
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ default: 2048
+ type: int
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ type: str
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ type: int
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ type: str
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - List of instance IDs. Currently only used when C(state=absent) to remove instances.
+ type: list
+ elements: str
+ default: []
+ count:
+ description:
+ - The number of virtual machines to create.
+ type: int
+ default: 1
+ location:
+ description:
+ - The datacenter location. Used only when creating the datacenter; otherwise this value is ignored.
+ type: str
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ type: bool
+ default: false
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ type: int
+ default: 1
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: true
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ type: bool
+ default: true
+ state:
+ description:
+ - create or terminate instances
+ - 'The choices available are: C(running), C(stopped), C(absent), C(present).'
+ type: str
+ default: 'present'
+ disk_type:
+ description:
+ - the type of disk to be allocated.
+ type: str
+ choices: [SSD, HDD]
+ default: HDD
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details, see the ProfitBricks documentation for details.
+
+# Provisioning example
+- name: Create three servers and enumerate their names
+ community.general.profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+- name: Remove virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+- name: Start virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+- name: Stop virtual machines
+ community.general.profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+'''
+
+import re
+import uuid
+import time
+import traceback
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.common.text.converters import to_native
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
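+ # Poll the request status every 5 seconds until the API reports DONE
+ # or FAILED, or until the deadline computed above expires.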
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ assign_public_ip = module.params.get('assign_public_ip')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+ for lan in lans['items']:
+ if lan['properties']['public']:
+ public_found = True
+ lan = lan['id']
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
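+ # Probe whether 'name' contains a printf-style placeholder such as
+ # 'web%02d'; formatting a plain string raises TypeError ('not all
+ # arguments converted...'), in which case '%d' is appended so the
+ # created machines can still be numbered.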
+ try:
+ name % 0
+ except TypeError as e:
+ if to_native(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+ True if a new virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception as e:
+ module.fail_json(msg="failed to terminate the virtual server: %s" % to_native(e), exception=traceback.format_exc())
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % to_native(e), exception=traceback.format_exc())
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ community.general.profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
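+ # The API reports a stopped server as vmState 'shutoff', so a
+ # requested state of 'stopped' is matched against that value.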
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+ return (changed)
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(no_log=True),
+ ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', elements='str', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required ' +
+ 'for deleting machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set instance state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
new file mode 100644
index 000000000..a096db752
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_datacenter.py
@@ -0,0 +1,266 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+ on profitbricks >= 1.0.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ type: str
+ description:
+ description:
+ - The description of the virtual datacenter.
+ type: str
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ type: str
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate datacenters.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a datacenter
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+
+- name: Destroy a datacenter (remove all servers, volumes, and other objects in the datacenter)
+ community.general.profitbricks_datacenter:
+ name: Tardis One
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+ if uuid_match.match(name):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600, type='int'),
+ state=dict(default='present'), # @TODO add choices
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_nic.py b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
new file mode 100644
index 000000000..17a30b052
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_nic.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC
+description:
+ - This module allows you to create or remove a NIC. This module has a dependency on profitbricks >= 1.0.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ type: str
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is only required when C(state=absent), not on create.
+ - If not specified, it defaults to a value based on UUID4.
+ type: str
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: true
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: true
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+- name: Remove a NIC
+ community.general.profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _make_default_name():
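+ # Default NIC names are the first ten characters of a dash-less UUID4.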
+ return str(uuid.uuid4()).replace('-', '')[:10]
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+ if name is None:
+ name = _make_default_name()
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
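+ # A server or NIC that cannot be resolved is treated as already gone:
+ # the delete becomes a no-op and is reported as unchanged.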
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(required=True),
+ server=dict(required=True),
+ name=dict(),
+ lan=dict(),
+ subscription_user=dict(required=True),
+ subscription_password=dict(required=True, no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ ),
+ required_if=(
+ ('state', 'absent', ['name']),
+ ('state', 'present', ['lan']),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict) # @FIXME changed not calculated?
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
new file mode 100644
index 000000000..f9d257b68
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume
+description:
+ - Allows you to create or remove a volume in a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ type: str
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ type: str
+ size:
+ description:
+ - The size of the volume.
+ type: int
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ type: str
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ type: str
+ image_password:
+ description:
+ - Password set for the administrative user.
+ type: str
+ required: false
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ type: list
+ elements: str
+ default: []
+ disk_type:
+ description:
+ - The disk type of the volume.
+ type: str
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ - "The available choices are: C(LINUX), C(WINDOWS), C(UNKNOWN), C(OTHER)."
+ type: str
+ required: false
+ default: UNKNOWN
+ count:
+ description:
+ - The number of volumes you wish to create.
+ type: int
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: true
+ type: bool
+ instance_ids:
+ description:
+ - List of instance IDs. Currently only used when C(state=absent) to remove instances.
+ type: list
+ elements: str
+ default: []
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+ - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+ - wait for the volume to be created before returning
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ type: int
+ default: 600
+ state:
+ description:
+ - Create or terminate volumes.
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+ server:
+ description:
+ - Server name to attach the volume to.
+ type: str
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Create multiple volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: true
+ wait_timeout: 500
+ state: present
+
+- name: Remove Volumes
+ community.general.profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+import traceback
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.common.text.converters import to_native
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request ' + msg + ' "' + str(
+ promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError as e:
+ if to_native(e).startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
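+ # Without auto_increment the same name is submitted 'count' times,
+ # producing identically named volumes.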
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+ This will remove a volume from a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+ if uuid_match.match(n):
+ _delete_volume(module, profitbricks, datacenter, n)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception as e:
+ module.fail_json(msg='failed to attach volume: %s' % to_native(e), exception=traceback.format_exc())
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(no_log=True),
+ ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', elements='str', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+            module.fail_json(msg='datacenter parameter is required for deleting a volume.')
+
+ try:
+            changed = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+            volume_dict_array = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume state: %s' % to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
new file mode 100644
index 000000000..75cd73df3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/profitbricks_volume_attachments.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume
+description:
+  - Allows you to attach a volume to, or detach a volume from, a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ type: str
+ server:
+ description:
+      - The name of the server to which you want to attach the volume or from which you want to detach it.
+ type: str
+ volume:
+ description:
+ - The volume name or ID.
+ type: str
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ type: str
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ type: str
+ required: false
+ wait:
+ description:
+      - Wait for the operation to complete before returning.
+ required: false
+ default: true
+ type: bool
+ wait_timeout:
+ description:
+      - How long before wait gives up, in seconds.
+ type: int
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ - "The available choices are: C(present), C(absent)."
+ type: str
+ required: false
+ default: 'present'
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (@baldwinSPC) <baldwin@stackpointcloud.com>
+'''
+
+EXAMPLES = '''
+- name: Attach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+- name: Detach a volume
+ community.general.profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+try:
+ from profitbricks.client import ProfitBricksService
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+uuid_match = re.compile(
+ r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
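+# Illustrative (hypothetical value): a string such as
+# '01234567-89ab-cdef-0123-456789abcdef' matches and is treated as an ID;
+# anything else is resolved by name through the list_* API calls below.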
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise:
+ return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+            raise Exception(
+                'Request ' + msg + ' "' + str(
+                    promise['requestId']) + '" failed to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(no_log=True),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+            changed = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox.py b/ansible_collections/community/general/plugins/modules/proxmox.py
new file mode 100644
index 000000000..315ee601a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox.py
@@ -0,0 +1,826 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: Management of instances in Proxmox VE cluster
+description:
+  - Allows you to create, delete, or stop instances in a Proxmox VE cluster.
+  - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older).
+  - From community.general 4.0.0 on, there are no default values; see I(proxmox_default_behavior).
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ password:
+ description:
+ - the instance root password
+ type: str
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ type: str
+ ostemplate:
+ description:
+ - the template for VM creating
+ - required only for C(state=present)
+ type: str
+ disk:
+ description:
+      - This option was previously described as "hard disk size in GB for instance"; however, several formats describing
+        an LXC mount are permitted.
+ - Older versions of Proxmox will accept a numeric value for size using the I(storage) parameter to automatically
+ choose which storage to allocate from, however new versions enforce the C(<STORAGE>:<SIZE>) syntax.
+ - "Additional options are available by using some combination of the following key-value pairs as a
+ comma-delimited list C([volume=]<volume> [,acl=<1|0>] [,mountoptions=<opt[;opt...]>] [,quota=<1|0>]
+ [,replicate=<1|0>] [,ro=<1|0>] [,shared=<1|0>] [,size=<DiskSize>])."
+ - See U(https://pve.proxmox.com/wiki/Linux_Container) for a full description.
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(3).
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
+ type: int
+ cpus:
+ description:
+ - numbers of allocated cpus for instance
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
+ type: int
+ memory:
+ description:
+ - memory size in MB for instance
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(512).
+ type: int
+ swap:
+ description:
+ - swap memory size in MB for instance
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(0).
+ type: int
+ netif:
+ description:
+      - specifies network interfaces for the container, as a hash/dictionary defining interfaces.
+ type: dict
+ features:
+ description:
+ - Specifies a list of features to be enabled. For valid options, see U(https://pve.proxmox.com/wiki/Linux_Container#pct_options).
+ - Some features require the use of a privileged container.
+ type: list
+ elements: str
+ version_added: 2.0.0
+ mounts:
+ description:
+      - specifies additional mounts (separate disks) for the container, as a hash/dictionary defining mount points
+ type: dict
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ type: str
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(false).
+ type: bool
+ storage:
+ description:
+ - target storage
+ type: str
+ default: 'local'
+ cpuunits:
+ description:
+ - CPU weight for a VM
+      - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
+ type: int
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ type: str
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ type: str
+ tags:
+ description:
+ - List of tags to apply to the container.
+ - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
+ - Tags are only available in Proxmox 7+.
+ type: list
+ elements: str
+ version_added: 6.2.0
+ timeout:
+ description:
+ - timeout for operations
+ type: int
+ default: 30
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+      - with C(state=present) the force option allows you to overwrite an existing container
+      - with states C(stopped), C(restarted) it allows you to force stop an instance
+ type: bool
+ default: false
+ purge:
+ description:
+ - Remove container from all related configurations.
+ - For example backup jobs, replication jobs, or HA.
+ - Related ACLs and Firewall entries will always be removed.
+ - Used with state C(absent).
+ type: bool
+ default: false
+ version_added: 2.3.0
+ state:
+ description:
+ - Indicate desired state of the instance
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+ pubkey:
+ description:
+      - Public key to add to /root/.ssh/authorized_keys. This was added in Proxmox 4.2; it is ignored for earlier versions.
+ type: str
+ unprivileged:
+ description:
+ - Indicate if the container should be unprivileged.
+ - >
+ The default value for this parameter is C(false) but that is deprecated
+ and it will be replaced with C(true) in community.general 7.0.0.
+ type: bool
+ description:
+ description:
+ - Specify the description for the container. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ version_added: '0.2.0'
+ hookscript:
+ description:
+      - Script that will be executed during various steps in the container's lifetime.
+ type: str
+ version_added: '0.2.0'
+ proxmox_default_behavior:
+ description:
+ - As of community.general 4.0.0, various options no longer have default values.
+ These default values caused problems when users expected different behavior from Proxmox
+ by default or filled options which caused problems when set.
+ - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
+ are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
+ which makes sure these options have no defaults.
+ - This affects the I(disk), I(cores), I(cpus), I(memory), I(onboot), I(swap), I(cpuunits) options.
+ type: str
+ default: no_defaults
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+ clone:
+ description:
+ - ID of the container to be cloned.
+ - I(description), I(hostname), and I(pool) will be copied from the cloned container if not specified.
+ - The type of clone created is defined by the I(clone_type) parameter.
+      - This operation is only supported for Proxmox clusters that use LXC containerization (PVE version >= 4).
+ type: int
+ version_added: 4.3.0
+ clone_type:
+ description:
+ - Type of the clone created.
+ - C(full) creates a full clone, and I(storage) must be specified.
+ - C(linked) creates a linked clone, and the cloned container must be a template container.
+ - C(opportunistic) creates a linked clone if the cloned container is a template container, and a full clone if not.
+        I(storage) may be specified; if not, it will fall back to the default.
+ type: str
+ choices: ['full', 'linked', 'opportunistic']
+ default: opportunistic
+ version_added: 4.3.0
+author: Sergei Antipov (@UnderGreen)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.proxmox.selection
+ - community.general.attributes
+'''
+
+EXAMPLES = r'''
+- name: Create new container with minimal options
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with hookscript and description
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ hookscript: 'local:snippets/vm_hook.sh'
+ description: created with ansible
+
+- name: Create new container automatically selecting the next available vmid.
+ community.general.proxmox:
+ node: 'uk-mc02'
+ api_user: 'root@pam'
+ api_password: '1q2w3e'
+ api_host: 'node1'
+ password: '123456'
+ hostname: 'example.org'
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options with force (it will rewrite the existing container)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: true
+
+- name: Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it first)
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+- name: Create new container with minimal options defining network interface with dhcp
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining network interface with static ip
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+- name: Create new container with minimal options defining a mount with 8GB
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+- name: Create new container with minimal options defining a cpu core limit
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ cores: 2
+
+- name: Create a new container with nesting enabled, allowing the use of CIFS/NFS inside the container
+ community.general.proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ features:
+ - nesting=1
+ - mount=cifs,nfs
+
+- name: >
+    Create a linked clone of the template container with id 100. The newly created container will be a
+ linked clone, because no storage parameter is defined
+ community.general.proxmox:
+ vmid: 201
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ clone: 100
+ hostname: clone.example.org
+
+- name: Create a full clone of the container with id 100
+ community.general.proxmox:
+ vmid: 201
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ clone: 100
+ hostname: clone.example.org
+ storage: local
+
+- name: Start container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+- name: >
+ Start container with mount. You should enter a 90-second timeout because servers
+ with additional disks take longer to boot
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+ timeout: 90
+
+- name: Stop container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+- name: Stop container with force
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ force: true
+ state: stopped
+
+- name: Restart container (a stopped or mounted container cannot be restarted)
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: restarted
+
+- name: Remove container
+ community.general.proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
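+
+# Illustrative only; values below are hypothetical. Pinning unprivileged and
+# proxmox_default_behavior explicitly keeps behavior stable across the
+# deprecation changes described in the option documentation above.
+- name: Create new container with explicit default behavior and an unprivileged container
+  community.general.proxmox:
+    vmid: 100
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    password: 123456
+    hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+    unprivileged: true
+    proxmox_default_behavior: no_defaults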
+'''
+
+import re
+import time
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ ansible_to_proxmox_bool, proxmox_auth_argument_spec, ProxmoxAnsible)
+
+VZ_TYPE = None
+
+
+class ProxmoxLxcAnsible(ProxmoxAnsible):
+ def content_check(self, node, ostemplate, template_store):
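+        # Returns a non-empty (truthy) list when ostemplate's volid is present in template_store on the node.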
+ return [True for cnt in self.proxmox_api.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate]
+
+ def is_template_container(self, node, vmid):
+ """Check if the specified container is a template."""
+ proxmox_node = self.proxmox_api.nodes(node)
+ config = getattr(proxmox_node, VZ_TYPE)(vmid).config.get()
+ return config['template']
+
+ def create_instance(self, vmid, node, disk, storage, cpus, memory, swap, timeout, clone, **kwargs):
+
+ # Version limited features
+ minimum_version = {
+ 'tags': 7,
+ }
+ proxmox_node = self.proxmox_api.nodes(node)
+
+ # Remove all empty kwarg entries
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ version = self.version()
+ pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
+
+ # Fail on unsupported features
+ for option, version in minimum_version.items():
+ if pve_major_version < version and option in kwargs:
+ self.module.fail_json(changed=False, msg="Feature {option} is only supported in PVE {version}+, and you're using PVE {pve_major_version}".
+ format(option=option, version=version, pve_major_version=pve_major_version))
+
+ if VZ_TYPE == 'lxc':
+ kwargs['cpulimit'] = cpus
+ kwargs['rootfs'] = disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ if 'pubkey' in kwargs:
+ if self.version() >= LooseVersion('4.2'):
+ kwargs['ssh-public-keys'] = kwargs['pubkey']
+ del kwargs['pubkey']
+ else:
+ kwargs['cpus'] = cpus
+ kwargs['disk'] = disk
+
+ # LXC tags are expected to be valid and presented as a comma/semi-colon delimited string
+ if 'tags' in kwargs:
+ re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
+ for tag in kwargs['tags']:
+ if not re_tag.match(tag):
+ self.module.fail_json(msg='%s is not a valid tag' % tag)
+ kwargs['tags'] = ",".join(kwargs['tags'])
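+            # e.g. (illustrative): tags=['prod', 'web'] is sent to the API as 'prod,web'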
+
+ if clone is not None:
+ if VZ_TYPE != 'lxc':
+                self.module.fail_json(changed=False, msg="Clone operation is only supported for LXC-enabled Proxmox clusters.")
+
+ clone_is_template = self.is_template_container(node, clone)
+
+ # By default, create a full copy only when the cloned container is not a template.
+ create_full_copy = not clone_is_template
+
+ # Only accept parameters that are compatible with the clone endpoint.
+ valid_clone_parameters = ['hostname', 'pool', 'description']
+ if self.module.params['storage'] is not None and clone_is_template:
+ # Cloning a template, so create a full copy instead of a linked copy
+ create_full_copy = True
+ elif self.module.params['storage'] is None and not clone_is_template:
+ # Not cloning a template, but also no defined storage. This isn't possible.
+ self.module.fail_json(changed=False, msg="Cloned container is not a template, storage needs to be specified.")
+
+ if self.module.params['clone_type'] == 'linked':
+ if not clone_is_template:
+ self.module.fail_json(changed=False, msg="'linked' clone type is specified, but cloned container is not a template container.")
+ # Don't need to do more, by default create_full_copy is set to false already
+ elif self.module.params['clone_type'] == 'opportunistic':
+ if not clone_is_template:
+ # Cloned container is not a template, so we need our 'storage' parameter
+ valid_clone_parameters.append('storage')
+ elif self.module.params['clone_type'] == 'full':
+ create_full_copy = True
+ valid_clone_parameters.append('storage')
+
+ clone_parameters = {}
+
+ if create_full_copy:
+ clone_parameters['full'] = '1'
+ else:
+ clone_parameters['full'] = '0'
+ for param in valid_clone_parameters:
+ if self.module.params[param] is not None:
+ clone_parameters[param] = self.module.params[param]
+
+ taskid = getattr(proxmox_node, VZ_TYPE)(clone).clone.post(newid=vmid, **clone_parameters)
+ else:
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
+ while timeout:
+ if self.api_task_ok(node, taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+            self.module.fail_json(msg='Reached timeout while waiting for VM to be created. Last line in task before timeout: %s' %
+                                      proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+ def start_instance(self, vm, vmid, timeout):
+ taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+ def stop_instance(self, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+ def umount_instance(self, vm, vmid, timeout):
+ taskid = getattr(self.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for VM to be unmounted. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ proxmox_args = dict(
+ vmid=dict(type='int', required=False),
+ node=dict(),
+ pool=dict(),
+ password=dict(no_log=True),
+ hostname=dict(),
+ ostemplate=dict(),
+ disk=dict(type='str'),
+ cores=dict(type='int'),
+ cpus=dict(type='int'),
+ memory=dict(type='int'),
+ swap=dict(type='int'),
+ netif=dict(type='dict'),
+ mounts=dict(type='dict'),
+ ip_address=dict(),
+ onboot=dict(type='bool'),
+ features=dict(type='list', elements='str'),
+ storage=dict(default='local'),
+ cpuunits=dict(type='int'),
+ nameserver=dict(),
+ searchdomain=dict(),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ purge=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ pubkey=dict(type='str'),
+ unprivileged=dict(type='bool'),
+ description=dict(type='str'),
+ hookscript=dict(type='str'),
+ proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
+ clone=dict(type='int'),
+ clone_type=dict(default='opportunistic', choices=['full', 'linked', 'opportunistic']),
+ tags=dict(type='list', elements='str')
+ )
+ module_args.update(proxmox_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_if=[
+ ('state', 'present', ['node', 'hostname']),
+ ('state', 'present', ('clone', 'ostemplate'), True), # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
+ # either clone a container or create a new one from a template file.
+ ],
+ required_together=[
+ ('api_token_id', 'api_token_secret')
+ ],
+ required_one_of=[('api_password', 'api_token_id')],
+ mutually_exclusive=[('clone', 'ostemplate')], # Creating a new container is done either by cloning an existing one, or based on a template.
+ )
+
+ proxmox = ProxmoxLxcAnsible(module)
+
+ global VZ_TYPE
+ VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc'
+
+ state = module.params['state']
+ vmid = module.params['vmid']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+ clone = module.params['clone']
+
+ if module.params['unprivileged'] is None:
+ module.params['unprivileged'] = False
+ module.deprecate(
+ 'The default value `false` for the parameter "unprivileged" is deprecated and it will be replaced with `true`',
+ version='7.0.0',
+ collection_name='community.general'
+ )
+
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ disk="3",
+ cores=1,
+ cpus=1,
+ memory=512,
+ swap=0,
+ onboot=False,
+ cpuunits=1000,
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = proxmox.get_nextvmid()
+ elif not vmid and hostname:
+ vmid = proxmox.get_vmid(hostname)
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ # Create a new container
+ if state == 'present' and clone is None:
+ try:
+ if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid %s already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if (not module.params['vmid'] and
+ proxmox.get_vmid(hostname, ignore_missing=True) and
+ not module.params['force']):
+ vmid = proxmox.get_vmid(hostname)
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+ elif not proxmox.get_node(node):
+                module.fail_json(msg="node '%s' does not exist in cluster" % node)
+ elif not proxmox.content_check(node, module.params['ostemplate'], template_store):
+                module.fail_json(msg="ostemplate '%s' does not exist on node %s in storage %s"
+ % (module.params['ostemplate'], node, template_store))
+ except Exception as e:
+ module.fail_json(msg="Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+
+ try:
+ proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone,
+ cores=module.params['cores'],
+ pool=module.params['pool'],
+ password=module.params['password'],
+ hostname=module.params['hostname'],
+ ostemplate=module.params['ostemplate'],
+ netif=module.params['netif'],
+ mounts=module.params['mounts'],
+ ip_address=module.params['ip_address'],
+ onboot=ansible_to_proxmox_bool(module.params['onboot']),
+ cpuunits=module.params['cpuunits'],
+ nameserver=module.params['nameserver'],
+ searchdomain=module.params['searchdomain'],
+ force=ansible_to_proxmox_bool(module.params['force']),
+ pubkey=module.params['pubkey'],
+ features=",".join(module.params['features']) if module.params['features'] is not None else None,
+ unprivileged=ansible_to_proxmox_bool(module.params['unprivileged']),
+ description=module.params['description'],
+ hookscript=module.params['hookscript'],
+ tags=module.params['tags'])
+
+ module.exit_json(changed=True, msg="Deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="Creation of %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ # Clone a container
+ elif state == 'present' and clone is not None:
+ try:
+ if proxmox.get_vm(vmid, ignore_missing=True) and not module.params['force']:
+                module.exit_json(changed=False, msg="VM with vmid %s already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if (not module.params['vmid'] and
+ proxmox.get_vmid(hostname, ignore_missing=True) and
+ not module.params['force']):
+ vmid = proxmox.get_vmid(hostname)
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, vmid))
+ if not proxmox.get_vm(clone, ignore_missing=True):
+ module.exit_json(changed=False, msg="Container to be cloned does not exist")
+ except Exception as e:
+ module.fail_json(msg="Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}".format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))
+
+ try:
+ proxmox.create_instance(vmid, node, disk, storage, cpus, memory, swap, timeout, clone)
+
+ module.exit_json(changed=True, msg="Cloned VM %s from %s" % (vmid, clone))
+ except Exception as e:
+ module.fail_json(msg="Cloning %s VM %s failed with exception: %s" % (VZ_TYPE, vmid, e))
+
+ elif state == 'started':
+ try:
+ vm = proxmox.get_vm(vmid)
+ if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if proxmox.start_instance(vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'stopped':
+ try:
+ vm = proxmox.get_vm(vmid)
+
+ if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if proxmox.umount_instance(vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+                    module.exit_json(changed=False, msg=("VM %s is already shut down, but mounted. "
+                                                         "You can use the force option to umount it.") % vmid)
+
+ if getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+                module.exit_json(changed=False, msg="VM %s is already shut down" % vmid)
+
+ if proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'restarted':
+ try:
+ vm = proxmox.get_vm(vmid)
+
+ vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
+ if vm_status in ['stopped', 'mounted']:
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if (proxmox.stop_instance(vm, vmid, timeout, force=module.params['force']) and
+ proxmox.start_instance(vm, vmid, timeout)):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'absent':
+ if not vmid:
+ module.exit_json(changed=False, msg='VM with hostname = %s is already absent' % hostname)
+ try:
+ vm = proxmox.get_vm(vmid, ignore_missing=True)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE)(vmid).status.current.get()['status']
+ if vm_status == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if vm_status == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ delete_params = {}
+
+ if module.params['purge']:
+ delete_params['purge'] = 1
+
+ taskid = getattr(proxmox.proxmox_api.nodes(vm['node']), VZ_TYPE).delete(vmid, **delete_params)
+
+ while timeout:
+ if proxmox.api_task_ok(vm['node'], taskid):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout -= 1
+ if timeout == 0:
+                    module.fail_json(msg='Reached timeout while waiting for VM removal. Last line in task before timeout: %s'
+ % proxmox.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_disk.py b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
new file mode 100644
index 000000000..df6735cc0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_disk.py
@@ -0,0 +1,767 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2022, Castor Sky (@castorsky) <csky57@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_disk
+short_description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster
+version_added: 5.7.0
+description:
+ - Allows you to perform some supported operations on a disk in Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
+author: "Castor Sky (@castorsky) <csky57@gmail.com>"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - The unique name of the VM.
+ - You can specify either I(name) or I(vmid) or both of them.
+ type: str
+ vmid:
+ description:
+ - The unique ID of the VM.
+ - You can specify either I(vmid) or I(name) or both of them.
+ type: int
+ disk:
+ description:
+ - The disk key (C(unused[n]), C(ide[n]), C(sata[n]), C(scsi[n]) or C(virtio[n])) you want to operate on.
+      - Disk buses (IDE, SATA, and so on) have fixed ranges of C(n) that are accepted by the Proxmox API.
+ - >
+ For IDE: 0-3;
+ for SCSI: 0-30;
+ for SATA: 0-5;
+ for VirtIO: 0-15;
+ for Unused: 0-255.
+ type: str
+ required: true
+ state:
+ description:
+ - Indicates desired state of the disk.
+ - >
+        I(state=present) can be used to create or replace a disk, or to update options in an existing disk. It will create
+        a missing disk or update options in an existing one by default. See the I(create) parameter description to control
+        the behavior of this option.
+      - Some option updates (like I(cache)) are not applied instantly and require a VM restart.
+ - >
+        Use I(state=detached) to detach an existing disk from the VM without removing it entirely.
+        When I(state=detached) and the disk is C(unused[n]), it will be left in the same state (not removed).
+ - >
+ I(state=moved) may be used to change backing storage for the disk in bounds of the same VM
+ or to send the disk to another VM (using the same backing storage).
+ - >
+        I(state=resized) is intended to change the disk size. As of Proxmox 7.2 you can only increase the disk size
+ because shrinking disks is not supported by the PVE API and has to be done manually.
+ - To entirely remove the disk from backing storage use I(state=absent).
+ type: str
+ choices: ['present', 'resized', 'detached', 'moved', 'absent']
+ default: present
+ create:
+ description:
+      - With the I(create) flag you can control the behavior of I(state=present).
+      - When I(create=disabled) it will not create a new disk (if it does not exist) but will update options in an existing disk.
+      - When I(create=regular) it will either create a new disk (if it does not exist) or update options in an existing disk.
+      - When I(create=forced) it will always create a new disk (if the disk exists it will be detached and left unused).
+ type: str
+ choices: ['disabled', 'regular', 'forced']
+ default: regular
+ storage:
+ description:
+ - The drive's backing storage.
+ - Used only when I(state) is C(present).
+ type: str
+ size:
+ description:
+ - Desired volume size in GB to allocate when I(state=present) (specify I(size) without suffix).
+ - >
+ New (or additional) size of volume when I(state=resized). With the C(+) sign
+ the value is added to the actual size of the volume
+ and without it, the value is taken as an absolute one.
+ type: str
+ bwlimit:
+ description:
+ - Override I/O bandwidth limit (in KB/s).
+ - Used only when I(state=moved).
+ type: int
+ delete_moved:
+ description:
+ - Delete the original disk after successful copy.
+      - By default the original disk is kept as an unused disk.
+ - Used only when I(state=moved).
+ type: bool
+ target_disk:
+ description:
+ - The config key the disk will be moved to on the target VM (for example, C(ide0) or C(scsi1)).
+ - Default is the source disk key.
+ - Used only when I(state=moved).
+ type: str
+ target_storage:
+ description:
+ - Move the disk to this storage when I(state=moved).
+      - You can move between storages only within the scope of one VM.
+ - Mutually exclusive with I(target_vmid).
+ - Consider increasing I(timeout) in case of large disk images or slow storage backend.
+ type: str
+ target_vmid:
+ description:
+ - The (unique) ID of the VM where disk will be placed when I(state=moved).
+ - You can move disk between VMs only when the same storage is used.
+      - Mutually exclusive with I(target_storage).
+ type: int
+ timeout:
+ description:
+ - Timeout in seconds to wait for slow operations such as importing disk or moving disk between storages.
+ - Used only when I(state) is C(present) or C(moved).
+ type: int
+ default: 600
+ aio:
+ description:
+ - AIO type to use.
+ type: str
+ choices: ['native', 'threads', 'io_uring']
+ backup:
+ description:
+ - Whether the drive should be included when making backups.
+ type: bool
+ bps_max_length:
+ description:
+ - Maximum length of total r/w I/O bursts in seconds.
+ type: int
+ bps_rd_max_length:
+ description:
+ - Maximum length of read I/O bursts in seconds.
+ type: int
+ bps_wr_max_length:
+ description:
+ - Maximum length of write I/O bursts in seconds.
+ type: int
+ cache:
+ description:
+ - The drive's cache mode.
+ type: str
+ choices: ['none', 'writethrough', 'writeback', 'unsafe', 'directsync']
+ cyls:
+ description:
+ - Force the drive's physical geometry to have a specific cylinder count.
+ type: int
+ detect_zeroes:
+ description:
+ - Control whether to detect and try to optimize writes of zeroes.
+ type: bool
+ discard:
+ description:
+ - Control whether to pass discard/trim requests to the underlying storage.
+ type: str
+ choices: ['ignore', 'on']
+ format:
+ description:
+ - The drive's backing file's data format.
+ type: str
+ choices: ['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']
+ heads:
+ description:
+ - Force the drive's physical geometry to have a specific head count.
+ type: int
+ import_from:
+ description:
+ - Import volume from this existing one.
+      - "Volume string format: C(<STORAGE>:<VMID>/<FULL_NAME>) or C(<ABSOLUTE_PATH>/<FULL_NAME>)."
+ - Attention! Only root can use absolute paths.
+ - This parameter is mutually exclusive with I(size).
+ - Increase I(timeout) parameter when importing large disk images or using slow storage.
+ type: str
+ iops:
+ description:
+ - Maximum total r/w I/O in operations per second.
+ - You can specify either total limit or per operation (mutually exclusive with I(iops_rd) and I(iops_wr)).
+ type: int
+ iops_max:
+ description:
+ - Maximum unthrottled total r/w I/O pool in operations per second.
+ type: int
+ iops_max_length:
+ description:
+ - Maximum length of total r/w I/O bursts in seconds.
+ type: int
+ iops_rd:
+ description:
+ - Maximum read I/O in operations per second.
+ - You can specify either read or total limit (mutually exclusive with I(iops)).
+ type: int
+ iops_rd_max:
+ description:
+ - Maximum unthrottled read I/O pool in operations per second.
+ type: int
+ iops_rd_max_length:
+ description:
+ - Maximum length of read I/O bursts in seconds.
+ type: int
+ iops_wr:
+ description:
+ - Maximum write I/O in operations per second.
+ - You can specify either write or total limit (mutually exclusive with I(iops)).
+ type: int
+ iops_wr_max:
+ description:
+ - Maximum unthrottled write I/O pool in operations per second.
+ type: int
+ iops_wr_max_length:
+ description:
+ - Maximum length of write I/O bursts in seconds.
+ type: int
+ iothread:
+ description:
+      - Whether to use iothreads for this drive (only for SCSI and VirtIO).
+ type: bool
+ mbps:
+ description:
+ - Maximum total r/w speed in megabytes per second.
+      - Can be fractional, but use with caution; fractional values less than 1 are not officially supported.
+ - You can specify either total limit or per operation (mutually exclusive with I(mbps_rd) and I(mbps_wr)).
+ type: float
+ mbps_max:
+ description:
+ - Maximum unthrottled total r/w pool in megabytes per second.
+ type: float
+ mbps_rd:
+ description:
+ - Maximum read speed in megabytes per second.
+ - You can specify either read or total limit (mutually exclusive with I(mbps)).
+ type: float
+ mbps_rd_max:
+ description:
+ - Maximum unthrottled read pool in megabytes per second.
+ type: float
+ mbps_wr:
+ description:
+ - Maximum write speed in megabytes per second.
+ - You can specify either write or total limit (mutually exclusive with I(mbps)).
+ type: float
+ mbps_wr_max:
+ description:
+ - Maximum unthrottled write pool in megabytes per second.
+ type: float
+ media:
+ description:
+ - The drive's media type.
+ type: str
+ choices: ['cdrom', 'disk']
+ queues:
+ description:
+ - Number of queues (SCSI only).
+ type: int
+ replicate:
+ description:
+      - Whether the drive should be considered for replication jobs.
+ type: bool
+ rerror:
+ description:
+ - Read error action.
+ type: str
+ choices: ['ignore', 'report', 'stop']
+ ro:
+ description:
+ - Whether the drive is read-only.
+ type: bool
+ scsiblock:
+ description:
+ - Whether to use scsi-block for full passthrough of host block device.
+ - Can lead to I/O errors in combination with low memory or high memory fragmentation on host.
+ type: bool
+ secs:
+ description:
+ - Force the drive's physical geometry to have a specific sector count.
+ type: int
+ serial:
+ description:
+ - The drive's reported serial number, url-encoded, up to 20 bytes long.
+ type: str
+ shared:
+ description:
+ - Mark this locally-managed volume as available on all nodes.
+      - This option does not share the volume automatically; it assumes it is shared already!
+ type: bool
+ snapshot:
+ description:
+ - Control qemu's snapshot mode feature.
+      - If activated, changes made to the disk are temporary and will be discarded when the VM is shut down.
+ type: bool
+ ssd:
+ description:
+ - Whether to expose this drive as an SSD, rather than a rotational hard disk.
+ type: bool
+ trans:
+ description:
+ - Force disk geometry bios translation mode.
+ type: str
+ choices: ['auto', 'lba', 'none']
+ werror:
+ description:
+ - Write error action.
+ type: str
+ choices: ['enospc', 'ignore', 'report', 'stop']
+ wwn:
+ description:
+ - The drive's worldwide name, encoded as 16 bytes hex string, prefixed by C(0x).
+ type: str
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Create new disk in VM (do not rewrite in case it exists already)
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ name: vm-name
+ disk: scsi3
+ backup: true
+ cache: none
+ storage: local-zfs
+ size: 5
+ state: present
+
+- name: Create new disk in VM (force rewrite in case it exists already)
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ vmid: 101
+ disk: scsi3
+ format: qcow2
+ storage: local
+ size: 16
+ create: forced
+ state: present
+
+- name: Update existing disk
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ vmid: 101
+ disk: ide0
+ backup: false
+ ro: true
+ aio: native
+ state: present
+
+- name: Grow existing disk
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ vmid: 101
+ disk: sata4
+ size: +5G
+ state: resized
+
+- name: Detach disk (leave it unused)
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ name: vm-name
+ disk: virtio0
+ state: detached
+
+- name: Move disk to another storage
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_password: secret
+ vmid: 101
+ disk: scsi7
+ target_storage: local
+ format: qcow2
+ state: moved
+
+- name: Move disk from one VM to another
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_token_id: token1
+ api_token_secret: some-token-data
+ vmid: 101
+ disk: scsi7
+ target_vmid: 201
+ state: moved
+
+- name: Remove disk permanently
+ community.general.proxmox_disk:
+ api_host: node1
+ api_user: root@pam
+ api_password: secret
+ vmid: 101
+ disk: scsi4
+ state: absent
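+
+# Illustrative only; the storage and volume names below are hypothetical. The
+# import_from format is '<STORAGE>:<VMID>/<FULL_NAME>' or an absolute path
+# (root only), and it is mutually exclusive with size.
+- name: Create disk by importing an existing volume
+  community.general.proxmox_disk:
+    api_host: node1
+    api_user: root@pam
+    api_password: secret
+    vmid: 101
+    disk: scsi5
+    storage: local
+    import_from: 'local:100/vm-100-disk-0.raw'
+    timeout: 1200
+    state: present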
+'''
+
+RETURN = '''
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 101
+msg:
+ description: A short message on what the module did.
+ returned: always
+ type: str
+ sample: "Disk scsi3 created in VM 101"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec,
+ ProxmoxAnsible)
+from re import compile, match, sub
+from time import sleep
+
+
+def disk_conf_str_to_dict(config_string):
+ config = config_string.split(',')
+ storage_volume = config.pop(0).split(':')
+ config.sort()
+ storage_name = storage_volume[0]
+ volume_name = storage_volume[1]
+ config_current = dict(
+ volume='%s:%s' % (storage_name, volume_name),
+ storage_name=storage_name,
+ volume_name=volume_name
+ )
+
+ for option in config:
+ k, v = option.split('=')
+ config_current[k] = v
+
+ return config_current
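+
+# Illustrative only (hypothetical drive string):
+#   disk_conf_str_to_dict('local-zfs:vm-101-disk-0,cache=none,size=5G') returns
+#   {'volume': 'local-zfs:vm-101-disk-0', 'storage_name': 'local-zfs',
+#    'volume_name': 'vm-101-disk-0', 'cache': 'none', 'size': '5G'}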
+
+
+class ProxmoxDiskAnsible(ProxmoxAnsible):
+ create_update_fields = [
+ 'aio', 'backup', 'bps_max_length', 'bps_rd_max_length', 'bps_wr_max_length',
+ 'cache', 'cyls', 'detect_zeroes', 'discard', 'format', 'heads', 'import_from', 'iops', 'iops_max',
+ 'iops_max_length', 'iops_rd', 'iops_rd_max', 'iops_rd_max_length', 'iops_wr', 'iops_wr_max',
+ 'iops_wr_max_length', 'iothread', 'mbps', 'mbps_max', 'mbps_rd', 'mbps_rd_max', 'mbps_wr', 'mbps_wr_max',
+ 'media', 'queues', 'replicate', 'rerror', 'ro', 'scsiblock', 'secs', 'serial', 'shared', 'snapshot',
+ 'ssd', 'trans', 'werror', 'wwn'
+ ]
+ supported_bus_num_ranges = dict(
+ ide=range(0, 4),
+ scsi=range(0, 31),
+ sata=range(0, 6),
+ virtio=range(0, 16),
+ unused=range(0, 256)
+ )
+
+ def get_create_attributes(self):
+ # Sanitize parameters dictionary:
+ # - Remove not defined args
+ # - Ensure True and False converted to int.
+ # - Remove unnecessary parameters
+ params = dict((k, v) for k, v in self.module.params.items() if v is not None and k in self.create_update_fields)
+ params.update(dict((k, int(v)) for k, v in params.items() if isinstance(v, bool)))
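+        # e.g. (illustrative): {'backup': True, 'ssd': False} becomes {'backup': 1, 'ssd': 0}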
+ return params
+
+ def wait_till_complete_or_timeout(self, node_name, task_id):
+ timeout = self.module.params['timeout']
+ while timeout:
+ if self.api_task_ok(node_name, task_id):
+ return True
+ timeout -= 1
+ if timeout <= 0:
+ return False
+ sleep(1)
+
+ def create_disk(self, disk, vmid, vm, vm_config):
+ create = self.module.params['create']
+ if create == 'disabled' and disk not in vm_config:
+ # NOOP
+ return False, "Disk %s not found in VM %s and creation was disabled in parameters." % (disk, vmid)
+
+ if (create == 'regular' and disk not in vm_config) or (create == 'forced'):
+ # CREATE
+ attributes = self.get_create_attributes()
+ import_string = attributes.pop('import_from', None)
+
+ if import_string:
+ config_str = "%s:%s,import-from=%s" % (self.module.params["storage"], "0", import_string)
+ timeout_str = "Reached timeout while importing VM disk. Last line in task before timeout: %s"
+ ok_str = "Disk %s imported into VM %s"
+ else:
+ config_str = "%s:%s" % (self.module.params["storage"], self.module.params["size"])
+ ok_str = "Disk %s created in VM %s"
+ timeout_str = "Reached timeout while creating VM disk. Last line in task before timeout: %s"
+
+ for k, v in attributes.items():
+ config_str += ',%s=%s' % (k, v)
+
+ disk_config_to_apply = {self.module.params["disk"]: config_str}
+
+ if create in ['disabled', 'regular'] and disk in vm_config:
+ # UPDATE
+ disk_config = disk_conf_str_to_dict(vm_config[disk])
+ config_str = disk_config["volume"]
+ ok_str = "Disk %s updated in VM %s"
+ attributes = self.get_create_attributes()
+ # 'import_from' fails on disk updates
+ attributes.pop('import_from', None)
+
+ for k, v in attributes.items():
+ config_str += ',%s=%s' % (k, v)
+
+ # Now compare old and new config to detect if changes are needed
+ for option in ['size', 'storage_name', 'volume', 'volume_name']:
+ attributes.update({option: disk_config[option]})
+ # Values in params are numbers, but strings are needed to compare with disk_config
+ attributes = dict((k, str(v)) for k, v in attributes.items())
+ if disk_config == attributes:
+ return False, "Disk %s is up to date in VM %s" % (disk, vmid)
+
+ disk_config_to_apply = {self.module.params["disk"]: config_str}
+
+ current_task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.post(**disk_config_to_apply)
+ task_success = self.wait_till_complete_or_timeout(vm['node'], current_task_id)
+ if task_success:
+ return True, ok_str % (disk, vmid)
+ else:
+ self.module.fail_json(
+ msg=timeout_str % self.proxmox_api.nodes(vm['node']).tasks(current_task_id).log.get()[:1]
+ )
+
+ def move_disk(self, disk, vmid, vm, vm_config):
+ params = dict()
+ params['disk'] = disk
+ params['vmid'] = vmid
+ params['bwlimit'] = self.module.params['bwlimit']
+ params['storage'] = self.module.params['target_storage']
+ params['target-disk'] = self.module.params['target_disk']
+ params['target-vmid'] = self.module.params['target_vmid']
+ params['format'] = self.module.params['format']
+ params['delete'] = 1 if self.module.params.get('delete_moved', False) else 0
+ # Drop arguments that are not defined
+ params = dict((k, v) for k, v in params.items() if v is not None)
+
+ if params.get('storage', False):
+ disk_config = disk_conf_str_to_dict(vm_config[disk])
+ if params['storage'] == disk_config['storage_name']:
+ return False
+
+ task_id = self.proxmox_api.nodes(vm['node']).qemu(vmid).move_disk.post(**params)
+ task_success = self.wait_till_complete_or_timeout(vm['node'], task_id)
+ if task_success:
+ return True
+ else:
+ self.module.fail_json(
+ msg='Reached timeout while waiting for moving VM disk. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(task_id).log.get()[:1]
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ disk_args = dict(
+ # Proxmox native parameters
+ aio=dict(type='str', choices=['native', 'threads', 'io_uring']),
+ backup=dict(type='bool'),
+ bps_max_length=dict(type='int'),
+ bps_rd_max_length=dict(type='int'),
+ bps_wr_max_length=dict(type='int'),
+ cache=dict(type='str', choices=['none', 'writethrough', 'writeback', 'unsafe', 'directsync']),
+ cyls=dict(type='int'),
+ detect_zeroes=dict(type='bool'),
+ discard=dict(type='str', choices=['ignore', 'on']),
+ format=dict(type='str', choices=['raw', 'cow', 'qcow', 'qed', 'qcow2', 'vmdk', 'cloop']),
+ heads=dict(type='int'),
+ import_from=dict(type='str'),
+ iops=dict(type='int'),
+ iops_max=dict(type='int'),
+ iops_max_length=dict(type='int'),
+ iops_rd=dict(type='int'),
+ iops_rd_max=dict(type='int'),
+ iops_rd_max_length=dict(type='int'),
+ iops_wr=dict(type='int'),
+ iops_wr_max=dict(type='int'),
+ iops_wr_max_length=dict(type='int'),
+ iothread=dict(type='bool'),
+ mbps=dict(type='float'),
+ mbps_max=dict(type='float'),
+ mbps_rd=dict(type='float'),
+ mbps_rd_max=dict(type='float'),
+ mbps_wr=dict(type='float'),
+ mbps_wr_max=dict(type='float'),
+ media=dict(type='str', choices=['cdrom', 'disk']),
+ queues=dict(type='int'),
+ replicate=dict(type='bool'),
+ rerror=dict(type='str', choices=['ignore', 'report', 'stop']),
+ ro=dict(type='bool'),
+ scsiblock=dict(type='bool'),
+ secs=dict(type='int'),
+ serial=dict(type='str'),
+ shared=dict(type='bool'),
+ snapshot=dict(type='bool'),
+ ssd=dict(type='bool'),
+ trans=dict(type='str', choices=['auto', 'lba', 'none']),
+ werror=dict(type='str', choices=['enospc', 'ignore', 'report', 'stop']),
+ wwn=dict(type='str'),
+
+ # Disk moving relates parameters
+ bwlimit=dict(type='int'),
+ target_storage=dict(type='str'),
+ target_disk=dict(type='str'),
+ target_vmid=dict(type='int'),
+ delete_moved=dict(type='bool'),
+ timeout=dict(type='int', default=600),
+
+ # Module related parameters
+ name=dict(type='str'),
+ vmid=dict(type='int'),
+ disk=dict(type='str', required=True),
+ storage=dict(type='str'),
+ size=dict(type='str'),
+ state=dict(type='str', choices=['present', 'resized', 'detached', 'moved', 'absent'],
+ default='present'),
+ create=dict(type='str', choices=['disabled', 'regular', 'forced'], default='regular'),
+ )
+
+ module_args.update(disk_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[('api_token_id', 'api_token_secret')],
+ required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
+ required_if=[
+ ('create', 'forced', ['storage']),
+ ('state', 'resized', ['size']),
+ ],
+ required_by={
+ 'target_disk': 'target_vmid',
+ 'mbps_max': 'mbps',
+ 'mbps_rd_max': 'mbps_rd',
+ 'mbps_wr_max': 'mbps_wr',
+ 'bps_max_length': 'mbps_max',
+ 'bps_rd_max_length': 'mbps_rd_max',
+ 'bps_wr_max_length': 'mbps_wr_max',
+ 'iops_max': 'iops',
+ 'iops_rd_max': 'iops_rd',
+ 'iops_wr_max': 'iops_wr',
+ 'iops_max_length': 'iops_max',
+ 'iops_rd_max_length': 'iops_rd_max',
+ 'iops_wr_max_length': 'iops_wr_max',
+ },
+ supports_check_mode=False,
+ mutually_exclusive=[
+ ('target_vmid', 'target_storage'),
+ ('mbps', 'mbps_rd'),
+ ('mbps', 'mbps_wr'),
+ ('iops', 'iops_rd'),
+ ('iops', 'iops_wr'),
+ ('import_from', 'size'),
+ ]
+ )
+
+ proxmox = ProxmoxDiskAnsible(module)
+
+ disk = module.params['disk']
+ # Verify the disk name consists of a supported bus type followed by a number
+ disk_match = re.match(r'^([a-z]+)([0-9]+)$', disk)
+ if not disk_match:
+ proxmox.module.fail_json(msg='Unrecognized disk name: %s' % disk)
+ disk_bus = disk_match.group(1)
+ disk_number = int(disk_match.group(2))
+ if disk_bus not in proxmox.supported_bus_num_ranges:
+ proxmox.module.fail_json(msg='Unsupported disk bus: %s' % disk_bus)
+ elif disk_number not in proxmox.supported_bus_num_ranges[disk_bus]:
+ bus_range = proxmox.supported_bus_num_ranges[disk_bus]
+ proxmox.module.fail_json(msg='Disk %s number not in range %s..%s' % (disk, bus_range[0], bus_range[-1]))
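+ # Illustrative example (not part of the original module): 'virtio2' passes this
+ # validation, while 'scsi31' fails because scsi indices run from 0 to 30.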
+
+ name = module.params['name']
+ state = module.params['state']
+ vmid = module.params['vmid'] or proxmox.get_vmid(name)
+
+ # Ensure VM id exists and retrieve its config
+ vm = None
+ vm_config = None
+ try:
+ vm = proxmox.get_vm(vmid)
+ vm_config = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
+ except Exception as e:
+ proxmox.module.fail_json(msg='Getting information for VM %s failed with exception: %s' % (vmid, str(e)))
+
+ # Do not try to perform actions on missing disk
+ if disk not in vm_config and state in ['resized', 'moved']:
+ module.fail_json(vmid=vmid, msg='Unable to process missing disk %s in VM %s' % (disk, vmid))
+
+ if state == 'present':
+ try:
+ success, message = proxmox.create_disk(disk, vmid, vm, vm_config)
+ if success:
+ module.exit_json(changed=True, vmid=vmid, msg=message)
+ else:
+ module.exit_json(changed=False, vmid=vmid, msg=message)
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to create/update disk %s in VM %s: %s' % (disk, vmid, str(e)))
+
+ elif state == 'detached':
+ try:
+ if disk_bus == 'unused':
+ module.exit_json(changed=False, vmid=vmid, msg='Disk %s already detached in VM %s' % (disk, vmid))
+ if disk not in vm_config:
+ module.exit_json(changed=False, vmid=vmid, msg="Disk %s not present in VM %s config" % (disk, vmid))
+ proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=0)
+ module.exit_json(changed=True, vmid=vmid, msg="Disk %s detached from VM %s" % (disk, vmid))
+ except Exception as e:
+ module.fail_json(msg="Failed to detach disk %s from VM %s with exception: %s" % (disk, vmid, str(e)))
+
+ elif state == 'moved':
+ try:
+ disk_config = disk_conf_str_to_dict(vm_config[disk])
+ disk_storage = disk_config["storage_name"]
+ if proxmox.move_disk(disk, vmid, vm, vm_config):
+ module.exit_json(changed=True, vmid=vmid,
+ msg="Disk %s moved from VM %s storage %s" % (disk, vmid, disk_storage))
+ else:
+ module.exit_json(changed=False, vmid=vmid, msg="Disk %s already at %s storage" % (disk, disk_storage))
+ except Exception as e:
+ module.fail_json(msg="Failed to move disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
+
+ elif state == 'resized':
+ try:
+ size = module.params['size']
+ if not re.match(r'^\+?\d+(\.\d+)?[KMGT]?$', size):
+ module.fail_json(msg="Unrecognized size pattern for disk %s: %s" % (disk, size))
+ disk_config = disk_conf_str_to_dict(vm_config[disk])
+ actual_size = disk_config['size']
+ if size == actual_size:
+ module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already %s size" % (disk, size))
+ proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).resize.set(disk=disk, size=size)
+ module.exit_json(changed=True, vmid=vmid, msg="Disk %s resized in VM %s" % (disk, vmid))
+ except Exception as e:
+ module.fail_json(msg="Failed to resize disk %s in VM %s with exception: %s" % (disk, vmid, str(e)))
+
+ elif state == 'absent':
+ try:
+ if disk not in vm_config:
+ module.exit_json(changed=False, vmid=vmid, msg="Disk %s is already absent in VM %s" % (disk, vmid))
+ proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).unlink.put(idlist=disk, force=1)
+ module.exit_json(changed=True, vmid=vmid, msg="Disk %s removed from VM %s" % (disk, vmid))
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to remove disk %s from VM %s: %s' % (disk, vmid, str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
new file mode 100644
index 000000000..7435695a9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_domain_info.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_domain_info
+short_description: Retrieve information about one or more Proxmox VE domains
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE domains.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm', 'name']
+ type: str
+author: Tristan Le Guern (@tleguern)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+
+EXAMPLES = '''
+- name: List existing domains
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_domains
+
+- name: Retrieve information about the pve domain
+ community.general.proxmox_domain_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_domain_pve
+'''
+
+
+RETURN = '''
+proxmox_domains:
+ description: List of authentication domains.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the realm.
+ returned: on success
+ type: str
+ realm:
+ description: Realm name.
+ returned: on success
+ type: str
+ type:
+ description: Realm type.
+ returned: on success
+ type: str
+ digest:
+ description: Realm hash.
+ returned: on success, can be absent
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxDomainInfoAnsible(ProxmoxAnsible):
+ def get_domain(self, realm):
+ try:
+ domain = self.proxmox_api.access.domains.get(realm)
+ except Exception:
+ self.module.fail_json(msg="Domain '%s' does not exist" % realm)
+ domain['realm'] = realm
+ return domain
+
+ def get_domains(self):
+ domains = self.proxmox_api.access.domains.get()
+ return domains
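+ # Illustrative shape of the returned data (see RETURN; not part of the original module):
+ # [{'realm': 'pam', 'type': 'pam', 'comment': 'Linux PAM standard authentication'}, ...]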
+
+
+def proxmox_domain_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ domain_info_args = proxmox_domain_info_argument_spec()
+ module_args.update(domain_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ proxmox = ProxmoxDomainInfoAnsible(module)
+ domain = module.params['domain']
+
+ if domain:
+ domains = [proxmox.get_domain(realm=domain)]
+ else:
+ domains = proxmox.get_domains()
+ result['proxmox_domains'] = domains
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_group_info.py b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
new file mode 100644
index 000000000..531a9dae7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_group_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_group_info
+short_description: Retrieve information about one or more Proxmox VE groups
+version_added: 1.3.0
+description:
+ - Retrieve information about one or more Proxmox VE groups.
+options:
+ group:
+ description:
+ - Restrict results to a specific group.
+ aliases: ['groupid', 'name']
+ type: str
+author: Tristan Le Guern (@tleguern)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+
+EXAMPLES = '''
+- name: List existing groups
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_groups
+
+- name: Retrieve information about the admin group
+ community.general.proxmox_group_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ group: admin
+ register: proxmox_group_admin
+'''
+
+
+RETURN = '''
+proxmox_groups:
+ description: List of groups.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the group.
+ returned: on success, can be absent
+ type: str
+ groupid:
+ description: Group name.
+ returned: on success
+ type: str
+ users:
+ description: List of users in the group.
+ returned: on success
+ type: list
+ elements: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxGroupInfoAnsible(ProxmoxAnsible):
+ def get_group(self, groupid):
+ try:
+ group = self.proxmox_api.access.groups.get(groupid)
+ except Exception:
+ self.module.fail_json(msg="Group '%s' does not exist" % groupid)
+ group['groupid'] = groupid
+ return ProxmoxGroup(group)
+
+ def get_groups(self):
+ groups = self.proxmox_api.access.groups.get()
+ return [ProxmoxGroup(group) for group in groups]
+
+
+class ProxmoxGroup:
+ def __init__(self, group):
+ self.group = dict()
+ # The data representation differs depending on the API endpoint used
+ for k, v in group.items():
+ if k == 'users' and isinstance(v, str):
+ self.group['users'] = v.split(',')
+ elif k == 'members':
+ self.group['users'] = group['members']
+ else:
+ self.group[k] = v
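+ # Illustrative example (not part of the original module): the list endpoint returns
+ # e.g. {'groupid': 'admin', 'users': 'root@pam,alice@pve'}, while the single-group
+ # endpoint returns {'members': ['root@pam', 'alice@pve']}; both normalize here to
+ # self.group == {'groupid': 'admin', 'users': ['root@pam', 'alice@pve']}.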
+
+
+def proxmox_group_info_argument_spec():
+ return dict(
+ group=dict(type='str', aliases=['groupid', 'name']),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ group_info_args = proxmox_group_info_argument_spec()
+ module_args.update(group_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ proxmox = ProxmoxGroupInfoAnsible(module)
+ group = module.params['group']
+
+ if group:
+ groups = [proxmox.get_group(groupid=group)]
+ else:
+ groups = proxmox.get_groups()
+ result['proxmox_groups'] = [group.group for group in groups]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_kvm.py b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
new file mode 100644
index 000000000..1dba5f4ea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_kvm.py
@@ -0,0 +1,1433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+ - From community.general 4.0.0 on, there are no default values any more; see I(proxmox_default_behavior).
+author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ archive:
+ description:
+ - Specify a path to an archive to restore (instead of creating or cloning a VM).
+ type: str
+ version_added: 6.5.0
+ acpi:
+ description:
+ - Specify if ACPI should be enabled/disabled.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(true).
+ type: bool
+ agent:
+ description:
+ - Specify if the QEMU Guest Agent should be enabled/disabled.
+ - Since community.general 5.5.0, this can also be a string instead of a boolean.
+ This allows specifying values such as C(enabled=1,fstrim_cloned_disks=1).
+ type: str
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ - If I(proxmox_default_behavior) is set to C(compatibility), this option has a default of
+ C(-serial unix:/var/run/qemu-server/<vmid>.serial,server,nowait).
+ type: str
+ autostart:
+ description:
+ - Specify if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(false).
+ type: bool
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(0).
+ type: int
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ type: str
+ choices: ['seabios', 'ovmf']
+ boot:
+ description:
+ - Specify the boot order - boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine the letters to set the order.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(cnd).
+ type: str
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ type: str
+ cicustom:
+ description:
+ - 'cloud-init: Specify custom files to replace the automatically generated ones at start.'
+ type: str
+ version_added: 1.3.0
+ cipassword:
+ description:
+ - 'cloud-init: password of default user to create.'
+ type: str
+ version_added: 1.3.0
+ citype:
+ description:
+ - 'cloud-init: Specifies the cloud-init configuration format.'
+ - The default depends on the configured operating system type (C(ostype)).
+ - We use the C(nocloud) format for Linux, and C(configdrive2) for Windows.
+ type: str
+ choices: ['nocloud', 'configdrive2']
+ version_added: 1.3.0
+ ciuser:
+ description:
+ - 'cloud-init: username of default user to create.'
+ type: str
+ version_added: 1.3.0
+ clone:
+ description:
+ - Name of VM to be cloned. If I(vmid) is set, I(clone) can take an arbitrary value but is required for initiating the clone.
+ type: str
+ cores:
+ description:
+ - Specify number of cores per socket.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
+ type: int
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(kvm64).
+ type: str
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has a total of 2 CPU time.
+ type: int
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable fair-scheduler configuration by setting this to 0.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1000).
+ type: int
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ type: str
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ digest:
+ description:
+ - Prevent changes if the current configuration file has a different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ type: str
+ efidisk0:
+ description:
+ - Specify a hash/dictionary of EFI disk options.
+ - Requires I(bios=ovmf) to be set to be able to use it.
+ type: dict
+ suboptions:
+ storage:
+ description:
+ - C(storage) is the storage identifier where to create the disk.
+ type: str
+ format:
+ description:
+ - C(format) is the drive's backing file's data format. Please refer to the Proxmox VE Administrator Guide,
+ section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest
+ version, tables 3 to 14) to find out the formats supported by the provided storage backend.
+ type: str
+ efitype:
+ description:
+ - C(efitype) indicates the size of the EFI disk.
+ - C(2m) will allow for a 2MB EFI disk, which will be enough to persist boot order and new boot entries.
+ - C(4m) will allow for a 4MB EFI disk, which will additionally allow storing EFI keys in order to enable
+ Secure Boot.
+ type: str
+ choices:
+ - 2m
+ - 4m
+ pre_enrolled_keys:
+ description:
+ - C(pre_enrolled_keys) indicates whether EFI keys for Secure Boot should be enrolled (C(1)) in the VM firmware
+ upon creation or not (C(0)).
+ - If set to C(1), Secure Boot will also be enabled by default when the VM is created.
+ type: bool
+ version_added: 4.5.0
+ force:
+ description:
+ - Allow forced stopping of the VM.
+ - Can be used with states C(stopped), C(restarted) and C(absent).
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(false).
+ type: bool
+ format:
+ description:
+ - Target drive's backing file's data format.
+ - Used only with clone.
+ - Use I(format=unspecified) and I(full=false) for a linked clone.
+ - Please refer to the Proxmox VE Administrator Guide, section Proxmox VE Storage (see
+ U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for the latest version, tables 3 to 14) to find out the formats
+ supported by the provided storage backend.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(qcow2).
+ If I(proxmox_default_behavior) is set to C(no_defaults), not specifying this option is equivalent to setting it to C(unspecified).
+ type: str
+ choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk", "unspecified" ]
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ type: bool
+ full:
+ description:
+ - Create a full copy of all disks. This is always done when you clone a normal VM.
+ - For VM templates, we try to create a linked clone by default.
+ - Used only with clone.
+ type: bool
+ default: true
+ hostpci:
+ description:
+ - Specify a hash/dictionary mapping host PCI devices into the guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device's ROM will be visible in the guest's memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ type: str
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ type: str
+ choices: ['any', '2', '1024']
+ ide:
+ description:
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
+ Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
+ the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
+ type: dict
+ ipconfig:
+ description:
+ - 'cloud-init: Set the IP configuration.'
+ - A hash/dictionary of network ip configurations. C(ipconfig='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ipconfig[n]) where 0 ≤ n ≤ network interfaces.
+ - Values allowed are - C("[gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>] [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]").
+ - 'cloud-init: Specify IP addresses and gateways for the corresponding interface.'
+ - IP addresses use CIDR notation; gateways are optional but should be in the same subnet as the specified IP address.
+ - The special string 'dhcp' can be used for IP addresses to use DHCP, in which case no explicit gateway should be provided.
+ - For IPv6 the special string 'auto' can be used to use stateless autoconfiguration.
+ - If cloud-init is enabled and neither an IPv4 nor an IPv6 address is specified, it defaults to using dhcp on IPv4.
+ type: dict
+ version_added: 1.3.0
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ type: str
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(true).
+ type: bool
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ type: bool
+ lock:
+ description:
+ - Lock/unlock the VM.
+ type: str
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ type: str
+ memory:
+ description:
+ - Memory size in MB for instance.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(512).
+ type: int
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ type: int
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ type: str
+ nameservers:
+ description:
+ - 'cloud-init: DNS server IP address(es).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as a floating point number, unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ type: dict
+ newid:
+ description:
+ - VMID for the clone. Used only with clone.
+ - If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ type: int
+ numa:
+ description:
+ - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ type: dict
+ numa_enabled:
+ description:
+ - Enables NUMA.
+ type: bool
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(true).
+ type: bool
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - C(l26) is the Linux 2.6/3.X kernel.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(l26).
+ type: str
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']
+ parallel:
+ description:
+ - A hash/dictionary mapping host parallel devices. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ type: dict
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ type: bool
+ reboot:
+ description:
+ - Allow reboot. If set to C(true), the VM exits on reboot.
+ type: bool
+ revert:
+ description:
+ - Revert a pending change.
+ type: str
+ sata:
+ description:
+ - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
+ Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
+ the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
+ type: dict
+ scsi:
+ description:
+ - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
+ Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html) for
+ the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
+ type: dict
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ type: str
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ searchdomains:
+ description:
+ - 'cloud-init: Sets DNS search domain(s).'
+ - If unset, PVE host settings are used.
+ type: list
+ elements: str
+ version_added: 1.3.0
+ serial:
+ description:
+ - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+ type: dict
+ shares:
+ description:
+ - Sets amount of memory shares for auto-ballooning (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning, this means no limit.
+ type: int
+ skiplock:
+ description:
+ - Ignore locks.
+ - Only root is allowed to use this option.
+ type: bool
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ type: str
+ snapname:
+ description:
+ - The name of the snapshot. Used only with clone.
+ type: str
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(1).
+ type: int
+ sshkeys:
+ description:
+ - 'cloud-init: SSH key to assign to the default user. NOT TESTED with multiple keys but a multi-line value should work.'
+ type: str
+ version_added: 1.3.0
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid formats for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ type: str
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown is done with reverse ordering.
+ type: str
+ state:
+ description:
+ - Indicates desired state of the instance.
+ - If C(current), the current state of the VM will be fetched. You can access it with C(results.status).
+ type: str
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted', 'current']
+ default: present
+ storage:
+ description:
+ - Target storage for full clone.
+ type: str
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(false).
+ type: bool
+ tags:
+ description:
+ - List of tags to apply to the VM instance.
+ - Tags must start with C([a-z0-9_]) followed by zero or more of the following characters C([a-z0-9_-+.]).
+ - Tags are only available in Proxmox 6+.
+ type: list
+ elements: str
+ version_added: 2.3.0
+ target:
+ description:
+ - Target node. Only allowed if the original VM is on shared storage.
+ - Used only with clone.
+ type: str
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ type: bool
+ template:
+ description:
+ - Enables/disables the template.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(false).
+ type: bool
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ update:
+ description:
+ - If C(true), the VM will be updated with new values.
+ - Because of how the API operates and for security reasons, updating the following parameters is disabled
+ - C(net, virtio, ide, sata, scsi). For example, updating C(net) would update the MAC address and C(virtio) would always create a new disk...
+ - Update of C(pool) is disabled. It needs an additional API endpoint not covered by this module.
+ type: bool
+ default: false
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ type: int
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+ - This option has no default unless I(proxmox_default_behavior) is set to C(compatibility); then the default is C(std).
+ type: str
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ virtio:
+ description:
+ - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive's backing file's data format. C(qcow2|raw|subvol). Please refer to the Proxmox VE
+ Administrator Guide, section Proxmox VE Storage (see U(https://pve.proxmox.com/pve-docs/chapter-pvesm.html)
+ for the latest version, tables 3 to 14) to find out the formats supported by the provided storage backend.
+ type: dict
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ type: str
+ proxmox_default_behavior:
+ description:
+ - As of community.general 4.0.0, various options no longer have default values.
+ These default values caused problems when users expected different behavior from Proxmox
+ by default, or set options which then caused problems.
+ - The value C(compatibility) (default before community.general 4.0.0) will ensure that the default values
+ are used when the values are not explicitly specified by the user. The new default is C(no_defaults),
+ which makes sure these options have no defaults.
+ - This affects the I(acpi), I(autostart), I(balloon), I(boot), I(cores), I(cpu),
+ I(cpuunits), I(force), I(format), I(kvm), I(memory), I(onboot), I(ostype), I(sockets),
+ I(tablet), I(template), and I(vga) options.
+ type: str
+ default: no_defaults
+ choices:
+ - compatibility
+ - no_defaults
+ version_added: "1.3.0"
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.proxmox.selection
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Create new VM with minimal options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+
+- name: Create a VM from archive (backup)
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ archive: backup-storage:backup/vm/140/2023-03-08T06:41:23Z
+ name: spynal
+
+- name: Create new VM with minimal options and given vmid
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ vmid: 100
+
+- name: Create new VM with two network interface options
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ net1: 'e1000,bridge=vmbr2'
+
+- name: Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ net:
+ net0: 'virtio,bridge=vmbr1,rate=200'
+ virtio:
+ virtio0: 'VMs_LVM:10'
+ virtio1: 'VMs:2,format=qcow2'
+ virtio2: 'VMs:5,format=raw'
+ cores: 4
+ vcpus: 2
+
+- name: Create VM with a 10GB SATA disk and an EFI disk, with Secure Boot disabled by default
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ sata:
+ sata0: 'VMs_LVM:10,format=raw'
+ bios: ovmf
+ efidisk0:
+ storage: VMs_LVM_thin
+ format: raw
+ efitype: 4m
+ pre_enrolled_keys: false
+
+- name: Create VM with a 10GB SATA disk and an EFI disk, with Secure Boot enabled by default
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ sata:
+ sata0: 'VMs_LVM:10,format=raw'
+ bios: ovmf
+ efidisk0:
+ storage: VMs_LVM
+ format: raw
+ efitype: 4m
+ pre_enrolled_keys: 1
+
+- name: >
+ Clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ format: qcow2
+ timeout: 500
+
+- name: >
+ Create linked clone VM with only source VM name.
+ The VM source is spynal.
+ The target VM name is zavala
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: spynal
+ name: zavala
+ node: sabrewulf
+ storage: VMs
+ full: false
+ format: unspecified
+ timeout: 500
+
+- name: Clone VM with source vmid and target newid and raw format
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ clone: arbitrary_name
+ vmid: 108
+ newid: 152
+ name: zavala
+ node: sabrewulf
+ storage: LVM_STO
+ format: raw
+ timeout: 300
+
+- name: Create new VM and lock it for snapshot
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ lock: snapshot
+
+- name: Create new VM and set protection to disable the remove VM and remove disk operations
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ protection: true
+
+- name: Create new VM using cloud-init with a username and password
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ ciuser: mylinuxuser
+ cipassword: supersecret
+ searchdomains: 'mydomain.internal'
+ nameservers: 1.1.1.1
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24,gw=192.168.1.1'
+
+- name: Create new VM using Cloud-Init with an ssh key
+ community.general.proxmox_kvm:
+ node: sabrewulf
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ ide:
+ ide2: 'local:cloudinit,format=qcow2'
+ sshkeys: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILJkVm98B71lD5XHfihwcYHE9TVpsJmK1vR1JcaU82L+'
+ searchdomains: 'mydomain.internal'
+ nameservers:
+ - '1.1.1.1'
+ - '8.8.8.8'
+ net:
+ net0: 'virtio,bridge=vmbr1,tag=77'
+ ipconfig:
+ ipconfig0: 'ip=192.168.1.1/24'
+
+- name: Start VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: started
+
+- name: Stop VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+
+- name: Stop VM with force
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: stopped
+ force: true
+
+- name: Restart VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: restarted
+
+- name: Remove VM
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ state: absent
+
+- name: Update VM configuration
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ cores: 8
+ memory: 16384
+ update: true
+
+- name: Delete QEMU parameters
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ delete: 'args,template,cpulimit'
+
+- name: Revert a pending change
+ community.general.proxmox_kvm:
+ api_user: root@pam
+ api_password: secret
+ api_host: helldorado
+ name: spynal
+ node: sabrewulf
+ revert: 'template,cpulimit'
+'''
+
+RETURN = '''
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description: The current virtual machine status.
+ returned: success, not clone, not absent, not update
+ type: str
+ sample: running
+msg:
+ description: A short message.
+ returned: always
+ type: str
+ sample: "VM kropta with vmid = 110 is running"
+'''
+
+import re
+import time
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.parsing.convert_bool import boolean
+
+
+def parse_mac(netstr):
+ return re.search('=(.*?),', netstr).group(1)
+
+
+def parse_dev(devstr):
+ return re.search('(.*?)(,|$)', devstr).group(1)
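+
+# Illustrative examples (not part of the original module):
+# parse_mac('virtio=AA:BB:CC:DD:EE:FF,bridge=vmbr0') -> 'AA:BB:CC:DD:EE:FF'
+# parse_dev('local-lvm:vm-100-disk-0,size=32G') -> 'local-lvm:vm-100-disk-0'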
+
+
+class ProxmoxKvmAnsible(ProxmoxAnsible):
+ def get_vminfo(self, node, vmid, **kwargs):
+ global results
+ results = {}
+ mac = {}
+ devices = {}
+ try:
+ vm = self.proxmox_api.nodes(node).qemu(vmid).config.get()
+ except Exception as e:
+ self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ # Sanitize kwargs. Remove arguments that are not defined.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ # Convert all dicts in kwargs to top-level elements.
+ # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ # Split information by type
+ re_net = re.compile(r'net[0-9]')
+ re_dev = re.compile(r'(virtio|ide|scsi|sata|efidisk)[0-9]')
+ for k in kwargs.keys():
+ if re_net.match(k):
+ mac[k] = parse_mac(vm[k])
+ elif re_dev.match(k):
+ devices[k] = parse_dev(vm[k])
+
+ results['mac'] = mac
+ results['devices'] = devices
+ results['vmid'] = int(vmid)
+
+ def settings(self, vmid, node, **kwargs):
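+ # Apply the given config options to the VM; the module treats a None response
+ # from config.set() as success.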
+ proxmox_node = self.proxmox_api.nodes(node)
+
+ # Sanitize kwargs. Remove arguments that are not defined.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+
+ return proxmox_node.qemu(vmid).config.set(**kwargs) is None
+
+ def wait_for_task(self, node, taskid):
+ timeout = self.module.params['timeout']
+
+ while timeout:
+ if self.api_task_ok(node, taskid):
+ # Wait an extra second as the API can be ahead of the hypervisor
+ time.sleep(1)
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ break
+ time.sleep(1)
+ return False
+
+ def create_vm(self, vmid, newid, node, name, memory, cpu, cores, sockets, update, **kwargs):
+ # Available only in PVE 4
+ only_v4 = ['force', 'protection', 'skiplock']
+ only_v6 = ['ciuser', 'cipassword', 'sshkeys', 'ipconfig', 'tags']
+
+ # valid clone parameters
+ valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
+ clone_params = {}
+ # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
+ vm_args = "-serial unix:/var/run/qemu-server/{0}.serial,server,nowait".format(vmid)
+
+ proxmox_node = self.proxmox_api.nodes(node)
+
+ # Sanitize kwargs. Remove arguments that are not defined and convert True and False to int.
+ kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
+ kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))
+
+ version = self.version()
+ pve_major_version = 3 if version < LooseVersion('4.0') else version.version[0]
+
+ # The features work only on PVE 4+
+ if pve_major_version < 4:
+ for p in only_v4:
+ if p in kwargs:
+ del kwargs[p]
+
+ # The features work only on PVE 6
+ if pve_major_version < 6:
+ for p in only_v6:
+ if p in kwargs:
+ del kwargs[p]
+
+ # 'sshkeys' param expects a URL-encoded string
+ if 'sshkeys' in kwargs:
+ urlencoded_ssh_keys = quote(kwargs['sshkeys'], safe='')
+ kwargs['sshkeys'] = str(urlencoded_ssh_keys)
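+ # Illustrative (not part of the original module): the quoting above turns
+ # 'ssh-rsa AAAAB3 me@host' into 'ssh-rsa%20AAAAB3%20me%40host'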
+
+ # If update, don't update disk (virtio, efidisk0, ide, sata, scsi) and network interface
+ # pool parameter not supported by qemu/<vmid>/config endpoint on "update" (PVE 6.2) - only with "create"
+ if update:
+ if 'virtio' in kwargs:
+ del kwargs['virtio']
+ if 'sata' in kwargs:
+ del kwargs['sata']
+ if 'scsi' in kwargs:
+ del kwargs['scsi']
+ if 'ide' in kwargs:
+ del kwargs['ide']
+ if 'efidisk0' in kwargs:
+ del kwargs['efidisk0']
+ if 'net' in kwargs:
+ del kwargs['net']
+ if 'force' in kwargs:
+ del kwargs['force']
+ if 'pool' in kwargs:
+ del kwargs['pool']
+
+ # Check that the bios option is set to ovmf if the efidisk0 option is present
+ if 'efidisk0' in kwargs:
+ if ('bios' not in kwargs) or ('ovmf' != kwargs['bios']):
+ self.module.fail_json(msg='efidisk0 cannot be used if bios is not set to ovmf.')
+
+ # Flatten the efidisk0 option to a string, which is what Proxmoxer and the API expect
+ if 'efidisk0' in kwargs:
+ efidisk0_str = ''
+ # Regexp to match underscores in key names, so they can later be replaced by hyphens
+ hyphen_re = re.compile(r'_')
+ # If present, the storage definition should be the first argument
+ if 'storage' in kwargs['efidisk0']:
+ efidisk0_str += kwargs['efidisk0'].get('storage') + ':1,'
+ kwargs['efidisk0'].pop('storage')
+ # Join the remaining dict elements as key=value pairs separated by commas, replacing any underscore in a key
+ # with a hyphen (needed to turn pre_enrolled_keys into pre-enrolled-keys)
+ efidisk0_str += ','.join([hyphen_re.sub('-', k) + "=" + str(v) for k, v in kwargs['efidisk0'].items()
+ if 'storage' != k])
+ kwargs['efidisk0'] = efidisk0_str
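+ # Illustrative (not part of the original module): efidisk0={'storage': 'VMs_LVM',
+ # 'efitype': '4m'} flattens to 'VMs_LVM:1,efitype=4m'.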
+
+ # Convert all dicts in kwargs to top-level elements.
+ # For hostpci[n], ide[n], net[n], numa[n], parallel[n], sata[n], scsi[n], serial[n], virtio[n], ipconfig[n]
+ for k in list(kwargs.keys()):
+ if isinstance(kwargs[k], dict):
+ kwargs.update(kwargs[k])
+ del kwargs[k]
+
+ if 'agent' in kwargs:
+ try:
+ # The API also allows booleans instead of e.g. `enabled=1` for backward-compatibility.
+ kwargs['agent'] = int(boolean(kwargs['agent'], strict=True))
+ except TypeError:
+ # Not something that Ansible would parse as a boolean.
+ pass
+
+ # Rename numa_enabled to numa, according to the API documentation
+ if 'numa_enabled' in kwargs:
+ kwargs['numa'] = kwargs['numa_enabled']
+ del kwargs['numa_enabled']
+
+ # PVE api expects strings for the following params
+ if 'nameservers' in self.module.params:
+ nameservers = self.module.params.pop('nameservers')
+ if nameservers:
+ kwargs['nameserver'] = ' '.join(nameservers)
+ if 'searchdomains' in self.module.params:
+ searchdomains = self.module.params.pop('searchdomains')
+ if searchdomains:
+ kwargs['searchdomain'] = ' '.join(searchdomains)
+
+ # VM tags are expected to be valid and presented as a comma/semi-colon delimited string
+ if 'tags' in kwargs:
+ re_tag = re.compile(r'^[a-z0-9_][a-z0-9_\-\+\.]*$')
+ for tag in kwargs['tags']:
+ if not re_tag.match(tag):
+ self.module.fail_json(msg='%s is not a valid tag' % tag)
+ kwargs['tags'] = ",".join(kwargs['tags'])
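+ # Illustrative (not part of the original module): tags=['web', 'prod_1'] passes
+ # the validation above and becomes 'web,prod_1'.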
+
+ # -args and skiplock require the root@pam user - they cannot be used with API tokens
+ if self.module.params['api_user'] == "root@pam" and self.module.params['args'] is None:
+ if not update and self.module.params['proxmox_default_behavior'] == 'compatibility':
+ kwargs['args'] = vm_args
+ elif self.module.params['api_user'] == "root@pam" and self.module.params['args'] is not None:
+ kwargs['args'] = self.module.params['args']
+ elif self.module.params['api_user'] != "root@pam" and self.module.params['args'] is not None:
+ self.module.fail_json(msg='args parameter requires the root@pam user.')
+
+ if self.module.params['api_user'] != "root@pam" and self.module.params['skiplock'] is not None:
+ self.module.fail_json(msg='skiplock parameter requires the root@pam user.')
+
+ if update:
+ return proxmox_node.qemu(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None
+ elif self.module.params['clone'] is not None:
+ for param in valid_clone_params:
+ if self.module.params[param] is not None:
+ clone_params[param] = self.module.params[param]
+ clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
+ taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
+ else:
+ taskid = proxmox_node.qemu.create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)
+
+ if not self.wait_for_task(node, taskid):
+ self.module.fail_json(msg='Reached timeout while waiting for the VM to be created. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+ def start_vm(self, vm):
+ vmid = vm['vmid']
+ proxmox_node = self.proxmox_api.nodes(vm['node'])
+ taskid = proxmox_node.qemu(vmid).status.start.post()
+ if not self.wait_for_task(vm['node'], taskid):
+ self.module.fail_json(msg='Reached timeout while waiting for the VM to start. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+ def stop_vm(self, vm, force):
+ vmid = vm['vmid']
+ proxmox_node = self.proxmox_api.nodes(vm['node'])
+ taskid = proxmox_node.qemu(vmid).status.shutdown.post(forceStop=(1 if force else 0))
+ if not self.wait_for_task(vm['node'], taskid):
+ self.module.fail_json(msg='Reached timeout while waiting for the VM to stop. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ return False
+ return True
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ kvm_args = dict(
+ archive=dict(type='str'),
+ acpi=dict(type='bool'),
+ agent=dict(type='str'),
+ args=dict(type='str'),
+ autostart=dict(type='bool'),
+ balloon=dict(type='int'),
+ bios=dict(choices=['seabios', 'ovmf']),
+ boot=dict(type='str'),
+ bootdisk=dict(type='str'),
+ cicustom=dict(type='str'),
+ cipassword=dict(type='str', no_log=True),
+ citype=dict(type='str', choices=['nocloud', 'configdrive2']),
+ ciuser=dict(type='str'),
+ clone=dict(type='str'),
+ cores=dict(type='int'),
+ cpu=dict(type='str'),
+ cpulimit=dict(type='int'),
+ cpuunits=dict(type='int'),
+ delete=dict(type='str'),
+ description=dict(type='str'),
+ digest=dict(type='str'),
+ efidisk0=dict(type='dict',
+ options=dict(
+ storage=dict(type='str'),
+ format=dict(type='str'),
+ efitype=dict(type='str', choices=['2m', '4m']),
+ pre_enrolled_keys=dict(type='bool'),
+ )),
+ force=dict(type='bool'),
+ format=dict(type='str', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'unspecified']),
+ freeze=dict(type='bool'),
+ full=dict(type='bool', default=True),
+ hostpci=dict(type='dict'),
+ hotplug=dict(type='str'),
+ hugepages=dict(choices=['any', '2', '1024']),
+ ide=dict(type='dict'),
+ ipconfig=dict(type='dict'),
+ keyboard=dict(type='str'),
+ kvm=dict(type='bool'),
+ localtime=dict(type='bool'),
+ lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine=dict(type='str'),
+ memory=dict(type='int'),
+ migrate_downtime=dict(type='int'),
+ migrate_speed=dict(type='int'),
+ name=dict(type='str'),
+ nameservers=dict(type='list', elements='str'),
+ net=dict(type='dict'),
+ newid=dict(type='int'),
+ node=dict(),
+ numa=dict(type='dict'),
+ numa_enabled=dict(type='bool'),
+ onboot=dict(type='bool'),
+ ostype=dict(choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'win10', 'win11', 'l24', 'l26', 'solaris']),
+ parallel=dict(type='dict'),
+ pool=dict(type='str'),
+ protection=dict(type='bool'),
+ reboot=dict(type='bool'),
+ revert=dict(type='str'),
+ sata=dict(type='dict'),
+ scsi=dict(type='dict'),
+ scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial=dict(type='dict'),
+ searchdomains=dict(type='list', elements='str'),
+ shares=dict(type='int'),
+ skiplock=dict(type='bool'),
+ smbios=dict(type='str'),
+ snapname=dict(type='str'),
+ sockets=dict(type='int'),
+ sshkeys=dict(type='str', no_log=False),
+ startdate=dict(type='str'),
+ startup=dict(),
+ state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ storage=dict(type='str'),
+ tablet=dict(type='bool'),
+ tags=dict(type='list', elements='str'),
+ target=dict(type='str'),
+ tdf=dict(type='bool'),
+ template=dict(type='bool'),
+ timeout=dict(type='int', default=30),
+ update=dict(type='bool', default=False),
+ vcpus=dict(type='int'),
+ vga=dict(choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio=dict(type='dict'),
+ vmid=dict(type='int'),
+ watchdog=dict(),
+ proxmox_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
+ )
+ module_args.update(kvm_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
+ required_if=[('state', 'present', ['node'])],
+ )
+
+ clone = module.params['clone']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ delete = module.params['delete']
+ memory = module.params['memory']
+ name = module.params['name']
+ newid = module.params['newid']
+ node = module.params['node']
+ revert = module.params['revert']
+ sockets = module.params['sockets']
+ state = module.params['state']
+ update = bool(module.params['update'])
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+
+ if module.params['proxmox_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ acpi=True,
+ autostart=False,
+ balloon=0,
+ boot='cnd',
+ cores=1,
+ cpu='kvm64',
+ cpuunits=1000,
+ format='qcow2',
+ kvm=True,
+ memory=512,
+ ostype='l26',
+ sockets=1,
+ tablet=False,
+ template=False,
+ vga='std',
+ )
+ for param, value in old_default_values.items():
+ if module.params[param] is None:
+ module.params[param] = value
+
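+    # 'unspecified' is only a sentinel choice so the option can be left unset;
+    # translate it back to None here so it is omitted from the API call.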
+ if module.params['format'] == 'unspecified':
+ module.params['format'] = None
+
+ proxmox = ProxmoxKvmAnsible(module)
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ # the cloned vm name or retrieve the next free VM id from ProxmoxAPI.
+ if not vmid:
+ if state == 'present' and not update and not clone and not delete and not revert:
+ try:
+ vmid = proxmox.get_nextvmid()
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+ else:
+ clone_target = clone or name
+ vmid = proxmox.get_vmid(clone_target, ignore_missing=True)
+
+ if clone is not None:
+ # If newid is not defined then retrieve the next free id from ProxmoxAPI
+ if not newid:
+ try:
+ newid = proxmox.get_nextvmid()
+ except Exception:
+ module.fail_json(msg="Can't get the next vmid for VM {0} automatically. Ensure your cluster state is good".format(name))
+
+ # Ensure source VM name exists when cloning
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % clone)
+
+ # Ensure source VM id exists when cloning
+ proxmox.get_vm(vmid)
+
+ # Ensure the chosen VM name doesn't already exist when cloning
+ existing_vmid = proxmox.get_vmid(name, ignore_missing=True)
+ if existing_vmid:
+ module.exit_json(changed=False, vmid=existing_vmid, msg="VM with name <%s> already exists" % name)
+
+ # Ensure the chosen VM id doesn't already exist when cloning
+ if proxmox.get_vm(newid, ignore_missing=True):
+ module.exit_json(changed=False, vmid=vmid, msg="vmid %s with VM name %s already exists" % (newid, name))
+
+ if delete is not None:
+ try:
+ proxmox.settings(vmid, node, delete=delete)
+ module.exit_json(changed=True, vmid=vmid, msg="Settings has deleted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to delete settings on VM {0} with vmid {1}: '.format(name, vmid) + str(e))
+
+ if revert is not None:
+ try:
+ proxmox.settings(vmid, node, revert=revert)
+ module.exit_json(changed=True, vmid=vmid, msg="Settings has reverted on VM {0} with vmid {1}".format(name, vmid))
+ except Exception as e:
+            module.fail_json(vmid=vmid, msg='Unable to revert settings on VM {0} with vmid {1}: maybe it is not a pending task. '.format(name, vmid) + str(e))
+
+ if state == 'present':
+ try:
+ if proxmox.get_vm(vmid, ignore_missing=True) and not (update or clone):
+ module.exit_json(changed=False, vmid=vmid, msg="VM with vmid <%s> already exists" % vmid)
+ elif proxmox.get_vmid(name, ignore_missing=True) and not (update or clone):
+ module.exit_json(changed=False, vmid=proxmox.get_vmid(name), msg="VM with name <%s> already exists" % name)
+ elif not node:
+                module.fail_json(msg='node is mandatory for creating/updating VM')
+ elif update and not any([vmid, name]):
+ module.fail_json(msg='vmid or name is mandatory for updating VM')
+ elif not proxmox.get_node(node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ proxmox.create_vm(vmid, newid, node, name, memory, cpu, cores, sockets, update,
+ archive=module.params['archive'],
+ acpi=module.params['acpi'],
+ agent=module.params['agent'],
+ autostart=module.params['autostart'],
+ balloon=module.params['balloon'],
+ bios=module.params['bios'],
+ boot=module.params['boot'],
+ bootdisk=module.params['bootdisk'],
+ cicustom=module.params['cicustom'],
+ cipassword=module.params['cipassword'],
+ citype=module.params['citype'],
+ ciuser=module.params['ciuser'],
+ cpulimit=module.params['cpulimit'],
+ cpuunits=module.params['cpuunits'],
+ description=module.params['description'],
+ digest=module.params['digest'],
+ efidisk0=module.params['efidisk0'],
+ force=module.params['force'],
+ freeze=module.params['freeze'],
+ hostpci=module.params['hostpci'],
+ hotplug=module.params['hotplug'],
+ hugepages=module.params['hugepages'],
+ ide=module.params['ide'],
+ ipconfig=module.params['ipconfig'],
+ keyboard=module.params['keyboard'],
+ kvm=module.params['kvm'],
+ localtime=module.params['localtime'],
+ lock=module.params['lock'],
+ machine=module.params['machine'],
+ migrate_downtime=module.params['migrate_downtime'],
+ migrate_speed=module.params['migrate_speed'],
+ net=module.params['net'],
+ numa=module.params['numa'],
+ numa_enabled=module.params['numa_enabled'],
+ onboot=module.params['onboot'],
+ ostype=module.params['ostype'],
+ parallel=module.params['parallel'],
+ pool=module.params['pool'],
+ protection=module.params['protection'],
+ reboot=module.params['reboot'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ scsihw=module.params['scsihw'],
+ serial=module.params['serial'],
+ shares=module.params['shares'],
+ skiplock=module.params['skiplock'],
+ smbios1=module.params['smbios'],
+ snapname=module.params['snapname'],
+ sshkeys=module.params['sshkeys'],
+ startdate=module.params['startdate'],
+ startup=module.params['startup'],
+ tablet=module.params['tablet'],
+ tags=module.params['tags'],
+ target=module.params['target'],
+ tdf=module.params['tdf'],
+ template=module.params['template'],
+ vcpus=module.params['vcpus'],
+ vga=module.params['vga'],
+ virtio=module.params['virtio'],
+ watchdog=module.params['watchdog'])
+
+ if not clone:
+ proxmox.get_vminfo(node, vmid,
+ ide=module.params['ide'],
+ net=module.params['net'],
+ sata=module.params['sata'],
+ scsi=module.params['scsi'],
+ virtio=module.params['virtio'])
+ if update:
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s with vmid %s updated" % (name, vmid))
+ elif clone is not None:
+ module.exit_json(changed=True, vmid=newid, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
+ else:
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ if update:
+ module.fail_json(vmid=vmid, msg="Unable to update vm {0} with vmid {1}=".format(name, vmid) + str(e))
+ elif clone is not None:
+ module.fail_json(vmid=vmid, msg="Unable to clone vm {0} from vmid {1}=".format(name, vmid) + str(e))
+ else:
+ module.fail_json(vmid=vmid, msg="creation of qemu VM %s with vmid %s failed with exception=%s" % (name, vmid, e))
+
+ elif state == 'started':
+ status = {}
+ try:
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = proxmox.get_vm(vmid)
+ status['status'] = vm['status']
+ if vm['status'] == 'running':
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is already running" % vmid, **status)
+
+ if proxmox.start_vm(vm):
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s started" % vmid, **status)
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg="starting of VM %s failed with exception: %s" % (vmid, e), **status)
+
+ elif state == 'stopped':
+ status = {}
+ try:
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = proxmox.get_vm(vmid)
+
+ status['status'] = vm['status']
+ if vm['status'] == 'stopped':
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is already stopped" % vmid, **status)
+
+ if proxmox.stop_vm(vm, force=module.params['force']):
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is shutting down" % vmid, **status)
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg="stopping of VM %s failed with exception: %s" % (vmid, e), **status)
+
+ elif state == 'restarted':
+ status = {}
+ try:
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+
+ vm = proxmox.get_vm(vmid)
+ status['status'] = vm['status']
+ if vm['status'] == 'stopped':
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is not running" % vmid, **status)
+
+ if proxmox.stop_vm(vm, force=module.params['force']) and proxmox.start_vm(vm):
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s is restarted" % vmid, **status)
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg="restarting of VM %s failed with exception: %s" % (vmid, e), **status)
+
+ elif state == 'absent':
+ status = {}
+ if not vmid:
+ module.exit_json(changed=False, msg='VM with name = %s is already absent' % name)
+ try:
+ vm = proxmox.get_vm(vmid, ignore_missing=True)
+ if not vm:
+ module.exit_json(changed=False, vmid=vmid)
+
+ proxmox_node = proxmox.proxmox_api.nodes(vm['node'])
+ status['status'] = vm['status']
+ if vm['status'] == 'running':
+ if module.params['force']:
+ proxmox.stop_vm(vm, True)
+ else:
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s is running. Stop it before deletion or use force=true." % vmid)
+ taskid = proxmox_node.qemu.delete(vmid)
+ if not proxmox.wait_for_task(vm['node'], taskid):
+                module.fail_json(msg='Reached timeout while waiting for the VM to be removed. Last line in task before timeout: %s' %
+ proxmox_node.tasks(taskid).log.get()[:1])
+ else:
+ module.exit_json(changed=True, vmid=vmid, msg="VM %s removed" % vmid)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))
+
+ elif state == 'current':
+ status = {}
+ if not vmid:
+ module.fail_json(msg='VM with name = %s does not exist in cluster' % name)
+ vm = proxmox.get_vm(vmid)
+ if not name:
+ name = vm.get('name', '(unnamed)')
+ current = proxmox.proxmox_api.nodes(vm['node']).qemu(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, vmid=vmid, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_nic.py b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
new file mode 100644
index 000000000..26d07c7ec
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_nic.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Lammert Hellinga (@Kogelvis) <lammert@hellinga.it>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_nic
+short_description: Management of a NIC of a Qemu(KVM) VM in a Proxmox VE cluster
+version_added: 3.1.0
+description:
+ - Allows you to create/update/delete a NIC on Qemu(KVM) Virtual Machines in a Proxmox VE cluster.
+author: "Lammert Hellinga (@Kogelvis) <lammert@hellinga.it>"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ bridge:
+ description:
+ - Add this interface to the specified bridge device. The Proxmox VE default bridge is called C(vmbr0).
+ type: str
+ firewall:
+ description:
+ - Whether this interface should be protected by the firewall.
+ type: bool
+ default: false
+ interface:
+ description:
+      - Name of the interface, should be C(net[n]) where C(0 ≤ n ≤ 31).
+ type: str
+ required: true
+ link_down:
+ description:
+ - Whether this interface should be disconnected (like pulling the plug).
+ type: bool
+ default: false
+ mac:
+ description:
+ - C(XX:XX:XX:XX:XX:XX) should be a unique MAC address. This is automatically generated if not specified.
+ - When not specified this module will keep the MAC address the same when changing an existing interface.
+ type: str
+ model:
+ description:
+ - The NIC emulator model.
+ type: str
+ choices: ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
+ 'rtl8139', 'virtio', 'vmxnet3']
+ default: virtio
+ mtu:
+ description:
+      - Force MTU. For the C(virtio) model only; the setting is ignored otherwise.
+ - Set to C(1) to use the bridge MTU.
+ - Value should be C(1 ≤ n ≤ 65520).
+ type: int
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for I(state=present).
+ type: str
+ queues:
+ description:
+ - Number of packet queues to be used on the device.
+ - Value should be C(0 ≤ n ≤ 16).
+ type: int
+ rate:
+ description:
+      - Rate limit in MBps (MegaBytes per second) as a floating point number.
+ type: float
+ state:
+ description:
+ - Indicates desired state of the NIC.
+ type: str
+ choices: ['present', 'absent']
+ default: present
+ tag:
+ description:
+ - VLAN tag to apply to packets on this interface.
+ - Value should be C(1 ≤ n ≤ 4094).
+ type: int
+ trunks:
+ description:
+ - List of VLAN trunks to pass through this interface.
+ type: list
+ elements: int
+ vmid:
+ description:
+ - Specifies the instance ID.
+ type: int
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Create NIC net0 targeting the vm by name
+ community.general.proxmox_nic:
+ api_user: root@pam
+ api_password: secret
+ api_host: proxmoxhost
+ name: my_vm
+ interface: net0
+ bridge: vmbr0
+ tag: 3
+
+- name: Create NIC net0 targeting the vm by id
+ community.general.proxmox_nic:
+ api_user: root@pam
+ api_password: secret
+ api_host: proxmoxhost
+ vmid: 103
+ interface: net0
+ bridge: vmbr0
+ mac: "12:34:56:C0:FF:EE"
+ firewall: true
+
+- name: Delete NIC net0 targeting the vm by name
+ community.general.proxmox_nic:
+ api_user: root@pam
+ api_password: secret
+ api_host: proxmoxhost
+ name: my_vm
+ interface: net0
+ state: absent
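+
+# An illustrative sketch of a more fully tuned NIC; the values (MTU 1 for the
+# bridge MTU, the VLAN trunks, the queue count) are placeholders, not
+# recommendations. Note mtu only takes effect with the virtio model.
+- name: Create NIC net1 with VLAN trunks and bridge MTU
+  community.general.proxmox_nic:
+    api_user: root@pam
+    api_password: secret
+    api_host: proxmoxhost
+    name: my_vm
+    interface: net1
+    bridge: vmbr0
+    model: virtio
+    mtu: 1
+    queues: 4
+    trunks:
+      - 10
+      - 20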
+'''
+
+RETURN = '''
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+msg:
+  description: A short message.
+ returned: always
+ type: str
+ sample: "Nic net0 unchanged on VM with vmid 103"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxNicAnsible(ProxmoxAnsible):
+ def update_nic(self, vmid, interface, model, **kwargs):
+ vm = self.get_vm(vmid)
+
+ try:
+ vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
+ except Exception as e:
+ self.module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))
+
+ if interface in vminfo:
+ # Convert the current config to a dictionary
+ config = vminfo[interface].split(',')
+ config.sort()
+
+ config_current = {}
+
+ for i in config:
+ kv = i.split('=')
+ try:
+ config_current[kv[0]] = kv[1]
+ except IndexError:
+ config_current[kv[0]] = ''
+
+ # determine the current model nic and mac-address
+ models = ['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em', 'i82551', 'i82557b',
+ 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet', 'rtl8139', 'virtio', 'vmxnet3']
+ current_model = set(models) & set(config_current.keys())
+ current_model = current_model.pop()
+ current_mac = config_current[current_model]
+
+ # build nic config string
+ config_provided = "{0}={1}".format(model, current_mac)
+ else:
+ config_provided = model
+
+ if kwargs['mac']:
+ config_provided = "{0}={1}".format(model, kwargs['mac'])
+
+ if kwargs['bridge']:
+ config_provided += ",bridge={0}".format(kwargs['bridge'])
+
+ if kwargs['firewall']:
+ config_provided += ",firewall=1"
+
+ if kwargs['link_down']:
+ config_provided += ',link_down=1'
+
+ if kwargs['mtu']:
+ config_provided += ",mtu={0}".format(kwargs['mtu'])
+ if model != 'virtio':
+ self.module.warn(
+ 'Ignoring MTU for nic {0} on VM with vmid {1}, '
+                    'model should be set to \'virtio\'.'.format(interface, vmid))
+
+ if kwargs['queues']:
+ config_provided += ",queues={0}".format(kwargs['queues'])
+
+ if kwargs['rate']:
+ config_provided += ",rate={0}".format(kwargs['rate'])
+
+ if kwargs['tag']:
+ config_provided += ",tag={0}".format(kwargs['tag'])
+
+ if kwargs['trunks']:
+ config_provided += ",trunks={0}".format(';'.join(str(x) for x in kwargs['trunks']))
+
+ net = {interface: config_provided}
+ vm = self.get_vm(vmid)
+
+ if ((interface not in vminfo) or (vminfo[interface] != config_provided)):
+ if not self.module.check_mode:
+ self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(**net)
+ return True
+
+ return False
+
+ def delete_nic(self, vmid, interface):
+ vm = self.get_vm(vmid)
+ vminfo = self.proxmox_api.nodes(vm['node']).qemu(vmid).config.get()
+
+ if interface in vminfo:
+ if not self.module.check_mode:
+ self.proxmox_api.nodes(vm['node']).qemu(vmid).config.set(delete=interface)
+ return True
+
+ return False
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ nic_args = dict(
+ bridge=dict(type='str'),
+ firewall=dict(type='bool', default=False),
+ interface=dict(type='str', required=True),
+ link_down=dict(type='bool', default=False),
+ mac=dict(type='str'),
+ model=dict(choices=['e1000', 'e1000-82540em', 'e1000-82544gc', 'e1000-82545em',
+ 'i82551', 'i82557b', 'i82559er', 'ne2k_isa', 'ne2k_pci', 'pcnet',
+ 'rtl8139', 'virtio', 'vmxnet3'], default='virtio'),
+ mtu=dict(type='int'),
+ name=dict(type='str'),
+ queues=dict(type='int'),
+ rate=dict(type='float'),
+ state=dict(default='present', choices=['present', 'absent']),
+ tag=dict(type='int'),
+ trunks=dict(type='list', elements='int'),
+ vmid=dict(type='int'),
+ )
+ module_args.update(nic_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[('api_token_id', 'api_token_secret')],
+ required_one_of=[('name', 'vmid'), ('api_password', 'api_token_id')],
+ supports_check_mode=True,
+ )
+
+ proxmox = ProxmoxNicAnsible(module)
+
+ interface = module.params['interface']
+ model = module.params['model']
+ name = module.params['name']
+ state = module.params['state']
+ vmid = module.params['vmid']
+
+ # If vmid is not defined then retrieve its value from the vm name,
+ if not vmid:
+ vmid = proxmox.get_vmid(name)
+
+ # Ensure VM id exists
+ proxmox.get_vm(vmid)
+
+ if state == 'present':
+ try:
+ if proxmox.update_nic(vmid, interface, model,
+ bridge=module.params['bridge'],
+ firewall=module.params['firewall'],
+ link_down=module.params['link_down'],
+ mac=module.params['mac'],
+ mtu=module.params['mtu'],
+ queues=module.params['queues'],
+ rate=module.params['rate'],
+ tag=module.params['tag'],
+ trunks=module.params['trunks']):
+ module.exit_json(changed=True, vmid=vmid, msg="Nic {0} updated on VM with vmid {1}".format(interface, vmid))
+ else:
+ module.exit_json(vmid=vmid, msg="Nic {0} unchanged on VM with vmid {1}".format(interface, vmid))
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to change nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
+
+ elif state == 'absent':
+ try:
+ if proxmox.delete_nic(vmid, interface):
+ module.exit_json(changed=True, vmid=vmid, msg="Nic {0} deleted on VM with vmid {1}".format(interface, vmid))
+ else:
+ module.exit_json(vmid=vmid, msg="Nic {0} does not exist on VM with vmid {1}".format(interface, vmid))
+ except Exception as e:
+ module.fail_json(vmid=vmid, msg='Unable to delete nic {0} on VM with vmid {1}: '.format(interface, vmid) + str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_snap.py b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
new file mode 100644
index 000000000..0c17f8376
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_snap.py
@@ -0,0 +1,363 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, Jeffrey van Pelt (@Thulium-Drake) <jeff@vanpelt.one>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: proxmox_snap
+short_description: Snapshot management of instances in Proxmox VE cluster
+version_added: 2.0.0
+description:
+ - Allows you to create/delete/restore snapshots from instances in Proxmox VE cluster.
+  - Supports both KVM and LXC. OpenVZ has not been tested, as it is no longer supported on Proxmox VE.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ hostname:
+ description:
+ - The instance name.
+ type: str
+ vmid:
+ description:
+ - The instance id.
+      - If not set, will be fetched from ProxmoxAPI based on the hostname.
+ type: str
+ state:
+ description:
+ - Indicate desired state of the instance snapshot.
+ - The C(rollback) value was added in community.general 4.8.0.
+ choices: ['present', 'absent', 'rollback']
+ default: present
+ type: str
+ force:
+ description:
+      - Remove the snapshot from the config file even if removing the disk snapshot fails.
+ default: false
+ type: bool
+ unbind:
+ description:
+ - This option only applies to LXC containers.
+      - Allows snapshotting a container even if it has configured mountpoints.
+      - Temporarily disables all configured mountpoints, takes the snapshot, and finally restores the original configuration.
+      - If running, the container will be stopped and restarted to apply the config changes.
+      - Due to restrictions in the Proxmox API, this option can only be used when authenticating as C(root@pam) with I(api_password); API tokens do not work.
+ - See U(https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config) (PUT tab) for more details.
+ default: false
+ type: bool
+ version_added: 5.7.0
+ vmstate:
+ description:
+ - Snapshot includes RAM.
+ default: false
+ type: bool
+ description:
+ description:
+ - Specify the description for the snapshot. Only used on the configuration web interface.
+ - This is saved as a comment inside the configuration file.
+ type: str
+ timeout:
+ description:
+ - Timeout for operations.
+ default: 30
+ type: int
+ snapname:
+ description:
+ - Name of the snapshot that has to be created/deleted/restored.
+ default: 'ansible_snap'
+ type: str
+
+notes:
+ - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: Jeffrey van Pelt (@Thulium-Drake)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = r'''
+- name: Create new container snapshot
+ community.general.proxmox_snap:
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ vmid: 100
+ state: present
+ snapname: pre-updates
+
+- name: Create new snapshot for a container with configured mountpoints
+ community.general.proxmox_snap:
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ vmid: 100
+ state: present
+ unbind: true # requires root@pam+password auth, API tokens are not supported
+ snapname: pre-updates
+
+- name: Remove container snapshot
+ community.general.proxmox_snap:
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ vmid: 100
+ state: absent
+ snapname: pre-updates
+
+- name: Rollback container snapshot
+ community.general.proxmox_snap:
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ vmid: 100
+ state: rollback
+ snapname: pre-updates
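+
+# Illustrative only: a KVM snapshot that also saves RAM (vmstate) and sets a
+# description; the values shown are placeholders.
+- name: Create new VM snapshot including RAM
+  community.general.proxmox_snap:
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    vmid: 100
+    state: present
+    vmstate: true
+    description: before kernel upgrade
+    timeout: 60
+    snapname: pre-updates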
+'''
+
+RETURN = r'''#'''
+
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxSnapAnsible(ProxmoxAnsible):
+ def snapshot(self, vm, vmid):
+ return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).snapshot
+
+ def vmconfig(self, vm, vmid):
+ return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).config
+
+ def vmstatus(self, vm, vmid):
+ return getattr(self.proxmox_api.nodes(vm['node']), vm['type'])(vmid).status
+
+ def _container_mp_get(self, vm, vmid):
+ cfg = self.vmconfig(vm, vmid).get()
+ mountpoints = {}
+ for key, value in cfg.items():
+ if key.startswith('mp'):
+ mountpoints[key] = value
+ return mountpoints
+
+ def _container_mp_disable(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
+ # shutdown container if running
+ if vmstatus == 'running':
+ self.shutdown_instance(vm, vmid, timeout)
+ # delete all mountpoints configs
+ self.vmconfig(vm, vmid).put(delete=' '.join(mountpoints))
+
+ def _container_mp_restore(self, vm, vmid, timeout, unbind, mountpoints, vmstatus):
+ # NOTE: requires auth as `root@pam`, API tokens are not supported
+ # see https://pve.proxmox.com/pve-docs/api-viewer/#/nodes/{node}/lxc/{vmid}/config
+ # restore original config
+ self.vmconfig(vm, vmid).put(**mountpoints)
+ # start container (if was running before snap)
+ if vmstatus == 'running':
+ self.start_instance(vm, vmid, timeout)
+
+ def start_instance(self, vm, vmid, timeout):
+ taskid = self.vmstatus(vm, vmid).start.post()
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ self.module.fail_json(msg='Reached timeout while waiting for VM to start. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+ time.sleep(1)
+ return False
+
+ def shutdown_instance(self, vm, vmid, timeout):
+ taskid = self.vmstatus(vm, vmid).shutdown.post()
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+ timeout -= 1
+ if timeout == 0:
+ self.module.fail_json(msg='Reached timeout while waiting for VM to stop. Last line in task before timeout: %s' %
+ self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+ time.sleep(1)
+ return False
+
+ def snapshot_create(self, vm, vmid, timeout, snapname, description, vmstate, unbind):
+ if self.module.check_mode:
+ return True
+
+ if vm['type'] == 'lxc':
+ if unbind is True:
+ # check if credentials will work
+ # WARN: it is crucial this check runs here!
+ # The correct permissions are required only to reconfig mounts.
+ # Not checking now would allow to remove the configuration BUT
+ # fail later, leaving the container in a misconfigured state.
+ if (
+ self.module.params['api_user'] != 'root@pam'
+ or not self.module.params['api_password']
+ ):
+ self.module.fail_json(msg='`unbind=True` requires authentication as `root@pam` with `api_password`, API tokens are not supported.')
+ return False
+ mountpoints = self._container_mp_get(vm, vmid)
+ vmstatus = self.vmstatus(vm, vmid).current().get()['status']
+ if mountpoints:
+ self._container_mp_disable(vm, vmid, timeout, unbind, mountpoints, vmstatus)
+ taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description)
+ else:
+ taskid = self.snapshot(vm, vmid).post(snapname=snapname, description=description, vmstate=int(vmstate))
+
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ if vm['type'] == 'lxc' and unbind is True and mountpoints:
+ self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
+ return True
+            # Decrement before the zero check; otherwise the while condition
+            # ends the loop at timeout == 0 and the failure is never reported.
+            timeout -= 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for the VM snapshot to be created. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+            time.sleep(1)
+ if vm['type'] == 'lxc' and unbind is True and mountpoints:
+ self._container_mp_restore(vm, vmid, timeout, unbind, mountpoints, vmstatus)
+ return False
+
+ def snapshot_remove(self, vm, vmid, timeout, snapname, force):
+ if self.module.check_mode:
+ return True
+
+ taskid = self.snapshot(vm, vmid).delete(snapname, force=int(force))
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+            timeout -= 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for the VM snapshot to be removed. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+            time.sleep(1)
+ return False
+
+ def snapshot_rollback(self, vm, vmid, timeout, snapname):
+ if self.module.check_mode:
+ return True
+
+ taskid = self.snapshot(vm, vmid)(snapname).post("rollback")
+ while timeout:
+ if self.api_task_ok(vm['node'], taskid):
+ return True
+            timeout -= 1
+            if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for the VM snapshot to be rolled back. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(vm['node']).tasks(taskid).log.get()[:1])
+
+            time.sleep(1)
+ return False
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ snap_args = dict(
+ vmid=dict(required=False),
+ hostname=dict(),
+ timeout=dict(type='int', default=30),
+ state=dict(default='present', choices=['present', 'absent', 'rollback']),
+ description=dict(type='str'),
+ snapname=dict(type='str', default='ansible_snap'),
+ force=dict(type='bool', default=False),
+ unbind=dict(type='bool', default=False),
+ vmstate=dict(type='bool', default=False),
+ )
+ module_args.update(snap_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True
+ )
+
+ proxmox = ProxmoxSnapAnsible(module)
+
+ state = module.params['state']
+ vmid = module.params['vmid']
+ hostname = module.params['hostname']
+ description = module.params['description']
+ snapname = module.params['snapname']
+ timeout = module.params['timeout']
+ force = module.params['force']
+ unbind = module.params['unbind']
+ vmstate = module.params['vmstate']
+
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and hostname:
+ vmid = proxmox.get_vmid(hostname)
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ vm = proxmox.get_vm(vmid)
+
+ if state == 'present':
+ try:
+ for i in proxmox.snapshot(vm, vmid).get():
+ if i['name'] == snapname:
+ module.exit_json(changed=False, msg="Snapshot %s is already present" % snapname)
+
+ if proxmox.snapshot_create(vm, vmid, timeout, snapname, description, vmstate, unbind):
+ if module.check_mode:
+ module.exit_json(changed=False, msg="Snapshot %s would be created" % snapname)
+ else:
+ module.exit_json(changed=True, msg="Snapshot %s created" % snapname)
+
+ except Exception as e:
+ module.fail_json(msg="Creating snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
+
+ elif state == 'absent':
+ try:
+ snap_exist = False
+
+ for i in proxmox.snapshot(vm, vmid).get():
+ if i['name'] == snapname:
+ snap_exist = True
+                    break
+
+ if not snap_exist:
+ module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
+ else:
+ if proxmox.snapshot_remove(vm, vmid, timeout, snapname, force):
+ if module.check_mode:
+ module.exit_json(changed=False, msg="Snapshot %s would be removed" % snapname)
+ else:
+ module.exit_json(changed=True, msg="Snapshot %s removed" % snapname)
+
+ except Exception as e:
+ module.fail_json(msg="Removing snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
+ elif state == 'rollback':
+ try:
+ snap_exist = False
+
+ for i in proxmox.snapshot(vm, vmid).get():
+ if i['name'] == snapname:
+ snap_exist = True
+                    break
+
+ if not snap_exist:
+ module.exit_json(changed=False, msg="Snapshot %s does not exist" % snapname)
+ if proxmox.snapshot_rollback(vm, vmid, timeout, snapname):
+ if module.check_mode:
+ module.exit_json(changed=True, msg="Snapshot %s would be rolled back" % snapname)
+ else:
+ module.exit_json(changed=True, msg="Snapshot %s rolled back" % snapname)
+
+ except Exception as e:
+ module.fail_json(msg="Rollback of snapshot %s of VM %s failed with exception: %s" % (snapname, vmid, to_native(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
new file mode 100644
index 000000000..fd3759364
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_storage_info.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Tristan Le Guern (@tleguern) <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_storage_info
+short_description: Retrieve information about one or more Proxmox VE storages
+version_added: 2.2.0
+description:
+ - Retrieve information about one or more Proxmox VE storages.
+options:
+ storage:
+ description:
+      - Only return information on a specific storage.
+ aliases: ['name']
+ type: str
+ type:
+ description:
+      - Filter on a specific storage type.
+ type: str
+author: Tristan Le Guern (@tleguern)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+notes:
+ - Storage specific options can be returned by this module, please look at the documentation at U(https://pve.proxmox.com/wiki/Storage).
+'''
+
+
+EXAMPLES = '''
+- name: List existing storages
+ community.general.proxmox_storage_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_storages
+
+- name: List NFS storages only
+ community.general.proxmox_storage_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ type: nfs
+ register: proxmox_storages_nfs
+
+- name: Retrieve information about the lvm2 storage
+ community.general.proxmox_storage_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ storage: lvm2
+ register: proxmox_storage_lvm
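+
+# A sketch of consuming the registered result; the filter chain below is an
+# illustration, not part of the module itself.
+- name: Show the names of all shared storages
+  ansible.builtin.debug:
+    msg: "{{ proxmox_storages.proxmox_storages | selectattr('shared') | map(attribute='storage') | list }}"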
+'''
+
+
+RETURN = '''
+proxmox_storages:
+ description: List of storage pools.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ content:
+      description: Proxmox content types available in this storage.
+ returned: on success
+ type: list
+ elements: str
+ digest:
+      description: Storage's digest.
+ returned: on success
+ type: str
+ nodes:
+      description: List of nodes associated with this storage.
+ returned: on success, if storage is not local
+ type: list
+ elements: str
+ path:
+      description: Physical path to this storage.
+ returned: on success
+ type: str
+ prune-backups:
+      description: Backup retention options, as a dictionary of option name to value.
+      returned: on success
+      type: dict
+ shared:
+      description: Whether this storage is shared.
+ returned: on success
+ type: bool
+ storage:
+      description: Storage name.
+ returned: on success
+ type: str
+ type:
+      description: Storage type.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
+
+
+class ProxmoxStorageInfoAnsible(ProxmoxAnsible):
+ def get_storage(self, storage):
+ try:
+ storage = self.proxmox_api.storage.get(storage)
+ except Exception:
+ self.module.fail_json(msg="Storage '%s' does not exist" % storage)
+ return ProxmoxStorage(storage)
+
+ def get_storages(self, type=None):
+ storages = self.proxmox_api.storage.get(type=type)
+ storages = [ProxmoxStorage(storage) for storage in storages]
+ return storages
+
+
+class ProxmoxStorage:
+ def __init__(self, storage):
+ self.storage = storage
+ # Convert proxmox representation of lists, dicts and boolean for easier
+ # manipulation within ansible.
+ if 'shared' in self.storage:
+ self.storage['shared'] = proxmox_to_ansible_bool(self.storage['shared'])
+ if 'content' in self.storage:
+ self.storage['content'] = self.storage['content'].split(',')
+ if 'nodes' in self.storage:
+ self.storage['nodes'] = self.storage['nodes'].split(',')
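+        # e.g. 'keep-last=3,keep-weekly=2' becomes
+        # {'keep-last': '3', 'keep-weekly': '2'} (values are kept as strings)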
+ if 'prune-backups' in storage:
+ options = storage['prune-backups'].split(',')
+ self.storage['prune-backups'] = dict()
+ for option in options:
+ k, v = option.split('=')
+ self.storage['prune-backups'][k] = v
+
+
+def proxmox_storage_info_argument_spec():
+ return dict(
+ storage=dict(type='str', aliases=['name']),
+ type=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ storage_info_args = proxmox_storage_info_argument_spec()
+ module_args.update(storage_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('storage', 'type')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ proxmox = ProxmoxStorageInfoAnsible(module)
+ storage = module.params['storage']
+ storagetype = module.params['type']
+
+ if storage:
+ storages = [proxmox.get_storage(storage)]
+ else:
+ storages = proxmox.get_storages(type=storagetype)
+ result['proxmox_storages'] = [storage.storage for storage in storages]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
new file mode 100644
index 000000000..a2e66b38d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_tasks_info.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: proxmox_tasks_info
+short_description: Retrieve information about one or more Proxmox VE tasks
+version_added: 3.8.0
+description:
+ - Retrieve information about one or more Proxmox VE tasks.
+author: 'Andreas Botzner (@paginabianca) <andreas at botzner dot com>'
+options:
+ node:
+ description:
+ - Node where to get tasks.
+ required: true
+ type: str
+ task:
+ description:
+ - Return specific task.
+ aliases: ['upid', 'name']
+ type: str
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+
+EXAMPLES = '''
+- name: List tasks on node01
+  community.general.proxmox_tasks_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_password: '{{ password | default(omit) }}'
+ api_token_id: '{{ token_id | default(omit) }}'
+ api_token_secret: '{{ token_secret | default(omit) }}'
+ node: node01
+ register: result
+
+- name: Retrieve information about specific tasks on node01
+  community.general.proxmox_tasks_info:
+ api_host: proxmoxhost
+ api_user: root@pam
+ api_password: '{{ password | default(omit) }}'
+ api_token_id: '{{ token_id | default(omit) }}'
+ api_token_secret: '{{ token_secret | default(omit) }}'
+ task: 'UPID:node01:00003263:16167ACE:621EE230:srvreload:networking:root@pam:'
+ node: node01
+ register: proxmox_tasks
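+
+# Illustrative follow-up (not part of the module docs): the 'failed' field is
+# only returned when the task has a status, so default it when asserting.
+- name: Fail the play if the inspected task failed
+  ansible.builtin.assert:
+    that:
+      - not (proxmox_tasks.proxmox_tasks[0].failed | default(false))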
+'''
+
+
+RETURN = '''
+proxmox_tasks:
+ description: List of tasks.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ id:
+ description: ID of the task.
+ returned: on success
+ type: str
+ node:
+ description: Node name.
+ returned: on success
+ type: str
+ pid:
+ description: PID of the task.
+ returned: on success
+ type: int
+ pstart:
+      description: pstart of the task.
+ returned: on success
+ type: int
+ starttime:
+ description: Starting time of the task.
+ returned: on success
+ type: int
+ type:
+ description: Type of the task.
+ returned: on success
+ type: str
+ upid:
+ description: UPID of the task.
+ returned: on success
+ type: str
+ user:
+ description: User that owns the task.
+ returned: on success
+ type: str
+ endtime:
+      description: End time of the task.
+ returned: on success, can be absent
+ type: int
+ status:
+ description: Status of the task.
+ returned: on success, can be absent
+ type: str
+ failed:
+ description: If the task failed.
+ returned: when status is defined
+ type: bool
+msg:
+ description: Short message.
+ returned: on failure
+ type: str
+ sample: 'Task: UPID:xyz:xyz does not exist on node: proxmoxnode'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxTaskInfoAnsible(ProxmoxAnsible):
+ def get_task(self, upid, node):
+ tasks = self.get_tasks(node)
+ for task in tasks:
+ if task.info['upid'] == upid:
+ return [task]
+
+ def get_tasks(self, node):
+ tasks = self.proxmox_api.nodes(node).tasks.get()
+ return [ProxmoxTask(task) for task in tasks]
+
+
+class ProxmoxTask:
+ def __init__(self, task):
+ self.info = dict()
+ for k, v in task.items():
+ if k == 'status' and isinstance(v, str):
+ self.info[k] = v
+ if v != 'OK':
+ self.info['failed'] = True
+ else:
+ self.info[k] = v
+
+
+def proxmox_task_info_argument_spec():
+ return dict(
+ task=dict(type='str', aliases=['upid', 'name'], required=False),
+ node=dict(type='str', required=True),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ task_info_args = proxmox_task_info_argument_spec()
+ module_args.update(task_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[('api_token_id', 'api_token_secret')],
+ required_one_of=[('api_password', 'api_token_id')],
+ supports_check_mode=True)
+ result = dict(changed=False)
+
+ proxmox = ProxmoxTaskInfoAnsible(module)
+ upid = module.params['task']
+ node = module.params['node']
+ if upid:
+ tasks = proxmox.get_task(upid=upid, node=node)
+ else:
+ tasks = proxmox.get_tasks(node=node)
+ if tasks is not None:
+ result['proxmox_tasks'] = [task.info for task in tasks]
+ module.exit_json(**result)
+ else:
+ result['msg'] = 'Task: {0} does not exist on node: {1}.'.format(
+ upid, node)
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_template.py b/ansible_collections/community/general/plugins/modules/proxmox_template.py
new file mode 100644
index 000000000..2bf24ff84
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_template.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: Management of OS templates in Proxmox VE cluster
+description:
+  - Allows you to upload/delete templates in a Proxmox VE cluster.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ node:
+ description:
+ - Proxmox VE node on which to operate.
+ type: str
+ src:
+ description:
+ - Path to uploaded file.
+ - Required only for I(state=present).
+ type: path
+ template:
+ description:
+ - The template name.
+ - Required for I(state=absent) to delete a template.
+ - Required for I(state=present) to download an appliance container template (pveam).
+ type: str
+ content_type:
+ description:
+ - Content type.
+ - Required only for I(state=present).
+ type: str
+ default: 'vztmpl'
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - Target storage.
+ type: str
+ default: 'local'
+ timeout:
+ description:
+ - Timeout for operations.
+ type: int
+ default: 30
+ force:
+ description:
+      - Can only be used with I(state=present); an existing template will be overwritten.
+ type: bool
+ default: false
+ state:
+ description:
+ - Indicate desired state of the template.
+ type: str
+ choices: ['present', 'absent']
+ default: present
+notes:
+  - Requires C(proxmoxer) and C(requests) modules on host. These modules can be installed with M(ansible.builtin.pip).
+author: Sergei Antipov (@UnderGreen)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Upload new openvz template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: >
+    Upload new openvz template with minimal options, using the environment
+    variable PROXMOX_PASSWORD (you should export it beforehand)
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+- name: Upload new openvz template with all options and force overwrite
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: true
+
+- name: Delete template with minimal options
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+
+- name: Download proxmox appliance container template
+ community.general.proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ template: ubuntu-20.04-standard_20.04-1_amd64.tar.gz
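+
+# Illustrative only: uploading an ISO image instead of a container template;
+# the src path and timeout value are placeholders.
+- name: Upload an ISO image with a longer timeout
+  community.general.proxmox_template:
+    node: uk-mc02
+    api_user: root@pam
+    api_password: 1q2w3e
+    api_host: node1
+    storage: local
+    content_type: iso
+    src: ~/ubuntu-20.04.5-live-server-amd64.iso
+    timeout: 120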
+'''
+
+import os
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (proxmox_auth_argument_spec, ProxmoxAnsible)
+
+
+class ProxmoxTemplateAnsible(ProxmoxAnsible):
+ def get_template(self, node, storage, content_type, template):
+ return [True for tmpl in self.proxmox_api.nodes(node).storage(storage).content.get()
+ if tmpl['volid'] == '%s:%s/%s' % (storage, content_type, template)]
+
+ def task_status(self, node, taskid, timeout):
+ """
+ Check the task status and wait until the task is completed or the timeout is reached.
+ """
+ while timeout:
+ if self.api_task_ok(node, taskid):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for the template to be uploaded/downloaded. Last line in task before timeout: %s' %
+                                      self.proxmox_api.nodes(node).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+ def upload_template(self, node, storage, content_type, realpath, timeout):
+ taskid = self.proxmox_api.nodes(node).storage(storage).upload.post(content=content_type, filename=open(realpath, 'rb'))
+ return self.task_status(node, taskid, timeout)
+
+ def download_template(self, node, storage, template, timeout):
+ taskid = self.proxmox_api.nodes(node).aplinfo.post(storage=storage, template=template)
+ return self.task_status(node, taskid, timeout)
+
+ def delete_template(self, node, storage, content_type, template, timeout):
+ volid = '%s:%s/%s' % (storage, content_type, template)
+ self.proxmox_api.nodes(node).storage(storage).content.delete(volid)
+ while timeout:
+ if not self.get_template(node, storage, content_type, template):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+                self.module.fail_json(msg='Reached timeout while waiting for the template to be deleted.')
+
+ time.sleep(1)
+ return False
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ template_args = dict(
+ node=dict(),
+ src=dict(type='path'),
+ template=dict(),
+ content_type=dict(default='vztmpl', choices=['vztmpl', 'iso']),
+ storage=dict(default='local'),
+ timeout=dict(type='int', default=30),
+ force=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ module_args.update(template_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_together=[('api_token_id', 'api_token_secret')],
+ required_one_of=[('api_password', 'api_token_id')],
+ required_if=[('state', 'absent', ['template'])]
+ )
+
+ proxmox = ProxmoxTemplateAnsible(module)
+
+ state = module.params['state']
+ node = module.params['node']
+ storage = module.params['storage']
+ timeout = module.params['timeout']
+
+ if state == 'present':
+ try:
+ content_type = module.params['content_type']
+ src = module.params['src']
+
+ # download appliance template
+ if content_type == 'vztmpl' and not src:
+ template = module.params['template']
+
+ if not template:
+ module.fail_json(msg='template param for downloading appliance template is mandatory')
+
+ if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+ module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if proxmox.download_template(node, storage, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s downloaded' % (storage, content_type, template))
+
+            # validate src before deriving the template name from it,
+            # otherwise os.path.basename(None) raises a TypeError
+            if not src:
+                module.fail_json(msg='src param for uploading a template file is mandatory')
+            elif not (os.path.exists(src) and os.path.isfile(src)):
+                module.fail_json(msg='template file at path %s does not exist' % src)
+
+            template = os.path.basename(src)
+            if proxmox.get_template(node, storage, content_type, template) and not module.params['force']:
+                module.exit_json(changed=False, msg='template with volid=%s:%s/%s already exists' % (storage, content_type, template))
+
+ if proxmox.upload_template(node, storage, content_type, src, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="uploading/downloading of template %s failed with exception: %s" % (template, e))
+
+ elif state == 'absent':
+ try:
+ content_type = module.params['content_type']
+ template = module.params['template']
+
+ if not proxmox.get_template(node, storage, content_type, template):
+                module.exit_json(changed=False, msg='template with volid=%s:%s/%s has already been deleted' % (storage, content_type, template))
+
+ if proxmox.delete_template(node, storage, content_type, template, timeout):
+ module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
+ except Exception as e:
+ module.fail_json(msg="deleting of template %s failed with exception: %s" % (template, e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/proxmox_user_info.py b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
new file mode 100644
index 000000000..a515f2b45
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/proxmox_user_info.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: proxmox_user_info
+short_description: Retrieve information about one or more Proxmox VE users
+version_added: 1.3.0
+description:
+  - Retrieve information about one or more Proxmox VE users.
+options:
+ domain:
+ description:
+ - Restrict results to a specific authentication realm.
+ aliases: ['realm']
+ type: str
+ user:
+ description:
+ - Restrict results to a specific user.
+ aliases: ['name']
+ type: str
+ userid:
+ description:
+      - Restrict results to a specific user ID, which is a concatenation of the user and domain parts.
+ type: str
+author: Tristan Le Guern (@tleguern)
+extends_documentation_fragment:
+ - community.general.proxmox.documentation
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = '''
+- name: List existing users
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ register: proxmox_users
+
+- name: List existing users in the pve authentication realm
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ domain: pve
+ register: proxmox_users_pve
+
+- name: Retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ userid: admin@pve
+ register: proxmox_user_admin
+
+- name: Alternative way to retrieve information about admin@pve
+ community.general.proxmox_user_info:
+ api_host: helldorado
+ api_user: root@pam
+ api_password: "{{ password | default(omit) }}"
+ api_token_id: "{{ token_id | default(omit) }}"
+ api_token_secret: "{{ token_secret | default(omit) }}"
+ user: admin
+ domain: pve
+ register: proxmox_user_admin
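+
+# A sketch of consuming the registered result; the filter chain below is
+# illustrative only.
+- name: List the user IDs of all enabled users
+  ansible.builtin.debug:
+    msg: "{{ proxmox_users.proxmox_users | selectattr('enabled') | map(attribute='userid') | list }}"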
+'''
+
+
+RETURN = '''
+proxmox_users:
+ description: List of users.
+ returned: always, but can be empty
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the user.
+ returned: on success
+ type: str
+ domain:
+ description: User's authentication realm, also the right part of the user ID.
+ returned: on success
+ type: str
+ email:
+ description: User's email address.
+ returned: on success
+ type: str
+ enabled:
+ description: User's account state.
+ returned: on success
+ type: bool
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ firstname:
+ description: User's first name.
+ returned: on success
+ type: str
+ groups:
+ description: List of groups which the user is a member of.
+ returned: on success
+ type: list
+ elements: str
+ keys:
+ description: User's two-factor authentication keys.
+ returned: on success
+ type: str
+ lastname:
+ description: User's last name.
+ returned: on success
+ type: str
+ tokens:
+ description: List of API tokens associated with the user.
+ returned: on success
+ type: list
+ elements: dict
+ contains:
+ comment:
+ description: Short description of the token.
+ returned: on success
+ type: str
+ expire:
+ description: Expiration date in seconds since EPOCH. Zero means no expiration.
+ returned: on success
+ type: int
+ privsep:
+ description: Describes whether the API token is further restricted with ACLs or is fully privileged.
+ returned: on success
+ type: bool
+ tokenid:
+ description: Token name.
+ returned: on success
+ type: str
+ user:
+ description: User's login name, also the left part of the user ID.
+ returned: on success
+ type: str
+ userid:
+ description: Proxmox user ID, represented as user@realm.
+ returned: on success
+ type: str
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.proxmox import (
+ proxmox_auth_argument_spec, ProxmoxAnsible, proxmox_to_ansible_bool)
+
+
+class ProxmoxUserInfoAnsible(ProxmoxAnsible):
+ def get_user(self, userid):
+ try:
+ user = self.proxmox_api.access.users.get(userid)
+ except Exception:
+ self.module.fail_json(msg="User '%s' does not exist" % userid)
+ user['userid'] = userid
+ return ProxmoxUser(user)
+
+ def get_users(self, domain=None):
+ users = self.proxmox_api.access.users.get(full=1)
+ users = [ProxmoxUser(user) for user in users]
+ if domain:
+ return [user for user in users if user.user['domain'] == domain]
+ return users
+
+
+class ProxmoxUser:
+ def __init__(self, user):
+ self.user = dict()
+ # The data representation differs depending on which API call produced it, so normalize the fields here
+ for k, v in user.items():
+ if k == 'enable':
+ self.user['enabled'] = proxmox_to_ansible_bool(user['enable'])
+ elif k == 'userid':
+ self.user['user'] = user['userid'].split('@')[0]
+ self.user['domain'] = user['userid'].split('@')[1]
+ self.user[k] = v
+ elif k in ['groups', 'tokens'] and (v == '' or v is None):
+ self.user[k] = []
+ elif k == 'groups' and isinstance(v, str):
+ self.user['groups'] = v.split(',')
+ elif k == 'tokens' and isinstance(v, list):
+ for token in v:
+ if 'privsep' in token:
+ token['privsep'] = proxmox_to_ansible_bool(token['privsep'])
+ self.user['tokens'] = v
+ elif k == 'tokens' and isinstance(v, dict):
+ self.user['tokens'] = list()
+ for tokenid, tokenvalues in v.items():
+ t = tokenvalues
+ t['tokenid'] = tokenid
+ if 'privsep' in tokenvalues:
+ t['privsep'] = proxmox_to_ansible_bool(tokenvalues['privsep'])
+ self.user['tokens'].append(t)
+ else:
+ self.user[k] = v
+
+
+def proxmox_user_info_argument_spec():
+ return dict(
+ domain=dict(type='str', aliases=['realm']),
+ user=dict(type='str', aliases=['name']),
+ userid=dict(type='str'),
+ )
+
+
+def main():
+ module_args = proxmox_auth_argument_spec()
+ user_info_args = proxmox_user_info_argument_spec()
+ module_args.update(user_info_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ required_one_of=[('api_password', 'api_token_id')],
+ required_together=[('api_token_id', 'api_token_secret')],
+ mutually_exclusive=[('user', 'userid'), ('domain', 'userid')],
+ supports_check_mode=True
+ )
+ result = dict(
+ changed=False
+ )
+
+ proxmox = ProxmoxUserInfoAnsible(module)
+ domain = module.params['domain']
+ user = module.params['user']
+ if user and domain:
+ userid = user + '@' + domain
+ else:
+ userid = module.params['userid']
+
+ if userid:
+ users = [proxmox.get_user(userid=userid)]
+ else:
+ users = proxmox.get_users(domain=domain)
+ result['proxmox_users'] = [user.user for user in users]
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pubnub_blocks.py b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
new file mode 100644
index 000000000..a03553c5c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pubnub_blocks.py
@@ -0,0 +1,639 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# PubNub Real-time Cloud-Hosted Push API and Push Notification Client
+# Frameworks
+# Copyright (C) 2016 PubNub Inc.
+# http://www.pubnub.com/
+# http://www.pubnub.com/terms
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pubnub_blocks
+short_description: PubNub blocks management module
+description:
+ - "This module allows Ansible to interface with the PubNub BLOCKS
+ infrastructure by providing the following operations: create / remove,
+ start / stop and rename for blocks and create / modify / remove for event
+ handlers."
+author:
+ - PubNub <support@pubnub.com> (@pubnub)
+ - Sergey Mamontov <sergey@pubnub.com> (@parfeon)
+requirements:
+ - "python >= 2.7"
+ - "pubnub_blocks_client >= 1.0"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ email:
+ description:
+ - Email of the account for which a new session should be started.
+ - "Not required if C(cache) contains the result of a previous module
+ call (in the same play)."
+ required: false
+ type: str
+ default: ''
+ password:
+ description:
+ - Password which matches the account to which the specified C(email) belongs.
+ - "Not required if C(cache) contains the result of a previous module
+ call (in the same play)."
+ required: false
+ type: str
+ default: ''
+ cache:
+ description: >
+ If a single play uses the blocks management module several times, it is
+ preferable to enable 'caching' by having the previous module call share
+ its gathered artifacts and passing them to this parameter.
+ required: false
+ type: dict
+ default: {}
+ account:
+ description:
+ - "Name of PubNub account for from which C(application) will be used to
+ manage blocks."
+ - "User's account will be used if value not set or empty."
+ type: str
+ default: ''
+ application:
+ description:
+ - "Name of target PubNub application for which blocks configuration on
+ specific C(keyset) will be done."
+ type: str
+ required: true
+ keyset:
+ description:
+ - Name of the application's key set to which the managed blocks are bound.
+ type: str
+ required: true
+ state:
+ description:
+ - "Intended block state after event handlers creation / update process
+ will be completed."
+ required: false
+ default: 'present'
+ choices: ['started', 'stopped', 'present', 'absent']
+ type: str
+ name:
+ description:
+ - Name of the managed block, which will later be visible on admin.pubnub.com.
+ required: true
+ type: str
+ description:
+ description:
+ - Short block description, which will later be visible on
+ admin.pubnub.com. Used only when the block does not exist yet; it will
+ not change the description of an existing block.
+ required: false
+ type: str
+ event_handlers:
+ description:
+ - "List of event handlers which should be updated for specified block
+ C(name)."
+ - "Each entry for new event handler should contain: C(name), C(src),
+ C(channels), C(event). C(name) used as event handler name which can be
+ used later to make changes to it."
+ - C(src) is full path to file with event handler code.
+ - "C(channels) is name of channel from which event handler is waiting
+ for events."
+ - "C(event) is type of event which is able to trigger event handler:
+ I(js-before-publish), I(js-after-publish), I(js-after-presence)."
+ - "Each entry for existing handlers should contain C(name) (so target
+ handler can be identified). Rest parameters (C(src), C(channels) and
+ C(event)) can be added if changes required for them."
+ - "It is possible to rename event handler by adding C(changes) key to
+ event handler payload and pass dictionary, which will contain single key
+ C(name), where new name should be passed."
+ - "To remove particular event handler it is possible to set C(state) for
+ it to C(absent) and it will be removed."
+ required: false
+ default: []
+ type: list
+ elements: dict
+ changes:
+ description:
+ - "List of fields which should be changed by block itself (doesn't
+ affect any event handlers)."
+ - "Possible options for change is: C(name)."
+ required: false
+ default: {}
+ type: dict
+ validate_certs:
+ description:
+ - "This key allow to try skip certificates check when performing REST API
+ calls. Sometimes host may have issues with certificates on it and this
+ will cause problems to call PubNub REST API."
+ - If check should be ignored C(False) should be passed to this parameter.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+# Event handler create example.
+- name: Create single event handler
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ src: '{{ path_to_handler_source }}'
+ name: '{{ handler_name }}'
+ event: 'js-before-publish'
+ channels: '{{ handler_channel }}'
+
+# Change event handler trigger event type.
+- name: Change event handler 'event'
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ event_handlers:
+ -
+ name: '{{ handler_name }}'
+ event: 'js-after-publish'
+
+# Stop block and event handlers.
+- name: Stopping block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: stopped
+
+# Multiple module calls with cached result passing
+- name: Create '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+- name: Add '{{ event_handler_1_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_1_source }}'
+ name: '{{ event_handler_1_name }}'
+ channels: '{{ event_handler_1_channel }}'
+ event: 'js-before-publish'
+- name: Add '{{ event_handler_2_name }}' handler to '{{ block_name }}'
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: present
+ event_handlers:
+ -
+ src: '{{ path_to_handler_2_source }}'
+ name: '{{ event_handler_2_name }}'
+ channels: '{{ event_handler_2_channel }}'
+ event: 'js-before-publish'
+- name: Start '{{ block_name }}' block
+ register: module_cache
+ community.general.pubnub_blocks:
+ cache: '{{ module_cache }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: started
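+
+# A minimal additional sketch using the documented C(absent) state; the
+# variables are the same placeholders used in the examples above.
+- name: Remove '{{ block_name }}' block
+ community.general.pubnub_blocks:
+ email: '{{ email }}'
+ password: '{{ password }}'
+ application: '{{ app_name }}'
+ keyset: '{{ keyset_name }}'
+ name: '{{ block_name }}'
+ state: absent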
+'''
+
+RETURN = '''
+module_cache:
+ description:
+ - Cached account information. If the module is used several times within
+ a single play, it is better to pass the cached data to subsequent
+ module calls to speed up the process.
+ type: dict
+ returned: always
+'''
+import copy
+import os
+
+try:
+ # Import PubNub BLOCKS client.
+ from pubnub_blocks_client import User, Account, Owner, Application, Keyset # noqa: F401, pylint: disable=unused-import
+ from pubnub_blocks_client import Block, EventHandler
+ from pubnub_blocks_client import exceptions
+ HAS_PUBNUB_BLOCKS_CLIENT = True
+except ImportError:
+ HAS_PUBNUB_BLOCKS_CLIENT = False
+ User = None
+ Account = None
+ Owner = None
+ Application = None
+ Keyset = None
+ Block = None
+ EventHandler = None
+ exceptions = None
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+
+def pubnub_user(module):
+ """Create and configure user model if it possible.
+
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+
+ :rtype: User
+ :return: Reference on initialized and ready to use user or 'None' in
+ case if not all required information has been passed to block.
+ """
+ user = None
+ params = module.params
+
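+ # A previous module call exports its state through the 'module_cache'
+ # return value (a dict that includes a 'pnm_user' entry, as built in
+ # main() below). When that dict is passed back in via 'cache', restore
+ # the session from it instead of re-authenticating with email/password.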
+ if params.get('cache') and params['cache'].get('module_cache'):
+ cache = params['cache']['module_cache']
+ user = User()
+ user.restore(cache=copy.deepcopy(cache['pnm_user']))
+ elif params.get('email') and params.get('password'):
+ user = User(email=params.get('email'), password=params.get('password'))
+ else:
+ err_msg = 'It looks like no account credentials have been passed or ' \
+ 'the \'cache\' field does not contain the result of a ' \
+ 'previous module call.'
+ module.fail_json(msg='Missing account credentials.',
+ description=err_msg, changed=False)
+
+ return user
+
+
+def pubnub_account(module, user):
+ """Create and configure account if it is possible.
+
+ :type module: AnsibleModule
+ :param module: Reference to the module which contains launch
+ information and status report methods.
+ :type user: User
+ :param user: Reference to the authorized user whose account should be
+ used during manipulations with the block.
+
+ :rtype: Account
+ :return: Reference to an initialized and ready-to-use account, or
+ 'None' if not all required information has been passed to the module.
+ """
+ params = module.params
+ if params.get('account'):
+ account_name = params.get('account')
+ account = user.account(name=params.get('account'))
+ if account is None:
+ err_frmt = 'It looks like there is no \'{0}\' account for the ' \
+ 'authorized user. Please make sure the correct ' \
+ 'name has been passed during module configuration.'
+ module.fail_json(msg='Missing account.',
+ description=err_frmt.format(account_name),
+ changed=False)
+ else:
+ account = user.accounts()[0]
+
+ return account
+
+
+def pubnub_application(module, account):
+ """Retrieve reference on target application from account model.
+
+ NOTE: If account authorization fails or there is no application with
+ the specified name, the module will exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference to the module which contains launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference to the PubNub account model from which the
+ application reference should be fetched.
+
+ :rtype: Application
+ :return: Reference to an initialized and ready-to-use application model.
+ """
+ application = None
+ params = module.params
+ try:
+ application = account.application(params['application'])
+ except (exceptions.AccountError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=dict(account))
+
+ if application is None:
+ err_fmt = 'There is no \'{0}\' application for {1}. Make sure the ' \
+ 'correct application name has been passed. If the application ' \
+ 'doesn\'t exist you can create it on admin.pubnub.com.'
+ email = account.owner.email
+ module.fail_json(msg=err_fmt.format(params['application'], email),
+ changed=account.changed, module_cache=dict(account))
+
+ return application
+
+
+def pubnub_keyset(module, account, application):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: If there is no keyset with the specified name, the module will
+ exit with an error.
+ :type module: AnsibleModule
+ :param module: Reference to the module which contains launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference to the PubNub account model which will be
+ used in case of error to export cached data.
+ :type application: Application
+ :param application: Reference to the PubNub application model from
+ which the keyset reference should be fetched.
+
+ :rtype: Keyset
+ :return: Reference to an initialized and ready-to-use keyset model.
+ """
+ params = module.params
+ keyset = application.keyset(params['keyset'])
+ if keyset is None:
+ err_fmt = 'There is no \'{0}\' keyset for \'{1}\' application. Make ' \
+ 'sure the correct keyset name has been passed. If the keyset ' \
+ 'doesn\'t exist you can create it on admin.pubnub.com.'
+ module.fail_json(msg=err_fmt.format(params['keyset'],
+ application.name),
+ changed=account.changed, module_cache=dict(account))
+
+ return keyset
+
+
+def pubnub_block(module, account, keyset):
+ """Retrieve reference on target keyset from application model.
+
+ NOTE: In case if there is no block with specified name and module
+ configured to start/stop it, module will exit with error.
+ :type module: AnsibleModule
+ :param module: Reference on module which contain module launch
+ information and status report methods.
+ :type account: Account
+ :param account: Reference on PubNub account model which will be used in
+ case of error to export cached data.
+ :type keyset: Keyset
+ :param keyset: Reference on keyset model from which reference on block
+ should be fetched.
+
+ :rtype: Block
+ :return: Reference on initialized and ready to use keyset model.
+ """
+ block = None
+ params = module.params
+ try:
+ block = keyset.block(params['name'])
+ except (exceptions.KeysetError, exceptions.GeneralPubNubError) as exc:
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed, module_cache=dict(account))
+
+ # Report an error because the block does not exist while a start/stop
+ # was requested.
+ if block is None and params['state'] in ['started', 'stopped']:
+ block_name = params.get('name')
+ module.fail_json(msg="'{0}' block doesn't exists.".format(block_name),
+ changed=account.changed, module_cache=dict(account))
+
+ if block is None and params['state'] == 'present':
+ block = Block(name=params.get('name'),
+ description=params.get('description'))
+ keyset.add_block(block)
+
+ if block:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+ if params.get('description'):
+ block.description = params.get('description')
+
+ return block
+
+
+def pubnub_event_handler(block, data):
+ """Retrieve reference on target event handler from application model.
+
+ :type block: Block
+ :param block: Reference on block model from which reference on event
+ handlers should be fetched.
+ :type data: dict
+ :param data: Reference on dictionary which contain information about
+ event handler and whether it should be created or not.
+
+ :rtype: EventHandler
+ :return: Reference on initialized and ready to use event handler model.
+ 'None' will be returned in case if there is no handler with
+ specified name and no request to create it.
+ """
+ event_handler = block.event_handler(data['name'])
+
+ # Prepare payload for event handler update.
+ changed_name = (data.pop('changes').get('name')
+ if 'changes' in data else None)
+ name = data.get('name') or changed_name
+ channels = data.get('channels')
+ event = data.get('event')
+ code = _content_of_file_at_path(data.get('src'))
+ state = data.get('state') or 'present'
+
+ # Create event handler if required.
+ if event_handler is None and state == 'present':
+ event_handler = EventHandler(name=name, channels=channels, event=event,
+ code=code)
+ block.add_event_handler(event_handler)
+
+ # Update event handler if required.
+ if event_handler is not None and state == 'present':
+ if name is not None:
+ event_handler.name = name
+ if channels is not None:
+ event_handler.channels = channels
+ if event is not None:
+ event_handler.event = event
+ if code is not None:
+ event_handler.code = code
+
+ return event_handler
+
+
+def _failure_title_from_exception(exception):
+ """Compose human-readable title for module error title.
+
+ Title will be based on status codes if they has been provided.
+ :type exception: exceptions.GeneralPubNubError
+ :param exception: Reference on exception for which title should be
+ composed.
+
+ :rtype: str
+ :return: Reference on error tile which should be shown on module
+ failure.
+ """
+ title = 'General REST API access error.'
+ if exception.code == exceptions.PN_AUTHORIZATION_MISSING_CREDENTIALS:
+ title = 'Authorization error: missing credentials.'
+ elif exception.code == exceptions.PN_AUTHORIZATION_WRONG_CREDENTIALS:
+ title = 'Authorization error: wrong credentials.'
+ elif exception.code == exceptions.PN_USER_INSUFFICIENT_RIGHTS:
+ title = 'API access error: insufficient access rights.'
+ elif exception.code == exceptions.PN_API_ACCESS_TOKEN_EXPIRED:
+ title = 'API access error: time token expired.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCK_EXISTS:
+ title = 'Block create did fail: block with same name already exists.'
+ elif exception.code == exceptions.PN_KEYSET_BLOCKS_FETCH_DID_FAIL:
+ title = 'Unable to fetch list of blocks for keyset.'
+ elif exception.code == exceptions.PN_BLOCK_CREATE_DID_FAIL:
+ title = 'Block creation did fail.'
+ elif exception.code == exceptions.PN_BLOCK_UPDATE_DID_FAIL:
+ title = 'Block update did fail.'
+ elif exception.code == exceptions.PN_BLOCK_REMOVE_DID_FAIL:
+ title = 'Block removal did fail.'
+ elif exception.code == exceptions.PN_BLOCK_START_STOP_DID_FAIL:
+ title = 'Block start/stop did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_MISSING_FIELDS:
+ title = 'Event handler creation did fail: missing fields.'
+ elif exception.code == exceptions.PN_BLOCK_EVENT_HANDLER_EXISTS:
+ title = 'Event handler creation did fail: handler with same name already exists.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_CREATE_DID_FAIL:
+ title = 'Event handler creation did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_UPDATE_DID_FAIL:
+ title = 'Event handler update did fail.'
+ elif exception.code == exceptions.PN_EVENT_HANDLER_REMOVE_DID_FAIL:
+ title = 'Event handler removal did fail.'
+
+ return title
+
+
+def _content_of_file_at_path(path):
+ """Read file content.
+
+ Try to read the content of the file at the specified path.
+ :type path: str
+ :param path: Full path to the location of the file which should be read.
+ :rtype: str
+ :return: File content or 'None'.
+ """
+ content = None
+ if path and os.path.exists(path):
+ with open(path, mode="rt") as opened_file:
+ b_content = opened_file.read()
+ try:
+ content = to_text(b_content, errors='surrogate_or_strict')
+ except UnicodeError:
+ pass
+
+ return content
+
+
+def main():
+ fields = dict(
+ email=dict(default='', required=False, type='str'),
+ password=dict(default='', required=False, type='str', no_log=True),
+ account=dict(default='', required=False, type='str'),
+ application=dict(required=True, type='str'),
+ keyset=dict(required=True, type='str', no_log=False),
+ state=dict(default='present', type='str',
+ choices=['started', 'stopped', 'present', 'absent']),
+ name=dict(required=True, type='str'), description=dict(type='str'),
+ event_handlers=dict(default=list(), type='list', elements='dict'),
+ changes=dict(default=dict(), type='dict'),
+ cache=dict(default=dict(), type='dict'),
+ validate_certs=dict(default=True, type='bool'))
+ module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
+
+ if not HAS_PUBNUB_BLOCKS_CLIENT:
+ module.fail_json(msg='pubnub_blocks_client required for this module.')
+
+ params = module.params
+
+ # Authorize user.
+ user = pubnub_user(module)
+ # Initialize PubNub account instance.
+ account = pubnub_account(module, user=user)
+ # Try fetch application with which module should work.
+ application = pubnub_application(module, account=account)
+ # Try fetch keyset with which module should work.
+ keyset = pubnub_keyset(module, account=account, application=application)
+ # Try fetch block with which module should work.
+ block = pubnub_block(module, account=account, keyset=keyset)
+ is_new_block = block is not None and block.uid == -1
+
+ # Check whether block should be removed or not.
+ if block is not None and params['state'] == 'absent':
+ keyset.remove_block(block)
+ block = None
+
+ if block is not None:
+ # Update block information if required.
+ if params.get('changes') and params['changes'].get('name'):
+ block.name = params['changes']['name']
+
+ # Process changes to event handlers.
+ for event_handler_data in params.get('event_handlers') or list():
+ state = event_handler_data.get('state') or 'present'
+ event_handler = pubnub_event_handler(data=event_handler_data,
+ block=block)
+ if state == 'absent' and event_handler:
+ block.delete_event_handler(event_handler)
+
+ # Update block operation state if required.
+ if block and not is_new_block:
+ if params['state'] == 'started':
+ block.start()
+ elif params['state'] == 'stopped':
+ block.stop()
+
+ # Save current account state.
+ if not module.check_mode:
+ try:
+ account.save()
+ except (exceptions.APIAccessError, exceptions.KeysetError,
+ exceptions.BlockError, exceptions.EventHandlerError,
+ exceptions.GeneralPubNubError) as exc:
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ exc_msg = _failure_title_from_exception(exc)
+ exc_descr = exc.message if hasattr(exc, 'message') else exc.args[0]
+ module.fail_json(msg=exc_msg, description=exc_descr,
+ changed=account.changed,
+ module_cache=module_cache)
+
+ # Report module execution results.
+ module_cache = dict(account)
+ module_cache.update(dict(pnm_user=dict(user)))
+ changed_will_change = account.changed or account.will_change
+ module.exit_json(changed=changed_will_change, module_cache=module_cache)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pulp_repo.py b/ansible_collections/community/general/plugins/modules/pulp_repo.py
new file mode 100644
index 000000000..d7333f89e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pulp_repo.py
@@ -0,0 +1,743 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Joe Adams <@sysadmind>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pulp_repo
+author: "Joe Adams (@sysadmind)"
+short_description: Add or remove Pulp repos from a remote host
+description:
+ - Add or remove Pulp repos from a remote host.
+ - Note, this is for Pulp 2 only.
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ add_export_distributor:
+ description:
+ - Whether or not to add the export distributor to new C(rpm) repositories.
+ type: bool
+ default: false
+ feed:
+ description:
+ - Upstream feed URL to receive updates from.
+ type: str
+ force_basic_auth:
+ description:
+ - httplib2, the library used by the M(ansible.builtin.uri) module, only sends
+ authentication information when a webservice responds to an initial
+ request with a 401 status. Since some basic auth services do not
+ properly send a 401, logins will fail. This option forces the sending of
+ the Basic authentication header upon initial request.
+ type: bool
+ default: false
+ generate_sqlite:
+ description:
+ - Boolean flag to indicate whether sqlite files should be generated during
+ a repository publish.
+ required: false
+ type: bool
+ default: false
+ feed_ca_cert:
+ description:
+ - CA certificate string used to validate the feed source SSL certificate.
+ This can be the file content or the path to the file.
+ type: str
+ aliases: [ importer_ssl_ca_cert ]
+ feed_client_cert:
+ description:
+ - Certificate used as the client certificate when synchronizing the
+ repository. This is used to communicate authentication information to
+ the feed source. The specified file may be the certificate itself or a
+ single file containing both the certificate and private key. This can
+ be the file content or the path to the file.
+ type: str
+ aliases: [ importer_ssl_client_cert ]
+ feed_client_key:
+ description:
+ - Private key to the certificate specified in I(importer_ssl_client_cert),
+ assuming it is not included in the certificate file itself. This can be
+ the file content or the path to the file.
+ type: str
+ aliases: [ importer_ssl_client_key ]
+ name:
+ description:
+ - Name of the repo to add or remove. This correlates to repo-id in Pulp.
+ required: true
+ type: str
+ aliases: [ repo ]
+ proxy_host:
+ description:
+ - Proxy URL setting for the pulp repository importer. This is in the
+ format C(scheme://host).
+ required: false
+ default: null
+ type: str
+ proxy_port:
+ description:
+ - Proxy port setting for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_username:
+ description:
+ - Proxy username for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ proxy_password:
+ description:
+ - Proxy password for the pulp repository importer.
+ required: false
+ default: null
+ type: str
+ publish_distributor:
+ description:
+ - Distributor to use when state is C(publish). The default is to
+ publish all distributors.
+ type: str
+ pulp_host:
+ description:
+ - URL of the pulp server to connect to.
+ default: https://127.0.0.1
+ type: str
+ relative_url:
+ description:
+ - Relative URL for the local repository. It is required when C(state=present).
+ type: str
+ repo_type:
+ description:
+ - Repo plugin type to use (for example C(rpm) or C(docker)).
+ default: rpm
+ type: str
+ repoview:
+ description:
+ - Whether to generate repoview files for a published repository. Setting
+ this to C(true) automatically activates C(generate_sqlite).
+ required: false
+ type: bool
+ default: false
+ serve_http:
+ description:
+ - Make the repo available over HTTP.
+ type: bool
+ default: false
+ serve_https:
+ description:
+ - Make the repo available over HTTPS.
+ type: bool
+ default: true
+ state:
+ description:
+ - The repo state. A state of C(sync) will queue a sync of the repo.
+ This is asynchronous but not delayed like a scheduled sync. A state of
+ C(publish) will use the repository's distributor to publish the content.
+ default: present
+ choices: [ "present", "absent", "sync", "publish" ]
+ type: str
+ url_password:
+ description:
+ - The password for use in HTTP basic authentication to the pulp API.
+ If the I(url_username) parameter is not specified, the I(url_password)
+ parameter will not be used.
+ url_username:
+ description:
+ - The username for use in HTTP basic authentication to the pulp API.
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be
+ used on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ wait_for_completion:
+ description:
+ - Wait for asynchronous tasks to complete before returning.
+ type: bool
+ default: false
+notes:
+ - This module can currently only create distributors and importers on rpm
+ repositories. Contributions to support other repo types are welcome.
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Create a new repo with name 'my_repo'
+ community.general.pulp_repo:
+ name: my_repo
+ relative_url: my/repo
+ state: present
+
+- name: Create a repo with a feed and a relative URL
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ feed: http://mirror.centos.org/centos/6/updates/x86_64/
+ relative_url: centos/6/updates
+ url_username: admin
+ url_password: admin
+ force_basic_auth: true
+ state: present
+
+- name: Remove a repo from the pulp server
+ community.general.pulp_repo:
+ name: my_old_repo
+ repo_type: rpm
+ state: absent
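+
+# A hedged sketch of the two asynchronous states; 'sync' and 'publish'
+# queue tasks on the server, and wait_for_completion makes the module
+# poll those tasks until they finish.
+- name: Sync a repo and wait for the sync to complete
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ state: sync
+ wait_for_completion: true
+
+- name: Publish a repo using all of its distributors
+ community.general.pulp_repo:
+ name: my_centos_updates
+ repo_type: rpm
+ state: publish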
+'''
+
+RETURN = '''
+repo:
+ description: Name of the repo that the action was performed on.
+ returned: success
+ type: str
+ sample: my_repo
+'''
+
+import json
+import os
+from time import sleep
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.urls import url_argument_spec
+
+
+class pulp_server(object):
+ """
+ Class to interact with a Pulp server
+ """
+
+ def __init__(self, module, pulp_host, repo_type, wait_for_completion=False):
+ self.module = module
+ self.host = pulp_host
+ self.repo_type = repo_type
+ self.repo_cache = dict()
+ self.wait_for_completion = wait_for_completion
+
+ def check_repo_exists(self, repo_id):
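+ # get_repo_config_by_id() looks the repo up in the cached repo list and
+ # raises IndexError when no entry matches; that is treated here as
+ # "repo does not exist".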
+ try:
+ self.get_repo_config_by_id(repo_id)
+ except IndexError:
+ return False
+ else:
+ return True
+
+ def compare_repo_distributor_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ for key, value in kwargs.items():
+ if key not in distributor['config'].keys():
+ return False
+
+ if not distributor['config'][key] == value:
+ return False
+
+ return True
+
+ def compare_repo_importer_config(self, repo_id, **kwargs):
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for importer in repo_config['importers']:
+ for key, value in kwargs.items():
+ if value is not None:
+ if key not in importer['config'].keys():
+ return False
+
+ if not importer['config'][key] == value:
+ return False
+
+ return True
+
+ def create_repo(
+ self,
+ repo_id,
+ relative_url,
+ feed=None,
+ generate_sqlite=False,
+ serve_http=False,
+ serve_https=True,
+ proxy_host=None,
+ proxy_port=None,
+ proxy_username=None,
+ proxy_password=None,
+ repoview=False,
+ ssl_ca_cert=None,
+ ssl_client_cert=None,
+ ssl_client_key=None,
+ add_export_distributor=False
+ ):
+ url = "%s/pulp/api/v2/repositories/" % self.host
+ data = dict()
+ data['id'] = repo_id
+ data['distributors'] = []
+
+ if self.repo_type == 'rpm':
+ yum_distributor = dict()
+ yum_distributor['distributor_id'] = "yum_distributor"
+ yum_distributor['distributor_type_id'] = "yum_distributor"
+ yum_distributor['auto_publish'] = True
+ yum_distributor['distributor_config'] = dict()
+ yum_distributor['distributor_config']['http'] = serve_http
+ yum_distributor['distributor_config']['https'] = serve_https
+ yum_distributor['distributor_config']['relative_url'] = relative_url
+ yum_distributor['distributor_config']['repoview'] = repoview
+ yum_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(yum_distributor)
+
+ if add_export_distributor:
+ export_distributor = dict()
+ export_distributor['distributor_id'] = "export_distributor"
+ export_distributor['distributor_type_id'] = "export_distributor"
+ export_distributor['auto_publish'] = False
+ export_distributor['distributor_config'] = dict()
+ export_distributor['distributor_config']['http'] = serve_http
+ export_distributor['distributor_config']['https'] = serve_https
+ export_distributor['distributor_config']['relative_url'] = relative_url
+ export_distributor['distributor_config']['repoview'] = repoview
+ export_distributor['distributor_config']['generate_sqlite'] = generate_sqlite or repoview
+ data['distributors'].append(export_distributor)
+
+ data['importer_type_id'] = "yum_importer"
+ data['importer_config'] = dict()
+
+ if feed:
+ data['importer_config']['feed'] = feed
+
+ if proxy_host:
+ data['importer_config']['proxy_host'] = proxy_host
+
+ if proxy_port:
+ data['importer_config']['proxy_port'] = proxy_port
+
+ if proxy_username:
+ data['importer_config']['proxy_username'] = proxy_username
+
+ if proxy_password:
+ data['importer_config']['proxy_password'] = proxy_password
+
+ if ssl_ca_cert:
+ data['importer_config']['ssl_ca_cert'] = ssl_ca_cert
+
+ if ssl_client_cert:
+ data['importer_config']['ssl_client_cert'] = ssl_client_cert
+
+ if ssl_client_key:
+ data['importer_config']['ssl_client_key'] = ssl_client_key
+
+ data['notes'] = {
+ "_repo-type": "rpm-repo"
+ }
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 201:
+ self.module.fail_json(
+ msg="Failed to create repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+ else:
+ return True
+
+ def delete_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='DELETE')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to delete repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def get_repo_config_by_id(self, repo_id):
+ if repo_id not in self.repo_cache.keys():
+ repo_array = [x for x in self.repo_list if x['id'] == repo_id]
+ self.repo_cache[repo_id] = repo_array[0]
+
+ return self.repo_cache[repo_id]
+
+ def publish_repo(self, repo_id, publish_distributor):
+ url = "%s/pulp/api/v2/repositories/%s/actions/publish/" % (self.host, repo_id)
+
+ # If there's no distributor specified, we will publish them all
+ if publish_distributor is None:
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ data = dict()
+ data['id'] = distributor['id']
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=distributor['id'])
+ else:
+ data = dict()
+ data['id'] = publish_distributor
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to publish the repo",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url,
+ distributor=publish_distributor)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def sync_repo(self, repo_id):
+ url = "%s/pulp/api/v2/repositories/%s/actions/sync/" % (self.host, repo_id)
+ response, info = fetch_url(self.module, url, data='', method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to schedule a sync of the repo.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ if self.wait_for_completion:
+ self.verify_tasks_completed(json.load(response))
+
+ return True
+
+ def update_repo_distributor_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/distributors/" % (self.host, repo_id)
+ repo_config = self.get_repo_config_by_id(repo_id)
+
+ for distributor in repo_config['distributors']:
+ distributor_url = "%s%s/" % (url, distributor['id'])
+ data = dict()
+ data['distributor_config'] = dict()
+
+ for key, value in kwargs.items():
+ data['distributor_config'][key] = value
+
+ response, info = fetch_url(
+ self.module,
+ distributor_url,
+ data=json.dumps(data),
+ method='PUT')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the relative url for the repository.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ def update_repo_importer_config(self, repo_id, **kwargs):
+ url = "%s/pulp/api/v2/repositories/%s/importers/" % (self.host, repo_id)
+ data = dict()
+ importer_config = dict()
+
+ for key, value in kwargs.items():
+ if value is not None:
+ importer_config[key] = value
+
+ data['importer_config'] = importer_config
+
+ if self.repo_type == 'rpm':
+ data['importer_type_id'] = "yum_importer"
+
+ response, info = fetch_url(
+ self.module,
+ url,
+ data=json.dumps(data),
+ method='POST')
+
+ if info['status'] != 202:
+ self.module.fail_json(
+ msg="Failed to set the repo importer configuration",
+ status_code=info['status'],
+ response=info['msg'],
+ importer_config=importer_config,
+ url=url)
+
+ def set_repo_list(self):
+ url = "%s/pulp/api/v2/repositories/?details=true" % self.host
+ response, info = fetch_url(self.module, url, method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Request failed",
+ status_code=info['status'],
+ response=info['msg'],
+ url=url)
+
+ self.repo_list = json.load(response)
+
+ def verify_tasks_completed(self, response_dict):
+ for task in response_dict['spawned_tasks']:
+ task_url = "%s%s" % (self.host, task['_href'])
+
+ while True:
+ response, info = fetch_url(
+ self.module,
+ task_url,
+ data='',
+ method='GET')
+
+ if info['status'] != 200:
+ self.module.fail_json(
+ msg="Failed to check async task status.",
+ status_code=info['status'],
+ response=info['msg'],
+ url=task_url)
+
+ task_dict = json.load(response)
+
+ if task_dict['state'] == 'finished':
+ return True
+
+ if task_dict['state'] == 'error':
+ self.module.fail_json(msg="Asynchronous task failed to complete.", error=task_dict['error'])
+
+ sleep(2)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ add_export_distributor=dict(default=False, type='bool'),
+ feed=dict(),
+ generate_sqlite=dict(default=False, type='bool'),
+ feed_ca_cert=dict(aliases=['importer_ssl_ca_cert']),
+ feed_client_cert=dict(aliases=['importer_ssl_client_cert']),
+ feed_client_key=dict(aliases=['importer_ssl_client_key'], no_log=True),
+ name=dict(required=True, aliases=['repo']),
+ proxy_host=dict(),
+ proxy_port=dict(),
+ proxy_username=dict(),
+ proxy_password=dict(no_log=True),
+ publish_distributor=dict(),
+ pulp_host=dict(default="https://127.0.0.1"),
+ relative_url=dict(),
+ repo_type=dict(default="rpm"),
+ repoview=dict(default=False, type='bool'),
+ serve_http=dict(default=False, type='bool'),
+ serve_https=dict(default=True, type='bool'),
+ state=dict(
+ default="present",
+ choices=['absent', 'present', 'sync', 'publish']),
+ wait_for_completion=dict(default=False, type="bool"))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True)
+
+ add_export_distributor = module.params['add_export_distributor']
+ feed = module.params['feed']
+ generate_sqlite = module.params['generate_sqlite']
+ importer_ssl_ca_cert = module.params['feed_ca_cert']
+ importer_ssl_client_cert = module.params['feed_client_cert']
+ importer_ssl_client_key = module.params['feed_client_key']
+ proxy_host = module.params['proxy_host']
+ proxy_port = module.params['proxy_port']
+ proxy_username = module.params['proxy_username']
+ proxy_password = module.params['proxy_password']
+ publish_distributor = module.params['publish_distributor']
+ pulp_host = module.params['pulp_host']
+ relative_url = module.params['relative_url']
+ repo = module.params['name']
+ repo_type = module.params['repo_type']
+ repoview = module.params['repoview']
+ serve_http = module.params['serve_http']
+ serve_https = module.params['serve_https']
+ state = module.params['state']
+ wait_for_completion = module.params['wait_for_completion']
+
+ if (state == 'present') and (not relative_url):
+ module.fail_json(msg="When state is present, relative_url is required.")
+
+ # Ensure that the importer_ssl_* values hold the certificate content and not a file path
+ if importer_ssl_ca_cert is not None:
+ importer_ssl_ca_cert_file_path = os.path.abspath(importer_ssl_ca_cert)
+ if os.path.isfile(importer_ssl_ca_cert_file_path):
+ with open(importer_ssl_ca_cert_file_path, 'r') as cert_file:
+ importer_ssl_ca_cert = cert_file.read()
+
+ if importer_ssl_client_cert is not None:
+ importer_ssl_client_cert_file_path = os.path.abspath(importer_ssl_client_cert)
+ if os.path.isfile(importer_ssl_client_cert_file_path):
+ with open(importer_ssl_client_cert_file_path, 'r') as cert_file:
+ importer_ssl_client_cert = cert_file.read()
+
+ if importer_ssl_client_key is not None:
+ importer_ssl_client_key_file_path = os.path.abspath(importer_ssl_client_key)
+ if os.path.isfile(importer_ssl_client_key_file_path):
+ with open(importer_ssl_client_key_file_path, 'r') as key_file:
+ importer_ssl_client_key = key_file.read()
+
+ server = pulp_server(module, pulp_host, repo_type, wait_for_completion=wait_for_completion)
+ server.set_repo_list()
+ repo_exists = server.check_repo_exists(repo)
+
+ changed = False
+
+ if state == 'absent' and repo_exists:
+ if not module.check_mode:
+ server.delete_repo(repo)
+
+ changed = True
+
+ if state == 'sync':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be synced.")
+
+ if not module.check_mode:
+ server.sync_repo(repo)
+
+ changed = True
+
+ if state == 'publish':
+ if not repo_exists:
+ module.fail_json(msg="Repository was not found. The repository can not be published.")
+
+ if not module.check_mode:
+ server.publish_repo(repo, publish_distributor)
+
+ changed = True
+
+ if state == 'present':
+ if not repo_exists:
+ if not module.check_mode:
+ server.create_repo(
+ repo_id=repo,
+ relative_url=relative_url,
+ feed=feed,
+ generate_sqlite=generate_sqlite,
+ serve_http=serve_http,
+ serve_https=serve_https,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ repoview=repoview,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key,
+ add_export_distributor=add_export_distributor)
+
+ changed = True
+
+ else:
+ # Check to make sure all the settings are correct
+ # The importer config gets overwritten on set and not updated, so
+ # we set the whole config at the same time.
+ if not server.compare_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key
+ ):
+ if not module.check_mode:
+ server.update_repo_importer_config(
+ repo,
+ feed=feed,
+ proxy_host=proxy_host,
+ proxy_port=proxy_port,
+ proxy_username=proxy_username,
+ proxy_password=proxy_password,
+ ssl_ca_cert=importer_ssl_ca_cert,
+ ssl_client_cert=importer_ssl_client_cert,
+ ssl_client_key=importer_ssl_client_key)
+
+ changed = True
+
+ if relative_url is not None:
+ if not server.compare_repo_distributor_config(
+ repo,
+ relative_url=relative_url
+ ):
+ if not module.check_mode:
+ server.update_repo_distributor_config(
+ repo,
+ relative_url=relative_url)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, generate_sqlite=generate_sqlite):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, generate_sqlite=generate_sqlite)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, repoview=repoview):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, repoview=repoview)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, http=serve_http):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, http=serve_http)
+
+ changed = True
+
+ if not server.compare_repo_distributor_config(repo, https=serve_https):
+ if not module.check_mode:
+ server.update_repo_distributor_config(repo, https=serve_https)
+
+ changed = True
+
+ module.exit_json(changed=changed, repo=repo)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/puppet.py b/ansible_collections/community/general/plugins/modules/puppet.py
new file mode 100644
index 000000000..cd580791b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/puppet.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Hewlett-Packard Development Company, L.P.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: puppet
+short_description: Runs puppet
+description:
+ - Runs I(puppet) agent or apply in a reliable manner.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ timeout:
+ description:
+ - How long to wait for I(puppet) to finish.
+ type: str
+ default: 30m
+ puppetmaster:
+ description:
+ - The hostname of the puppetmaster to contact.
+ type: str
+ modulepath:
+ description:
+ - Path to an alternate location for puppet modules.
+ type: str
+ manifest:
+ description:
+ - Path to the manifest file to run puppet apply on.
+ type: str
+ noop:
+ description:
+ - Override puppet.conf noop mode.
+ - When C(true), run Puppet agent with C(--noop) switch set.
+ - When C(false), run Puppet agent with C(--no-noop) switch set.
+ - When unset (default), use default or puppet.conf value if defined.
+ type: bool
+ facts:
+ description:
+ - A dict of values to pass in as persistent external facter facts.
+ type: dict
+ facter_basename:
+ description:
+ - Basename of the facter output file.
+ type: str
+ default: ansible
+ environment:
+ description:
+ - Puppet environment to be used.
+ type: str
+ confdir:
+ description:
+ - Path to the directory containing the puppet.conf file.
+ type: str
+ version_added: 5.1.0
+ logdest:
+ description:
+ - Where the puppet logs should go, if puppet apply is being used.
+ - C(all) will go to both C(console) and C(syslog).
+ - C(stdout) will be deprecated and replaced by C(console).
+ type: str
+ choices: [ all, stdout, syslog ]
+ default: stdout
+ certname:
+ description:
+ - The name to use when handling certificates.
+ type: str
+ tags:
+ description:
+ - A list of puppet tags to be used.
+ type: list
+ elements: str
+ skip_tags:
+ description:
+ - A list of puppet tags to be excluded.
+ type: list
+ elements: str
+ version_added: 6.6.0
+ execute:
+ description:
+ - Execute a specific piece of Puppet code.
+ - It has no effect with a puppetmaster.
+ type: str
+ use_srv_records:
+ description:
+ - Toggles the C(use_srv_records) flag.
+ type: bool
+ summarize:
+ description:
+ - Whether to print a transaction summary.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Print extra information.
+ type: bool
+ default: false
+ debug:
+ description:
+ - Enable full debugging.
+ type: bool
+ default: false
+ show_diff:
+ description:
+ - Whether to print file change details.
+ - Alias C(show-diff) has been deprecated and will be removed in community.general 7.0.0.
+ aliases: ['show-diff']
+ type: bool
+ default: false
+requirements:
+- puppet
+author:
+- Monty Taylor (@emonty)
+'''
+
+EXAMPLES = r'''
+- name: Run puppet agent and fail if anything goes wrong
+ community.general.puppet:
+
+- name: Run puppet and timeout in 5 minutes
+ community.general.puppet:
+ timeout: 5m
+
+- name: Run puppet using a different environment
+ community.general.puppet:
+ environment: testing
+
+- name: Run puppet using a specific certname
+ community.general.puppet:
+ certname: agent01.example.com
+
+- name: Run puppet using a specific piece of Puppet code. Has no effect with a puppetmaster
+ community.general.puppet:
+ execute: include ::mymodule
+
+- name: Run puppet using specific tags
+ community.general.puppet:
+ tags:
+ - update
+ - nginx
+ skip_tags:
+ - service
+
+- name: Run puppet agent in noop mode
+ community.general.puppet:
+ noop: true
+
+- name: Run a manifest with debug, log to both syslog and console, specify module path
+ community.general.puppet:
+ modulepath: /etc/puppet/modules:/opt/stack/puppet-modules:/usr/share/openstack-puppet/modules
+ logdest: all
+ manifest: /var/lib/example/puppet_step_config.pp
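+
+# A hedged sketch: summarize, verbose and debug are passed on
+# 'puppet apply' runs, that is, when a manifest or 'execute' is given.
+- name: Apply a manifest and print a transaction summary
+ community.general.puppet:
+ manifest: /var/lib/example/puppet_step_config.pp
+ summarize: true
+ verbose: true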
+'''
+
+import json
+import os
+import stat
+
+import ansible_collections.community.general.plugins.module_utils.puppet as puppet_utils
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def _write_structured_data(basedir, basename, data):
+ if not os.path.exists(basedir):
+ os.makedirs(basedir)
+ file_path = os.path.join(basedir, "{0}.json".format(basename))
+ # This is more complex than you might normally expect because we want to
+ # open the file with only u+rw set. Also, we use the stat constants
+ # because ansible still supports python 2.4 and the octal syntax changed
+ # Open in binary mode: json.dumps(...).encode('utf8') yields bytes, and
+ # writing bytes to a text-mode file raises TypeError on Python 3.
+ with os.fdopen(
+ os.open(
+ file_path, os.O_CREAT | os.O_WRONLY,
+ stat.S_IRUSR | stat.S_IWUSR), 'wb') as out_file:
+ out_file.write(json.dumps(data).encode('utf8'))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ timeout=dict(type='str', default='30m'),
+ puppetmaster=dict(type='str'),
+ modulepath=dict(type='str'),
+ manifest=dict(type='str'),
+ confdir=dict(type='str'),
+ noop=dict(type='bool'),
+ logdest=dict(type='str', default='stdout', choices=['all', 'stdout', 'syslog']),
+ # The following is not related to Ansible's diff; see https://github.com/ansible-collections/community.general/pull/3980#issuecomment-1005666154
+ show_diff=dict(
+ type='bool', default=False, aliases=['show-diff'],
+ deprecated_aliases=[dict(name='show-diff', version='7.0.0', collection_name='community.general')]),
+ facts=dict(type='dict'),
+ facter_basename=dict(type='str', default='ansible'),
+ environment=dict(type='str'),
+ certname=dict(type='str'),
+ tags=dict(type='list', elements='str'),
+ skip_tags=dict(type='list', elements='str'),
+ execute=dict(type='str'),
+ summarize=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ use_srv_records=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('puppetmaster', 'manifest'),
+ ('puppetmaster', 'manifest', 'execute'),
+ ('puppetmaster', 'modulepath'),
+ ],
+ )
+ p = module.params
+
+ if p['manifest']:
+ if not os.path.exists(p['manifest']):
+ module.fail_json(
+ msg="Manifest file %(manifest)s not found." % dict(
+ manifest=p['manifest']))
+
+ # Check if puppet is disabled here
+ if not p['manifest']:
+ puppet_utils.ensure_agent_enabled(module)
+
+ if module.params['facts'] and not module.check_mode:
+ _write_structured_data(
+ puppet_utils.get_facter_dir(),
+ module.params['facter_basename'],
+ module.params['facts'])
+
+ runner = puppet_utils.puppet_runner(module)
+
+ if not p['manifest'] and not p['execute']:
+ args_order = "_agent_fixed puppetmaster show_diff confdir environment tags skip_tags certname noop use_srv_records"
+ with runner(args_order) as ctx:
+ rc, stdout, stderr = ctx.run()
+ else:
+ args_order = "_apply_fixed logdest modulepath environment certname tags skip_tags noop _execute summarize debug verbose"
+ with runner(args_order) as ctx:
+ rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']])
+
+ if rc == 0:
+ # success
+ module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
+ elif rc == 1:
+ # rc==1 could be because it's disabled
+ # rc==1 could also mean there was a compilation failure
+ disabled = "administratively disabled" in stdout
+ if disabled:
+ msg = "puppet is disabled"
+ else:
+ msg = "puppet did not run"
+ module.exit_json(
+ rc=rc, disabled=disabled, msg=msg,
+ error=True, stdout=stdout, stderr=stderr)
+ elif rc == 2:
+ # success with changes
+ module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
+ elif rc == 124:
+ # timeout
+ module.exit_json(
+ rc=rc, msg="%s timed out" % ctx.cmd, stdout=stdout, stderr=stderr)
+ else:
+ # failure
+ module.fail_json(
+ rc=rc, msg="%s failed with return code: %d" % (ctx.cmd, rc),
+ stdout=stdout, stderr=stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pushbullet.py b/ansible_collections/community/general/plugins/modules/pushbullet.py
new file mode 100644
index 000000000..c7e20c373
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pushbullet.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+author: "Willy Barro (@willybarro)"
+requirements: [ pushbullet.py ]
+module: pushbullet
+short_description: Sends notifications to Pushbullet
+description:
+ - This module sends push notifications via Pushbullet to channels or devices.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ api_key:
+ type: str
+ description:
+      - Pushbullet API token.
+ required: true
+ channel:
+ type: str
+ description:
+      - The channel TAG you wish to broadcast a push notification to,
+        as seen under "My Channels" > "Edit your channel" on the
+        Pushbullet page.
+ device:
+ type: str
+ description:
+      - The device NAME you wish to send a push notification to,
+        as seen on the Pushbullet main page.
+ push_type:
+ type: str
+ description:
+ - Thing you wish to push.
+ default: note
+ choices: [ "note", "link" ]
+ title:
+ type: str
+ description:
+ - Title of the notification.
+ required: true
+ body:
+ type: str
+ description:
+      - Body of the notification, for example details of the fault you are
+        alerting about.
+ url:
+ type: str
+ description:
+ - URL field, used when I(push_type) is C(link).
+
+notes:
+  - Requires the pushbullet.py Python package on the remote host.
+    You can install it via pip with C(pip install pushbullet.py).
+    See U(https://github.com/randomchars/pushbullet.py).
+'''
+
+EXAMPLES = '''
+- name: Sends a push notification to a device
+ community.general.pushbullet:
+ api_key: "ABC123abc123ABC123abc123ABC123ab"
+ device: "Chrome"
+ title: "You may see this on Google Chrome"
+
+- name: Sends a link to a device
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ device: Chrome
+ push_type: link
+ title: Ansible Documentation
+ body: https://docs.ansible.com/
+
+- name: Sends a push notification to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+    title: "Broadcasting a message to the #my-awesome-channel folks"
+
+- name: Sends a push notification with title and body to a channel
+ community.general.pushbullet:
+ api_key: ABC123abc123ABC123abc123ABC123ab
+ channel: my-awesome-channel
+ title: ALERT! Signup service is down
+ body: Error rate on signup service is over 90% for more than 2 minutes
+'''
+
+import traceback
+
+PUSHBULLET_IMP_ERR = None
+try:
+ from pushbullet import PushBullet
+ from pushbullet.errors import InvalidKeyError, PushError
+except ImportError:
+ PUSHBULLET_IMP_ERR = traceback.format_exc()
+ pushbullet_found = False
+else:
+ pushbullet_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+
+# ===========================================
+# Main
+#
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_key=dict(type='str', required=True, no_log=True),
+ channel=dict(type='str', default=None),
+ device=dict(type='str', default=None),
+ push_type=dict(type='str', default="note", choices=['note', 'link']),
+ title=dict(type='str', required=True),
+ body=dict(type='str', default=None),
+ url=dict(type='str', default=None),
+ ),
+ mutually_exclusive=(
+ ['channel', 'device'],
+ ),
+ supports_check_mode=True
+ )
+
+ api_key = module.params['api_key']
+ channel = module.params['channel']
+ device = module.params['device']
+ push_type = module.params['push_type']
+ title = module.params['title']
+ body = module.params['body']
+ url = module.params['url']
+
+ if not pushbullet_found:
+ module.fail_json(msg=missing_required_lib('pushbullet.py'), exception=PUSHBULLET_IMP_ERR)
+
+ # Init pushbullet
+ try:
+ pb = PushBullet(api_key)
+ target = None
+ except InvalidKeyError:
+ module.fail_json(msg="Invalid api_key")
+
+ # Checks for channel/device
+ if device is None and channel is None:
+ module.fail_json(msg="You need to provide a channel or a device.")
+
+ # Search for given device
+ if device is not None:
+ devices_by_nickname = {}
+ for d in pb.devices:
+ devices_by_nickname[d.nickname] = d
+
+ if device in devices_by_nickname:
+ target = devices_by_nickname[device]
+ else:
+ module.fail_json(msg="Device '%s' not found. Available devices: '%s'" % (device, "', '".join(devices_by_nickname.keys())))
+
+ # Search for given channel
+ if channel is not None:
+ channels_by_tag = {}
+ for c in pb.channels:
+ channels_by_tag[c.channel_tag] = c
+
+ if channel in channels_by_tag:
+ target = channels_by_tag[channel]
+ else:
+ module.fail_json(msg="Channel '%s' not found. Available channels: '%s'" % (channel, "', '".join(channels_by_tag.keys())))
+
+ # If in check mode, exit saying that we succeeded
+ if module.check_mode:
+ module.exit_json(changed=False, msg="OK")
+
+ # Send push notification
+ try:
+ if push_type == "link":
+ target.push_link(title, url, body)
+ else:
+ target.push_note(title, body)
+ module.exit_json(changed=False, msg="OK")
+ except PushError as e:
+ module.fail_json(msg="An error occurred, Pushbullet's response: %s" % str(e))
+
+ module.fail_json(msg="An unknown error has occurred")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/pushover.py b/ansible_collections/community/general/plugins/modules/pushover.py
new file mode 100644
index 000000000..f5493731f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/pushover.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2012, Jim Richardson <weaselkeeper@gmail.com>
+# Copyright (c) 2019, Bernd Arnold <wopfel@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: pushover
+short_description: Send notifications via U(https://pushover.net)
+description:
+  - Send notifications via Pushover to a subscriber list of devices and
+    email addresses. Requires the Pushover app on devices.
+notes:
+  - You will require a pushover.net account to use this module. However, no
+    account is required to receive messages.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ msg:
+ type: str
+ description:
+ - What message you wish to send.
+ required: true
+ app_token:
+ type: str
+ description:
+ - Pushover issued token identifying your pushover app.
+ required: true
+ user_key:
+ type: str
+ description:
+ - Pushover issued authentication key for your user.
+ required: true
+ title:
+ type: str
+ description:
+ - Message title.
+ required: false
+ pri:
+ type: str
+ description:
+ - Message priority (see U(https://pushover.net) for details).
+ required: false
+ default: '0'
+ choices: [ '-2', '-1', '0', '1', '2' ]
+ device:
+ type: str
+ description:
+ - A device the message should be sent to. Multiple devices can be specified, separated by a comma.
+ required: false
+ version_added: 1.2.0
+
+author:
+ - "Jim Richardson (@weaselkeeper)"
+ - "Bernd Arnold (@wopfel)"
+'''
+
+EXAMPLES = '''
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} is acting strange ...'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net
+ community.general.pushover:
+ title: 'Alert!'
+ msg: '{{ inventory_hostname }} has exploded in flames, It is now time to panic'
+ pri: 1
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ delegate_to: localhost
+
+- name: Send notifications via pushover.net to a specific device
+ community.general.pushover:
+ msg: '{{ inventory_hostname }} has been lost somewhere'
+ app_token: wxfdksl
+ user_key: baa5fe97f2c5ab3ca8f0bb59
+ device: admins-iPhone
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class Pushover(object):
+ ''' Instantiates a pushover object, use it to send notifications '''
+ base_uri = 'https://api.pushover.net'
+
+ def __init__(self, module, user, token):
+ self.module = module
+ self.user = user
+ self.token = token
+
+ def run(self, priority, msg, title, device):
+        ''' Send the notification via the Pushover messages API. '''
+
+ url = '%s/1/messages.json' % (self.base_uri)
+
+ # parse config
+ options = dict(user=self.user,
+ token=self.token,
+ priority=priority,
+ message=msg)
+
+ if title is not None:
+ options = dict(options,
+ title=title)
+
+ if device is not None:
+ options = dict(options,
+ device=device)
+
+ data = urlencode(options)
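+        # The request body is form-encoded, e.g.
+        # user=...&token=...&priority=0&message=hello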
+
+ headers = {"Content-type": "application/x-www-form-urlencoded"}
+ r, info = fetch_url(self.module, url, method='POST', data=data, headers=headers)
+ if info['status'] != 200:
+ raise Exception(info)
+
+ return r.read()
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ title=dict(type='str'),
+ msg=dict(required=True),
+ app_token=dict(required=True, no_log=True),
+ user_key=dict(required=True, no_log=True),
+ pri=dict(required=False, default='0', choices=['-2', '-1', '0', '1', '2']),
+ device=dict(type='str'),
+ ),
+ )
+
+ msg_object = Pushover(module, module.params['user_key'], module.params['app_token'])
+ try:
+ response = msg_object.run(module.params['pri'], module.params['msg'], module.params['title'], module.params['device'])
+    except Exception as e:
+        module.fail_json(msg='Unable to send msg via pushover: %s' % e)
+
+ module.exit_json(msg='message sent successfully: %s' % response, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/python_requirements_info.py b/ansible_collections/community/general/plugins/modules/python_requirements_info.py
new file mode 100644
index 000000000..231114a1d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/python_requirements_info.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: python_requirements_info
+short_description: Show python path and assert dependency versions
+description:
+ - Get info about available Python requirements on the target host, including listing required libraries and gathering versions.
+ - This module was called C(python_requirements_facts) before Ansible 2.9. The usage did not change.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ dependencies:
+ type: list
+ elements: str
+ description: >
+ A list of version-likes or module names to check for installation.
+ Supported operators: <, >, <=, >=, or ==. The bare module name like
+ I(ansible), the module with a specific version like I(boto3==1.6.1), or a
+ partial version like I(requests>2) are all valid specifications.
+ default: []
+author:
+ - Will Thames (@willthames)
+ - Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = '''
+- name: Show python lib/site paths
+ community.general.python_requirements_info:
+
+- name: Check for modern boto3 and botocore versions
+ community.general.python_requirements_info:
+ dependencies:
+ - boto3>1.6
+ - botocore<2
+'''
+
+RETURN = '''
+python:
+  description: path of the python interpreter in use
+ returned: always
+ type: str
+ sample: /usr/local/opt/python@2/bin/python2.7
+python_version:
+ description: version of python
+ returned: always
+ type: str
+ sample: "2.7.15 (default, May 1 2018, 16:44:08)\n[GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.1)]"
+python_version_info:
+  description: breakdown of the python version into its components
+ returned: always
+ type: dict
+ contains:
+ major:
+ description: The C(major) component of the python interpreter version.
+ returned: always
+ type: int
+ sample: 3
+ minor:
+ description: The C(minor) component of the python interpreter version.
+ returned: always
+ type: int
+ sample: 8
+ micro:
+ description: The C(micro) component of the python interpreter version.
+ returned: always
+ type: int
+ sample: 10
+ releaselevel:
+ description: The C(releaselevel) component of the python interpreter version.
+ returned: always
+ type: str
+ sample: final
+ serial:
+ description: The C(serial) component of the python interpreter version.
+ returned: always
+ type: int
+ sample: 0
+ version_added: 4.2.0
+python_system_path:
+ description: List of paths python is looking for modules in
+ returned: always
+ type: list
+ sample:
+ - /usr/local/opt/python@2/site-packages/
+ - /usr/lib/python/site-packages/
+valid:
+ description: A dictionary of dependencies that matched their desired versions. If no version was specified, then I(desired) will be null
+ returned: always
+ type: dict
+ sample:
+ boto3:
+ desired: null
+ installed: 1.7.60
+ botocore:
+ desired: botocore<2
+ installed: 1.10.60
+mismatched:
+ description: A dictionary of dependencies that did not satisfy the desired version
+ returned: always
+ type: dict
+ sample:
+ botocore:
+ desired: botocore>2
+ installed: 1.10.60
+not_found:
+ description: A list of packages that could not be imported at all, and are not installed
+ returned: always
+ type: list
+ sample:
+ - boto4
+ - requests
+'''
+
+import re
+import sys
+import operator
+
+HAS_DISTUTILS = False
+try:
+ import pkg_resources
+ from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+ HAS_DISTUTILS = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+
+operations = {
+ '<=': operator.le,
+ '>=': operator.ge,
+ '<': operator.lt,
+ '>': operator.gt,
+ '==': operator.eq,
+}
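+# e.g. operations['>='](LooseVersion('1.7.60'), LooseVersion('1.6')) is True;
+# this is how a spec such as 'boto3>=1.6' is evaluated against the installed
+# version further below.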
+
+python_version_info = dict(
+ major=sys.version_info[0],
+ minor=sys.version_info[1],
+ micro=sys.version_info[2],
+ releaselevel=sys.version_info[3],
+ serial=sys.version_info[4],
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dependencies=dict(type='list', elements='str', default=[])
+ ),
+ supports_check_mode=True,
+ )
+ if not HAS_DISTUTILS:
+ module.fail_json(
+ msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
+ python=sys.executable,
+ python_version=sys.version,
+ python_version_info=python_version_info,
+ python_system_path=sys.path,
+ )
+ pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$')
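+    # e.g. 'boto3'        -> ('boto3', None, None)
+    #      'boto3==1.6.1' -> ('boto3', '==', '1.6.1')
+    #      'requests>2'   -> ('requests', '>', '2')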
+
+ results = dict(
+ not_found=[],
+ mismatched={},
+ valid={},
+ )
+
+ for dep in module.params['dependencies']:
+ match = pkg_dep_re.match(dep)
+ if not match:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
+ pkg, op, version = match.groups()
+ if op is not None and op not in operations:
+ module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
+ try:
+ existing = pkg_resources.get_distribution(pkg).version
+ except pkg_resources.DistributionNotFound:
+ # not there
+ results['not_found'].append(pkg)
+ continue
+ if op is None and version is None:
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': None,
+ }
+ elif operations[op](LooseVersion(existing), LooseVersion(version)):
+ results['valid'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+ else:
+ results['mismatched'][pkg] = {
+ 'installed': existing,
+ 'desired': dep,
+ }
+
+ module.exit_json(
+ python=sys.executable,
+ python_version=sys.version,
+ python_version_info=python_version_info,
+ python_system_path=sys.path,
+ **results
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax.py b/ansible_collections/community/general/plugins/modules/rax.py
new file mode 100644
index 000000000..47c0a6d1b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax.py
@@ -0,0 +1,903 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax
+short_description: Create / delete an instance in Rackspace Public Cloud
+description:
+  - Creates / deletes a Rackspace Public Cloud instance and optionally
+    waits for it to be 'running'.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number with the name of the
+ created servers. Only applicable when used with the I(group) attribute
+ or meta key.
+ type: bool
+ default: true
+ boot_from_volume:
+ description:
+ - Whether or not to boot the instance from a Cloud Block Storage volume.
+ If C(true) and I(image) is specified a new volume will be created at
+ boot time. I(boot_volume_size) is required with I(image) to create a
+ new volume at boot time.
+ type: bool
+ default: false
+ boot_volume:
+ type: str
+ description:
+ - Cloud Block Storage ID or Name to use as the boot volume of the
+ instance
+ boot_volume_size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes. This is only required with
+ I(image) and I(boot_from_volume).
+ default: 100
+ boot_volume_terminate:
+ description:
+ - Whether the I(boot_volume) or newly created volume from I(image) will
+ be terminated when the server is terminated
+ type: bool
+ default: false
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: false
+ count:
+ type: int
+ description:
+ - number of instances to launch
+ default: 1
+ count_offset:
+ type: int
+ description:
+      - number to start counting from
+ default: 1
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified it will assume the value C(auto).
+ choices:
+ - auto
+ - manual
+ exact_count:
+ description:
+ - Explicitly ensure an exact count of instances, used with
+ state=active/present. If specified as C(true) and I(count) is less than
+ the servers matched, servers will be deleted to match the count. If
+ the number of matched servers is fewer than specified in I(count)
+ additional servers will be added.
+ type: bool
+ default: false
+ extra_client_args:
+ type: dict
+ default: {}
+ description:
+ - A hash of key/value pairs to be used when creating the cloudservers
+ client. This is considered an advanced option, use it wisely and
+ with caution.
+ extra_create_args:
+ type: dict
+ default: {}
+ description:
+ - A hash of key/value pairs to be used when creating a new server.
+ This is considered an advanced option, use it wisely and with caution.
+ files:
+ type: dict
+ default: {}
+ description:
+      - Files to insert into the instance, as a hash of
+        C(remotefilename: localfilename); the contents of each local file
+        are uploaded to the remote path.
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ group:
+ type: str
+ description:
+      - host group to assign to the server; also used for idempotent
+        operations to ensure a specific number of instances
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name).
+ With I(boot_from_volume), a Cloud Block Storage volume will be created
+ with this image
+ instance_ids:
+ type: list
+ elements: str
+ description:
+ - list of instance ids, currently only used when state='absent' to
+ remove instances
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ aliases:
+ - keypair
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the instance
+ networks:
+ type: list
+ elements: str
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+ - Data to be uploaded to the servers config drive. This option implies
+ I(config_drive). Can be a file path or a string
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Jesse Keating (@omgjlk)"
+ - "Matt Martz (@sivel)"
+notes:
+ - I(exact_count) can be "destructive" if the number of running servers in
+ the I(group) is larger than that specified in I(count). In such a case, the
+ I(state) is effectively set to C(absent) and the extra servers are deleted.
+ In the case of deletion, the returned data structure will have C(action)
+ set to C(delete), and the oldest servers in the group will be deleted.
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Server
+  hosts: local
+  gather_facts: false
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: rax-test1
+ flavor: 5
+ image: b11d9567-e412-4255-96b9-bd63ab23bcfe
+ key_name: my_rackspace_key
+ files:
+ /root/test.txt: /home/localuser/test.txt
+ wait: true
+ state: present
+ networks:
+ - private
+ - public
+ register: rax
+
+- name: Build an exact count of cloud servers with incremented names
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: Server build requests
+ local_action:
+ module: rax
+ credentials: ~/.raxpub
+ name: test%03d.example.org
+ flavor: performance1-1
+ image: ubuntu-1204-lts-precise-pangolin
+ state: present
+ count: 10
+ count_offset: 10
+ exact_count: true
+ group: test
+ wait: true
+ register: rax
+'''
+
+import json
+import os
+import re
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (FINAL_STATUSES, rax_argument_spec, rax_find_bootable_volume,
+ rax_find_image, rax_find_network, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+from ansible.module_utils.six.moves import xrange
+from ansible.module_utils.six import string_types
+
+
+def rax_find_server_image(module, server, image, boot_volume):
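+    # Resolve the image a server actually booted from: for boot-from-volume
+    # servers the image id lives in the volume's image metadata rather than
+    # on the server itself; returns None when the server does not match the
+    # requested image.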
+ if not image and boot_volume:
+ vol = rax_find_bootable_volume(module, pyrax, server,
+ exit=False)
+ if not vol:
+ return None
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if vol_image_id:
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if server_image:
+ server.image = dict(id=server_image)
+
+ # Match image IDs taking care of boot from volume
+ if image and not server.image:
+ vol = rax_find_bootable_volume(module, pyrax, server)
+ volume_image_metadata = vol.volume_image_metadata
+ vol_image_id = volume_image_metadata.get('image_id')
+ if not vol_image_id:
+ return None
+ server_image = rax_find_image(module, pyrax,
+ vol_image_id, exit=False)
+ if image != server_image:
+ return None
+
+ server.image = dict(id=server_image)
+ elif image and server.image['id'] != image:
+ return None
+
+ return server.image
+
+
+def create(module, names=None, flavor=None, image=None, meta=None, key_name=None,
+ files=None, wait=True, wait_timeout=300, disk_config=None,
+ group=None, nics=None, extra_create_args=None, user_data=None,
+ config_drive=False, existing=None, block_device_mapping_v2=None):
+ names = [] if names is None else names
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ nics = [] if nics is None else nics
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+ existing = [] if existing is None else existing
+ block_device_mapping_v2 = [] if block_device_mapping_v2 is None else block_device_mapping_v2
+
+ cs = pyrax.cloudservers
+ changed = False
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(os.path.expanduser(user_data)):
+ try:
+ user_data = os.path.expanduser(user_data)
+ f = open(user_data)
+ user_data = f.read()
+ f.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % user_data)
+
+ # Handle the file contents
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ fileobj = open(lpath, 'r')
+ files[rpath] = fileobj.read()
+ fileobj.close()
+ except Exception as e:
+ module.fail_json(msg='Failed to load %s' % lpath)
+ try:
+ servers = []
+ bdmv2 = block_device_mapping_v2
+ for name in names:
+ servers.append(cs.servers.create(name=name, image=image,
+ flavor=flavor, meta=meta,
+ key_name=key_name,
+ files=files, nics=nics,
+ disk_config=disk_config,
+ config_drive=config_drive,
+ userdata=user_data,
+ block_device_mapping_v2=bdmv2,
+ **extra_create_args))
+ except Exception as e:
+        # Exception.message only exists on Python 2; fall back to repr()
+        msg = str(getattr(e, 'message', None) or repr(e))
+ module.fail_json(msg=msg)
+ else:
+ changed = True
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+
+            if not [s for s in servers
+                    if s.status not in FINAL_STATUSES]:
+                break
+ time.sleep(5)
+
+ success = []
+ error = []
+ timeout = []
+ for server in servers:
+ try:
+ server.get()
+ except Exception:
+ server.status = 'ERROR'
+ instance = rax_to_dict(server, 'server')
+ if server.status == 'ACTIVE' or not wait:
+ success.append(instance)
+ elif server.status == 'ERROR':
+ error.append(instance)
+ elif wait:
+ timeout.append(instance)
+
+ untouched = [rax_to_dict(s, 'server') for s in existing]
+ instances = success + untouched
+
+ results = {
+ 'changed': changed,
+ 'action': 'create',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to build'
+ elif error:
+ results['msg'] = 'Failed to build all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def delete(module, instance_ids=None, wait=True, wait_timeout=300, kept=None):
+ instance_ids = [] if instance_ids is None else instance_ids
+ kept = [] if kept is None else kept
+
+ cs = pyrax.cloudservers
+
+ changed = False
+ instances = {}
+ servers = []
+
+ for instance_id in instance_ids:
+ servers.append(cs.servers.get(instance_id))
+
+ for server in servers:
+ try:
+ server.delete()
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ else:
+ changed = True
+
+ instance = rax_to_dict(server, 'server')
+ instances[instance['id']] = instance
+
+ # If requested, wait for server deletion
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ for server in servers:
+ instance_id = server.id
+ try:
+ server.get()
+ except Exception:
+ instances[instance_id]['status'] = 'DELETED'
+ instances[instance_id]['rax_status'] = 'DELETED'
+
+            if not [s for s in instances.values()
+                    if s['status'] not in ('', 'DELETED', 'ERROR')]:
+                break
+
+ time.sleep(5)
+
+    timeout = [s for s in instances.values()
+               if s['status'] not in ('', 'DELETED', 'ERROR')]
+    error = [s for s in instances.values() if s['status'] == 'ERROR']
+    success = [s for s in instances.values()
+               if s['status'] in ('', 'DELETED')]
+
+ instances = [rax_to_dict(s, 'server') for s in kept]
+
+ results = {
+ 'changed': changed,
+ 'action': 'delete',
+ 'instances': instances,
+ 'success': success,
+ 'error': error,
+ 'timeout': timeout,
+ 'instance_ids': {
+ 'instances': [i['id'] for i in instances],
+ 'success': [i['id'] for i in success],
+ 'error': [i['id'] for i in error],
+ 'timeout': [i['id'] for i in timeout]
+ }
+ }
+
+ if timeout:
+ results['msg'] = 'Timeout waiting for all servers to delete'
+ elif error:
+ results['msg'] = 'Failed to delete all servers'
+
+ if 'msg' in results:
+ module.fail_json(**results)
+ else:
+ module.exit_json(**results)
+
+
+def cloudservers(module, state=None, name=None, flavor=None, image=None,
+ meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
+ disk_config=None, count=1, group=None, instance_ids=None,
+ exact_count=False, networks=None, count_offset=0,
+ auto_increment=False, extra_create_args=None, user_data=None,
+ config_drive=False, boot_from_volume=False,
+ boot_volume=None, boot_volume_size=None,
+ boot_volume_terminate=False):
+ meta = {} if meta is None else meta
+ files = {} if files is None else files
+ instance_ids = [] if instance_ids is None else instance_ids
+ networks = [] if networks is None else networks
+ extra_create_args = {} if extra_create_args is None else extra_create_args
+
+ cs = pyrax.cloudservers
+ cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present' or (state == 'absent' and instance_ids is None):
+ if not boot_from_volume and not boot_volume and not image:
+ module.fail_json(msg='image is required for the "rax" module')
+
+ for arg, value in dict(name=name, flavor=flavor).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax" module' %
+ arg)
+
+ if boot_from_volume and not image and not boot_volume:
+ module.fail_json(msg='image or boot_volume are required for the '
+ '"rax" with boot_from_volume')
+
+ if boot_from_volume and image and not boot_volume_size:
+ module.fail_json(msg='boot_volume_size is required for the "rax" '
+ 'module with boot_from_volume and image')
+
+ if boot_from_volume and image and boot_volume:
+ image = None
+
+ servers = []
+
+ # Add the group meta key
+ if group and 'group' not in meta:
+ meta['group'] = group
+ elif 'group' in meta and group is None:
+ group = meta['group']
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ # When using state=absent with group, the absent block won't match the
+ # names properly. Use the exact_count functionality to decrease the count
+ # to the desired level
+ was_absent = False
+ if group is not None and state == 'absent':
+ exact_count = True
+ state = 'present'
+ was_absent = True
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ # act on the state
+ if state == 'present':
+        # Idempotently ensure a specific count of servers
+ if exact_count is not False:
+ # See if we can find servers that match our options
+ if group is None:
+ module.fail_json(msg='"group" must be provided when using '
+ '"exact_count"')
+
+ if auto_increment:
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+                    if str(e).startswith('not all'):
+                        name = '%s%%d' % name
+                    else:
+                        module.fail_json(msg=str(e))
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
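+                # e.g. a name of 'test%03d.example.org' becomes the pattern
+                # 'test(\d+).example.org' (remaining characters are used
+                # verbatim, not re-escaped)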
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset, count_offset + count)
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ else: # Not auto incrementing
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ # available_numbers not needed here, we inspect auto_increment
+ # again later
+
+ # If state was absent but the count was changed,
+ # assume we only wanted to remove that number of instances
+ if was_absent:
+ diff = len(servers) - count
+ if diff < 0:
+ count = 0
+ else:
+ count = diff
+
+ if len(servers) > count:
+ # We have more servers than we need, set state='absent'
+ # and delete the extras, this should delete the oldest
+ state = 'absent'
+ kept = servers[:count]
+ del servers[:count]
+ instance_ids = []
+ for server in servers:
+ instance_ids.append(server.id)
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout, kept=kept)
+ elif len(servers) < count:
+ # we have fewer servers than we need
+ if auto_increment:
+ # auto incrementing server numbers
+ names = []
+ name_slice = count - len(servers)
+ numbers_to_use = available_numbers[:name_slice]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # We are not auto incrementing server numbers,
+ # create a list of 'name' that matches how many we need
+ names = [name] * (count - len(servers))
+ else:
+ # we have the right number of servers, just return info
+ # about all of the matched servers
+ instances = []
+ instance_ids = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+ instance_ids.append(server.id)
+ module.exit_json(changed=False, action=None,
+ instances=instances,
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+ else: # not called with exact_count=True
+ if group is not None:
+ if auto_increment:
+ # we are auto incrementing server numbers, but not with
+ # exact_count
+ numbers = set()
+
+ # See if the name is a printf like string, if not append
+ # %d to the end
+ try:
+ name % 0
+ except TypeError as e:
+                        if str(e).startswith('not all'):
+                            name = '%s%%d' % name
+                        else:
+                            module.fail_json(msg=str(e))
+
+ # regex pattern to match printf formatting
+ pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
+ for server in cs.servers.list():
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+ if server.metadata.get('group') == group:
+ servers.append(server)
+ match = re.search(pattern, server.name)
+ if match:
+ number = int(match.group(1))
+ numbers.add(number)
+
+ number_range = xrange(count_offset,
+ count_offset + count + len(numbers))
+ available_numbers = list(set(number_range)
+ .difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ # Not auto incrementing
+ names = [name] * count
+ else:
+ # No group was specified, and not using exact_count
+ # Perform more simplistic matching
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ servers = []
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if server.metadata != meta:
+ continue
+ servers.append(server)
+
+ if len(servers) >= count:
+ # We have more servers than were requested, don't do
+ # anything. Not running with exact_count=True, so we assume
+ # more is OK
+ instances = []
+ for server in servers:
+ instances.append(rax_to_dict(server, 'server'))
+
+ instance_ids = [i['id'] for i in instances]
+ module.exit_json(changed=False, action=None,
+ instances=instances, success=[], error=[],
+ timeout=[],
+ instance_ids={'instances': instance_ids,
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+            # We need more servers to reach our target; create names for
+            # them (we aren't performing auto_increment here)
+ names = [name] * (count - len(servers))
+
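+    # Build a Nova block-device-mapping-v2 entry when booting from a volume:
+    # either a new volume created from the given image, or an existing Cloud
+    # Block Storage volume looked up by name or id.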
+ block_device_mapping_v2 = []
+ if boot_from_volume:
+ mapping = {
+ 'boot_index': '0',
+ 'delete_on_termination': boot_volume_terminate,
+ 'destination_type': 'volume',
+ }
+ if image:
+ mapping.update({
+ 'uuid': image,
+ 'source_type': 'image',
+ 'volume_size': boot_volume_size,
+ })
+ image = None
+ elif boot_volume:
+ volume = rax_find_volume(module, pyrax, boot_volume)
+ mapping.update({
+ 'uuid': pyrax.utils.get_id(volume),
+ 'source_type': 'volume',
+ })
+ block_device_mapping_v2.append(mapping)
+
+ create(module, names=names, flavor=flavor, image=image,
+ meta=meta, key_name=key_name, files=files, wait=wait,
+ wait_timeout=wait_timeout, disk_config=disk_config, group=group,
+ nics=nics, extra_create_args=extra_create_args,
+ user_data=user_data, config_drive=config_drive,
+ existing=servers,
+ block_device_mapping_v2=block_device_mapping_v2)
+
+ elif state == 'absent':
+ if instance_ids is None:
+ # We weren't given an explicit list of server IDs to delete
+ # Let's match instead
+ search_opts = {
+ 'name': '^%s$' % name,
+ 'flavor': flavor
+ }
+ for server in cs.servers.list(search_opts=search_opts):
+ # Ignore DELETED servers
+ if server.status == 'DELETED':
+ continue
+
+ if not rax_find_server_image(module, server, image,
+ boot_volume):
+ continue
+
+ # Ignore servers with non matching metadata
+ if meta != server.metadata:
+ continue
+
+ servers.append(server)
+
+ # Build a list of server IDs to delete
+ instance_ids = []
+ for server in servers:
+ if len(instance_ids) < count:
+ instance_ids.append(server.id)
+ else:
+ break
+
+ if not instance_ids:
+ # No server IDs were matched for deletion, or no IDs were
+ # explicitly provided, just exit and don't do anything
+ module.exit_json(changed=False, action=None, instances=[],
+ success=[], error=[], timeout=[],
+ instance_ids={'instances': [],
+ 'success': [], 'error': [],
+ 'timeout': []})
+
+ delete(module, instance_ids=instance_ids, wait=wait,
+ wait_timeout=wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ auto_increment=dict(default=True, type='bool'),
+ boot_from_volume=dict(default=False, type='bool'),
+ boot_volume=dict(type='str'),
+ boot_volume_size=dict(type='int', default=100),
+ boot_volume_terminate=dict(type='bool', default=False),
+ config_drive=dict(default=False, type='bool'),
+ count=dict(default=1, type='int'),
+ count_offset=dict(default=1, type='int'),
+ disk_config=dict(choices=['auto', 'manual']),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
+ files=dict(type='dict', default={}),
+ flavor=dict(),
+ group=dict(),
+ image=dict(),
+ instance_ids=dict(type='list', elements='str'),
+ key_name=dict(aliases=['keypair']),
+ meta=dict(type='dict', default={}),
+ name=dict(),
+ networks=dict(type='list', elements='str', default=['public', 'private']),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ auto_increment = module.params.get('auto_increment')
+ boot_from_volume = module.params.get('boot_from_volume')
+ boot_volume = module.params.get('boot_volume')
+ boot_volume_size = module.params.get('boot_volume_size')
+ boot_volume_terminate = module.params.get('boot_volume_terminate')
+ config_drive = module.params.get('config_drive')
+ count = module.params.get('count')
+ count_offset = module.params.get('count_offset')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ group = module.params.get('group')
+ image = module.params.get('image')
+ instance_ids = module.params.get('instance_ids')
+ key_name = module.params.get('key_name')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloudservers(module, state=state, name=name, flavor=flavor,
+ image=image, meta=meta, key_name=key_name, files=files,
+ wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
+ count=count, group=group, instance_ids=instance_ids,
+ exact_count=exact_count, networks=networks,
+ count_offset=count_offset, auto_increment=auto_increment,
+ extra_create_args=extra_create_args, user_data=user_data,
+ config_drive=config_drive, boot_from_volume=boot_from_volume,
+ boot_volume=boot_volume, boot_volume_size=boot_volume_size,
+ boot_volume_terminate=boot_volume_terminate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs.py b/ansible_collections/community/general/plugins/modules/rax_cbs.py
new file mode 100644
index 000000000..c99626904
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_cbs.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ description:
+ type: str
+ description:
+ - Description to give the volume being created.
+ image:
+ type: str
+ description:
+ - Image to use for bootable volumes. Can be an C(id), C(human_id) or
+ C(name). This option requires C(pyrax>=1.9.3).
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of metadata to associate with the volume.
+ name:
+ type: str
+ description:
+ - Name to give the volume being created.
+ required: true
+ size:
+ type: int
+ description:
+ - Size of the volume to create in Gigabytes.
+ default: 100
+ snapshot_id:
+ type: str
+ description:
+ - The id of the snapshot to create the volume from.
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource.
+ choices:
+ - present
+ - absent
+ default: present
+ volume_type:
+ type: str
+ description:
+ - Type of the volume being created.
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ wait:
+ description:
+ - Wait for the volume to be in state C(available) before returning.
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds.
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: true
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
+'''
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
+ rax_required_together, rax_to_dict, setup_rax_module)
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image):
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if image:
+ # pyrax<1.9.3 did not have support for specifying an image when
+ # creating a volume which is required for bootable volumes
+ if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
+ module.fail_json(msg='Creating a bootable volume requires '
+ 'pyrax>=1.9.3')
+ image = rax_find_image(module, pyrax, image)
+
+ volume = rax_find_volume(module, pyrax, name)
+
+ if state == 'present':
+ if not volume:
+ kwargs = dict()
+ if image:
+ kwargs['image'] = image
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id, **kwargs)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ instance = rax_to_dict(volume)
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ instance = rax_to_dict(volume)
+ try:
+ volume.delete()
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(type='str'),
+ image=dict(type='str'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ image = module.params.get('image')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout,
+ image)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
new file mode 100644
index 000000000..8f540fa0f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_cbs_attachments.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ device:
+ type: str
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde.
+      - Before Ansible 2.4 this was a required field. Now it can be left as null to auto-assign the device name.
+ volume:
+ type: str
+ description:
+ - Name or id of the volume to attach/detach
+ required: true
+ server:
+ type: str
+ description:
+ - Name or id of the server to attach/detach
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: true
+ state: present
+ register: my_volume
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (NON_CALLABLES,
+ rax_argument_spec,
+ rax_find_server,
+ rax_find_volume,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ volume = rax_find_volume(module, pyrax, volume)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ volume.get()
+
+ for key, value in vars(volume).items():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ volume.get()
+ result['volume'] = rax_to_dict(volume)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ server = rax_find_server(module, pyrax, server)
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ volume.get()
+ changed = True
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ result = dict(changed=changed, volume=rax_to_dict(volume))
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=False),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb.py b/ansible_collections/community/general/plugins/modules/rax_cdb.py
new file mode 100644
index 000000000..cf0366d3b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb
+short_description: Create/delete or resize a Rackspace Cloud Databases instance
+description:
+  - Creates, deletes, or resizes a Rackspace Cloud Databases instance
+    and optionally waits for it to be 'running'. The name option needs to be
+    unique since it is used to identify the instance.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of the databases server instance
+ required: true
+ flavor:
+ type: int
+ description:
+      - flavor to use for the instance, from 1 to 6 (i.e. 512MB to 16GB)
+ default: 1
+ volume:
+ type: int
+ description:
+      - Volume size of the database, from 1 to 150GB
+ default: 2
+ cdb_type:
+ type: str
+ description:
+ - type of instance (i.e. MySQL, MariaDB, Percona)
+ default: MySQL
+ aliases: ['type']
+ cdb_version:
+ type: str
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ - "The available choices are: C(5.1), C(5.6) and C(10)."
+ default: '5.6'
+ aliases: ['version']
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a Cloud Databases instance
+  hosts: localhost
+  gather_facts: false
+ tasks:
+ - name: Server build request
+ local_action:
+ module: rax_cdb
+ credentials: ~/.raxpub
+ region: IAD
+ name: db-server1
+ flavor: 1
+ volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
+ wait: true
+ state: present
+ register: rax_db_server
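+
+    # Illustrative sketch, not part of the original examples: re-running the
+    # module with a larger flavor and volume resizes the existing instance
+    # (the values below are assumptions for illustration).
+    - name: Resize the instance
+      local_action:
+        module: rax_cdb
+        credentials: ~/.raxpub
+        region: IAD
+        name: db-server1
+        flavor: 2
+        volume: 4
+        wait: true
+        state: present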
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_instance(name):
+
+ cdb = pyrax.cloud_databases
+ instances = cdb.list()
+ if instances:
+ for instance in instances:
+ if instance.name == name:
+ return instance
+ return False
+
+
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ for arg, value in dict(name=name, flavor=flavor,
+ volume=volume, type=cdb_type, version=cdb_version
+ ).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb"'
+ ' module' % arg)
+
+    if not 1 <= volume <= 150:
+ module.fail_json(msg='volume is required to be between 1 and 150')
+
+ cdb = pyrax.cloud_databases
+
+ flavors = []
+ for item in cdb.list_flavors():
+ flavors.append(item.id)
+
+    if flavor not in flavors:
+        module.fail_json(msg='invalid flavor reference "%s"' % str(flavor))
+
+ changed = False
+
+ instance = find_instance(name)
+
+ if not instance:
+ action = 'create'
+ try:
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ else:
+ action = None
+
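+        # An existing instance can only be resized: the volume may grow and
+        # the flavor may change, but the volume can never shrink.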
+ if instance.volume.size != volume:
+ action = 'resize'
+ if instance.volume.size > volume:
+ module.fail_json(changed=False, action=action,
+ msg='The new volume size must be larger than '
+ 'the current volume size',
+ cdb=rax_to_dict(instance))
+ instance.resize_volume(volume)
+ changed = True
+
+ if int(instance.flavor.id) != flavor:
+ action = 'resize'
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+ instance.resize(flavor)
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'ACTIVE':
+ module.fail_json(changed=changed, action=action,
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be created' % name)
+
+ module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
+
+
+def delete_instance(module, name, wait, wait_timeout):
+
+ if not name:
+ module.fail_json(msg='name is required for the "rax_cdb" module')
+
+ changed = False
+
+ instance = find_instance(name)
+ if not instance:
+ module.exit_json(changed=False, action='delete')
+
+ try:
+ instance.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ if wait:
+ pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
+ attempts=wait_timeout)
+
+ if wait and instance.status != 'SHUTDOWN':
+ module.fail_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance),
+ msg='Timeout waiting for "%s" databases instance to '
+ 'be deleted' % name)
+
+ module.exit_json(changed=changed, action='delete',
+ cdb=rax_to_dict(instance))
+
+
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
+
+ # act on the state
+ if state == 'present':
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
+ elif state == 'absent':
+ delete_instance(module, name, wait, wait_timeout)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ flavor=dict(type='int', default=1),
+ volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL', aliases=['type']),
+ cdb_version=dict(type='str', default='5.6', aliases=['version']),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ flavor = module.params.get('flavor')
+ volume = module.params.get('volume')
+ cdb_type = module.params.get('cdb_type')
+ cdb_version = module.params.get('cdb_version')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_database.py b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
new file mode 100644
index 000000000..35b076aad
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb_database.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_cdb_database
+short_description: Create / delete a database in Rackspace Cloud Databases
+description:
+  - Create / delete a database in Rackspace Cloud Databases.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give to the database
+ required: true
+ character_set:
+ type: str
+ description:
+ - Set of symbols and encodings
+ default: 'utf8'
+ collate:
+ type: str
+ description:
+ - Set of rules for comparing characters in a character set
+ default: 'utf8_general_ci'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a database in Cloud Databases
+  hosts: localhost
+  gather_facts: false
+  tasks:
+ - name: Database build request
+ local_action:
+ module: rax_cdb_database
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ name: db1
+ state: present
+ register: rax_db_database
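+
+    # Illustrative sketch, not part of the original examples: removing the
+    # same database again.
+    - name: Database delete request
+      local_action:
+        module: rax_cdb_database
+        credentials: ~/.raxpub
+        region: IAD
+        cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+        name: db1
+        state: absent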
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_database(instance, name):
+ try:
+ database = instance.get_database(name)
+ except Exception:
+ return False
+
+ return database
+
+
+def save_database(module, cdb_id, name, character_set, collate):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if not database:
+ try:
+ database = instance.create_database(name=name,
+ character_set=character_set,
+ collate=collate)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='create',
+ database=rax_to_dict(database))
+
+
+def delete_database(module, cdb_id, name):
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ database = find_database(instance, name)
+
+ if database:
+ try:
+ database.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete',
+ database=rax_to_dict(database))
+
+
+def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
+
+ # act on the state
+ if state == 'present':
+ save_database(module, cdb_id, name, character_set, collate)
+ elif state == 'absent':
+ delete_database(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ character_set=dict(type='str', default='utf8'),
+ collate=dict(type='str', default='utf8_general_ci'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('name')
+ character_set = module.params.get('character_set')
+ collate = module.params.get('collate')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_database(module, state, cdb_id, name, character_set, collate)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_cdb_user.py b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
new file mode 100644
index 000000000..a2cd675d9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_cdb_user.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_cdb_user
+short_description: Create / delete a Rackspace Cloud Database user
+description:
+  - Create / delete a user in Rackspace Cloud Databases.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ cdb_id:
+ type: str
+ description:
+ - The databases server UUID
+ required: true
+ db_username:
+ type: str
+ description:
+ - Name of the database user
+ required: true
+ db_password:
+ type: str
+ description:
+ - Database user password
+ required: true
+ databases:
+ type: list
+ elements: str
+ description:
+ - Name of the databases that the user can access
+ default: []
+ host:
+ type: str
+ description:
+ - Specifies the host from which a user is allowed to connect to
+ the database. Possible values are a string containing an IPv4 address
+ or "%" to allow connecting from any host
+ default: '%'
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: "Simon JAILLET (@jails)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a user in Cloud Databases
+  hosts: localhost
+  gather_facts: false
+  tasks:
+ - name: User build request
+ local_action:
+ module: rax_cdb_user
+ credentials: ~/.raxpub
+ region: IAD
+ cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+ db_username: user1
+ db_password: user1
+ databases: ['db1']
+ state: present
+ register: rax_db_user
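+
+    # Illustrative sketch, not part of the original examples: updating the
+    # same user to restrict the allowed host and grant an extra database
+    # (db2 and the address are assumptions for illustration).
+    - name: User update request
+      local_action:
+        module: rax_cdb_user
+        credentials: ~/.raxpub
+        region: IAD
+        cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
+        db_username: user1
+        db_password: user1
+        databases: ['db1', 'db2']
+        host: 10.0.0.12
+        state: present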
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, rax_to_dict, setup_rax_module
+
+
+def find_user(instance, name):
+ try:
+ user = instance.get_user(name)
+ except Exception:
+ return False
+
+ return user
+
+
+def save_user(module, cdb_id, name, password, databases, host):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user" '
+ 'module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if not user:
+ action = 'create'
+ try:
+ user = instance.create_user(name=name,
+ password=password,
+ database_names=databases,
+ host=host)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+ else:
+ action = 'update'
+
+ if user.host != host:
+ changed = True
+
+ user.update(password=password, host=host)
+
+ former_dbs = set([item.name for item in user.list_user_access()])
+ databases = set(databases)
+
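+        # Reconcile database access: revoke grants that are no longer listed,
+        # then grant access to any newly listed databases.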
+ if databases != former_dbs:
+ try:
+ revoke_dbs = [db for db in former_dbs if db not in databases]
+ user.revoke_user_access(db_names=revoke_dbs)
+
+ new_dbs = [db for db in databases if db not in former_dbs]
+ user.grant_user_access(db_names=new_dbs)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
+
+
+def delete_user(module, cdb_id, name):
+
+ for arg, value in dict(cdb_id=cdb_id, name=name).items():
+ if not value:
+ module.fail_json(msg='%s is required for the "rax_cdb_user"'
+ ' module' % arg)
+
+ cdb = pyrax.cloud_databases
+
+ try:
+ instance = cdb.get(cdb_id)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ changed = False
+
+ user = find_user(instance, name)
+
+ if user:
+ try:
+ user.delete()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ changed = True
+
+ module.exit_json(changed=changed, action='delete')
+
+
+def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
+
+ # act on the state
+ if state == 'present':
+ save_user(module, cdb_id, name, password, databases, host)
+ elif state == 'absent':
+ delete_user(module, cdb_id, name)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cdb_id=dict(type='str', required=True),
+ db_username=dict(type='str', required=True),
+ db_password=dict(type='str', required=True, no_log=True),
+ databases=dict(type='list', elements='str', default=[]),
+ host=dict(type='str', default='%'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cdb_id = module.params.get('cdb_id')
+ name = module.params.get('db_username')
+ password = module.params.get('db_password')
+ databases = module.params.get('databases')
+ host = to_text(module.params.get('host'), errors='surrogate_or_strict')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+ rax_cdb_user(module, state, cdb_id, name, password, databases, host)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb.py b/ansible_collections/community/general/plugins/modules/rax_clb.py
new file mode 100644
index 000000000..9a4ca4f89
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_clb.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb
+short_description: Create / delete a load balancer in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud load balancer.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ algorithm:
+ type: str
+ description:
+ - algorithm for the balancer being created
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
+ default: LEAST_CONNECTIONS
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of metadata to associate with the instance
+ name:
+ type: str
+ description:
+ - Name to give the load balancer
+ required: true
+ port:
+ type: int
+ description:
+ - Port for the balancer being created
+ default: 80
+ protocol:
+ type: str
+ description:
+ - Protocol for the balancer being created
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
+ default: HTTP
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ timeout:
+ type: int
+ description:
+ - timeout for communication between the balancer and the node
+ default: 30
+ type:
+ type: str
+ description:
+ - type of interface for the balancer being created
+ choices:
+ - PUBLIC
+ - SERVICENET
+ default: PUBLIC
+ vip_id:
+ type: str
+ description:
+ - Virtual IP ID to use when creating the load balancer for purposes of
+ sharing an IP with another load balancer of another protocol
+ wait:
+ description:
+ - wait for the balancer to be in state 'running' before returning
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a Load Balancer
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Balancer create request
+ local_action:
+ module: rax_clb
+ credentials: ~/.raxpub
+ name: my-lb
+ port: 8080
+ protocol: HTTP
+ type: SERVICENET
+ timeout: 30
+ region: DFW
+ wait: true
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_lb
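+
+    # Illustrative sketch, not part of the original examples: deleting the
+    # same balancer by name.
+    - name: Load Balancer delete request
+      local_action:
+        module: rax_clb
+        credentials: ~/.raxpub
+        name: my-lb
+        region: DFW
+        wait: true
+        state: absent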
+'''
+
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (CLB_ALGORITHMS,
+ CLB_PROTOCOLS,
+ rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id):
+ if int(timeout) < 30:
+ module.fail_json(msg='"timeout" must be greater than or equal to 30')
+
+ changed = False
+ balancers = []
+
+ clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
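+    # The load balancer listing is paginated; keep requesting pages using the
+    # last seen balancer id as a marker until an (almost) empty page returns.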
+ balancer_list = clb.list()
+ while balancer_list:
+ retrieved = clb.list(marker=balancer_list.pop().id)
+ balancer_list.extend(retrieved)
+ if len(retrieved) < 2:
+ break
+
+ for balancer in balancer_list:
+ if name != balancer.name and name != balancer.id:
+ continue
+
+ balancers.append(balancer)
+
+ if len(balancers) > 1:
+ module.fail_json(msg='Multiple Load Balancers were matched by name, '
+ 'try using the Load Balancer ID instead')
+
+ if state == 'present':
+ if isinstance(meta, dict):
+ metadata = [dict(key=k, value=v) for k, v in meta.items()]
+
+ if not balancers:
+ try:
+ virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
+ balancer = clb.create(name, metadata=metadata, port=port,
+ algorithm=algorithm, protocol=protocol,
+ timeout=timeout, virtual_ips=virtual_ips)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ balancer = balancers[0]
+ setattr(balancer, 'metadata',
+ [dict(key=k, value=v) for k, v in
+ balancer.get_metadata().items()])
+ atts = {
+ 'name': name,
+ 'algorithm': algorithm,
+ 'port': port,
+ 'protocol': protocol,
+ 'timeout': timeout
+ }
+ for att, value in atts.items():
+ current = getattr(balancer, att)
+ if current != value:
+ changed = True
+
+ if changed:
+ balancer.update(**atts)
+
+ if balancer.metadata != metadata:
+ balancer.set_metadata(meta)
+ changed = True
+
+ virtual_ips = [clb.VirtualIP(type=vip_type)]
+ current_vip_types = set([v.type for v in balancer.virtual_ips])
+ vip_types = set([v.type for v in virtual_ips])
+ if current_vip_types != vip_types:
+ module.fail_json(msg='Load balancer Virtual IP type cannot '
+ 'be changed')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ instance = rax_to_dict(balancer, 'clb')
+
+ result = dict(changed=changed, balancer=instance)
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if balancers:
+ balancer = balancers[0]
+ try:
+ balancer.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ instance = rax_to_dict(balancer, 'clb')
+
+ if wait:
+ attempts = wait_timeout // 5
+ pyrax.utils.wait_until(balancer, 'status', ('DELETED'),
+ interval=5, attempts=attempts)
+ else:
+ instance = {}
+
+ module.exit_json(changed=changed, balancer=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ algorithm=dict(choices=CLB_ALGORITHMS,
+ default='LEAST_CONNECTIONS'),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ port=dict(type='int', default=80),
+ protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
+ state=dict(default='present', choices=['present', 'absent']),
+ timeout=dict(type='int', default=30),
+ type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
+ vip_id=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ algorithm = module.params.get('algorithm')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ state = module.params.get('state')
+ timeout = int(module.params.get('timeout'))
+ vip_id = module.params.get('vip_id')
+ vip_type = module.params.get('type')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
+ vip_type, timeout, wait, wait_timeout, vip_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
new file mode 100644
index 000000000..219f0c2ba
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_clb_nodes.py
@@ -0,0 +1,293 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_clb_nodes
+short_description: Add, modify and remove nodes from a Rackspace Cloud Load Balancer
+description:
+ - Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ address:
+ type: str
+ required: false
+ description:
+ - IP address or domain name of the node
+ condition:
+ type: str
+ required: false
+ choices:
+ - enabled
+ - disabled
+ - draining
+ description:
+ - Condition for the node, which determines its role within the load
+ balancer
+ load_balancer_id:
+ type: int
+ required: true
+ description:
+      - Load balancer ID
+ node_id:
+ type: int
+ required: false
+ description:
+      - Node ID
+ port:
+ type: int
+ required: false
+ description:
+ - Port number of the load balanced service on the node
+ state:
+ type: str
+ required: false
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Indicate desired state of the node
+ type:
+ type: str
+ required: false
+ choices:
+ - primary
+ - secondary
+ description:
+ - Type of node
+ wait:
+ required: false
+ default: false
+ type: bool
+ description:
+ - Wait for the load balancer to become active before returning
+ wait_timeout:
+ type: int
+ required: false
+ default: 30
+ description:
+ - How long to wait before giving up and returning an error
+ weight:
+ type: int
+ required: false
+ description:
+ - Weight of node
+ virtualenv:
+ type: path
+ description:
+ - Virtualenv to execute this module in
+author: "Lukasz Kawczynski (@neuroid)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Add a new node to the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ address: 10.2.2.3
+ port: 80
+ condition: enabled
+ type: primary
+ wait: true
+ credentials: /path/to/credentials
+
+- name: Drain connections from a node
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ condition: draining
+ wait: true
+ credentials: /path/to/credentials
+
+- name: Remove a node from the load balancer
+ local_action:
+ module: rax_clb_nodes
+ load_balancer_id: 71
+ node_id: 410
+ state: absent
+ wait: true
+ credentials: /path/to/credentials
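+
+# Illustrative sketch, not part of the original examples: changing the weight
+# of an existing node, which matters for the WEIGHTED_* balancer algorithms
+# (the weight value is an assumption for illustration).
+- name: Change the weight of a node
+  local_action:
+    module: rax_clb_nodes
+    load_balancer_id: 71
+    node_id: 410
+    weight: 5
+    credentials: /path/to/credentials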
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_clb_node_to_dict, rax_required_together, setup_rax_module
+
+
+def _activate_virtualenv(path):
+ activate_this = os.path.join(path, 'bin', 'activate_this.py')
+ with open(activate_this) as f:
+ code = compile(f.read(), activate_this, 'exec')
+        # activate_this.py relies on __file__ pointing at itself, so supply
+        # it explicitly instead of leaking this module's globals.
+        exec(code, dict(__file__=activate_this))
+
+
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
+ return node
+
+ return None
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ condition=dict(choices=['enabled', 'disabled', 'draining']),
+ load_balancer_id=dict(required=True, type='int'),
+ node_id=dict(type='int'),
+ port=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent']),
+ type=dict(choices=['primary', 'secondary']),
+ virtualenv=dict(type='path'),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=30, type='int'),
+ weight=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params['address']
+ condition = (module.params['condition'] and
+ module.params['condition'].upper())
+ load_balancer_id = module.params['load_balancer_id']
+ node_id = module.params['node_id']
+ port = module.params['port']
+ state = module.params['state']
+ typ = module.params['type'] and module.params['type'].upper()
+ virtualenv = module.params['virtualenv']
+ wait = module.params['wait']
+ wait_timeout = module.params['wait_timeout'] or 1
+ weight = module.params['weight']
+
+ if virtualenv:
+ try:
+ _activate_virtualenv(virtualenv)
+ except IOError as e:
+ module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
+ virtualenv, e))
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.cloud_loadbalancers:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ node = _get_node(lb, node_id, address, port)
+
+ result = rax_clb_node_to_dict(node)
+
+ if state == 'absent':
+ if not node: # Removing a non-existent node
+ module.exit_json(changed=False, state=state)
+ try:
+ lb.delete_node(node)
+ result = {}
+ except pyrax.exc.NotFound:
+ module.exit_json(changed=False, state=state)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # present
+ if not node:
+ if node_id: # Updating a non-existent node
+ msg = 'Node %d not found' % node_id
+ if lb.nodes:
+ msg += (' (available nodes: %s)' %
+ ', '.join([str(x.id) for x in lb.nodes]))
+ module.fail_json(msg=msg)
+ else: # Creating a new node
+ try:
+ node = pyrax.cloudloadbalancers.Node(
+ address=address, port=port, condition=condition,
+ weight=weight, type=typ)
+ resp, body = lb.add_nodes([node])
+ result.update(body['nodes'][0])
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ else: # Updating an existing node
+ mutable = {
+ 'condition': condition,
+ 'type': typ,
+ 'weight': weight,
+ }
+
+ for name in list(mutable):
+ value = mutable[name]
+ if value is None or value == getattr(node, name):
+ mutable.pop(name)
+
+ if not mutable:
+ module.exit_json(changed=False, state=state, node=result)
+
+ try:
+ # The diff has to be set explicitly to update node's weight and
+ # type; this should probably be fixed in pyrax
+ lb.update_node(node, diff=mutable)
+ result.update(mutable)
+ except pyrax.exc.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if wait:
+ pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
+ attempts=wait_timeout)
+ if lb.status != 'ACTIVE':
+ module.fail_json(
+ msg='Load balancer not active after %ds (current status: %s)' %
+ (wait_timeout, lb.status.lower()))
+
+ kwargs = {'node': result} if result else {}
+ module.exit_json(changed=True, state=state, **kwargs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
new file mode 100644
index 000000000..5dca9d3ec
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_clb_ssl.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer
+description:
+ - Set up, reconfigure, or remove SSL termination for an existing load balancer.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ loadbalancer:
+ type: str
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ type: str
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+      - If set to "false", temporarily disable SSL termination without
+        discarding existing credentials.
+ default: true
+ type: bool
+ private_key:
+ type: str
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ type: str
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ type: str
+ description:
+      - One or more intermediate certificate authorities as a string in PEM
+        format, concatenated into a single string.
+ secure_port:
+ type: int
+ description:
+      - The port on which to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ type: bool
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+      - Requires "secure_traffic_only" to be true. Incurs an implicit wait if
+        SSL termination is also applied or removed.
+ type: bool
+ wait:
+ description:
+      - Wait for the balancer to be in state "running" before returning.
+ default: false
+ type: bool
+ wait_timeout:
+ type: int
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ community.general.rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ community.general.rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
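+
+# Illustrative sketch, not part of the original examples: combining
+# secure_traffic_only with https_redirect to force all traffic onto HTTPS.
+- name: Enable SSL termination with an HTTPS redirect
+  community.general.rax_clb_ssl:
+    loadbalancer: the_loadbalancer
+    state: present
+    private_key: "{{ lookup('file', 'credentials/server.key') }}"
+    certificate: "{{ lookup('file', 'credentials/server.crt') }}"
+    secure_traffic_only: true
+    https_redirect: true
+    wait: true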
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
+ certificate, intermediate_certificate, secure_port,
+ secure_traffic_only, https_redirect,
+ wait, wait_timeout):
+ # Validate arguments.
+
+ if state == 'present':
+ if not private_key:
+ module.fail_json(msg="private_key must be provided.")
+ else:
+ private_key = private_key.strip()
+
+ if not certificate:
+ module.fail_json(msg="certificate must be provided.")
+ else:
+ certificate = certificate.strip()
+
+ attempts = wait_timeout // 5
+
+ # Locate the load balancer.
+
+ balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ existing_ssl = balancer.get_ssl_termination()
+
+ changed = False
+
+ if state == 'present':
+ # Apply or reconfigure SSL termination on the load balancer.
+ ssl_attrs = dict(
+ securePort=secure_port,
+ privatekey=private_key,
+ certificate=certificate,
+ intermediateCertificate=intermediate_certificate,
+ enabled=enabled,
+ secureTrafficOnly=secure_traffic_only
+ )
+
+ needs_change = False
+
+ if existing_ssl:
+ for ssl_attr, value in ssl_attrs.items():
+ if ssl_attr == 'privatekey':
+ # The private key is not included in get_ssl_termination's
+ # output (as it shouldn't be). Also, if you're changing the
+ # private key, you'll also be changing the certificate,
+ # so we don't lose anything by not checking it.
+ continue
+
+ if value is not None and existing_ssl.get(ssl_attr) != value:
+ # module.fail_json(msg='Unnecessary change', attr=ssl_attr, value=value, existing=existing_ssl.get(ssl_attr))
+ needs_change = True
+ else:
+ needs_change = True
+
+ if needs_change:
+ try:
+ balancer.add_ssl_termination(**ssl_attrs)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+ elif state == 'absent':
+ # Remove SSL termination if it's already configured.
+ if existing_ssl:
+ try:
+ balancer.delete_ssl_termination()
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if https_redirect is not None and balancer.httpsRedirect != https_redirect:
+ if changed:
+ # This wait is unavoidable because load balancers are immutable
+ # while the SSL termination changes above are being applied.
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ try:
+ balancer.update(httpsRedirect=https_redirect)
+ except pyrax.exceptions.PyraxException as e:
+ module.fail_json(msg='%s' % e.message)
+ changed = True
+
+ if changed and wait:
+ pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
+
+ balancer.get()
+ new_ssl_termination = balancer.get_ssl_termination()
+
+ # Intentionally omit the private key from the module output, so you don't
+ # accidentally echo it with `ansible-playbook -v` or `debug`, and the
+ # certificate, which is just long. Convert other attributes to snake_case
+ # and include https_redirect at the top-level.
+ if new_ssl_termination:
+ new_ssl = dict(
+ enabled=new_ssl_termination['enabled'],
+ secure_port=new_ssl_termination['securePort'],
+ secure_traffic_only=new_ssl_termination['secureTrafficOnly']
+ )
+ else:
+ new_ssl = None
+
+ result = dict(
+ changed=changed,
+ https_redirect=balancer.httpsRedirect,
+ ssl_termination=new_ssl,
+ balancer=rax_to_dict(balancer, 'clb')
+ )
+ success = True
+
+ if balancer.status == 'ERROR':
+ result['msg'] = '%s failed to build' % balancer.id
+ success = False
+ elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
+ result['msg'] = 'Timeout waiting on %s' % balancer.id
+ success = False
+
+ if success:
+ module.exit_json(**result)
+ else:
+ module.fail_json(**result)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(dict(
+ loadbalancer=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ enabled=dict(type='bool', default=True),
+ private_key=dict(no_log=True),
+ certificate=dict(),
+ intermediate_certificate=dict(),
+ secure_port=dict(type='int', default=443),
+ secure_traffic_only=dict(type='bool', default=False),
+ https_redirect=dict(type='bool'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module.')
+
+ loadbalancer = module.params.get('loadbalancer')
+ state = module.params.get('state')
+ enabled = module.boolean(module.params.get('enabled'))
+ private_key = module.params.get('private_key')
+ certificate = module.params.get('certificate')
+ intermediate_certificate = module.params.get('intermediate_certificate')
+ secure_port = module.params.get('secure_port')
+ secure_traffic_only = module.boolean(module.params.get('secure_traffic_only'))
+ https_redirect = module.boolean(module.params.get('https_redirect'))
+ wait = module.boolean(module.params.get('wait'))
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_load_balancer_ssl(
+ module, loadbalancer, state, enabled, private_key, certificate,
+ intermediate_certificate, secure_port, secure_traffic_only,
+ https_redirect, wait, wait_timeout
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns.py b/ansible_collections/community/general/plugins/modules/rax_dns.py
new file mode 100644
index 000000000..e70b76914
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_dns.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns
+short_description: Manage domains on Rackspace Cloud DNS
+description:
+ - Manage domains on Rackspace Cloud DNS.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ comment:
+ type: str
+ description:
+ - Brief description of the domain. Maximum length of 160 characters
+ email:
+ type: str
+ description:
+ - Email address of the domain administrator
+ name:
+ type: str
+ description:
+ - Domain name to create
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of domain in seconds
+ default: 3600
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create domain
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Domain create request
+ local_action:
+ module: rax_dns
+ credentials: ~/.raxpub
+ name: example.org
+ email: admin@example.org
+ register: rax_dns
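+
+# Illustrative sketch, not part of the original examples: deleting the domain,
+# with "serial: 1" as recommended in the notes to stay under the CloudDNS API
+# request limit.
+- name: Delete domain
+  hosts: all
+  serial: 1
+  gather_facts: false
+  tasks:
+    - name: Domain delete request
+      local_action:
+        module: rax_dns
+        credentials: ~/.raxpub
+        name: example.org
+        state: absent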
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns(module, comment, email, name, state, ttl):
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not email:
+ module.fail_json(msg='An "email" attribute is required for '
+ 'creating a domain')
+
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ domain = dns.create(name=name, emailAddress=email, ttl=ttl,
+ comment=comment)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(domain, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(domain, 'ttl', None):
+ update['ttl'] = ttl
+ if email != getattr(domain, 'emailAddress', None):
+ update['emailAddress'] = email
+
+ if update:
+ try:
+ domain.update(**update)
+ changed = True
+ domain.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=name)
+ except pyrax.exceptions.NotFound:
+ domain = {}
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if domain:
+ try:
+ domain.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, domain=rax_to_dict(domain))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ email=dict(),
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ email = module.params.get('email')
+ name = module.params.get('name')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+
+ setup_rax_module(module, pyrax, False)
+
+ rax_dns(module, comment, email, name, state, ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_dns_record.py b/ansible_collections/community/general/plugins/modules/rax_dns_record.py
new file mode 100644
index 000000000..fd3ad47ce
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_dns_record.py
@@ -0,0 +1,362 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_dns_record
+short_description: Manage DNS records on Rackspace Cloud DNS
+description:
+ - Manage DNS records on Rackspace Cloud DNS.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ comment:
+ type: str
+ description:
+      - Brief description of the record. Maximum length of 160 characters
+ data:
+ type: str
+ description:
+ - IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
+ SRV/TXT
+ required: true
+ domain:
+ type: str
+ description:
+ - Domain name to create the record in. This is an invalid option when
+ type=PTR
+ loadbalancer:
+ type: str
+ description:
+ - Load Balancer ID to create a PTR record for. Only used with type=PTR
+ name:
+ type: str
+ description:
+ - FQDN record name to create
+ required: true
+ overwrite:
+ description:
+      - Add new records if data doesn't match, instead of updating the existing
+        record with a matching name. If there are already multiple records with
+        a matching name and overwrite=true, this module will fail.
+ default: true
+ type: bool
+ priority:
+ type: int
+ description:
+ - Required for MX and SRV records, but forbidden for other record types.
+ If specified, must be an integer from 0 to 65535.
+ server:
+ type: str
+ description:
+ - Server ID to create a PTR record for. Only used with type=PTR
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ ttl:
+ type: int
+ description:
+ - Time to live of record in seconds
+ default: 3600
+ type:
+ type: str
+ description:
+ - DNS record type
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
+ - PTR
+ required: true
+notes:
+ - "It is recommended that plays utilizing this module be run with
+ C(serial: 1) to avoid exceeding the API request limit imposed by
+ the Rackspace CloudDNS API"
+  - To manipulate a C(PTR) record, either C(loadbalancer) or C(server) must be
+    supplied
+ - As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
+ - C(PTR) record support was added in version 1.7
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create DNS Records
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Create A record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ domain: example.org
+ name: www.example.org
+ data: "{{ rax_accessipv4 }}"
+ type: A
+ register: a_record
+
+ - name: Create PTR record
+ local_action:
+ module: rax_dns_record
+ credentials: ~/.raxpub
+ server: "{{ rax_id }}"
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ register: ptr_record
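+
+    # Illustrative sketch, not part of the original examples: MX records need
+    # an explicit priority (mail.example.org is an assumption for
+    # illustration).
+    - name: Create MX record
+      local_action:
+        module: rax_dns_record
+        credentials: ~/.raxpub
+        domain: example.org
+        name: example.org
+        data: mail.example.org
+        type: MX
+        priority: 10
+      register: mx_record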
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_find_loadbalancer,
+ rax_find_server,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
+ name=None, server=None, state='present', ttl=7200):
+ changed = False
+ results = []
+
+ dns = pyrax.cloud_dns
+
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if loadbalancer:
+ item = rax_find_loadbalancer(module, pyrax, loadbalancer)
+ elif server:
+ item = rax_find_server(module, pyrax, server)
+
+ if state == 'present':
+ current = dns.list_ptr_records(item)
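+        # Look for an existing PTR record with the same data: update it in
+        # place when the name or TTL differ, otherwise report no change.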
+ for record in current:
+ if record.data == data:
+ if record.ttl != ttl or record.name != name:
+ try:
+ dns.update_ptr_record(item, record, name, data, ttl)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+ record.ttl = ttl
+ record.name = name
+ results.append(rax_to_dict(record))
+ break
+ else:
+ results.append(rax_to_dict(record))
+ break
+
+ if not results:
+ record = dict(name=name, type='PTR', data=data, ttl=ttl,
+ comment=comment)
+ try:
+ results = dns.add_ptr_records(item, [record])
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+ elif state == 'absent':
+ current = dns.list_ptr_records(item)
+ for record in current:
+ if record.data == data:
+ results.append(rax_to_dict(record))
+ break
+
+ if results:
+ try:
+ dns.delete_ptr_records(item, data)
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, records=results)
+
+
+def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
+ overwrite=True, priority=None, record_type='A',
+ state='present', ttl=7200):
+ """Function for manipulating record types other than PTR"""
+
+ changed = False
+
+ dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not priority and record_type in ['MX', 'SRV']:
+ module.fail_json(msg='A "priority" attribute is required for '
+ 'creating a MX or SRV record')
+
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
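+        # With overwrite=true, match on name only so an existing record gets
+        # updated; otherwise also match on data, creating a new record when
+        # no exact match exists.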
+ try:
+ if overwrite:
+ record = domain.find_record(record_type, name=name)
+ else:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='overwrite=true and there are multiple matching records')
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ try:
+ record_data = {
+ 'type': record_type,
+ 'name': name,
+ 'data': data,
+ 'ttl': ttl
+ }
+ if comment:
+ record_data.update(dict(comment=comment))
+ if priority and record_type.upper() in ['MX', 'SRV']:
+ record_data.update(dict(priority=priority))
+
+ record = domain.add_records([record_data])[0]
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ update = {}
+ if comment != getattr(record, 'comment', None):
+ update['comment'] = comment
+ if ttl != getattr(record, 'ttl', None):
+ update['ttl'] = ttl
+ if priority != getattr(record, 'priority', None):
+ update['priority'] = priority
+ if data != getattr(record, 'data', None):
+ update['data'] = data
+
+ if update:
+ try:
+ record.update(**update)
+ changed = True
+ record.get()
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ elif state == 'absent':
+ try:
+ domain = dns.find(name=domain)
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ try:
+ record = domain.find_record(record_type, name=name, data=data)
+ except pyrax.exceptions.DomainRecordNotFound as e:
+ record = {}
+ except pyrax.exceptions.DomainRecordNotUnique as e:
+ module.fail_json(msg='%s' % e.message)
+
+ if record:
+ try:
+ record.delete()
+ changed = True
+ except Exception as e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, record=rax_to_dict(record))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ comment=dict(),
+ data=dict(required=True),
+ domain=dict(),
+ loadbalancer=dict(),
+ name=dict(required=True),
+ overwrite=dict(type='bool', default=True),
+ priority=dict(type='int'),
+ server=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ ttl=dict(type='int', default=3600),
+ type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
+ 'SRV', 'TXT', 'PTR'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ required_one_of=[
+ ['server', 'loadbalancer', 'domain'],
+ ],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ comment = module.params.get('comment')
+ data = module.params.get('data')
+ domain = module.params.get('domain')
+ loadbalancer = module.params.get('loadbalancer')
+ name = module.params.get('name')
+ overwrite = module.params.get('overwrite')
+ priority = module.params.get('priority')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ ttl = module.params.get('ttl')
+ record_type = module.params.get('type')
+
+ setup_rax_module(module, pyrax, False)
+
+ if record_type.upper() == 'PTR':
+ if not server and not loadbalancer:
+ module.fail_json(msg='one of the following is required: '
+ 'server,loadbalancer')
+ rax_dns_record_ptr(module, data=data, comment=comment,
+ loadbalancer=loadbalancer, name=name, server=server,
+ state=state, ttl=ttl)
+ else:
+ rax_dns_record(module, comment=comment, data=data, domain=domain,
+ name=name, overwrite=overwrite, priority=priority,
+ record_type=record_type, state=state, ttl=ttl)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_facts.py b/ansible_collections/community/general/plugins/modules/rax_facts.py
new file mode 100644
index 000000000..9e63fec38
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_facts.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_facts
+short_description: Gather facts for Rackspace Cloud Servers
+description:
+ - Gather facts for Rackspace Cloud Servers.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ address:
+ type: str
+ description:
+      - Server IP address to retrieve facts for. This will match any IP
+        assigned to the server.
+  id:
+    type: str
+    description:
+      - Server ID to retrieve facts for.
+  name:
+    type: str
+    description:
+      - Server name to retrieve facts for.
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+
+'''
+
+EXAMPLES = '''
+- name: Gather info about servers
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Get facts about servers
+ local_action:
+ module: rax_facts
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ - name: Map some facts
+ ansible.builtin.set_fact:
+ ansible_ssh_host: "{{ rax_accessipv4 }}"
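+    # An illustrative variant (not part of the original examples): the same
+    # lookup can be keyed on the server ID instead of the name; the id value
+    # below is only a placeholder.
+    - name: Get facts about a server by ID
+      local_action:
+        module: rax_facts
+        credentials: ~/.raxpub
+        id: 8a65f889-6dd9-4d2f-a635-5c85215d325c
+        region: DFW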
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_facts(module, address, name, server_id):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ ansible_facts = {}
+
+ search_opts = {}
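+    # pyrax treats the name search option as a regular expression, so the
+    # value is anchored with ^ and $ to force an exact-name match.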
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+ pass
+
+ servers[:] = [server for server in servers if server.status != "DELETED"]
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif len(servers) == 1:
+ ansible_facts = rax_to_dict(servers[0], 'server')
+
+ module.exit_json(changed=changed, ansible_facts=ansible_facts)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ supports_check_mode=True,
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+
+ setup_rax_module(module, pyrax)
+
+ rax_facts(module, address, name, server_id)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_files.py b/ansible_collections/community/general/plugins/modules/rax_files.py
new file mode 100644
index 000000000..2d52ebc0f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_files.py
@@ -0,0 +1,402 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files
+short_description: Manipulate Rackspace Cloud Files Containers
+description:
+ - Manipulate Rackspace Cloud Files Containers.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing containers.
+        Selecting this option is only appropriate when setting I(type=meta).
+ type: bool
+ default: false
+ container:
+ type: str
+ description:
+ - The container to use for container or metadata operations.
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of items to set as metadata values on a container
+ private:
+ description:
+ - Used to set a container as private, removing it from the CDN. B(Warning!)
+ Private containers, if previously made public, can have live objects
+ available until the TTL on cached objects expires
+ type: bool
+ default: false
+ public:
+ description:
+ - Used to set a container as public, available via the Cloud Files CDN
+ type: bool
+ default: false
+ region:
+ type: str
+ description:
+ - Region to create an instance in
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent', 'list']
+ default: present
+ ttl:
+ type: int
+ description:
+ - In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
+ Setting a TTL is only appropriate for containers that are public
+ type:
+ type: str
+ description:
+      - Type of object to do work on, for example a metadata object or a container object.
+ choices:
+ - container
+ - meta
+ default: container
+ web_error:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP error page when accessed by the CDN URL
+ web_index:
+ type: str
+ description:
+ - Sets an object to be presented as the HTTP index page when accessed by the CDN URL
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Containers"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "List all containers"
+ community.general.rax_files:
+ state: list
+
+ - name: "Create container called 'mycontainer'"
+ community.general.rax_files:
+ container: mycontainer
+
+ - name: "Create container 'mycontainer2' with metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ meta:
+ key: value
+ file_for: someuser@example.com
+
+ - name: "Set a container's web index page"
+ community.general.rax_files:
+ container: mycontainer
+ web_index: index.html
+
+ - name: "Set a container's web error page"
+ community.general.rax_files:
+ container: mycontainer
+ web_error: error.html
+
+ - name: "Make container public"
+ community.general.rax_files:
+ container: mycontainer
+ public: true
+
+ - name: "Make container public with a 24 hour TTL"
+ community.general.rax_files:
+ container: mycontainer
+ public: true
+ ttl: 86400
+
+ - name: "Make container private"
+ community.general.rax_files:
+ container: mycontainer
+ private: true
+
+- name: "Test Cloud Files Containers Metadata Storage"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+
+ - name: "Set mycontainer2 metadata"
+ community.general.rax_files:
+ container: mycontainer2
+ type: meta
+ meta:
+ uploaded_by: someuser@example.com
+
+ - name: "Remove mycontainer2 metadata"
+ community.general.rax_files:
+ container: "mycontainer2"
+ type: meta
+ state: absent
+ meta:
+ key: ""
+ file_for: ""
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError as e:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=True)
+META_PREFIX = 'x-container-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+        module.fail_json(msg='%s' % e)
+
+
+def _fetch_meta(module, container):
+ EXIT_DICT['meta'] = dict()
+ try:
+ for k, v in container.get_metadata().items():
+ split_key = k.split(META_PREFIX)[-1]
+ EXIT_DICT['meta'][split_key] = v
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+
+def meta(cf, module, container_, state, meta_, clear_meta):
+ c = _get_container(module, cf, container_)
+
+ if meta_ and state == 'present':
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif meta_ and state == 'absent':
+ remove_results = []
+ for k, v in meta_.items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+ elif state == 'absent':
+ remove_results = []
+ for k, v in c.get_metadata().items():
+ c.remove_metadata_key(k)
+ remove_results.append(k)
+ EXIT_DICT['deleted_meta_keys'] = remove_results
+
+ _fetch_meta(module, c)
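+    # Changed detection: meta_set or remove_results are only bound above when
+    # an API call was actually made, so their presence in locals() signals a
+    # modification.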
+ _locals = locals().keys()
+
+ EXIT_DICT['container'] = c.name
+ if 'meta_set' in _locals or 'remove_results' in _locals:
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
+ if public and private:
+ module.fail_json(msg='container cannot be simultaneously '
+ 'set to public and private')
+
+ if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
+        module.fail_json(msg='attributes cannot be set or removed when '
+                             'state is absent')
+
+ if state == 'list':
+ # We don't care if attributes are specified, let's list containers
+ EXIT_DICT['containers'] = cf.list_containers()
+ module.exit_json(**EXIT_DICT)
+
+ try:
+ c = cf.get_container(container_)
+ except pyrax.exc.NoSuchContainer as e:
+ # Make the container if state=present, otherwise bomb out
+ if state == 'present':
+ try:
+ c = cf.create_container(container_)
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['created'] = True
+ else:
+            module.fail_json(msg='%s' % e)
+ else:
+ # Successfully grabbed a container object
+ # Delete if state is absent
+ if state == 'absent':
+ try:
+ cont_deleted = c.delete()
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['deleted'] = True
+
+ if meta_:
+ try:
+ meta_set = c.set_metadata(meta_, clear=clear_meta)
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ finally:
+ _fetch_meta(module, c)
+
+ if ttl:
+ try:
+ c.cdn_ttl = ttl
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['ttl'] = c.cdn_ttl
+
+ if public:
+ try:
+ cont_public = c.make_public()
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
+ ssl_url=c.cdn_ssl_uri,
+ streaming_url=c.cdn_streaming_uri,
+ ios_uri=c.cdn_ios_uri)
+
+ if private:
+ try:
+ cont_private = c.make_private()
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['set_private'] = True
+
+ if web_index:
+ try:
+ cont_web_index = c.set_web_index_page(web_index)
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['set_index'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ if web_error:
+ try:
+ cont_err_index = c.set_web_error_page(web_error)
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ EXIT_DICT['set_error'] = True
+ finally:
+ _fetch_meta(module, c)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['objs_in_container'] = c.object_count
+ EXIT_DICT['total_bytes'] = c.total_bytes
+
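+    # Same changed-detection trick as meta(): each cont_* or meta_set local is
+    # only bound when the corresponding API call succeeded.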
+ _locals = locals().keys()
+ if ('cont_deleted' in _locals
+ or 'meta_set' in _locals
+ or 'cont_public' in _locals
+ or 'cont_private' in _locals
+ or 'cont_web_index' in _locals
+ or 'cont_err_index' in _locals):
+ EXIT_DICT['changed'] = True
+
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ type=dict(choices=['container', 'meta'], default='container'),
+ ttl=dict(type='int'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
+ web_index=dict(),
+ web_error=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container_ = module.params.get('container')
+ state = module.params.get('state')
+ meta_ = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ typ = module.params.get('type')
+ ttl = module.params.get('ttl')
+ public = module.params.get('public')
+ private = module.params.get('private')
+ web_index = module.params.get('web_index')
+ web_error = module.params.get('web_error')
+
+ if state in ['present', 'absent'] and not container_:
+ module.fail_json(msg='please specify a container name')
+    if clear_meta and typ != 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_files_objects.py b/ansible_collections/community/general/plugins/modules/rax_files_objects.py
new file mode 100644
index 000000000..08a5cd4e2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_files_objects.py
@@ -0,0 +1,558 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_files_objects
+short_description: Upload, download, and delete objects in Rackspace Cloud Files
+description:
+ - Upload, download, and delete objects in Rackspace Cloud Files.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ clear_meta:
+ description:
+ - Optionally clear existing metadata when applying metadata to existing objects.
+ Selecting this option is only appropriate when setting I(type=meta).
+ type: bool
+ default: false
+ container:
+ type: str
+ description:
+ - The container to use for file object operations.
+ required: true
+ dest:
+ type: str
+ description:
+      - The destination of a C(get) operation; for example, a local directory, C(/home/user/myfolder).
+        Used to specify the destination of an operation on a remote object; for example, a file name,
+        C(file1), or a comma-separated list of remote objects, C(file1,file2,file17).
+ expires:
+ type: int
+ description:
+ - Used to set an expiration in seconds on an uploaded file or folder.
+ meta:
+ type: dict
+ default: {}
+ description:
+ - Items to set as metadata values on an uploaded file or folder.
+ method:
+ type: str
+ description:
+ - >
+ The method of operation to be performed: C(put) to upload files, C(get) to download files or
+ C(delete) to remove remote objects in Cloud Files.
+ choices:
+ - get
+ - put
+ - delete
+ default: get
+ src:
+ type: str
+ description:
+      - Source from which to upload files. Used to specify a remote object as a source for
+        an operation, for example a file name, C(file1), or a comma-separated list of remote
+        objects, C(file1,file2,file17). Parameters I(src) and I(dest) are mutually exclusive
+        on remote-only object operations.
+ structure:
+ description:
+ - Used to specify whether to maintain nested directory structure when downloading objects
+        from Cloud Files. Setting to C(false) downloads the contents of a container to a single,
+        flat directory.
+ type: bool
+ default: true
+ type:
+ type: str
+ description:
+ - Type of object to do work on
+ - Metadata object or a file object
+ choices:
+ - file
+ - meta
+ default: file
+author: "Paul Durivage (@angstwad)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: "Test Cloud Files Objects"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ dest: ~/Downloads/testcont
+
+ - name: "Get single object from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1
+ dest: ~/Downloads/testcont
+
+ - name: "Get several objects from test container"
+ community.general.rax_files_objects:
+ container: testcont
+ src: file1,file2,file3
+ dest: ~/Downloads/testcont
+
+ - name: "Delete one object in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file1
+
+ - name: "Delete several objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: file2,file3,file4
+
+ - name: "Delete all objects in test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+
+ - name: "Upload all files to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/onehundred
+
+ - name: "Upload one file to test container"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file1
+
+ - name: "Upload one file to test container with metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ src: ~/Downloads/testcont/file2
+ method: put
+ meta:
+ testkey: testdata
+ who_uploaded_this: someuser@example.com
+
+ - name: "Upload one file to test container with TTL of 60 seconds"
+ community.general.rax_files_objects:
+ container: testcont
+ method: put
+ src: ~/Downloads/testcont/file3
+ expires: 60
+
+ - name: "Attempt to get remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: get
+ src: FileThatDoesNotExist.jpg
+ dest: ~/Downloads/testcont
+ ignore_errors: true
+
+ - name: "Attempt to delete remote object that does not exist"
+ community.general.rax_files_objects:
+ container: testcont
+ method: delete
+ dest: FileThatDoesNotExist.jpg
+ ignore_errors: true
+
+- name: "Test Cloud Files Objects Metadata"
+ hosts: local
+ gather_facts: false
+ tasks:
+ - name: "Get metadata on one object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file2
+
+ - name: "Get metadata on several objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file2,file1
+
+ - name: "Set metadata on an object"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: put
+ meta:
+ key1: value1
+ key2: value2
+ clear_meta: true
+
+ - name: "Verify metadata is set"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ src: file17
+
+ - name: "Delete metadata"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+ dest: file17
+ method: delete
+ meta:
+ key1: ''
+ key2: ''
+
+ - name: "Get metadata on all objects"
+ community.general.rax_files_objects:
+ container: testcont
+ type: meta
+'''
+
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+EXIT_DICT = dict(success=False)
+META_PREFIX = 'x-object-meta-'
+
+
+def _get_container(module, cf, container):
+ try:
+ return cf.get_container(container)
+ except pyrax.exc.NoSuchContainer as e:
+        module.fail_json(msg='%s' % e)
+
+
+def _upload_folder(cf, folder, container, ttl=None, headers=None):
+ """ Uploads a folder to Cloud Files.
+ """
+ total_bytes = 0
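+    # Walk the local tree and upload each file under its path relative to the
+    # folder root, accumulating the byte count for change reporting.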
+ for root, dummy, files in os.walk(folder):
+ for fname in files:
+ full_path = os.path.join(root, fname)
+ obj_name = os.path.relpath(full_path, folder)
+ obj_size = os.path.getsize(full_path)
+ cf.upload_file(container, full_path, obj_name=obj_name, return_none=True, ttl=ttl, headers=headers)
+ total_bytes += obj_size
+ return total_bytes
+
+
+def upload(module, cf, container, src, dest, meta, expires):
+    """ Uploads a single object or a folder to Cloud Files. Optionally sets
+    metadata, a TTL value (expires), or Content-Disposition and
+    Content-Encoding headers.
+ """
+ if not src:
+ module.fail_json(msg='src must be specified when uploading')
+
+ c = _get_container(module, cf, container)
+ src = os.path.abspath(os.path.expanduser(src))
+ is_dir = os.path.isdir(src)
+
+    if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
+ module.fail_json(msg='src must be a file or a directory')
+ if dest and is_dir:
+ module.fail_json(msg='dest cannot be set when whole '
+ 'directories are uploaded')
+
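+    # Three upload paths: a single file stored under an explicit object name,
+    # a whole directory tree, or a single file stored under its own name.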
+ cont_obj = None
+ total_bytes = 0
+ try:
+ if dest and not is_dir:
+ cont_obj = c.upload_file(src, obj_name=dest, ttl=expires, headers=meta)
+ elif is_dir:
+ total_bytes = _upload_folder(cf, src, c, ttl=expires, headers=meta)
+ else:
+ cont_obj = c.upload_file(src, ttl=expires, headers=meta)
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+ EXIT_DICT['success'] = True
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
+ if cont_obj or total_bytes > 0:
+ EXIT_DICT['changed'] = True
+ if meta:
+ EXIT_DICT['meta'] = dict(updated=True)
+
+ if cont_obj:
+ EXIT_DICT['bytes'] = cont_obj.total_bytes
+ EXIT_DICT['etag'] = cont_obj.etag
+ else:
+ EXIT_DICT['bytes'] = total_bytes
+
+ module.exit_json(**EXIT_DICT)
+
+
+def download(module, cf, container, src, dest, structure):
+ """ Download objects from Cloud Files to a local path specified by "dest".
+    Optionally disable maintaining a directory structure by passing a
+ false value to "structure".
+ """
+ # Looking for an explicit destination
+ if not dest:
+ module.fail_json(msg='dest is a required argument when '
+ 'downloading from Cloud Files')
+
+ # Attempt to fetch the container by name
+ c = _get_container(module, cf, container)
+
+ # Accept a single object name or a comma-separated list of objs
+ # If not specified, get the entire container
+ if src:
+        objs = [o.strip() for o in src.split(',')]  # a real list, so len() works below
+ else:
+ objs = c.get_object_names()
+
+ dest = os.path.abspath(os.path.expanduser(dest))
+ is_dir = os.path.isdir(dest)
+
+ if not is_dir:
+ module.fail_json(msg='dest must be a directory')
+
+ try:
+ results = [c.download_object(obj, dest, structure=structure) for obj in objs]
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+ len_results = len(results)
+ len_objs = len(objs)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['requested_downloaded'] = results
+ if results:
+ EXIT_DICT['changed'] = True
+ if len_results == len_objs:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
+ else:
+ EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
+ "downloaded" % (len_results, len_objs)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete(module, cf, container, src, dest):
+    """ Delete specific objects by providing a single file name or a
+ comma-separated list to src OR dest (but not both). Omitting file name(s)
+ assumes the entire container is to be deleted.
+ """
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
+ "have been specified on both src and dest args")
+
+ c = _get_container(module, cf, container)
+
+ objs = dest or src
+ if objs:
+        objs = [o.strip() for o in objs.split(',')]  # a real list, so len() works below
+ else:
+ objs = c.get_object_names()
+
+ num_objs = len(objs)
+
+ try:
+ results = [c.delete_object(obj) for obj in objs]
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
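+    # delete_object is expected to return True for each successful deletion,
+    # so counting True values yields how many objects were actually removed.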
+ num_deleted = results.count(True)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['deleted'] = num_deleted
+ EXIT_DICT['requested_deleted'] = objs
+
+ if num_deleted:
+ EXIT_DICT['changed'] = True
+
+ if num_objs == num_deleted:
+ EXIT_DICT['success'] = True
+ EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
+ else:
+ EXIT_DICT['msg'] = ("Error: only %s of %s objects "
+ "deleted" % (num_deleted, num_objs))
+ module.exit_json(**EXIT_DICT)
+
+
+def get_meta(module, cf, container, src, dest):
+ """ Get metadata for a single file, comma-separated list, or entire
+ container
+ """
+ if src and dest:
+        module.fail_json(msg="Error: ambiguous instructions; files to query "
+                             "have been specified on both src and dest args")
+
+ c = _get_container(module, cf, container)
+
+ objs = dest or src
+ if objs:
+        objs = [o.strip() for o in objs.split(',')]
+ else:
+ objs = c.get_object_names()
+
+ try:
+ results = dict()
+ for obj in objs:
+ meta = c.get_object(obj).get_metadata()
+ results[obj] = dict((k.split(META_PREFIX)[-1], v) for k, v in meta.items())
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+ EXIT_DICT['container'] = c.name
+ if results:
+ EXIT_DICT['meta_results'] = results
+ EXIT_DICT['success'] = True
+ module.exit_json(**EXIT_DICT)
+
+
+def put_meta(module, cf, container, src, dest, meta, clear_meta):
+ """ Set metadata on a container, single file, or comma-separated list.
+ Passing a true value to clear_meta clears the metadata stored in Cloud
+ Files before setting the new metadata to the value of "meta".
+ """
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; files to set meta"
+ " have been specified on both src and dest args")
+ objs = dest or src
+    objs = [o.strip() for o in objs.split(',')]
+
+ c = _get_container(module, cf, container)
+
+ try:
+ results = [c.get_object(obj).set_metadata(meta, clear=clear_meta) for obj in objs]
+ except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+        EXIT_DICT['num_changed'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def delete_meta(module, cf, container, src, dest, meta):
+ """ Removes metadata keys and values specified in meta, if any. Deletes on
+ all objects specified by src or dest (but not both), if any; otherwise it
+ deletes keys on all objects in the container
+ """
+ if src and dest:
+ module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
+ "deleted have been specified on both src and dest"
+ " args")
+ objs = dest or src
+    objs = [o.strip() for o in objs.split(',')]
+
+ c = _get_container(module, cf, container)
+
+    try:
+        results = []
+        for obj in objs:
+            o = c.get_object(obj)
+            # Accumulate across all objects instead of rebinding per object,
+            # so the reported count covers every key removed.
+            results.extend(
+                o.remove_metadata_key(k)
+                for k in (meta or o.get_metadata())
+            )
+    except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+ EXIT_DICT['container'] = c.name
+ EXIT_DICT['success'] = True
+ if results:
+ EXIT_DICT['changed'] = True
+ EXIT_DICT['num_deleted'] = len(results)
+ module.exit_json(**EXIT_DICT)
+
+
+def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
+ structure, expires):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
+
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if typ == "file":
+ if method == 'get':
+ download(module, cf, container, src, dest, structure)
+
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
+
+ if method == 'delete':
+ delete(module, cf, container, src, dest)
+
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
+
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ container=dict(required=True),
+ src=dict(),
+ dest=dict(),
+ method=dict(default='get', choices=['put', 'get', 'delete']),
+ type=dict(default='file', choices=['file', 'meta']),
+ meta=dict(type='dict', default=dict()),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
+ expires=dict(type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ container = module.params.get('container')
+ src = module.params.get('src')
+ dest = module.params.get('dest')
+ method = module.params.get('method')
+ typ = module.params.get('type')
+ meta = module.params.get('meta')
+ clear_meta = module.params.get('clear_meta')
+ structure = module.params.get('structure')
+ expires = module.params.get('expires')
+
+    if clear_meta and typ != 'meta':
+ module.fail_json(msg='clear_meta can only be used when setting metadata')
+
+ setup_rax_module(module, pyrax)
+ cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_identity.py b/ansible_collections/community/general/plugins/modules/rax_identity.py
new file mode 100644
index 000000000..19f803953
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_identity.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices: ['present']
+ default: present
+ required: false
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
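+    # An illustrative follow-up (not part of the original example): the
+    # registered result exposes the verified identity, including the
+    # authenticated flag.
+    - name: Show whether authentication succeeded
+      ansible.builtin.debug:
+        msg: "{{ rackspace_identity.identity.authenticated }}"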
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def cloud_identity(module, state, identity):
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ instance.update(rax_to_dict(identity))
+    instance['services'] = list(instance.get('services', {}).keys())
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if not pyrax.identity:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_keypair.py b/ansible_collections/community/general/plugins/modules/rax_keypair.py
new file mode 100644
index 000000000..22750f03c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_keypair.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_keypair
+short_description: Create a keypair for use with Rackspace Cloud Servers
+description:
+ - Create a keypair for use with Rackspace Cloud Servers.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name of keypair
+ required: true
+ public_key:
+ type: str
+ description:
+ - Public Key string to upload. Can be a file path or string
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+notes:
+ - Keypairs cannot be manipulated, only created and deleted. To "update" a
+ keypair you must first delete and then recreate.
+ - The ability to specify a file path for the public key was added in 1.7
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ region: DFW
+ register: keypair
+ - name: Create local public key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.public_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
+ - name: Create local private key
+ local_action:
+ module: copy
+ content: "{{ keypair.keypair.private_key }}"
+ dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
+
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
+'''
+import os
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
+ rax_required_together,
+ rax_to_dict,
+ setup_rax_module,
+ )
+
+
+def rax_keypair(module, name, public_key, state):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ keypair = {}
+
+ if state == 'present':
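+        # public_key may be either literal key material or a path to a local
+        # file; if a readable file path was given, load its contents.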
+        if public_key and os.path.isfile(public_key):
+            try:
+                with open(public_key) as f:
+                    public_key = f.read()
+            except Exception as e:
+                module.fail_json(msg='Failed to load %s: %s' % (public_key, e))
+
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except cs.exceptions.NotFound:
+ try:
+ keypair = cs.keypairs.create(name, public_key)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ elif state == 'absent':
+ try:
+ keypair = cs.keypairs.find(name=name)
+ except Exception:
+ pass
+
+ if keypair:
+ try:
+ keypair.delete()
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True),
+ public_key=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ public_key = module.params.get('public_key')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ rax_keypair(module, name, public_key, state)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_meta.py b/ansible_collections/community/general/plugins/modules/rax_meta.py
new file mode 100644
index 000000000..751300858
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_meta.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_meta
+short_description: Manipulate metadata for Rackspace Cloud Servers
+description:
+ - Manipulate metadata for Rackspace Cloud Servers.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ address:
+ type: str
+ description:
+      - Server IP address to modify metadata for. This will match any IP
+        assigned to the server.
+  id:
+    type: str
+    description:
+      - Server ID to modify metadata for.
+  name:
+    type: str
+    description:
+      - Server name to modify metadata for.
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of metadata to associate with the instance
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Set metadata for a server
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Set metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+ meta:
+ group: primary_group
+ groups:
+ - group_two
+ - group_three
+ app: my_app
+
+ - name: Clear metadata
+ local_action:
+ module: rax_meta
+ credentials: ~/.raxpub
+ name: "{{ inventory_hostname }}"
+ region: DFW
+'''
+
+import json
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+from ansible.module_utils.six import string_types
+
+
+def rax_meta(module, address, name, server_id, meta):
+ changed = False
+
+ cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ search_opts = {}
+ if name:
+ search_opts = dict(name='^%s$' % name)
+ try:
+ servers = cs.servers.list(search_opts=search_opts)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif address:
+ servers = []
+ try:
+ for server in cs.servers.list():
+ for addresses in server.networks.values():
+ if address in addresses:
+ servers.append(server)
+ break
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ elif server_id:
+ servers = []
+ try:
+ servers.append(cs.servers.get(server_id))
+        except Exception:
+ pass
+
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers found matching provided '
+ 'search parameters')
+ elif not servers:
+ module.fail_json(msg='Failed to find a server matching provided '
+ 'search parameters')
+
+ # Normalize and ensure all metadata values are strings
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
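+    # Idempotency: only call the API when the desired metadata differs from
+    # what the server already has; drop keys that are no longer requested,
+    # set the new values, then refresh the server object.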
+ server = servers[0]
+ if server.metadata == meta:
+ changed = False
+ else:
+ changed = True
+ removed = set(server.metadata.keys()).difference(meta.keys())
+ cs.servers.delete_meta(server, list(removed))
+ cs.servers.set_meta(server, meta)
+ server.get()
+
+ module.exit_json(changed=changed, meta=server.metadata)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ address=dict(),
+ id=dict(),
+ name=dict(),
+ meta=dict(type='dict', default=dict()),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[['address', 'id', 'name']],
+ required_one_of=[['address', 'id', 'name']],
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ address = module.params.get('address')
+ server_id = module.params.get('id')
+ name = module.params.get('name')
+ meta = module.params.get('meta')
+
+ setup_rax_module(module, pyrax)
+
+ rax_meta(module, address, name, server_id, meta)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
new file mode 100644
index 000000000..f6e650ec0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_alarm.py
@@ -0,0 +1,237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm
+description:
+ - Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*.
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ type: str
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ type: str
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ type: str
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ type: str
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ type: str
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+      - If C(true), create this alarm, but leave it in an inactive state.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ community.general.rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
+ disabled, metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
+ module.fail_json(msg='criteria must be between 1 and 16384 characters long')
+
+ changed = False
+ alarm = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = [a for a in cm.list_alarms(entity_id) if a.label == label]
+
+ if existing:
+ alarm = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_update = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing alarms have the label %s.' %
+ (len(existing), label))
+
+ if alarm:
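+            # The check and notification plan bindings are treated as
+            # immutable here: a difference forces delete-and-recreate, while
+            # criteria, disabled state, and metadata are updated in place.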
+ if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
+ should_delete = should_create = True
+
+            should_update = (disabled != alarm.disabled) or \
+                (metadata and metadata != alarm.metadata) or \
+                (criteria and criteria != alarm.criteria)
+
+ if should_update and not should_delete:
+ cm.update_alarm(entity=entity_id, alarm=alarm,
+ criteria=criteria, disabled=disabled,
+ label=label, metadata=metadata)
+ changed = True
+
+ if should_delete:
+ alarm.delete()
+ changed = True
+ else:
+ should_create = True
+
+ if should_create:
+ alarm = cm.create_alarm(entity=entity_id, check=check_id,
+ notification_plan=notification_plan_id,
+ criteria=criteria, disabled=disabled, label=label,
+ metadata=metadata)
+ changed = True
+ else:
+ for a in existing:
+ a.delete()
+ changed = True
+
+ if alarm:
+ alarm_dict = {
+ "id": alarm.id,
+ "label": alarm.label,
+ "check_id": alarm.check_id,
+ "notification_plan_id": alarm.notification_plan_id,
+ "criteria": alarm.criteria,
+ "disabled": alarm.disabled,
+ "metadata": alarm.metadata
+ }
+ module.exit_json(changed=changed, alarm=alarm_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ entity_id=dict(required=True),
+ check_id=dict(required=True),
+ notification_plan_id=dict(required=True),
+ criteria=dict(),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ entity_id = module.params.get('entity_id')
+ check_id = module.params.get('check_id')
+ notification_plan_id = module.params.get('notification_plan_id')
+ criteria = module.params.get('criteria')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ alarm(module, state, label, entity_id, check_id, notification_plan_id,
+ criteria, disabled, metadata)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_check.py b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
new file mode 100644
index 000000000..6a0ad03a3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_check.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+ - Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+  - This module relies on the C(pyrax) package which is deprecated in favour of using the OpenStack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ entity_id:
+ type: str
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ type: str
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ type: str
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ - |
+ Choices for this option are:
+ - C(remote.dns)
+ - C(remote.ftp-banner)
+ - C(remote.http)
+ - C(remote.imap-banner)
+ - C(remote.mssql-banner)
+ - C(remote.mysql-banner)
+ - C(remote.ping)
+ - C(remote.pop3-banner)
+ - C(remote.postgresql-banner)
+ - C(remote.smtp-banner)
+ - C(remote.smtp)
+ - C(remote.ssh)
+ - C(remote.tcp)
+ - C(remote.telnet-banner)
+ - C(agent.filesystem)
+ - C(agent.memory)
+ - C(agent.load_average)
+ - C(agent.cpu)
+ - C(agent.disk)
+ - C(agent.network)
+ - C(agent.plugin)
+ required: true
+ monitoring_zones_poll:
+ type: str
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ type: str
+ description:
+ - One of I(target_hostname) and I(target_alias) is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4, IPv6, or FQDN.
+ target_alias:
+ type: str
+ description:
+ - One of I(target_alias) and I(target_hostname) is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ I(ip_addresses) hash to resolve an IP address to target.
+ details:
+ type: dict
+ default: {}
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If C(true), ensure the check is created, but don't actually use it yet.
+ type: bool
+ default: false
+ metadata:
+ type: dict
+ default: {}
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ type: int
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ type: int
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ community.general.rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ meta:
+ hurf: durf
+ register: the_check
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout):
+
+ # Coerce attributes.
+
+ if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
+ monitoring_zones_poll = [monitoring_zones_poll]
+
+ if period:
+ period = int(period)
+
+ if timeout:
+ timeout = int(timeout)
+
+ changed = False
+ check = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ entity = cm.get_entity(entity_id)
+ if not entity:
+ module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
+ ' a valid entity id.' % entity_id)
+
+ existing = [e for e in entity.list_checks() if e.label == label]
+
+ if existing:
+ check = existing[0]
+
+ if state == 'present':
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing checks have a label of %s.' %
+ (len(existing), label))
+
+ should_delete = False
+ should_create = False
+ should_update = False
+
+ if check:
+ # Details may include keys set to default values that are not
+ # included in the initial creation.
+ #
+ # Only force a recreation of the check if one of the *specified*
+ # keys is missing or has a different value.
+ if details:
+ for (key, value) in details.items():
+ if key not in check.details:
+ should_delete = should_create = True
+ elif value != check.details[key]:
+ should_delete = should_create = True
+
+ should_update = label != check.label or \
+ (target_hostname and target_hostname != check.target_hostname) or \
+ (target_alias and target_alias != check.target_alias) or \
+ (disabled != check.disabled) or \
+ (metadata and metadata != check.metadata) or \
+ (period and period != check.period) or \
+ (timeout and timeout != check.timeout) or \
+ (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)
+
+ if should_update and not should_delete:
+ check.update(label=label,
+ disabled=disabled,
+ metadata=metadata,
+ monitoring_zones_poll=monitoring_zones_poll,
+ timeout=timeout,
+ period=period,
+ target_alias=target_alias,
+ target_hostname=target_hostname)
+ changed = True
+ else:
+ # The check doesn't exist yet.
+ should_create = True
+
+ if should_delete:
+ check.delete()
+
+ if should_create:
+ check = cm.create_check(entity,
+ label=label,
+ check_type=check_type,
+ target_hostname=target_hostname,
+ target_alias=target_alias,
+ monitoring_zones_poll=monitoring_zones_poll,
+ details=details,
+ disabled=disabled,
+ metadata=metadata,
+ period=period,
+ timeout=timeout)
+ changed = True
+ elif state == 'absent':
+ if check:
+ check.delete()
+ changed = True
+ else:
+ module.fail_json(msg='state must be either present or absent.')
+
+ if check:
+ check_dict = {
+ "id": check.id,
+ "label": check.label,
+ "type": check.type,
+ "target_hostname": check.target_hostname,
+ "target_alias": check.target_alias,
+ "monitoring_zones_poll": check.monitoring_zones_poll,
+ "details": check.details,
+ "disabled": check.disabled,
+ "metadata": check.metadata,
+ "period": check.period,
+ "timeout": check.timeout
+ }
+ module.exit_json(changed=changed, check=check_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ entity_id=dict(required=True),
+ label=dict(required=True),
+ check_type=dict(required=True),
+ monitoring_zones_poll=dict(),
+ target_hostname=dict(),
+ target_alias=dict(),
+ details=dict(type='dict', default={}),
+ disabled=dict(type='bool', default=False),
+ metadata=dict(type='dict', default={}),
+ period=dict(type='int'),
+ timeout=dict(type='int'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ entity_id = module.params.get('entity_id')
+ label = module.params.get('label')
+ check_type = module.params.get('check_type')
+ monitoring_zones_poll = module.params.get('monitoring_zones_poll')
+ target_hostname = module.params.get('target_hostname')
+ target_alias = module.params.get('target_alias')
+ details = module.params.get('details')
+ disabled = module.boolean(module.params.get('disabled'))
+ metadata = module.params.get('metadata')
+ period = module.params.get('period')
+ timeout = module.params.get('timeout')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_check(module, state, entity_id, label, check_type,
+ monitoring_zones_poll, target_hostname, target_alias, details,
+ disabled, metadata, period, timeout)
+
+
+if __name__ == '__main__':
+ main()
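+
+# Illustrative sketch (not part of the module; helper name and data are
+# hypothetical): the details comparison in cloud_check() above only recreates
+# a check when a key the caller *specified* is missing or differs, so
+# server-side defaults added to check.details do not cause needless churn.
+def _details_require_recreate(specified, current):
+    return any(key not in current or current[key] != value
+               for key, value in (specified or {}).items())
+
+# The API added a default 'resolver' key; no recreation is needed.
+assert not _details_require_recreate({'count': 10},
+                                     {'count': 10, 'resolver': 'IPv4'})
+# A specified key differs, so the check is deleted and recreated.
+assert _details_require_recreate({'count': 5}, {'count': 10})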
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_entity.py b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
new file mode 100644
index 000000000..b42bd173b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_entity.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+ - Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm.
+ - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ label:
+ type: str
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ type: str
+ description:
+      - Ensure that an entity with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ default: present
+ agent_id:
+ type: str
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ type: dict
+ default: {}
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+        added to this entity. Must be a dictionary with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ type: dict
+ default: {}
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ community.general.rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+        metadata:
+ hurf: durf
+ register: the_entity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
+ metadata):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for entity in cm.list_entities():
+ if label == entity.label:
+ existing.append(entity)
+
+ entity = None
+
+ if existing:
+ entity = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing entities have the label %s.' %
+ (len(existing), label))
+
+ if entity:
+ if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
+ should_delete = should_create = True
+
+ # Change an existing Entity, unless there's nothing to do.
+            should_update = (agent_id and agent_id != entity.agent_id) or \
+                (metadata and metadata != entity.metadata)
+
+ if should_update and not should_delete:
+ entity.update(agent_id, metadata)
+ changed = True
+
+ if should_delete:
+ entity.delete()
+ else:
+ should_create = True
+
+ if should_create:
+ # Create a new Entity.
+ entity = cm.create_entity(label=label, agent=agent_id,
+ ip_addresses=named_ip_addresses,
+ metadata=metadata)
+ changed = True
+ else:
+ # Delete the existing Entities.
+ for e in existing:
+ e.delete()
+ changed = True
+
+ if entity:
+ entity_dict = {
+ "id": entity.id,
+ "name": entity.name,
+ "agent_id": entity.agent_id,
+ }
+ module.exit_json(changed=changed, entity=entity_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ agent_id=dict(),
+ named_ip_addresses=dict(type='dict', default={}),
+ metadata=dict(type='dict', default={})
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ agent_id = module.params.get('agent_id')
+ named_ip_addresses = module.params.get('named_ip_addresses')
+ metadata = module.params.get('metadata')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_monitoring(module, state, label, agent_id, named_ip_addresses, metadata)
+
+
+if __name__ == '__main__':
+ main()
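+
+# Illustrative sketch (not part of the module; helper name and data are
+# hypothetical): the reconciliation in cloud_monitoring() above updates an
+# entity in place for agent_id/metadata drift, but deletes and recreates it
+# when the specified named_ip_addresses hash differs from the stored
+# ip_addresses.
+def _entity_actions(params, existing):
+    recreate = bool(params.get('named_ip_addresses')) and \
+        params['named_ip_addresses'] != existing['ip_addresses']
+    update = not recreate and bool(
+        (params.get('agent_id') and params['agent_id'] != existing['agent_id']) or
+        (params.get('metadata') and params['metadata'] != existing['metadata']))
+    return {'recreate': recreate, 'update': update}
+
+print(_entity_actions({'named_ip_addresses': {'web': '192.0.2.4'}},
+                      {'ip_addresses': {'web': '192.0.2.9'},
+                       'agent_id': None, 'metadata': {}}))
+# -> {'recreate': True, 'update': False}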
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
new file mode 100644
index 000000000..91d079359
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_notification.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ type: str
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ type: dict
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+    community.general.rax_mon_notification:
+      credentials: ~/.rax_pub
+      label: omg
+      notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification(module, state, label, notification_type, details):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notifications():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification = existing[0]
+
+ if state == 'present':
+ should_update = False
+ should_delete = False
+ should_create = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s existing notifications are labelled %s.' %
+ (len(existing), label))
+
+ if notification:
+ should_delete = (notification_type != notification.type)
+
+ should_update = (details != notification.details)
+
+        if should_update and not should_delete:
+            notification.update(details=details)
+            changed = True
+
+        if should_delete:
+            notification.delete()
+            should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification = cm.create_notification(notification_type,
+ label=label, details=details)
+ changed = True
+ else:
+ for n in existing:
+ n.delete()
+ changed = True
+
+ if notification:
+ notification_dict = {
+ "id": notification.id,
+ "type": notification.type,
+ "label": notification.label,
+ "details": notification.details
+ }
+ module.exit_json(changed=changed, notification=notification_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
+ details=dict(required=True, type='dict')
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ notification_type = module.params.get('notification_type')
+ details = module.params.get('details')
+
+ setup_rax_module(module, pyrax)
+
+ notification(module, state, label, notification_type, details)
+
+
+if __name__ == '__main__':
+ main()
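+
+# Illustrative sketch (not part of the module): each notification_type expects
+# different keys in 'details'. The mapping below is an assumption drawn from
+# the Rackspace notification-type docs referenced in DOCUMENTATION; verify the
+# exact key names (especially for pagerduty) against that reference.
+_EXPECTED_DETAIL_KEYS = {
+    'email': {'address'},
+    'webhook': {'url'},
+    'pagerduty': {'service_key'},  # assumed key name
+}
+
+def _missing_detail_keys(notification_type, details):
+    return _EXPECTED_DETAIL_KEYS.get(notification_type, set()) - set(details)
+
+print(_missing_detail_keys('email', {'address': 'me@mailhost.com'}))  # set()
+print(_missing_detail_keys('webhook', {}))  # {'url'}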
diff --git a/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
new file mode 100644
index 000000000..ac8b189aa
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_mon_notification_plan.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification plan
+description:
+ - Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm.
+ - This module relies on the C(pyrax) package which is deprecated in favour of using Openstack API.
+ - Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ default: present
+ label:
+ type: str
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ type: list
+ elements: str
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ type: list
+ elements: str
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ type: list
+ elements: str
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson (@smashwilson)
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ community.general.rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def notification_plan(module, state, label, critical_state, warning_state, ok_state):
+
+ if len(label) < 1 or len(label) > 255:
+ module.fail_json(msg='label must be between 1 and 255 characters long')
+
+ changed = False
+ notification_plan = None
+
+ cm = pyrax.cloud_monitoring
+ if not cm:
+ module.fail_json(msg='Failed to instantiate client. This typically '
+ 'indicates an invalid region or an incorrectly '
+ 'capitalized region name.')
+
+ existing = []
+ for n in cm.list_notification_plans():
+ if n.label == label:
+ existing.append(n)
+
+ if existing:
+ notification_plan = existing[0]
+
+ if state == 'present':
+ should_create = False
+ should_delete = False
+
+ if len(existing) > 1:
+ module.fail_json(msg='%s notification plans are labelled %s.' %
+ (len(existing), label))
+
+ if notification_plan:
+ should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
+ (warning_state and warning_state != notification_plan.warning_state) or \
+ (ok_state and ok_state != notification_plan.ok_state)
+
+ if should_delete:
+ notification_plan.delete()
+ should_create = True
+ else:
+ should_create = True
+
+ if should_create:
+ notification_plan = cm.create_notification_plan(label=label,
+ critical_state=critical_state,
+ warning_state=warning_state,
+ ok_state=ok_state)
+ changed = True
+ else:
+ for np in existing:
+ np.delete()
+ changed = True
+
+ if notification_plan:
+ notification_plan_dict = {
+ "id": notification_plan.id,
+ "critical_state": notification_plan.critical_state,
+ "warning_state": notification_plan.warning_state,
+ "ok_state": notification_plan.ok_state,
+ "metadata": notification_plan.metadata
+ }
+ module.exit_json(changed=changed, notification_plan=notification_plan_dict)
+ else:
+ module.exit_json(changed=changed)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ label=dict(required=True),
+ critical_state=dict(type='list', elements='str'),
+ warning_state=dict(type='list', elements='str'),
+ ok_state=dict(type='list', elements='str'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ label = module.params.get('label')
+ critical_state = module.params.get('critical_state')
+ warning_state = module.params.get('warning_state')
+ ok_state = module.params.get('ok_state')
+
+ setup_rax_module(module, pyrax)
+
+ notification_plan(module, state, label, critical_state, warning_state, ok_state)
+
+
+if __name__ == '__main__':
+ main()
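+
+# Illustrative sketch (not part of the module; notification ids are
+# hypothetical): the comparison in notification_plan() above replaces a plan
+# outright when any *specified* severity list differs, rather than merging
+# the lists in place.
+def _plan_needs_recreate(desired, current):
+    states = ('critical_state', 'warning_state', 'ok_state')
+    return any(desired.get(s) and desired[s] != current.get(s) for s in states)
+
+print(_plan_needs_recreate({'critical_state': ['ntAAAAA']},
+                           {'critical_state': ['ntBBBBB']}))  # True
+print(_plan_needs_recreate({'critical_state': ['ntAAAAA']},
+                           {'critical_state': ['ntAAAAA']}))  # False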
diff --git a/ansible_collections/community/general/plugins/modules/rax_network.py b/ansible_collections/community/general/plugins/modules/rax_network.py
new file mode 100644
index 000000000..22f148366
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_network.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_network
+short_description: Create / delete an isolated network in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud isolated network.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ label:
+ type: str
+ description:
+ - Label (name) to give the network
+ required: true
+ cidr:
+ type: str
+ description:
+ - cidr of the network being created
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Jesse Keating (@omgjlk)"
+extends_documentation_fragment:
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build an Isolated Network
+  gather_facts: false
+  hosts: local
+  connection: local
+
+  tasks:
+ - name: Network create request
+ local_action:
+ module: rax_network
+ credentials: ~/.raxpub
+ label: my-net
+ cidr: 192.168.3.0/24
+ state: present
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_network(module, state, label, cidr):
+ changed = False
+ network = None
+ networks = []
+
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ if not cidr:
+ module.fail_json(msg='missing required arguments: cidr')
+
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ except pyrax.exceptions.NetworkNotFound:
+ try:
+ network = pyrax.cloud_networks.create(label, cidr=cidr)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+        except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ elif state == 'absent':
+ try:
+ network = pyrax.cloud_networks.find_network_by_label(label)
+ network.delete()
+ changed = True
+ except pyrax.exceptions.NetworkNotFound:
+ pass
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if network:
+ instance = dict(id=network.id,
+ label=network.label,
+ cidr=network.cidr)
+ networks.append(instance)
+
+ module.exit_json(changed=changed, networks=networks)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present',
+ choices=['present', 'absent']),
+ label=dict(required=True),
+ cidr=dict()
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+ label = module.params.get('label')
+ cidr = module.params.get('cidr')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_network(module, state, label, cidr)
+
+
+if __name__ == '__main__':
+ main()
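+
+# Illustrative sketch (not part of the module; helper name is made up):
+# cloud_network() hands 'cidr' straight to pyrax, so a malformed value only
+# fails server-side. A caller can pre-validate locally with the standard
+# library before invoking the module.
+import ipaddress
+
+def _valid_cidr(cidr):
+    try:
+        ipaddress.ip_network(cidr)
+        return True
+    except ValueError:
+        return False
+
+print(_valid_cidr('192.168.3.0/24'))  # True
+print(_valid_cidr('192.168.3.0/33'))  # False (prefix length out of range)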
diff --git a/ansible_collections/community/general/plugins/modules/rax_queue.py b/ansible_collections/community/general/plugins/modules/rax_queue.py
new file mode 100644
index 000000000..00f730b27
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_queue.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_queue
+short_description: Create / delete a queue in Rackspace Public Cloud
+description:
+ - creates / deletes a Rackspace Public Cloud queue.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - Name to give the queue
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author:
+ - "Christopher H. Laco (@claco)"
+ - "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+- name: Build a Queue
+ gather_facts: false
+ hosts: local
+ connection: local
+ tasks:
+ - name: Queue create request
+ local_action:
+ module: rax_queue
+ credentials: ~/.raxpub
+ name: my-queue
+ region: DFW
+ state: present
+ register: my_queue
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
+
+
+def cloud_queue(module, state, name):
+    for arg_name, arg_value in (('state', state), ('name', name)):
+        if not arg_value:
+            module.fail_json(msg='%s is required for rax_queue' % arg_name)
+
+ changed = False
+ queues = []
+ instance = {}
+
+ cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ for queue in cq.list():
+ if name != queue.name:
+ continue
+
+ queues.append(queue)
+
+ if len(queues) > 1:
+ module.fail_json(msg='Multiple Queues were matched by name')
+
+ if state == 'present':
+ if not queues:
+ try:
+ queue = cq.create(name)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+ else:
+ queue = queues[0]
+
+ instance = dict(name=queue.name)
+ result = dict(changed=changed, queue=instance)
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if queues:
+ queue = queues[0]
+ try:
+ queue.delete()
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, queue=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_queue(module, state, name)
+
+
+if __name__ == '__main__':
+ main()
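+
+# Illustrative sketch (not part of the module; data is hypothetical): the
+# lookup in cloud_queue() above is an exact-name scan of cq.list(), and more
+# than one hit is treated as an error.
+def _match_queue(queue_names, name):
+    hits = [q for q in queue_names if q == name]
+    if len(hits) > 1:
+        raise ValueError('Multiple Queues were matched by name')
+    return hits[0] if hits else None
+
+print(_match_queue(['my-queue', 'other-queue'], 'my-queue'))  # 'my-queue'
+print(_match_queue(['other-queue'], 'my-queue'))              # None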
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_group.py b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
new file mode 100644
index 000000000..677a75b33
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_scaling_group.py
@@ -0,0 +1,441 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ config_drive:
+ description:
+ - Attach read-only configuration drive to server as label config-2
+ type: bool
+ default: false
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ disk_config:
+ type: str
+ description:
+ - Disk partitioning strategy
+ - If not specified, it will fallback to C(auto).
+ choices:
+ - auto
+ - manual
+ files:
+ type: dict
+ default: {}
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ flavor:
+ type: str
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ type: str
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ type: str
+ description:
+ - key pair to use on the instance
+ loadbalancers:
+ type: list
+ elements: dict
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ type: int
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ type: dict
+ default: {}
+ description:
+ - A hash of metadata to associate with the instance
+ min_entities:
+ type: int
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ type: str
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ type: list
+ elements: str
+ description:
+ - The network to attach to the instances. If specified, you must include
+ ALL networks including the public and private interfaces. Can be C(id)
+ or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ type: str
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ user_data:
+ type: str
+ description:
+      - Data to be uploaded to the server's config drive. This option implies
+        I(config_drive). Can be a file path or a string.
+ wait:
+ description:
+      - wait for the scaling group to finish provisioning the minimum number of
+        servers
+ type: bool
+ default: false
+ wait_timeout:
+ type: int
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+ register: asg
+'''
+
+import base64
+import json
+import os
+import time
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (
+ rax_argument_spec, rax_find_image, rax_find_network,
+ rax_required_together, rax_to_dict, setup_rax_module,
+ rax_scaling_group_personality_file,
+)
+from ansible.module_utils.six import string_types
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files=None, flavor=None,
+ image=None, key_name=None, loadbalancers=None, meta=None,
+ min_entities=0, max_entities=0, name=None, networks=None,
+ server_name=None, state='present', user_data=None,
+ config_drive=False, wait=True, wait_timeout=300):
+ files = {} if files is None else files
+ loadbalancers = [] if loadbalancers is None else loadbalancers
+ meta = {} if meta is None else meta
+ networks = [] if networks is None else networks
+
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if user_data:
+ config_drive = True
+
+ if user_data and os.path.isfile(user_data):
+        try:
+            with open(user_data) as f:
+                user_data = f.read()
+        except Exception as e:
+            module.fail_json(msg='Failed to load %s: %s' % (user_data, e))
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, string_types):
+ meta[k] = '%s' % v
+
+ if image:
+ image = rax_find_image(module, pyrax, image)
+
+ nics = []
+ if networks:
+ for network in networks:
+ nics.extend(rax_find_network(module, pyrax, network))
+
+ for nic in nics:
+ # pyrax is currently returning net-id, but we need uuid
+ # this check makes this forward compatible for a time when
+ # pyrax uses uuid instead
+ if nic.get('net-id'):
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = rax_scaling_group_personality_file(module, files)
+
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ try:
+ lb_id = int(lb.get('id'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer ID is not an integer: '
+ '%s' % lb.get('id'))
+ try:
+ port = int(lb.get('port'))
+ except (ValueError, TypeError):
+ module.fail_json(msg='Load balancer port is not an '
+ 'integer: %s' % lb.get('port'))
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch as e:
+        module.fail_json(msg='%s' % e)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+ metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name, config_drive=config_drive,
+ user_data=user_data)
+ changed = True
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['server_name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ disk_config = disk_config or 'AUTO'
+ if ((disk_config or lc.get('disk_config')) and
+ disk_config != lc.get('disk_config', 'AUTO')):
+ lc_args['disk_config'] = disk_config
+
+ if (meta or lc.get('meta')) and meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+ test_personality = []
+ for p in personality:
+ test_personality.append({
+ 'path': p['path'],
+ 'contents': base64.b64encode(p['contents'])
+ })
+ if ((test_personality or lc.get('personality')) and
+ test_personality != lc.get('personality')):
+ lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+ # Work around for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if config_drive != lc.get('config_drive', False):
+ lc_args['config_drive'] = config_drive
+
+ if (user_data and
+ base64.b64encode(user_data) != lc.get('user_data')):
+ lc_args['user_data'] = user_data
+
+ if lc_args:
+ # Work around for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
+ sg.get()
+
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ state = sg.get_state()
+ if state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+ except pyrax.exceptions.NotFound as e:
+ sg = {}
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ config_drive=dict(default=False, type='bool'),
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+ files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list', elements='dict'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', elements='str', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300, type='int'),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ config_drive = module.params.get('config_drive')
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+ user_data = module.params.get('user_data')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+ module.fail_json(msg='min_entities and max_entities must be an '
+ 'integer between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+ files=files, flavor=flavor, image=image, meta=meta,
+ key_name=key_name, loadbalancers=loadbalancers,
+ min_entities=min_entities, max_entities=max_entities,
+ name=name, networks=networks, server_name=server_name,
+ state=state, config_drive=config_drive, user_data=user_data)
+
+
+if __name__ == '__main__':
+ main()
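+
+# Illustrative sketch (not part of the module): the metadata normalisation in
+# rax_asg() above coerces every value to a string before the group is created
+# or updated, comma-joining lists and JSON-encoding dicts.
+import json
+
+def _normalise_meta(meta):
+    out = {}
+    for k, v in meta.items():
+        if isinstance(v, list):
+            out[k] = ','.join('%s' % i for i in v)
+        elif isinstance(v, dict):
+            out[k] = json.dumps(v)
+        else:
+            out[k] = '%s' % v
+    return out
+
+print(_normalise_meta({'owners': ['ops', 'dev'], 'limits': {'cpu': 2}, 'tier': 1}))
+# -> {'owners': 'ops,dev', 'limits': '{"cpu": 2}', 'tier': '1'}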
diff --git a/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
new file mode 100644
index 000000000..60b48bb2a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rax_scaling_policy.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ at:
+ type: str
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ type: int
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+ in the scaling group. If this is a percentage, you must set
+ I(is_percent) to C(true) also.
+ cron:
+ type: str
+ description:
+ - The time when the policy will be executed, as a cron entry. For
+        example, C(1 0 * * *) runs the policy every day at 00:01.
+ cooldown:
+ type: int
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ default: 300
+ desired_capacity:
+ type: int
+ description:
+      - The desired server capacity of the scaling group; that is, how
+ many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ type: bool
+ name:
+ type: str
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ type: str
+ description:
+      - The type of policy that will be executed.
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ type: str
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ type: str
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: "Matt Martz (@sivel)"
+extends_documentation_fragment:
+ - community.general.rackspace
+ - community.general.rackspace.openstack
+ - community.general.attributes
+
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asps_at
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+ register: asp_cron
+
+ - community.general.rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+ register: asp_webhook
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rax import (UUID, rax_argument_spec, rax_required_together, rax_to_dict,
+ setup_rax_module)
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+        # Use a list (not a lazy filter object) so the matches can be counted
+        # and indexed under Python 3.
+        policies = [p for p in sg.list_policies() if name == p.name]
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception as e:
+                module.fail_json(msg='%s' % e)
+
+ else:
+ policy = policies[0]
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+ else:
+ try:
+            policies = [p for p in sg.list_policies() if name == p.name]
+            if len(policies) > 1:
+                module.fail_json(msg='No unique policy match found by name')
+            elif not policies:
+                policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+ except Exception as e:
+            module.fail_json(msg='%s' % e)
+
+ module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+        module.fail_json(msg='policy_type=schedule is required for a '
+                             'time-based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+if __name__ == '__main__':
+ main()
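+
+# Illustrative sketch (not part of the module; helper name is made up):
+# schedule policies carry either an 'at' timestamp or a 'cron' expression in
+# their args (mutually exclusive, as enforced in main() above), while webhook
+# policies carry neither.
+def _schedule_args(at=None, cron=None):
+    if at and cron:
+        raise ValueError("'at' and 'cron' are mutually exclusive")
+    if at:
+        return dict(at=at)
+    if cron:
+        return dict(cron=cron)
+    return None
+
+print(_schedule_args(at='2013-05-19T08:07:08Z'))  # {'at': '2013-05-19T08:07:08Z'}
+print(_schedule_args(cron='1 0 * * *'))           # {'cron': '1 0 * * *'}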
diff --git a/ansible_collections/community/general/plugins/modules/read_csv.py b/ansible_collections/community/general/plugins/modules/read_csv.py
new file mode 100644
index 000000000..f2a359fa7
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/read_csv.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: read_csv
+short_description: Read a CSV file
+description:
+- Read a CSV file and return a list or a dictionary, containing one dictionary per row.
+author:
+- Dag Wieers (@dagwieers)
+extends_documentation_fragment:
+- community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - The CSV filename to read data from.
+ type: path
+ required: true
+ aliases: [ filename ]
+ key:
+ description:
+ - The column name used as a key for the resulting dictionary.
+ - If C(key) is unset, the module returns a list of dictionaries,
+ where each dictionary is a row in the CSV file.
+ type: str
+ dialect:
+ description:
+ - The CSV dialect to use when parsing the CSV file.
+ - Possible values include C(excel), C(excel-tab) or C(unix).
+ type: str
+ default: excel
+ fieldnames:
+ description:
+ - A list of field names for every column.
+ - This is needed if the CSV does not have a header.
+ type: list
+ elements: str
+ unique:
+ description:
+ - Whether the C(key) used is expected to be unique.
+ type: bool
+ default: true
+ delimiter:
+ description:
+ - A one-character string used to separate fields.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: str
+ skipinitialspace:
+ description:
+    - Whether to ignore any whitespace immediately following the delimiter.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+ strict:
+ description:
+ - Whether to raise an exception on bad CSV input.
+ - When using this parameter, you change the default value used by I(dialect).
+ - The default value depends on the dialect used.
+ type: bool
+notes:
+- Ansible also ships with the C(csvfile) lookup plugin, which can be used to do selective lookups in CSV files from Jinja.
+'''
+
+EXAMPLES = r'''
+# Example CSV file with header
+#
+# name,uid,gid
+# dag,500,500
+# jeroen,501,500
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ community.general.read_csv:
+ path: users.csv
+ key: name
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.dict.dag.name }} has UID {{ users.dict.dag.uid }} and GID {{ users.dict.dag.gid }}'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ register: users
+ delegate_to: localhost
+
+- ansible.builtin.debug:
+ msg: 'User {{ users.list.1.name }} has UID {{ users.list.1.uid }} and GID {{ users.list.1.gid }}'
+
+# Example CSV file without header and semi-colon delimiter
+#
+# dag;500;500
+# jeroen;501;500
+
+# Read a CSV file without headers
+- name: Read users from CSV file and return a list
+ community.general.read_csv:
+ path: users.csv
+ fieldnames: name,uid,gid
+ delimiter: ';'
+ register: users
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+dict:
+ description: The CSV content as a dictionary.
+ returned: success
+ type: dict
+ sample:
+ dag:
+ name: dag
+ uid: 500
+ gid: 500
+ jeroen:
+ name: jeroen
+ uid: 501
+ gid: 500
+list:
+ description: The CSV content as a list.
+ returned: success
+ type: list
+ sample:
+ - name: dag
+ uid: 500
+ gid: 500
+ - name: jeroen
+ uid: 501
+ gid: 500
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError,
+ DialectNotAvailableError,
+ CustomDialectFailureError)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['filename']),
+ dialect=dict(type='str', default='excel'),
+ key=dict(type='str', no_log=False),
+ fieldnames=dict(type='list', elements='str'),
+ unique=dict(type='bool', default=True),
+ delimiter=dict(type='str'),
+ skipinitialspace=dict(type='bool'),
+ strict=dict(type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ path = module.params['path']
+ dialect = module.params['dialect']
+ key = module.params['key']
+ fieldnames = module.params['fieldnames']
+ unique = module.params['unique']
+
+ dialect_params = {
+ "delimiter": module.params['delimiter'],
+ "skipinitialspace": module.params['skipinitialspace'],
+ "strict": module.params['strict'],
+ }
+
+ try:
+ dialect = initialize_dialect(dialect, **dialect_params)
+ except (CustomDialectFailureError, DialectNotAvailableError) as e:
+ module.fail_json(msg=to_native(e))
+
+ try:
+ with open(path, 'rb') as f:
+ data = f.read()
+ except (IOError, OSError) as e:
+ module.fail_json(msg="Unable to open file: %s" % to_native(e))
+
+ reader = read_csv(data, dialect, fieldnames)
+
+ if key and key not in reader.fieldnames:
+ module.fail_json(msg="Key '%s' was not found in the CSV header fields: %s" % (key, ', '.join(reader.fieldnames)))
+
+ data_dict = dict()
+ data_list = list()
+
+ if key is None:
+ try:
+ for row in reader:
+ data_list.append(row)
+ except CSVError as e:
+ module.fail_json(msg="Unable to process file: %s" % to_native(e))
+ else:
+ try:
+ for row in reader:
+ if unique and row[key] in data_dict:
+ module.fail_json(msg="Key '%s' is not unique for value '%s'" % (key, row[key]))
+ data_dict[row[key]] = row
+ except CSVError as e:
+ module.fail_json(msg="Unable to process file: %s" % to_native(e))
+
+ module.exit_json(dict=data_dict, list=data_list)
+
+
+if __name__ == '__main__':
+ main()
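+
+# Illustrative sketch (standard library only, not part of the module): what
+# the module returns for the header example in EXAMPLES above - a list of row
+# dicts, or a dict keyed by the 'key' column when 'key' is set.
+import csv
+import io
+
+_data = "name,uid,gid\ndag,500,500\njeroen,501,500\n"
+_rows = list(csv.DictReader(io.StringIO(_data)))
+_by_name = {row['name']: row for row in _rows}
+
+print(_rows[1]['uid'])          # '501'
+print(_by_name['dag']['gid'])   # '500'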
diff --git a/ansible_collections/community/general/plugins/modules/redfish_command.py b/ansible_collections/community/general/plugins/modules/redfish_command.py
new file mode 100644
index 000000000..400677eab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redfish_command.py
@@ -0,0 +1,959 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_command
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+  - Manages OOB controller functions, for example reboot and log management.
+  - Manages OOB controller users, for example add, remove, and update.
+  - Manages system power, for example on, off, graceful reboot, and forced reboot.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ username:
+ description:
+ - Username for authenticating to OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authenticating to OOB controller.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to OOB controller.
+ type: str
+ version_added: 2.3.0
+ session_uri:
+ description:
+ - URI of the session resource.
+ type: str
+ version_added: 2.3.0
+ id:
+ required: false
+ aliases: [ account_id ]
+ description:
+ - ID of account to delete/modify.
+ - Can also be used in account creation to work around vendor issues where the ID of the new user is required in the POST request.
+ type: str
+ new_username:
+ required: false
+ aliases: [ account_username ]
+ description:
+ - Username of account to add/delete/modify.
+ type: str
+ new_password:
+ required: false
+ aliases: [ account_password ]
+ description:
+ - New password of account to add/modify.
+ type: str
+ roleid:
+ required: false
+ aliases: [ account_roleid ]
+ description:
+ - Role of account to add/modify.
+ type: str
+ bootdevice:
+ required: false
+ description:
+ - Boot device when setting boot configuration.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to OOB controller.
+ default: 10
+ type: int
+ boot_override_mode:
+ description:
+ - Boot mode when using an override.
+ type: str
+ choices: [ Legacy, UEFI ]
+ version_added: 3.5.0
+ uefi_target:
+ required: false
+ description:
+ - UEFI boot target when bootdevice is "UefiTarget".
+ type: str
+ boot_next:
+ required: false
+ description:
+ - BootNext target when bootdevice is "UefiBootNext".
+ type: str
+ update_username:
+ required: false
+ aliases: [ account_updatename ]
+ description:
+ - New user name for updating account_username.
+ type: str
+ version_added: '0.2.0'
+ account_properties:
+ required: false
+ description:
+ - Properties of account service to update.
+ type: dict
+ default: {}
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - ID of the System, Manager or Chassis to modify.
+ type: str
+ version_added: '0.2.0'
+ update_image_uri:
+ required: false
+ description:
+ - URI of the image for the update.
+ type: str
+ version_added: '0.2.0'
+ update_protocol:
+ required: false
+ description:
+ - Protocol for the update.
+ type: str
+ version_added: '0.2.0'
+ update_targets:
+ required: false
+ description:
+ - List of target resource URIs to apply the update to.
+ type: list
+ elements: str
+ default: []
+ version_added: '0.2.0'
+ update_creds:
+ required: false
+ description:
+ - Credentials for retrieving the update image.
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ username:
+ required: false
+ description:
+ - Username for retrieving the update image.
+ type: str
+ password:
+ required: false
+ description:
+ - Password for retrieving the update image.
+ type: str
+ update_apply_time:
+ required: false
+ description:
+      - When to apply the update.
+ type: str
+ choices:
+ - Immediate
+ - OnReset
+ - AtMaintenanceWindowStart
+ - InMaintenanceWindowOnReset
+ - OnStartUpdateRequest
+ version_added: '6.1.0'
+ update_handle:
+ required: false
+ description:
+ - Handle to check the status of an update in progress.
+ type: str
+ version_added: '6.1.0'
+ virtual_media:
+ required: false
+ description:
+ - Options for VirtualMedia commands.
+ type: dict
+ version_added: '0.2.0'
+ suboptions:
+ media_types:
+ required: false
+ description:
+ - List of media types appropriate for the image.
+ type: list
+ elements: str
+ default: []
+ image_url:
+ required: false
+ description:
+ - URL of the image to insert or eject.
+ type: str
+ inserted:
+ required: false
+ description:
+ - Indicates that the image is treated as inserted on command completion.
+ type: bool
+ default: true
+ write_protected:
+ required: false
+ description:
+ - Indicates that the media is treated as write-protected.
+ type: bool
+ default: true
+ username:
+ required: false
+ description:
+ - Username for accessing the image URL.
+ type: str
+ password:
+ required: false
+ description:
+ - Password for accessing the image URL.
+ type: str
+ transfer_protocol_type:
+ required: false
+ description:
+ - Network protocol to use with the image.
+ type: str
+ transfer_method:
+ required: false
+ description:
+ - Transfer method to use with the image.
+ type: str
+ strip_etag_quotes:
+ description:
+ - Removes surrounding quotes of etag used in C(If-Match) header
+ of C(PATCH) requests.
+ - Only use this option to resolve bad vendor implementation where
+ C(If-Match) only matches the unquoted etag string.
+ type: bool
+ default: false
+ version_added: 3.7.0
+ bios_attributes:
+ required: false
+ description:
+      - BIOS attributes that need to be verified in the given server.
+ type: dict
+ version_added: 6.4.0
+
+author:
+ - "Jose Delarosa (@jose-delarosa)"
+ - "T S Kushal (@TSKushal)"
+'''
+
+EXAMPLES = '''
+ - name: Restart system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulRestart
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Turn system power off
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceOff
+ resource_id: 437XR1138R2
+
+ - name: Restart system power forcefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerForceRestart
+ resource_id: 437XR1138R2
+
+ - name: Shutdown system power gracefully
+ community.general.redfish_command:
+ category: Systems
+ command: PowerGracefulShutdown
+ resource_id: 437XR1138R2
+
+ - name: Turn system power on
+ community.general.redfish_command:
+ category: Systems
+ command: PowerOn
+ resource_id: 437XR1138R2
+
+ - name: Reboot system power
+ community.general.redfish_command:
+ category: Systems
+ command: PowerReboot
+ resource_id: 437XR1138R2
+
+ - name: Set one-time boot device to {{ bootdevice }}
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to UefiTarget of "/0x31/0x33/0x01/0x01"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiTarget"
+ uefi_target: "/0x31/0x33/0x01/0x01"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot device to BootNext target of "Boot0001"
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ resource_id: 437XR1138R2
+ bootdevice: "UefiBootNext"
+ boot_next: "Boot0001"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: EnableContinuousBootOverride
+ resource_id: 437XR1138R2
+ bootdevice: "{{ bootdevice }}"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set one-time boot to BiosSetup
+ community.general.redfish_command:
+ category: Systems
+ command: SetOneTimeBoot
+ boot_next: BiosSetup
+ boot_override_mode: Legacy
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable persistent boot device override
+ community.general.redfish_command:
+ category: Systems
+ command: DisableBootOverride
+
+ - name: Set system indicator LED to blink using security token for auth
+ community.general.redfish_command:
+ category: Systems
+ command: IndicatorLedBlink
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ auth_token: "{{ result.session.token }}"
+
+ - name: Add user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Add user using new option aliases
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+ account_roleid: "{{ account_roleid }}"
+
+ - name: Delete user
+ community.general.redfish_command:
+ category: Accounts
+ command: DeleteUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Disable user
+ community.general.redfish_command:
+ category: Accounts
+ command: DisableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+
+ - name: Add and enable user
+ community.general.redfish_command:
+ category: Accounts
+ command: AddUser,EnableUser
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ new_username: "{{ new_username }}"
+ new_password: "{{ new_password }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user password
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserPassword
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_password: "{{ account_password }}"
+
+ - name: Update user role
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserRole
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ roleid: "{{ roleid }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ account_updatename: "{{ account_updatename }}"
+
+ - name: Update user name
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateUserName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_username: "{{ account_username }}"
+ update_username: "{{ update_username }}"
+
+ - name: Update AccountService properties
+ community.general.redfish_command:
+ category: Accounts
+ command: UpdateAccountServiceProperties
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ account_properties:
+ AccountLockoutThreshold: 5
+ AccountLockoutDuration: 600
+
+ - name: Clear Manager Logs with a timeout of 20 seconds
+ community.general.redfish_command:
+ category: Manager
+ command: ClearLogs
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Create session
+ community.general.redfish_command:
+ category: Sessions
+ command: CreateSession
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Set chassis indicator LED to blink using security token for auth
+ community.general.redfish_command:
+ category: Chassis
+ command: IndicatorLedBlink
+ resource_id: 1U
+ baseuri: "{{ baseuri }}"
+ auth_token: "{{ result.session.token }}"
+
+  - name: Delete session using security token created by CreateSession above
+ community.general.redfish_command:
+ category: Sessions
+ command: DeleteSession
+ baseuri: "{{ baseuri }}"
+ auth_token: "{{ result.session.token }}"
+ session_uri: "{{ result.session.uri }}"
+
+ - name: Clear Sessions
+ community.general.redfish_command:
+ category: Sessions
+ command: ClearSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Simple update
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: https://example.com/myupdate.img
+
+ - name: Simple update with additional options
+ community.general.redfish_command:
+ category: Update
+ command: SimpleUpdate
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: //example.com/myupdate.img
+ update_protocol: FTP
+ update_targets:
+ - /redfish/v1/UpdateService/FirmwareInventory/BMC
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+ - name: Perform requested operations to continue the update
+ community.general.redfish_command:
+ category: Update
+ command: PerformRequestedOperations
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_handle: /redfish/v1/TaskService/TaskMonitors/735
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Systems
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: 1
+
+ - name: Insert Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ media_types:
+ - CD
+ - DVD
+ resource_id: BMC
+
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Systems
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: 1
+
+ - name: Eject Virtual Media
+ community.general.redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: 'http://example.com/images/SomeLinux-current.iso'
+ resource_id: BMC
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: GracefulRestart
+ resource_id: BMC
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Restart manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulRestart
+ resource_id: BMC
+
+ - name: Turn manager power off
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceOff
+ resource_id: BMC
+
+ - name: Restart manager power forcefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerForceRestart
+ resource_id: BMC
+
+ - name: Shutdown manager power gracefully
+ community.general.redfish_command:
+ category: Manager
+ command: PowerGracefulShutdown
+ resource_id: BMC
+
+ - name: Turn manager power on
+ community.general.redfish_command:
+ category: Manager
+ command: PowerOn
+ resource_id: BMC
+
+ - name: Reboot manager power
+ community.general.redfish_command:
+ category: Manager
+ command: PowerReboot
+ resource_id: BMC
+
+ - name: Verify BIOS attributes
+ community.general.redfish_command:
+ category: Systems
+ command: VerifyBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ bios_attributes:
+ SubNumaClustering: "Disabled"
+ WorkloadProfile: "Virtualization-MaxPerformance"
+'''
+
+RETURN = '''
+msg:
+  description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
+return_values:
+ description: Dictionary containing command-specific response data from the action.
+ returned: on success
+ type: dict
+ version_added: 6.1.0
+ sample: {
+ "update_status": {
+ "handle": "/redfish/v1/TaskService/TaskMonitors/735",
+ "messages": [],
+ "resets_requested": [],
+ "ret": true,
+ "status": "New"
+ }
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils.common.text.converters import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["PowerOn", "PowerForceOff", "PowerForceRestart", "PowerGracefulRestart",
+ "PowerGracefulShutdown", "PowerReboot", "SetOneTimeBoot", "EnableContinuousBootOverride", "DisableBootOverride",
+ "IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink", "VirtualMediaInsert", "VirtualMediaEject", "VerifyBiosAttributes"],
+ "Chassis": ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"],
+ "Accounts": ["AddUser", "EnableUser", "DeleteUser", "DisableUser",
+ "UpdateUserRole", "UpdateUserPassword", "UpdateUserName",
+ "UpdateAccountServiceProperties"],
+ "Sessions": ["ClearSessions", "CreateSession", "DeleteSession"],
+ "Manager": ["GracefulRestart", "ClearLogs", "VirtualMediaInsert",
+ "VirtualMediaEject", "PowerOn", "PowerForceOff", "PowerForceRestart",
+ "PowerGracefulRestart", "PowerGracefulShutdown", "PowerReboot"],
+ "Update": ["SimpleUpdate", "PerformRequestedOperations"],
+}
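+
+# For illustration: (category="Systems", command="PowerOn") passes the checks in
+# main() because "PowerOn" is listed under "Systems", while
+# (category="Chassis", command="PowerOn") is rejected, since "Chassis" only
+# accepts the IndicatorLed* commands.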
+
+
+def main():
+ result = {}
+ return_values = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ session_uri=dict(),
+ id=dict(aliases=["account_id"]),
+ new_username=dict(aliases=["account_username"]),
+ new_password=dict(aliases=["account_password"], no_log=True),
+ roleid=dict(aliases=["account_roleid"]),
+ update_username=dict(type='str', aliases=["account_updatename"]),
+ account_properties=dict(type='dict', default={}),
+ bootdevice=dict(),
+ timeout=dict(type='int', default=10),
+ uefi_target=dict(),
+ boot_next=dict(),
+ boot_override_mode=dict(choices=['Legacy', 'UEFI']),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ update_protocol=dict(),
+ update_targets=dict(type='list', elements='str', default=[]),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ update_apply_time=dict(choices=['Immediate', 'OnReset', 'AtMaintenanceWindowStart',
+ 'InMaintenanceWindowOnReset', 'OnStartUpdateRequest']),
+ update_handle=dict(),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ ),
+ strip_etag_quotes=dict(type='bool', default=False),
+ bios_attributes=dict(type="dict")
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
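+
+    # Taken together, required_one_of and mutually_exclusive mean exactly one of
+    # 'username' or 'auth_token' must be supplied, and required_together pulls
+    # in 'password' whenever 'username' is used.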
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # user to add/modify/delete
+ user = {'account_id': module.params['id'],
+ 'account_username': module.params['new_username'],
+ 'account_password': module.params['new_password'],
+ 'account_roleid': module.params['roleid'],
+ 'account_updatename': module.params['update_username'],
+ 'account_properties': module.params['account_properties']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # update options
+ update_opts = {
+ 'update_image_uri': module.params['update_image_uri'],
+ 'update_protocol': module.params['update_protocol'],
+ 'update_targets': module.params['update_targets'],
+ 'update_creds': module.params['update_creds'],
+ 'update_apply_time': module.params['update_apply_time'],
+ 'update_handle': module.params['update_handle'],
+ }
+
+ # Boot override options
+ boot_opts = {
+ 'bootdevice': module.params['bootdevice'],
+ 'uefi_target': module.params['uefi_target'],
+ 'boot_next': module.params['boot_next'],
+ 'boot_override_mode': module.params['boot_override_mode'],
+ }
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # Etag options
+ strip_etag_quotes = module.params['strip_etag_quotes']
+
+ # BIOS Attributes options
+ bios_attributes = module.params['bios_attributes']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)
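+    # (baseuri is expected to be host[:port] only, e.g. a hypothetical
+    # "bmc.example.com:443"; the https:// scheme is prepended unconditionally.)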
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Accounts":
+ ACCOUNTS_COMMANDS = {
+ "AddUser": rf_utils.add_user,
+ "EnableUser": rf_utils.enable_user,
+ "DeleteUser": rf_utils.delete_user,
+ "DisableUser": rf_utils.disable_user,
+ "UpdateUserRole": rf_utils.update_user_role,
+ "UpdateUserPassword": rf_utils.update_user_password,
+ "UpdateUserName": rf_utils.update_user_name,
+ "UpdateAccountServiceProperties": rf_utils.update_accountservice_properties
+ }
+
+ # execute only if we find an Account service resource
+ result = rf_utils._find_accountservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ result = ACCOUNTS_COMMANDS[command](user)
+
+ elif category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command.startswith('Power'):
+ result = rf_utils.manage_system_power(command)
+ elif command == "SetOneTimeBoot":
+ boot_opts['override_enabled'] = 'Once'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "EnableContinuousBootOverride":
+ boot_opts['override_enabled'] = 'Continuous'
+ result = rf_utils.set_boot_override(boot_opts)
+ elif command == "DisableBootOverride":
+ boot_opts['override_enabled'] = 'Disabled'
+ result = rf_utils.set_boot_override(boot_opts)
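+            # Note: SetOneTimeBoot, EnableContinuousBootOverride and
+            # DisableBootOverride all share set_boot_override(); they differ only
+            # in the override_enabled value ('Once', 'Continuous' or 'Disabled'),
+            # presumably mapped to the Redfish BootSourceOverrideEnabled property.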
+ elif command.startswith('IndicatorLed'):
+ result = rf_utils.manage_system_indicator_led(command)
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media, category)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media, category)
+ elif command == 'VerifyBiosAttributes':
+ result = rf_utils.verify_bios_attributes(bios_attributes)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff", "IndicatorLedBlink"]
+
+ # Check if more than one led_command is present
+ num_led_commands = sum([command in led_commands for command in command_list])
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command in led_commands:
+ result = rf_utils.manage_chassis_indicator_led(command)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ClearSessions":
+ result = rf_utils.clear_sessions()
+ elif command == "CreateSession":
+ result = rf_utils.create_session()
+ elif command == "DeleteSession":
+ result = rf_utils.delete_session(module.params['session_uri'])
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+        # standardize on the Power* commands, but allow the legacy
+ # GracefulRestart command
+ if command == 'GracefulRestart':
+ command = 'PowerGracefulRestart'
+
+ if command.startswith('Power'):
+ result = rf_utils.manage_manager_power(command)
+ elif command == 'ClearLogs':
+ result = rf_utils.clear_logs()
+ elif command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media, category)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media, category)
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "SimpleUpdate":
+ result = rf_utils.simple_update(update_opts)
+ if 'update_status' in result:
+ return_values['update_status'] = result['update_status']
+ elif command == "PerformRequestedOperations":
+ result = rf_utils.perform_requested_update_operations(update_opts['update_handle'])
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ del result['ret']
+ changed = result.get('changed', True)
+ session = result.get('session', dict())
+ module.exit_json(changed=changed, session=session,
+ msg='Action was successful',
+ return_values=return_values)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redfish_config.py b/ansible_collections/community/general/plugins/modules/redfish_config.py
new file mode 100644
index 000000000..9f31870e3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redfish_config.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_config
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ set or update a configuration attribute.
+ - Manages BIOS configuration settings.
+ - Manages OOB controller configuration settings.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ username:
+ description:
+ - Username for authenticating to OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authenticating to OOB controller.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to OOB controller.
+ type: str
+ version_added: 2.3.0
+ bios_attributes:
+ required: false
+ description:
+ - Dictionary of BIOS attributes to update.
+ default: {}
+ type: dict
+ version_added: '0.2.0'
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to OOB controller.
+ default: 10
+ type: int
+ boot_order:
+ required: false
+ description:
+ - List of BootOptionReference strings specifying the BootOrder.
+ default: []
+ type: list
+ elements: str
+ version_added: '0.2.0'
+ network_protocols:
+ required: false
+ description:
+ - Setting dict of manager services to update.
+ type: dict
+ default: {}
+ version_added: '0.2.0'
+ resource_id:
+ required: false
+ description:
+ - ID of the System, Manager or Chassis to modify.
+ type: str
+ version_added: '0.2.0'
+ nic_addr:
+ required: false
+ description:
+ - EthernetInterface Address string on OOB controller.
+ default: 'null'
+ type: str
+ version_added: '0.2.0'
+ nic_config:
+ required: false
+ description:
+ - Setting dict of EthernetInterface on OOB controller.
+ type: dict
+ default: {}
+ version_added: '0.2.0'
+ strip_etag_quotes:
+ description:
+ - Removes surrounding quotes of etag used in C(If-Match) header
+ of C(PATCH) requests.
+ - Only use this option to resolve bad vendor implementation where
+ C(If-Match) only matches the unquoted etag string.
+ type: bool
+ default: false
+ version_added: 3.7.0
+ hostinterface_config:
+ required: false
+ description:
+ - Setting dict of HostInterface on OOB controller.
+ type: dict
+ default: {}
+ version_added: '4.1.0'
+ hostinterface_id:
+ required: false
+ description:
+ - Redfish HostInterface instance ID if multiple HostInterfaces are present.
+ type: str
+ version_added: '4.1.0'
+ sessions_config:
+ required: false
+ description:
+ - Setting dict of Sessions.
+ type: dict
+ default: {}
+ version_added: '5.7.0'
+
+author:
+ - "Jose Delarosa (@jose-delarosa)"
+ - "T S Kushal (@TSKushal)"
+'''
+
+EXAMPLES = '''
+ - name: Set BootMode to UEFI
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Uefi"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set multiple BootMode attributes
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable PXE Boot for NIC1
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosAttributes
+ resource_id: 437XR1138R2
+ bios_attributes:
+ PxeDev1EnDis: Enabled
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set BIOS default settings with a timeout of 20 seconds
+ community.general.redfish_config:
+ category: Systems
+ command: SetBiosDefaultSettings
+ resource_id: 437XR1138R2
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+
+ - name: Set boot order
+ community.general.redfish_config:
+ category: Systems
+ command: SetBootOrder
+ boot_order:
+ - Boot0002
+ - Boot0001
+ - Boot0000
+ - Boot0003
+ - Boot0004
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set boot order to the default
+ community.general.redfish_config:
+ category: Systems
+ command: SetDefaultBootOrder
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager Network Protocols
+ community.general.redfish_config:
+ category: Manager
+ command: SetNetworkProtocols
+ network_protocols:
+ SNMP:
+ ProtocolEnabled: true
+ Port: 161
+ HTTP:
+ ProtocolEnabled: false
+ Port: 8080
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set Manager NIC
+ community.general.redfish_config:
+ category: Manager
+ command: SetManagerNic
+ nic_config:
+ DHCPv4:
+ DHCPEnabled: false
+ IPv4StaticAddresses:
+ Address: 192.168.1.3
+ Gateway: 192.168.1.1
+ SubnetMask: 255.255.255.0
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Disable Host Interface
+ community.general.redfish_config:
+ category: Manager
+ command: SetHostInterface
+ hostinterface_config:
+ InterfaceEnabled: false
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable Host Interface for HostInterface resource ID '2'
+ community.general.redfish_config:
+ category: Manager
+ command: SetHostInterface
+ hostinterface_config:
+ InterfaceEnabled: true
+ hostinterface_id: "2"
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Set SessionService Session Timeout to 30 minutes
+ community.general.redfish_config:
+ category: Sessions
+ command: SetSessionService
+ sessions_config:
+ SessionTimeout: 1800
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Enable SecureBoot
+ community.general.redfish_config:
+ category: Systems
+ command: EnableSecureBoot
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+msg:
+  description: Message with action result or error description.
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+from ansible.module_utils.common.text.converters import to_native
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["SetBiosDefaultSettings", "SetBiosAttributes", "SetBootOrder",
+ "SetDefaultBootOrder", "EnableSecureBoot"],
+ "Manager": ["SetNetworkProtocols", "SetManagerNic", "SetHostInterface"],
+ "Sessions": ["SetSessionService"],
+}
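+
+# For illustration: "SetBiosAttributes" is only valid with category "Systems";
+# pairing it with category "Manager" fails the command validation in main().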
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ bios_attributes=dict(type='dict', default={}),
+ timeout=dict(type='int', default=10),
+ boot_order=dict(type='list', elements='str', default=[]),
+ network_protocols=dict(
+ type='dict',
+ default={}
+ ),
+ resource_id=dict(),
+ nic_addr=dict(default='null'),
+ nic_config=dict(
+ type='dict',
+ default={}
+ ),
+ strip_etag_quotes=dict(type='bool', default=False),
+ hostinterface_config=dict(type='dict', default={}),
+ hostinterface_id=dict(),
+ sessions_config=dict(type='dict', default={}),
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # BIOS attributes to update
+ bios_attributes = module.params['bios_attributes']
+
+ # boot order
+ boot_order = module.params['boot_order']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # manager nic
+ nic_addr = module.params['nic_addr']
+ nic_config = module.params['nic_config']
+
+ # Etag options
+ strip_etag_quotes = module.params['strip_etag_quotes']
+
+ # HostInterface config options
+ hostinterface_config = module.params['hostinterface_config']
+
+ # HostInterface instance ID
+ hostinterface_id = module.params['hostinterface_id']
+
+ # Sessions config options
+ sessions_config = module.params['sessions_config']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module,
+ resource_id=resource_id, data_modification=True, strip_etag_quotes=strip_etag_quotes)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a System resource
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetBiosDefaultSettings":
+ result = rf_utils.set_bios_default_settings()
+ elif command == "SetBiosAttributes":
+ result = rf_utils.set_bios_attributes(bios_attributes)
+ elif command == "SetBootOrder":
+ result = rf_utils.set_boot_order(boot_order)
+ elif command == "SetDefaultBootOrder":
+ result = rf_utils.set_default_boot_order()
+ elif command == "EnableSecureBoot":
+ result = rf_utils.enable_secure_boot()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetNetworkProtocols":
+ result = rf_utils.set_network_protocols(module.params['network_protocols'])
+ elif command == "SetManagerNic":
+ result = rf_utils.set_manager_nic(nic_addr, nic_config)
+ elif command == "SetHostInterface":
+ result = rf_utils.set_hostinterface_attributes(hostinterface_config, hostinterface_id)
+
+ elif category == "Sessions":
+ # execute only if we find a Sessions resource
+ result = rf_utils._find_sessionservice_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == "SetSessionService":
+ result = rf_utils.set_session_service(sessions_config)
+
+ # Return data back or fail with proper message
+ if result['ret'] is True:
+ if result.get('warning'):
+ module.warn(to_native(result['warning']))
+
+ module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redfish_info.py b/ansible_collections/community/general/plugins/modules/redfish_info.py
new file mode 100644
index 000000000..364df40b5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redfish_info.py
@@ -0,0 +1,569 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017-2018 Dell EMC Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redfish_info
+short_description: Manages Out-Of-Band controllers using Redfish APIs
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+ - Information retrieved is placed in a location specified by the user.
+ - This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.redfish_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+  check_mode:
+    support: full
+    version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ category:
+ required: false
+ description:
+ - List of categories to execute on OOB controller.
+ default: ['Systems']
+ type: list
+ elements: str
+ command:
+ required: false
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ username:
+ description:
+ - Username for authenticating to OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authenticating to OOB controller.
+ type: str
+ auth_token:
+ description:
+ - Security token for authenticating to OOB controller.
+ type: str
+ version_added: 2.3.0
+ timeout:
+ description:
+ - Timeout in seconds for HTTP requests to OOB controller.
+ default: 10
+ type: int
+ update_handle:
+ required: false
+ description:
+ - Handle to check the status of an update in progress.
+ type: str
+ version_added: '6.1.0'
+
+author: "Jose Delarosa (@jose-delarosa)"
+'''
+
+EXAMPLES = '''
+ - name: Get CPU inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
+
+ - name: Get CPU model
+ community.general.redfish_info:
+ category: Systems
+ command: GetCpuInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
+
+ - name: Get memory inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetMemoryInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Get fan inventory with a timeout of 20 seconds
+ community.general.redfish_info:
+ category: Chassis
+ command: GetFanInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: 20
+ register: result
+
+ - name: Get Virtual Media information
+ community.general.redfish_info:
+ category: Manager
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Virtual Media information from Systems
+ community.general.redfish_info:
+ category: Systems
+ command: GetVirtualMedia
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
+
+ - name: Get Volume Inventory
+ community.general.redfish_info:
+ category: Systems
+ command: GetVolumeInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+      register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
+
+ - name: Get Session information
+ community.general.redfish_info:
+ category: Sessions
+ command: GetSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
+
+ - name: Get default inventory information
+ community.general.redfish_info:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+      register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts | to_nice_json }}"
+
+ - name: Get several inventories
+ community.general.redfish_info:
+ category: Systems
+ command: GetNicInventory,GetBiosAttributes
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system inventory and user information
+ community.general.redfish_info:
+ category: Systems,Accounts
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get default system, user and firmware information
+ community.general.redfish_info:
+ category: ["Systems", "Accounts", "Update"]
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager NIC inventory information
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerNicInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get boot override information
+ community.general.redfish_info:
+ category: Systems
+ command: GetBootOverride
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis inventory
+ community.general.redfish_info:
+ category: Chassis
+ command: GetChassisInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in the Manager category
+ community.general.redfish_info:
+ category: Manager
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware update capability information
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareUpdateCapabilities
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get firmware inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetFirmwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get software inventory
+ community.general.redfish_info:
+ category: Update
+ command: GetSoftwareInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get the status of an update operation
+ community.general.redfish_info:
+ category: Update
+ command: GetUpdateStatus
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_handle: /redfish/v1/TaskService/TaskMonitors/735
+
+ - name: Get Manager Services
+ community.general.redfish_info:
+ category: Manager
+ command: GetNetworkProtocols
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get all information available in all categories
+ community.general.redfish_info:
+ category: all
+ command: all
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get system health report
+ community.general.redfish_info:
+ category: Systems
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get chassis health report
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager health report
+ community.general.redfish_info:
+ category: Manager
+ command: GetHealthReport
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get manager Redfish Host Interface inventory
+ community.general.redfish_info:
+ category: Manager
+ command: GetHostInterfaces
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get Manager Inventory
+ community.general.redfish_info:
+ category: Manager
+ command: GetManagerInventory
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get HPE Thermal Config
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHPEThermalConfig
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+ - name: Get HPE Fan Percent Minimum
+ community.general.redfish_info:
+ category: Chassis
+ command: GetHPEFanPercentMin
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+'''
+
+RETURN = '''
+result:
+    description: Different results depending on task.
+ returned: always
+ type: dict
+ sample: List of CPUs on system
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
+ "GetMemoryInventory", "GetNicInventory", "GetHealthReport",
+ "GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
+ "GetBiosAttributes", "GetBootOrder", "GetBootOverride", "GetVirtualMedia"],
+ "Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower",
+ "GetChassisThermals", "GetChassisInventory", "GetHealthReport", "GetHPEThermalConfig", "GetHPEFanPercentMin"],
+ "Accounts": ["ListUsers"],
+ "Sessions": ["GetSessions"],
+ "Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities", "GetSoftwareInventory",
+ "GetUpdateStatus"],
+ "Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs", "GetNetworkProtocols",
+ "GetHealthReport", "GetHostInterfaces", "GetManagerInventory"],
+}
+
+CATEGORY_COMMANDS_DEFAULT = {
+ "Systems": "GetSystemInventory",
+ "Chassis": "GetFanInventory",
+ "Accounts": "ListUsers",
+ "Update": "GetFirmwareInventory",
+ "Sessions": "GetSessions",
+ "Manager": "GetManagerNicInventory"
+}
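+
+# For illustration: category "Chassis" with no 'command' runs the default
+# "GetFanInventory", while command "all" expands to every entry in
+# CATEGORY_COMMANDS_ALL["Chassis"] (see the command-list build in main()).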
+
+
+def main():
+ result = {}
+ category_list = []
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(type='list', elements='str', default=['Systems']),
+ command=dict(type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10),
+ update_handle=dict(),
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=True,
+ )
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # update handle
+ update_handle = module.params['update_handle']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = RedfishUtils(creds, root_uri, timeout, module)
+
+ # Build Category list
+ if "all" in module.params['category']:
+        category_list = list(CATEGORY_COMMANDS_ALL)
+ else:
+ # one or more categories specified
+ category_list = module.params['category']
+
+ for category in category_list:
+ command_list = []
+ # Build Command list for each Category
+ if category in CATEGORY_COMMANDS_ALL:
+ if not module.params['command']:
+ # True if we don't specify a command --> use default
+ command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
+ elif "all" in module.params['command']:
+                command_list = list(CATEGORY_COMMANDS_ALL[category])
+            else:
+                # one or more commands specified
+ command_list = module.params['command']
+ # Verify that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg="Invalid Command: %s" % cmd)
+ else:
+ # Fail if even one category given is invalid
+ module.fail_json(msg="Invalid Category: %s" % category)
+
+ # Organize by Categories / Commands
+ if category == "Systems":
+ # execute only if we find a Systems resource
+ resource = rf_utils._find_systems_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSystemInventory":
+ result["system"] = rf_utils.get_multi_system_inventory()
+ elif command == "GetCpuInventory":
+ result["cpu"] = rf_utils.get_multi_cpu_inventory()
+ elif command == "GetMemoryInventory":
+ result["memory"] = rf_utils.get_multi_memory_inventory()
+ elif command == "GetNicInventory":
+ result["nic"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetStorageControllerInventory":
+ result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
+ elif command == "GetDiskInventory":
+ result["disk"] = rf_utils.get_multi_disk_inventory()
+ elif command == "GetVolumeInventory":
+ result["volume"] = rf_utils.get_multi_volume_inventory()
+ elif command == "GetBiosAttributes":
+ result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
+ elif command == "GetBootOrder":
+ result["boot_order"] = rf_utils.get_multi_boot_order()
+ elif command == "GetBootOverride":
+ result["boot_override"] = rf_utils.get_multi_boot_override()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_system_health_report()
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia(category)
+
+ elif category == "Chassis":
+ # execute only if we find Chassis resource
+ resource = rf_utils._find_chassis_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFanInventory":
+ result["fan"] = rf_utils.get_fan_inventory()
+ elif command == "GetPsuInventory":
+ result["psu"] = rf_utils.get_psu_inventory()
+ elif command == "GetChassisThermals":
+ result["thermals"] = rf_utils.get_chassis_thermals()
+ elif command == "GetChassisPower":
+ result["chassis_power"] = rf_utils.get_chassis_power()
+ elif command == "GetChassisInventory":
+ result["chassis"] = rf_utils.get_chassis_inventory()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_chassis_health_report()
+ elif command == "GetHPEThermalConfig":
+ result["hpe_thermal_config"] = rf_utils.get_hpe_thermal_config()
+ elif command == "GetHPEFanPercentMin":
+ result["hpe_fan_percent_min"] = rf_utils.get_hpe_fan_percent_min()
+
+ elif category == "Accounts":
+ # execute only if we find an Account service resource
+ resource = rf_utils._find_accountservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "ListUsers":
+ result["user"] = rf_utils.list_users()
+
+ elif category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetFirmwareInventory":
+ result["firmware"] = rf_utils.get_firmware_inventory()
+ elif command == "GetSoftwareInventory":
+ result["software"] = rf_utils.get_software_inventory()
+ elif command == "GetFirmwareUpdateCapabilities":
+ result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
+ elif command == "GetUpdateStatus":
+ result["update_status"] = rf_utils.get_update_status(update_handle)
+
+ elif category == "Sessions":
+ # execute only if we find SessionService resources
+ resource = rf_utils._find_sessionservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetSessions":
+ result["session"] = rf_utils.get_sessions()
+
+ elif category == "Manager":
+ # execute only if we find a Manager service resource
+ resource = rf_utils._find_managers_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+
+ for command in command_list:
+ if command == "GetManagerNicInventory":
+ result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
+ elif command == "GetVirtualMedia":
+ result["virtual_media"] = rf_utils.get_multi_virtualmedia(category)
+ elif command == "GetLogs":
+ result["log"] = rf_utils.get_logs()
+ elif command == "GetNetworkProtocols":
+ result["network_protocols"] = rf_utils.get_network_protocols()
+ elif command == "GetHealthReport":
+ result["health_report"] = rf_utils.get_multi_manager_health_report()
+ elif command == "GetHostInterfaces":
+ result["host_interfaces"] = rf_utils.get_hostinterfaces()
+ elif command == "GetManagerInventory":
+ result["manager"] = rf_utils.get_multi_manager_inventory()
+
+ # Return data back
+ module.exit_json(redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redhat_subscription.py b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
new file mode 100644
index 000000000..79b0d4b4c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redhat_subscription.py
@@ -0,0 +1,1237 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) James Laska (jlaska@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: redhat_subscription
+short_description: Manage registration and subscriptions to RHSM using C(subscription-manager)
+description:
+ - Manage registration and subscription to the Red Hat Subscription Management entitlement platform using the C(subscription-manager) command,
+ registering using D-Bus if possible.
+author: "Barnaby Court (@barnabycourt)"
+notes:
+ - |
+ The module tries to use the D-Bus C(rhsm) service (part of C(subscription-manager))
+ to register, starting from community.general 6.5.0: this is done so credentials
+ (username, password, activation keys) can be passed to C(rhsm) in a secure way.
+ C(subscription-manager) itself gets credentials only as arguments of command line
+ parameters, which is I(not) secure, as they can be easily stolen by checking the
+ process listing on the system. Due to limitations of the D-Bus interface of C(rhsm),
+    the module will I(not) use D-Bus for registration when registering using
+    I(token), when specifying I(environment), or when the system is old
+    (typically RHEL 6 and older).
+ - In order to register a system, subscription-manager requires either a username and password, or an activationkey and an Organization ID.
+ - Since 2.5 values for I(server_hostname), I(server_insecure), I(rhsm_baseurl),
+ I(server_proxy_hostname), I(server_proxy_port), I(server_proxy_user) and
+ I(server_proxy_password) are no longer taken from the C(/etc/rhsm/rhsm.conf)
+ config file and default to None.
+ - It is possible to interact with C(subscription-manager) only as root,
+ so root permissions are required to successfully run this module.
+ - Since community.general 6.5.0, credentials (that is, I(username) and I(password),
+ I(activationkey), or I(token)) are needed only in case the system is not registered,
+ or I(force_register) is specified; this makes it possible to use the module to tweak an
+ already registered system, for example attaching pools to it (using I(pool), or I(pool_ids)),
+ and modifying the C(syspurpose) attributes (using I(syspurpose)).
+requirements:
+ - subscription-manager
+ - Optionally the C(dbus) Python library; this is usually included in the OS
+ as it is used by C(subscription-manager).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - whether to register and subscribe (C(present)), or unregister (C(absent)) a system
+ choices: [ "present", "absent" ]
+ default: "present"
+ type: str
+ username:
+ description:
+ - access.redhat.com or Red Hat Satellite or Katello username
+ type: str
+ password:
+ description:
+ - access.redhat.com or Red Hat Satellite or Katello password
+ type: str
+ token:
+ description:
+ - sso.redhat.com API access token.
+ type: str
+ version_added: 6.3.0
+ server_hostname:
+ description:
+ - Specify an alternative Red Hat Subscription Management or Red Hat Satellite or Katello server
+ type: str
+ server_insecure:
+ description:
+ - Enable or disable https server certificate verification when connecting to C(server_hostname)
+ type: str
+ server_prefix:
+ description:
+ - Specify the prefix when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server.
+ type: str
+ version_added: 3.3.0
+ server_port:
+ description:
+ - Specify the port when registering to the Red Hat Subscription Management or Red Hat Satellite or Katello server.
+ type: str
+ version_added: 3.3.0
+ rhsm_baseurl:
+ description:
+ - Specify CDN baseurl
+ type: str
+ rhsm_repo_ca_cert:
+ description:
+ - Specify an alternative location for a CA certificate for CDN
+ type: str
+ server_proxy_hostname:
+ description:
+ - Specify an HTTP proxy hostname.
+ type: str
+ server_proxy_scheme:
+ description:
+ - Specify an HTTP proxy scheme, for example C(http) or C(https).
+ type: str
+ version_added: 6.2.0
+ server_proxy_port:
+ description:
+ - Specify an HTTP proxy port.
+ type: str
+ server_proxy_user:
+ description:
+ - Specify a user for HTTP proxy with basic authentication
+ type: str
+ server_proxy_password:
+ description:
+ - Specify a password for HTTP proxy with basic authentication
+ type: str
+ auto_attach:
+ description:
+ - Upon successful registration, auto-consume available subscriptions
+ - Added in favor of deprecated autosubscribe in 2.5.
+ type: bool
+ aliases: [autosubscribe]
+ activationkey:
+ description:
+ - supply an activation key for use with registration
+ type: str
+ org_id:
+ description:
+ - Organization ID to use in conjunction with activationkey
+ type: str
+ environment:
+ description:
+ - Register with a specific environment in the destination org. Used with Red Hat Satellite or Katello
+ type: str
+ pool:
+ description:
+ - |
+ Specify a subscription pool name to consume. Regular expressions accepted. Use I(pool_ids) instead if
+ possible, as it is much faster. Mutually exclusive with I(pool_ids).
+ default: '^$'
+ type: str
+ pool_ids:
+ description:
+ - |
+ Specify subscription pool IDs to consume. Prefer over I(pool) when possible as it is much faster.
+ A pool ID may be specified as a C(string) - just the pool ID (ex. C(0123456789abcdef0123456789abcdef)),
+ or as a C(dict) with the pool ID as the key, and a quantity as the value (ex.
+ C(0123456789abcdef0123456789abcdef: 2)). If the quantity is provided, it is used to consume multiple
+ entitlements from a pool (the pool must support this). Mutually exclusive with I(pool).
+ default: []
+ type: list
+ elements: raw
+ consumer_type:
+ description:
+ - The type of unit to register, defaults to system
+ type: str
+ consumer_name:
+ description:
+ - Name of the system to register, defaults to the hostname
+ type: str
+ consumer_id:
+ description:
+ - |
+ References an existing consumer ID to resume using a previous registration
+ for this system. If the system's identity certificate is lost or corrupted,
+ this option allows it to resume using its previous identity and subscriptions.
+ The default is to not specify a consumer ID so a new ID is created.
+ type: str
+ force_register:
+ description:
+ - Register the system even if it is already registered
+ type: bool
+ default: false
+ release:
+ description:
+ - Set a release version
+ type: str
+ syspurpose:
+ description:
+ - Set syspurpose attributes in file C(/etc/rhsm/syspurpose/syspurpose.json)
+ and synchronize these attributes with RHSM server. Syspurpose attributes help attach
+ the most appropriate subscriptions to the system automatically. When C(syspurpose.json) file
+ already contains some attributes, then new attributes overwrite existing attributes.
+ When some attribute is not listed in the new list of attributes, the existing
+ attribute will be removed from C(syspurpose.json) file. Unknown attributes are ignored.
+ type: dict
+ suboptions:
+ usage:
+ description: Syspurpose attribute usage
+ type: str
+ role:
+ description: Syspurpose attribute role
+ type: str
+ service_level_agreement:
+ description: Syspurpose attribute service_level_agreement
+ type: str
+ addons:
+ description: Syspurpose attribute addons
+ type: list
+ elements: str
+ sync:
+ description:
+ - When this option is C(true), syspurpose attributes are synchronized with
+ the RHSM server immediately. When it is C(false), syspurpose attributes
+ will be synchronized with the RHSM server by the rhsmcertd daemon.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Register as user (joe_user) with password (somepass) and auto-subscribe to available content.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+
+- name: Same as above but subscribe to a specific pool by ID.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids: 0123456789abcdef0123456789abcdef
+
+- name: Register and subscribe to multiple pools.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef
+ - 1123456789abcdef0123456789abcdef
+
+- name: Same as above but consume multiple entitlements.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ pool_ids:
+ - 0123456789abcdef0123456789abcdef: 2
+ - 1123456789abcdef0123456789abcdef: 4
+
+- name: Register and pull existing system data.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ consumer_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+
+- name: Register with activationkey and consume subscriptions matching Red Hat Enterprise Server or Red Hat Virtualization
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^(Red Hat Enterprise Server|Red Hat Virtualization)$'
+
+- name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription)
+ community.general.redhat_subscription:
+ state: present
+ activationkey: 1-222333444
+ org_id: 222333444
+ pool: '^Red Hat Enterprise Server$'
+
+- name: Register as user credentials into given environment (against Red Hat Satellite or Katello), and auto-subscribe.
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ environment: Library
+ auto_attach: true
+
+- name: Register as user (joe_user) with password (somepass) and a specific release
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ release: 7.4
+
+- name: Register as user (joe_user) with password (somepass), set syspurpose attributes and synchronize them with server
+ community.general.redhat_subscription:
+ state: present
+ username: joe_user
+ password: somepass
+ auto_attach: true
+ syspurpose:
+ usage: "Production"
+ role: "Red Hat Enterprise Server"
+ service_level_agreement: "Premium"
+ addons:
+ - addon1
+ - addon2
+ sync: true
+'''
+
+RETURN = '''
+subscribed_pool_ids:
+ description: Dictionary of pool IDs to which the system is now subscribed, mapped to the consumed quantities.
+ returned: success
+ type: dict
+ sample: {
+ "8a85f9815ab905d3015ab928c7005de4": "1"
+ }
+'''
+
+from os.path import isfile
+from os import getuid, unlink
+import re
+import shutil
+import tempfile
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six.moves import configparser
+from ansible.module_utils import distro
+
+
+SUBMAN_CMD = None
+
+
+class RegistrationBase(object):
+
+ REDHAT_REPO = "/etc/yum.repos.d/redhat.repo"
+
+ def __init__(self, module, username=None, password=None, token=None):
+ self.module = module
+ self.username = username
+ self.password = password
+ self.token = token
+
+ def configure(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def enable(self):
+ # Remove any existing redhat.repo
+ if isfile(self.REDHAT_REPO):
+ unlink(self.REDHAT_REPO)
+
+ def register(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unregister(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def unsubscribe(self):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def update_plugin_conf(self, plugin, enabled=True):
+ plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
+
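+ # Toggle the plugin's 'enabled' flag in a copy of its configuration
+ # file, then atomically move the copy into place so that a failed
+ # write cannot leave a truncated configuration behind.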
+ if isfile(plugin_conf):
+ tmpfd, tmpfile = tempfile.mkstemp()
+ shutil.copy2(plugin_conf, tmpfile)
+ cfg = configparser.ConfigParser()
+ cfg.read([tmpfile])
+
+ if enabled:
+ cfg.set('main', 'enabled', '1')
+ else:
+ cfg.set('main', 'enabled', '0')
+
+ fd = open(tmpfile, 'w+')
+ cfg.write(fd)
+ fd.close()
+ self.module.atomic_move(tmpfile, plugin_conf)
+
+ def subscribe(self, **kwargs):
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+
+class Rhsm(RegistrationBase):
+ def __init__(self, module, username=None, password=None, token=None):
+ RegistrationBase.__init__(self, module, username, password, token)
+ self.module = module
+
+ def enable(self):
+ '''
+ Enable the system to receive updates from subscription-manager.
+ This involves updating affected yum plugins and removing any
+ conflicting yum repositories.
+ '''
+ RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', True)
+
+ def configure(self, **kwargs):
+ '''
+ Configure the system as directed for registration with RHSM
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ args = [SUBMAN_CMD, 'config']
+
+ # Pass supplied **kwargs as parameters to subscription-manager. Ignore
+ # non-configuration parameters and replace '_' with '.'. For example,
+ # 'server_hostname' becomes '--server.hostname'.
+ options = []
+ for k, v in sorted(kwargs.items()):
+ if re.search(r'^(server|rhsm)_', k) and v is not None:
+ options.append('--%s=%s' % (k.replace('_', '.', 1), v))
+
+ # When there is nothing to configure, it is not necessary to run the
+ # config command, because it would only print the current content of
+ # the configuration file
+ if len(options) == 0:
+ return
+
+ args.extend(options)
+
+ self.module.run_command(args, check_rc=True)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+ Returns:
+ * Boolean - whether the current system is currently registered to
+ RHSM.
+ '''
+
+ args = [SUBMAN_CMD, 'identity']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def _can_connect_to_dbus(self):
+ """
+ Checks whether it is possible to connect to the system D-Bus bus.
+
+ :returns: bool -- whether it is possible to connect to the system D-Bus bus.
+ """
+
+ try:
+ # Technically speaking, subscription-manager uses dbus-python
+ # as D-Bus library, so this ought to work; better be safe than
+ # sorry, I guess...
+ import dbus
+ except ImportError:
+ self.module.debug('dbus Python module not available, will use CLI')
+ return False
+
+ try:
+ bus = dbus.SystemBus()
+ msg = dbus.lowlevel.SignalMessage('/', 'com.example', 'test')
+ bus.send_message(msg)
+ bus.flush()
+
+ except dbus.exceptions.DBusException as e:
+ self.module.debug('Failed to connect to system D-Bus bus, will use CLI: %s' % e)
+ return False
+
+ self.module.debug('Verified system D-Bus bus as usable')
+ return True
+
+ def register(self, was_registered, username, password, token, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register, environment,
+ release):
+ '''
+ Register the current system to the provided RHSM or Red Hat Satellite
+ or Katello server
+
+ Raises:
+ * Exception - if any error occurs during the registration
+ '''
+ # There is no support for token-based registration in the D-Bus API
+ # of rhsm, so always use the CLI in that case;
+ # also, since the specified environments are names, and the D-Bus APIs
+ # require IDs for the environments, use the CLI also in that case
+ if not token and not environment and self._can_connect_to_dbus():
+ self._register_using_dbus(was_registered, username, password, auto_attach,
+ activationkey, org_id, consumer_type,
+ consumer_name, consumer_id,
+ force_register, environment, release)
+ return
+ self._register_using_cli(username, password, token, auto_attach,
+ activationkey, org_id, consumer_type,
+ consumer_name, consumer_id,
+ force_register, environment, release)
+
+ def _register_using_cli(self, username, password, token, auto_attach,
+ activationkey, org_id, consumer_type, consumer_name,
+ consumer_id, force_register, environment, release):
+ '''
+ Register using the 'subscription-manager' command
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'register']
+
+ # Generate command arguments
+ if force_register:
+ args.extend(['--force'])
+
+ if org_id:
+ args.extend(['--org', org_id])
+
+ if auto_attach:
+ args.append('--auto-attach')
+
+ if consumer_type:
+ args.extend(['--type', consumer_type])
+
+ if consumer_name:
+ args.extend(['--name', consumer_name])
+
+ if consumer_id:
+ args.extend(['--consumerid', consumer_id])
+
+ if environment:
+ args.extend(['--environment', environment])
+
+ if activationkey:
+ args.extend(['--activationkey', activationkey])
+ elif token:
+ args.extend(['--token', token])
+ else:
+ if username:
+ args.extend(['--username', username])
+ if password:
+ args.extend(['--password', password])
+
+ if release:
+ args.extend(['--release', release])
+
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def _register_using_dbus(self, was_registered, username, password, auto_attach,
+ activationkey, org_id, consumer_type, consumer_name,
+ consumer_id, force_register, environment, release):
+ '''
+ Register using D-Bus (connecting to the rhsm service)
+
+ Raises:
+ * Exception - if error occurs during the D-Bus communication
+ '''
+ import dbus
+
+ SUBSCRIPTION_MANAGER_LOCALE = 'C'
+ # Seconds to wait for Registration to complete over DBus;
+ # 10 minutes should be a pretty generous timeout.
+ REGISTRATION_TIMEOUT = 600
+
+ def str2int(s, default=0):
+ try:
+ return int(s)
+ except ValueError:
+ return default
+
+ distro_id = distro.id()
+ distro_version_parts = distro.version_parts()
+ distro_version = tuple(str2int(p) for p in distro_version_parts)
+
+ # Stop the rhsm service when using systemd (which means Fedora or
+ # RHEL 7+): this is because the service may not use new configuration bits
+ # - with subscription-manager < 1.26.5-1 (in RHEL < 8.2);
+ # fixed later by https://github.com/candlepin/subscription-manager/pull/2175
+ # - sporadically: https://bugzilla.redhat.com/show_bug.cgi?id=2049296
+ if distro_id == 'fedora' or distro_version[0] >= 7:
+ cmd = ['systemctl', 'stop', 'rhsm']
+ self.module.run_command(cmd, check_rc=True, expand_user_and_vars=False)
+
+ # While there is a 'force' option for the registration, it is actually
+ # not implemented (and thus it does not work)
+ # - in RHEL 7 and earlier
+ # - in RHEL 8 before 8.8: https://bugzilla.redhat.com/show_bug.cgi?id=2118486
+ # - in RHEL 9 before 9.2: https://bugzilla.redhat.com/show_bug.cgi?id=2121350
+ # Hence, use it only when implemented, manually unregistering otherwise.
+ # Match it on RHEL, since we know about it; other distributions
+ # will need their own logic.
+ dbus_force_option_works = False
+ if (distro_id == 'rhel' and
+ ((distro_version[0] == 8 and distro_version[1] >= 8) or
+ (distro_version[0] == 9 and distro_version[1] >= 2) or
+ distro_version[0] > 9)):
+ dbus_force_option_works = True
+
+ if force_register and not dbus_force_option_works and was_registered:
+ self.unregister()
+
+ register_opts = {}
+ if consumer_type:
+ register_opts['consumer_type'] = consumer_type
+ if consumer_name:
+ register_opts['name'] = consumer_name
+ if consumer_id:
+ register_opts['consumerid'] = consumer_id
+ if environment:
+ # The option for environments used to be 'environment' in versions
+ # of RHEL before 8.6, and then it changed to 'environments'; since
+ # the Register*() D-Bus functions reject unknown options, we have
+ # to pass the right option depending on the version -- funky.
+ def supports_option_environments():
+ # subscription-manager in any supported Fedora version
+ # has the new option.
+ if distro_id == 'fedora':
+ return True
+ # Check for RHEL 8 >= 8.6, or RHEL >= 9.
+ if distro_id == 'rhel' and \
+ ((distro_version[0] == 8 and distro_version[1] >= 6) or
+ distro_version[0] >= 9):
+ return True
+ # CentOS: similar checks as for RHEL, with one extra bit:
+ # if the 2nd part of the version is empty, it means it is
+ # CentOS Stream, and thus we can assume it has the latest
+ # version of subscription-manager.
+ if distro_id == 'centos' and \
+ ((distro_version[0] == 8 and
+ (distro_version[1] >= 6 or distro_version_parts[1] == '')) or
+ distro_version[0] >= 9):
+ return True
+ # Unknown or old distro: assume it does not support
+ # the new option.
+ return False
+
+ environment_key = 'environment'
+ if supports_option_environments():
+ environment_key = 'environments'
+ register_opts[environment_key] = environment
+ if force_register and dbus_force_option_works and was_registered:
+ register_opts['force'] = True
+ # Wrap it as proper D-Bus dict
+ register_opts = dbus.Dictionary(register_opts, signature='sv', variant_level=1)
+
+ connection_opts = {}
+ # Wrap it as proper D-Bus dict
+ connection_opts = dbus.Dictionary(connection_opts, signature='sv', variant_level=1)
+
+ bus = dbus.SystemBus()
+ register_server = bus.get_object('com.redhat.RHSM1',
+ '/com/redhat/RHSM1/RegisterServer')
+ address = register_server.Start(
+ SUBSCRIPTION_MANAGER_LOCALE,
+ dbus_interface='com.redhat.RHSM1.RegisterServer',
+ )
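+ # RegisterServer.Start() hands back the address of a private D-Bus
+ # bus; the actual Register*() calls are made on that private bus so
+ # the credentials are not exposed on the shared system bus.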
+
+ try:
+ # Use the private bus to register the system
+ self.module.debug('Connecting to the private DBus')
+ private_bus = dbus.connection.Connection(address)
+
+ try:
+ if activationkey:
+ args = (
+ org_id,
+ [activationkey],
+ register_opts,
+ connection_opts,
+ SUBSCRIPTION_MANAGER_LOCALE,
+ )
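+ # The D-Bus signature 'sasa{sv}a{sv}s' describes the arguments
+ # above: org ID string, array of activation keys, two option
+ # dicts, and the locale string.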
+ private_bus.call_blocking(
+ 'com.redhat.RHSM1',
+ '/com/redhat/RHSM1/Register',
+ 'com.redhat.RHSM1.Register',
+ 'RegisterWithActivationKeys',
+ 'sasa{sv}a{sv}s',
+ args,
+ timeout=REGISTRATION_TIMEOUT,
+ )
+ else:
+ args = (
+ org_id or '',
+ username,
+ password,
+ register_opts,
+ connection_opts,
+ SUBSCRIPTION_MANAGER_LOCALE,
+ )
+ private_bus.call_blocking(
+ 'com.redhat.RHSM1',
+ '/com/redhat/RHSM1/Register',
+ 'com.redhat.RHSM1.Register',
+ 'Register',
+ 'sssa{sv}a{sv}s',
+ args,
+ timeout=REGISTRATION_TIMEOUT,
+ )
+
+ except dbus.exceptions.DBusException as e:
+ # Sometimes we get NoReply but the registration has succeeded.
+ # Check the registration status before deciding if this is an error.
+ if e.get_dbus_name() == 'org.freedesktop.DBus.Error.NoReply':
+ if not self.is_registered:
+ # Host is not registered so re-raise the error
+ raise
+ else:
+ raise
+ # Host was registered so continue
+ finally:
+ # Always shut down the private bus
+ self.module.debug('Shutting down private DBus instance')
+ register_server.Stop(
+ SUBSCRIPTION_MANAGER_LOCALE,
+ dbus_interface='com.redhat.RHSM1.RegisterServer',
+ )
+
+ # Make sure to refresh all the local data: this will fetch all the
+ # certificates, update redhat.repo, etc.
+ self.module.run_command([SUBMAN_CMD, 'refresh'],
+ check_rc=True, expand_user_and_vars=False)
+
+ if auto_attach:
+ args = [SUBMAN_CMD, 'attach', '--auto']
+ self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ # There is no support for setting the release via D-Bus, so invoke
+ # the CLI for this.
+ if release:
+ args = [SUBMAN_CMD, 'release', '--set', release]
+ self.module.run_command(args, check_rc=True, expand_user_and_vars=False)
+
+ def unsubscribe(self, serials=None):
+ '''
+ Unsubscribe a system from subscribed channels
+ Args:
+ serials(list or None): list of serials to unsubscribe. If
+ serials is none or an empty list, then
+ all subscribed channels will be removed.
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ items = []
+ if serials is not None and serials:
+ items = ["--serial=%s" % s for s in serials]
+ if serials is None:
+ items = ["--all"]
+
+ if items:
+ args = [SUBMAN_CMD, 'remove'] + items
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ return serials
+
+ def unregister(self):
+ '''
+ Unregister a currently registered system
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ args = [SUBMAN_CMD, 'unregister']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ self.update_plugin_conf('rhnplugin', False)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def subscribe(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression. It matches regexp against available pool ids first.
+ If any pool ids match, subscribe to those pools and return.
+
+ If no pool ids match, then match regexp against available pool product
+ names. Note this can still easily match many pools. Then subscribe
+ to those pools.
+
+ Since a pool id is a more specific match, we only fallback to matching
+ against names if we didn't match pool ids.
+
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+ # See https://github.com/ansible/ansible/issues/19466
+
+ # subscribe to pools whose pool id matches regexp (and only the pool id)
+ subscribed_pool_ids = self.subscribe_pool(regexp)
+
+ # If we found any matches, we are done
+ # Don't attempt to match pools by product name
+ if subscribed_pool_ids:
+ return subscribed_pool_ids
+
+ # We didn't match any pool ids.
+ # Now try subscribing to pools based on product name match
+ # Note: This can match lots of product names.
+ subscribed_by_product_pool_ids = self.subscribe_product(regexp)
+ if subscribed_by_product_pool_ids:
+ return subscribed_by_product_pool_ids
+
+ # no matches
+ return []
+
+ def subscribe_by_pool_ids(self, pool_ids):
+ """
+ Try to subscribe to the list of pool IDs
+ """
+ available_pools = RhsmPools(self.module)
+
+ available_pool_ids = [p.get_pool_id() for p in available_pools]
+
+ for pool_id, quantity in sorted(pool_ids.items()):
+ if pool_id in available_pool_ids:
+ args = [SUBMAN_CMD, 'attach', '--pool', pool_id]
+ if quantity is not None:
+ args.extend(['--quantity', to_native(quantity)])
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ else:
+ self.module.fail_json(msg='Pool ID: %s not in list of available pools' % pool_id)
+ return pool_ids
+
+ def subscribe_pool(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_pools(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def subscribe_product(self, regexp):
+ '''
+ Subscribe current system to available pools matching the specified
+ regular expression
+ Raises:
+ * Exception - if error occurs while running command
+ '''
+
+ # Available pools ready for subscription
+ available_pools = RhsmPools(self.module)
+
+ subscribed_pool_ids = []
+ for pool in available_pools.filter_products(regexp):
+ pool.subscribe()
+ subscribed_pool_ids.append(pool.get_pool_id())
+ return subscribed_pool_ids
+
+ def update_subscriptions(self, regexp):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+ pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter_pools(regexp)]
+ pool_ids_to_keep.extend([p.get_pool_id() for p in consumed_pools.filter_products(regexp)])
+
+ serials_to_remove = [p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep]
+ serials = self.unsubscribe(serials=serials_to_remove)
+
+ subscribed_pool_ids = self.subscribe(regexp)
+
+ if subscribed_pool_ids or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids,
+ 'unsubscribed_serials': serials}
+
+ def update_subscriptions_by_pool_ids(self, pool_ids):
+ changed = False
+ consumed_pools = RhsmPools(self.module, consumed=True)
+
+ existing_pools = {}
+ serials_to_remove = []
+ for p in consumed_pools:
+ pool_id = p.get_pool_id()
+ quantity_used = p.get_quantity_used()
+ existing_pools[pool_id] = quantity_used
+
+ quantity = pool_ids.get(pool_id, 0)
+ if quantity is not None and quantity != quantity_used:
+ serials_to_remove.append(p.Serial)
+
+ serials = self.unsubscribe(serials=serials_to_remove)
+
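+ # A pool needs to be (re)attached when no quantity was requested
+ # and it is not consumed at all, or when an explicit quantity was
+ # requested that differs from the quantity currently consumed.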
+ missing_pools = {}
+ for pool_id, quantity in sorted(pool_ids.items()):
+ quantity_used = existing_pools.get(pool_id, 0)
+ if (quantity is None and quantity_used == 0) or quantity not in (None, 0, quantity_used):
+ missing_pools[pool_id] = quantity
+
+ self.subscribe_by_pool_ids(missing_pools)
+
+ if missing_pools or serials:
+ changed = True
+ return {'changed': changed, 'subscribed_pool_ids': list(missing_pools.keys()),
+ 'unsubscribed_serials': serials}
+
+ def sync_syspurpose(self):
+ """
+ Try to synchronize syspurpose attributes with server
+ """
+ args = [SUBMAN_CMD, 'status']
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+
+
+class RhsmPool(object):
+ '''
+ Convenience class for housing subscription information
+ '''
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+ def __str__(self):
+ return str(self.__getattribute__('_name'))
+
+ def get_pool_id(self):
+ return getattr(self, 'PoolId', None) or getattr(self, 'PoolID')
+
+ def get_quantity_used(self):
+ return int(getattr(self, 'QuantityUsed'))
+
+ def subscribe(self):
+ args = "subscription-manager attach --pool %s" % self.get_pool_id()
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+class RhsmPools(object):
+ """
+ This class is used for manipulating pool subscriptions with RHSM
+ """
+
+ def __init__(self, module, consumed=False):
+ self.module = module
+ self.products = self._load_product_list(consumed)
+
+ def __iter__(self):
+ return self.products.__iter__()
+
+ def _load_product_list(self, consumed=False):
+ """
+ Load the list of all available or consumed pools for the system into a data structure
+
+ Args:
+ consumed(bool): if True list consumed pools, else list available pools (default False)
+ """
+ args = "subscription-manager list"
+ if consumed:
+ args += " --consumed"
+ else:
+ args += " --available"
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True, environ_update=lang_env)
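+ # Illustrative sample of the output being parsed (one "Key: value"
+ # block per pool, blocks separated by blank lines):
+ #
+ # Subscription Name: Red Hat Enterprise Linux Server
+ # Pool ID: 0123456789abcdef0123456789abcdef
+ # Quantity Used: 1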
+
+ products = []
+ for line in stdout.split('\n'):
+ # Remove leading+trailing whitespace
+ line = line.strip()
+ # An empty line implies the end of an output group
+ if len(line) == 0:
+ continue
+ # If a colon ':' is found, parse
+ elif ':' in line:
+ (key, value) = line.split(':', 1)
+ key = key.strip().replace(" ", "") # To unify
+ value = value.strip()
+ if key in ['ProductName', 'SubscriptionName']:
+ # Remember the name for later processing
+ products.append(RhsmPool(self.module, _name=value, key=value))
+ elif products:
+ # Associate value with most recently recorded product
+ products[-1].__setattr__(key, value)
+ # FIXME - log some warning?
+ # else:
+ # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
+ return products
+
+ def filter_pools(self, regexp='^$'):
+ '''
+ Return a list of RhsmPools whose pool id matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product.get_pool_id()):
+ yield product
+
+ def filter_products(self, regexp='^$'):
+ '''
+ Return a list of RhsmPools whose product name matches the provided regular expression
+ '''
+ r = re.compile(regexp)
+ for product in self.products:
+ if r.search(product._name):
+ yield product
+
+
+class SysPurpose(object):
+ """
+ This class is used for reading and writing to syspurpose.json file
+ """
+
+ SYSPURPOSE_FILE_PATH = "/etc/rhsm/syspurpose/syspurpose.json"
+
+ ALLOWED_ATTRIBUTES = ['role', 'usage', 'service_level_agreement', 'addons']
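+ # Illustrative example of a syspurpose.json this class reads and writes:
+ # {
+ # "role": "Red Hat Enterprise Linux Server",
+ # "service_level_agreement": "Premium",
+ # "usage": "Production"
+ # }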
+
+ def __init__(self, path=None):
+ """
+ Initialize class used for reading syspurpose json file
+ """
+ self.path = path or self.SYSPURPOSE_FILE_PATH
+
+ def update_syspurpose(self, new_syspurpose):
+ """
+ Try to update current syspurpose with new attributes from new_syspurpose
+ """
+ syspurpose = {}
+ syspurpose_changed = False
+ for key, value in new_syspurpose.items():
+ if key in self.ALLOWED_ATTRIBUTES:
+ if value is not None:
+ syspurpose[key] = value
+ elif key == 'sync':
+ pass
+ else:
+ raise KeyError("Attribute: %s not in list of allowed attributes: %s" %
+ (key, self.ALLOWED_ATTRIBUTES))
+ current_syspurpose = self._read_syspurpose()
+ if current_syspurpose != syspurpose:
+ syspurpose_changed = True
+ # Update current syspurpose with new values
+ current_syspurpose.update(syspurpose)
+ # When some key is not listed in new syspurpose, then delete it from current syspurpose
+ # and ignore custom attributes created by user (e.g. "foo": "bar")
+ for key in list(current_syspurpose):
+ if key in self.ALLOWED_ATTRIBUTES and key not in syspurpose:
+ del current_syspurpose[key]
+ self._write_syspurpose(current_syspurpose)
+ return syspurpose_changed
+
+ def _write_syspurpose(self, new_syspurpose):
+ """
+ Try to write the new syspurpose attributes to the json file.
+ """
+ with open(self.path, "w") as fp:
+ fp.write(json.dumps(new_syspurpose, indent=2, ensure_ascii=False, sort_keys=True))
+
+ def _read_syspurpose(self):
+ """
+ Read current syspurpose from json file.
+ """
+ current_syspurpose = {}
+ try:
+ with open(self.path, "r") as fp:
+ content = fp.read()
+ except IOError:
+ pass
+ else:
+ current_syspurpose = json.loads(content)
+ return current_syspurpose
+
+
+def main():
+
+ # Load RHSM configuration from file
+ rhsm = Rhsm(None)
+
+ # Note: the default values for parameters are:
+ # 'type': 'str', 'default': None, 'required': False
+ # So there is no need to repeat these values for each parameter.
+ module = AnsibleModule(
+ argument_spec={
+ 'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'username': {},
+ 'password': {'no_log': True},
+ 'token': {'no_log': True},
+ 'server_hostname': {},
+ 'server_insecure': {},
+ 'server_prefix': {},
+ 'server_port': {},
+ 'rhsm_baseurl': {},
+ 'rhsm_repo_ca_cert': {},
+ 'auto_attach': {'aliases': ['autosubscribe'], 'type': 'bool'},
+ 'activationkey': {'no_log': True},
+ 'org_id': {},
+ 'environment': {},
+ 'pool': {'default': '^$'},
+ 'pool_ids': {'default': [], 'type': 'list', 'elements': 'raw'},
+ 'consumer_type': {},
+ 'consumer_name': {},
+ 'consumer_id': {},
+ 'force_register': {'default': False, 'type': 'bool'},
+ 'server_proxy_hostname': {},
+ 'server_proxy_scheme': {},
+ 'server_proxy_port': {},
+ 'server_proxy_user': {},
+ 'server_proxy_password': {'no_log': True},
+ 'release': {},
+ 'syspurpose': {
+ 'type': 'dict',
+ 'options': {
+ 'role': {},
+ 'usage': {},
+ 'service_level_agreement': {},
+ 'addons': {'type': 'list', 'elements': 'str'},
+ 'sync': {'type': 'bool', 'default': False}
+ }
+ }
+ },
+ required_together=[['username', 'password'],
+ ['server_proxy_hostname', 'server_proxy_port'],
+ ['server_proxy_user', 'server_proxy_password']],
+ mutually_exclusive=[['activationkey', 'username'],
+ ['activationkey', 'token'],
+ ['token', 'username'],
+ ['activationkey', 'consumer_id'],
+ ['activationkey', 'environment'],
+ ['activationkey', 'auto_attach'],
+ ['pool', 'pool_ids']],
+ required_if=[['force_register', True, ['username', 'activationkey', 'token'], True]],
+ )
+
+ if getuid() != 0:
+ module.fail_json(
+ msg="Interacting with subscription-manager requires root permissions ('become: true')"
+ )
+
+ rhsm.module = module
+ state = module.params['state']
+ username = module.params['username']
+ password = module.params['password']
+ token = module.params['token']
+ server_hostname = module.params['server_hostname']
+ server_insecure = module.params['server_insecure']
+ server_prefix = module.params['server_prefix']
+ server_port = module.params['server_port']
+ rhsm_baseurl = module.params['rhsm_baseurl']
+ rhsm_repo_ca_cert = module.params['rhsm_repo_ca_cert']
+ auto_attach = module.params['auto_attach']
+ activationkey = module.params['activationkey']
+ org_id = module.params['org_id']
+ if activationkey and not org_id:
+ module.fail_json(msg='org_id is required when using activationkey')
+ environment = module.params['environment']
+ pool = module.params['pool']
+ pool_ids = {}
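+ # Normalize pool_ids into a dict mapping pool ID to quantity (or None),
+ # for example ['abc...', {'def...': 2}] becomes {'abc...': None, 'def...': 2}.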
+ for value in module.params['pool_ids']:
+ if isinstance(value, dict):
+ if len(value) != 1:
+ module.fail_json(msg='Unable to parse pool_ids option.')
+ pool_id, quantity = list(value.items())[0]
+ else:
+ pool_id, quantity = value, None
+ pool_ids[pool_id] = quantity
+ consumer_type = module.params["consumer_type"]
+ consumer_name = module.params["consumer_name"]
+ consumer_id = module.params["consumer_id"]
+ force_register = module.params["force_register"]
+ server_proxy_hostname = module.params['server_proxy_hostname']
+ server_proxy_port = module.params['server_proxy_port']
+ server_proxy_user = module.params['server_proxy_user']
+ server_proxy_password = module.params['server_proxy_password']
+ release = module.params['release']
+ syspurpose = module.params['syspurpose']
+
+ global SUBMAN_CMD
+ SUBMAN_CMD = module.get_bin_path('subscription-manager', True)
+
+ syspurpose_changed = False
+ if syspurpose is not None:
+ try:
+ syspurpose_changed = SysPurpose().update_syspurpose(syspurpose)
+ except Exception as err:
+ module.fail_json(msg="Failed to update syspurpose attributes: %s" % to_native(err))
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Cache the status of the system before the changes
+ was_registered = rhsm.is_registered
+
+ # Register system
+ if was_registered and not force_register:
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ try:
+ rhsm.sync_syspurpose()
+ except Exception as e:
+ module.fail_json(msg="Failed to synchronize syspurpose attributes: %s" % to_native(e))
+ if pool != '^$' or pool_ids:
+ try:
+ if pool_ids:
+ result = rhsm.update_subscriptions_by_pool_ids(pool_ids)
+ else:
+ result = rhsm.update_subscriptions(pool)
+ except Exception as e:
+ module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(**result)
+ else:
+ if syspurpose_changed is True:
+ module.exit_json(changed=True, msg="Syspurpose attributes changed.")
+ else:
+ module.exit_json(changed=False, msg="System already registered.")
+ else:
+ if not username and not activationkey and not token:
+ module.fail_json(msg="state is present but any of the following are missing: username, activationkey, token")
+ try:
+ rhsm.enable()
+ rhsm.configure(**module.params)
+ rhsm.register(was_registered, username, password, token, auto_attach, activationkey, org_id,
+ consumer_type, consumer_name, consumer_id, force_register,
+ environment, release)
+ if syspurpose and 'sync' in syspurpose and syspurpose['sync'] is True:
+ rhsm.sync_syspurpose()
+ if pool_ids:
+ subscribed_pool_ids = rhsm.subscribe_by_pool_ids(pool_ids)
+ elif pool != '^$':
+ subscribed_pool_ids = rhsm.subscribe(pool)
+ else:
+ subscribed_pool_ids = []
+ except Exception as e:
+ module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, to_native(e)))
+ else:
+ module.exit_json(changed=True,
+ msg="System successfully registered to '%s'." % server_hostname,
+ subscribed_pool_ids=subscribed_pool_ids)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhsm.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+ else:
+ try:
+ rhsm.unsubscribe()
+ rhsm.unregister()
+ except Exception as e:
+ module.fail_json(msg="Failed to unregister: %s" % to_native(e))
+ else:
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % server_hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redis.py b/ansible_collections/community/general/plugins/modules/redis.py
new file mode 100644
index 000000000..1778a067e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis.py
@@ -0,0 +1,335 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis
+short_description: Various redis commands, replica and flush
+description:
+ - Unified utility to interact with redis instances.
+extends_documentation_fragment:
+ - community.general.redis
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ command:
+ description:
+ - The selected redis command
+ - C(config) ensures a configuration setting on an instance.
+ - C(flush) flushes all the instance or a specified db.
+ - C(replica) sets a redis instance in replica or master mode. (C(slave) is an alias for C(replica).)
+ choices: [ config, flush, replica, slave ]
+ type: str
+ tls:
+ default: false
+ version_added: 4.6.0
+ login_user:
+ version_added: 4.6.0
+ validate_certs:
+ version_added: 4.6.0
+ ca_certs:
+ version_added: 4.6.0
+ master_host:
+ description:
+ - The host of the master instance [replica command]
+ type: str
+ master_port:
+ description:
+ - The port of the master instance [replica command]
+ type: int
+ replica_mode:
+ description:
+ - The mode of the redis instance [replica command]
+ - C(slave) is an alias for C(replica).
+ default: replica
+ choices: [ master, replica, slave ]
+ type: str
+ aliases:
+ - slave_mode
+ db:
+ description:
+ - The database to flush (used in db mode) [flush command]
+ type: int
+ flush_mode:
+ description:
+ - Type of flush (all the dbs in a redis instance or a specific one)
+ [flush command]
+ default: all
+ choices: [ all, db ]
+ type: str
+ name:
+ description:
+ - A redis config key.
+ type: str
+ value:
+ description:
+ - A redis config value. When memory size is needed, it is possible
+ to specify it in the usual form of 1KB, 2M, 400MB where the base is 1024.
+ Units are case insensitive, i.e. 1m = 1mb = 1M = 1MB.
+ type: str
+
+notes:
+ - Requires the redis-py Python package on the remote host. You can
+ install it with pip (pip install redis) or with a package manager.
+ https://github.com/andymccurdy/redis-py
+ - If the redis master instance that we are making a replica of is password
+ protected, this needs to be set in the redis.conf in the masterauth variable
+
+seealso:
+ - module: community.general.redis_info
+requirements: [ redis ]
+author: "Xabier Larrakoetxea (@slok)"
+'''
+
+EXAMPLES = '''
+- name: Set local redis instance to be a replica of melee.island on port 6377
+ community.general.redis:
+ command: replica
+ master_host: melee.island
+ master_port: 6377
+
+- name: Deactivate replica mode
+ community.general.redis:
+ command: replica
+ replica_mode: master
+
+- name: Flush all the redis db
+ community.general.redis:
+ command: flush
+ flush_mode: all
+
+- name: Flush only one db in a redis instance
+ community.general.redis:
+ command: flush
+ db: 1
+ flush_mode: db
+
+- name: Configure local redis to have 10000 max clients
+ community.general.redis:
+ command: config
+ name: maxclients
+ value: 10000
+
+- name: Configure local redis maxmemory to 4GB
+ community.general.redis:
+ command: config
+ name: maxmemory
+ value: 4GB
+
+- name: Configure local redis to have lua time limit of 100 ms
+ community.general.redis:
+ command: config
+ name: lua-time-limit
+ value: 100
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ import redis
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ redis_found = False
+else:
+ redis_found = True
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.redis import (
+ fail_imports, redis_auth_argument_spec, redis_auth_params)
+import re
+
+
+# Redis module specific support methods.
+def set_replica_mode(client, master_host, master_port):
+ try:
+ return client.slaveof(master_host, master_port)
+ except Exception:
+ return False
+
+
+def set_master_mode(client):
+ try:
+ return client.slaveof()
+ except Exception:
+ return False
+
+
+def flush(client, db=None):
+ try:
+ if not isinstance(db, int):
+ return client.flushall()
+ else:
+ # The passed client has been connected to the database already
+ return client.flushdb()
+ except Exception:
+ return False
+
+
+# Module execution.
+def main():
+ redis_auth_args = redis_auth_argument_spec(tls_default=False)
+ module_args = dict(
+ command=dict(type='str', choices=['config', 'flush', 'replica', 'slave']),
+ master_host=dict(type='str'),
+ master_port=dict(type='int'),
+ replica_mode=dict(type='str', default='replica', choices=['master', 'replica', 'slave'],
+ aliases=["slave_mode"]),
+ db=dict(type='int'),
+ flush_mode=dict(type='str', default='all', choices=['all', 'db']),
+ name=dict(type='str'),
+ value=dict(type='str'),
+ )
+ module_args.update(redis_auth_args)
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ fail_imports(module, module.params['tls'])
+
+ redis_params = redis_auth_params(module)
+
+ command = module.params['command']
+ if command == "slave":
+ command = "replica"
+
+ # Replica Command section -----------
+ if command == "replica":
+ master_host = module.params['master_host']
+ master_port = module.params['master_port']
+ mode = module.params['replica_mode']
+ if mode == "slave":
+ mode = "replica"
+
+ # Check if we have all the data
+ if mode == "replica": # Only need data if we want to be replica
+ if not master_host:
+ module.fail_json(msg='In replica mode master host must be provided')
+
+ if not master_port:
+ module.fail_json(msg='In replica mode master port must be provided')
+
+ # Connect and check
+ r = redis.StrictRedis(**redis_params)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Check if we are already in the mode that we want
+ info = r.info()
+ if mode == "master" and info["role"] == "master":
+ module.exit_json(changed=False, mode=mode)
+
+ elif mode == "replica" and info["role"] == "slave" and info["master_host"] == master_host and info["master_port"] == master_port:
+ status = dict(
+ status=mode,
+ master_host=master_host,
+ master_port=master_port,
+ )
+ module.exit_json(changed=False, mode=status)
+ else:
+ # Do the stuff
+ # (Check Check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "replica":
+ if module.check_mode or set_replica_mode(r, master_host, master_port):
+ info = r.info()
+ status = {
+ 'status': mode,
+ 'master_host': master_host,
+ 'master_port': master_port,
+ }
+ module.exit_json(changed=True, mode=status)
+ else:
+ module.fail_json(msg='Unable to set replica mode')
+
+ else:
+ if module.check_mode or set_master_mode(r):
+ module.exit_json(changed=True, mode=mode)
+ else:
+ module.fail_json(msg='Unable to set master mode')
+
+ # flush Command section -----------
+ elif command == "flush":
+ db = module.params['db']
+ mode = module.params['flush_mode']
+
+ # Check if we have all the data
+ if mode == "db":
+ if db is None:
+ module.fail_json(msg="In db mode the db number must be provided")
+
+ # Connect and check
+ r = redis.StrictRedis(db=db, **redis_params)
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ # Do the stuff
+ # (Check Check_mode before commands so the commands aren't evaluated
+ # if not necessary)
+ if mode == "all":
+ if module.check_mode or flush(r):
+ module.exit_json(changed=True, flushed=True)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush all databases")
+
+ else:
+ if module.check_mode or flush(r, db):
+ module.exit_json(changed=True, flushed=True, db=db)
+ else: # Flush never fails :)
+ module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+
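+ # human_to_bytes() understands size strings with a 1024 base, for
+ # example '64MB' -> 67108864; the result is stored back as a string
+ # because the value is sent to Redis as a config string.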
+ try: # try to parse the value as if it were the memory size
+ if re.match(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?\s*$', module.params['value'].upper()):
+ value = str(human_to_bytes(module.params['value'].upper()))
+ else:
+ value = module.params['value']
+ except ValueError:
+ value = module.params['value']
+
+ r = redis.StrictRedis(**redis_params)
+
+ try:
+ r.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception as e:
+ module.fail_json(msg="unable to read config: %s" % to_native(e), exception=traceback.format_exc())
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception as e:
+ module.fail_json(msg="unable to write config: %s" % to_native(e), exception=traceback.format_exc())
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ module.fail_json(msg='A valid command must be provided')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_data.py b/ansible_collections/community/general/plugins/modules/redis_data.py
new file mode 100644
index 000000000..c0c8dcc9a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_data.py
@@ -0,0 +1,257 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis_data
+short_description: Set key value pairs in Redis
+version_added: 3.7.0
+description:
+ - Set key value pairs in Redis database.
+author: "Andreas Botzner (@paginabianca)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ key:
+ description:
+ - Database key.
+ required: true
+ type: str
+ value:
+ description:
+ - Value that key should be set to.
+ required: false
+ type: str
+ expiration:
+ description:
+ - Expiration time in milliseconds.
+ Setting this flag will always result in a change in the database.
+ required: false
+ type: int
+ non_existing:
+ description:
+ - Only set key if it does not already exist.
+ required: false
+ type: bool
+ existing:
+ description:
+ - Only set key if it already exists.
+ required: false
+ type: bool
+ keep_ttl:
+ description:
+ - Retain the time to live associated with the key.
+ required: false
+ type: bool
+ state:
+ description:
+ - State of the key.
+ default: present
+ type: str
+ choices:
+ - present
+ - absent
+
+extends_documentation_fragment:
+ - community.general.redis.documentation
+ - community.general.attributes
+
+seealso:
+ - module: community.general.redis_data_incr
+ - module: community.general.redis_data_info
+ - module: community.general.redis
+'''
+
+EXAMPLES = '''
+- name: Set key foo=bar on localhost with no username
+ community.general.redis_data:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+ value: bar
+ state: present
+
+- name: Set key foo=bar if non existing with expiration of 30s
+ community.general.redis_data:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+ value: bar
+ non_existing: true
+ expiration: 30000
+ state: present
+
+- name: Set key foo=bar if existing and keep current TTL
+ community.general.redis_data:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+ value: bar
+ existing: true
+ keep_ttl: true
+
+- name: Set key foo=bar on redishost with custom ca-cert file
+ community.general.redis_data:
+ login_host: redishost
+ login_password: supersecret
+ login_user: someuser
+ validate_certs: true
+ ssl_ca_certs: /path/to/ca/certs
+ key: foo
+ value: bar
+
+- name: Delete key foo on localhost with no username
+ community.general.redis_data:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+ state: absent
+'''
+
+RETURN = '''
+old_value:
+ description: Value of key before setting.
+ returned: on success if state is C(present) and key exists in database.
+ type: str
+ sample: 'old_value_of_key'
+value:
+ description: Value key was set to.
+ returned: on success if state is C(present).
+ type: str
+ sample: 'new_value_of_key'
+msg:
+ description: A short message.
+ returned: always
+ type: str
+ sample: 'Set key: foo to bar'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redis import (
+ fail_imports, redis_auth_argument_spec, RedisAnsible)
+
+
+def main():
+ redis_auth_args = redis_auth_argument_spec()
+ module_args = dict(
+ key=dict(type='str', required=True, no_log=False),
+ value=dict(type='str', required=False),
+ expiration=dict(type='int', required=False),
+ non_existing=dict(type='bool', required=False),
+ existing=dict(type='bool', required=False),
+ keep_ttl=dict(type='bool', required=False),
+ state=dict(type='str', default='present',
+ choices=['present', 'absent']),
+ )
+ module_args.update(redis_auth_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ required_if=[('state', 'present', ('value',))],
+ mutually_exclusive=[['non_existing', 'existing'],
+ ['keep_ttl', 'expiration']],)
+ fail_imports(module)
+
+ redis = RedisAnsible(module)
+
+ key = module.params['key']
+ value = module.params['value']
+ px = module.params['expiration']
+ nx = module.params['non_existing']
+ xx = module.params['existing']
+ keepttl = module.params['keep_ttl']
+ state = module.params['state']
+ set_args = {'name': key, 'value': value, 'px': px,
+ 'nx': nx, 'xx': xx, 'keepttl': keepttl}
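+ # These map directly onto redis-py's SET keyword arguments: px is the
+ # expiration in milliseconds, nx sets only when the key is missing,
+ # xx only when it already exists, keepttl preserves the current TTL.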
+
+ result = {'changed': False}
+
+ old_value = None
+ try:
+ old_value = redis.connection.get(key)
+ except Exception as e:
+ msg = 'Failed to get value of key: {0} with exception: {1}'.format(
+ key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+
+ if state == 'absent':
+ if module.check_mode:
+ if old_value is None:
+ msg = 'Key: {0} not present'.format(key)
+ result['msg'] = msg
+ module.exit_json(**result)
+ else:
+ msg = 'Deleted key: {0}'.format(key)
+ result['msg'] = msg
+ module.exit_json(**result)
+ try:
+ ret = redis.connection.delete(key)
+ if ret == 0:
+ msg = 'Key: {0} not present'.format(key)
+ result['msg'] = msg
+ module.exit_json(**result)
+ else:
+ msg = 'Deleted key: {0}'.format(key)
+ result['msg'] = msg
+ result['changed'] = True
+ module.exit_json(**result)
+ except Exception as e:
+ msg = 'Failed to delete key: {0} with exception: {1}'.format(
+ key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+
+ result['old_value'] = old_value
+ if old_value == value and keepttl is not False and px is None:
+ msg = 'Key {0} already has desired value'.format(key)
+ result['msg'] = msg
+ result['value'] = value
+ module.exit_json(**result)
+ if module.check_mode:
+ result['msg'] = 'Set key: {0}'.format(key)
+ result['value'] = value
+ module.exit_json(**result)
+ try:
+ ret = redis.connection.set(**set_args)
+ if ret is None:
+ if nx:
+ msg = 'Could not set key: {0}. Key already present.'.format(
+ key)
+ else:
+ msg = 'Could not set key: {0}. Key not present.'.format(key)
+ result['msg'] = msg
+ module.fail_json(**result)
+ msg = 'Set key: {0}'.format(key)
+ result['msg'] = msg
+ result['changed'] = True
+ result['value'] = value
+ module.exit_json(**result)
+ except Exception as e:
+ msg = 'Failed to set key: {0} with exception: {1}'.format(key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_data_incr.py b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
new file mode 100644
index 000000000..f927fb11f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_data_incr.py
@@ -0,0 +1,193 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis_data_incr
+short_description: Increment keys in Redis
+version_added: 4.0.0
+description:
+ - Increment integer or float keys in a Redis database and return the new value.
+ - Default increment for all keys is 1. For specific increments use the
+ I(increment_int) and I(increment_float) options.
+author: "Andreas Botzner (@paginabianca)"
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - For C(check_mode) to work, the specified I(redis_user) needs permission to
+ run the C(GET) command on the key, otherwise the module will fail.
+ - When using I(check_mode) the module will try to calculate the value that
+ Redis would return. If the key is not present, 0.0 is used as value.
+ diff_mode:
+ support: none
+options:
+ key:
+ description:
+ - Database key.
+ type: str
+ required: true
+ increment_int:
+ description:
+ - Integer amount to increment the key by.
+ required: false
+ type: int
+ increment_float:
+ description:
+ - Float amount to increment the key by.
+ - This only works with keys that contain float values
+ in their string representation.
+ type: float
+ required: false
+
+
+extends_documentation_fragment:
+ - community.general.redis.documentation
+ - community.general.attributes
+
+seealso:
+ - module: community.general.redis_data
+ - module: community.general.redis_data_info
+ - module: community.general.redis
+'''
+
+EXAMPLES = '''
+- name: Increment integer key foo on localhost with no username and print new value
+ community.general.redis_data_incr:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+ increment_int: 1
+ register: result
+- name: Print new value
+ debug:
+ var: result.value
+
+- name: Increment float key foo by 20.4
+ community.general.redis_data_incr:
+ login_host: redishost
+ login_user: redisuser
+ login_password: somepass
+ key: foo
+ increment_float: '20.4'
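+
+# A minimal sketch: with neither increment_int nor increment_float set, the
+# module falls back to the Redis INCR command and increments the key by 1.
+# Host, password, and key names here are illustrative assumptions.
+- name: Increment key counter by the default of 1
+  community.general.redis_data_incr:
+    login_host: localhost
+    login_password: supersecret
+    key: counter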
+'''
+
+RETURN = '''
+value:
+ description: Incremented value of key
+ returned: on success
+ type: float
+ sample: '4039.4'
+msg:
+ description: A short message.
+ returned: always
+ type: str
+ sample: 'Incremented key: foo by 20.4 to 65.9'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redis import (
+ fail_imports, redis_auth_argument_spec, RedisAnsible)
+
+
+def main():
+ redis_auth_args = redis_auth_argument_spec()
+ module_args = dict(
+ key=dict(type='str', required=True, no_log=False),
+ increment_int=dict(type='int', required=False),
+ increment_float=dict(type='float', required=False),
+ )
+ module_args.update(redis_auth_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ mutually_exclusive=[['increment_int', 'increment_float']],
+ )
+ fail_imports(module)
+
+ redis = RedisAnsible(module)
+ key = module.params['key']
+ increment_float = module.params['increment_float']
+ increment_int = module.params['increment_int']
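+    # Precedence: an explicit float increment wins over an explicit integer
+    # increment; with neither set, the key is incremented by 1.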
+ increment = 1
+ if increment_float is not None:
+ increment = increment_float
+ elif increment_int is not None:
+ increment = increment_int
+
+ result = {'changed': False}
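+    # Check mode must not call INCR/INCRBYFLOAT, so the result is simulated:
+    # read the current value with GET (0.0 when the key is absent) and report
+    # value + increment without modifying the database.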
+ if module.check_mode:
+ value = 0.0
+ try:
+ res = redis.connection.get(key)
+ if res is not None:
+ value = float(res)
+ except ValueError as e:
+            msg = 'Value: {0} of key: {1} is not incrementable (int or float)'.format(
+ res, key)
+ result['msg'] = msg
+ module.fail_json(**result)
+ except Exception as e:
+ msg = 'Failed to get value of key: {0} with exception: {1}'.format(
+ key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+ msg = 'Incremented key: {0} by {1} to {2}'.format(
+ key, increment, value + increment)
+ result['msg'] = msg
+ result['value'] = float(value + increment)
+ module.exit_json(**result)
+
+ if increment_float is not None:
+ try:
+ value = redis.connection.incrbyfloat(key, increment)
+ msg = 'Incremented key: {0} by {1} to {2}'.format(
+ key, increment, value)
+ result['msg'] = msg
+ result['value'] = float(value)
+ result['changed'] = True
+ module.exit_json(**result)
+ except Exception as e:
+ msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
+ key, increment, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+ elif increment_int is not None:
+ try:
+ value = redis.connection.incrby(key, increment)
+ msg = 'Incremented key: {0} by {1} to {2}'.format(
+ key, increment, value)
+ result['msg'] = msg
+ result['value'] = float(value)
+ result['changed'] = True
+ module.exit_json(**result)
+ except Exception as e:
+ msg = 'Failed to increment key: {0} by {1} with exception: {2}'.format(
+ key, increment, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+ else:
+ try:
+ value = redis.connection.incr(key)
+ msg = 'Incremented key: {0} to {1}'.format(key, value)
+ result['msg'] = msg
+ result['value'] = float(value)
+ result['changed'] = True
+ module.exit_json(**result)
+ except Exception as e:
+ msg = 'Failed to increment key: {0} with exception: {1}'.format(
+ key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_data_info.py b/ansible_collections/community/general/plugins/modules/redis_data_info.py
new file mode 100644
index 000000000..c0af61905
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_data_info.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: redis_data_info
+short_description: Get value of key in Redis database
+version_added: 3.7.0
+description:
+  - Get the value of a key in a Redis database.
+author: "Andreas Botzner (@paginabianca)"
+options:
+ key:
+ description:
+ - Database key.
+ type: str
+ required: true
+
+extends_documentation_fragment:
+ - community.general.redis
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+seealso:
+ - module: community.general.redis_data
+ - module: community.general.redis_data_incr
+ - module: community.general.redis_info
+ - module: community.general.redis
+'''
+
+EXAMPLES = '''
+- name: Get key foo=bar from localhost with no username
+ community.general.redis_data_info:
+ login_host: localhost
+ login_password: supersecret
+ key: foo
+
+- name: Get key foo=bar on redishost with custom ca-cert file
+ community.general.redis_data_info:
+ login_host: redishost
+ login_password: supersecret
+    login_user: someuser
+ validate_certs: true
+ ssl_ca_certs: /path/to/ca/certs
+ key: foo
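+
+# A hedged usage sketch: register the result and branch on the returned
+# exists flag. Task and variable names are illustrative assumptions.
+- name: Get key foo and register the result
+  community.general.redis_data_info:
+    login_host: localhost
+    login_password: supersecret
+    key: foo
+  register: result
+
+- name: Print the value only when the key exists
+  ansible.builtin.debug:
+    var: result.value
+  when: result.exists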
+'''
+
+RETURN = '''
+exists:
+  description: If the key exists in the database.
+ returned: on success
+ type: bool
+value:
+  description: Value the key was set to.
+  returned: if the key exists
+ type: str
+ sample: 'value_of_some_key'
+msg:
+ description: A short message.
+ returned: always
+ type: str
+  sample: 'Got key "foo"'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.redis import (
+ fail_imports, redis_auth_argument_spec, RedisAnsible)
+
+
+def main():
+ redis_auth_args = redis_auth_argument_spec()
+ module_args = dict(
+ key=dict(type='str', required=True, no_log=False),
+ )
+ module_args.update(redis_auth_args)
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+ fail_imports(module)
+
+ redis = RedisAnsible(module)
+
+ key = module.params['key']
+ result = {'changed': False}
+
+ value = None
+ try:
+ value = redis.connection.get(key)
+ except Exception as e:
+ msg = 'Failed to get value of key "{0}" with exception: {1}'.format(
+ key, str(e))
+ result['msg'] = msg
+ module.fail_json(**result)
+
+ if value is None:
+ msg = 'Key "{0}" does not exist in database'.format(key)
+ result['exists'] = False
+ else:
+ msg = 'Got key "{0}"'.format(key)
+ result['value'] = value
+ result['exists'] = True
+ result['msg'] = msg
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/redis_info.py b/ansible_collections/community/general/plugins/modules/redis_info.py
new file mode 100644
index 000000000..b9900a7ca
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/redis_info.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: redis_info
+short_description: Gather information about Redis servers
+version_added: '0.2.0'
+description:
+- Gathers information and statistics about Redis servers.
+extends_documentation_fragment:
+- community.general.attributes
+- community.general.attributes.info_module
+options:
+ login_host:
+ description:
+ - The host running the database.
+ type: str
+ default: localhost
+ login_port:
+ description:
+ - The port to connect to.
+ type: int
+ default: 6379
+ login_password:
+ description:
+ - The password used to authenticate with, when authentication is enabled for the Redis server.
+ type: str
+notes:
+- Requires the redis-py Python package on the remote host. You can
+ install it with pip (C(pip install redis)) or with a package manager.
+ U(https://github.com/andymccurdy/redis-py)
+seealso:
+- module: community.general.redis
+requirements: [ redis ]
+author: "Pavlo Bashynskyi (@levonet)"
+'''
+
+EXAMPLES = r'''
+- name: Get server information
+ community.general.redis_info:
+ register: result
+
+- name: Print server information
+ ansible.builtin.debug:
+ var: result.info
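+
+# A minimal sketch for a remote, password-protected instance; the host name
+# and password are illustrative assumptions.
+- name: Get server information from a remote instance
+  community.general.redis_info:
+    login_host: redishost
+    login_port: 6379
+    login_password: supersecret
+  register: remote_info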
+'''
+
+RETURN = r'''
+info:
+ description: The default set of server information sections U(https://redis.io/commands/info).
+ returned: success
+ type: dict
+ sample: {
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 932409344,
+ "allocator_allocated": 932062792,
+ "allocator_frag_bytes": 346552,
+ "allocator_frag_ratio": 1.0,
+ "allocator_resident": 947253248,
+ "allocator_rss_bytes": 14843904,
+ "allocator_rss_ratio": 1.02,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_enabled": 0,
+ "aof_last_bgrewrite_status": "ok",
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_last_write_status": "ok",
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "atomicvar_api": "atomic-builtin",
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 4,
+ "client_recent_max_output_buffer": 0,
+ "cluster_enabled": 0,
+ "config_file": "",
+ "configured_hz": 10,
+ "connected_clients": 4,
+ "connected_slaves": 0,
+ "db0": {
+ "avg_ttl": 1945628530,
+ "expires": 16,
+ "keys": 3341411
+ },
+ "evicted_keys": 0,
+ "executable": "/data/redis-server",
+ "expired_keys": 9,
+ "expired_stale_perc": 1.72,
+ "expired_time_cap_reached_count": 0,
+ "gcc_version": "9.2.0",
+ "hz": 10,
+ "instantaneous_input_kbps": 0.0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0.0,
+ "keyspace_hits": 0,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 0,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 11603632,
+ "master_repl_offset": 118831417,
+ "master_replid": "0d904704e424e38c3cd896783e9f9d28d4836e5e",
+ "master_replid2": "0000000000000000000000000000000000000000",
+ "maxmemory": 0,
+ "maxmemory_human": "0B",
+ "maxmemory_policy": "noeviction",
+ "mem_allocator": "jemalloc-5.1.0",
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 49694,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 12355480,
+ "mem_fragmentation_ratio": 1.01,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 1048576,
+ "migrate_cached_sockets": 0,
+ "multiplexing_api": "epoll",
+ "number_of_cached_scripts": 0,
+ "os": "Linux 3.10.0-862.14.4.el7.x86_64 x86_64",
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 671,
+ "rdb_current_bgsave_time_sec": -1,
+ "rdb_last_bgsave_status": "ok",
+ "rdb_last_bgsave_time_sec": -1,
+ "rdb_last_cow_size": 0,
+ "rdb_last_save_time": 1588702236,
+ "redis_build_id": "a31260535f820267",
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "redis_mode": "standalone",
+ "redis_version": "999.999.999",
+ "rejected_connections": 0,
+ "repl_backlog_active": 1,
+ "repl_backlog_first_byte_offset": 118707937,
+ "repl_backlog_histlen": 123481,
+ "repl_backlog_size": 1048576,
+ "role": "master",
+ "rss_overhead_bytes": -3051520,
+ "rss_overhead_ratio": 1.0,
+ "run_id": "8d252f66c3ef89bd60a060cf8dc5cfe3d511c5e4",
+ "second_repl_offset": 118830003,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 885,
+ "total_connections_received": 10,
+ "total_net_input_bytes": 802709255,
+ "total_net_output_bytes": 31754,
+ "total_system_memory": 135029538816,
+ "total_system_memory_human": "125.76G",
+ "uptime_in_days": 53,
+ "uptime_in_seconds": 4631778,
+ "used_cpu_sys": 4.668282,
+ "used_cpu_sys_children": 0.002191,
+ "used_cpu_user": 4.21088,
+ "used_cpu_user_children": 0.0,
+ "used_memory": 931908760,
+ "used_memory_dataset": 910774306,
+ "used_memory_dataset_perc": "97.82%",
+ "used_memory_human": "888.74M",
+ "used_memory_lua": 37888,
+ "used_memory_lua_human": "37.00K",
+ "used_memory_overhead": 21134454,
+ "used_memory_peak": 932015216,
+ "used_memory_peak_human": "888.84M",
+ "used_memory_peak_perc": "99.99%",
+ "used_memory_rss": 944201728,
+ "used_memory_rss_human": "900.46M",
+ "used_memory_scripts": 0,
+ "used_memory_scripts_human": "0B",
+ "used_memory_startup": 791264
+ }
+'''
+
+import traceback
+
+REDIS_IMP_ERR = None
+try:
+ from redis import StrictRedis
+ HAS_REDIS_PACKAGE = True
+except ImportError:
+ REDIS_IMP_ERR = traceback.format_exc()
+ HAS_REDIS_PACKAGE = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def redis_client(**client_params):
+ return StrictRedis(**client_params)
+
+
+# Module execution.
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ login_host=dict(type='str', default='localhost'),
+ login_port=dict(type='int', default=6379),
+ login_password=dict(type='str', no_log=True),
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAS_REDIS_PACKAGE:
+ module.fail_json(msg=missing_required_lib('redis'), exception=REDIS_IMP_ERR)
+
+ login_host = module.params['login_host']
+ login_port = module.params['login_port']
+ login_password = module.params['login_password']
+
+ # Connect and check
+ client = redis_client(host=login_host, port=login_port, password=login_password)
+ try:
+ client.ping()
+ except Exception as e:
+ module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
+
+ info = client.info()
+ module.exit_json(changed=False, info=info)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rhevm.py b/ansible_collections/community/general/plugins/modules/rhevm.py
new file mode 100644
index 000000000..c129a2df5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rhevm.py
@@ -0,0 +1,1506 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhevm
+short_description: RHEV/oVirt automation
+description:
+ - This module only supports oVirt/RHEV version 3.
+ - A newer module M(ovirt.ovirt.ovirt_vm) supports oVirt/RHV version 4.
+  - Allows you to create/remove/update or power-manage virtual machines on a RHEV/oVirt platform.
+requirements:
+ - ovirtsdk
+author:
+ - Timothy Vandenbrande (@TimothyVandenbrande)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ type: str
+ default: admin@internal
+ password:
+ description:
+ - The password for user authentication.
+ type: str
+ required: true
+ server:
+ description:
+      - The name/IP of your RHEV-M/oVirt instance.
+ type: str
+ default: 127.0.0.1
+ port:
+ description:
+ - The port on which the API is reachable.
+ type: int
+ default: 443
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ type: bool
+ default: false
+ name:
+ description:
+ - The name of the VM.
+ type: str
+ cluster:
+ description:
+      - The RHEV/oVirt cluster in which you want your VM to start.
+ type: str
+ default: ''
+ datacenter:
+ description:
+      - The RHEV/oVirt datacenter in which you want your VM to start.
+ type: str
+ default: Default
+ state:
+ description:
+      - This serves to create/remove/update or power-manage your VM.
+ type: str
+ choices: [ absent, cd, down, info, ping, present, restarted, up ]
+ default: present
+ image:
+ description:
+ - The template to use for the VM.
+ type: str
+ type:
+ description:
+ - To define if the VM is a server or desktop.
+ type: str
+ choices: [ desktop, host, server ]
+ default: server
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ type: str
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ type: int
+ default: 2
+ cpu_share:
+ description:
+ - This parameter is used to configure the CPU share.
+ type: int
+ default: 0
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ type: int
+ default: 1
+ osver:
+ description:
+ - The operating system option in RHEV/oVirt.
+ type: str
+ default: rhel_6x64
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ type: int
+ default: 1
+ vm_ha:
+ description:
+      - To make your VM highly available.
+ type: bool
+ default: true
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ type: list
+ elements: str
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ type: list
+ elements: str
+ aliases: [ interfaces, nics ]
+ boot_order:
+ description:
+      - This option uses complex arguments and is a list of items that specify the boot order.
+ type: list
+ elements: str
+ default: [ hd, network ]
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ type: bool
+ default: true
+ cd_drive:
+ description:
+      - The CD you wish to have mounted on the VM when I(state = 'cd').
+ type: str
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up').
+ - When I(state = 'down').
+ - When I(state = 'restarted').
+ type: int
+'''
+
+RETURN = r'''
+vm:
+  description: Returns all of the VM's variables and the result of the execution.
+ returned: always
+ type: dict
+ sample: {
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }
+'''
+
+EXAMPLES = r'''
+- name: Basic get info from VM
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ state: info
+
+- name: Basic create example from image
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: demo
+ cluster: centos
+ image: centos7_x64
+ state: present
+
+- name: Power management
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: uptime_server
+ image: centos7_x64
+ state: down
+
+- name: Multi disk, multi nic create example
+ community.general.rhevm:
+ server: rhevm01
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ cluster: RH
+ name: server007
+ type: server
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: eth0
+ vlan: vlan2202
+ - name: eth1
+ vlan: vlan36
+ - name: eth2
+ vlan: vlan38
+ - name: eth3
+ vlan: vlan2202
+ disks:
+ - name: root
+ size: 10
+ domain: ssd-san
+ - name: swap
+ size: 10
+ domain: 15kiscsi-san
+ - name: opt
+ size: 10
+ domain: 15kiscsi-san
+ - name: var
+ size: 10
+ domain: 10kiscsi-san
+ - name: home
+ size: 10
+ domain: sata-san
+ boot_order:
+ - network
+ - hd
+ state: present
+
+- name: Add a CD to the disk cd_drive
+ community.general.rhevm:
+ user: '{{ rhev.admin.name }}'
+ password: '{{ rhev.admin.pass }}'
+ name: server007
+ cd_drive: rhev-tools-setup.iso
+ state: cd
+
+- name: New host deployment + host network configuration
+ community.general.rhevm:
+ password: '{{ rhevm.admin.pass }}'
+ name: ovirt_node007
+ type: host
+ cluster: rhevm01
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: 172.31.224.200
+ netmask: 255.255.254.0
+ - name: p3p2
+ ip: 172.31.225.200
+ netmask: 255.255.254.0
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: rhevm
+ ip: 172.31.222.200
+ netmask: 255.255.255.0
+ management: true
+ - name: bond0.36
+ network: vlan36
+ ip: 10.2.36.200
+ netmask: 255.255.254.0
+ gateway: 10.2.36.254
+ - name: bond0.2202
+ network: vlan2202
+ - name: bond0.38
+ network: vlan38
+ state: present
+'''
+
+import time
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+RHEV_FAILED = 1
+RHEV_SUCCESS = 0
+RHEV_UNAVAILABLE = 2
+
+RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
+STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']
+
+msg = []
+changed = False
+failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except Exception:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise Exception()
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def get_DC(self, dc_name):
+ return self.conn.datacenters.get(name=dc_name)
+
+ def get_DC_byid(self, dc_id):
+ return self.conn.datacenters.get(id=dc_id)
+
+ def get_VM(self, vm_name):
+ return self.conn.vms.get(name=vm_name)
+
+ def get_cluster_byid(self, cluster_id):
+ return self.conn.clusters.get(id=cluster_id)
+
+ def get_cluster(self, cluster_name):
+ return self.conn.clusters.get(name=cluster_name)
+
+ def get_domain_byid(self, dom_id):
+ return self.conn.storagedomains.get(id=dom_id)
+
+ def get_domain(self, domain_name):
+ return self.conn.storagedomains.get(name=domain_name)
+
+ def get_disk(self, disk):
+ return self.conn.disks.get(disk)
+
+ def get_network(self, dc_name, network_name):
+ return self.get_DC(dc_name).networks.get(network_name)
+
+ def get_network_byid(self, network_id):
+ return self.conn.networks.get(id=network_id)
+
+ def get_NIC(self, vm_name, nic_name):
+ return self.get_VM(vm_name).nics.get(nic_name)
+
+ def get_Host(self, host_name):
+ return self.conn.hosts.get(name=host_name)
+
+ def get_Host_byid(self, host_id):
+ return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(memory_policy) * 1024 * 1024 * 1024
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
+ def set_NIC(self, vmname, nicname, newname, vlan, interface):
+ NIC = self.get_NIC(vmname, nicname)
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ NETWORK = self.get_network(str(DC.name), vlan)
+ checkFail()
+ if NIC.name != newname:
+ NIC.name = newname
+ setMsg('Updating iface name to ' + newname)
+ setChanged()
+ if str(NIC.network.id) != str(NETWORK.id):
+ NIC.set_network(NETWORK)
+ setMsg('Updating iface network to ' + vlan)
+ setChanged()
+ if NIC.interface != interface:
+ NIC.interface = interface
+ setMsg('Updating iface interface to ' + interface)
+ setChanged()
+ try:
+ NIC.update()
+ setMsg('iface has successfully been updated.')
+ except Exception as e:
+ setMsg("Failed to update the iface.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_Host(self, host_name, cluster, ifaces):
+ HOST = self.get_Host(host_name)
+ CLUSTER = self.get_cluster(cluster)
+
+ if HOST is None:
+ setMsg("Host does not exist.")
+ ifacelist = dict()
+ networklist = []
+ manageip = ''
+
+ try:
+ for iface in ifaces:
+ try:
+ setMsg('creating host interface ' + iface['name'])
+ if 'management' in iface:
+ manageip = iface['ip']
+ if 'boot_protocol' not in iface:
+ if 'ip' in iface:
+ iface['boot_protocol'] = 'static'
+ else:
+ iface['boot_protocol'] = 'none'
+ if 'ip' not in iface:
+ iface['ip'] = ''
+ if 'netmask' not in iface:
+ iface['netmask'] = ''
+ if 'gateway' not in iface:
+ iface['gateway'] = ''
+
+ if 'network' in iface:
+ if 'bond' in iface:
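+                                # Build an LACP (mode 4, miimon 100) bond from
+                                # the slave ifaces created earlier in the loop.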
+ bond = []
+ for slave in iface['bond']:
+ bond.append(ifacelist[slave])
+ try:
+ tmpiface = params.Bonding(
+ slaves=params.Slaves(host_nic=bond),
+ options=params.Options(
+ option=[
+ params.Option(name='miimon', value='100'),
+ params.Option(name='mode', value='4')
+ ]
+ )
+ )
+ except Exception as e:
+ setMsg('Failed to create the bond for ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ try:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ),
+ override_configuration=True,
+ bonding=tmpiface)
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ except Exception as e:
+                                    setMsg('Failed to set ' + iface['name'] + ' as network interface')
+ setFailed()
+ setMsg(str(e))
+ return False
+ else:
+ tmpnetwork = params.HostNIC(
+ network=params.Network(name=iface['network']),
+ name=iface['name'],
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ networklist.append(tmpnetwork)
+ setMsg('Applying network ' + iface['name'])
+ else:
+ tmpiface = params.HostNIC(
+ name=iface['name'],
+ network=params.Network(),
+ boot_protocol=iface['boot_protocol'],
+ ip=params.IP(
+ address=iface['ip'],
+ netmask=iface['netmask'],
+ gateway=iface['gateway']
+ ))
+ ifacelist[iface['name']] = tmpiface
+ except Exception as e:
+ setMsg('Failed to set ' + iface['name'])
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ setMsg('Failed to set networks')
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ if manageip == '':
+ setMsg('No management network is defined')
+ setFailed()
+ return False
+
+ try:
+ HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+ if self.conn.hosts.add(HOST):
+ setChanged()
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ while (state != 'non_operational' and state != 'up'):
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to add host to RHEVM')
+ setFailed()
+ return False
+
+ setMsg('status host: up')
+ time.sleep(5)
+
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ setMsg('State before setting to maintenance: ' + str(state))
+ HOST.deactivate()
+ while state != 'maintenance':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ setMsg('status host: maintenance')
+
+ try:
+ HOST.nics.setupnetworks(params.Action(
+ force=True,
+ check_connectivity=False,
+ host_nics=params.HostNics(host_nic=networklist)
+ ))
+ setMsg('nics are set')
+ except Exception as e:
+ setMsg('Failed to apply networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ try:
+ HOST.commitnetconfig()
+ setMsg('Network config is saved')
+ except Exception as e:
+ setMsg('Failed to save networkconfig')
+ setFailed()
+ setMsg(str(e))
+ return False
+ except Exception as e:
+ if 'The Host name is already in use' in str(e):
+ setMsg("Host already exists")
+ else:
+ setMsg("Failed to add host")
+ setFailed()
+ setMsg(str(e))
+ return False
+
+ HOST.activate()
+ while state != 'up':
+ HOST = self.get_Host(host_name)
+ state = HOST.status.state
+ time.sleep(1)
+ if state == 'non_responsive':
+ setMsg('Failed to apply networkconfig.')
+ setFailed()
+ return False
+ setMsg('status host: up')
+ else:
+ setMsg("Host exists.")
+
+ return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
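+        # Poll the VM state every 10 seconds; when a numeric timeout (in
+        # seconds) is given, count it down and fail once it is exhausted.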
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_drive)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_drive)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ VM.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ return self.conn.createVMimage(name, cluster, template, disks)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
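+        # First pass: detect whether any disk is explicitly marked bootable.
+        # If none is, the first disk in the list is made bootable below.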
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+ global failed
+ failed = True
+
+
+def setChanged():
+ global changed
+ changed = True
+
+
+def setMsg(message):
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if memory_policy == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == memory_policy:
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == memory:
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
+ if mem_nok and mem_pol_nok:
+ if memory_policy > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == cpu:
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == cpu_share:
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost:
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
+ user=dict(type='str', default='admin@internal'),
+ password=dict(type='str', required=True, no_log=True),
+ server=dict(type='str', default='127.0.0.1'),
+ port=dict(type='int', default=443),
+ insecure_api=dict(type='bool', default=False),
+ name=dict(type='str'),
+ image=dict(type='str'),
+ datacenter=dict(type='str', default="Default"),
+ type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
+ cluster=dict(type='str', default=''),
+ vmhost=dict(type='str'),
+ vmcpu=dict(type='int', default=2),
+ vmmem=dict(type='int', default=1),
+ disks=dict(type='list', elements='str'),
+ osver=dict(type='str', default="rhel_6x64"),
+ ifaces=dict(type='list', elements='str', aliases=['interfaces', 'nics']),
+ timeout=dict(type='int'),
+ mempol=dict(type='int', default=1),
+ vm_ha=dict(type='bool', default=True),
+ cpu_share=dict(type='int', default=0),
+ boot_order=dict(type='list', elements='str', default=['hd', 'network']),
+ del_prot=dict(type='bool', default=True),
+ cd_drive=dict(type='str'),
+ ),
+ )
+
+ if not HAS_SDK:
+ module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")
+
+ rc = RHEV_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rhn_channel.py b/ansible_collections/community/general/plugins/modules/rhn_channel.py
new file mode 100644
index 000000000..e544af51e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rhn_channel.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Vincent Van de Kussen
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhn_channel
+short_description: Adds or removes Red Hat software channels
+description:
+ - Adds or removes Red Hat software channels.
+author:
+ - Vincent Van der Kussen (@vincentvdk)
+notes:
+ - This module fetches the system id from RHN.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the software channel.
+ required: true
+ type: str
+ sysname:
+ description:
+ - Name of the system as it is known in RHN/Satellite.
+ required: true
+ type: str
+ state:
+ description:
+ - Whether the channel should be present or not, taking action if the state is different from what is stated.
+ default: present
+ choices: [ present, absent ]
+ type: str
+ url:
+ description:
+ - The full URL to the RHN/Satellite API.
+ required: true
+ type: str
+ user:
+ description:
+ - RHN/Satellite login.
+ required: true
+ type: str
+ password:
+ description:
+ - RHN/Satellite password.
+ aliases: [pwd]
+ required: true
+ type: str
+ validate_certs:
+ description:
+ - If C(False), SSL certificates will not be validated.
+      - This should only be set to C(False) when used on self-controlled sites
+ using self-signed certificates, and you are absolutely sure that nobody
+ can modify traffic between the module and the site.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Add a Red Hat software channel
+ community.general.rhn_channel:
+ name: rhel-x86_64-server-v2vwin-6
+ sysname: server01
+ url: https://rhn.redhat.com/rpc/api
+ user: rhnuser
+ password: guessme
+ delegate_to: localhost
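+
+# A minimal removal sketch with the same illustrative connection values.
+- name: Remove a Red Hat software channel
+  community.general.rhn_channel:
+    name: rhel-x86_64-server-v2vwin-6
+    sysname: server01
+    url: https://rhn.redhat.com/rpc/api
+    user: rhnuser
+    password: guessme
+    state: absent
+  delegate_to: localhost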
+'''
+
+import ssl
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+def get_systemid(client, session, sysname):
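+    # Scan the caller's registered systems for a matching profile name and
+    # return its numeric system id; implicitly returns None when not found.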
+ systems = client.system.listUserSystems(session)
+ for system in systems:
+ if system.get('name') == sysname:
+ idres = system.get('id')
+ idd = int(idres)
+ return idd
+
+
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
+
+
+def base_channels(client, session, sys_id):
+ basechan = client.channel.software.listSystemChannels(session, sys_id)
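+    # Some API versions return the channel name under 'label', others under
+    # 'channel_label'; try the first and fall back to the second.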
+ try:
+ chans = [item['label'] for item in basechan]
+ except KeyError:
+ chans = [item['channel_label'] for item in basechan]
+ return chans
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ name=dict(type='str', required=True),
+ sysname=dict(type='str', required=True),
+ url=dict(type='str', required=True),
+ user=dict(type='str', required=True),
+ password=dict(type='str', required=True, aliases=['pwd'], no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ )
+ )
+
+ state = module.params['state']
+ channelname = module.params['name']
+ systname = module.params['sysname']
+ saturl = module.params['url']
+ user = module.params['user']
+ password = module.params['password']
+ validate_certs = module.params['validate_certs']
+
+ ssl_context = None
+ if not validate_certs:
+        try:  # Python 2.7.9 and newer
+            ssl_context = ssl._create_unverified_context()
+        except AttributeError:  # Python 2.7.8 and older do not verify HTTPS certificates by default
+            pass
+
+ # initialize connection
+ if ssl_context:
+ client = xmlrpc_client.ServerProxy(saturl, context=ssl_context)
+ else:
+ client = xmlrpc_client.Server(saturl)
+
+ try:
+ session = client.auth.login(user, password)
+ except Exception as e:
+ module.fail_json(msg="Unable to establish session with Satellite server: %s " % to_text(e))
+
+ if not session:
+ module.fail_json(msg="Failed to establish session with Satellite server.")
+
+ # get systemid
+ try:
+ sys_id = get_systemid(client, session, systname)
+ except Exception as e:
+ module.fail_json(msg="Unable to get system id: %s " % to_text(e))
+
+ if not sys_id:
+ module.fail_json(msg="Failed to get system id.")
+
+ # get channels for system
+ try:
+ chans = base_channels(client, session, sys_id)
+ except Exception as e:
+ module.fail_json(msg="Unable to get channel information: %s " % to_text(e))
+
+ try:
+ if state == 'present':
+ if channelname in chans:
+ module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
+ else:
+ subscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s added" % channelname)
+
+ if state == 'absent':
+ if channelname not in chans:
+ module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
+ else:
+ unsubscribe_channels(channelname, client, session, systname, sys_id)
+ module.exit_json(changed=True, msg="Channel %s removed" % channelname)
+ except Exception as e:
+ module.fail_json(msg='Unable to %s channel (%s): %s' % ('add' if state == 'present' else 'remove', channelname, to_text(e)))
+ finally:
+ client.auth.logout(session)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rhn_register.py b/ansible_collections/community/general/plugins/modules/rhn_register.py
new file mode 100644
index 000000000..1fe9297d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rhn_register.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) James Laska
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: rhn_register
+short_description: Manage Red Hat Network registration using the C(rhnreg_ks) command
+description:
+ - Manage registration to the Red Hat Network.
+author:
+ - James Laska (@jlaska)
+notes:
+ - This is for older Red Hat products. You probably want the M(community.general.redhat_subscription) module instead.
+ - In order to register a system, C(rhnreg_ks) requires either a username and password, or an activationkey.
+requirements:
+ - rhnreg_ks
+ - either libxml2 or lxml
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether to register (C(present)), or unregister (C(absent)) a system.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ username:
+ description:
+ - Red Hat Network username.
+ type: str
+ password:
+ description:
+ - Red Hat Network password.
+ type: str
+ server_url:
+ description:
+ - Specify an alternative Red Hat Network server URL.
+ - The default is the current value of I(serverURL) from C(/etc/sysconfig/rhn/up2date).
+ type: str
+ activationkey:
+ description:
+ - Supply an activation key for use with registration.
+ type: str
+ profilename:
+ description:
+      - Supply a profile name for use with registration.
+ type: str
+ force:
+ description:
+ - Force registration, even if system is already registered.
+ type: bool
+ default: false
+ version_added: 2.0.0
+ ca_cert:
+ description:
+      - Supply a custom SSL CA certificate file for use with registration.
+ type: path
+ aliases: [ sslcacert ]
+ systemorgid:
+ description:
+      - Supply an organizational ID for use with registration.
+ type: str
+ channels:
+ description:
+ - Optionally specify a list of channels to subscribe to upon successful registration.
+ type: list
+ elements: str
+ default: []
+ enable_eus:
+ description:
+      - If C(true), extended update support will be requested.
+ type: bool
+ default: false
+ nopackages:
+ description:
+      - If C(true), the registered node will not upload information about its installed packages to the Satellite server.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = r'''
+- name: Unregister system from RHN
+ community.general.rhn_register:
+ state: absent
+ username: joe_user
+ password: somepass
+
+- name: Register as user with password and auto-subscribe to available content
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+
+- name: Register with activationkey and enable extended update support
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ enable_eus: true
+
+- name: Register with activationkey and set a profilename which may differ from the hostname
+ community.general.rhn_register:
+ state: present
+ activationkey: 1-222333444
+ profilename: host.example.com.custom
+
+- name: Register as user with password against a satellite server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+
+- name: Register as user with password and enable channels
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ channels: rhel-x86_64-server-6-foo-1,rhel-x86_64-server-6-bar-1
+
+- name: Force-register as user with password to ensure registration is current on server
+ community.general.rhn_register:
+ state: present
+ username: joe_user
+ password: somepass
+ server_url: https://xmlrpc.my.satellite/XMLRPC
+ force: true
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import os
+import sys
+
+# Attempt to import rhn client tools
+sys.path.insert(0, '/usr/share/rhn')
+try:
+ import up2date_client
+ import up2date_client.config
+ HAS_UP2DATE_CLIENT = True
+except ImportError:
+ HAS_UP2DATE_CLIENT = False
+
+# INSERT REDHAT SNIPPETS
+from ansible_collections.community.general.plugins.module_utils import redhat
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import urllib, xmlrpc_client
+
+
+class Rhn(redhat.RegistrationBase):
+
+ def __init__(self, module=None, username=None, password=None):
+ redhat.RegistrationBase.__init__(self, module, username, password)
+ self.config = self.load_config()
+ self.server = None
+ self.session = None
+
+ def logout(self):
+ if self.session is not None:
+ self.server.auth.logout(self.session)
+
+ def load_config(self):
+ '''
+ Read configuration from /etc/sysconfig/rhn/up2date
+ '''
+ if not HAS_UP2DATE_CLIENT:
+ return None
+
+ config = up2date_client.config.initUp2dateConfig()
+
+ return config
+
+ @property
+ def server_url(self):
+ return self.config['serverURL']
+
+ @property
+ def hostname(self):
+ '''
+ Return the non-xmlrpc RHN hostname. This is a convenience method
+ used for displaying a more readable RHN hostname.
+
+ Returns: str
+ '''
+ url = urllib.parse.urlparse(self.server_url)
+ return url[1].replace('xmlrpc.', '')
+
+ @property
+ def systemid(self):
+ systemid = None
+ xpath_str = "//member[name='system_id']/value/string"
+
+ if os.path.isfile(self.config['systemIdPath']):
+            with open(self.config['systemIdPath'], 'r') as fd:
+                xml_data = fd.read()
+
+ # Ugh, xml parsing time ...
+ # First, try parsing with libxml2 ...
+ if systemid is None:
+ try:
+ import libxml2
+ doc = libxml2.parseDoc(xml_data)
+ ctxt = doc.xpathNewContext()
+ systemid = ctxt.xpathEval(xpath_str)[0].content
+ doc.freeDoc()
+ ctxt.xpathFreeContext()
+ except ImportError:
+ pass
+
+ # m-kay, let's try with lxml now ...
+ if systemid is None:
+ try:
+ from lxml import etree
+ root = etree.fromstring(xml_data)
+ systemid = root.xpath(xpath_str)[0].text
+ except ImportError:
+ raise Exception('"libxml2" or "lxml" is required for this module.')
+
+ # Strip the 'ID-' prefix
+ if systemid is not None and systemid.startswith('ID-'):
+ systemid = systemid[3:]
+
+ return int(systemid)
+
+ @property
+ def is_registered(self):
+ '''
+ Determine whether the current system is registered.
+
+ Returns: True|False
+ '''
+ return os.path.isfile(self.config['systemIdPath'])
+
+ def configure_server_url(self, server_url):
+ '''
+ Configure server_url for registration
+ '''
+
+ self.config.set('serverURL', server_url)
+ self.config.save()
+
+ def enable(self):
+ '''
+ Prepare the system for RHN registration. This includes ...
+ * enabling the rhnplugin yum plugin
+ * disabling the subscription-manager yum plugin
+ '''
+ redhat.RegistrationBase.enable(self)
+ self.update_plugin_conf('rhnplugin', True)
+ self.update_plugin_conf('subscription-manager', False)
+
+ def register(self, enable_eus=False, activationkey=None, profilename=None, sslcacert=None, systemorgid=None, nopackages=False):
+ '''
+ Register system to RHN. If enable_eus=True, extended update
+ support will be requested.
+ '''
+ register_cmd = ['/usr/sbin/rhnreg_ks', '--force']
+ if self.username:
+ register_cmd.extend(['--username', self.username, '--password', self.password])
+ if self.server_url:
+ register_cmd.extend(['--serverUrl', self.server_url])
+ if enable_eus:
+ register_cmd.append('--use-eus-channel')
+ if nopackages:
+ register_cmd.append('--nopackages')
+ if activationkey is not None:
+ register_cmd.extend(['--activationkey', activationkey])
+ if profilename is not None:
+ register_cmd.extend(['--profilename', profilename])
+ if sslcacert is not None:
+ register_cmd.extend(['--sslCACert', sslcacert])
+ if systemorgid is not None:
+ register_cmd.extend(['--systemorgid', systemorgid])
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True)
+
+ def api(self, method, *args):
+ '''
+ Convenience RPC wrapper
+ '''
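+        # Connect and log in lazily on the first call; the XML-RPC session is
+        # then reused by subsequent api() calls.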
+ if self.server is None:
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ self.server = xmlrpc_client.ServerProxy(url)
+ self.session = self.server.auth.login(self.username, self.password)
+
+ func = getattr(self.server, method)
+ return func(self.session, *args)
+
+ def unregister(self):
+ '''
+ Unregister a previously registered system
+ '''
+
+ # Initiate RPC connection
+ self.api('system.deleteSystems', [self.systemid])
+
+ # Remove systemid file
+ os.unlink(self.config['systemIdPath'])
+
+ def subscribe(self, channels):
+ if not channels:
+ return
+
+ if self._is_hosted():
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ new_channels = [item['channel_label'] for item in current_channels]
+ new_channels.extend(channels)
+ return self.api('channel.software.setSystemChannels', self.systemid, list(new_channels))
+
+ else:
+ current_channels = self.api('channel.software.listSystemChannels', self.systemid)
+ current_channels = [item['label'] for item in current_channels]
+ new_base = None
+ new_childs = []
+ for ch in channels:
+ if ch in current_channels:
+ continue
+ if self.api('channel.software.getDetails', ch)['parent_channel_label'] == '':
+ new_base = ch
+ else:
+ if ch not in new_childs:
+ new_childs.append(ch)
+ out_base = 0
+ out_childs = 0
+
+ if new_base:
+ out_base = self.api('system.setBaseChannel', self.systemid, new_base)
+
+ if new_childs:
+ out_childs = self.api('system.setChildChannels', self.systemid, new_childs)
+
+ return out_base and out_childs
+
+ def _is_hosted(self):
+ '''
+ Return True if we are running against Hosted (rhn.redhat.com) or
+ False otherwise (when running against Satellite or Spacewalk)
+ '''
+ return 'rhn.redhat.com' in self.hostname
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ server_url=dict(type='str'),
+ activationkey=dict(type='str', no_log=True),
+ profilename=dict(type='str'),
+ ca_cert=dict(type='path', aliases=['sslcacert']),
+ systemorgid=dict(type='str'),
+ enable_eus=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ nopackages=dict(type='bool', default=False),
+ channels=dict(type='list', elements='str', default=[]),
+ ),
+ # username/password is required for state=absent, or if channels is not empty
+ # (basically anything that uses self.api requires username/password) but it doesn't
+ # look like we can express that with required_if/required_together/mutually_exclusive
+
+ # only username+password can be used for unregister
+ required_if=[['state', 'absent', ['username', 'password']]],
+ )
+
+ if not HAS_UP2DATE_CLIENT:
+ module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?")
+
+ server_url = module.params['server_url']
+ username = module.params['username']
+ password = module.params['password']
+
+ state = module.params['state']
+ force = module.params['force']
+ activationkey = module.params['activationkey']
+ profilename = module.params['profilename']
+ sslcacert = module.params['ca_cert']
+ systemorgid = module.params['systemorgid']
+ channels = module.params['channels']
+ enable_eus = module.params['enable_eus']
+ nopackages = module.params['nopackages']
+
+ rhn = Rhn(module=module, username=username, password=password)
+
+ # use the provided server url and persist it to the rhn config.
+ if server_url:
+ rhn.configure_server_url(server_url)
+
+ if not rhn.server_url:
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
+
+ # Ensure system is registered
+ if state == 'present':
+
+ # Check for missing parameters ...
+ if not (activationkey or rhn.username or rhn.password):
+ module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
+ rhn.password))
+ if not activationkey and not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
+
+ # Register system
+ if rhn.is_registered and not force:
+ module.exit_json(changed=False, msg="System already registered.")
+
+ try:
+ rhn.enable()
+ rhn.register(enable_eus, activationkey, profilename, sslcacert, systemorgid, nopackages)
+ rhn.subscribe(channels)
+ except Exception as exc:
+ module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, exc))
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ # Ensure system is *not* registered
+ if state == 'absent':
+ if not rhn.is_registered:
+ module.exit_json(changed=False, msg="System already unregistered.")
+
+ if not (rhn.username and rhn.password):
+ module.fail_json(msg="Missing arguments, the system is currently registered and unregistration requires a username and password")
+
+ try:
+ rhn.unregister()
+ except Exception as exc:
+ module.fail_json(msg="Failed to unregister: %s" % exc)
+ finally:
+ rhn.logout()
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rhsm_release.py b/ansible_collections/community/general/plugins/modules/rhsm_release.py
new file mode 100644
index 000000000..6ac4da6e4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rhsm_release.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_release
+short_description: Set or Unset RHSM Release version
+description:
+ - Sets or unsets the release version used by RHSM repositories.
+notes:
+ - This module will fail on an unregistered system.
+ Use the C(redhat_subscription) module to register a system
+ prior to setting the RHSM release.
+ - It is possible to interact with C(subscription-manager) only as root,
+ so root permissions are required to successfully run this module.
+requirements:
+ - Red Hat Enterprise Linux 6+ with subscription-manager installed
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ release:
+ description:
+ - RHSM release version to use.
+      - To unset the release, either pass C(null) for this option or omit it.
+ type: str
+author:
+ - Sean Myers (@seandst)
+'''
+
+EXAMPLES = '''
+# Set release version to 7.1
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "7.1"
+
+# Set release version to 6Server
+- name: Set RHSM release version
+ community.general.rhsm_release:
+ release: "6Server"
+
+# Unset release version
+- name: Unset RHSM release version
+ community.general.rhsm_release:
+ release: null
+'''
+
+RETURN = '''
+current_release:
+ description: The current RHSM release version value
+ returned: success
+ type: str
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+import os
+import re
+
+# Matches release-like values such as 7.2, 5.10, 6Server, 8
+# but rejects unlikely values, like 100Server, 1.100, 7server etc.
+release_matcher = re.compile(r'\b\d{1,2}(?:\.\d{1,2}|Server|Client|Workstation|)\b')
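+# For example, release_matcher.findall('Release: 6Server') yields ['6Server'].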
+
+
+def _sm_release(module, *args):
+ # pass args to s-m release, e.g. _sm_release(module, '--set', '0.1') becomes
+ # "subscription-manager release --set 0.1"
+ sm_bin = module.get_bin_path('subscription-manager', required=True)
+ cmd = '{0} release {1}'.format(sm_bin, " ".join(args))
+ # delegate nonzero rc handling to run_command
+ return module.run_command(cmd, check_rc=True)
+
+
+def get_release(module):
+ # Get the current release version, or None if release unset
+ rc, out, err = _sm_release(module, '--show')
+ try:
+ match = release_matcher.findall(out)[0]
+ except IndexError:
+ # 0'th index did not exist; no matches
+ match = None
+
+ return match
+
+
+def set_release(module, release):
+ # Set current release version, or unset if release is None
+ if release is None:
+ args = ('--unset',)
+ else:
+ args = ('--set', release)
+
+ return _sm_release(module, *args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ release=dict(type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ if os.getuid() != 0:
+ module.fail_json(
+ msg="Interacting with subscription-manager requires root permissions ('become: true')"
+ )
+
+ target_release = module.params['release']
+
+ # sanity check: the target release at least looks like a valid release
+ if target_release and not release_matcher.findall(target_release):
+ module.fail_json(msg='"{0}" does not appear to be a valid release.'.format(target_release))
+
+ # Will fail with useful error from s-m if system not subscribed
+ current_release = get_release(module)
+
+ changed = (target_release != current_release)
+ if not module.check_mode and changed:
+ set_release(module, target_release)
+ # If setting the release fails, then a fail_json would have exited with
+ # the s-m error, e.g. "No releases match '7.20'...". If not, then the
+ # current release is now set to the target release (job's done)
+ current_release = target_release
+
+ module.exit_json(current_release=current_release, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rhsm_repository.py b/ansible_collections/community/general/plugins/modules/rhsm_repository.py
new file mode 100644
index 000000000..eea6e3857
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rhsm_repository.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Giovanni Sciortino (@giovannisciortino)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: rhsm_repository
+short_description: Manage RHSM repositories using the subscription-manager command
+description:
+ - Manage (Enable/Disable) RHSM repositories to the Red Hat Subscription
+ Management entitlement platform using the C(subscription-manager) command.
+author: Giovanni Sciortino (@giovannisciortino)
+notes:
+ - In order to manage RHSM repositories the system must be already registered
+ to RHSM manually or using the Ansible C(redhat_subscription) module.
+ - It is possible to interact with C(subscription-manager) only as root,
+ so root permissions are required to successfully run this module.
+
+requirements:
+ - subscription-manager
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ state:
+ description:
+      - Desired state of the repository. C(present) and C(enabled) enable the
+        repository, while C(absent) and C(disabled) disable it.
+ choices: [present, enabled, absent, disabled]
+ default: "enabled"
+ type: str
+ name:
+ description:
+      - The ID of repositories to enable or disable.
+ - To operate on several repositories this can accept a comma separated
+ list or a YAML list.
+ required: true
+ type: list
+ elements: str
+ purge:
+ description:
+      - Disable all currently enabled repositories that are not specified in C(name).
+        Only set this to C(true) if passing in a list of repositories to the C(name) field.
+        Using this with C(loop) will most likely not have the desired result.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Enable a RHSM repository
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+
+- name: Disable all RHSM repositories
+ community.general.rhsm_repository:
+ name: '*'
+ state: disabled
+
+- name: Enable all repositories starting with rhel-6-server
+ community.general.rhsm_repository:
+ name: rhel-6-server*
+ state: enabled
+
+- name: Disable all repositories except rhel-7-server-rpms
+ community.general.rhsm_repository:
+ name: rhel-7-server-rpms
+ purge: true
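+
+# name also accepts a YAML list; combined with purge, only the listed
+# repositories stay enabled (the repository IDs are illustrative).
+- name: Enable only a fixed set of repositories
+  community.general.rhsm_repository:
+    name:
+      - rhel-7-server-rpms
+      - rhel-7-server-optional-rpms
+    purge: true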
+'''
+
+RETURN = '''
+repositories:
+ description:
+ - The list of RHSM repositories with their states.
+ - When this module is used to change the repository states, this list contains the updated states after the changes.
+ returned: success
+ type: list
+'''
+
+import re
+import os
+from fnmatch import fnmatch
+from copy import deepcopy
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_subscription_manager(module, arguments):
+ # Execute subscription-manager with arguments and manage common errors
+ rhsm_bin = module.get_bin_path('subscription-manager')
+ if not rhsm_bin:
+ module.fail_json(msg='The executable file subscription-manager was not found in PATH')
+
+ lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+ rc, out, err = module.run_command("%s %s" % (rhsm_bin, " ".join(arguments)), environ_update=lang_env)
+
+ if rc == 0 and out == 'This system has no repositories available through subscriptions.\n':
+ module.fail_json(msg='This system has no repositories available through subscriptions')
+ elif rc == 1:
+ module.fail_json(msg='subscription-manager failed with the following error: %s' % err)
+ else:
+ return rc, out, err
+
+
+def get_repository_list(module, list_parameter):
+ # Generate RHSM repository list and return a list of dict
+ if list_parameter == 'list_enabled':
+ rhsm_arguments = ['repos', '--list-enabled']
+ elif list_parameter == 'list_disabled':
+ rhsm_arguments = ['repos', '--list-disabled']
+ elif list_parameter == 'list':
+ rhsm_arguments = ['repos', '--list']
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+
+ skip_lines = [
+ '+----------------------------------------------------------+',
+ ' Available Repositories in /etc/yum.repos.d/redhat.repo'
+ ]
+ repo_id_re = re.compile(r'Repo ID:\s+(.*)')
+ repo_name_re = re.compile(r'Repo Name:\s+(.*)')
+ repo_url_re = re.compile(r'Repo URL:\s+(.*)')
+ repo_enabled_re = re.compile(r'Enabled:\s+(.*)')
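+    # subscription-manager prints each repository as a block of "Repo ID:",
+    # "Repo Name:", "Repo URL:" and "Enabled:" lines; the expressions above
+    # extract one field per line of that block.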
+
+ repo_id = ''
+ repo_name = ''
+ repo_url = ''
+ repo_enabled = ''
+
+ repo_result = []
+ for line in out.splitlines():
+ if line == '' or line in skip_lines:
+ continue
+
+ repo_id_match = repo_id_re.match(line)
+ if repo_id_match:
+ repo_id = repo_id_match.group(1)
+ continue
+
+ repo_name_match = repo_name_re.match(line)
+ if repo_name_match:
+ repo_name = repo_name_match.group(1)
+ continue
+
+ repo_url_match = repo_url_re.match(line)
+ if repo_url_match:
+ repo_url = repo_url_match.group(1)
+ continue
+
+ repo_enabled_match = repo_enabled_re.match(line)
+ if repo_enabled_match:
+ repo_enabled = repo_enabled_match.group(1)
+
+ repo = {
+ "id": repo_id,
+ "name": repo_name,
+ "url": repo_url,
+ "enabled": True if repo_enabled == '1' else False
+ }
+
+ repo_result.append(repo)
+
+ return repo_result
+
+
+def repository_modify(module, state, name, purge=False):
+ name = set(name)
+ current_repo_list = get_repository_list(module, 'list')
+ updated_repo_list = deepcopy(current_repo_list)
+ matched_existing_repo = {}
+ for repoid in name:
+ matched_existing_repo[repoid] = []
+ for idx, repo in enumerate(current_repo_list):
+ if fnmatch(repo['id'], repoid):
+ matched_existing_repo[repoid].append(repo)
+ # Update current_repo_list to return it as result variable
+                updated_repo_list[idx]['enabled'] = state in ('enabled', 'present')
+
+ changed = False
+ results = []
+ diff_before = ""
+ diff_after = ""
+ rhsm_arguments = ['repos']
+
+ for repoid in matched_existing_repo:
+ if len(matched_existing_repo[repoid]) == 0:
+ results.append("%s is not a valid repository ID" % repoid)
+ module.fail_json(results=results, msg="%s is not a valid repository ID" % repoid)
+ for repo in matched_existing_repo[repoid]:
+ if state in ['disabled', 'absent']:
+ if repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is enabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is disabled for this system\n" % repo['id']
+ results.append("Repository '%s' is disabled for this system" % repo['id'])
+ rhsm_arguments += ['--disable', repo['id']]
+ elif state in ['enabled', 'present']:
+ if not repo['enabled']:
+ changed = True
+ diff_before += "Repository '%s' is disabled for this system\n" % repo['id']
+ diff_after += "Repository '%s' is enabled for this system\n" % repo['id']
+ results.append("Repository '%s' is enabled for this system" % repo['id'])
+ rhsm_arguments += ['--enable', repo['id']]
+
+ # Disable all enabled repos on the system that are not in the task and not
+ # marked as disabled by the task
+ if purge:
+ enabled_repo_ids = set(repo['id'] for repo in updated_repo_list if repo['enabled'])
+ matched_repoids_set = set(matched_existing_repo.keys())
+ difference = enabled_repo_ids.difference(matched_repoids_set)
+ if len(difference) > 0:
+ for repoid in difference:
+ changed = True
+                diff_before += "Repository '{repoid}' is enabled for this system\n".format(repoid=repoid)
+                diff_after += "Repository '{repoid}' is disabled for this system\n".format(repoid=repoid)
+ results.append("Repository '{repoid}' is disabled for this system".format(repoid=repoid))
+ rhsm_arguments.extend(['--disable', repoid])
+
+ diff = {'before': diff_before,
+ 'after': diff_after,
+ 'before_header': "RHSM repositories",
+ 'after_header': "RHSM repositories"}
+
+ if not module.check_mode and changed:
+ rc, out, err = run_subscription_manager(module, rhsm_arguments)
+ results = out.splitlines()
+ module.exit_json(results=results, changed=changed, repositories=updated_repo_list, diff=diff)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='list', elements='str', required=True),
+ state=dict(choices=['enabled', 'disabled', 'present', 'absent'], default='enabled'),
+ purge=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ if os.getuid() != 0:
+ module.fail_json(
+ msg="Interacting with subscription-manager requires root permissions ('become: true')"
+ )
+
+ name = module.params['name']
+ state = module.params['state']
+ purge = module.params['purge']
+
+ repository_modify(module, state, name, purge)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/riak.py b/ansible_collections/community/general/plugins/modules/riak.py
new file mode 100644
index 000000000..024e5424d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/riak.py
@@ -0,0 +1,238 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, James Martin <jmartin@basho.com>, Drew Kerrigan <dkerrigan@basho.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: riak
+short_description: This module handles some common Riak operations
+description:
+  - This module can be used to join nodes to a cluster and to check
+    the status of the cluster.
+author:
+ - "James Martin (@jsmartin)"
+ - "Drew Kerrigan (@drewkerrigan)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ command:
+ description:
+ - The command you would like to perform against the cluster.
+ choices: ['ping', 'kv_test', 'join', 'plan', 'commit']
+ type: str
+ config_dir:
+ description:
+      - The path to the riak configuration directory.
+ default: /etc/riak
+ type: path
+ http_conn:
+ description:
+      - The IP address and port listening for Riak HTTP queries.
+ default: 127.0.0.1:8098
+ type: str
+ target_node:
+ description:
+      - The target node for certain operations (join, ping).
+ default: riak@127.0.0.1
+ type: str
+ wait_for_handoffs:
+ description:
+ - Number of seconds to wait for handoffs to complete.
+ type: int
+ default: 0
+ wait_for_ring:
+ description:
+ - Number of seconds to wait for all nodes to agree on the ring.
+ type: int
+ default: 0
+ wait_for_service:
+ description:
+ - Waits for a riak service to come online before continuing.
+ choices: ['kv']
+ type: str
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+- name: "Join's a Riak node to another node"
+ community.general.riak:
+ command: join
+ target_node: riak@10.1.1.1
+
+- name: Wait up to 10 minutes for handoffs to finish. Use with async and poll.
+  community.general.riak:
+    wait_for_handoffs: 600
+
+- name: Wait for riak_kv service to startup
+ community.general.riak:
+ wait_for_service: kv
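+
+# Staged cluster changes such as a join are applied in two steps,
+# mirroring riak-admin's cluster plan/commit workflow.
+- name: Review the staged cluster plan
+  community.general.riak:
+    command: plan
+
+- name: Commit the staged cluster changes
+  community.general.riak:
+    command: commit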
+'''
+
+import json
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def ring_check(module, riak_admin_bin):
+ cmd = '%s ringready' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0 and 'TRUE All nodes agree on the ring' in out:
+ return True
+ else:
+ return False
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ command=dict(required=False, default=None, choices=[
+ 'ping', 'kv_test', 'join', 'plan', 'commit']),
+ config_dir=dict(default='/etc/riak', type='path'),
+ http_conn=dict(required=False, default='127.0.0.1:8098'),
+ target_node=dict(default='riak@127.0.0.1', required=False),
+ wait_for_handoffs=dict(default=0, type='int'),
+ wait_for_ring=dict(default=0, type='int'),
+ wait_for_service=dict(
+ required=False, default=None, choices=['kv']),
+ validate_certs=dict(default=True, type='bool'))
+ )
+
+ command = module.params.get('command')
+ http_conn = module.params.get('http_conn')
+ target_node = module.params.get('target_node')
+ wait_for_handoffs = module.params.get('wait_for_handoffs')
+ wait_for_ring = module.params.get('wait_for_ring')
+ wait_for_service = module.params.get('wait_for_service')
+
+ # make sure riak commands are on the path
+    riak_bin = module.get_bin_path('riak', required=True)
+    riak_admin_bin = module.get_bin_path('riak-admin', required=True)
+
+ timeout = time.time() + 120
+ while True:
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout, could not fetch Riak stats.')
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
+ break
+ time.sleep(5)
+
+    # attempt to parse the fetched stats as JSON
+ try:
+ stats = json.loads(stats_raw)
+ except Exception:
+ module.fail_json(msg='Could not parse Riak stats.')
+
+ node_name = stats['nodename']
+ nodes = stats['ring_members']
+ ring_size = stats['ring_creation_size']
+ rc, out, err = module.run_command([riak_bin, 'version'])
+ version = out.strip()
+
+ result = dict(node_name=node_name,
+ nodes=nodes,
+ ring_size=ring_size,
+ version=version)
+
+ if command == 'ping':
+ cmd = '%s ping %s' % (riak_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['ping'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'kv_test':
+ cmd = '%s test' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['kv_test'] = out
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'join':
+ if nodes.count(node_name) == 1 and len(nodes) > 1:
+ result['join'] = 'Node is already in cluster or staged to be in cluster.'
+ else:
+ cmd = '%s cluster join %s' % (riak_admin_bin, target_node)
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['join'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'plan':
+ cmd = '%s cluster plan' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['plan'] = out
+ if 'Staged Changes' in out:
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+ elif command == 'commit':
+ cmd = '%s cluster commit' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ result['commit'] = out
+ result['changed'] = True
+ else:
+ module.fail_json(msg=out)
+
+    # This could take a while; running the task in async mode is recommended.
+ if wait_for_handoffs:
+ timeout = time.time() + wait_for_handoffs
+ while True:
+ cmd = '%s transfers' % riak_admin_bin
+ rc, out, err = module.run_command(cmd)
+ if 'No transfers active' in out:
+ result['handoffs'] = 'No transfers active.'
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for handoffs.')
+
+ if wait_for_service:
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name]
+ rc, out, err = module.run_command(cmd)
+ result['service'] = out
+
+ if wait_for_ring:
+ timeout = time.time() + wait_for_ring
+ while True:
+ if ring_check(module, riak_admin_bin):
+ break
+ time.sleep(10)
+ if time.time() > timeout:
+ module.fail_json(msg='Timeout waiting for nodes to agree on ring.')
+
+ result['ring_ready'] = ring_check(module, riak_admin_bin)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rocketchat.py b/ansible_collections/community/general/plugins/modules/rocketchat.py
new file mode 100644
index 000000000..23d6d529e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rocketchat.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Deepak Kothandan <deepak.kothandan@outlook.com>
+# Copyright (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: rocketchat
+short_description: Send notifications to Rocket Chat
+description:
+  - The C(rocketchat) module sends notifications to Rocket Chat via the Incoming WebHook integration.
+author: "Ramon de la Fuente (@ramondelafuente)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ domain:
+ type: str
+ description:
+      - The domain for your environment without protocol (for example
+        C(example.com) or C(chat.example.com)).
+ required: true
+ token:
+ type: str
+ description:
+ - Rocket Chat Incoming Webhook integration token. This provides
+ authentication to Rocket Chat's Incoming webhook for posting
+ messages.
+ required: true
+ protocol:
+ type: str
+ description:
+      - Specify the protocol used to send notification messages for the webhook URL, either C(http) or C(https).
+ default: https
+ choices:
+ - 'http'
+ - 'https'
+ msg:
+ type: str
+ description:
+ - Message to be sent.
+ channel:
+ type: str
+ description:
+      - Channel to send the message to. If absent, the message goes to the channel selected for the I(token)
+        specified during the creation of the webhook.
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon.
+ default: "https://docs.ansible.com/favicon.ico"
+ icon_emoji:
+ type: str
+ description:
+      - Emoji for the message sender. The representation for the available emojis can be
+        obtained from Rocket Chat (for example C(:thumbsup:)). If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ color:
+ type: str
+ description:
+      - Allow text to use default colors. Use the default of C(normal) to not send a custom color bar at the start of the message.
+ default: 'normal'
+ choices:
+ - 'normal'
+ - 'good'
+ - 'warning'
+ - 'danger'
+ attachments:
+ type: list
+ elements: dict
+ description:
+ - Define a list of attachments.
+'''
+
+EXAMPLES = """
+- name: Send notification message via Rocket Chat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Rocket Chat all options
+ community.general.rocketchat:
+ domain: chat.example.com
+ token: thetoken/generatedby/rocketchat
+ msg: '{{ inventory_hostname }} completed'
+    channel: '#ansible'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in rocketchat
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+ delegate_to: localhost
+
+- name: Use the attachments API
+ community.general.rocketchat:
+ token: thetoken/generatedby/rocketchat
+ domain: chat.example.com
+ attachments:
+ - text: Display my system load on host A and B
+      color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: 'load average: 0,74, 0,66, 0,63'
+ short: true
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: true
+ delegate_to: localhost
+"""
+
+RETURN = """
+changed:
+ description: A flag indicating if any change was made or not.
+ returned: success
+ type: bool
+ sample: false
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+ROCKETCHAT_INCOMING_WEBHOOK = '%s://%s/hooks/%s'
+
+
+def build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=text)
+ elif text is not None:
+ payload = dict(attachments=[dict(text=text, color=color)])
+ if channel is not None:
+ if (channel[0] == '#') or (channel[0] == '@'):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+        for attachment in attachments:
+            if 'fallback' not in attachment:
+                attachment['fallback'] = attachment['text']
+            payload['attachments'].append(attachment)
+
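+    # The incoming webhook expects a form-style body: the JSON document is
+    # submitted as the value of a single 'payload' field.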
+ payload = "payload=" + module.jsonify(payload)
+ return payload
+
+
+def do_notify_rocketchat(module, domain, token, protocol, payload):
+
+ if token.count('/') < 1:
+ module.fail_json(msg="Invalid Token specified, provide a valid token")
+
+ rocketchat_incoming_webhook = ROCKETCHAT_INCOMING_WEBHOOK % (protocol, domain, token)
+
+ response, info = fetch_url(module, rocketchat_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True),
+ token=dict(type='str', required=True, no_log=True),
+ protocol=dict(type='str', default='https', choices=['http', 'https']),
+ msg=dict(type='str', required=False),
+ channel=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal', choices=['normal', 'good', 'warning', 'danger']),
+ attachments=dict(type='list', elements='dict', required=False)
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ protocol = module.params['protocol']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ color = module.params['color']
+ attachments = module.params['attachments']
+
+ payload = build_payload_for_rocketchat(module, text, channel, username, icon_url, icon_emoji, link_names, color, attachments)
+ do_notify_rocketchat(module, domain, token, protocol, payload)
+
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rollbar_deployment.py b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
new file mode 100644
index 000000000..314e65bc6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rollbar_deployment.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro, <kavu13@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+author: "Max Riveiro (@kavu)"
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ type: str
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ type: str
+ description:
+      - Revision number/SHA being deployed.
+ required: true
+ user:
+ type: str
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ type: str
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ type: str
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ type: str
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: true
+ type: bool
+'''
+
+EXAMPLES = '''
+ - name: Rollbar deployment notification
+ community.general.rollbar_deployment:
+ token: AAAAAA
+ environment: staging
+ user: ansible
+ revision: '4.2'
+ rollbar_user: admin
+ comment: Test Deploy
+
+ - name: Notify rollbar about current git revision deployment by current user
+ community.general.rollbar_deployment:
+ token: "{{ rollbar_access_token }}"
+ environment: production
+ revision: "{{ lookup('pipe', 'git rev-parse HEAD') }}"
+ user: "{{ lookup('env', 'USER') }}"
+'''
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True, no_log=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default=True, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
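+        # In check mode the notification is not sent; report changed=True since
+        # a real run always performs the POST.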
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urlencode(params)
+ response, info = fetch_url(module, url, data=data, method='POST')
+ except Exception as e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % to_native(e), exception=traceback.format_exc())
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py
new file mode 100644
index 000000000..52219cd1b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rpm_ostree_pkg.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Dusty Mabe <dusty@dustymabe.com>
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: rpm_ostree_pkg
+short_description: Install or uninstall overlay additional packages
+version_added: "2.0.0"
+description:
+ - Install or uninstall overlay additional packages using C(rpm-ostree) command.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of overlay package to install or remove.
+ required: true
+ type: list
+ elements: str
+ aliases: [ pkg ]
+ state:
+ description:
+ - State of the overlay package.
+ - C(present) simply ensures that a desired package is installed.
+ - C(absent) removes the specified package.
+ choices: [ 'absent', 'present' ]
+ default: 'present'
+ type: str
+author:
+ - Dusty Mabe (@dustymabe)
+ - Abhijeet Kasurde (@Akasurde)
+'''
+
+EXAMPLES = r'''
+- name: Install overlay package
+ community.general.rpm_ostree_pkg:
+ name: nfs-utils
+ state: present
+
+- name: Remove overlay package
+ community.general.rpm_ostree_pkg:
+ name: nfs-utils
+ state: absent
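+
+# name is a list, so several overlay packages can be handled in one
+# transaction (the package names are illustrative).
+- name: Install multiple overlay packages
+  community.general.rpm_ostree_pkg:
+    name:
+      - nfs-utils
+      - tmux
+    state: present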
+'''
+
+RETURN = r'''
+rc:
+ description: Return code of rpm-ostree command.
+ returned: always
+ type: int
+ sample: 0
+changed:
+ description: State changes.
+ returned: always
+ type: bool
+ sample: true
+action:
+ description: Action performed.
+ returned: always
+ type: str
+ sample: 'install'
+packages:
+ description: A list of packages specified.
+ returned: always
+ type: list
+ sample: ['nfs-utils']
+stdout:
+ description: Stdout of rpm-ostree command.
+ returned: always
+ type: str
+ sample: 'Staging deployment...done\n...'
+stderr:
+ description: Stderr of rpm-ostree command.
+ returned: always
+ type: str
+ sample: ''
+cmd:
+ description: Full command used for performed action.
+ returned: always
+ type: str
+ sample: 'rpm-ostree uninstall --allow-inactive --idempotent --unchanged-exit-77 nfs-utils'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class RpmOstreePkg:
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.state = module.params['state']
+
+ def ensure(self):
+ results = dict(
+ rc=0,
+ changed=False,
+ action='',
+ packages=[],
+ stdout='',
+ stderr='',
+ cmd='',
+ )
+
+ # Ensure rpm-ostree command exists
+ cmd = [self.module.get_bin_path('rpm-ostree', required=True)]
+
+ # Decide action to perform
+        if self.state == 'present':
+            results['action'] = 'install'
+            cmd.append('install')
+        elif self.state == 'absent':
+            results['action'] = 'uninstall'
+            cmd.append('uninstall')
+
+ # Additional parameters
+ cmd.extend(['--allow-inactive', '--idempotent', '--unchanged-exit-77'])
+ for pkg in self.params['name']:
+ cmd.append(pkg)
+ results['packages'].append(pkg)
+
+ rc, out, err = self.module.run_command(cmd)
+
+ results.update(dict(
+ rc=rc,
+ cmd=' '.join(cmd),
+ stdout=out,
+ stderr=err,
+ ))
+
+ # A few possible options:
+ # - rc=0 - succeeded in making a change
+ # - rc=77 - no change was needed
+ # - rc=? - error
+ if rc == 0:
+ results['changed'] = True
+ elif rc == 77:
+ results['changed'] = False
+ results['rc'] = 0
+ else:
+ self.module.fail_json(msg='non-zero return code', **results)
+
+ self.module.exit_json(**results)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(
+ default="present",
+ choices=['absent', 'present']
+ ),
+ name=dict(
+ aliases=["pkg"],
+ required=True,
+ type='list',
+ elements='str',
+ ),
+ ),
+ )
+
+ rpm_ostree_pkg = RpmOstreePkg(module)
+ rpm_ostree_pkg.ensure()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
new file mode 100644
index 000000000..77026e633
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_acl_policy.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_acl_policy
+
+short_description: Manage Rundeck ACL policies
+description:
+ - Create, update and remove Rundeck ACL policies through HTTP API.
+author: "Loic Blot (@nerzhul)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+      - Create or remove a Rundeck ACL policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+      - Sets the ACL policy name.
+ required: true
+ api_token:
+ description:
+      - Sets the token used to authenticate against the Rundeck API.
+ aliases: ["token"]
+ project:
+ type: str
+ description:
+      - Sets the project which receives the ACL policy.
+ - If unset, it's a system ACL policy.
+ policy:
+ type: str
+ description:
+ - Sets the ACL policy content.
+ - ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
+ - It can be a YAML string or a pure Ansible inventory YAML object.
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - community.general.attributes
+ - community.general.rundeck
+'''
+
+EXAMPLES = '''
+- name: Create or update a rundeck ACL policy in project Ansible
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ api_version: 18
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: present
+ project: "Ansible"
+ policy:
+ description: "my policy"
+ context:
+ application: rundeck
+ for:
+ project:
+ - allow: read
+ by:
+ group: "build"
+
+- name: Remove a rundeck system policy
+ community.general.rundeck_acl_policy:
+ name: "Project_01"
+ url: "https://rundeck.example.org"
+ token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+  description: Dictionary containing ACL policy information before modification.
+ returned: success
+ type: dict
+after:
+  description: Dictionary containing ACL policy information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+ api_argument_spec,
+ api_request,
+)
+
+
+class RundeckACLManager:
+ def __init__(self, module):
+ self.module = module
+
+ def get_acl(self):
+ resp, info = api_request(
+ module=self.module,
+ endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ )
+
+ return resp
+
+ def create_or_update_acl(self):
+ facts = self.get_acl()
+ if facts is None:
+            # In check mode, do not create the ACL policy; simulate the creation instead.
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
+
+ resp, info = api_request(
+ module=self.module,
+ endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="POST",
+ data={"contents": self.module.params["policy"]},
+ )
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 409:
+ self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_acl())
+ else:
+ if facts["contents"] == self.module.params["policy"]:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, before=facts, after=facts)
+
+ resp, info = api_request(
+ module=self.module,
+ endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="PUT",
+ data={"contents": self.module.params["policy"]},
+ )
+
+ if info["status"] == 200:
+ self.module.exit_json(changed=True, before=facts, after=self.get_acl())
+ elif info["status"] == 400:
+ self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
+ self.module.params["name"])
+ elif info["status"] == 404:
+ self.module.fail_json(msg="ACL %s doesn't exists. Cannot update." % self.module.params["name"])
+
+ def remove_acl(self):
+ facts = self.get_acl()
+
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the ACL policy
+ if not self.module.check_mode:
+ api_request(
+ module=self.module,
+ endpoint="system/acl/%s.aclpolicy" % self.module.params["name"],
+ method="DELETE",
+ )
+
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = api_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ policy=dict(type='str'),
+ project=dict(type='str'),
+ ))
+
+ argument_spec['api_token']['aliases'] = ['token']
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_if=[
+ ['state', 'present', ['policy']],
+ ],
+ supports_check_mode=True,
+ )
+
+ if not bool(re.match("[a-zA-Z0-9,.+_-]+", module.params["name"])):
+ module.fail_json(msg="Name contains forbidden characters. The policy can contain the characters: a-zA-Z0-9,.+_-")
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckACLManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_acl()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_acl()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py b/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py
new file mode 100644
index 000000000..818bde83c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_job_executions_info.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_job_executions_info
+short_description: Query executions for a Rundeck job
+description:
+ - This module gets the list of executions for a specified Rundeck job.
+author: "Phillipe Smith (@phsmith)"
+version_added: 3.8.0
+options:
+ job_id:
+ type: str
+ description:
+ - The job unique ID.
+ required: true
+ status:
+ type: str
+ description:
+ - The job status to filter.
+ choices: [succeeded, failed, aborted, running]
+ max:
+ type: int
+ description:
+ - Maximum number of results to return.
+ default: 20
+ offset:
+ type: int
+ description:
+ - The offset at which to start returning results.
+ default: 0
+extends_documentation_fragment:
+ - community.general.rundeck
+ - ansible.builtin.url
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = '''
+- name: Get Rundeck job executions info
+ community.general.rundeck_job_executions_info:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ register: rundeck_job_executions_info
+
+- name: Show Rundeck job executions info
+ ansible.builtin.debug:
+ var: rundeck_job_executions_info.executions
+'''
+
+RETURN = '''
+paging:
+ description: Results pagination info.
+ returned: success
+ type: dict
+ contains:
+ count:
+ description: Number of results in the response.
+ type: int
+ returned: success
+ total:
+ description: Total number of results.
+ type: int
+ returned: success
+ offset:
+ description: Offset from the first of all results.
+ type: int
+ returned: success
+ max:
+ description: Maximum number of results per page.
+ type: int
+ returned: success
+ sample: {
+ "count": 20,
+ "total": 100,
+ "offset": 0,
+ "max": 20
+ }
+executions:
+ description: Job executions list.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "id": 1,
+ "href": "https://rundeck.example.org/api/39/execution/1",
+ "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
+ "status": "succeeded",
+ "project": "myproject",
+ "executionType": "user",
+ "user": "admin",
+ "date-started": {
+ "unixtime": 1633525515026,
+ "date": "2021-10-06T13:05:15Z"
+ },
+ "date-ended": {
+ "unixtime": 1633525518386,
+ "date": "2021-10-06T13:05:18Z"
+ },
+ "job": {
+ "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "averageDuration": 6381,
+ "name": "Test",
+ "group": "",
+ "project": "myproject",
+ "description": "",
+ "options": {
+ "exit_code": "0"
+ },
+ "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
+ },
+ "description": "Plugin[com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowStep, nodeStep: false]",
+ "argstring": "-exit_code 0",
+ "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+ api_argument_spec,
+ api_request
+)
+
+
+class RundeckJobExecutionsInfo(object):
+ def __init__(self, module):
+ self.module = module
+ self.url = self.module.params["url"]
+ self.api_version = self.module.params["api_version"]
+ self.job_id = self.module.params["job_id"]
+ self.offset = self.module.params["offset"]
+ self.max = self.module.params["max"]
+ self.status = self.module.params["status"] or ""
+
+ def job_executions(self):
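+ # Build the executions query; an empty status filter returns executions in any state.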
+ response, info = api_request(
+ module=self.module,
+ endpoint="job/%s/executions?offset=%s&max=%s&status=%s"
+ % (quote(self.job_id), self.offset, self.max, self.status),
+ method="GET"
+ )
+
+ if info["status"] != 200:
+ self.module.fail_json(
+ msg=info["msg"],
+ executions=response
+ )
+
+ self.module.exit_json(msg="Executions info result", **response)
+
+
+def main():
+ argument_spec = api_argument_spec()
+ argument_spec.update(dict(
+ job_id=dict(required=True, type="str"),
+ offset=dict(type="int", default=0),
+ max=dict(type="int", default=20),
+ status=dict(
+ type="str",
+ choices=["succeeded", "failed", "aborted", "running"]
+ )
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckJobExecutionsInfo(module)
+ rundeck.job_executions()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_job_run.py b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py
new file mode 100644
index 000000000..894f1bb6f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_job_run.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Phillipe Smith <phsmithcc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_job_run
+short_description: Run a Rundeck job
+description:
+ - This module runs a Rundeck job specified by ID.
+author: "Phillipe Smith (@phsmith)"
+version_added: 3.8.0
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ job_id:
+ type: str
+ description:
+ - The job unique ID.
+ required: true
+ job_options:
+ type: dict
+ description:
+ - The job options for the steps.
+ - Numeric values must be quoted.
+ filter_nodes:
+ type: str
+ description:
+ - Filter the nodes where the jobs must run.
+ - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax).
+ run_at_time:
+ type: str
+ description:
+ - Schedule the job execution to run at a specific date and time.
+ - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00).
+ loglevel:
+ type: str
+ description:
+ - Log level configuration.
+ choices: [debug, verbose, info, warn, error]
+ default: info
+ wait_execution:
+ type: bool
+ description:
+ - Wait until the job execution has finished.
+ default: true
+ wait_execution_delay:
+ type: int
+ description:
+ - Delay, in seconds, between job execution status check requests.
+ default: 5
+ wait_execution_timeout:
+ type: int
+ description:
+ - Job execution wait timeout in seconds.
+ - If the timeout is reached, the job will be aborted.
+ - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check.
+ default: 120
+ abort_on_timeout:
+ type: bool
+ description:
+ - Send a job abort request if the specified I(wait_execution_timeout) is exceeded.
+ default: false
+extends_documentation_fragment:
+ - community.general.rundeck
+ - ansible.builtin.url
+ - community.general.attributes
+'''
+
+EXAMPLES = '''
+- name: Run a Rundeck job
+ community.general.rundeck_job_run:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ register: rundeck_job_run
+
+- name: Show execution info
+ ansible.builtin.debug:
+ var: rundeck_job_run.execution_info
+
+- name: Run a Rundeck job with options
+ community.general.rundeck_job_run:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ job_options:
+ option_1: "value_1"
+ option_2: "value_3"
+ option_3: "value_3"
+ register: rundeck_job_run
+
+- name: Run a Rundeck job with timeout, delay between status check and abort on timeout
+ community.general.rundeck_job_run:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ wait_execution_timeout: 30
+ wait_execution_delay: 10
+ abort_on_timeout: true
+ register: rundeck_job_run
+
+- name: Schedule a Rundeck job
+ community.general.rundeck_job_run:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ run_at_time: "2021-10-05T15:45:00-03:00"
+ register: rundeck_job_schedule
+
+- name: Fire-and-forget a Rundeck job
+ community.general.rundeck_job_run:
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ job_id: "xxxxxxxxxxxxxxxxx"
+ wait_execution: false
+ register: rundeck_job_run
+'''
+
+RETURN = '''
+execution_info:
+ description: Rundeck job execution metadata.
+ returned: always
+ type: dict
+ sample: {
+ "msg": "Job execution succeeded!",
+ "execution_info": {
+ "id": 1,
+ "href": "https://rundeck.example.org/api/39/execution/1",
+ "permalink": "https://rundeck.example.org/project/myproject/execution/show/1",
+ "status": "succeeded",
+ "project": "myproject",
+ "executionType": "user",
+ "user": "admin",
+ "date-started": {
+ "unixtime": 1633449020784,
+ "date": "2021-10-05T15:50:20Z"
+ },
+ "date-ended": {
+ "unixtime": 1633449026358,
+ "date": "2021-10-05T15:50:26Z"
+ },
+ "job": {
+ "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "averageDuration": 4917,
+ "name": "Test",
+ "group": "",
+ "project": "myproject",
+ "description": "",
+ "options": {
+ "exit_code": "0"
+ },
+ "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a",
+ "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a"
+ },
+ "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}",
+ "argstring": "-exit_code 0",
+ "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068",
+ "successfulNodes": [
+ "localhost"
+ ],
+ "output": "Test!"
+ }
+ }
+'''
+
+# Modules import
+from datetime import datetime, timedelta
+from time import sleep
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+ api_argument_spec,
+ api_request
+)
+
+
+class RundeckJobRun(object):
+ def __init__(self, module):
+ self.module = module
+ self.url = self.module.params["url"]
+ self.api_version = self.module.params["api_version"]
+ self.job_id = self.module.params["job_id"]
+ self.job_options = self.module.params["job_options"] or {}
+ self.filter_nodes = self.module.params["filter_nodes"] or ""
+ self.run_at_time = self.module.params["run_at_time"] or ""
+ self.loglevel = self.module.params["loglevel"].upper()
+ self.wait_execution = self.module.params['wait_execution']
+ self.wait_execution_delay = self.module.params['wait_execution_delay']
+ self.wait_execution_timeout = self.module.params['wait_execution_timeout']
+ self.abort_on_timeout = self.module.params['abort_on_timeout']
+
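+ # Rundeck expects job option values as strings; reject any other type early.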
+ for k, v in self.job_options.items():
+ if not isinstance(v, str):
+ self.module.fail_json(
+ msg="Job option '%s' value must be a string" % k,
+ execution_info={}
+ )
+
+ def job_status_check(self, execution_id):
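+ # Poll the execution endpoint until a final state is reached or wait_execution_timeout expires.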
+ response = dict()
+ timeout = False
+ due = datetime.now() + timedelta(seconds=self.wait_execution_timeout)
+
+ while not timeout:
+ endpoint = "execution/%d" % execution_id
+ response = api_request(module=self.module, endpoint=endpoint)[0]
+ output = api_request(module=self.module,
+ endpoint="execution/%d/output" % execution_id)
+ log_output = "\n".join([x["log"] for x in output[0]["entries"]])
+ response.update({"output": log_output})
+
+ if response["status"] == "aborted":
+ break
+ elif response["status"] == "scheduled":
+ self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time,
+ execution_info=response,
+ changed=True)
+ elif response["status"] == "failed":
+ self.module.fail_json(msg="Job execution failed",
+ execution_info=response)
+ elif response["status"] == "succeeded":
+ self.module.exit_json(msg="Job execution succeeded!",
+ execution_info=response)
+
+ if datetime.now() >= due:
+ timeout = True
+ break
+
+ # Wait for the configured delay before the next status check
+ sleep(self.wait_execution_delay)
+
+ response.update({"timed_out": timeout})
+ return response
+
+ def job_run(self):
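+ # Submit the run request; the response carries the new execution metadata, including its ID.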
+ response, info = api_request(
+ module=self.module,
+ endpoint="job/%s/run" % quote(self.job_id),
+ method="POST",
+ data={
+ "loglevel": self.loglevel,
+ "options": self.job_options,
+ "runAtTime": self.run_at_time,
+ "filter": self.filter_nodes
+ }
+ )
+
+ if info["status"] != 200:
+ self.module.fail_json(msg=info["msg"])
+
+ if not self.wait_execution:
+ self.module.exit_json(msg="Job run send successfully!",
+ execution_info=response)
+
+ job_status = self.job_status_check(response["id"])
+
+ if job_status["timed_out"]:
+ if self.abort_on_timeout:
+ api_request(
+ module=self.module,
+ endpoint="execution/%s/abort" % response['id'],
+ method="GET"
+ )
+
+ abort_status = self.job_status_check(response["id"])
+
+ self.module.fail_json(msg="Job execution aborted due the timeout specified",
+ execution_info=abort_status)
+
+ self.module.fail_json(msg="Job execution timed out",
+ execution_info=job_status)
+
+
+def main():
+ argument_spec = api_argument_spec()
+ argument_spec.update(dict(
+ job_id=dict(required=True, type="str"),
+ job_options=dict(type="dict"),
+ filter_nodes=dict(type="str"),
+ run_at_time=dict(type="str"),
+ wait_execution=dict(type="bool", default=True),
+ wait_execution_delay=dict(type="int", default=5),
+ wait_execution_timeout=dict(type="int", default=120),
+ abort_on_timeout=dict(type="bool", default=False),
+ loglevel=dict(
+ type="str",
+ choices=["debug", "verbose", "info", "warn", "error"],
+ default="info"
+ )
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckJobRun(module)
+ rundeck.job_run()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/rundeck_project.py b/ansible_collections/community/general/plugins/modules/rundeck_project.py
new file mode 100644
index 000000000..79ca57568
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/rundeck_project.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Ansible module to manage rundeck projects
+# Copyright (c) 2017, Loic Blot <loic.blot@unix-experience.fr>
+# Sponsored by Infopro Digital. http://www.infopro-digital.com/
+# Sponsored by E.T.A.I. http://www.etai.fr/
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: rundeck_project
+
+short_description: Manage Rundeck projects
+description:
+ - Create and remove Rundeck projects through HTTP API.
+author: "Loic Blot (@nerzhul)"
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Create or remove a Rundeck project.
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ type: str
+ description:
+ - Sets the project name.
+ required: true
+ api_token:
+ description:
+ - Sets the token to authenticate against Rundeck API.
+ aliases: ["token"]
+ client_cert:
+ version_added: '0.2.0'
+ client_key:
+ version_added: '0.2.0'
+ force:
+ version_added: '0.2.0'
+ force_basic_auth:
+ version_added: '0.2.0'
+ http_agent:
+ version_added: '0.2.0'
+ url_password:
+ version_added: '0.2.0'
+ url_username:
+ version_added: '0.2.0'
+ use_proxy:
+ version_added: '0.2.0'
+ validate_certs:
+ version_added: '0.2.0'
+extends_documentation_fragment:
+ - ansible.builtin.url
+ - community.general.attributes
+ - community.general.rundeck
+'''
+
+EXAMPLES = '''
+- name: Create a Rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ label: "Project 01"
+ description: "My Project 01"
+ url: "https://rundeck.example.org"
+ api_version: 39
+ api_token: "mytoken"
+ state: present
+
+- name: Remove a Rundeck project
+ community.general.rundeck_project:
+ name: "Project_01"
+ url: "https://rundeck.example.org"
+ api_token: "mytoken"
+ state: absent
+'''
+
+RETURN = '''
+rundeck_response:
+ description: Rundeck response when a failure occurs.
+ returned: failed
+ type: str
+before:
+ description: Dictionary containing project information before modification.
+ returned: success
+ type: dict
+after:
+ description: Dictionary containing project information after modification.
+ returned: success
+ type: dict
+'''
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.rundeck import (
+ api_argument_spec,
+ api_request,
+)
+
+
+class RundeckProjectManager(object):
+ def __init__(self, module):
+ self.module = module
+
+ def get_project_facts(self):
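+ # Fetch the project definition; the response body is None when the project does not exist.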
+ resp, info = api_request(
+ module=self.module,
+ endpoint="project/%s" % self.module.params["name"],
+ )
+
+ return resp
+
+ def create_or_update_project(self):
+ facts = self.get_project_facts()
+
+ if facts is None:
+ # In check mode, do not create the project; simulate the creation instead.
+ if self.module.check_mode:
+ self.module.exit_json(
+ changed=True,
+ before={},
+ after={
+ "name": self.module.params["name"]
+ },
+ )
+
+ resp, info = api_request(
+ module=self.module,
+ endpoint="projects",
+ method="POST",
+ data={
+ "name": self.module.params["name"],
+ "config": {},
+ }
+ )
+
+ if info["status"] == 201:
+ self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
+ else:
+ self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
+ before={}, after=self.get_project_facts())
+ else:
+ self.module.exit_json(changed=False, before=facts, after=facts)
+
+ def remove_project(self):
+ facts = self.get_project_facts()
+ if facts is None:
+ self.module.exit_json(changed=False, before={}, after={})
+ else:
+ # If not in check mode, remove the project
+ if not self.module.check_mode:
+ api_request(
+ module=self.module,
+ endpoint="project/%s" % self.module.params["name"],
+ method="DELETE",
+ )
+
+ self.module.exit_json(changed=True, before=facts, after={})
+
+
+def main():
+ # Also allow the user to set values for fetch_url
+ argument_spec = api_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(required=True, type='str'),
+ ))
+
+ argument_spec['api_token']['aliases'] = ['token']
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True
+ )
+
+ if module.params["api_version"] < 14:
+ module.fail_json(msg="API version should be at least 14")
+
+ rundeck = RundeckProjectManager(module)
+ if module.params['state'] == 'present':
+ rundeck.create_or_update_project()
+ elif module.params['state'] == 'absent':
+ rundeck.remove_project()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/runit.py b/ansible_collections/community/general/plugins/modules/runit.py
new file mode 100644
index 000000000..7c5882af8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/runit.py
@@ -0,0 +1,262 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: runit
+author:
+ - James Sumners (@jsumners)
+short_description: Manage runit services
+description:
+ - Controls runit services on remote hosts using the sv utility.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+ - C(started)/C(stopped) are idempotent actions that will not run
+ commands unless necessary. C(restarted) will always bounce the
+ service (sv restart) and C(killed) will always bounce the service (sv force-stop).
+ C(reloaded) will send a HUP (sv reload).
+ C(once) will run a normally downed sv once (sv once), not really
+ an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ enabled:
+ description:
+ - Whether the service is enabled. Disabling the service also implies stopping it.
+ type: bool
+ service_dir:
+ description:
+ - Directory runsv watches for services.
+ type: str
+ default: /var/service
+ service_src:
+ description:
+ - Directory where services are defined; the source of symlinks to I(service_dir).
+ type: str
+ default: /etc/sv
+'''
+
+EXAMPLES = r'''
+- name: Start sv dnscache, if not running
+ community.general.runit:
+ name: dnscache
+ state: started
+
+- name: Stop sv dnscache, if running
+ community.general.runit:
+ name: dnscache
+ state: stopped
+
+- name: Kill sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: killed
+
+- name: Restart sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: restarted
+
+- name: Reload sv dnscache, in all cases
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+
+- name: Use alternative sv directory location
+ community.general.runit:
+ name: dnscache
+ state: reloaded
+ service_dir: /run/service
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Sv(object):
+ """
+ Main class that handles runit; can be subclassed and overridden in case
+ we want to use a derivative like daemontools, encore, s6, etc.
+ """
+
+ def __init__(self, module):
+ self.extra_paths = []
+ self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
+ self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.get_status()
+ else:
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+
+ def get_status(self):
+ (rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
+
+ if err is not None and err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+ # full_state *may* contain information about the logger:
+ # "down: /etc/service/service-without-logger: 1s, normally up\n"
+ # "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
+ full_state_no_logger = self.full_state.split("; ")[0]
+
+ m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r' (\d+)s', full_state_no_logger)
+ if m:
+ self.duration = m.group(1)
+
+ if re.search(r'^run:', full_state_no_logger):
+ self.state = 'started'
+ elif re.search(r'^down:', full_state_no_logger):
+ self.state = 'stopped'
+ else:
+ self.state = 'unknown'
+ return
+
+ def started(self):
+ return self.start()
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, 'start', self.svc_full])
+
+ def stopped(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, 'once', self.svc_full])
+
+ def reloaded(self):
+ return self.reload()
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
+
+ def restarted(self):
+ return self.restart()
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
+
+ def killed(self):
+ return self.kill()
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ (rc, out, err) = self.module.run_command(cmd)
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e))
+ return rc, out, err
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ service_dir=dict(type='str', default='/var/service'),
+ service_src=dict(type='str', default='/etc/sv'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+
+ sv = Sv(module)
+ changed = False
+
+ if enabled is not None and enabled != sv.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ sv.enable()
+ else:
+ sv.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+ if state is not None and state != sv.state:
+ changed = True
+ if not module.check_mode:
+ getattr(sv, state)()
+
+ module.exit_json(changed=changed, sv=sv.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py
new file mode 100644
index 000000000..14b347e44
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sap_task_list_execute.py
@@ -0,0 +1,348 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sap_task_list_execute
+short_description: Perform SAP Task list execution
+version_added: "3.5.0"
+description:
+ - The C(sap_task_list_execute) module depends on the C(pyrfc) Python library (version 2.4.0 and upwards).
+ Depending on the distribution you are using, you may need to install additional packages to
+ have it available.
+ - Tasks in the task list which require manual activities will be confirmed automatically.
+ - This module will use the RFC package C(STC_TM_API).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+requirements:
+ - pyrfc >= 2.4.0
+ - xmltodict
+
+options:
+ conn_username:
+ description: The required username for the SAP system.
+ required: true
+ type: str
+ conn_password:
+ description: The required password for the SAP system.
+ required: true
+ type: str
+ host:
+ description: The required host for the SAP system. Can be either an FQDN or an IP address.
+ required: true
+ type: str
+ sysnr:
+ description:
+ - The system number of the SAP system.
+ - You must quote the value to ensure retaining the leading zeros.
+ default: '00'
+ type: str
+ client:
+ description:
+ - The client number to connect to.
+ - You must quote the value to ensure retaining the leading zeros.
+ default: '000'
+ type: str
+ task_to_execute:
+ description: The task list which will be executed.
+ required: true
+ type: str
+ task_parameters:
+ description:
+ - The tasks and the parameters for execution.
+ - If the task list does not need any parameters, this can be empty.
+ - If only specific tasks from the task list should be executed,
+ the tasks must be provided (even when no parameter is needed),
+ along with the module parameter I(task_skip=true).
+ type: list
+ elements: dict
+ suboptions:
+ TASKNAME:
+ description: The name of the task in the task list.
+ type: str
+ required: true
+ FIELDNAME:
+ description: The name of the field of the task.
+ type: str
+ VALUE:
+ description: The value which has to be set.
+ type: raw
+ task_settings:
+ description:
+ - Settings for the execution of the task list, as described in TCODE SE80.
+ Possible values are Check Mode C(CHECKRUN), Background Processing Active C(BATCH) (this is the default value),
+ Asynchronous Execution C(ASYNC), Trace Mode C(TRACE), and Server Name C(BATCH_TARGET).
+ default: ['BATCH']
+ type: list
+ elements: str
+ task_skip:
+ description:
+ - If this parameter is C(true), tasks not defined in I(task_parameters) are skipped.
+ - This could be the case when only certain tasks should run from the task list.
+ default: false
+ type: bool
+
+author:
+ - Rainer Leber (@rainerleber)
+'''
+
+EXAMPLES = r'''
+# Pass in a message
+- name: Test task execution
+ community.general.sap_task_list_execute:
+ conn_username: DDIC
+ conn_password: Passwd1234
+ host: 10.1.8.10
+ sysnr: '01'
+ client: '000'
+ task_to_execute: SAP_BASIS_SSL_CHECK
+ task_settings: batch
+
+- name: Pass in input parameters
+ community.general.sap_task_list_execute:
+ conn_username: DDIC
+ conn_password: Passwd1234
+ host: 10.1.8.10
+ sysnr: '00'
+ client: '000'
+ task_to_execute: SAP_BASIS_SSL_CHECK
+ task_parameters:
+ - { 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO', 'FIELDNAME': 'P_OPT2', 'VALUE': 'X' }
+ - TASKNAME: CL_STCT_CHECK_SEC_CRYPTO
+ FIELDNAME: P_OPT3
+ VALUE: X
+ task_settings: batch
+
+# Exported environment variables.
+- name: Hint if the module fails with an error message like ImportError libsapnwrfc.so...
+ community.general.sap_task_list_execute:
+ conn_username: DDIC
+ conn_password: Passwd1234
+ host: 10.1.8.10
+ sysnr: '00'
+ client: '000'
+ task_to_execute: SAP_BASIS_SSL_CHECK
+ task_settings: batch
+ environment:
+ SAPNWRFC_HOME: /usr/local/sap/nwrfcsdk
+ LD_LIBRARY_PATH: /usr/local/sap/nwrfcsdk/lib
+'''
+
+RETURN = r'''
+msg:
+ description: A small execution description.
+ type: str
+ returned: always
+ sample: 'Successful'
+out:
+ description: A complete description of the executed tasks. If this is available.
+ type: list
+ elements: dict
+ returned: on success
+ sample: [...,{
+ "LOG": {
+ "STCTM_S_LOG": [
+ {
+ "ACTIVITY": "U_CONFIG",
+ "ACTIVITY_DESCR": "Configuration changed",
+ "DETAILS": null,
+ "EXEC_ID": "20210728184903.815739",
+ "FIELD": null,
+ "ID": "STC_TASK",
+ "LOG_MSG_NO": "000000",
+ "LOG_NO": null,
+ "MESSAGE": "For radiobutton group ICM too many options are set; choose only one option",
+ "MESSAGE_V1": "ICM",
+ "MESSAGE_V2": null,
+ "MESSAGE_V3": null,
+ "MESSAGE_V4": null,
+ "NUMBER": "048",
+ "PARAMETER": null,
+ "PERIOD": "M",
+ "PERIOD_DESCR": "Maintenance",
+ "ROW": "0",
+ "SRC_LINE": "170",
+ "SRC_OBJECT": "CL_STCTM_REPORT_UI IF_STCTM_UI_TASK~SET_PARAMETERS",
+ "SYSTEM": null,
+ "TIMESTMP": "20210728184903",
+ "TSTPNM": "DDIC",
+ "TYPE": "E"
+ },...
+ ]}}]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import traceback
+try:
+ from pyrfc import Connection
+except ImportError:
+ HAS_PYRFC_LIBRARY = False
+ PYRFC_LIBRARY_IMPORT_ERROR = traceback.format_exc()
+else:
+ HAS_PYRFC_LIBRARY = True
+ PYRFC_LIBRARY_IMPORT_ERROR = None
+try:
+ import xmltodict
+except ImportError:
+ HAS_XMLTODICT_LIBRARY = False
+ XMLTODICT_LIBRARY_IMPORT_ERROR = traceback.format_exc()
+else:
+ HAS_XMLTODICT_LIBRARY = True
+ XMLTODICT_LIBRARY_IMPORT_ERROR = None
+
+
+def call_rfc_method(connection, method_name, kwargs):
+ # PyRFC call function
+ return connection.call(method_name, **kwargs)
+
+
+def process_exec_settings(task_settings):
+ # processes task settings to objects
+ exec_settings = {}
+ for settings in task_settings:
+ temp_dict = {settings.upper(): 'X'}
+ for key, value in temp_dict.items():
+ exec_settings[key] = value
+ return exec_settings
+
+
+def xml_to_dict(xml_raw):
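+ # The session log arrives as SAP asXML; drill down to the TASKLIST node or report that no logs are available.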
+ try:
+ xml_parsed = xmltodict.parse(xml_raw, dict_constructor=dict)
+ xml_dict = xml_parsed['asx:abap']['asx:values']['SESSION']['TASKLIST']
+ except KeyError:
+ xml_dict = "No logs available."
+ return xml_dict
+
+
+def run_module():
+
+ params_spec = dict(
+ TASKNAME=dict(type='str', required=True),
+ FIELDNAME=dict(type='str'),
+ VALUE=dict(type='raw'),
+ )
+
+ # define available arguments/parameters a user can pass to the module
+ module = AnsibleModule(
+ argument_spec=dict(
+ # values for connection
+ conn_username=dict(type='str', required=True),
+ conn_password=dict(type='str', required=True, no_log=True),
+ host=dict(type='str', required=True),
+ sysnr=dict(type='str', default="00"),
+ client=dict(type='str', default="000"),
+ # values for execution tasks
+ task_to_execute=dict(type='str', required=True),
+ task_parameters=dict(type='list', elements='dict', options=params_spec),
+ task_settings=dict(type='list', elements='str', default=['BATCH']),
+ task_skip=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+ result = dict(changed=False, msg='', out={})
+
+ params = module.params
+
+ username = params['conn_username'].upper()
+ password = params['conn_password']
+ host = params['host']
+ sysnr = params['sysnr']
+ client = params['client']
+
+ task_parameters = params['task_parameters']
+ task_to_execute = params['task_to_execute']
+ task_settings = params['task_settings']
+ task_skip = params['task_skip']
+
+ if not HAS_PYRFC_LIBRARY:
+ module.fail_json(
+ msg=missing_required_lib('pyrfc'),
+ exception=PYRFC_LIBRARY_IMPORT_ERROR)
+
+ if not HAS_XMLTODICT_LIBRARY:
+ module.fail_json(
+ msg=missing_required_lib('xmltodict'),
+ exception=XMLTODICT_LIBRARY_IMPORT_ERROR)
+
+ # basic RFC connection with pyrfc
+ try:
+ conn = Connection(user=username, passwd=password, ashost=host, sysnr=sysnr, client=client)
+ except Exception as err:
+ result['error'] = str(err)
+ result['msg'] = 'Something went wrong connecting to the SAP system.'
+ module.fail_json(**result)
+
+ try:
+ raw_params = call_rfc_method(conn, 'STC_TM_SCENARIO_GET_PARAMETERS',
+ {'I_SCENARIO_ID': task_to_execute})
+ except Exception as err:
+ result['error'] = str(err)
+ result['msg'] = 'The task list does not exist.'
+ module.fail_json(**result)
+ exec_settings = process_exec_settings(task_settings)
+ # initialize session task
+ session_init = call_rfc_method(conn, 'STC_TM_SESSION_BEGIN',
+ {'I_SCENARIO_ID': task_to_execute,
+ 'I_INIT_ONLY': 'X'})
+ # Confirm Tasks which requires manual activities from Task List Run
+ for task in raw_params['ET_PARAMETER']:
+ call_rfc_method(conn, 'STC_TM_TASK_CONFIRM',
+ {'I_SESSION_ID': session_init['E_SESSION_ID'],
+ 'I_TASKNAME': task['TASKNAME']})
+ if task_skip:
+ for task in raw_params['ET_PARAMETER']:
+ call_rfc_method(conn, 'STC_TM_TASK_SKIP',
+ {'I_SESSION_ID': session_init['E_SESSION_ID'],
+ 'I_TASKNAME': task['TASKNAME'], 'I_SKIP_DEP_TASKS': 'X'})
+ # unskip defined tasks and set parameters
+ if task_parameters is not None:
+ for task in task_parameters:
+ call_rfc_method(conn, 'STC_TM_TASK_UNSKIP',
+ {'I_SESSION_ID': session_init['E_SESSION_ID'],
+ 'I_TASKNAME': task['TASKNAME'], 'I_UNSKIP_DEP_TASKS': 'X'})
+
+ call_rfc_method(conn, 'STC_TM_SESSION_SET_PARAMETERS',
+ {'I_SESSION_ID': session_init['E_SESSION_ID'],
+ 'IT_PARAMETER': task_parameters})
+ # start the task
+ try:
+ session_start = call_rfc_method(conn, 'STC_TM_SESSION_RESUME',
+ {'I_SESSION_ID': session_init['E_SESSION_ID'],
+ 'IS_EXEC_SETTINGS': exec_settings})
+ except Exception as err:
+ result['error'] = str(err)
+ result['msg'] = 'Something went wrong. See error.'
+ module.fail_json(**result)
+ # Get task logs because the execution may succeed while individual tasks show errors or warnings
+ # returned value is ABAPXML https://help.sap.com/doc/abapdocu_755_index_htm/7.55/en-US/abenabap_xslt_asxml_general.htm
+ session_log = call_rfc_method(conn, 'STC_TM_SESSION_GET_LOG',
+ {'I_SESSION_ID': session_init['E_SESSION_ID']})
+
+ task_list = xml_to_dict(session_log['E_LOG'])
+
+ result['changed'] = True
+ result['msg'] = session_start['E_STATUS_DESCR']
+ result['out'] = task_list
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sapcar_extract.py b/ansible_collections/community/general/plugins/modules/sapcar_extract.py
new file mode 100644
index 000000000..badd466e1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sapcar_extract.py
@@ -0,0 +1,228 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber <rainerleber@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sapcar_extract
+short_description: Manages SAP SAPCAR archives
+version_added: "3.2.0"
+description:
+ - Provides support for unpacking C(sar)/C(car) files with the SAPCAR binary from SAP and pulling
+ information back into Ansible.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - Always returns C(changed=true) in check mode.
+ diff_mode:
+ support: none
+options:
+ path:
+ description: The path to the SAR/CAR file.
+ type: path
+ required: true
+ dest:
+ description:
+ - The destination where SAPCAR extracts the SAR file. Missing folders will be created.
+ If this parameter is not provided, it unpacks into the same folder as the SAR file.
+ type: path
+ binary_path:
+ description:
+ - The path to the SAPCAR binary, for example, C(/home/dummy/sapcar) or C(https://myserver/SAPCAR).
+ If this parameter is not provided, the module will look in C(PATH).
+ type: path
+ signature:
+ description:
+ - If C(true), the signature will be extracted.
+ default: false
+ type: bool
+ security_library:
+ description:
+ - The path to the security library, for example, C(/usr/sap/hostctrl/exe/libsapcrypto.so), for signature operations.
+ type: path
+ manifest:
+ description:
+ - The name of the manifest.
+ default: "SIGNATURE.SMF"
+ type: str
+ remove:
+ description:
+ - If C(true), the SAR/CAR file will be removed. B(This should be used with caution!)
+ default: false
+ type: bool
+author:
+ - Rainer Leber (@RainerLeber)
+'''
+
+EXAMPLES = """
+- name: Extract SAR file
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+
+- name: Extract SAR file with destination
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+ dest: "~/test/"
+
+- name: Extract SAR file with destination, downloading SAPCAR from a webserver (a fileshare works as well)
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+ dest: "~/dest/"
+ binary_path: "https://myserver/SAPCAR"
+
+- name: Extract SAR file and delete SAR after extract
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+ remove: true
+
+- name: Extract SAR file with manifest
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+ signature: true
+
+- name: Extract SAR file with manifest and rename it
+ community.general.sapcar_extract:
+ path: "~/source/hana.sar"
+ manifest: "MyNewSignature.SMF"
+ signature: true
+"""
+
+import os
+from tempfile import NamedTemporaryFile
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import open_url
+from ansible.module_utils.common.text.converters import to_native
+
+
+def get_list_of_files(dir_name):
+ # Recursively create a list of file and directory names in the given directory.
+ list_of_file = os.listdir(dir_name)
+ allFiles = list()
+ # Iterate over all the entries
+ for entry in list_of_file:
+ # Create full path
+ fullPath = os.path.join(dir_name, entry)
+ # If entry is a directory then get the list of files in this directory
+ if os.path.isdir(fullPath):
+ allFiles = allFiles + [fullPath]
+ allFiles = allFiles + get_list_of_files(fullPath)
+ else:
+ allFiles.append(fullPath)
+ return allFiles
+
+
+def download_SAPCAR(binary_path, module):
+ bin_path = None
+ # download sapcar binary if url is provided otherwise path is returned
+ if binary_path is not None:
+ if binary_path.startswith('https://') or binary_path.startswith('http://'):
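+ # delete=False keeps the temporary file after the with-block; add_cleanup_file removes it when the module exits.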
+ random_file = NamedTemporaryFile(delete=False)
+ with open_url(binary_path) as response:
+ with random_file as out_file:
+ data = response.read()
+ out_file.write(data)
+ os.chmod(out_file.name, 0o700)
+ bin_path = out_file.name
+ module.add_cleanup_file(bin_path)
+ else:
+ bin_path = binary_path
+ return bin_path
+
+
+def check_if_present(command, path, dest, signature, manifest, module):
+ # Manipulate the SAR file listing so it can be compared with the already extracted files
+ iter_command = [command, '-tvf', path]
+ sar_out = module.run_command(iter_command)[1]
+ sar_raw = sar_out.split("\n")[1:]
+ if dest[-1] != "/":
+ dest = dest + "/"
+ sar_files = [dest + x.split(" ")[-1] for x in sar_raw if x]
+ # Remove any SIGNATURE.SMF from the list because it will not be unpacked if signature is false
+ if not signature:
+ sar_files = [item for item in sar_files if '.SMF' not in item]
+ # If the signature is renamed, adjust the file list from the SAR file for the comparison.
+ if manifest != "SIGNATURE.SMF":
+ sar_files = [item for item in sar_files if '.SMF' not in item]
+ sar_files = sar_files + [manifest]
+ # get extracted files if present
+ files_extracted = get_list_of_files(dest)
+ # compare extracted files with files in sar file
+ present = all(elem in files_extracted for elem in sar_files)
+ return present
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ dest=dict(type='path'),
+ binary_path=dict(type='path'),
+ signature=dict(type='bool', default=False),
+ security_library=dict(type='path'),
+ manifest=dict(type='str', default="SIGNATURE.SMF"),
+ remove=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+ rc, out, err = [0, "", ""]
+ params = module.params
+ check_mode = module.check_mode
+
+ path = params['path']
+ dest = params['dest']
+ signature = params['signature']
+ security_library = params['security_library']
+ manifest = params['manifest']
+ remove = params['remove']
+
+ bin_path = download_SAPCAR(params['binary_path'], module)
+
+ if dest is None:
+ dest_head_tail = os.path.split(path)
+ dest = dest_head_tail[0] + '/'
+ else:
+ if not os.path.exists(dest):
+ os.makedirs(dest, 0o755)
+
+ if bin_path is not None:
+ command = [module.get_bin_path(bin_path, required=True)]
+ else:
+ try:
+ command = [module.get_bin_path('sapcar', required=True)]
+ except Exception as e:
+ module.fail_json(msg='Failed to find the SAPCAR binary "sapcar" in PATH. Please check whether it is available: {0}'
+ .format(to_native(e)))
+
+ present = check_if_present(command[0], path, dest, signature, manifest, module)
+
+ if not present:
+ command.extend(['-xvf', path, '-R', dest])
+ if security_library:
+ command.extend(['-L', security_library])
+ if signature:
+ command.extend(['-manifest', manifest])
+ if not check_mode:
+ (rc, out, err) = module.run_command(command, check_rc=True)
+ changed = True
+ else:
+ changed = False
+ out = "already unpacked"
+
+ if remove:
+ os.remove(path)
+
+ module.exit_json(changed=changed, message=rc, stdout=out,
+ stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/say.py b/ansible_collections/community/general/plugins/modules/say.py
new file mode 100644
index 000000000..175e5feb0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/say.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Michael DeHaan <michael@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: say
+short_description: Makes a computer speak
+description:
+ - Makes a computer speak! Amuse your friends, annoy your coworkers!
+notes:
+ - In 2.5, this module has been renamed from C(osx_say) to M(community.general.say).
+ - If you like this module, you may also be interested in the osx_say callback plugin.
+ - A list of available voices, with language, can be found by running C(say -v ?) on an OSX host and C(espeak --voices) on a Linux host.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ msg:
+ type: str
+ description:
+ - What to say.
+ required: true
+ voice:
+ type: str
+ description:
+ - What voice to use.
+ required: false
+requirements: [ say or espeak or espeak-ng ]
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan (@mpdehaan)"
+'''
+
+EXAMPLES = '''
+- name: Make a computer speak
+ community.general.say:
+ msg: '{{ inventory_hostname }} is all done'
+ voice: Zarvox
+ delegate_to: localhost
+'''
+
+import platform
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def say(module, executable, msg, voice):
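+ # Both say and espeak accept the message as a positional argument and -v to select a voice.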
+ cmd = [executable, msg]
+ if voice:
+ cmd.extend(('-v', voice))
+ module.run_command(cmd, check_rc=True)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ msg=dict(required=True),
+ voice=dict(required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ msg = module.params['msg']
+ voice = module.params['voice']
+ possibles = ('say', 'espeak', 'espeak-ng')
+
+ if platform.system() != 'Darwin':
+ # Outside macOS, an available 'say' binary might be the GNUstep tool, which does not support the 'voice' parameter
+ voice = None
+
+ for possible in possibles:
+ executable = module.get_bin_path(possible)
+ if executable:
+ break
+ else:
+ module.fail_json(msg='Unable to find any of: %s' % ', '.join(possibles))
+
+ if module.check_mode:
+ module.exit_json(msg=msg, changed=False)
+
+ say(module, executable, msg, voice)
+
+ module.exit_json(msg=msg, changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute.py b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
new file mode 100644
index 000000000..9bd821807
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_compute.py
@@ -0,0 +1,699 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Compute management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute
+short_description: Scaleway compute management module
+author: Remy Leone (@remyleone)
+description:
+ - "This module manages compute instances on Scaleway."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ public_ip:
+ type: str
+ description:
+ - Manage public IP on a Scaleway server.
+ - Can be a Scaleway IP address UUID.
+ - C(dynamic) means that the IP is destroyed at the same time the host is destroyed.
+ - C(absent) means no public IP at all.
+ default: absent
+
+ enable_ipv6:
+ description:
+ - Enable public IPv6 connectivity on the instance.
+ default: false
+ type: bool
+
+ image:
+ type: str
+ description:
+ - Image identifier used to start the instance.
+ required: true
+
+ name:
+ type: str
+ description:
+ - Name of the instance.
+
+ organization:
+ type: str
+ description:
+ - Organization identifier.
+ - Exactly one of I(project) and I(organization) must be specified.
+
+ project:
+ type: str
+ description:
+ - Project identifier.
+ - Exactly one of I(project) and I(organization) must be specified.
+ version_added: 4.3.0
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+ - running
+ - restarted
+ - stopped
+
+ tags:
+ type: list
+ elements: str
+ description:
+ - List of tags to apply to the instance (5 max).
+ required: false
+ default: []
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ commercial_type:
+ type: str
+ description:
+ - Commercial name of the compute node.
+ required: true
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the server to reach the expected state.
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the server.
+ required: false
+ default: 3
+
+ security_group:
+ type: str
+ description:
+ - Security group unique identifier.
+ - If no value is provided, the default or current security group will be used.
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Create a server
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ project: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ tags:
+ - test
+ - www
+
+- name: Create a server attached to a security group
+ community.general.scaleway_compute:
+ name: foobar
+ state: present
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ project: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+ security_group: 4a31b633-118e-4900-bd52-facf1085fc8d
+ tags:
+ - test
+ - www
+
+- name: Destroy it right after
+ community.general.scaleway_compute:
+ name: foobar
+ state: absent
+ image: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+ project: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: ams1
+ commercial_type: VC1S
+'''
+
+RETURN = '''
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+SCALEWAY_SERVER_STATES = (
+ 'stopped',
+ 'stopping',
+ 'starting',
+ 'running',
+ 'locked'
+)
+
+SCALEWAY_TRANSITIONS_STATES = (
+ "stopping",
+ "starting",
+ "pending"
+)
+
+
+def check_image_id(compute_api, image_id):
+ response = compute_api.get(path="images/%s" % image_id)
+
+ if not response.ok:
+ msg = 'Error in getting image %s on %s: %s' % (image_id, compute_api.module.params.get('api_url'), response.json)
+ compute_api.module.fail_json(msg=msg)
+
+
+def fetch_state(compute_api, server):
+ compute_api.module.debug("fetch_state of server: %s" % server["id"])
+ response = compute_api.get(path="servers/%s" % server["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ compute_api.module.debug("Server %s in state: %s" % (server["id"], response.json["server"]["state"]))
+ return response.json["server"]["state"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(compute_api, server, wait=None):
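+ # Optionally poll until the server leaves a transition state (stopping/starting/pending) or wait_timeout expires.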
+ if wait is None:
+ wait = compute_api.module.params["wait"]
+ if not wait:
+ return
+
+ wait_timeout = compute_api.module.params["wait_timeout"]
+ wait_sleep_time = compute_api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ compute_api.module.debug("We are going to wait for the server to finish its transition")
+ if fetch_state(compute_api, server) not in SCALEWAY_TRANSITIONS_STATES:
+ compute_api.module.debug("It seems that the server is not in transition anymore.")
+ compute_api.module.debug("Server in state: %s" % fetch_state(compute_api, server))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ compute_api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def public_ip_payload(compute_api, public_ip):
+ # We don't want a public ip
+ if public_ip in ("absent",):
+ return {"dynamic_ip_required": False}
+
+ # IP is only attached to the instance and is released as soon as the instance terminates
+ if public_ip in ("dynamic", "allocated"):
+ return {"dynamic_ip_required": True}
+
+ # We check that the IP we want to attach exists, if so its ID is returned
+ response = compute_api.get("ips")
+ if not response.ok:
+ msg = 'Error during public IP validation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ ip_list = []
+ try:
+ ip_list = response.json["ips"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the IP information from: %s" % response.json)
+
+ lookup = [ip["id"] for ip in ip_list]
+ if public_ip in lookup:
+ return {"public_ip": public_ip}
+
+
+def create_server(compute_api, server):
+ compute_api.module.debug("Starting a create_server")
+ target_server = None
+ data = {"enable_ipv6": server["enable_ipv6"],
+ "tags": server["tags"],
+ "commercial_type": server["commercial_type"],
+ "image": server["image"],
+ "dynamic_ip_required": server["dynamic_ip_required"],
+ "name": server["name"]
+ }
+
+ if server["project"]:
+ data["project"] = server["project"]
+
+ if server["organization"]:
+ data["organization"] = server["organization"]
+
+ if server["security_group"]:
+ data["security_group"] = server["security_group"]
+
+ response = compute_api.post(path="servers", data=data)
+
+ if not response.ok:
+ msg = 'Error during server creation: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def restart_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="reboot")
+
+
+def stop_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweroff")
+
+
+def start_server(compute_api, server):
+ return perform_action(compute_api=compute_api, server=server, action="poweron")
+
+
+def perform_action(compute_api, server, action):
+ response = compute_api.post(path="servers/%s/action" % server["id"],
+ data={"action": action})
+ if not response.ok:
+ msg = 'Error during server %s: (%s) %s' % (action, response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def remove_server(compute_api, server):
+ compute_api.module.debug("Starting remove server strategy")
+ response = compute_api.delete(path="servers/%s" % server["id"])
+ if not response.ok:
+ msg = 'Error during server deletion: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=server)
+
+ return response
+
+
+def present_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting present strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ return changed, target_server
+
+
+def absent_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting absent strategy")
+ changed = False
+ target_server = None
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ return changed, {"status": "Server already absent."}
+ else:
+ target_server = query_results[0]
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be made absent." % target_server["id"]}
+
+ # A server MUST be stopped to be deleted.
+ while fetch_state(compute_api=compute_api, server=target_server) != "stopped":
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+ response = stop_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while stopping a server before removing it [{0}: {1}]'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server, wait=True)
+
+ response = remove_server(compute_api=compute_api, server=target_server)
+
+ if not response.ok:
+ err_msg = 'Error while removing server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=err_msg)
+
+ return changed, {"status": "Server %s deleted" % target_server["id"]}
+
+
+def running_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting running strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being run."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed before running it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("running", "starting"):
+ compute_api.module.debug("running_strategy: Server in state: %s" % current_state)
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s attributes would be changed." % target_server["id"]}
+
+ response = start_server(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while running server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def stop_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting stop strategy")
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ changed = False
+
+ if not query_results:
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being stopped."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ changed = True
+ else:
+ target_server = query_results[0]
+
+ compute_api.module.debug("stop_strategy: Servers are found.")
+
+ if server_attributes_should_be_changed(compute_api=compute_api, target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before stopping it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ current_state = fetch_state(compute_api=compute_api, server=target_server)
+ if current_state not in ("stopped",):
+ compute_api.module.debug("stop_strategy: Server in state: %s" % current_state)
+
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be stopped." % target_server["id"]}
+
+ response = stop_server(compute_api=compute_api, server=target_server)
+ compute_api.module.debug(response.json)
+ compute_api.module.debug(response.ok)
+
+ if not response.ok:
+ msg = 'Error while stopping server [{0}: {1}]'.format(response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+def restart_strategy(compute_api, wished_server):
+ compute_api.module.debug("Starting restart strategy")
+ changed = False
+ query_results = find(compute_api=compute_api, wished_server=wished_server, per_page=1)
+
+ if not query_results:
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "A server would be created before being rebooted."}
+
+ target_server = create_server(compute_api=compute_api, server=wished_server)
+ else:
+ target_server = query_results[0]
+
+ if server_attributes_should_be_changed(compute_api=compute_api,
+ target_server=target_server,
+ wished_server=wished_server):
+ changed = True
+
+ if compute_api.module.check_mode:
+ return changed, {
+ "status": "Server %s attributes would be changed before rebooting it." % target_server["id"]}
+
+ target_server = server_change_attributes(compute_api=compute_api, target_server=target_server, wished_server=wished_server)
+
+ changed = True
+ if compute_api.module.check_mode:
+ return changed, {"status": "Server %s would be rebooted." % target_server["id"]}
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("running",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was running [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ if fetch_state(compute_api=compute_api, server=target_server) in ("stopped",):
+ response = restart_server(compute_api=compute_api, server=target_server)
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+ if not response.ok:
+ msg = 'Error while restarting server that was stopped [{0}: {1}].'.format(response.status_code,
+ response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ return changed, target_server
+
+
+state_strategy = {
+ "present": present_strategy,
+ "restarted": restart_strategy,
+ "stopped": stop_strategy,
+ "running": running_strategy,
+ "absent": absent_strategy
+}
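+# Dispatch sketch: core() selects the strategy matching the wished state, e.g.
+# changed, summary = state_strategy["running"](compute_api=compute_api, wished_server=wished_server)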
+
+
+def find(compute_api, wished_server, per_page=1):
+ compute_api.module.debug("Getting inside find")
+ # Only the name attribute is accepted in the Compute query API
+ response = compute_api.get("servers", params={"name": wished_server["name"],
+ "per_page": per_page})
+
+ if not response.ok:
+ msg = 'Error during server search: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ search_results = response.json["servers"]
+
+ return search_results
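+# Every strategy calls find() with per_page=1, so only the first server whose
+# name matches the wished one is considered.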
+
+
+PATCH_MUTABLE_SERVER_ATTRIBUTES = (
+ "ipv6",
+ "tags",
+ "name",
+ "dynamic_ip_required",
+ "security_group",
+)
+
+
+def server_attributes_should_be_changed(compute_api, target_server, wished_server):
+ compute_api.module.debug("Checking if server attributes should be changed")
+ compute_api.module.debug("Current Server: %s" % target_server)
+ compute_api.module.debug("Wished Server: %s" % wished_server)
+ debug_dict = dict((x, (target_server[x], wished_server[x]))
+ for x in PATCH_MUTABLE_SERVER_ATTRIBUTES
+ if x in target_server and x in wished_server)
+ compute_api.module.debug("Debug dict %s" % debug_dict)
+ try:
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+ # When working with a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+ if (isinstance(target_server[key], dict) and wished_server[key]
+ and "id" in target_server[key] and target_server[key]["id"] != wished_server[key]):
+ return True
+ # For any other structure, simply compare the content of the two objects
+ elif not isinstance(target_server[key], dict) and target_server[key] != wished_server[key]:
+ return True
+ return False
+ except AttributeError:
+ compute_api.module.fail_json(msg="Error while checking if attributes should be changed")
+
+
+def server_change_attributes(compute_api, target_server, wished_server):
+ compute_api.module.debug("Starting patching server attributes")
+ patch_payload = dict()
+
+ for key in PATCH_MUTABLE_SERVER_ATTRIBUTES:
+ if key in target_server and key in wished_server:
+ # When working with a dict, only the ID matters, as the user is asked to put only the resource ID in the playbook
+ if isinstance(target_server[key], dict) and "id" in target_server[key] and wished_server[key]:
+ # Keep every key at its current value, except the ID
+ key_dict = dict((x, target_server[key][x]) for x in target_server[key].keys() if x != "id")
+ # Set the ID to the user-specified ID
+ key_dict["id"] = wished_server[key]
+ patch_payload[key] = key_dict
+ elif not isinstance(target_server[key], dict):
+ patch_payload[key] = wished_server[key]
+
+ response = compute_api.patch(path="servers/%s" % target_server["id"],
+ data=patch_payload)
+ if not response.ok:
+ msg = 'Error during server attributes patching: (%s) %s' % (response.status_code, response.json)
+ compute_api.module.fail_json(msg=msg)
+
+ try:
+ target_server = response.json["server"]
+ except KeyError:
+ compute_api.module.fail_json(msg="Error in getting the server information from: %s" % response.json)
+
+ wait_to_complete_state_transition(compute_api=compute_api, server=target_server)
+
+ return target_server
+
+
+def core(module):
+ region = module.params["region"]
+ wished_server = {
+ "state": module.params["state"],
+ "image": module.params["image"],
+ "name": module.params["name"],
+ "commercial_type": module.params["commercial_type"],
+ "enable_ipv6": module.params["enable_ipv6"],
+ "tags": module.params["tags"],
+ "organization": module.params["organization"],
+ "project": module.params["project"],
+ "security_group": module.params["security_group"]
+ }
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ compute_api = Scaleway(module=module)
+
+ check_image_id(compute_api, wished_server["image"])
+
+ # The IP parameters of the wished server depend on the configuration
+ ip_payload = public_ip_payload(compute_api=compute_api, public_ip=module.params["public_ip"])
+ wished_server.update(ip_payload)
+
+ changed, summary = state_strategy[wished_server["state"]](compute_api=compute_api, wished_server=wished_server)
+ module.exit_json(changed=changed, msg=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ image=dict(required=True),
+ name=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ commercial_type=dict(required=True),
+ enable_ipv6=dict(default=False, type="bool"),
+ public_ip=dict(default="absent"),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", elements="str", default=[]),
+ organization=dict(),
+ project=dict(),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ security_group=dict(),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('organization', 'project'),
+ ],
+ required_one_of=[
+ ('organization', 'project'),
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py
new file mode 100644
index 000000000..9a9d9adde
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_compute_private_network.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway VPC management module
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_compute_private_network
+short_description: Scaleway compute - private network management
+version_added: 5.2.0
+author: Pascal MANGIN (@pastral)
+description:
+ - This module adds or removes a private network on a compute instance
+ (U(https://developer.scaleway.com)).
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the VPC.
+ default: present
+ choices:
+ - present
+ - absent
+
+ project:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ compute_id:
+ type: str
+ description:
+ - ID of the compute instance (see M(community.general.scaleway_compute)).
+ required: true
+
+ private_network_id:
+ type: str
+ description:
+ - ID of the private network (see M(community.general.scaleway_private_network)).
+ required: true
+
+'''
+
+EXAMPLES = '''
+- name: Plug a VM to a private network
+ community.general.scaleway_compute_private_network:
+ project: '{{ scw_project }}'
+ state: present
+ region: par1
+ compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+ private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+ register: nicsvpc_creation_task
+
+- name: Unplug a VM from a private network
+ community.general.scaleway_compute_private_network:
+ project: '{{ scw_project }}'
+ state: absent
+ region: par1
+ compute_id: "12345678-f1e6-40ec-83e5-12345d67ed89"
+ private_network_id: "22345678-f1e6-40ec-83e5-12345d67ed89"
+
+'''
+
+RETURN = '''
+scaleway_compute_private_network:
+ description: Information on the VPC.
+ returned: success when I(state=present)
+ type: dict
+ sample:
+ {
+ "created_at": "2022-01-15T11:11:12.676445Z",
+ "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+ "name": "network",
+ "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "tags": [
+ "tag1",
+ "tag2",
+ "tag3",
+ "tag4",
+ "tag5"
+ ],
+ "updated_at": "2022-01-15T11:12:04.624837Z",
+ "zone": "fr-par-2"
+ }
+'''
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_nics_info(api, compute_id, private_network_id):
+
+ response = api.get('servers/' + compute_id + '/private_nics')
+ if not response.ok:
+ msg = "Error during get servers information: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+
+ for nic in response.json['private_nics']:
+ if nic['private_network_id'] == private_network_id:
+ return nic
+
+ return None
+
+
+def present_strategy(api, compute_id, private_network_id):
+
+ changed = False
+ nic = get_nics_info(api, compute_id, private_network_id)
+ if nic is not None:
+ return changed, nic
+
+ data = {"private_network_id": private_network_id}
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "a private network would be add to a server"}
+
+ response = api.post(path='servers/' + compute_id + '/private_nics', data=data)
+
+ if not response.ok:
+ api.module.fail_json(msg='Error when adding a private network to a server [{0}: {1}]'.format(response.status_code, response.json))
+
+ return changed, response.json
+
+
+def absent_strategy(api, compute_id, private_network_id):
+
+ changed = False
+ nic = get_nics_info(api, compute_id, private_network_id)
+ if nic is None:
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "private network would be destroyed"}
+
+ response = api.delete('servers/' + compute_id + '/private_nics/' + nic['id'])
+
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting private network from server [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+
+ compute_id = module.params['compute_id']
+ pn_id = module.params['private_network_id']
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, compute_id=compute_id, private_network_id=pn_id)
+ else:
+ changed, summary = present_strategy(api=api, compute_id=compute_id, private_network_id=pn_id)
+ module.exit_json(changed=changed, scaleway_compute_private_network=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ project=dict(required=True),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ compute_id=dict(required=True),
+ private_network_id=dict(required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container.py b/ansible_collections/community/general/plugins/modules/scaleway_container.py
new file mode 100644
index 000000000..19ffae419
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container.py
@@ -0,0 +1,412 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container
+short_description: Scaleway Container management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+ - This module manages containers on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.scaleway_waitable_resource
+ - community.general.attributes
+requirements:
+ - passlib[argon2] >= 1.7.4
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the container.
+ default: present
+ choices:
+ - present
+ - absent
+
+ namespace_id:
+ type: str
+ description:
+ - Container namespace identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container namespace.
+ required: true
+
+ description:
+ description:
+ - Description of the container namespace.
+ type: str
+ default: ''
+
+ min_scale:
+ description:
+ - Minimum number of replicas for the container.
+ type: int
+
+ max_scale:
+ description:
+ - Maximum number of replicas for the container.
+ type: int
+
+ environment_variables:
+ description:
+ - Environment variables of the container namespace.
+ - Injected in container at runtime.
+ type: dict
+ default: {}
+
+ secret_environment_variables:
+ description:
+ - Secret environment variables of the container namespace.
+ - Updating those values will not output a C(changed) state in Ansible.
+ - Injected in container at runtime.
+ type: dict
+ default: {}
+
+ memory_limit:
+ description:
+ - Resources define performance characteristics of your container.
+ - They are allocated to your container at runtime.
+ type: int
+
+ container_timeout:
+ description:
+ - The length of time your handler can spend processing a request before being stopped.
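+ - Expressed as a duration string, for example C(300s).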
+ type: str
+
+ privacy:
+ description:
+ - Privacy policies define whether a container can be executed anonymously.
+ - Choose C(public) to enable anonymous execution, or C(private) to protect your container with an authentication mechanism provided by the Scaleway API.
+ type: str
+ default: public
+ choices:
+ - public
+ - private
+
+ registry_image:
+ description:
+ - The name of the image used for the container.
+ type: str
+ required: true
+
+ max_concurrency:
+ description:
+ - Maximum number of connections per container.
+ - This parameter will be used to trigger autoscaling.
+ type: int
+
+ protocol:
+ description:
+ - Communication protocol of the container.
+ type: str
+ default: http1
+ choices:
+ - http1
+ - h2c
+
+ port:
+ description:
+ - Listen port used to expose the container.
+ type: int
+
+ redeploy:
+ description:
+ - Redeploy the container if update is required.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Create a container
+ community.general.scaleway_container:
+ namespace_id: '{{ scw_container_namespace }}'
+ state: present
+ region: fr-par
+ name: my-awesome-container
+ registry_image: rg.fr-par.scw.cloud/funcscwtestrgy2f9zw/nginx:latest
+ environment_variables:
+ MY_VAR: my_value
+ secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+ register: container_creation_task
+
+- name: Make sure container is deleted
+ community.general.scaleway_container:
+ namespace_id: '{{ scw_container_namespace }}'
+ state: absent
+ region: fr-par
+ name: my-awesome-container
+'''
+
+RETURN = '''
+container:
+ description: The container information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ cpu_limit: 140
+ description: Container used for testing scaleway_container ansible module
+ domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ http_option: ""
+ id: c9070eb0-d7a4-48dd-9af3-4fb139890721
+ max_concurrency: 50
+ max_scale: 5
+ memory_limit: 256
+ min_scale: 0
+ name: cn-ansible-test
+ namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69
+ port: 80
+ privacy: public
+ protocol: http1
+ region: fr-par
+ registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: created
+ timeout: 300s
+'''
+
+from copy import deepcopy
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+ scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed,
+ SecretVariables
+)
+from ansible.module_utils.basic import AnsibleModule
+
+STABLE_STATES = (
+ "ready",
+ "created",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "description",
+ "min_scale",
+ "max_scale",
+ "environment_variables",
+ "memory_limit",
+ "timeout",
+ "privacy",
+ "registry_image",
+ "max_concurrency",
+ "protocol",
+ "port",
+ "secret_environment_variables"
+)
+
+
+def payload_from_wished_cn(wished_cn):
+ payload = {
+ "namespace_id": wished_cn["namespace_id"],
+ "name": wished_cn["name"],
+ "description": wished_cn["description"],
+ "min_scale": wished_cn["min_scale"],
+ "max_scale": wished_cn["max_scale"],
+ "environment_variables": wished_cn["environment_variables"],
+ "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"]),
+ "memory_limit": wished_cn["memory_limit"],
+ "timeout": wished_cn["timeout"],
+ "privacy": wished_cn["privacy"],
+ "registry_image": wished_cn["registry_image"],
+ "max_concurrency": wished_cn["max_concurrency"],
+ "protocol": wished_cn["protocol"],
+ "port": wished_cn["port"],
+ "redeploy": wished_cn["redeploy"]
+ }
+
+ return payload
+
+
+def absent_strategy(api, wished_cn):
+ changed = False
+
+ cn_list = api.fetch_all_resources("containers")
+ cn_lookup = dict((cn["name"], cn)
+ for cn in cn_list)
+
+ if wished_cn["name"] not in cn_lookup:
+ return changed, {}
+
+ target_cn = cn_lookup[wished_cn["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container would be destroyed"}
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_cn["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting container [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+ return changed, response.json
+
+
+def present_strategy(api, wished_cn):
+ changed = False
+
+ cn_list = api.fetch_all_resources("containers")
+ cn_lookup = dict((cn["name"], cn)
+ for cn in cn_list)
+
+ payload_cn = payload_from_wished_cn(wished_cn)
+
+ if wished_cn["name"] not in cn_lookup:
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A container would be created."}
+
+ # Creation doesn't support the `redeploy` parameter
+ del payload_cn["redeploy"]
+
+ # Create container
+ api.warn(payload_cn)
+ creation_response = api.post(path=api.api_path,
+ data=payload_cn)
+
+ if not creation_response.ok:
+ msg = "Error during container creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_cn = cn_lookup[wished_cn["name"]]
+ decoded_target_cn = deepcopy(target_cn)
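+ # Assumption (hence the passlib[argon2] requirement): the API returns secret
+ # values hashed, and SecretVariables.decode() restores the wished plaintext
+ # where the hash matches, so unchanged secrets do not register as a diff.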
+ decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"],
+ payload_cn["secret_environment_variables"])
+ patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn,
+ wished=payload_cn,
+ verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+ mutable_attributes=MUTABLE_ATTRIBUTES)
+
+ if not patch_payload:
+ return changed, target_cn
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container attributes would be changed."}
+
+ cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"],
+ data=patch_payload)
+
+ if not cn_patch_response.ok:
+ api.module.fail_json(msg='Error during container attributes update: [{0}: {1}]'.format(
+ cn_patch_response.status_code, cn_patch_response.json['message']))
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ SecretVariables.ensure_scaleway_secret_package(module)
+
+ region = module.params["region"]
+ wished_container = {
+ "state": module.params["state"],
+ "namespace_id": module.params["namespace_id"],
+ "name": module.params["name"],
+ "description": module.params['description'],
+ "min_scale": module.params["min_scale"],
+ "max_scale": module.params["max_scale"],
+ "environment_variables": module.params['environment_variables'],
+ "secret_environment_variables": module.params['secret_environment_variables'],
+ "memory_limit": module.params["memory_limit"],
+ "timeout": module.params["container_timeout"],
+ "privacy": module.params["privacy"],
+ "registry_image": module.params["registry_image"],
+ "max_concurrency": module.params["max_concurrency"],
+ "protocol": module.params["protocol"],
+ "port": module.params["port"],
+ "redeploy": module.params["redeploy"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "containers/v1beta1/regions/%s/containers" % region
+
+ changed, summary = state_strategy[wished_container["state"]](api=api, wished_cn=wished_container)
+
+ module.exit_json(changed=changed, container=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(scaleway_waitable_resource_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ namespace_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True),
+ description=dict(type='str', default=''),
+ min_scale=dict(type='int'),
+ max_scale=dict(type='int'),
+ memory_limit=dict(type='int'),
+ container_timeout=dict(type='str'),
+ privacy=dict(type='str', default='public', choices=['public', 'private']),
+ registry_image=dict(type='str', required=True),
+ max_concurrency=dict(type='int'),
+ protocol=dict(type='str', default='http1', choices=['http1', 'h2c']),
+ port=dict(type='int'),
+ redeploy=dict(type='bool', default=False),
+ environment_variables=dict(type='dict', default={}),
+ secret_environment_variables=dict(type='dict', default={}, no_log=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_info.py
new file mode 100644
index 000000000..20ebece21
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_info.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_info
+short_description: Retrieve information on Scaleway Container
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+ - This module returns information about a container on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ namespace_id:
+ type: str
+ description:
+ - Container namespace identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Get a container info
+ community.general.scaleway_container_info:
+ namespace_id: '{{ scw_container_namespace }}'
+ region: fr-par
+ name: my-awesome-container
+ register: container_info_task
+'''
+
+RETURN = '''
+container:
+ description: The container information.
+ returned: always
+ type: dict
+ sample:
+ cpu_limit: 140
+ description: Container used for testing scaleway_container ansible module
+ domain_name: cnansibletestgfogtjod-cn-ansible-test.functions.fnc.fr-par.scw.cloud
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ http_option: ""
+ id: c9070eb0-d7a4-48dd-9af3-4fb139890721
+ max_concurrency: 50
+ max_scale: 5
+ memory_limit: 256
+ min_scale: 0
+ name: cn-ansible-test
+ namespace_id: 75e299f1-d1e5-4e6b-bc6e-4fb51cfe1e69
+ port: 80
+ privacy: public
+ protocol: http1
+ region: fr-par
+ registry_image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: created
+ timeout: 300s
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def info_strategy(api, wished_cn):
+ cn_list = api.fetch_all_resources("containers")
+ cn_lookup = dict((fn["name"], fn)
+ for fn in cn_list)
+
+ if wished_cn["name"] not in cn_lookup:
+ msg = "Error during container lookup: Unable to find container named '%s' in namespace '%s'" % (wished_cn["name"],
+ wished_cn["namespace_id"])
+
+ api.module.fail_json(msg=msg)
+
+ target_cn = cn_lookup[wished_cn["name"]]
+
+ response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+ if not response.ok:
+ msg = "Error during container lookup: %s: '%s' (%s)" % (response.info['msg'],
+ response.json['message'],
+ response.json)
+ api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ wished_container = {
+ "namespace_id": module.params["namespace_id"],
+ "name": module.params["name"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "containers/v1beta1/regions/%s/containers" % region
+
+ summary = info_strategy(api=api, wished_cn=wished_container)
+
+ module.exit_json(changed=False, container=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ namespace_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py
new file mode 100644
index 000000000..fb01b8672
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace.py
@@ -0,0 +1,296 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container namespace management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_namespace
+short_description: Scaleway Container namespace management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+ - This module manages container namespaces on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.scaleway_waitable_resource
+ - community.general.attributes
+requirements:
+ - passlib[argon2] >= 1.7.4
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the container namespace.
+ default: present
+ choices:
+ - present
+ - absent
+
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container namespace.
+ required: true
+
+ description:
+ description:
+ - Description of the container namespace.
+ type: str
+ default: ''
+
+ environment_variables:
+ description:
+ - Environment variables of the container namespace.
+ - Injected in containers at runtime.
+ type: dict
+ default: {}
+
+ secret_environment_variables:
+ description:
+ - Secret environment variables of the container namespace.
+ - Updating those values will not output a C(changed) state in Ansible.
+ - Injected in containers at runtime.
+ type: dict
+ default: {}
+'''
+
+EXAMPLES = '''
+- name: Create a container namespace
+ community.general.scaleway_container_namespace:
+ project_id: '{{ scw_project }}'
+ state: present
+ region: fr-par
+ name: my-awesome-container-namespace
+ environment_variables:
+ MY_VAR: my_value
+ secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+ register: container_namespace_creation_task
+
+- name: Make sure container namespace is deleted
+ community.general.scaleway_container_namespace:
+ project_id: '{{ scw_project }}'
+ state: absent
+ region: fr-par
+ name: my-awesome-container-namespace
+'''
+
+RETURN = '''
+container_namespace:
+ description: The container namespace information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ description: ""
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ id: 531a1fd7-98d2-4a74-ad77-d398324304b8
+ name: my-awesome-container-namespace
+ organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0
+ project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98
+ region: fr-par
+ registry_endpoint: ""
+ registry_namespace_id: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: pending
+'''
+
+from copy import deepcopy
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+ scaleway_waitable_resource_argument_spec,
+ resource_attributes_should_be_changed, SecretVariables
+)
+from ansible.module_utils.basic import AnsibleModule
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "description",
+ "environment_variables",
+ "secret_environment_variables"
+)
+
+
+def payload_from_wished_cn(wished_cn):
+ payload = {
+ "project_id": wished_cn["project_id"],
+ "name": wished_cn["name"],
+ "description": wished_cn["description"],
+ "environment_variables": wished_cn["environment_variables"],
+ "secret_environment_variables": SecretVariables.dict_to_list(wished_cn["secret_environment_variables"])
+ }
+
+ return payload
+
+
+def absent_strategy(api, wished_cn):
+ changed = False
+
+ cn_list = api.fetch_all_resources("namespaces")
+ cn_lookup = dict((cn["name"], cn)
+ for cn in cn_list)
+
+ if wished_cn["name"] not in cn_lookup:
+ return changed, {}
+
+ target_cn = cn_lookup[wished_cn["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container namespace would be destroyed"}
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_cn["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting container namespace [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+ return changed, response.json
+
+
+def present_strategy(api, wished_cn):
+ changed = False
+
+ cn_list = api.fetch_all_resources("namespaces")
+ cn_lookup = dict((cn["name"], cn)
+ for cn in cn_list)
+
+ payload_cn = payload_from_wished_cn(wished_cn)
+
+ if wished_cn["name"] not in cn_lookup:
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A container namespace would be created."}
+
+ # Create container namespace
+ api.warn(payload_cn)
+ creation_response = api.post(path=api.api_path,
+ data=payload_cn)
+
+ if not creation_response.ok:
+ msg = "Error during container namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_cn = cn_lookup[wished_cn["name"]]
+ decoded_target_cn = deepcopy(target_cn)
+ decoded_target_cn["secret_environment_variables"] = SecretVariables.decode(decoded_target_cn["secret_environment_variables"],
+ payload_cn["secret_environment_variables"])
+ patch_payload = resource_attributes_should_be_changed(target=decoded_target_cn,
+ wished=payload_cn,
+ verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+ mutable_attributes=MUTABLE_ATTRIBUTES)
+
+ if not patch_payload:
+ return changed, target_cn
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container namespace attributes would be changed."}
+
+ cn_patch_response = api.patch(path=api.api_path + "/%s" % target_cn["id"],
+ data=patch_payload)
+
+ if not cn_patch_response.ok:
+ api.module.fail_json(msg='Error during container namespace attributes update: [{0}: {1}]'.format(
+ cn_patch_response.status_code, cn_patch_response.json['message']))
+
+ api.wait_to_complete_state_transition(resource=target_cn, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ SecretVariables.ensure_scaleway_secret_package(module)
+
+ region = module.params["region"]
+ wished_container_namespace = {
+ "state": module.params["state"],
+ "project_id": module.params["project_id"],
+ "name": module.params["name"],
+ "description": module.params['description'],
+ "environment_variables": module.params['environment_variables'],
+ "secret_environment_variables": module.params['secret_environment_variables']
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "containers/v1beta1/regions/%s/namespaces" % region
+
+ changed, summary = state_strategy[wished_container_namespace["state"]](api=api, wished_cn=wished_container_namespace)
+
+ module.exit_json(changed=changed, container_namespace=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(scaleway_waitable_resource_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True),
+ description=dict(type='str', default=''),
+ environment_variables=dict(type='dict', default={}),
+ secret_environment_variables=dict(type='dict', default={}, no_log=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py
new file mode 100644
index 000000000..758720dd5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_namespace_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container namespace info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_namespace_info
+short_description: Retrieve information on Scaleway Container namespace
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+ - This module returns information about a container namespace on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container namespace.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Get a container namespace info
+ community.general.scaleway_container_namespace_info:
+ project_id: '{{ scw_project }}'
+ region: fr-par
+ name: my-awesome-container-namespace
+ register: container_namespace_info_task
+'''
+
+RETURN = '''
+container_namespace:
+ description: The container namespace information.
+ returned: always
+ type: dict
+ sample:
+ description: ""
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ id: 531a1fd7-98d2-4a74-ad77-d398324304b8
+ name: my-awesome-container-namespace
+ organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0
+ project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98
+ region: fr-par
+ registry_endpoint: ""
+ registry_namespace_id: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: pending
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def info_strategy(api, wished_cn):
+ cn_list = api.fetch_all_resources("namespaces")
+ cn_lookup = dict((fn["name"], fn)
+ for fn in cn_list)
+
+ if wished_cn["name"] not in cn_lookup:
+ msg = "Error during container namespace lookup: Unable to find container namespace named '%s' in project '%s'" % (wished_cn["name"],
+ wished_cn["project_id"])
+
+ api.module.fail_json(msg=msg)
+
+ target_cn = cn_lookup[wished_cn["name"]]
+
+ response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+ if not response.ok:
+ msg = "Error during container namespace lookup: %s: '%s' (%s)" % (response.info['msg'],
+ response.json['message'],
+ response.json)
+ api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ wished_container_namespace = {
+ "project_id": module.params["project_id"],
+ "name": module.params["name"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "containers/v1beta1/regions/%s/namespaces" % region
+
+ summary = info_strategy(api=api, wished_cn=wished_container_namespace)
+
+ module.exit_json(changed=False, container_namespace=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py
new file mode 100644
index 000000000..5eee571ec
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_registry.py
@@ -0,0 +1,272 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Container registry management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_registry
+short_description: Scaleway Container registry management module
+version_added: 5.8.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+ - This module manages container registries on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.scaleway_waitable_resource
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the container registry.
+ default: present
+ choices:
+ - present
+ - absent
+
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container registry.
+ required: true
+
+ description:
+ description:
+ - Description of the container registry.
+ type: str
+ default: ''
+
+ privacy_policy:
+ type: str
+ description:
+ - Default visibility policy.
+ - Everyone will be able to pull images from a C(public) registry.
+ choices:
+ - public
+ - private
+ default: private
+'''
+
+EXAMPLES = '''
+- name: Create a container registry
+ community.general.scaleway_container_registry:
+ project_id: '{{ scw_project }}'
+ state: present
+ region: fr-par
+ name: my-awesome-container-registry
+ register: container_registry_creation_task
+
+- name: Make sure container registry is deleted
+ community.general.scaleway_container_registry:
+ project_id: '{{ scw_project }}'
+ state: absent
+ region: fr-par
+ name: my-awesome-container-registry
+'''
+
+RETURN = '''
+container_registry:
+ description: The container registry information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ created_at: "2022-10-14T09:51:07.949716Z"
+ description: Managed by Ansible
+ endpoint: rg.fr-par.scw.cloud/my-awesome-registry
+ id: 0d7d5270-7864-49c2-920b-9fd6731f3589
+ image_count: 0
+ is_public: false
+ name: my-awesome-registry
+ organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58
+ project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be
+ region: fr-par
+ size: 0
+ status: ready
+ status_message: ""
+ updated_at: "2022-10-14T09:51:07.949716Z"
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+ scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed
+)
+from ansible.module_utils.basic import AnsibleModule
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "description",
+ "is_public"
+)
+
+
+def payload_from_wished_cr(wished_cr):
+ payload = {
+ "project_id": wished_cr["project_id"],
+ "name": wished_cr["name"],
+ "description": wished_cr["description"],
+ "is_public": wished_cr["privacy_policy"] == "public"
+ }
+
+ return payload
+
+
+def absent_strategy(api, wished_cr):
+ changed = False
+
+ cr_list = api.fetch_all_resources("namespaces")
+ cr_lookup = dict((cr["name"], cr)
+ for cr in cr_list)
+
+ if wished_cr["name"] not in cr_lookup:
+ return changed, {}
+
+ target_cr = cr_lookup[wished_cr["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container registry would be destroyed"}
+
+ api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_cr["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting container registry [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES)
+ return changed, response.json
+
+
+def present_strategy(api, wished_cr):
+ changed = False
+
+ cr_list = api.fetch_all_resources("namespaces")
+ cr_lookup = dict((cr["name"], cr)
+ for cr in cr_list)
+
+ payload_cr = payload_from_wished_cr(wished_cr)
+
+ if wished_cr["name"] not in cr_lookup:
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A container registry would be created."}
+
+ # Create container registry
+ api.warn(payload_cr)
+ creation_response = api.post(path=api.api_path,
+ data=payload_cr)
+
+ if not creation_response.ok:
+ msg = "Error during container registry creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_cr = cr_lookup[wished_cr["name"]]
+ patch_payload = resource_attributes_should_be_changed(target=target_cr,
+ wished=payload_cr,
+ verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+ mutable_attributes=MUTABLE_ATTRIBUTES)
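+ # resource_attributes_should_be_changed() presumably returns only the attributes
+ # whose wished value differs from the current resource; the resulting dict is
+ # sent verbatim as the PATCH body below.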
+
+ if not patch_payload:
+ return changed, target_cr
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Container registry attributes would be changed."}
+
+ cr_patch_response = api.patch(path=api.api_path + "/%s" % target_cr["id"],
+ data=patch_payload)
+
+ if not cr_patch_response.ok:
+ api.module.fail_json(msg='Error during container registry attributes update: [{0}: {1}]'.format(
+ cr_patch_response.status_code, cr_patch_response.json['message']))
+
+ api.wait_to_complete_state_transition(resource=target_cr, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % target_cr["id"])
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_container_registry = {
+ "state": module.params["state"],
+ "project_id": module.params["project_id"],
+ "name": module.params["name"],
+ "description": module.params['description'],
+ "privacy_policy": module.params['privacy_policy']
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "registry/v1/regions/%s/namespaces" % region
+
+ changed, summary = state_strategy[wished_container_registry["state"]](api=api, wished_cr=wished_container_registry)
+
+ module.exit_json(changed=changed, container_registry=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(scaleway_waitable_resource_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True),
+ description=dict(type='str', default=''),
+ privacy_policy=dict(type='str', default='private', choices=['public', 'private'])
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py b/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py
new file mode 100644
index 000000000..9c641edcb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_container_registry_info.py
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless container registry info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_container_registry_info
+short_description: Scaleway Container registry info module
+version_added: 5.8.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a container registry on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the container registry.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Get a container registry info
+ community.general.scaleway_container_registry_info:
+ project_id: '{{ scw_project }}'
+ region: fr-par
+ name: my-awesome-container-registry
+ register: container_registry_info_task
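+
+# Illustrative follow-up (not part of the module): read an attribute from the
+# registered result, for example the registry endpoint.
+- ansible.builtin.debug:
+    msg: "{{ container_registry_info_task.container_registry.endpoint }}"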
+'''
+
+RETURN = '''
+container_registry:
+ description: The container registry information.
+ returned: always
+ type: dict
+ sample:
+ created_at: "2022-10-14T09:51:07.949716Z"
+ description: Managed by Ansible
+ endpoint: rg.fr-par.scw.cloud/my-awesome-registry
+ id: 0d7d5270-7864-49c2-920b-9fd6731f3589
+ image_count: 0
+ is_public: false
+ name: my-awesome-registry
+ organization_id: 10697b59-5c34-4d24-8d15-9ff2d3b89f58
+ project_id: 3da4f0b2-06be-4773-8ec4-5dfa435381be
+ region: fr-par
+ size: 0
+ status: ready
+ status_message: ""
+ updated_at: "2022-10-14T09:51:07.949716Z"
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def info_strategy(api, wished_cn):
+ cn_list = api.fetch_all_resources("namespaces")
+ cn_lookup = dict((fn["name"], fn)
+ for fn in cn_list)
+
+ if wished_cn["name"] not in cn_lookup:
+ msg = "Error during container registries lookup: Unable to find container registry named '%s' in project '%s'" % (wished_cn["name"],
+ wished_cn["project_id"])
+
+ api.module.fail_json(msg=msg)
+
+ target_cn = cn_lookup[wished_cn["name"]]
+
+ response = api.get(path=api.api_path + "/%s" % target_cn["id"])
+ if not response.ok:
+ msg = "Error during container registry lookup: %s: '%s' (%s)" % (response.info['msg'],
+ response.json['message'],
+ response.json)
+ api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ wished_container_namespace = {
+ "project_id": module.params["project_id"],
+ "name": module.params["name"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "registry/v1/regions/%s/namespaces" % region
+
+ summary = info_strategy(api=api, wished_cn=wished_container_namespace)
+
+ module.exit_json(changed=False, container_registry=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
new file mode 100644
index 000000000..edc9f6cab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_database_backup.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway database backups management module
+#
+# Copyright (C) 2020 Guillaume Rodriguez (g.rodriguez@opendecide.com).
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_database_backup
+short_description: Scaleway database backups management module
+version_added: 1.2.0
+author: Guillaume Rodriguez (@guillaume_ro_fr)
+description:
+ - "This module manages database backups on Scaleway account U(https://developer.scaleway.com)."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Indicate desired state of the database backup.
+ - C(present) creates a backup.
+ - C(absent) deletes the backup.
+ - C(exported) creates a download link for the backup.
+ - C(restored) restores the backup to a new database.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ - exported
+ - restored
+
+ region:
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ type: str
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ id:
+ description:
+ - UUID used to identify the database backup.
+ - Required for C(absent), C(exported) and C(restored) states.
+ type: str
+
+ name:
+ description:
+ - Name used to identify the database backup.
+ - Required for C(present) state.
+ - Ignored when I(state=absent), I(state=exported) or I(state=restored).
+ type: str
+ required: false
+
+ database_name:
+ description:
+ - Name used to identify the database.
+ - Required for C(present) and C(restored) states.
+ - Ignored when I(state=absent) or I(state=exported).
+ type: str
+ required: false
+
+ instance_id:
+ description:
+ - UUID of the instance associated to the database backup.
+ - Required for C(present) and C(restored) states.
+ - Ignored when I(state=absent) or I(state=exported).
+ type: str
+ required: false
+
+ expires_at:
+ description:
+ - Expiration datetime of the database backup (ISO 8601 format).
+ - Ignored when I(state=absent), I(state=exported) or I(state=restored).
+ type: str
+ required: false
+
+ wait:
+ description:
+ - Wait for the instance to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - Time to wait for the backup to reach the expected state.
+ type: int
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ description:
+ - Time to wait before every attempt to check the state of the backup.
+ type: int
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a backup
+  community.general.scaleway_database_backup:
+    name: 'my_backup'
+    state: present
+    region: 'fr-par'
+    database_name: 'my-database'
+    instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+- name: Export a backup
+  community.general.scaleway_database_backup:
+    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+    state: exported
+    region: 'fr-par'
+
+- name: Restore a backup
+  community.general.scaleway_database_backup:
+    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+    state: restored
+    region: 'fr-par'
+    database_name: 'my-new-database'
+    instance_id: '50968a80-2909-4e5c-b1af-a2e19860dddb'
+
+- name: Remove a backup
+  community.general.scaleway_database_backup:
+    id: '6ef1125a-037e-494f-a911-6d9c49a51691'
+    state: absent
+    region: 'fr-par'
+'''
+
+RETURN = '''
+metadata:
+ description: Backup metadata.
+ returned: when I(state=present), I(state=exported) or I(state=restored)
+ type: dict
+ sample: {
+ "metadata": {
+ "created_at": "2020-08-06T12:42:05.631049Z",
+ "database_name": "my-database",
+ "download_url": null,
+ "download_url_expires_at": null,
+ "expires_at": null,
+ "id": "a15297bd-0c4a-4b4f-8fbb-b36a35b7eb07",
+ "instance_id": "617be32e-6497-4ed7-b4c7-0ee5a81edf49",
+ "instance_name": "my-instance",
+ "name": "backup_name",
+ "region": "fr-par",
+ "size": 600000,
+ "status": "ready",
+ "updated_at": "2020-08-06T12:42:10.581649Z"
+ }
+ }
+'''
+
+import datetime
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ scaleway_argument_spec,
+ SCALEWAY_REGIONS,
+)
+
+stable_states = (
+ 'ready',
+ 'deleting',
+)
+
+
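+# Poll the backup until it reaches a stable state. Note that the `else`
+# clause of the `while` loop only runs when the deadline expires without a
+# `return`, in which case the module fails with a timeout error.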
+def wait_to_complete_state_transition(module, account_api, backup=None):
+ wait_timeout = module.params['wait_timeout']
+ wait_sleep_time = module.params['wait_sleep_time']
+
+ if backup is None or backup['status'] in stable_states:
+ return backup
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+        module.debug('Waiting for the backup to finish its state transition')
+
+ response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+        if not response.ok:
+            module.fail_json(msg='Error getting backup [{0}: {1}]'.format(response.status_code, response.json))
+ response_json = response.json
+
+ if response_json['status'] in stable_states:
+            module.debug('The backup is no longer in transition.')
+ module.debug('Backup in state: %s' % response_json['status'])
+ return response_json
+ time.sleep(wait_sleep_time)
+ else:
+        module.fail_json(msg='Timed out waiting for the backup to finish its state transition')
+
+
+def present_strategy(module, account_api, backup):
+ name = module.params['name']
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+ expiration_date = module.params['expires_at']
+
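+    # An existing backup is patched only when the name or expiry differs;
+    # otherwise the module exits unchanged. A missing backup is created.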
+ if backup is not None:
+ if (backup['name'] == name or name is None) and (
+ backup['expires_at'] == expiration_date or expiration_date is None):
+ wait_to_complete_state_transition(module, account_api, backup)
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {}
+ if name is not None:
+ payload['name'] = name
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.patch('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']),
+ payload)
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error modifying backup [{0}: {1}]'.format(response.status_code, response.json))
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ payload = {'name': name, 'database_name': database_name, 'instance_id': instance_id}
+ if expiration_date is not None:
+ payload['expires_at'] = expiration_date
+
+ response = account_api.post('/rdb/v1/regions/%s/backups' % module.params.get('region'), payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error creating backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def absent_strategy(module, account_api, backup):
+ if backup is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup['id']))
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error deleting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def exported_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ if backup['download_url'] is not None:
+ module.exit_json(changed=False, metadata=backup)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+ response = account_api.post(
+ '/rdb/v1/regions/%s/backups/%s/export' % (module.params.get('region'), backup['id']), {})
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error exporting backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+def restored_strategy(module, account_api, backup):
+ if backup is None:
+ module.fail_json(msg=('Backup "%s" not found' % module.params['id']))
+
+ database_name = module.params['database_name']
+ instance_id = module.params['instance_id']
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ backup = wait_to_complete_state_transition(module, account_api, backup)
+
+ payload = {'database_name': database_name, 'instance_id': instance_id}
+ response = account_api.post('/rdb/v1/regions/%s/backups/%s/restore' % (module.params.get('region'), backup['id']),
+ payload)
+
+ if response.ok:
+ result = wait_to_complete_state_transition(module, account_api, response.json)
+ module.exit_json(changed=True, metadata=result)
+
+ module.fail_json(msg='Error restoring backup [{0}: {1}]'.format(response.status_code, response.json))
+
+
+state_strategy = {
+ 'present': present_strategy,
+ 'absent': absent_strategy,
+ 'exported': exported_strategy,
+ 'restored': restored_strategy,
+}
+
+
+def core(module):
+ state = module.params['state']
+ backup_id = module.params['id']
+
+ account_api = Scaleway(module)
+
+    if backup_id is None:
+        backup_by_id = None
+    else:
+        response = account_api.get('/rdb/v1/regions/%s/backups/%s' % (module.params.get('region'), backup_id))
+        status_code = response.status_code
+        if status_code == 404:
+            backup_by_id = None
+        elif response.ok:
+            backup_by_id = response.json
+        else:
+            module.fail_json(msg='Error getting backup [{0}: {1}]'.format(status_code, response.json['message']))
+
+ state_strategy[state](module, account_api, backup_by_id)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present', 'exported', 'restored']),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ id=dict(),
+ name=dict(type='str'),
+ database_name=dict(required=False),
+ instance_id=dict(required=False),
+ expires_at=dict(),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300),
+ wait_sleep_time=dict(type='int', default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_together=[
+ ['database_name', 'instance_id'],
+ ],
+ required_if=[
+ ['state', 'present', ['name', 'database_name', 'instance_id']],
+ ['state', 'absent', ['id']],
+ ['state', 'exported', ['id']],
+ ['state', 'restored', ['id', 'database_name', 'instance_id']],
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function.py b/ansible_collections/community/general/plugins/modules/scaleway_function.py
new file mode 100644
index 000000000..378545866
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function.py
@@ -0,0 +1,394 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function
+short_description: Scaleway Function management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module manages functions on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.scaleway_waitable_resource
+ - community.general.attributes
+requirements:
+ - passlib[argon2] >= 1.7.4
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the function.
+ default: present
+ choices:
+ - present
+ - absent
+
+ namespace_id:
+ type: str
+ description:
+ - Function namespace identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the function.
+ required: true
+
+ description:
+ description:
+ - Description of the function.
+ type: str
+ default: ''
+
+ min_scale:
+ description:
+ - Minimum number of replicas for the function.
+ type: int
+
+ max_scale:
+ description:
+ - Maximum number of replicas for the function.
+ type: int
+
+ environment_variables:
+ description:
+ - Environment variables of the function.
+ - Injected in function at runtime.
+ type: dict
+ default: {}
+
+ secret_environment_variables:
+ description:
+ - Secret environment variables of the function.
+      - Updating those values will not output a C(changed) state in Ansible.
+ - Injected in function at runtime.
+ type: dict
+ default: {}
+
+ runtime:
+ description:
+      - Runtime of the function.
+      - See U(https://www.scaleway.com/en/docs/compute/functions/reference-content/functions-lifecycle/) for all available runtimes.
+ type: str
+ required: true
+
+ memory_limit:
+ description:
+ - Resources define performance characteristics of your function.
+ - They are allocated to your function at runtime.
+ type: int
+
+ function_timeout:
+ description:
+ - The length of time your handler can spend processing a request before being stopped.
+ type: str
+
+ handler:
+ description:
+ - The C(module-name.export) value in your function.
+ type: str
+
+ privacy:
+ description:
+ - Privacy policies define whether a function can be executed anonymously.
+ - Choose C(public) to enable anonymous execution, or C(private) to protect your function with an authentication mechanism provided by the Scaleway API.
+ type: str
+ default: public
+ choices:
+ - public
+ - private
+
+ redeploy:
+ description:
+ - Redeploy the function if update is required.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Create a function
+ community.general.scaleway_function:
+ namespace_id: '{{ scw_function_namespace }}'
+ region: fr-par
+ state: present
+ name: my-awesome-function
+ runtime: python3
+ environment_variables:
+ MY_VAR: my_value
+ secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+ register: function_creation_task
+
+- name: Make sure function is deleted
+ community.general.scaleway_function:
+ namespace_id: '{{ scw_function_namespace }}'
+ region: fr-par
+ state: absent
+ name: my-awesome-function
+'''
+
+RETURN = '''
+function:
+ description: The function information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ cpu_limit: 140
+ description: Function used for testing scaleway_function ansible module
+ domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ handler: handler.handle
+ http_option: ""
+ id: ceb64dc4-4464-4196-8e20-ecef705475d3
+ max_scale: 5
+ memory_limit: 256
+ min_scale: 0
+ name: fn-ansible-test
+ namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d
+ privacy: public
+ region: fr-par
+ runtime: python310
+ runtime_message: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: created
+ timeout: 300s
+'''
+
+from copy import deepcopy
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+ scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed,
+ SecretVariables
+)
+from ansible.module_utils.basic import AnsibleModule
+
+STABLE_STATES = (
+ "ready",
+ "created",
+ "absent"
+)
+
+VERIFIABLE_MUTABLE_ATTRIBUTES = (
+ "description",
+ "min_scale",
+ "max_scale",
+ "environment_variables",
+ "runtime",
+ "memory_limit",
+ "timeout",
+ "handler",
+ "privacy",
+ "secret_environment_variables"
+)
+
+MUTABLE_ATTRIBUTES = VERIFIABLE_MUTABLE_ATTRIBUTES + (
+ "redeploy",
+)
+
+
+def payload_from_wished_fn(wished_fn):
+ payload = {
+ "namespace_id": wished_fn["namespace_id"],
+ "name": wished_fn["name"],
+ "description": wished_fn["description"],
+ "min_scale": wished_fn["min_scale"],
+ "max_scale": wished_fn["max_scale"],
+ "runtime": wished_fn["runtime"],
+ "memory_limit": wished_fn["memory_limit"],
+ "timeout": wished_fn["timeout"],
+ "handler": wished_fn["handler"],
+ "privacy": wished_fn["privacy"],
+ "redeploy": wished_fn["redeploy"],
+ "environment_variables": wished_fn["environment_variables"],
+ "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"])
+ }
+
+ return payload
+
+
+def absent_strategy(api, wished_fn):
+ changed = False
+
+ fn_list = api.fetch_all_resources("functions")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ if wished_fn["name"] not in fn_lookup:
+ return changed, {}
+
+ target_fn = fn_lookup[wished_fn["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Function would be destroyed"}
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_fn["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting function [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
+ return changed, response.json
+
+
+def present_strategy(api, wished_fn):
+ changed = False
+
+ fn_list = api.fetch_all_resources("functions")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ payload_fn = payload_from_wished_fn(wished_fn)
+
+ if wished_fn["name"] not in fn_lookup:
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A function would be created."}
+
+ # Creation doesn't support `redeploy` parameter
+ del payload_fn["redeploy"]
+
+ # Create function
+ api.warn(payload_fn)
+ creation_response = api.post(path=api.api_path,
+ data=payload_fn)
+
+ if not creation_response.ok:
+ msg = "Error during function creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_fn = fn_lookup[wished_fn["name"]]
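+    # Secret variables come back hashed (argon2) from the API; decode them
+    # against the wished plaintext values so the comparison below only
+    # reports real changes.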
+ decoded_target_fn = deepcopy(target_fn)
+ decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"],
+ payload_fn["secret_environment_variables"])
+
+ patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn,
+ wished=payload_fn,
+ verifiable_mutable_attributes=VERIFIABLE_MUTABLE_ATTRIBUTES,
+ mutable_attributes=MUTABLE_ATTRIBUTES)
+
+ if not patch_payload:
+ return changed, target_fn
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Function attributes would be changed."}
+
+ fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"],
+ data=patch_payload)
+
+ if not fn_patch_response.ok:
+ api.module.fail_json(msg='Error during function attributes update: [{0}: {1}]'.format(
+ fn_patch_response.status_code, fn_patch_response.json['message']))
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % target_fn["id"])
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
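+    # passlib[argon2] (see the module requirements) is needed to compare the
+    # hashed secret environment variables returned by the API.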
+ SecretVariables.ensure_scaleway_secret_package(module)
+
+ region = module.params["region"]
+ wished_function = {
+ "state": module.params["state"],
+ "namespace_id": module.params["namespace_id"],
+ "name": module.params["name"],
+ "description": module.params['description'],
+ "min_scale": module.params['min_scale'],
+ "max_scale": module.params['max_scale'],
+ "runtime": module.params["runtime"],
+ "memory_limit": module.params["memory_limit"],
+ "timeout": module.params["function_timeout"],
+ "handler": module.params["handler"],
+ "privacy": module.params["privacy"],
+ "redeploy": module.params["redeploy"],
+ "environment_variables": module.params['environment_variables'],
+ "secret_environment_variables": module.params['secret_environment_variables']
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "functions/v1beta1/regions/%s/functions" % region
+
+ changed, summary = state_strategy[wished_function["state"]](api=api, wished_fn=wished_function)
+
+ module.exit_json(changed=changed, function=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(scaleway_waitable_resource_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ namespace_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True),
+ description=dict(type='str', default=''),
+ min_scale=dict(type='int'),
+ max_scale=dict(type='int'),
+ runtime=dict(type='str', required=True),
+ memory_limit=dict(type='int'),
+ function_timeout=dict(type='str'),
+ handler=dict(type='str'),
+ privacy=dict(type='str', default='public', choices=['public', 'private']),
+ redeploy=dict(type='bool', default=False),
+ environment_variables=dict(type='dict', default={}),
+ secret_environment_variables=dict(type='dict', default={}, no_log=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_info.py b/ansible_collections/community/general/plugins/modules/scaleway_function_info.py
new file mode 100644
index 000000000..c30f0cdb0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_info.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function_info
+short_description: Retrieve information on Scaleway Function
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a function on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ namespace_id:
+ type: str
+ description:
+      - Function namespace identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the function.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Get a function info
+ community.general.scaleway_function_info:
+ namespace_id: '{{ scw_function_namespace }}'
+ region: fr-par
+ name: my-awesome-function
+ register: function_info_task
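+
+# Illustrative follow-up (not part of the module): read an attribute from the
+# registered result, for example the function's domain name.
+- ansible.builtin.debug:
+    msg: "{{ function_info_task.function.domain_name }}"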
+'''
+
+RETURN = '''
+function:
+ description: The function information.
+ returned: always
+ type: dict
+ sample:
+ cpu_limit: 140
+ description: Function used for testing scaleway_function ansible module
+ domain_name: fnansibletestfxamabuc-fn-ansible-test.functions.fnc.fr-par.scw.cloud
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ handler: handler.handle
+ http_option: ""
+ id: ceb64dc4-4464-4196-8e20-ecef705475d3
+ max_scale: 5
+ memory_limit: 256
+ min_scale: 0
+ name: fn-ansible-test
+ namespace_id: 82737d8d-0ebb-4d89-b0ad-625876eca50d
+ privacy: public
+ region: fr-par
+ runtime: python310
+ runtime_message: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: created
+ timeout: 300s
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def info_strategy(api, wished_fn):
+ fn_list = api.fetch_all_resources("functions")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ if wished_fn["name"] not in fn_lookup:
+ msg = "Error during function lookup: Unable to find function named '%s' in namespace '%s'" % (wished_fn["name"],
+ wished_fn["namespace_id"])
+
+ api.module.fail_json(msg=msg)
+
+ target_fn = fn_lookup[wished_fn["name"]]
+
+ response = api.get(path=api.api_path + "/%s" % target_fn["id"])
+ if not response.ok:
+ msg = "Error during function lookup: %s: '%s' (%s)" % (response.info['msg'],
+ response.json['message'],
+ response.json)
+ api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ wished_function = {
+ "namespace_id": module.params["namespace_id"],
+ "name": module.params["name"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "functions/v1beta1/regions/%s/functions" % region
+
+ summary = info_strategy(api=api, wished_fn=wished_function)
+
+ module.exit_json(changed=False, function=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ namespace_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py
new file mode 100644
index 000000000..f6310b35b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function namespace management module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function_namespace
+short_description: Scaleway Function namespace management
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module manages function namespaces on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.scaleway_waitable_resource
+ - community.general.attributes
+requirements:
+ - passlib[argon2] >= 1.7.4
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the function namespace.
+ default: present
+ choices:
+ - present
+ - absent
+
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the function namespace.
+ required: true
+
+ description:
+ description:
+ - Description of the function namespace.
+ type: str
+ default: ''
+
+ environment_variables:
+ description:
+ - Environment variables of the function namespace.
+ - Injected in functions at runtime.
+ type: dict
+ default: {}
+
+ secret_environment_variables:
+ description:
+ - Secret environment variables of the function namespace.
+      - Updating those values will not output a C(changed) state in Ansible.
+ - Injected in functions at runtime.
+ type: dict
+ default: {}
+'''
+
+EXAMPLES = '''
+- name: Create a function namespace
+ community.general.scaleway_function_namespace:
+ project_id: '{{ scw_project }}'
+ state: present
+ region: fr-par
+ name: my-awesome-function-namespace
+ environment_variables:
+ MY_VAR: my_value
+ secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+ register: function_namespace_creation_task
+
+- name: Make sure function namespace is deleted
+ community.general.scaleway_function_namespace:
+ project_id: '{{ scw_project }}'
+ state: absent
+ region: fr-par
+ name: my-awesome-function-namespace
+'''
+
+RETURN = '''
+function_namespace:
+ description: The function namespace information.
+ returned: when I(state=present)
+ type: dict
+ sample:
+ description: ""
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ id: 531a1fd7-98d2-4a74-ad77-d398324304b8
+ name: my-awesome-function-namespace
+ organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0
+ project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98
+ region: fr-par
+ registry_endpoint: ""
+ registry_namespace_id: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: pending
+'''
+
+from copy import deepcopy
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+ scaleway_waitable_resource_argument_spec, resource_attributes_should_be_changed,
+ SecretVariables
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "description",
+ "environment_variables",
+ "secret_environment_variables",
+)
+
+
+def payload_from_wished_fn(wished_fn):
+ payload = {
+ "project_id": wished_fn["project_id"],
+ "name": wished_fn["name"],
+ "description": wished_fn["description"],
+ "environment_variables": wished_fn["environment_variables"],
+ "secret_environment_variables": SecretVariables.dict_to_list(wished_fn["secret_environment_variables"])
+ }
+
+ return payload
+
+
+def absent_strategy(api, wished_fn):
+ changed = False
+
+ fn_list = api.fetch_all_resources("namespaces")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ if wished_fn["name"] not in fn_lookup:
+ return changed, {}
+
+ target_fn = fn_lookup[wished_fn["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Function namespace would be destroyed"}
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_fn["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting function namespace [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
+ return changed, response.json
+
+
+def present_strategy(api, wished_fn):
+ changed = False
+
+ fn_list = api.fetch_all_resources("namespaces")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ payload_fn = payload_from_wished_fn(wished_fn)
+
+ if wished_fn["name"] not in fn_lookup:
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A function namespace would be created."}
+
+ # Create function namespace
+ api.warn(payload_fn)
+ creation_response = api.post(path=api.api_path,
+ data=payload_fn)
+
+ if not creation_response.ok:
+ msg = "Error during function namespace creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ api.wait_to_complete_state_transition(resource=creation_response.json, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_fn = fn_lookup[wished_fn["name"]]
+ decoded_target_fn = deepcopy(target_fn)
+ decoded_target_fn["secret_environment_variables"] = SecretVariables.decode(decoded_target_fn["secret_environment_variables"],
+ payload_fn["secret_environment_variables"])
+
+ patch_payload = resource_attributes_should_be_changed(target=decoded_target_fn,
+ wished=payload_fn,
+ verifiable_mutable_attributes=MUTABLE_ATTRIBUTES,
+ mutable_attributes=MUTABLE_ATTRIBUTES)
+
+ if not patch_payload:
+ return changed, target_fn
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Function namespace attributes would be changed."}
+
+ fn_patch_response = api.patch(path=api.api_path + "/%s" % target_fn["id"],
+ data=patch_payload)
+
+ if not fn_patch_response.ok:
+ api.module.fail_json(msg='Error during function namespace attributes update: [{0}: {1}]'.format(
+ fn_patch_response.status_code, fn_patch_response.json['message']))
+
+ api.wait_to_complete_state_transition(resource=target_fn, stable_states=STABLE_STATES)
+ response = api.get(path=api.api_path + "/%s" % target_fn["id"])
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ SecretVariables.ensure_scaleway_secret_package(module)
+
+ region = module.params["region"]
+ wished_function_namespace = {
+ "state": module.params["state"],
+ "project_id": module.params["project_id"],
+ "name": module.params["name"],
+ "description": module.params['description'],
+ "environment_variables": module.params['environment_variables'],
+ "secret_environment_variables": module.params['secret_environment_variables']
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "functions/v1beta1/regions/%s/namespaces" % region
+
+ changed, summary = state_strategy[wished_function_namespace["state"]](api=api, wished_fn=wished_function_namespace)
+
+ module.exit_json(changed=changed, function_namespace=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(scaleway_waitable_resource_argument_spec())
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True),
+ description=dict(type='str', default=''),
+ environment_variables=dict(type='dict', default={}),
+ secret_environment_variables=dict(type='dict', default={}, no_log=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py
new file mode 100644
index 000000000..f3ea5ddfc
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_function_namespace_info.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Serverless function namespace info module
+#
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_function_namespace_info
+short_description: Retrieve information on Scaleway Function namespace
+version_added: 6.0.0
+author: Guillaume MARTINEZ (@Lunik)
+description:
+  - This module returns information about a function namespace on a Scaleway account.
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ project_id:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(fr-par)).
+ required: true
+ choices:
+ - fr-par
+ - nl-ams
+ - pl-waw
+
+ name:
+ type: str
+ description:
+ - Name of the function namespace.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Get a function namespace info
+ community.general.scaleway_function_namespace_info:
+ project_id: '{{ scw_project }}'
+ region: fr-par
+ name: my-awesome-function-namespace
+ register: function_namespace_info_task
+'''
+
+RETURN = '''
+function_namespace:
+ description: The function namespace information.
+ returned: always
+ type: dict
+ sample:
+ description: ""
+ environment_variables:
+ MY_VAR: my_value
+ error_message: null
+ id: 531a1fd7-98d2-4a74-ad77-d398324304b8
+ name: my-awesome-function-namespace
+ organization_id: e04e3bdc-015c-4514-afde-9389e9be24b0
+ project_id: d44cea58-dcb7-4c95-bff1-1105acb60a98
+ region: fr-par
+ registry_endpoint: ""
+ registry_namespace_id: ""
+ secret_environment_variables:
+ - key: MY_SECRET_VAR
+ value: $argon2id$v=19$m=65536,t=1,p=2$tb6UwSPWx/rH5Vyxt9Ujfw$5ZlvaIjWwNDPxD9Rdght3NarJz4IETKjpvAU3mMSmFg
+ status: pending
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ SCALEWAY_REGIONS, scaleway_argument_spec, Scaleway,
+)
+from ansible.module_utils.basic import AnsibleModule
+
+
+def info_strategy(api, wished_fn):
+ fn_list = api.fetch_all_resources("namespaces")
+ fn_lookup = dict((fn["name"], fn)
+ for fn in fn_list)
+
+ if wished_fn["name"] not in fn_lookup:
+ msg = "Error during function namespace lookup: Unable to find function namespace named '%s' in project '%s'" % (wished_fn["name"],
+ wished_fn["project_id"])
+
+ api.module.fail_json(msg=msg)
+
+ target_fn = fn_lookup[wished_fn["name"]]
+
+ response = api.get(path=api.api_path + "/%s" % target_fn["id"])
+ if not response.ok:
+ msg = "Error during function namespace lookup: %s: '%s' (%s)" % (response.info['msg'],
+ response.json['message'],
+ response.json)
+ api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ wished_function_namespace = {
+ "project_id": module.params["project_id"],
+ "name": module.params["name"]
+ }
+
+ api = Scaleway(module=module)
+ api.api_path = "functions/v1beta1/regions/%s/namespaces" % region
+
+ summary = info_strategy(api=api, wished_fn=wished_function_namespace)
+
+ module.exit_json(changed=False, function_namespace=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ project_id=dict(type='str', required=True),
+ region=dict(type='str', required=True, choices=SCALEWAY_REGIONS),
+ name=dict(type='str', required=True)
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_image_info.py b/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
new file mode 100644
index 000000000..bdae18514
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_image_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_image_info
+short_description: Gather information about the Scaleway images available
+description:
+ - Gather information about the Scaleway images available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway images information
+ community.general.scaleway_image_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_image_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_image_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_image_info": [
+ {
+ "arch": "x86_64",
+ "creation_date": "2018-07-17T16:18:49.276456+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": false,
+ "dtb": "",
+ "id": "15fbd2f7-a0f9-412b-8502-6a44da8d98b8",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.9-4.9.93-rev1/vmlinuz-4.9.93",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.9.93 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "00ae4a88-3252-4eda-9feb-5f6b56bf5ef0",
+ "modification_date": "2018-07-17T16:42:06.319315+00:00",
+ "name": "Debian Stretch",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "da32dfbb-c5ff-476d-ae2d-c297dd09b7dd",
+ "name": "snapshot-2a7229dc-d431-4dc5-b66e-95db08b773af-2018-07-17_16:18",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec, SCALEWAY_LOCATION)
+
+
+class ScalewayImageInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayImageInfo, self).__init__(module)
+ self.name = 'images'
+
+ region = module.params["region"]
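+        # Resolve the regional API endpoint for the selected location.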
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_image_info=ScalewayImageInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip.py b/ansible_collections/community/general/plugins/modules/scaleway_ip.py
new file mode 100644
index 000000000..cf8e2e601
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_ip.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway IP management module
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_ip
+short_description: Scaleway IP management module
+author: Remy Leone (@remyleone)
+description:
+  - This module manages IPs on a Scaleway account
+    U(https://developer.scaleway.com).
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the IP.
+ default: present
+ choices:
+ - present
+ - absent
+
+ organization:
+ type: str
+ description:
+      - Scaleway organization identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+      - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ id:
+ type: str
+ description:
+      - ID of the Scaleway IP (UUID).
+
+ server:
+ type: str
+ description:
+      - ID of the server you want to attach the IP to.
+      - To detach the IP, do not specify this option.
+
+ reverse:
+ type: str
+ description:
+      - Reverse DNS to assign to the IP.
+'''
+
+EXAMPLES = '''
+- name: Create an IP
+ community.general.scaleway_ip:
+ organization: '{{ scw_org }}'
+ state: present
+ region: par1
+ register: ip_creation_task
+
+- name: Make sure IP deleted
+ community.general.scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when I(state=present).
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "ips": [
+ {
+ "organization": "951df375-e094-4d26-97c1-ba548eeb9c42",
+ "reverse": null,
+ "id": "dd9e8df6-6775-4863-b517-e0b0ee3d7477",
+ "server": {
+ "id": "3f1568ca-b1a2-4e98-b6f7-31a0588157f1",
+ "name": "ansible_tuto-1"
+ },
+ "address": "212.47.232.136"
+ }
+ ]
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def ip_attributes_should_be_changed(api, target_ip, wished_ip):
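+    # Build a PATCH payload containing only the attributes that differ between
+    # the current IP and the wished one. target_ip["server"] may be None,
+    # hence the guarded lookups below.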
+ patch_payload = {}
+
+ if target_ip["reverse"] != wished_ip["reverse"]:
+ patch_payload["reverse"] = wished_ip["reverse"]
+
+ # IP is assigned to a server
+ if target_ip["server"] is None and wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+
+    # IP is unassigned from a server
+ try:
+ if target_ip["server"]["id"] and wished_ip["server"] is None:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+    # IP is migrated between two different servers
+ try:
+ if target_ip["server"]["id"] != wished_ip["server"]:
+ patch_payload["server"] = wished_ip["server"]
+ except (TypeError, KeyError):
+ pass
+
+ return patch_payload
+
+
+def payload_from_wished_ip(wished_ip):
+ return dict(
+ (k, v)
+ for k, v in wished_ip.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, wished_ip):
+ changed = False
+
+ response = api.get('ips')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ ips_list = response.json["ips"]
+ ip_lookup = dict((ip["id"], ip)
+ for ip in ips_list)
+
+ if wished_ip["id"] not in ip_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "An IP would be created."}
+
+ # Create IP
+ creation_response = api.post('/ips',
+ data=payload_from_wished_ip(wished_ip))
+
+ if not creation_response.ok:
+ msg = "Error during ip creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+ return changed, creation_response.json["ip"]
+
+ target_ip = ip_lookup[wished_ip["id"]]
+ patch_payload = ip_attributes_should_be_changed(api=api, target_ip=target_ip, wished_ip=wished_ip)
+
+ if not patch_payload:
+ return changed, target_ip
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP attributes would be changed."}
+
+ ip_patch_response = api.patch(path="ips/%s" % target_ip["id"],
+ data=patch_payload)
+
+ if not ip_patch_response.ok:
+ api.module.fail_json(msg='Error during IP attributes update: [{0}: {1}]'.format(
+ ip_patch_response.status_code, ip_patch_response.json['message']))
+
+ return changed, ip_patch_response.json["ip"]
+
+
+def absent_strategy(api, wished_ip):
+ response = api.get('ips')
+ changed = False
+
+    status_code = response.status_code
+
+    if not response.ok:
+        api.module.fail_json(msg='Error getting IPs [{0}: {1}]'.format(
+            status_code, response.json['message']))
+
+    ips_list = response.json["ips"]
+    ip_lookup = dict((ip["id"], ip)
+                     for ip in ips_list)
+ if wished_ip["id"] not in ip_lookup.keys():
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "IP would be destroyed"}
+
+ response = api.delete('/ips/' + wished_ip["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting IP [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+ wished_ip = {
+ "organization": module.params['organization'],
+ "reverse": module.params["reverse"],
+ "id": module.params["id"],
+ "server": module.params["server"]
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_ip=wished_ip)
+ else:
+ changed, summary = present_strategy(api=api, wished_ip=wished_ip)
+ module.exit_json(changed=changed, scaleway_ip=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ organization=dict(required=True),
+ server=dict(),
+ reverse=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ id=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py b/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
new file mode 100644
index 000000000..1fd4be589
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_ip_info.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_ip_info
+short_description: Gather information about the Scaleway IPs available
+description:
+  - Gather information about the Scaleway IPs available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway IPs information
+ community.general.scaleway_ip_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_ip_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_ip_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_ip_info": [
+ {
+ "address": "163.172.170.243",
+ "id": "ea081794-a581-8899-8451-386ddaf0a451",
+ "organization": "3f709602-5e6c-4619-b80c-e324324324af",
+ "reverse": null,
+ "server": {
+ "id": "12f19bc7-109c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayIpInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayIpInfo, self).__init__(module)
+ self.name = 'ips'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_ip_info=ScalewayIpInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
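
Every *_info module in this series has the same shape: subclass the Scaleway client, point self.name at a collection, and return get_resources(). The stub below only illustrates that shape; the real get_resources() lives in module_utils/scaleway.py, so its behaviour here is an assumption:

    class StubClient(object):
        def __init__(self, data):
            self.data = data
            self.name = None

        def get_resources(self):
            # the real helper issues GET /<name> against the region endpoint
            return self.data.get(self.name, [])

    class IpInfo(StubClient):
        def __init__(self, data):
            super(IpInfo, self).__init__(data)
            self.name = 'ips'

    print(IpInfo({'ips': [{'id': 'ip-1'}]}).get_resources())  # [{'id': 'ip-1'}]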
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_lb.py b/ansible_collections/community/general/plugins/modules/scaleway_lb.py
new file mode 100644
index 000000000..3e43a8ae2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_lb.py
@@ -0,0 +1,366 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Load-balancer management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_lb
+short_description: Scaleway load-balancer management module
+author: Remy Leone (@remyleone)
+description:
+ - "This module manages load-balancers on Scaleway."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ name:
+ type: str
+ description:
+ - Name of the load-balancer.
+ required: true
+
+ description:
+ type: str
+ description:
+ - Description of the load-balancer.
+ required: true
+
+ organization_id:
+ type: str
+ description:
+ - Organization identifier.
+ required: true
+
+ state:
+ type: str
+ description:
+ - Indicate desired state of the instance.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway zone.
+ required: true
+ choices:
+ - nl-ams
+ - fr-par
+ - pl-waw
+
+ tags:
+ type: list
+ elements: str
+ default: []
+ description:
+ - List of tags to apply to the load-balancer.
+
+ wait:
+ description:
+ - Wait for the load-balancer to reach its desired state before returning.
+ type: bool
+ default: false
+
+ wait_timeout:
+ type: int
+ description:
+ - Time to wait for the load-balancer to reach the expected state.
+ required: false
+ default: 300
+
+ wait_sleep_time:
+ type: int
+ description:
+ - Time to wait before every attempt to check the state of the load-balancer.
+ required: false
+ default: 3
+'''
+
+EXAMPLES = '''
+- name: Create a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: present
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+ tags:
+ - hello
+
+- name: Delete a load-balancer
+ community.general.scaleway_lb:
+ name: foobar
+ state: absent
+ organization_id: 951df375-e094-4d26-97c1-ba548eeb9c42
+ region: fr-par
+'''
+
+RETURN = '''
+{
+ "scaleway_lb": {
+ "backend_count": 0,
+ "frontend_count": 0,
+ "description": "Description of my load-balancer",
+ "id": "00000000-0000-0000-0000-000000000000",
+ "instances": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.1",
+ "region": "fr-par",
+ "status": "ready"
+ },
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "10.0.0.2",
+ "region": "fr-par",
+ "status": "ready"
+ }
+ ],
+ "ip": [
+ {
+ "id": "00000000-0000-0000-0000-000000000000",
+ "ip_address": "192.168.0.1",
+ "lb_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "reverse": ""
+ }
+ ],
+ "name": "lb_ansible_test",
+ "organization_id": "00000000-0000-0000-0000-000000000000",
+ "region": "fr-par",
+ "status": "ready",
+ "tags": [
+ "first_tag",
+ "second_tag"
+ ]
+ }
+}
+'''
+
+import datetime
+import time
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_REGIONS, SCALEWAY_ENDPOINT, scaleway_argument_spec, Scaleway
+
+STABLE_STATES = (
+ "ready",
+ "absent"
+)
+
+MUTABLE_ATTRIBUTES = (
+ "name",
+ "description"
+)
+
+
+def payload_from_wished_lb(wished_lb):
+ return {
+ "organization_id": wished_lb["organization_id"],
+ "name": wished_lb["name"],
+ "tags": wished_lb["tags"],
+ "description": wished_lb["description"]
+ }
+
+
+def fetch_state(api, lb):
+ api.module.debug("fetch_state of load-balancer: %s" % lb["id"])
+ response = api.get(path=api.api_path + "/%s" % lb["id"])
+
+ if response.status_code == 404:
+ return "absent"
+
+ if not response.ok:
+ msg = 'Error during state fetching: (%s) %s' % (response.status_code, response.json)
+ api.module.fail_json(msg=msg)
+
+ try:
+ api.module.debug("Load-balancer %s in state: %s" % (lb["id"], response.json["status"]))
+ return response.json["status"]
+ except KeyError:
+ api.module.fail_json(msg="Could not fetch state in %s" % response.json)
+
+
+def wait_to_complete_state_transition(api, lb, force_wait=False):
+ wait = api.module.params["wait"]
+ if not (wait or force_wait):
+ return
+ wait_timeout = api.module.params["wait_timeout"]
+ wait_sleep_time = api.module.params["wait_sleep_time"]
+
+ start = datetime.datetime.utcnow()
+ end = start + datetime.timedelta(seconds=wait_timeout)
+ while datetime.datetime.utcnow() < end:
+ api.module.debug("We are going to wait for the load-balancer to finish its transition")
+ state = fetch_state(api, lb)
+ if state in STABLE_STATES:
+ api.module.debug("It seems that the load-balancer is not in transition anymore.")
+ api.module.debug("load-balancer in state: %s" % fetch_state(api, lb))
+ break
+ time.sleep(wait_sleep_time)
+ else:
+ api.module.fail_json(msg="Server takes too long to finish its transition")
+
+
+def lb_attributes_should_be_changed(target_lb, wished_lb):
+ diff = dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES if target_lb[attr] != wished_lb[attr])
+
+ if diff:
+ return dict((attr, wished_lb[attr]) for attr in MUTABLE_ATTRIBUTES)
+ else:
+ return diff
+
+
+def present_strategy(api, wished_lb):
+ changed = False
+
+ response = api.get(path=api.api_path)
+ if not response.ok:
+ api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+ response.status_code, response.json['message']))
+
+ lbs_list = response.json["lbs"]
+ lb_lookup = dict((lb["name"], lb)
+ for lb in lbs_list)
+
+ if wished_lb["name"] not in lb_lookup.keys():
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "A load-balancer would be created."}
+
+ # Create Load-balancer
+    api.module.debug("Creating load-balancer with payload: %s" % payload_from_wished_lb(wished_lb))
+ creation_response = api.post(path=api.api_path,
+ data=payload_from_wished_lb(wished_lb))
+
+ if not creation_response.ok:
+ msg = "Error during lb creation: %s: '%s' (%s)" % (creation_response.info['msg'],
+ creation_response.json['message'],
+ creation_response.json)
+ api.module.fail_json(msg=msg)
+
+ wait_to_complete_state_transition(api=api, lb=creation_response.json)
+ response = api.get(path=api.api_path + "/%s" % creation_response.json["id"])
+ return changed, response.json
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ patch_payload = lb_attributes_should_be_changed(target_lb=target_lb,
+ wished_lb=wished_lb)
+
+ if not patch_payload:
+ return changed, target_lb
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer attributes would be changed."}
+
+ lb_patch_response = api.put(path=api.api_path + "/%s" % target_lb["id"],
+ data=patch_payload)
+
+ if not lb_patch_response.ok:
+ api.module.fail_json(msg='Error during load-balancer attributes update: [{0}: {1}]'.format(
+ lb_patch_response.status_code, lb_patch_response.json['message']))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, lb_patch_response.json
+
+
+def absent_strategy(api, wished_lb):
+    response = api.get(path=api.api_path)
+    changed = False
+
+    if not response.ok:
+        api.module.fail_json(msg='Error getting load-balancers [{0}: {1}]'.format(
+            response.status_code, response.json['message']))
+
+    # Only index into the payload once the request is known to have succeeded
+    lbs_json = response.json
+    lbs_list = lbs_json["lbs"]
+
+    lb_lookup = dict((lb["name"], lb)
+                     for lb in lbs_list)
+    if wished_lb["name"] not in lb_lookup.keys():
+        return changed, {}
+
+ target_lb = lb_lookup[wished_lb["name"]]
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "Load-balancer would be destroyed"}
+
+ wait_to_complete_state_transition(api=api, lb=target_lb, force_wait=True)
+ response = api.delete(path=api.api_path + "/%s" % target_lb["id"])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting load-balancer [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ wait_to_complete_state_transition(api=api, lb=target_lb)
+ return changed, response.json
+
+
+state_strategy = {
+ "present": present_strategy,
+ "absent": absent_strategy
+}
+
+
+def core(module):
+ region = module.params["region"]
+ wished_load_balancer = {
+ "state": module.params["state"],
+ "name": module.params["name"],
+ "description": module.params["description"],
+ "tags": module.params["tags"],
+ "organization_id": module.params["organization_id"]
+ }
+ module.params['api_url'] = SCALEWAY_ENDPOINT
+ api = Scaleway(module=module)
+ api.api_path = "lb/v1/regions/%s/lbs" % region
+
+ changed, summary = state_strategy[wished_load_balancer["state"]](api=api,
+ wished_lb=wished_load_balancer)
+ module.exit_json(changed=changed, scaleway_lb=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ description=dict(required=True),
+ region=dict(required=True, choices=SCALEWAY_REGIONS),
+ state=dict(choices=list(state_strategy.keys()), default='present'),
+ tags=dict(type="list", elements="str", default=[]),
+ organization_id=dict(required=True),
+ wait=dict(type="bool", default=False),
+ wait_timeout=dict(type="int", default=300),
+ wait_sleep_time=dict(type="int", default=3),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
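
wait_to_complete_state_transition() above is a bounded polling loop: fetch the state, stop on a stable value, fail once the deadline passes. The same logic in isolation, with a made-up fetch callable and state sequence:

    import time

    def wait_for_state(fetch, stable_states, timeout=300, sleep=3):
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = fetch()
            if state in stable_states:
                return state
            time.sleep(sleep)
        raise RuntimeError("resource took too long to finish its transition")

    states = iter(["pending", "pending", "ready"])
    print(wait_for_state(lambda: next(states), ("ready", "absent"), sleep=0))  # ready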
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py b/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
new file mode 100644
index 000000000..e9e272c98
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_organization_info.py
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_organization_info
+short_description: Gather information about the Scaleway organizations available
+description:
+ - Gather information about the Scaleway organizations available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+options:
+  api_url:
+    type: str
+    description:
+ - Scaleway API URL.
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway organizations information
+ community.general.scaleway_organization_info:
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_organization_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_organization_info:
+ description: Response from Scaleway API.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_organization_info": [
+ {
+ "address_city_name": "Paris",
+ "address_country_code": "FR",
+ "address_line1": "42 Rue de l'univers",
+ "address_line2": null,
+ "address_postal_code": "75042",
+ "address_subdivision_code": "FR-75",
+ "creation_date": "2018-08-06T13:43:28.508575+00:00",
+ "currency": "EUR",
+ "customer_class": "individual",
+ "id": "3f709602-5e6c-4619-b80c-e8432ferewtr",
+ "locale": "fr_FR",
+ "modification_date": "2018-08-06T14:56:41.401685+00:00",
+ "name": "James Bond",
+ "support_id": "694324",
+ "support_level": "basic",
+ "support_pin": "9324",
+ "users": [],
+ "vat_number": null,
+ "warnings": []
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec
+)
+
+
+class ScalewayOrganizationInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayOrganizationInfo, self).__init__(module)
+ self.name = 'organizations'
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_organization_info=ScalewayOrganizationInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_private_network.py b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py
new file mode 100644
index 000000000..33fb7381c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_private_network.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway VPC management module
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_private_network
+short_description: Scaleway private network management
+version_added: 4.5.0
+author: Pascal MANGIN (@pastral)
+description:
+ - "This module manages private network on Scaleway account (U(https://developer.scaleway.com))."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the VPC.
+ default: present
+ choices:
+ - present
+ - absent
+
+ project:
+ type: str
+ description:
+ - Project identifier.
+ required: true
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ type: str
+ description:
+ - Name of the VPC.
+
+ tags:
+ type: list
+ elements: str
+ description:
+ - List of tags to apply to the instance.
+ default: []
+
+'''
+
+EXAMPLES = '''
+- name: Create a private network
+  community.general.scaleway_private_network:
+ project: '{{ scw_project }}'
+ name: 'vpc_one'
+ state: present
+ region: par1
+ register: vpc_creation_task
+
+- name: Make sure private network with name 'foo' is deleted in region par1
+  community.general.scaleway_private_network:
+ name: 'foo'
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+scaleway_private_network:
+ description: Information on the VPC.
+ returned: success when I(state=present)
+ type: dict
+ sample:
+ {
+ "created_at": "2022-01-15T11:11:12.676445Z",
+ "id": "12345678-f1e6-40ec-83e5-12345d67ed89",
+ "name": "network",
+ "organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "tags": [
+ "tag1",
+ "tag2",
+ "tag3",
+ "tag4",
+ "tag5"
+ ],
+ "updated_at": "2022-01-15T11:12:04.624837Z",
+ "zone": "fr-par-2"
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def get_private_network(api, name, page=1):
+ page_size = 10
+ response = api.get('private-networks', params={'name': name, 'order_by': 'name_asc', 'page': page, 'page_size': page_size})
+ if not response.ok:
+ msg = "Error during get private network creation: %s: '%s' (%s)" % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+
+ if response.json['total_count'] == 0:
+ return None
+
+    for private_network in response.json['private_networks']:
+        if private_network['name'] == name:
+            return private_network
+
+ # search on next page if needed
+ if (page * page_size) < response.json['total_count']:
+ return get_private_network(api, name, page + 1)
+
+ return None
+
+
+def present_strategy(api, wished_private_network):
+
+ changed = False
+ private_network = get_private_network(api, wished_private_network['name'])
+ if private_network is not None:
+ if set(wished_private_network['tags']) == set(private_network['tags']):
+ return changed, private_network
+ else:
+            # private network needs to be updated
+ data = {'name': wished_private_network['name'],
+ 'tags': wished_private_network['tags']
+ }
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "private network would be updated"}
+
+ response = api.patch(path='private-networks/' + private_network['id'], data=data)
+ if not response.ok:
+ api.module.fail_json(msg='Error updating private network [{0}: {1}]'.format(response.status_code, response.json))
+
+ return changed, response.json
+
+    # private network needs to be created
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "private network would be created"}
+
+ data = {'name': wished_private_network['name'],
+ 'project_id': wished_private_network['project'],
+ 'tags': wished_private_network['tags']
+ }
+
+ response = api.post(path='private-networks/', data=data)
+
+ if not response.ok:
+ api.module.fail_json(msg='Error creating private network [{0}: {1}]'.format(response.status_code, response.json))
+
+ return changed, response.json
+
+
+def absent_strategy(api, wished_private_network):
+
+ changed = False
+ private_network = get_private_network(api, wished_private_network['name'])
+ if private_network is None:
+ return changed, {}
+
+ changed = True
+ if api.module.check_mode:
+ return changed, {"status": "private network would be destroyed"}
+
+ response = api.delete('private-networks/' + private_network['id'])
+
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting private network [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ return changed, response.json
+
+
+def core(module):
+
+ wished_private_network = {
+ "project": module.params['project'],
+ "tags": module.params['tags'],
+ "name": module.params['name']
+ }
+
+ region = module.params["region"]
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint_vpc"]
+
+ api = Scaleway(module=module)
+ if module.params["state"] == "absent":
+ changed, summary = absent_strategy(api=api, wished_private_network=wished_private_network)
+ else:
+ changed, summary = present_strategy(api=api, wished_private_network=wished_private_network)
+ module.exit_json(changed=changed, scaleway_private_network=summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ project=dict(required=True),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ tags=dict(type="list", elements="str", default=[]),
+ name=dict()
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
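
get_private_network() above pages through the API recursively, page_size results at a time, until a name matches or the total count is exhausted. The same walk written iteratively, against a fake fetch_page(page) callable standing in for api.get (the sample pages are invented):

    def find_by_name(fetch_page, name, page_size=10):
        page = 1
        while True:
            body = fetch_page(page)
            for pn in body['private_networks']:
                if pn['name'] == name:
                    return pn
            if page * page_size >= body['total_count'] or not body['private_networks']:
                return None
            page += 1

    pages = [{'private_networks': [{'name': 'a'}], 'total_count': 2},
             {'private_networks': [{'name': 'b'}], 'total_count': 2}]
    print(find_by_name(lambda p: pages[p - 1], 'b', page_size=1))  # {'name': 'b'}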
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
new file mode 100644
index 000000000..5523da41c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Security Group management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group
+short_description: Scaleway Security Group management module
+author: Antoine Barbare (@abarbare)
+description:
+ - "This module manages Security Group on Scaleway account U(https://developer.scaleway.com)."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ description:
+ - Indicate desired state of the Security Group.
+ type: str
+ choices: [ absent, present ]
+ default: present
+
+ organization:
+ description:
+ - Organization identifier.
+ type: str
+ required: true
+
+ region:
+ description:
+ - Scaleway region to use (for example C(par1)).
+ type: str
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ name:
+ description:
+ - Name of the Security Group.
+ type: str
+ required: true
+
+ description:
+ description:
+ - Description of the Security Group.
+ type: str
+
+ stateful:
+ description:
+ - Create a stateful security group which allows established connections in and out.
+ type: bool
+ required: true
+
+ inbound_default_policy:
+ description:
+ - Default policy for incoming traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ outbound_default_policy:
+ description:
+      - Default policy for outgoing traffic.
+ type: str
+ choices: [ accept, drop ]
+
+ organization_default:
+ description:
+ - Create security group to be the default one.
+ type: bool
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group
+ community.general.scaleway_security_group:
+ state: present
+ region: par1
+ name: security_group
+ description: "my security group description"
+ organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when I(state=present).
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group": {
+ "description": "my security group description",
+ "enable_default_security": true,
+ "id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
+ "inbound_default_policy": "accept",
+ "name": "security_group",
+ "organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
+ "organization_default": false,
+ "outbound_default_policy": "accept",
+ "servers": [],
+ "stateful": false
+ }
+ }
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+from uuid import uuid4
+
+
+def payload_from_security_group(security_group):
+ return dict(
+ (k, v)
+ for k, v in security_group.items()
+ if k != 'id' and v is not None
+ )
+
+
+def present_strategy(api, security_group):
+ ret = {'changed': False}
+
+ response = api.get('security_groups')
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+
+ if security_group['name'] not in security_group_lookup.keys():
+ ret['changed'] = True
+ if api.module.check_mode:
+ # Help user when check mode is enabled by defining id key
+ ret['scaleway_security_group'] = {'id': str(uuid4())}
+ return ret
+
+ # Create Security Group
+ response = api.post('/security_groups',
+ data=payload_from_security_group(security_group))
+
+ if not response.ok:
+ msg = 'Error during security group creation: "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json)
+ api.module.fail_json(msg=msg)
+ ret['scaleway_security_group'] = response.json['security_group']
+
+ else:
+ ret['scaleway_security_group'] = security_group_lookup[security_group['name']]
+
+ return ret
+
+
+def absent_strategy(api, security_group):
+ response = api.get('security_groups')
+ ret = {'changed': False}
+
+ if not response.ok:
+ api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ security_group_lookup = dict((sg['name'], sg)
+ for sg in response.json['security_groups'])
+ if security_group['name'] not in security_group_lookup.keys():
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete('/security_groups/' + security_group_lookup[security_group['name']]['id'])
+ if not response.ok:
+ api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ security_group = {
+ 'organization': module.params['organization'],
+ 'name': module.params['name'],
+ 'description': module.params['description'],
+ 'stateful': module.params['stateful'],
+ 'inbound_default_policy': module.params['inbound_default_policy'],
+ 'outbound_default_policy': module.params['outbound_default_policy'],
+ 'organization_default': module.params['organization_default'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ api = Scaleway(module=module)
+ if module.params['state'] == 'present':
+ summary = present_strategy(api=api, security_group=security_group)
+ else:
+ summary = absent_strategy(api=api, security_group=security_group)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ organization=dict(type='str', required=True),
+ name=dict(type='str', required=True),
+ description=dict(type='str'),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ stateful=dict(type='bool', required=True),
+ inbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ outbound_default_policy=dict(type='str', choices=['accept', 'drop']),
+ organization_default=dict(type='bool'),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
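
The required_if rule in main() makes the two default policies mandatory only when stateful is true. A standalone sketch of that conditional validation, mirroring (not reusing) AnsibleModule's semantics:

    def check_required_if(params, key, value, requirements):
        if params.get(key) == value:
            missing = [r for r in requirements if params.get(r) is None]
            if missing:
                raise ValueError("%s is %s but the following are missing: %s"
                                 % (key, value, ", ".join(missing)))

    params = {"stateful": True, "inbound_default_policy": "accept",
              "outbound_default_policy": None}
    try:
        check_required_if(params, "stateful", True,
                          ["inbound_default_policy", "outbound_default_policy"])
    except ValueError as exc:
        print(exc)  # stateful is True but the following are missing: outbound_default_policy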
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
new file mode 100644
index 000000000..fb28e8774
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group_info.py
@@ -0,0 +1,118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_security_group_info
+short_description: Gather information about the Scaleway security groups available
+description:
+ - Gather information about the Scaleway security groups available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway security groups information
+ community.general.scaleway_security_group_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_security_group_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_security_group_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_security_group_info": [
+ {
+ "description": "test-ams",
+ "enable_default_security": true,
+ "id": "7fcde327-8bed-43a6-95c4-6dfbc56d8b51",
+ "name": "test-ams",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "organization_default": false,
+ "servers": [
+ {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d158"
+ }
+ ]
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewaySecurityGroupInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySecurityGroupInfo, self).__init__(module)
+ self.name = 'security_groups'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_security_group_info=ScalewaySecurityGroupInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
new file mode 100644
index 000000000..136631d03
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_security_group_rule.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway Security Group Rule management module
+#
+# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_security_group_rule
+short_description: Scaleway Security Group Rule management module
+author: Antoine Barbare (@abarbare)
+description:
+ - "This module manages Security Group Rule on Scaleway account U(https://developer.scaleway.com)."
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+requirements:
+ - ipaddress
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the Security Group Rule.
+ default: present
+ choices:
+ - present
+ - absent
+
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+
+ protocol:
+ type: str
+ description:
+ - Network protocol to use.
+ choices:
+ - TCP
+ - UDP
+ - ICMP
+ required: true
+
+ port:
+ description:
+      - Port related to the rule; use C(null) to apply the rule to all ports.
+ required: true
+ type: int
+
+ ip_range:
+ type: str
+ description:
+ - IPV4 CIDR notation to apply to the rule.
+ default: 0.0.0.0/0
+
+ direction:
+ type: str
+ description:
+ - Rule direction.
+ choices:
+ - inbound
+ - outbound
+ required: true
+
+ action:
+ type: str
+ description:
+ - Rule action.
+ choices:
+ - accept
+ - drop
+ required: true
+
+ security_group:
+ type: str
+ description:
+ - Security Group unique identifier.
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Create a Security Group Rule
+  community.general.scaleway_security_group_rule:
+    state: present
+    region: par1
+    protocol: TCP
+    port: 80
+    ip_range: 0.0.0.0/0
+    direction: inbound
+    action: accept
+    security_group: b57210ee-1281-4820-a6db-329f78596ecb
+  register: security_group_rule_creation_task
+'''
+
+RETURN = '''
+data:
+ description: This is only present when I(state=present).
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "scaleway_security_group_rule": {
+ "direction": "inbound",
+ "protocol": "TCP",
+ "ip_range": "0.0.0.0/0",
+ "dest_port_from": 80,
+ "action": "accept",
+ "position": 2,
+ "dest_port_to": null,
+ "editable": null,
+ "id": "10cb0b9a-80f6-4830-abd7-a31cd828b5e9"
+ }
+ }
+'''
+
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway, payload_from_object
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+try:
+ from ipaddress import ip_network # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ IPADDRESS_IMP_ERR = traceback.format_exc()
+ HAS_IPADDRESS = False
+else:
+ IPADDRESS_IMP_ERR = None
+ HAS_IPADDRESS = True
+
+
+def get_sgr_from_api(security_group_rules, security_group_rule):
+ """ Check if a security_group_rule specs are present in security_group_rules
+ Return None if no rules match the specs
+ Return the rule if found
+ """
+ for sgr in security_group_rules:
+ if (sgr['ip_range'] == security_group_rule['ip_range'] and sgr['dest_port_from'] == security_group_rule['dest_port_from'] and
+ sgr['direction'] == security_group_rule['direction'] and sgr['action'] == security_group_rule['action'] and
+ sgr['protocol'] == security_group_rule['protocol']):
+ return sgr
+
+ return None
+
+
+def present_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ # Create Security Group Rule
+ response = api.post('/security_groups/%s/rules' % security_group_id,
+ data=payload_from_object(security_group_rule))
+
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error during security group rule creation: "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+ ret['scaleway_security_group_rule'] = response.json['rule']
+
+ else:
+ ret['scaleway_security_group_rule'] = existing_rule
+
+ return ret
+
+
+def absent_strategy(api, security_group_id, security_group_rule):
+ ret = {'changed': False}
+
+ response = api.get('security_groups/%s/rules' % security_group_id)
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error getting security group rules "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ existing_rule = get_sgr_from_api(
+ response.json['rules'], security_group_rule)
+
+ if not existing_rule:
+ return ret
+
+ ret['changed'] = True
+ if api.module.check_mode:
+ return ret
+
+ response = api.delete(
+ '/security_groups/%s/rules/%s' %
+ (security_group_id, existing_rule['id']))
+ if not response.ok:
+ api.module.fail_json(
+ msg='Error deleting security group rule "%s": "%s" (%s)' %
+ (response.info['msg'], response.json['message'], response.json))
+
+ return ret
+
+
+def core(module):
+ api = Scaleway(module=module)
+
+ security_group_rule = {
+ 'protocol': module.params['protocol'],
+ 'dest_port_from': module.params['port'],
+ 'ip_range': module.params['ip_range'],
+ 'direction': module.params['direction'],
+ 'action': module.params['action'],
+ }
+
+ region = module.params['region']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]['api_endpoint']
+
+ if module.params['state'] == 'present':
+ summary = present_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ else:
+ summary = absent_strategy(
+ api=api,
+ security_group_id=module.params['security_group'],
+ security_group_rule=security_group_rule)
+ module.exit_json(**summary)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ protocol=dict(type='str', required=True, choices=['TCP', 'UDP', 'ICMP']),
+ port=dict(type='int', required=True),
+ ip_range=dict(type='str', default='0.0.0.0/0'),
+ direction=dict(type='str', required=True, choices=['inbound', 'outbound']),
+ action=dict(type='str', required=True, choices=['accept', 'drop']),
+ security_group=dict(type='str', required=True),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ if not HAS_IPADDRESS:
+ module.fail_json(msg=missing_required_lib('ipaddress'), exception=IPADDRESS_IMP_ERR)
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
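
get_sgr_from_api() above identifies an existing rule by comparing five attributes rather than by ID. The same comparison reduced to a key tuple, with invented sample rules:

    MATCH_KEYS = ('ip_range', 'dest_port_from', 'direction', 'action', 'protocol')

    def find_rule(rules, wished):
        for rule in rules:
            if all(rule[k] == wished[k] for k in MATCH_KEYS):
                return rule
        return None

    existing = [{'id': 'r-1', 'ip_range': '0.0.0.0/0', 'dest_port_from': 80,
                 'direction': 'inbound', 'action': 'accept', 'protocol': 'TCP'}]
    wished = {'ip_range': '0.0.0.0/0', 'dest_port_from': 80,
              'direction': 'inbound', 'action': 'accept', 'protocol': 'TCP'}
    print(find_rule(existing, wished)['id'])  # r-1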
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_server_info.py b/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
new file mode 100644
index 000000000..01e9410da
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_server_info.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_server_info
+short_description: Gather information about the Scaleway servers available
+description:
+ - Gather information about the Scaleway servers available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway servers information
+ community.general.scaleway_server_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_server_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_server_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_server_info": [
+ {
+ "arch": "x86_64",
+ "boot_type": "local",
+ "bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "commercial_type": "START1-XS",
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "dynamic_ip_required": false,
+ "enable_ipv6": false,
+ "extra_networks": [],
+ "hostname": "scw-e0d256",
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "image": {
+ "arch": "x86_64",
+ "creation_date": "2018-04-26T12:42:21.619844+00:00",
+ "default_bootscript": {
+ "architecture": "x86_64",
+ "bootcmdargs": "LINUX_COMMON scaleway boot=local nbd.max_part=16",
+ "default": true,
+ "dtb": "",
+ "id": "b1e68c26-a19c-4eac-9222-498b22bd7ad9",
+ "initrd": "http://169.254.42.24/initrd/initrd-Linux-x86_64-v3.14.5.gz",
+ "kernel": "http://169.254.42.24/kernel/x86_64-mainline-lts-4.4-4.4.127-rev1/vmlinuz-4.4.127",
+ "organization": "11111111-1111-4111-8111-111111111111",
+ "public": true,
+ "title": "x86_64 mainline 4.4.127 rev1"
+ },
+ "extra_volumes": [],
+ "from_server": null,
+ "id": "67375eb1-f14d-4f02-bb42-6119cecbde51",
+ "modification_date": "2018-04-26T12:49:07.573004+00:00",
+ "name": "Ubuntu Xenial",
+ "organization": "51b656e3-4865-41e8-adbc-0c45bdd780db",
+ "public": true,
+ "root_volume": {
+ "id": "020b8d61-3867-4a0e-84a4-445c5393e05d",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "size": 25000000000,
+ "volume_type": "l_ssd"
+ },
+ "state": "available"
+ },
+ "ipv6": null,
+ "location": {
+ "cluster_id": "5",
+ "hypervisor_id": "412",
+ "node_id": "2",
+ "platform_id": "13",
+ "zone_id": "par1"
+ },
+ "maintenances": [],
+ "modification_date": "2018-08-14T21:37:28.630882+00:00",
+ "name": "scw-e0d256",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "private_ip": "10.14.222.131",
+ "protected": false,
+ "public_ip": {
+ "address": "163.172.170.197",
+ "dynamic": false,
+ "id": "ea081794-a581-4495-8451-386ddaf0a451"
+ },
+ "security_group": {
+ "id": "a37379d2-d8b0-4668-9cfb-1233fc436f7e",
+ "name": "Default security group"
+ },
+ "state": "running",
+ "state_detail": "booted",
+ "tags": [],
+ "volumes": {
+ "0": {
+ "creation_date": "2018-08-14T21:36:56.271545+00:00",
+ "export_uri": "device://dev/vda",
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "modification_date": "2018-08-14T21:36:56.271545+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": {
+ "id": "12f19bc7-108c-4517-954c-e6b3d0311363",
+ "name": "scw-e0d256"
+ },
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ }
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION,
+)
+
+
+class ScalewayServerInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayServerInfo, self).__init__(module)
+ self.name = 'servers'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_server_info=ScalewayServerInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py b/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
new file mode 100644
index 000000000..687f43c85
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_snapshot_info.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_snapshot_info
+short_description: Gather information about the Scaleway snapshots available
+description:
+  - Gather information about the Scaleway snapshots available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway snapshots information
+ community.general.scaleway_snapshot_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_snapshot_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_snapshot_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_snapshot_info": [
+ {
+ "base_volume": {
+ "id": "68386fae-4f55-4fbf-aabb-953036a85872",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42"
+ },
+ "creation_date": "2018-08-14T22:34:35.299461+00:00",
+ "id": "b61b4b03-a2e9-4da5-b5ea-e462ac0662d2",
+ "modification_date": "2018-08-14T22:34:54.520560+00:00",
+ "name": "snapshot-87fc282d-f252-4262-adad-86979d9074cf-2018-04-26_12:42 snapshot",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "size": 25000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway,
+ ScalewayException,
+ scaleway_argument_spec,
+ SCALEWAY_LOCATION
+)
+
+
+class ScalewaySnapshotInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewaySnapshotInfo, self).__init__(module)
+ self.name = 'snapshots'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_snapshot_info=ScalewaySnapshotInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
new file mode 100644
index 000000000..a39e57aa3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_sshkey.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway SSH keys management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_sshkey
+short_description: Scaleway SSH keys management module
+author: Remy Leone (@remyleone)
+description:
+ - "This module manages SSH keys on Scaleway account U(https://developer.scaleway.com)."
+extends_documentation_fragment:
+- community.general.scaleway
+- community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the SSH key.
+ default: present
+ choices:
+ - present
+ - absent
+ ssh_pub_key:
+ type: str
+ description:
+ - The public SSH key as a string to add.
+ required: true
+ api_url:
+ type: str
+ description:
+ - Scaleway API URL.
+ default: 'https://account.scaleway.com'
+ aliases: ['base_url']
+'''
+
+EXAMPLES = '''
+- name: "Add SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+
+- name: "Delete SSH key"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "absent"
+
+- name: "Add SSH key with explicit token"
+ community.general.scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAA..."
+ state: "present"
+ oauth_token: "6ecd2c9b-6f4f-44d4-a187-61a92078d08c"
+'''
+
+RETURN = '''
+data:
+ description: This is only present when I(state=present).
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "ssh_public_keys": [
+ {"key": "ssh-rsa AAAA...."}
+ ]
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible_collections.community.general.plugins.module_utils.scaleway import scaleway_argument_spec, Scaleway
+
+
+def extract_present_sshkeys(raw_organization_dict):
+ ssh_key_list = raw_organization_dict["organizations"][0]["users"][0]["ssh_public_keys"]
+ ssh_key_lookup = [ssh_key["key"] for ssh_key in ssh_key_list]
+ return ssh_key_lookup
+
+
+def extract_user_id(raw_organization_dict):
+ return raw_organization_dict["organizations"][0]["users"][0]["id"]
+
+
+def sshkey_user_patch(ssh_lookup):
+ ssh_list = {"ssh_public_keys": [{"key": key}
+ for key in ssh_lookup]}
+ return ssh_list
+
+
+def core(module):
+ ssh_pub_key = module.params['ssh_pub_key']
+ state = module.params["state"]
+ account_api = Scaleway(module)
+ response = account_api.get('organizations')
+
+ status_code = response.status_code
+ organization_json = response.json
+
+ if not response.ok:
+ module.fail_json(msg='Error getting ssh key [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ user_id = extract_user_id(organization_json)
+ present_sshkeys = []
+ try:
+ present_sshkeys = extract_present_sshkeys(organization_json)
+    except (KeyError, IndexError):
+        module.fail_json(msg="Error while extracting present SSH keys from API")
+
+ if state in ('present',):
+ if ssh_pub_key in present_sshkeys:
+ module.exit_json(changed=False)
+
+        # If the key is not found, create it
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.append(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if ssh_pub_key not in present_sshkeys:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ present_sshkeys.remove(ssh_pub_key)
+ payload = sshkey_user_patch(present_sshkeys)
+
+ response = account_api.patch('/users/%s' % user_id, data=payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting ssh key [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ ssh_pub_key=dict(required=True),
+ api_url=dict(fallback=(env_fallback, ['SCW_API_URL']), default='https://account.scaleway.com', aliases=['base_url']),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
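
Rather than adding or removing a single key, the module above always PATCHes the user's complete key list; sshkey_user_patch() rebuilds that payload from scratch. The reconciliation on plain lists (the sample keys are placeholders):

    def reconcile(present, key, state):
        wished = list(present)
        if state == 'present' and key not in wished:
            wished.append(key)
        elif state == 'absent' and key in wished:
            wished.remove(key)
        payload = {"ssh_public_keys": [{"key": k} for k in wished]}
        return wished != present, payload

    changed, payload = reconcile(["ssh-rsa AAAA..."], "ssh-ed25519 BBBB...", "present")
    print(changed)  # True
    print(payload)  # {'ssh_public_keys': [{'key': 'ssh-rsa AAAA...'}, {'key': 'ssh-ed25519 BBBB...'}]}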
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_user_data.py b/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
new file mode 100644
index 000000000..08ff86a55
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_user_data.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway user data management module
+#
+# Copyright (C) 2018 Online SAS.
+# https://www.scaleway.com
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_user_data
+short_description: Scaleway user_data management module
+author: Remy Leone (@remyleone)
+description:
+ - This module manages user_data on compute instances on Scaleway.
+  - It can be used, for example, to configure C(cloud-init).
+extends_documentation_fragment:
+- community.general.scaleway
+- community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ server_id:
+ type: str
+ description:
+ - Scaleway Compute instance ID of the server.
+ required: true
+
+ user_data:
+ type: dict
+ description:
+      - User-defined data. Typically used with C(cloud-init).
+ - Pass your C(cloud-init) script here as a string.
+ required: false
+
+ region:
+ type: str
+ description:
+ - Scaleway compute zone.
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = '''
+- name: Update the cloud-init
+ community.general.scaleway_user_data:
+ server_id: '5a33b4ab-57dd-4eb6-8b0a-d95eb63492ce'
+ region: ams1
+ user_data:
+ cloud-init: 'final_message: "Hello World!"'
+'''
+
+RETURN = '''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+
+
+def patch_user_data(compute_api, server_id, key, value):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.patch(path=path, data=value, headers={"Content-Type": "text/plain"})
+ if not response.ok:
+ msg = 'Error during user_data patching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def delete_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting deleting user_data attributes: %s" % key)
+
+ response = compute_api.delete(path="servers/%s/user_data/%s" % (server_id, key))
+
+ if not response.ok:
+        msg = 'Error during user_data deletion: (%s) %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response
+
+
+def get_user_data(compute_api, server_id, key):
+ compute_api.module.debug("Starting patching user_data attributes")
+
+ path = "servers/%s/user_data/%s" % (server_id, key)
+ response = compute_api.get(path=path)
+ if not response.ok:
+        msg = 'Error during user_data fetching: %s %s' % (response.status_code, response.body)
+ compute_api.module.fail_json(msg=msg)
+
+ return response.json
+
+
+def core(module):
+ region = module.params["region"]
+ server_id = module.params["server_id"]
+ user_data = module.params["user_data"]
+ changed = False
+
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+ compute_api = Scaleway(module=module)
+
+ user_data_list = compute_api.get(path="servers/%s/user_data" % server_id)
+ if not user_data_list.ok:
+ msg = 'Error during user_data fetching: %s %s' % (user_data_list.status_code, user_data_list.body)
+ compute_api.module.fail_json(msg=msg)
+
+ present_user_data_keys = user_data_list.json["user_data"]
+ present_user_data = dict(
+ (key, get_user_data(compute_api=compute_api, server_id=server_id, key=key))
+ for key in present_user_data_keys
+ )
+
+ if present_user_data == user_data:
+ module.exit_json(changed=changed, msg=user_data_list.json)
+
+ # First we remove keys that are not defined in the wished user_data
+ for key in present_user_data:
+ if key not in user_data:
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ delete_user_data(compute_api=compute_api, server_id=server_id, key=key)
+
+ # Then we patch keys that are different
+ for key, value in user_data.items():
+ if key not in present_user_data or user_data[key] != present_user_data[key]:
+ changed = True
+ if compute_api.module.check_mode:
+ module.exit_json(changed=changed, msg={"status": "User-data of %s would be patched." % server_id})
+
+ patch_user_data(compute_api=compute_api, server_id=server_id, key=key, value=value)
+
+ module.exit_json(changed=changed, msg=user_data)
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ user_data=dict(type="dict"),
+ server_id=dict(required=True),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
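
The core() function above reconciles the user_data keys present on the server with the wished dict: keys that are no longer wished are deleted, and keys that are missing or differ are patched. A minimal standalone sketch of that reconciliation, assuming plain dicts and hypothetical delete/patch callbacks in place of the Scaleway API calls:

def reconcile_user_data(present, wished, delete, patch):
    """Return True if any delete or patch was issued."""
    changed = False
    # Keys on the server that the wished state no longer defines are removed
    for key in present:
        if key not in wished:
            delete(key)
            changed = True
    # Keys that are missing or whose value differs are (re)patched
    for key, value in wished.items():
        if present.get(key) != value:
            patch(key, value)
            changed = True
    return changed

ops = []
print(reconcile_user_data(
    present={'cloud-init': 'old', 'stale': 'x'},
    wished={'cloud-init': 'new'},
    delete=lambda key: ops.append(('delete', key)),
    patch=lambda key, value: ops.append(('patch', key)),
))
print(ops)  # [('delete', 'stale'), ('patch', 'cloud-init')]
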
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume.py b/ansible_collections/community/general/plugins/modules/scaleway_volume.py
new file mode 100644
index 000000000..2ff09da54
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_volume.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Scaleway volumes management module
+#
+# Copyright (C) 2018 Henryk Konsek Consulting (hekonsek@gmail.com).
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: scaleway_volume
+short_description: Scaleway volumes management module
+author: Henryk Konsek (@hekonsek)
+description:
+ - "This module manages volumes on Scaleway account U(https://developer.scaleway.com)."
+extends_documentation_fragment:
+- community.general.scaleway
+- community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ state:
+ type: str
+ description:
+ - Indicate desired state of the volume.
+ default: present
+ choices:
+ - present
+ - absent
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+ name:
+ type: str
+ description:
+ - Name used to identify the volume.
+ required: true
+ project:
+ type: str
+ description:
+ - Scaleway project ID to which volume belongs.
+ version_added: 4.3.0
+ organization:
+ type: str
+ description:
+ - Scaleway organization ID to which volume belongs.
+ size:
+ type: int
+ description:
+ - Size of the volume in bytes.
+ volume_type:
+ type: str
+ description:
+ - Type of the volume (for example C(l_ssd)).
+'''
+
+EXAMPLES = '''
+- name: Create 10GB volume
+ community.general.scaleway_volume:
+ name: my-volume
+ state: present
+ region: par1
+ project: "{{ scw_org }}"
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- name: Make sure volume deleted
+ community.general.scaleway_volume:
+ name: my-volume
+ state: absent
+ region: par1
+'''
+
+RETURN = '''
+data:
+ description: This is only present when I(state=present).
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "volume": {
+ "export_uri": null,
+ "id": "c675f420-cfeb-48ff-ba2a-9d2a4dbe3fcd",
+ "name": "volume-0-3",
+ "project": "000a115d-2852-4b0a-9ce8-47f1134ba95a",
+ "server": null,
+ "size": 10000000000,
+ "volume_type": "l_ssd"
+ }
+}
+'''
+
+from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
+from ansible.module_utils.basic import AnsibleModule
+
+
+def core(module):
+ region = module.params["region"]
+ state = module.params['state']
+ name = module.params['name']
+ organization = module.params['organization']
+ project = module.params['project']
+ size = module.params['size']
+ volume_type = module.params['volume_type']
+ module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+ account_api = Scaleway(module)
+ response = account_api.get('volumes')
+ status_code = response.status_code
+ volumes_json = response.json
+
+ if project is None:
+ project = organization
+
+ if not response.ok:
+ module.fail_json(msg='Error getting volumes [{0}: {1}]'.format(
+ status_code, response.json['message']))
+
+ volume_by_name = None
+ for volume in volumes_json['volumes']:
+ if volume['project'] == project and volume['name'] == name:
+ volume_by_name = volume
+
+ if state in ('present',):
+ if volume_by_name is not None:
+ module.exit_json(changed=False)
+
+ payload = {'name': name, 'project': project, 'size': size, 'volume_type': volume_type}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.post('/volumes', payload)
+
+ if response.ok:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error creating volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+ elif state in ('absent',):
+ if volume_by_name is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ response = account_api.delete('/volumes/' + volume_by_name['id'])
+ if response.status_code == 204:
+ module.exit_json(changed=True, data=response.json)
+
+ module.fail_json(msg='Error deleting volume [{0}: {1}]'.format(
+ response.status_code, response.json))
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ size=dict(type='int'),
+ project=dict(),
+ organization=dict(),
+ volume_type=dict(),
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ('organization', 'project'),
+ ],
+ required_one_of=[
+ ('organization', 'project'),
+ ],
+ )
+
+ core(module)
+
+
+if __name__ == '__main__':
+ main()
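
The idempotency of scaleway_volume rests entirely on the name/project lookup in core(): an existing match means state "present" exits unchanged, and a missing match means state "absent" exits unchanged. A short sketch of that lookup under the same semantics (the module keeps the last match, since the loop does not break); the data below is made up:

def find_volume(volumes, project, name):
    """Return the last volume owned by project with the given name, or None."""
    match = None
    for volume in volumes:
        if volume['project'] == project and volume['name'] == name:
            match = volume
    return match

volumes = [
    {'id': 'a1', 'project': 'p1', 'name': 'my-volume'},
    {'id': 'b2', 'project': 'p2', 'name': 'my-volume'},
]
assert find_volume(volumes, 'p1', 'my-volume')['id'] == 'a1'
assert find_volume(volumes, 'p1', 'other') is None
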
diff --git a/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py b/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
new file mode 100644
index 000000000..471845c43
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/scaleway_volume_info.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: scaleway_volume_info
+short_description: Gather information about the Scaleway volumes available
+description:
+ - Gather information about the Scaleway volumes available.
+author:
+ - "Yanis Guenane (@Spredzy)"
+ - "Remy Leone (@remyleone)"
+extends_documentation_fragment:
+ - community.general.scaleway
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+options:
+ region:
+ type: str
+ description:
+ - Scaleway region to use (for example C(par1)).
+ required: true
+ choices:
+ - ams1
+ - EMEA-NL-EVS
+ - par1
+ - EMEA-FR-PAR1
+ - par2
+ - EMEA-FR-PAR2
+ - waw1
+ - EMEA-PL-WAW1
+'''
+
+EXAMPLES = r'''
+- name: Gather Scaleway volumes information
+ community.general.scaleway_volume_info:
+ region: par1
+ register: result
+
+- ansible.builtin.debug:
+ msg: "{{ result.scaleway_volume_info }}"
+'''
+
+RETURN = r'''
+---
+scaleway_volume_info:
+ description:
+ - Response from Scaleway API.
+ - "For more details please refer to: U(https://developers.scaleway.com/en/products/instance/api/)."
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ "scaleway_volume_info": [
+ {
+ "creation_date": "2018-08-14T20:56:24.949660+00:00",
+ "export_uri": null,
+ "id": "b8d51a06-daeb-4fef-9539-a8aea016c1ba",
+ "modification_date": "2018-08-14T20:56:24.949660+00:00",
+ "name": "test-volume",
+ "organization": "3f709602-5e6c-4619-b80c-e841c89734af",
+ "server": null,
+ "size": 50000000000,
+ "state": "available",
+ "volume_type": "l_ssd"
+ }
+ ]
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.scaleway import (
+ Scaleway, ScalewayException, scaleway_argument_spec,
+ SCALEWAY_LOCATION)
+
+
+class ScalewayVolumeInfo(Scaleway):
+
+ def __init__(self, module):
+ super(ScalewayVolumeInfo, self).__init__(module)
+ self.name = 'volumes'
+
+ region = module.params["region"]
+ self.module.params['api_url'] = SCALEWAY_LOCATION[region]["api_endpoint"]
+
+
+def main():
+ argument_spec = scaleway_argument_spec()
+ argument_spec.update(dict(
+ region=dict(required=True, choices=list(SCALEWAY_LOCATION.keys())),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ module.exit_json(
+ scaleway_volume_info=ScalewayVolumeInfo(module).get_resources()
+ )
+ except ScalewayException as exc:
+ module.fail_json(msg=exc.message)
+
+
+if __name__ == '__main__':
+ main()
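
scaleway_volume_info is a thin subclass: it only sets self.name to the API collection and pins the regional endpoint, and the shared Scaleway base class does the actual GET in get_resources(). A sketch of that pattern with a stand-in base class (FakeScalewayBase and the catalogue dict are invented for illustration; the real base class lives in module_utils/scaleway.py):

class FakeScalewayBase(object):
    # Stand-in for the shared Scaleway base class, which issues
    # GET <api_url>/<self.name> and unwraps the JSON response.
    def __init__(self, catalogue):
        self.catalogue = catalogue

    def get_resources(self):
        return self.catalogue[self.name]

class VolumeInfo(FakeScalewayBase):
    def __init__(self, catalogue):
        super(VolumeInfo, self).__init__(catalogue)
        self.name = 'volumes'  # collection name doubles as API path and JSON key

catalogue = {'volumes': [{'id': 'a1', 'name': 'test-volume'}]}
print(VolumeInfo(catalogue).get_resources())
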
diff --git a/ansible_collections/community/general/plugins/modules/sefcontext.py b/ansible_collections/community/general/plugins/modules/sefcontext.py
new file mode 100644
index 000000000..b2fb36767
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sefcontext.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sefcontext
+short_description: Manages SELinux file context mapping definitions
+description:
+ - Manages SELinux file context mapping definitions.
+ - Similar to the C(semanage fcontext) command.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.platform
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ platform:
+ platforms: linux
+options:
+ target:
+ description:
+ - Target path (expression).
+ type: str
+ required: true
+ aliases: [ path ]
+ ftype:
+ description:
+ - The file type that should have SELinux contexts applied.
+ - "The following file type options are available:"
+ - C(a) for all files,
+ - C(b) for block devices,
+ - C(c) for character devices,
+ - C(d) for directories,
+ - C(f) for regular files,
+ - C(l) for symbolic links,
+ - C(p) for named pipes,
+ - C(s) for socket files.
+ type: str
+ choices: [ a, b, c, d, f, l, p, s ]
+ default: a
+ setype:
+ description:
+ - SELinux type for the specified I(target).
+ type: str
+ substitute:
+ description:
+ - Path to use to substitute file context(s) for the specified I(target). The context labeling for the I(target) subtree is made equivalent to this path.
+ - This is also referred to as SELinux file context equivalence and it implements the C(equal) functionality of the SELinux management tools.
+ version_added: 6.4.0
+ type: str
+ aliases: [ equal ]
+ seuser:
+ description:
+ - SELinux user for the specified I(target).
+ - Defaults to C(system_u) for new file contexts and to the existing value when modifying file contexts.
+ type: str
+ selevel:
+ description:
+ - SELinux range for the specified I(target).
+ - Defaults to C(s0) for new file contexts and to the existing value when modifying file contexts.
+ type: str
+ aliases: [ serange ]
+ state:
+ description:
+ - Whether the SELinux file context must be C(absent) or C(present).
+ - Specifying C(absent) without either I(setype) or I(substitute) deletes both the SELinux type and the path substitution mappings that match I(target).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ - Note that this does not apply SELinux file contexts to existing files.
+ type: bool
+ default: true
+ ignore_selinux_state:
+ description:
+ - Useful for scenarios (for example, a chrooted environment) in which you cannot get the real SELinux state.
+ type: bool
+ default: false
+notes:
+- The changes are persistent across reboots.
+- I(setype) and I(substitute) are mutually exclusive.
+- If I(state=present) then one of I(setype) or I(substitute) is mandatory.
+- The M(community.general.sefcontext) module does not modify existing files to the new
+ SELinux context(s), so it is advisable to first create the SELinux
+ file contexts before creating files, or run C(restorecon) manually
+ for the existing files that require the new SELinux file contexts.
+- Not applying SELinux fcontexts to existing files is a deliberate
+ decision, as it would be unclear what the reported changes would
+ entail, and there is no guarantee that applying the SELinux fcontext
+ would not pick up other unrelated prior changes.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Allow apache to modify files in /srv/git_repos
+ community.general.sefcontext:
+ target: '/srv/git_repos(/.*)?'
+ setype: httpd_sys_rw_content_t
+ state: present
+
+- name: Substitute file contexts for path /srv/containers with /var/lib/containers
+ community.general.sefcontext:
+ target: /srv/containers
+ substitute: /var/lib/containers
+ state: present
+
+- name: Delete file context path substitution for /srv/containers
+ community.general.sefcontext:
+ target: /srv/containers
+ substitute: /var/lib/containers
+ state: absent
+
+- name: Delete any file context mappings for path /srv/git
+ community.general.sefcontext:
+ target: /srv/git
+ state: absent
+
+- name: Apply new SELinux file context to filesystem
+ ansible.builtin.command: restorecon -irv /srv/git_repos
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+# Add missing entries (backward compatible)
+if HAVE_SEOBJECT:
+ seobject.file_types.update(
+ a=seobject.SEMANAGE_FCONTEXT_ALL,
+ b=seobject.SEMANAGE_FCONTEXT_BLOCK,
+ c=seobject.SEMANAGE_FCONTEXT_CHAR,
+ d=seobject.SEMANAGE_FCONTEXT_DIR,
+ f=seobject.SEMANAGE_FCONTEXT_REG,
+ l=seobject.SEMANAGE_FCONTEXT_LINK,
+ p=seobject.SEMANAGE_FCONTEXT_PIPE,
+ s=seobject.SEMANAGE_FCONTEXT_SOCK,
+ )
+
+# Make backward compatible
+option_to_file_type_str = dict(
+ a='all files',
+ b='block device',
+ c='character device',
+ d='directory',
+ f='regular file',
+ l='symbolic link',
+ p='named pipe',
+ s='socket',
+)
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def semanage_fcontext_exists(sefcontext, target, ftype):
+ ''' Get the SELinux file context mapping definition from policy. Return None if it does not exist. '''
+
+ # Beware that records comprise a string representation of the file_type
+ record = (target, option_to_file_type_str[ftype])
+ records = sefcontext.get_all()
+ try:
+ return records[record]
+ except KeyError:
+ return None
+
+
+def semanage_fcontext_substitute_exists(sefcontext, target):
+ ''' Get the SELinux file context path substitution definition from policy. Return None if it does not exist. '''
+
+ return sefcontext.equiv_dist.get(target, sefcontext.equiv.get(target))
+
+
+def semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser, sestore=''):
+ ''' Add or modify SELinux file context mapping definition to the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ if substitute is None:
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ if exists:
+ # Modify existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if seuser is None:
+ seuser = orig_seuser
+ if serange is None:
+ serange = orig_serange
+
+ if setype != orig_setype or seuser != orig_seuser or serange != orig_serange:
+ if not module.check_mode:
+ sefcontext.modify(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, orig_seuser, orig_serole, orig_setype, orig_serange)
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, orig_serole, setype, serange)
+ else:
+ # Add missing entry
+ if seuser is None:
+ seuser = 'system_u'
+ if serange is None:
+ serange = 's0'
+
+ if not module.check_mode:
+ sefcontext.add(target, setype, ftype, serange, seuser)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context mappings\n'
+ prepared_diff += '+%s %s %s:%s:%s:%s\n' % (target, ftype, seuser, 'object_r', setype, serange)
+ else:
+ exists = semanage_fcontext_substitute_exists(sefcontext, target)
+ if exists:
+ # Modify existing path substitution entry
+ orig_substitute = exists
+
+ if substitute != orig_substitute:
+ if not module.check_mode:
+ sefcontext.modify_equal(target, substitute)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Change to semanage file context path substitutions\n'
+ prepared_diff += '-%s = %s\n' % (target, orig_substitute)
+ prepared_diff += '+%s = %s\n' % (target, substitute)
+ else:
+ # Add missing path substitution entry
+ if not module.check_mode:
+ sefcontext.add_equal(target, substitute)
+ changed = True
+ if module._diff:
+ prepared_diff += '# Addition to semanage file context path substitutions\n'
+ prepared_diff += '+%s = %s\n' % (target, substitute)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, seuser=seuser, serange=serange, **result)
+
+
+def semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload, sestore=''):
+ ''' Delete SELinux file context mapping definition from the policy. '''
+
+ changed = False
+ prepared_diff = ''
+
+ try:
+ sefcontext = seobject.fcontextRecords(sestore)
+ sefcontext.set_reload(do_reload)
+ exists = semanage_fcontext_exists(sefcontext, target, ftype)
+ substitute_exists = semanage_fcontext_substitute_exists(sefcontext, target)
+ if exists and substitute is None:
+ # Remove existing entry
+ orig_seuser, orig_serole, orig_setype, orig_serange = exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, ftype)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion from semanage file context mappings\n'
+ prepared_diff += '-%s %s %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])
+ if substitute_exists and setype is None and ((substitute is not None and substitute_exists == substitute) or substitute is None):
+ # Remove existing path substitution entry
+ orig_substitute = substitute_exists
+
+ if not module.check_mode:
+ sefcontext.delete(target, orig_substitute)
+ changed = True
+
+ if module._diff:
+ prepared_diff += '# Deletion from semanage file context path substitutions\n'
+ prepared_diff += '-%s = %s\n' % (target, orig_substitute)
+
+ except Exception as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))
+
+ if module._diff and prepared_diff:
+ result['diff'] = dict(prepared=prepared_diff)
+
+ module.exit_json(changed=changed, **result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ target=dict(type='str', required=True, aliases=['path']),
+ ftype=dict(type='str', default='a', choices=list(option_to_file_type_str.keys())),
+ setype=dict(type='str'),
+ substitute=dict(type='str', aliases=['equal']),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange']),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ mutually_exclusive=[
+ ('setype', 'substitute'),
+ ('substitute', 'ftype'),
+ ('substitute', 'seuser'),
+ ('substitute', 'selevel'),
+ ],
+ required_if=[
+ ('state', 'present', ('setype', 'substitute'), True),
+ ],
+
+ supports_check_mode=True,
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ target = module.params['target']
+ ftype = module.params['ftype']
+ setype = module.params['setype']
+ substitute = module.params['substitute']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = dict(target=target, ftype=ftype, setype=setype, substitute=substitute, state=state)
+
+ if state == 'present':
+ semanage_fcontext_modify(module, result, target, ftype, setype, substitute, do_reload, serange, seuser)
+ elif state == 'absent':
+ semanage_fcontext_delete(module, result, target, ftype, setype, substitute, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+
+if __name__ == '__main__':
+ main()
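
In diff mode, sefcontext does not shell out to semanage for a diff; it assembles a prepared diff string from the old and new context tuples, as seen in semanage_fcontext_modify() above. A sketch of just that string assembly (the tuples below are illustrative, not taken from a live policy):

def fcontext_change_diff(target, ftype, orig, new):
    """orig/new are (seuser, serole, setype, serange) tuples."""
    lines = ['# Change to semanage file context mappings']
    lines.append('-%s %s %s:%s:%s:%s' % ((target, ftype) + orig))
    lines.append('+%s %s %s:%s:%s:%s' % ((target, ftype) + new))
    return '\n'.join(lines) + '\n'

print(fcontext_change_diff(
    '/srv/git_repos(/.*)?', 'a',
    ('system_u', 'object_r', 'var_t', 's0'),
    ('system_u', 'object_r', 'httpd_sys_rw_content_t', 's0'),
))
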
diff --git a/ansible_collections/community/general/plugins/modules/selinux_permissive.py b/ansible_collections/community/general/plugins/modules/selinux_permissive.py
new file mode 100644
index 000000000..7249a01b8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/selinux_permissive.py
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Michael Scherer <misc@zarb.org>
+# inspired by code of github.com/dandiker/
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: selinux_permissive
+short_description: Change permissive domain in SELinux policy
+description:
+ - Add or remove a domain from the list of permissive domains.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ domain:
+ description:
+ - The domain that will be added or removed from the list of permissive domains.
+ type: str
+ required: true
+ aliases: [ name ]
+ permissive:
+ description:
+ - Indicate if the domain should or should not be set as permissive.
+ type: bool
+ required: true
+ no_reload:
+ description:
+ - Disable reloading of the SELinux policy after making a change to a domain's permissive setting.
+ - The default is C(false), which causes the policy to be reloaded when a domain changes state.
+ - Reloading the policy does not work on older versions of the C(policycoreutils-python) library, for example in EL 6.
+ type: bool
+ default: false
+ store:
+ description:
+ - Name of the SELinux policy store to use.
+ type: str
+ default: ''
+notes:
+ - Requires a recent version of SELinux and C(policycoreutils-python) (EL 6 or newer).
+requirements: [ policycoreutils-python ]
+author:
+- Michael Scherer (@mscherer) <misc@zarb.org>
+'''
+
+EXAMPLES = r'''
+- name: Change the httpd_t domain to permissive
+ community.general.selinux_permissive:
+ name: httpd_t
+ permissive: true
+'''
+
+import traceback
+
+HAVE_SEOBJECT = False
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str', required=True, aliases=['name']),
+ store=dict(type='str', default=''),
+ permissive=dict(type='bool', required=True),
+ no_reload=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ )
+
+ # global vars
+ changed = False
+ store = module.params['store']
+ permissive = module.params['permissive']
+ domain = module.params['domain']
+ no_reload = module.params['no_reload']
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(changed=False, msg=missing_required_lib("policycoreutils-python"),
+ exception=SEOBJECT_IMP_ERR)
+
+ try:
+ permissive_domains = seobject.permissiveRecords(store)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ # not supported on EL 6
+ if 'set_reload' in dir(permissive_domains):
+ permissive_domains.set_reload(not no_reload)
+
+ try:
+ all_domains = permissive_domains.get_all()
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+
+ if permissive:
+ if domain not in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.add(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+ else:
+ if domain in all_domains:
+ if not module.check_mode:
+ try:
+ permissive_domains.delete(domain)
+ except ValueError as e:
+ module.fail_json(domain=domain, msg=to_native(e), exception=traceback.format_exc())
+ changed = True
+
+ module.exit_json(changed=changed, store=store,
+ permissive=permissive, domain=domain)
+
+
+if __name__ == '__main__':
+ main()
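
The main() above is a textbook check-mode-aware membership toggle: a change is reported whenever the domain's presence in the permissive list disagrees with the permissive flag, and the mutation itself is skipped under check mode. A condensed sketch using a plain set instead of seobject.permissiveRecords:

def ensure_permissive(all_domains, domain, permissive, check_mode=False):
    """Return True when a change is (or would be) needed."""
    if permissive and domain not in all_domains:
        if not check_mode:
            all_domains.add(domain)
        return True
    if not permissive and domain in all_domains:
        if not check_mode:
            all_domains.discard(domain)
        return True
    return False

domains = {'container_t'}
assert ensure_permissive(domains, 'httpd_t', True) is True
assert ensure_permissive(domains, 'httpd_t', True) is False   # already permissive
assert ensure_permissive(domains, 'httpd_t', False, check_mode=True) is True
assert 'httpd_t' in domains  # check mode did not modify anything
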
diff --git a/ansible_collections/community/general/plugins/modules/selogin.py b/ansible_collections/community/general/plugins/modules/selogin.py
new file mode 100644
index 000000000..57482b090
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/selogin.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Petr Lautrbach <plautrba@redhat.com>
+# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: selogin
+short_description: Manages Linux user to SELinux user mapping
+description:
+ - Manages Linux user to SELinux user mapping.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ login:
+ type: str
+ description:
+ - A Linux user.
+ required: true
+ seuser:
+ type: str
+ description:
+ - SELinux user name.
+ selevel:
+ type: str
+ aliases: [ serange ]
+ description:
+ - MLS/MCS security range (MLS/MCS systems only). The SELinux range for the SELinux login mapping; defaults to the SELinux user record range.
+ default: s0
+ state:
+ type: str
+ description:
+ - Desired mapping value.
+ default: present
+ choices: [ 'present', 'absent' ]
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: true
+ ignore_selinux_state:
+ description:
+ - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+notes:
+ - The changes are persistent across reboots.
+ - Not tested on any Debian-based system.
+requirements: [ 'libselinux', 'policycoreutils' ]
+author:
+- Dan Keder (@dankeder)
+- Petr Lautrbach (@bachradsusi)
+- James Cassell (@jamescassell)
+'''
+
+EXAMPLES = '''
+- name: Modify the default user on the system to the guest_u user
+ community.general.selogin:
+ login: __default__
+ seuser: guest_u
+ state: present
+
+- name: Assign gijoe user on an MLS machine a range and to the staff_u user
+ community.general.selogin:
+ login: gijoe
+ seuser: staff_u
+ serange: SystemLow-Secret
+ state: present
+
+- name: Assign all users in the engineering group to the staff_u user
+ community.general.selogin:
+ login: '%engineering'
+ seuser: staff_u
+ state: present
+'''
+
+RETURN = r'''
+# Default return values
+'''
+
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
+ """ Add linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ # for local_login in all_logins:
+ if login not in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.add(login, seuser, serange)
+ else:
+ if all_logins[login][0] != seuser or all_logins[login][1] != serange:
+ change = True
+ if not module.check_mode:
+ selogin.modify(login, seuser, serange)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_login_del(module, login, seuser, do_reload, sestore=''):
+ """ Delete linux user to SELinux user mapping
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type login: str
+ :param login: a Linux User or a Linux group if it begins with %
+
+ :type seuser: str
+ :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ try:
+ selogin = seobject.loginRecords(sestore)
+ selogin.set_reload(do_reload)
+ change = False
+ all_logins = selogin.get_all()
+ # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
+ if login in all_logins.keys():
+ change = True
+ if not module.check_mode:
+ selogin.delete(login)
+
+ except (ValueError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ login=dict(type='str', required=True),
+ seuser=dict(type='str'),
+ selevel=dict(type='str', aliases=['serange'], default='s0'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ ),
+ required_if=[
+ ["state", "present", ["seuser"]]
+ ],
+ supports_check_mode=True
+ )
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ login = module.params['login']
+ seuser = module.params['seuser']
+ serange = module.params['selevel']
+ state = module.params['state']
+ do_reload = module.params['reload']
+
+ result = {
+ 'login': login,
+ 'seuser': seuser,
+ 'serange': serange,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
+ elif state == 'absent':
+ result['changed'] = semanage_login_del(module, login, seuser, do_reload)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
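
semanage_login_add() decides between an add and a modify by inspecting the mapping returned by seobject's get_all(), which maps each login to a tuple beginning with (seuser, serange). A sketch of that decision alone, with a hand-built mapping standing in for the policy store:

def login_change_needed(all_logins, login, seuser, serange):
    if login not in all_logins:
        return 'add'
    if all_logins[login][0] != seuser or all_logins[login][1] != serange:
        return 'modify'
    return None

mappings = {'gijoe': ('staff_u', 'SystemLow-Secret')}
assert login_change_needed(mappings, 'jane', 'guest_u', 's0') == 'add'
assert login_change_needed(mappings, 'gijoe', 'staff_u', 's0') == 'modify'
assert login_change_needed(mappings, 'gijoe', 'staff_u', 'SystemLow-Secret') is None
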
diff --git a/ansible_collections/community/general/plugins/modules/sendgrid.py b/ansible_collections/community/general/plugins/modules/sendgrid.py
new file mode 100644
index 000000000..2c0cc9a5b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sendgrid.py
@@ -0,0 +1,280 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: sendgrid
+short_description: Sends an email with the SendGrid API
+description:
+ - "Sends an email with a SendGrid account through their API, not through
+ the SMTP service."
+notes:
+ - "This module is non-idempotent because it sends an email through the
+ external API. It is idempotent only in the case that the module fails."
+ - "Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need an active SendGrid
+ account."
+ - "In order to use api_key, cc, bcc, attachments, from_name, html_body, headers
+ you must pip install sendgrid"
+ - "since 2.2 I(username) and I(password) are not required if you supply an I(api_key)"
+requirements:
+ - sendgrid Python library 1.6.22 or lower (Sendgrid API V2 supported)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ username:
+ type: str
+ description:
+ - Username for logging into the SendGrid account.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ password:
+ type: str
+ description:
+ - Password that corresponds to the username.
+ - Since 2.2 it is only required if I(api_key) is not supplied.
+ from_address:
+ type: str
+ description:
+ - The address in the "from" field for the email.
+ required: true
+ to_addresses:
+ type: list
+ elements: str
+ description:
+ - A list with one or more recipient email addresses.
+ required: true
+ subject:
+ type: str
+ description:
+ - The desired subject for the email.
+ required: true
+ api_key:
+ type: str
+ description:
+ - Sendgrid API key to use instead of username/password.
+ cc:
+ type: list
+ elements: str
+ description:
+ - A list of email addresses to cc.
+ bcc:
+ type: list
+ elements: str
+ description:
+ - A list of email addresses to bcc.
+ attachments:
+ type: list
+ elements: path
+ description:
+ - A list of relative or explicit paths of files you want to attach (7MB limit as per SendGrid docs).
+ from_name:
+ type: str
+ description:
+ - The name you want to appear in the from field, for example C(John Doe).
+ html_body:
+ description:
+ - Whether the body is html content that should be rendered.
+ type: bool
+ default: false
+ headers:
+ type: dict
+ description:
+ - A dict to pass on as headers.
+ body:
+ type: str
+ description:
+ - The e-mail body content.
+ required: true
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = r'''
+- name: Send an email to a single recipient that the deployment was successful
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "ansible@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ subject: "Deployment success."
+ body: "The most recent Ansible deployment was successful."
+ delegate_to: localhost
+
+- name: Send an email to more than one recipient that the build failed
+ community.general.sendgrid:
+ username: "{{ sendgrid_username }}"
+ password: "{{ sendgrid_password }}"
+ from_address: "build@mycompany.com"
+ to_addresses:
+ - "ops@mycompany.com"
+ - "devteam@mycompany.com"
+ subject: "Build failure!."
+ body: "Unable to pull source repository from Git server."
+ delegate_to: localhost
+'''
+
+# =======================================
+# sendgrid module support methods
+#
+import os
+import traceback
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+SENDGRID_IMP_ERR = None
+try:
+ import sendgrid
+ HAS_SENDGRID = True
+except ImportError:
+ SENDGRID_IMP_ERR = traceback.format_exc()
+ HAS_SENDGRID = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.urls import fetch_url
+
+
+def post_sendgrid_api(module, username, password, from_address, to_addresses,
+ subject, body, api_key=None, cc=None, bcc=None, attachments=None,
+ html_body=False, from_name=None, headers=None):
+
+ if not HAS_SENDGRID:
+ SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
+ AGENT = "Ansible"
+ data = {'api_user': username, 'api_key': password,
+ 'from': from_address, 'subject': subject, 'text': body}
+ encoded_data = urlencode(data)
+ to_addresses_api = ''
+ for recipient in to_addresses:
+ recipient = to_bytes(recipient, errors='surrogate_or_strict')
+ to_addresses_api += '&to[]=%s' % recipient
+ encoded_data += to_addresses_api
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json'}
+ return fetch_url(module, SENDGRID_URI, data=encoded_data, headers=headers, method='POST')
+ else:
+ # Remove this check when adding Sendgrid API v3 support
+ if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
+ module.fail_json(msg="Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs.")
+
+ if api_key:
+ sg = sendgrid.SendGridClient(api_key)
+ else:
+ sg = sendgrid.SendGridClient(username, password)
+
+ message = sendgrid.Mail()
+ message.set_subject(subject)
+
+ for recip in to_addresses:
+ message.add_to(recip)
+
+ if cc:
+ for recip in cc:
+ message.add_cc(recip)
+ if bcc:
+ for recip in bcc:
+ message.add_bcc(recip)
+
+ if headers:
+ message.set_headers(headers)
+
+ if attachments:
+ for f in attachments:
+ name = os.path.basename(f)
+ message.add_attachment(name, f)
+
+ if from_name:
+ message.set_from('%s <%s>' % (from_name, from_address))
+ else:
+ message.set_from(from_address)
+
+ if html_body:
+ message.set_html(body)
+ else:
+ message.set_text(body)
+
+ return sg.send(message)
+# =======================================
+# Main
+#
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ username=dict(required=False),
+ password=dict(required=False, no_log=True),
+ api_key=dict(required=False, no_log=True),
+ bcc=dict(required=False, type='list', elements='str'),
+ cc=dict(required=False, type='list', elements='str'),
+ headers=dict(required=False, type='dict'),
+ from_address=dict(required=True),
+ from_name=dict(required=False),
+ to_addresses=dict(required=True, type='list', elements='str'),
+ subject=dict(required=True),
+ body=dict(required=True),
+ html_body=dict(required=False, default=False, type='bool'),
+ attachments=dict(required=False, type='list', elements='path')
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['api_key', 'password'],
+ ['api_key', 'username']
+ ],
+ required_together=[['username', 'password']],
+ )
+
+ username = module.params['username']
+ password = module.params['password']
+ api_key = module.params['api_key']
+ bcc = module.params['bcc']
+ cc = module.params['cc']
+ headers = module.params['headers']
+ from_name = module.params['from_name']
+ from_address = module.params['from_address']
+ to_addresses = module.params['to_addresses']
+ subject = module.params['subject']
+ body = module.params['body']
+ html_body = module.params['html_body']
+ attachments = module.params['attachments']
+
+ sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
+
+ if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
+ reason = 'when using any of the following arguments: ' \
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments'
+ module.fail_json(msg=missing_required_lib('sendgrid', reason=reason),
+ exception=SENDGRID_IMP_ERR)
+
+ response, info = post_sendgrid_api(module, username, password,
+ from_address, to_addresses, subject, body, attachments=attachments,
+ bcc=bcc, cc=cc, headers=headers, html_body=html_body, api_key=api_key)
+
+ if not HAS_SENDGRID:
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['msg'])
+ else:
+ if response != 200:
+ module.fail_json(msg="unable to send email through SendGrid API: %s" % info['message'])
+
+ module.exit_json(msg=subject, changed=False)
+
+
+if __name__ == '__main__':
+ main()
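
When the sendgrid library is absent, post_sendgrid_api() falls back to a raw POST against the legacy V2 mail.send endpoint, urlencoding the fixed fields and then appending one to[] parameter per recipient. A sketch of that payload construction (Python 3 urllib.parse here, where the module uses six.moves for Python 2 compatibility; recipients are appended unescaped exactly as in the fallback):

from urllib.parse import urlencode

def build_v2_payload(username, password, from_address, subject, body, to_addresses):
    data = {'api_user': username, 'api_key': password,
            'from': from_address, 'subject': subject, 'text': body}
    encoded = urlencode(data)
    # the legacy V2 endpoint takes one to[] parameter per recipient
    for recipient in to_addresses:
        encoded += '&to[]=%s' % recipient
    return encoded

print(build_v2_payload('user', 'secret', 'ansible@example.com', 'Hi', 'Body',
                       ['ops@example.com', 'dev@example.com']))
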
diff --git a/ansible_collections/community/general/plugins/modules/sensu_check.py b/ansible_collections/community/general/plugins/modules/sensu_check.py
new file mode 100644
index 000000000..1ac2316a8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sensu_check.py
@@ -0,0 +1,376 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_check
+short_description: Manage Sensu checks
+description:
+ - Manage the checks that should be run on a machine by I(Sensu).
+ - Most options do not have a default and will not be added to the check definition unless specified.
+ - All defaults except I(path), I(state), I(backup) and I(metric) are not managed by this module;
+ they are simply specified for your convenience.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - The name of the check.
+ - This is the key that is used to determine whether a check exists.
+ required: true
+ state:
+ type: str
+ description:
+ - Whether the check should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ path:
+ type: str
+ description:
+ - Path to the json file of the check to be added/removed.
+ - Will be created if it does not exist (unless I(state=absent)).
+ - The parent folders need to exist when I(state=present), otherwise an error will be thrown.
+ default: /etc/sensu/conf.d/checks.json
+ backup:
+ description:
+ - Create a backup file (if C(true)), including the timestamp information so
+ you can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: false
+ command:
+ type: str
+ description:
+ - Path to the sensu check to run (not required when I(state=absent))
+ handlers:
+ type: list
+ elements: str
+ description:
+ - List of handlers to notify when the check fails
+ subscribers:
+ type: list
+ elements: str
+ description:
+ - List of subscribers/channels this check should run for
+ - See sensu_subscribers to subscribe a machine to a channel
+ interval:
+ type: int
+ description:
+ - Check interval in seconds
+ timeout:
+ type: int
+ description:
+ - Timeout for the check
+ - If not specified, it defaults to 10.
+ ttl:
+ type: int
+ description:
+ - Time to live in seconds until the check is considered stale
+ handle:
+ description:
+ - Whether the check should be handled or not
+ - Default is C(false).
+ type: bool
+ subdue_begin:
+ type: str
+ description:
+ - When to disable handling of check failures
+ subdue_end:
+ type: str
+ description:
+ - When to enable handling of check failures
+ dependencies:
+ type: list
+ elements: str
+ description:
+ - Other checks this check depends on; if dependencies fail, handling of this check will be disabled.
+ metric:
+ description:
+ - Whether the check is a metric
+ type: bool
+ default: false
+ standalone:
+ description:
+ - Whether the check should be scheduled by the sensu client or server
+ - This option obviates the need for specifying the I(subscribers) option
+ - Default is C(false).
+ type: bool
+ publish:
+ description:
+ - Whether the check should be scheduled at all.
+ - You can still issue it via the Sensu API.
+ - Default is C(false).
+ type: bool
+ occurrences:
+ type: int
+ description:
+ - Number of event occurrences before the handler should take action
+ - If not specified, defaults to 1.
+ refresh:
+ type: int
+ description:
+ - Number of seconds handlers should wait before taking a second action.
+ aggregate:
+ description:
+ - Classifies the check as an aggregate check, making it available via the aggregate API.
+ - Default is C(false).
+ type: bool
+ low_flap_threshold:
+ type: int
+ description:
+ - The low threshold for flap detection
+ high_flap_threshold:
+ type: int
+ description:
+ - The high threshold for flap detection
+ custom:
+ type: dict
+ description:
+ - A hash/dictionary of custom parameters to merge into the configuration.
+ - You cannot override other module parameters using this.
+ source:
+ type: str
+ description:
+ - The check source, used to create a JIT Sensu client for an external resource (e.g. a network switch).
+author: "Anders Ingemann (@andsens)"
+'''
+
+EXAMPLES = '''
+# Fetch metrics about the CPU load every 60 seconds,
+# the sensu server has a handler called 'relay' which forwards stats to graphite
+- name: Get cpu metrics
+ community.general.sensu_check:
+ name: cpu_load
+ command: /etc/sensu/plugins/system/cpu-mpstat-metrics.rb
+ metric: true
+ handlers: relay
+ subscribers: common
+ interval: 60
+
+# Check whether nginx is running
+- name: Check nginx process
+ community.general.sensu_check:
+ name: nginx_running
+ command: /etc/sensu/plugins/processes/check-procs.rb -f /var/run/nginx.pid
+ handlers: default
+ subscribers: nginx
+ interval: 60
+
+# Stop monitoring the disk capacity.
+# Note that the check will still show up in the sensu dashboard,
+# to remove it completely you need to issue a DELETE request to the sensu api.
+- name: Check disk
+ community.general.sensu_check:
+ name: check_disk_capacity
+ state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def sensu_check(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ stream = None
+ try:
+ try:
+ stream = open(path, 'r')
+ config = json.load(stream)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+ finally:
+ if stream:
+ stream.close()
+
+ if 'checks' not in config:
+ if state == 'absent':
+ reasons.append('`checks\' section did not exist and state is `absent\'')
+ return changed, reasons
+ config['checks'] = {}
+ changed = True
+ reasons.append('`checks\' section did not exist')
+
+ if state == 'absent':
+ if name in config['checks']:
+ del config['checks'][name]
+ changed = True
+ reasons.append('check was present and state is `absent\'')
+
+ if state == 'present':
+ if name not in config['checks']:
+ check = {}
+ config['checks'][name] = check
+ changed = True
+ reasons.append('check was absent and state is `present\'')
+ else:
+ check = config['checks'][name]
+ simple_opts = ['command',
+ 'handlers',
+ 'subscribers',
+ 'interval',
+ 'timeout',
+ 'ttl',
+ 'handle',
+ 'dependencies',
+ 'standalone',
+ 'publish',
+ 'occurrences',
+ 'refresh',
+ 'aggregate',
+ 'low_flap_threshold',
+ 'high_flap_threshold',
+ 'source',
+ ]
+ for opt in simple_opts:
+ if module.params[opt] is not None:
+ if opt not in check or check[opt] != module.params[opt]:
+ check[opt] = module.params[opt]
+ changed = True
+ reasons.append('`{opt}\' did not exist or was different'.format(opt=opt))
+ else:
+ if opt in check:
+ del check[opt]
+ changed = True
+ reasons.append('`{opt}\' was removed'.format(opt=opt))
+
+ if module.params['custom']:
+ # Merge custom parameters into the check definition
+ custom_params = module.params['custom']
+ overwritten_fields = set(custom_params.keys()) & set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end'])
+ if overwritten_fields:
+ msg = 'You cannot overwrite standard module parameters via "custom". You are trying to overwrite: {opt}'.format(opt=list(overwritten_fields))
+ module.fail_json(msg=msg)
+
+ for k, v in custom_params.items():
+ if k in config['checks'][name]:
+ if not config['checks'][name][k] == v:
+ changed = True
+ reasons.append('`custom param {opt}\' was changed'.format(opt=k))
+ else:
+ changed = True
+ reasons.append('`custom param {opt}\' was added'.format(opt=k))
+ check[k] = v
+ simple_opts += custom_params.keys()
+
+ # Remove obsolete custom params
+ for opt in set(config['checks'][name].keys()) - set(simple_opts + ['type', 'subdue', 'subdue_begin', 'subdue_end']):
+ changed = True
+ reasons.append('`custom param {opt}\' was deleted'.format(opt=opt))
+ del check[opt]
+
+ if module.params['metric']:
+ if 'type' not in check or check['type'] != 'metric':
+ check['type'] = 'metric'
+ changed = True
+ reasons.append('`type\' was not defined or not `metric\'')
+ if not module.params['metric'] and 'type' in check:
+ del check['type']
+ changed = True
+ reasons.append('`type\' was defined')
+
+ if module.params['subdue_begin'] is not None and module.params['subdue_end'] is not None:
+ subdue = {'begin': module.params['subdue_begin'],
+ 'end': module.params['subdue_end'],
+ }
+ if 'subdue' not in check or check['subdue'] != subdue:
+ check['subdue'] = subdue
+ changed = True
+ reasons.append('`subdue\' did not exist or was different')
+ else:
+ if 'subdue' in check:
+ del check['subdue']
+ changed = True
+ reasons.append('`subdue\' was removed')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+ try:
+ stream = open(path, 'w')
+ stream.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ finally:
+ if stream:
+ stream.close()
+
+ return changed, reasons
+
+
+def main():
+
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/checks.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': False},
+ 'command': {'type': 'str'},
+ 'handlers': {'type': 'list', 'elements': 'str'},
+ 'subscribers': {'type': 'list', 'elements': 'str'},
+ 'interval': {'type': 'int'},
+ 'timeout': {'type': 'int'},
+ 'ttl': {'type': 'int'},
+ 'handle': {'type': 'bool'},
+ 'subdue_begin': {'type': 'str'},
+ 'subdue_end': {'type': 'str'},
+ 'dependencies': {'type': 'list', 'elements': 'str'},
+ 'metric': {'type': 'bool', 'default': False},
+ 'standalone': {'type': 'bool'},
+ 'publish': {'type': 'bool'},
+ 'occurrences': {'type': 'int'},
+ 'refresh': {'type': 'int'},
+ 'aggregate': {'type': 'bool'},
+ 'low_flap_threshold': {'type': 'int'},
+ 'high_flap_threshold': {'type': 'int'},
+ 'custom': {'type': 'dict'},
+ 'source': {'type': 'str'},
+ }
+
+ required_together = [['subdue_begin', 'subdue_end']]
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ required_together=required_together,
+ supports_check_mode=True)
+ if module.params['state'] != 'absent' and module.params['command'] is None:
+ module.fail_json(msg="missing required arguments: %s" % ",".join(['command']))
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_check(module, path, name, state, backup)
+
+ module.exit_json(path=path, changed=changed, msg='OK', name=name, reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
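
At its heart, sensu_check() is a read-modify-write cycle on one JSON file: load the config (treating a missing file as empty when the check should be present), mutate the checks section, and rewrite the file only when something changed. A compressed sketch of that cycle (upsert_check and the /tmp path are illustrative, not part of the module):

import json
import os

def upsert_check(path, name, definition):
    """Insert or update one check; return True when the file was rewritten."""
    config = {}
    if os.path.exists(path):
        with open(path) as stream:
            config = json.load(stream)
    checks = config.setdefault('checks', {})
    changed = checks.get(name) != definition
    if changed:
        checks[name] = definition
        with open(path, 'w') as stream:
            stream.write(json.dumps(config, indent=2) + '\n')
    return changed

print(upsert_check('/tmp/checks.json', 'nginx_running',
                   {'command': 'check-procs.rb -f /var/run/nginx.pid', 'interval': 60}))
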
diff --git a/ansible_collections/community/general/plugins/modules/sensu_client.py b/ansible_collections/community/general/plugins/modules/sensu_client.py
new file mode 100644
index 000000000..2e0bd12ee
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sensu_client.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_client
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu client configuration
+description:
+ - Manages Sensu client configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/clients.html)'
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Whether the client should be present or not
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the client. The name cannot contain special characters or spaces.
+ - If not specified, it defaults to the system hostname as determined by Ruby Socket.gethostname (provided by Sensu).
+ address:
+ type: str
+ description:
+ - An address to help identify and reach the client. This is only informational, usually an IP address or hostname.
+ - If not specified it defaults to non-loopback IPv4 address as determined by Ruby Socket.ip_address_list (provided by Sensu).
+ subscriptions:
+ type: list
+ elements: str
+ description:
+ - An array of client subscriptions, a list of roles and/or responsibilities assigned to the system (e.g. webserver).
+ - These subscriptions determine which monitoring checks are executed by the client, as check requests are sent to subscriptions.
+ - The subscriptions array items must be strings.
+ safe_mode:
+ description:
+ - If safe mode is enabled for the client. Safe mode requires local check definitions in order to accept a check request and execute the check.
+ type: bool
+ default: false
+ redact:
+ type: list
+ elements: str
+ description:
+ - Client definition attributes to redact (values) when logging and sending client keepalives.
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the Sensu client socket.
+ keepalives:
+ description:
+ - If Sensu should monitor keepalives for this client.
+ type: bool
+ default: true
+ keepalive:
+ type: dict
+ description:
+      - The keepalive definition scope, used to configure Sensu client keepalives behavior (for example, keepalive thresholds).
+ registration:
+ type: dict
+ description:
+ - The registration definition scope, used to configure Sensu registration event handlers.
+ deregister:
+ description:
+ - If a deregistration event should be created upon Sensu client process stop.
+ - Default is C(false).
+ type: bool
+ deregistration:
+ type: dict
+ description:
+ - The deregistration definition scope, used to configure automated Sensu client de-registration.
+ ec2:
+ type: dict
+ description:
+ - The ec2 definition scope, used to configure the Sensu Enterprise AWS EC2 integration (Sensu Enterprise users only).
+ chef:
+ type: dict
+ description:
+ - The chef definition scope, used to configure the Sensu Enterprise Chef integration (Sensu Enterprise users only).
+ puppet:
+ type: dict
+ description:
+ - The puppet definition scope, used to configure the Sensu Enterprise Puppet integration (Sensu Enterprise users only).
+ servicenow:
+ type: dict
+ description:
+ - The servicenow definition scope, used to configure the Sensu Enterprise ServiceNow integration (Sensu Enterprise users only).
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Minimum possible configuration
+- name: Configure Sensu client
+ community.general.sensu_client:
+ subscriptions:
+ - default
+
+# With customization
+- name: Configure Sensu client
+ community.general.sensu_client:
+ name: "{{ ansible_fqdn }}"
+ address: "{{ ansible_default_ipv4['address'] }}"
+ subscriptions:
+ - default
+ - webserver
+ redact:
+ - password
+ socket:
+ bind: 127.0.0.1
+ port: 3030
+ keepalive:
+ thresholds:
+ warning: 180
+ critical: 300
+ handlers:
+ - email
+ custom:
+ - broadcast: irc
+ occurrences: 3
+ register: client
+ notify:
+ - Restart sensu-client
+
+- name: Secure Sensu client configuration file
+ ansible.builtin.file:
+ path: "{{ client['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+
+- name: Delete the Sensu client configuration
+ community.general.sensu_client:
+ state: "absent"
+'''
+
+RETURN = '''
+config:
+ description: Effective client configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'client', 'subscriptions': ['default']}
+file:
+ description: Path to the client configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/client.json"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+            name=dict(type='str'),
+            address=dict(type='str'),
+ subscriptions=dict(type='list', elements="str"),
+ safe_mode=dict(type='bool', default=False),
+ redact=dict(type='list', elements="str"),
+ socket=dict(type='dict'),
+ keepalives=dict(type='bool', default=True),
+ keepalive=dict(type='dict'),
+ registration=dict(type='dict'),
+ deregister=dict(type='bool'),
+ deregistration=dict(type='dict'),
+ ec2=dict(type='dict'),
+ chef=dict(type='dict'),
+ puppet=dict(type='dict'),
+ servicenow=dict(type='dict')
+ ),
+ required_if=[
+ ['state', 'present', ['subscriptions']]
+ ]
+ )
+
+ state = module.params['state']
+ path = "/etc/sensu/conf.d/client.json"
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build client configuration from module arguments
+ config = {'client': {}}
+ args = ['name', 'address', 'subscriptions', 'safe_mode', 'redact',
+ 'socket', 'keepalives', 'keepalive', 'registration', 'deregister',
+ 'deregistration', 'ec2', 'chef', 'puppet', 'servicenow']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['client'][arg] = module.params[arg]
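+    # For illustration: with name="web01" and subscriptions=["default"], this
+    # loop yields {'client': {'name': 'web01', 'subscriptions': ['default']}},
+    # the structure serialized to /etc/sensu/conf.d/client.json below.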
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+        with open(path, 'r') as config_file:
+            current_config = json.load(config_file)
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Client configuration is already up to date',
+ config=config['client'],
+ file=path)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Client configuration would have been updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+
+ try:
+ with open(path, 'w') as client:
+ client.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Client configuration updated',
+ changed=True,
+ config=config['client'],
+ file=path)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sensu_handler.py b/ansible_collections/community/general/plugins/modules/sensu_handler.py
new file mode 100644
index 000000000..bbb8dc612
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sensu_handler.py
@@ -0,0 +1,281 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sensu_handler
+author: "David Moreau Simard (@dmsimard)"
+short_description: Manages Sensu handler configuration
+description:
+  - Manages Sensu handler configuration.
+ - 'For more information, refer to the Sensu documentation: U(https://sensuapp.org/docs/latest/reference/handlers.html)'
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+      - Whether the handler should be present or not.
+ choices: [ 'present', 'absent' ]
+ default: present
+ name:
+ type: str
+ description:
+ - A unique name for the handler. The name cannot contain special characters or spaces.
+ required: true
+ type:
+ type: str
+ description:
+      - The handler type.
+ choices: [ 'pipe', 'tcp', 'udp', 'transport', 'set' ]
+ filter:
+ type: str
+ description:
+ - The Sensu event filter (name) to use when filtering events for the handler.
+ filters:
+ type: list
+ elements: str
+ description:
+ - An array of Sensu event filters (names) to use when filtering events for the handler.
+ - Each array item must be a string.
+ severities:
+ type: list
+ elements: str
+ description:
+ - An array of check result severities the handler will handle.
+ - 'NOTE: event resolution bypasses this filtering.'
+ - "Example: [ 'warning', 'critical', 'unknown' ]."
+ mutator:
+ type: str
+ description:
+ - The Sensu event mutator (name) to use to mutate event data for the handler.
+ timeout:
+ type: int
+ description:
+ - The handler execution duration timeout in seconds (hard stop).
+ - Only used by pipe and tcp handler types.
+ default: 10
+ handle_silenced:
+ description:
+ - If events matching one or more silence entries should be handled.
+ type: bool
+ default: false
+ handle_flapping:
+ description:
+ - If events in the flapping state should be handled.
+ type: bool
+ default: false
+ command:
+ type: str
+ description:
+ - The handler command to be executed.
+ - The event data is passed to the process via STDIN.
+ - 'NOTE: the command attribute is only required for Pipe handlers (i.e. handlers configured with "type": "pipe").'
+ socket:
+ type: dict
+ description:
+ - The socket definition scope, used to configure the TCP/UDP handler socket.
+ - 'NOTE: the socket attribute is only required for TCP/UDP handlers (i.e. handlers configured with "type": "tcp" or "type": "udp").'
+ pipe:
+ type: dict
+ description:
+ - The pipe definition scope, used to configure the Sensu transport pipe.
+ - 'NOTE: the pipe attribute is only required for Transport handlers (i.e. handlers configured with "type": "transport").'
+ handlers:
+ type: list
+ elements: str
+ description:
+ - An array of Sensu event handlers (names) to use for events using the handler set.
+ - 'NOTE: the handlers attribute is only required for handler sets (i.e. handlers configured with "type": "set").'
+notes:
+ - Check mode is supported
+'''
+
+EXAMPLES = '''
+# Configure a handler that sends event data as STDIN (pipe)
+- name: Configure IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ type: "pipe"
+ command: "/usr/local/bin/notify-irc.sh"
+ severities:
+ - "ok"
+ - "critical"
+ - "warning"
+ - "unknown"
+ timeout: 15
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+# Delete a handler
+- name: Delete IRC Sensu handler
+ community.general.sensu_handler:
+ name: "irc_handler"
+ state: "absent"
+
+# Example of a TCP handler
+- name: Configure TCP Sensu handler
+ community.general.sensu_handler:
+ name: "tcp_handler"
+ type: "tcp"
+ timeout: 30
+ socket:
+ host: "10.0.1.99"
+ port: 4444
+ register: handler
+ notify:
+ - Restart sensu-client
+ - Restart sensu-server
+
+- name: Secure Sensu handler configuration file
+ ansible.builtin.file:
+ path: "{{ handler['file'] }}"
+ owner: "sensu"
+ group: "sensu"
+ mode: "0600"
+'''
+
+RETURN = '''
+config:
+ description: Effective handler configuration, when state is present
+ returned: success
+ type: dict
+ sample: {'name': 'irc', 'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}
+file:
+ description: Path to the handler configuration file
+ returned: success
+ type: str
+ sample: "/etc/sensu/conf.d/handlers/irc.json"
+name:
+ description: Name of the handler
+ returned: success
+ type: str
+ sample: "irc"
+'''
+
+import json
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=dict(
+ state=dict(type='str', choices=['present', 'absent'], default='present'),
+ name=dict(type='str', required=True),
+ type=dict(type='str', choices=['pipe', 'tcp', 'udp', 'transport', 'set']),
+ filter=dict(type='str'),
+ filters=dict(type='list', elements='str'),
+ severities=dict(type='list', elements='str'),
+ mutator=dict(type='str'),
+ timeout=dict(type='int', default=10),
+ handle_silenced=dict(type='bool', default=False),
+ handle_flapping=dict(type='bool', default=False),
+ command=dict(type='str'),
+ socket=dict(type='dict'),
+ pipe=dict(type='dict'),
+ handlers=dict(type='list', elements='str'),
+ ),
+ required_if=[
+ ['state', 'present', ['type']],
+ ['type', 'pipe', ['command']],
+ ['type', 'tcp', ['socket']],
+ ['type', 'udp', ['socket']],
+ ['type', 'transport', ['pipe']],
+ ['type', 'set', ['handlers']]
+ ]
+ )
+
+ state = module.params['state']
+ name = module.params['name']
+ path = '/etc/sensu/conf.d/handlers/{0}.json'.format(name)
+
+ if state == 'absent':
+ if os.path.exists(path):
+ if module.check_mode:
+ msg = '{path} would have been deleted'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ else:
+ try:
+ os.remove(path)
+ msg = '{path} deleted successfully'.format(path=path)
+ module.exit_json(msg=msg, changed=True)
+ except OSError as e:
+ msg = 'Exception when trying to delete {path}: {exception}'
+ module.fail_json(
+ msg=msg.format(path=path, exception=str(e)))
+ else:
+ # Idempotency: it's okay if the file doesn't exist
+ msg = '{path} already does not exist'.format(path=path)
+ module.exit_json(msg=msg)
+
+ # Build handler configuration from module arguments
+ config = {'handlers': {name: {}}}
+ args = ['type', 'filter', 'filters', 'severities', 'mutator', 'timeout',
+ 'handle_silenced', 'handle_flapping', 'command', 'socket',
+ 'pipe', 'handlers']
+
+ for arg in args:
+ if arg in module.params and module.params[arg] is not None:
+ config['handlers'][name][arg] = module.params[arg]
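+    # For illustration: a pipe handler named "irc" with the command
+    # "/usr/local/bin/notify-irc.sh" yields
+    # {'handlers': {'irc': {'type': 'pipe', 'command': '/usr/local/bin/notify-irc.sh'}}}.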
+
+ # Load the current config, if there is one, so we can compare
+ current_config = None
+ try:
+        with open(path, 'r') as config_file:
+            current_config = json.load(config_file)
+ except (IOError, ValueError):
+ # File either doesn't exist or it's invalid JSON
+ pass
+
+ if current_config is not None and current_config == config:
+ # Config is the same, let's not change anything
+ module.exit_json(msg='Handler configuration is already up to date',
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ # Validate that directory exists before trying to write to it
+ if not module.check_mode and not os.path.exists(os.path.dirname(path)):
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError as e:
+ module.fail_json(msg='Unable to create {0}: {1}'.format(os.path.dirname(path),
+ str(e)))
+
+ if module.check_mode:
+ module.exit_json(msg='Handler configuration would have been updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+
+ try:
+ with open(path, 'w') as handler:
+ handler.write(json.dumps(config, indent=4))
+ module.exit_json(msg='Handler configuration updated',
+ changed=True,
+ config=config['handlers'][name],
+ file=path,
+ name=name)
+ except (OSError, IOError) as e:
+ module.fail_json(msg='Unable to write file {0}: {1}'.format(path,
+ str(e)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sensu_silence.py b/ansible_collections/community/general/plugins/modules/sensu_silence.py
new file mode 100644
index 000000000..14c664755
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sensu_silence.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Steven Bambling <smbambling@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_silence
+author: Steven Bambling (@smbambling)
+short_description: Manage Sensu silence entries
+description:
+  - Create and clear (delete) silence entries via the Sensu API
+ for subscriptions and checks.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ check:
+ type: str
+ description:
+ - Specifies the check which the silence entry applies to.
+ creator:
+ type: str
+ description:
+ - Specifies the entity responsible for this entry.
+ expire:
+ type: int
+ description:
+ - If specified, the silence entry will be automatically cleared
+ after this number of seconds.
+ expire_on_resolve:
+ description:
+ - If specified as true, the silence entry will be automatically
+ cleared once the condition it is silencing is resolved.
+ type: bool
+ reason:
+ type: str
+ description:
+ - If specified, this free-form string is used to provide context or
+        rationale for why this silence entry was created.
+ state:
+ type: str
+ description:
+      - Specifies whether to create or clear (delete) a silence entry via the Sensu API.
+ default: present
+ choices: ['present', 'absent']
+ subscription:
+ type: str
+ description:
+ - Specifies the subscription which the silence entry applies to.
+      - To create a silence entry for a client prepend C(client:) to the client name.
+ Example - C(client:server1.example.dev)
+ required: true
+ url:
+ type: str
+ description:
+ - Specifies the URL of the Sensu monitoring host server.
+ required: false
+    default: http://127.0.0.1:4567
+'''
+
+EXAMPLES = '''
+# Silence ALL checks for a given client
+- name: Silence server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ creator: "{{ ansible_user_id }}"
+ reason: Performing maintenance
+
+# Silence specific check for a client
+- name: Silence CPU_Usage check for server1.example.dev
+ community.general.sensu_silence:
+ subscription: client:server1.example.dev
+ check: CPU_Usage
+ creator: "{{ ansible_user_id }}"
+ reason: Investigation alert issue
+
+# Silence multiple clients from a dict, for example defined as a variable:
+#
+#   silence:
+#     server1.example.dev:
+#       reason: 'Deployment in progress'
+#     server2.example.dev:
+#       reason: 'Deployment in progress'
+
+- name: Silence several clients from a dict
+ community.general.sensu_silence:
+ subscription: "client:{{ item.key }}"
+ reason: "{{ item.value.reason }}"
+ creator: "{{ ansible_user_id }}"
+ with_dict: "{{ silence }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def query(module, url, check, subscription):
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
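+    # e.g. {'check': None, 'subscription': 'client:web1'} is trimmed to
+    # {'subscription': 'client:web1'} before being sent to the API.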
+
+ response, info = fetch_url(
+ module, url, method='GET',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] == 500:
+ module.fail_json(
+ msg="Failed to query silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, False
+
+
+def clear(module, url, check, subscription):
+ # Test if silence exists before clearing
+ (rc, out, changed) = query(module, url, check, subscription)
+
+ d = dict((i['subscription'], i['check']) for i in out)
+ subscription_exists = subscription in d
+ if check and subscription_exists:
+ exists = (check == d[subscription])
+ else:
+ exists = subscription_exists
+
+ # If check/subscription doesn't exist
+ # exit with changed state of False
+ if not exists:
+ return False, out, changed
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced/clear'
+
+ request_data = {
+ 'check': check,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 204:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" % (subscription, info)
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def create(
+ module, url, check, creator, expire,
+ expire_on_resolve, reason, subscription):
+ (rc, out, changed) = query(module, url, check, subscription)
+ for i in out:
+ if (i['subscription'] == subscription):
+ if (
+ (check is None or check == i['check']) and
+ (
+ creator == '' or
+ creator == i['creator']) and
+ (
+ reason == '' or
+ reason == i['reason']) and
+ (
+ expire is None or expire == i['expire']) and
+ (
+ expire_on_resolve is None or
+ expire_on_resolve == i['expire_on_resolve']
+ )
+ ):
+ return False, out, False
+
+    # module.check_mode is inherited from the AnsibleModule class
+ if not module.check_mode:
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ url = url + '/silenced'
+
+ request_data = {
+ 'check': check,
+ 'creator': creator,
+ 'expire': expire,
+ 'expire_on_resolve': expire_on_resolve,
+ 'reason': reason,
+ 'subscription': subscription,
+ }
+
+ # Remove keys with None value
+ for k, v in dict(request_data).items():
+ if v is None:
+ del request_data[k]
+
+ response, info = fetch_url(
+ module, url, method='POST',
+ headers=headers, data=json.dumps(request_data)
+ )
+
+ if info['status'] != 201:
+ module.fail_json(
+ msg="Failed to silence %s. Reason: %s" %
+ (subscription, info['msg'])
+ )
+
+ try:
+ json_out = json.loads(to_native(response.read()))
+ except Exception:
+ json_out = ""
+
+ return False, json_out, True
+ return False, out, True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+            check=dict(type='str', required=False),
+            creator=dict(type='str', required=False),
+            expire=dict(type='int', required=False),
+            expire_on_resolve=dict(type='bool', required=False),
+            reason=dict(type='str', required=False),
+            state=dict(type='str', default='present', choices=['present', 'absent']),
+            subscription=dict(type='str', required=True),
+            url=dict(type='str', required=False, default='http://127.0.0.1:4567'),
+ ),
+ supports_check_mode=True
+ )
+
+ url = module.params['url']
+ check = module.params['check']
+ creator = module.params['creator']
+ expire = module.params['expire']
+ expire_on_resolve = module.params['expire_on_resolve']
+ reason = module.params['reason']
+ subscription = module.params['subscription']
+ state = module.params['state']
+
+ if state == 'present':
+ (rc, out, changed) = create(
+ module, url, check, creator,
+ expire, expire_on_resolve, reason, subscription
+ )
+
+ if state == 'absent':
+ (rc, out, changed) = clear(module, url, check, subscription)
+
+ if rc != 0:
+ module.fail_json(msg="failed", result=out)
+ module.exit_json(msg="success", result=out, changed=changed)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sensu_subscription.py b/ansible_collections/community/general/plugins/modules/sensu_subscription.py
new file mode 100644
index 000000000..0077e2ffa
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sensu_subscription.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Anders Ingemann <aim@secoya.dk>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sensu_subscription
+short_description: Manage Sensu subscriptions
+description:
+  - Manage which I(sensu channels) a machine should subscribe to.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+      - The name of the channel.
+ required: true
+ state:
+ type: str
+ description:
+      - Whether the machine should subscribe to or unsubscribe from the channel.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: present
+ path:
+ type: str
+ description:
+      - Path to the subscriptions JSON file.
+ required: false
+ default: /etc/sensu/conf.d/subscriptions.json
+ backup:
+ description:
+      - Create a backup file (if yes), including the timestamp information so you
+        can get the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ required: false
+ default: false
+requirements: [ ]
+author: Anders Ingemann (@andsens)
+'''
+
+RETURN = '''
+reasons:
+ description: the reasons why the module changed or did not change something
+ returned: success
+ type: list
+ sample: ["channel subscription was absent and state is `present'"]
+'''
+
+EXAMPLES = '''
+# Subscribe to the nginx channel
+- name: Subscribe to nginx checks
+  community.general.sensu_subscription:
+    name: nginx
+
+# Unsubscribe from the common checks channel
+- name: Unsubscribe from common checks
+  community.general.sensu_subscription:
+    name: common
+    state: absent
+'''
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def sensu_subscription(module, path, name, state='present', backup=False):
+ changed = False
+ reasons = []
+
+ try:
+        with open(path) as config_file:
+            config = json.load(config_file)
+ except IOError as e:
+ if e.errno == 2: # File not found, non-fatal
+ if state == 'absent':
+ reasons.append('file did not exist and state is `absent\'')
+ return changed, reasons
+ config = {}
+ else:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except ValueError:
+ msg = '{path} contains invalid JSON'.format(path=path)
+ module.fail_json(msg=msg)
+
+ if 'client' not in config:
+ if state == 'absent':
+ reasons.append('`client\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client'] = {}
+ changed = True
+ reasons.append('`client\' did not exist')
+
+ if 'subscriptions' not in config['client']:
+ if state == 'absent':
+ reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
+ return changed, reasons
+ config['client']['subscriptions'] = []
+ changed = True
+ reasons.append('`client.subscriptions\' did not exist')
+
+ if name not in config['client']['subscriptions']:
+ if state == 'absent':
+ reasons.append('channel subscription was absent')
+ return changed, reasons
+ config['client']['subscriptions'].append(name)
+ changed = True
+ reasons.append('channel subscription was absent and state is `present\'')
+ else:
+ if state == 'absent':
+ config['client']['subscriptions'].remove(name)
+ changed = True
+ reasons.append('channel subscription was present and state is `absent\'')
+
+ if changed and not module.check_mode:
+ if backup:
+ module.backup_local(path)
+ try:
+            with open(path, 'w') as config_file:
+                config_file.write(json.dumps(config, indent=2) + '\n')
+ except IOError as e:
+ module.fail_json(msg='Failed to write to file %s: %s' % (path, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, reasons
+
+
+def main():
+ arg_spec = {'name': {'type': 'str', 'required': True},
+ 'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
+ 'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
+ 'backup': {'type': 'bool', 'default': False},
+ }
+
+ module = AnsibleModule(argument_spec=arg_spec,
+ supports_check_mode=True)
+
+ path = module.params['path']
+ name = module.params['name']
+ state = module.params['state']
+ backup = module.params['backup']
+
+ changed, reasons = sensu_subscription(module, path, name, state, backup)
+
+ module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/seport.py b/ansible_collections/community/general/plugins/modules/seport.py
new file mode 100644
index 000000000..964e8f0ed
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/seport.py
@@ -0,0 +1,331 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Dan Keder <dan.keder@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: seport
+short_description: Manages SELinux network port type definitions
+description:
+ - Manages SELinux network port type definitions.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ ports:
+ description:
+ - Ports or port ranges.
+    - Can be a list (since Ansible 2.6) or a comma-separated string.
+ type: list
+ elements: str
+ required: true
+ proto:
+ description:
+ - Protocol for the specified port.
+ type: str
+ required: true
+ choices: [ tcp, udp ]
+ setype:
+ description:
+ - SELinux type for the specified port.
+ type: str
+ required: true
+ state:
+ description:
+    - Desired state of the port type definition.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ reload:
+ description:
+ - Reload SELinux policy after commit.
+ type: bool
+ default: true
+ ignore_selinux_state:
+ description:
+    - Run independently of the SELinux runtime state.
+ type: bool
+ default: false
+ local:
+ description:
+ - Work with local modifications only.
+ type: bool
+ default: false
+ version_added: 5.6.0
+notes:
+ - The changes are persistent across reboots.
+  - Not tested on any Debian-based systems.
+requirements:
+- libselinux-python
+- policycoreutils-python
+author:
+- Dan Keder (@dankeder)
+'''
+
+EXAMPLES = r'''
+- name: Allow Apache to listen on tcp port 8888
+ community.general.seport:
+ ports: 8888
+ proto: tcp
+ setype: http_port_t
+ state: present
+
+- name: Allow sshd to listen on tcp port 8991
+ community.general.seport:
+ ports: 8991
+ proto: tcp
+ setype: ssh_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports: 10000-10100,10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Allow memcached to listen on tcp ports 10000-10100 and 10112
+ community.general.seport:
+ ports:
+ - 10000-10100
+ - 10112
+ proto: tcp
+ setype: memcache_port_t
+ state: present
+
+- name: Remove tcp port 22 local modification if exists
+ community.general.seport:
+ ports: 22
+    proto: tcp
+ setype: ssh_port_t
+ state: absent
+ local: true
+'''
+
+import traceback
+
+SELINUX_IMP_ERR = None
+try:
+ import selinux
+ HAVE_SELINUX = True
+except ImportError:
+ SELINUX_IMP_ERR = traceback.format_exc()
+ HAVE_SELINUX = False
+
+SEOBJECT_IMP_ERR = None
+try:
+ import seobject
+ HAVE_SEOBJECT = True
+except ImportError:
+ SEOBJECT_IMP_ERR = traceback.format_exc()
+ HAVE_SEOBJECT = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+def get_runtime_status(ignore_selinux_state=False):
+ return ignore_selinux_state or selinux.is_selinux_enabled()
+
+
+def semanage_port_get_ports(seport, setype, proto, local):
+ """ Get the list of ports that have the specified type definition.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: list
+ :return: List of ports that have the specified SELinux type.
+ """
+ records = seport.get_all_by_type(locallist=local)
+ if (setype, proto) in records:
+ return records[(setype, proto)]
+ else:
+ return []
+
+
+def semanage_port_get_type(seport, port, proto):
+ """ Get the SELinux type of the specified port.
+
+    :param seport: Instance of seobject.portRecords
+
+ :type port: str
+ :param port: Port or port range (example: "8080", "8080-9090")
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :rtype: tuple
+ :return: Tuple containing the SELinux type and MLS/MCS level, or None if not found.
+ """
+ if isinstance(port, str):
+ ports = port.split('-', 1)
+ if len(ports) == 1:
+ ports.extend(ports)
+ else:
+ ports = (port, port)
+
+ key = (int(ports[0]), int(ports[1]), proto)
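+    # For illustration: "8080" yields the key (8080, 8080, proto) and
+    # "8080-9090" yields (8080, 9090, proto), the (low, high, proto) layout
+    # this lookup assumes for seobject.portRecords.get_all() records.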
+
+ records = seport.get_all()
+ return records.get(key)
+
+
+def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore='', local=False):
+ """ Add SELinux port type definition to the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type serange: str
+ :param serange: SELinux MLS/MCS range (defaults to 's0')
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ change = False
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ ports_by_type = semanage_port_get_ports(seport, setype, proto, local)
+ for port in ports:
+ if port in ports_by_type:
+ continue
+
+ change = True
+ if module.check_mode:
+ continue
+ port_type = semanage_port_get_type(seport, port, proto)
+ if port_type is None:
+ seport.add(port, proto, serange, setype)
+ else:
+ seport.modify(port, proto, serange, setype)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def semanage_port_del(module, ports, proto, setype, do_reload, sestore='', local=False):
+ """ Delete SELinux port type definition from the policy.
+
+ :type module: AnsibleModule
+ :param module: Ansible module
+
+ :type ports: list
+ :param ports: List of ports and port ranges to delete (e.g. ["8080", "8080-9090"])
+
+ :type proto: str
+ :param proto: Protocol ('tcp' or 'udp')
+
+ :type setype: str
+ :param setype: SELinux type.
+
+ :type do_reload: bool
+ :param do_reload: Whether to reload SELinux policy after commit
+
+ :type sestore: str
+ :param sestore: SELinux store
+
+ :rtype: bool
+ :return: True if the policy was changed, otherwise False
+ """
+ change = False
+ try:
+ seport = seobject.portRecords(sestore)
+ seport.set_reload(do_reload)
+ ports_by_type = semanage_port_get_ports(seport, setype, proto, local)
+ for port in ports:
+ if port in ports_by_type:
+ change = True
+ if not module.check_mode:
+ seport.delete(port, proto)
+
+ except (ValueError, IOError, KeyError, OSError, RuntimeError) as e:
+ module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
+
+ return change
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_selinux_state=dict(type='bool', default=False),
+ ports=dict(type='list', elements='str', required=True),
+ proto=dict(type='str', required=True, choices=['tcp', 'udp']),
+ setype=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ reload=dict(type='bool', default=True),
+ local=dict(type='bool', default=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ if not HAVE_SELINUX:
+ module.fail_json(msg=missing_required_lib("libselinux-python"), exception=SELINUX_IMP_ERR)
+
+ if not HAVE_SEOBJECT:
+ module.fail_json(msg=missing_required_lib("policycoreutils-python"), exception=SEOBJECT_IMP_ERR)
+
+ ignore_selinux_state = module.params['ignore_selinux_state']
+
+ if not get_runtime_status(ignore_selinux_state):
+ module.fail_json(msg="SELinux is disabled on this host.")
+
+ ports = module.params['ports']
+ proto = module.params['proto']
+ setype = module.params['setype']
+ state = module.params['state']
+ do_reload = module.params['reload']
+ local = module.params['local']
+
+ result = {
+ 'ports': ports,
+ 'proto': proto,
+ 'setype': setype,
+ 'state': state,
+ }
+
+ if state == 'present':
+ result['changed'] = semanage_port_add(module, ports, proto, setype, do_reload, local=local)
+ elif state == 'absent':
+ result['changed'] = semanage_port_del(module, ports, proto, setype, do_reload, local=local)
+ else:
+ module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/serverless.py b/ansible_collections/community/general/plugins/modules/serverless.py
new file mode 100644
index 000000000..67d673d4d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/serverless.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (U(https://serverless.com/)) project deployments and stacks.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Goal state of given stage/project.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ serverless_bin_path:
+ description:
+      - The path of a serverless framework binary relative to the I(service_path), for example C(node_modules/.bin/serverless).
+ type: path
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ type: path
+ required: true
+ stage:
+ description:
+ - The name of the serverless framework project stage to deploy to.
+      - If not specified, the serverless framework default of C(dev) is used.
+ type: str
+ default: ''
+ region:
+ description:
+ - AWS region to deploy the service to.
+ - This parameter defaults to C(us-east-1).
+ type: str
+ default: ''
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them.
+ - When this option is C(false) all the functions will be built, but no stack update will be run to send them out.
+ - This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ type: bool
+ default: true
+ force:
+ description:
+ - Whether or not to force full deployment, equivalent to serverless C(--force) option.
+ type: bool
+ default: false
+ verbose:
+ description:
+ - Shows all stack events during deployment, and display any Stack Output.
+ type: bool
+ default: false
+notes:
+ - Currently, the C(serverless) command must be in the path of the node executing the task.
+ In the future this may be a flag.
+requirements:
+- serverless
+- yaml
+author:
+- Ryan Scott Brown (@ryansb)
+'''
+
+EXAMPLES = r'''
+- name: Basic deploy of a service
+ community.general.serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+- name: Deploy a project, then pull its resource list back into Ansible
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_info module can get a full list of the stack resources, as
+# well as stack events and outputs
+- amazon.aws.cloudformation_info:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+
+- name: Deploy a project using a locally installed serverless binary
+ community.general.serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ serverless_bin_path: node_modules/.bin/serverless
+'''
+
+RETURN = r'''
+service_name:
+ type: str
+ description: The service name specified in the serverless.yml that was just deployed.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: str
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: str
+ description: Full C(serverless) command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+'''
+
+import os
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def read_serverless_config(module):
+ path = module.params.get('service_path')
+ full_path = os.path.join(path, 'serverless.yml')
+
+ try:
+ with open(full_path) as sls_config:
+ config = yaml.safe_load(sls_config.read())
+ return config
+ except IOError as e:
+ module.fail_json(msg="Could not open serverless.yml in {0}. err: {1}".format(full_path, str(e)))
+
+
+def get_service_name(module, stage):
+ config = read_serverless_config(module)
+ if config.get('service') is None:
+ module.fail_json(msg="Could not read `service` key from serverless.yml file")
+
+ if stage:
+ return "{0}-{1}".format(config['service'], stage)
+
+ return "{0}-{1}".format(config['service'], config.get('stage', 'dev'))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ service_path=dict(type='path', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ region=dict(type='str', default=''),
+ stage=dict(type='str', default=''),
+ deploy=dict(type='bool', default=True),
+ serverless_bin_path=dict(type='path'),
+ force=dict(type='bool', default=False),
+ verbose=dict(type='bool', default=False),
+ ),
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg='yaml is required for this module')
+
+ service_path = module.params.get('service_path')
+ state = module.params.get('state')
+ region = module.params.get('region')
+ stage = module.params.get('stage')
+ deploy = module.params.get('deploy', True)
+ force = module.params.get('force', False)
+ verbose = module.params.get('verbose', False)
+ serverless_bin_path = module.params.get('serverless_bin_path')
+
+ if serverless_bin_path is not None:
+ command = serverless_bin_path + " "
+ else:
+        command = module.get_bin_path("serverless", required=True) + " "
+
+ if state == 'present':
+ command += 'deploy '
+ elif state == 'absent':
+ command += 'remove '
+ else:
+ module.fail_json(msg="State must either be 'present' or 'absent'. Received: {0}".format(state))
+
+ if state == 'present':
+ if not deploy:
+ command += '--noDeploy '
+ elif force:
+ command += '--force '
+
+ if region:
+ command += '--region {0} '.format(region)
+ if stage:
+ command += '--stage {0} '.format(stage)
+ if verbose:
+ command += '--verbose '
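+
+    # At this point the assembled command resembles, for example:
+    #   serverless deploy --stage dev --region us-east-1 --verbose
+    # (illustrative values; the exact flags depend on the parameters above).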
+
+ rc, out, err = module.run_command(command, cwd=service_path)
+ if rc != 0:
+ if state == 'absent' and "-{0}' does not exist".format(stage) in out:
+ module.exit_json(changed=False, state='absent', command=command,
+ out=out, service_name=get_service_name(module, stage))
+
+ module.fail_json(msg="Failure when executing Serverless command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err))
+
+ # gather some facts about the deployment
+ module.exit_json(changed=True, state='present', out=out, command=command,
+ service_name=get_service_name(module, stage))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/shutdown.py b/ansible_collections/community/general/plugins/modules/shutdown.py
new file mode 100644
index 000000000..5d66fad16
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/shutdown.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+module: shutdown
+short_description: Shut down a machine
+notes:
+ - C(PATH) is ignored on the remote node when searching for the C(shutdown) command. Use I(search_paths)
+ to specify locations to search if the default paths do not work.
+description:
+  - Shuts down a machine.
+version_added: "1.1.0"
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.flow
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+ action:
+ support: full
+ async:
+ support: full
+options:
+ delay:
+ description:
+ - Seconds to wait before shutdown. Passed as a parameter to the shutdown command.
+ - On Linux, macOS and OpenBSD, this is converted to minutes and rounded down. If less than 60, it will be set to 0.
+ - On Solaris and FreeBSD, this will be seconds.
+ type: int
+ default: 0
+ msg:
+ description:
+ - Message to display to users before shutdown.
+ type: str
+ default: Shut down initiated by Ansible
+ search_paths:
+ description:
+ - Paths to search on the remote machine for the C(shutdown) command.
+ - I(Only) these paths will be searched for the C(shutdown) command. C(PATH) is ignored in the remote node when searching for the C(shutdown) command.
+ type: list
+ elements: path
+ default: ['/sbin', '/usr/sbin', '/usr/local/sbin']
+
+seealso:
+- module: ansible.builtin.reboot
+author:
+ - Matt Davis (@nitzmahone)
+ - Sam Doran (@samdoran)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Unconditionally shut down the machine with all defaults
+ community.general.shutdown:
+
+- name: Delay shutting down the remote node
+ community.general.shutdown:
+ delay: 60
+
+- name: Shut down a machine with shutdown command in unusual place
+ community.general.shutdown:
+ search_paths:
+ - '/lib/molly-guard'
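+
+# Illustrative example (values assumed): on Linux the delay is converted to
+# whole minutes and rounded down, so 300 seconds is passed as 5 minutes.
+- name: Shut down with a five-minute delay
+  community.general.shutdown:
+    delay: 300
+    msg: Going down for maintenance in five minutes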
+'''
+
+RETURN = r'''
+shutdown:
+ description: C(true) if the machine has been shut down.
+ returned: always
+ type: bool
+ sample: true
+'''
diff --git a/ansible_collections/community/general/plugins/modules/sl_vm.py b/ansible_collections/community/general/plugins/modules/sl_vm.py
new file mode 100644
index 000000000..94055d1d2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sl_vm.py
@@ -0,0 +1,439 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: Create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances.
+ - When created, optionally waits for it to be 'running'.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ instance_id:
+ description:
+    - Instance ID of the virtual instance on which to perform the action.
+ type: str
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance.
+ type: str
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance.
+ type: str
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed.
+ type: str
+ choices:
+ - ams01
+ - ams03
+ - che01
+ - dal01
+ - dal05
+ - dal06
+ - dal09
+ - dal10
+ - dal12
+ - dal13
+ - fra02
+ - fra04
+ - fra05
+ - hkg02
+ - hou02
+ - lon02
+ - lon04
+ - lon06
+ - mel01
+ - mex01
+ - mil01
+ - mon01
+ - osl01
+ - par01
+ - sao01
+ - sea01
+ - seo01
+ - sjc01
+ - sjc03
+ - sjc04
+ - sng01
+ - syd01
+ - syd04
+ - tok02
+ - tor01
+ - wdc01
+ - wdc04
+ - wdc06
+ - wdc07
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance.
+ type: str
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed.
+ type: bool
+ default: true
+ private:
+ description:
+ - Flag to determine if the instance should be private only.
+ type: bool
+ default: false
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space.
+ type: bool
+ default: false
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance.
+ type: bool
+ default: true
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance.
+ type: int
+ choices: [1, 2, 4, 8, 16, 32, 56]
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance.
+ type: int
+ choices: [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+ flavor:
+ description:
+ - Specify which SoftLayer flavor template to use instead of cpus and memory.
+ version_added: '0.2.0'
+ type: str
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance.
+ default: [ 25 ]
+ type: list
+ elements: int
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance.
+ type: str
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance.
+ type: str
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance.
+ choices: [10, 100, 1000]
+ type: int
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC.
+ type: str
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC.
+ type: str
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance.
+ type: list
+ elements: str
+ default: []
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance.
+ type: str
+ state:
+ description:
+      - Create or cancel a virtual instance.
+      - Specify C(present) to create, C(absent) to cancel.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ wait:
+ description:
+ - Flag used to wait for active status before returning.
+ type: bool
+ default: true
+ wait_time:
+ description:
+ - Time in seconds before wait returns.
+ default: 600
+ type: int
+requirements:
+ - python >= 2.6
+ - softlayer >= 4.1.1
+author:
+- Matt Colton (@mcltn)
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Build instance request
+ community.general.sl_vm:
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: true
+ private: false
+ dedicated: false
+ local_disk: true
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: false
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Build instances request
+ community.general.sl_vm:
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - hostname: instance-2
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-replicas
+ hourly: true
+ private: false
+ dedicated: false
+ local_disk: true
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: true
+ - hostname: instance-3
+ domain: anydomain.com
+ datacenter: dal09
+ tags:
+ - ansible-module-test
+ - ansible-module-test-replicas
+ hourly: true
+ private: false
+ dedicated: false
+ local_disk: true
+ cpus: 1
+ memory: 1024
+ disks:
+ - 25
+ - 100
+ os_code: UBUNTU_LATEST
+ ssh_keys: []
+ wait: true
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Cancel by tag
+ community.general.sl_vm:
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import json
+import time
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+# TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
+ 'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
+ 'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
+
+
+def create_virtual_instance(module):
+
+ instances = vsManager.list_instances(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ datacenter=module.params.get('datacenter')
+ )
+
+ if instances:
+ return False, None
+
+ # Check if OS or Image Template is provided (Can't be both, defaults to OS)
+ if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
+ module.params['image_id'] = ''
+ elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
+ module.params['os_code'] = ''
+ module.params['disks'] = [] # Blank out disks since it will use the template
+ else:
+ return False, None
+
+ tags = module.params.get('tags')
+ if isinstance(tags, list):
+ tags = ','.join(map(str, module.params.get('tags')))
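+        # e.g. ['ansible-module-test', 'replicas'] is flattened to
+        # "ansible-module-test,replicas", the comma-separated form the
+        # create_instance() call below receives.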
+
+ instance = vsManager.create_instance(
+ hostname=module.params.get('hostname'),
+ domain=module.params.get('domain'),
+ cpus=module.params.get('cpus'),
+ memory=module.params.get('memory'),
+ flavor=module.params.get('flavor'),
+ hourly=module.params.get('hourly'),
+ datacenter=module.params.get('datacenter'),
+ os_code=module.params.get('os_code'),
+ image_id=module.params.get('image_id'),
+ local_disk=module.params.get('local_disk'),
+ disks=module.params.get('disks'),
+ ssh_keys=module.params.get('ssh_keys'),
+ nic_speed=module.params.get('nic_speed'),
+ private=module.params.get('private'),
+ public_vlan=module.params.get('public_vlan'),
+ private_vlan=module.params.get('private_vlan'),
+ dedicated=module.params.get('dedicated'),
+ post_uri=module.params.get('post_uri'),
+ tags=tags,
+ )
+
+ if instance is not None and instance['id'] > 0:
+ return True, instance
+ else:
+ return False, None
+
+
+def wait_for_instance(module, instance_id):
+    instance = None
+    completed = False
+    wait_timeout = time.time() + module.params.get('wait_time')
+    while not completed and wait_timeout > time.time():
+        try:
+            completed = vsManager.wait_for_ready(instance_id, 10, 2)
+            if completed:
+                instance = vsManager.get_instance(instance_id)
+        except Exception:
+            completed = False
+
+    return completed, instance
+
+
+def cancel_instance(module):
+ canceled = True
+ if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
+ tags = module.params.get('tags')
+ if isinstance(tags, string_types):
+ tags = [module.params.get('tags')]
+ instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
+ for instance in instances:
+ try:
+ vsManager.cancel_instance(instance['id'])
+ except Exception:
+ canceled = False
+ elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
+ try:
+            vsManager.cancel_instance(module.params.get('instance_id'))
+ except Exception:
+ canceled = False
+ else:
+ return False, None
+
+ return canceled, None
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ instance_id=dict(type='str'),
+ hostname=dict(type='str'),
+ domain=dict(type='str'),
+ datacenter=dict(type='str', choices=DATACENTERS),
+ tags=dict(type='str'),
+ hourly=dict(type='bool', default=True),
+ private=dict(type='bool', default=False),
+ dedicated=dict(type='bool', default=False),
+ local_disk=dict(type='bool', default=True),
+ cpus=dict(type='int', choices=CPU_SIZES),
+ memory=dict(type='int', choices=MEMORY_SIZES),
+ flavor=dict(type='str'),
+ disks=dict(type='list', elements='int', default=[25]),
+ os_code=dict(type='str'),
+ image_id=dict(type='str'),
+ nic_speed=dict(type='int', choices=NIC_SPEEDS),
+ public_vlan=dict(type='str'),
+ private_vlan=dict(type='str'),
+ ssh_keys=dict(type='list', elements='str', default=[], no_log=False),
+ post_uri=dict(type='str'),
+ state=dict(type='str', default='present', choices=STATES),
+ wait=dict(type='bool', default=True),
+ wait_time=dict(type='int', default=600),
+ )
+ )
+
+ if not HAS_SL:
+ module.fail_json(msg='softlayer python library required for this module')
+
+ if module.params.get('state') == 'absent':
+ (changed, instance) = cancel_instance(module)
+
+ elif module.params.get('state') == 'present':
+ (changed, instance) = create_virtual_instance(module)
+ if module.params.get('wait') is True and instance:
+ (changed, instance) = wait_for_instance(module, instance['id'])
+
+ module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/slack.py b/ansible_collections/community/general/plugins/modules/slack.py
new file mode 100644
index 000000000..4e26f1973
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/slack.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Lee Goolsbee <lgoolsbee@atlassian.com>
+# Copyright (c) 2020, Michal Middleton <mm.404@icloud.com>
+# Copyright (c) 2017, Steve Pletcher <steve@steve-pletcher.com>
+# Copyright (c) 2016, René Moser <mail@renemoser.net>
+# Copyright (c) 2015, Stefan Berggren <nsg@nsg.cc>
+# Copyright (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+  - The C(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration.
+author: "Ramon de la Fuente (@ramondelafuente)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ domain:
+ type: str
+ description:
+      - Slack (sub)domain for your environment without protocol (for example,
+        C(example.slack.com)). In Ansible 1.8 and beyond, this option is
+        deprecated and may be ignored. See the I(token) documentation for
+        more information.
+ token:
+ type: str
+ description:
+      - Slack integration token. This authenticates you to the Slack service.
+        Make sure to use the correct type of token, depending on which method you use.
+      - "Webhook token:
+        Prior to Ansible 1.8, a token looked like C(3Ffe373sfhRE6y42Fg3rvf4GlK). In
+        Ansible 1.8 and above, the module supports the new Slack API, where tokens look
+        like C(G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK). If the token is
+        in the new format, Slack ignores any value of I(domain). If the token is
+        in the old format, I(domain) is required. Ansible has no control over
+        when Slack will retire the old API; when it does, the old format will
+        stop working. Please keep in mind that these tokens are not the API
+        tokens but the webhook tokens. In Slack they are found in the webhook
+        URL, which is obtained under apps and integrations, where incoming
+        webhooks can be added. In some cases this area may be locked by your
+        Slack admin and you must request access. The key is at the end of the
+        URL given to you in that section."
+      - "WebAPI token:
+        Slack WebAPI requires a personal, bot or work application token. These tokens start with C(xoxp-), C(xoxb-)
+        or C(xoxa-), for example C(xoxb-1234-56789abcdefghijklmnop). A WebAPI token is required if you intend to receive I(thread_id).
+        See Slack's documentation (U(https://api.slack.com/docs/token-types)) for more information."
+ required: true
+ msg:
+ type: str
+ description:
+ - Message to send. Note that the module does not handle escaping characters.
+ Plain-text angle brackets and ampersands should be converted to HTML entities (e.g. & to &amp;) before sending.
+ See Slack's documentation (U(https://api.slack.com/docs/message-formatting)) for more.
+ channel:
+ type: str
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ thread_id:
+ description:
+      - Optional. Timestamp of the parent message to thread this message, see
+        U(https://api.slack.com/docs/message-threading).
+ type: str
+ message_id:
+ description:
+ - Optional. Message ID to edit, instead of posting a new message.
+      - If supplied, I(channel_id) must be in the form C(C0xxxxxxx). Use C({{ slack_response.channel_id }}) to get I(channel_id) from a previous task run.
+ - Corresponds to C(ts) in the Slack API (U(https://api.slack.com/messaging/modifying)).
+ type: str
+ version_added: 1.2.0
+ username:
+ type: str
+ description:
+ - This is the sender of the message.
+ default: "Ansible"
+ icon_url:
+ type: str
+ description:
+ - URL for the message sender's icon (default C(https://docs.ansible.com/favicon.ico))
+ default: https://docs.ansible.com/favicon.ico
+ icon_emoji:
+ type: str
+ description:
+      - Emoji for the message sender. See the Slack documentation for options.
+      - If I(icon_emoji) is set, I(icon_url) will not be used.
+ link_names:
+ type: int
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ type: str
+ description:
+      - Setting for the message parser at Slack.
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ type: bool
+ default: true
+ color:
+ type: str
+ description:
+      - Allow text to use default colors - use the default of C(normal) to not send a custom color bar at the start of the message.
+      - Allowed values for color can be one of C(normal), C(good), C(warning), C(danger), or any valid 3-digit or 6-digit hex color value.
+      - Specifying a value in hex is supported since Ansible 2.8.
+ default: 'normal'
+ attachments:
+ type: list
+ elements: dict
+ description:
+ - Define a list of attachments. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/docs/attachments).
+ blocks:
+ description:
+ - Define a list of blocks. This list mirrors the Slack JSON API.
+ - For more information, see U(https://api.slack.com/block-kit).
+ type: list
+ elements: dict
+ version_added: 1.0.0
+ prepend_hash:
+ type: str
+ description:
+ - Setting for automatically prepending a C(#) symbol on the passed in I(channel_id).
+ - The C(auto) method prepends a C(#) unless I(channel_id) starts with one of C(#), C(@), C(C0), C(GF), C(G0), C(CP).
+ These prefixes only cover a small set of the prefixes that should not have a C(#) prepended.
+ Since an exact condition which I(channel_id) values must not have the C(#) prefix is not known,
+ the value C(auto) for this option will be deprecated in the future. It is best to explicitly set
+ I(prepend_hash=always) or I(prepend_hash=never) to obtain the needed behavior.
+ choices:
+ - 'always'
+ - 'never'
+ - 'auto'
+ default: 'auto'
+ version_added: 6.1.0
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ delegate_to: localhost
+
+- name: Send notification message via Slack all options
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} completed'
+ channel: '#ansible'
+ thread_id: '1539917263.000100'
+ username: 'Ansible on {{ inventory_hostname }}'
+ icon_url: http://www.example.com/some-image-file.png
+ link_names: 0
+ parse: 'none'
+ delegate_to: localhost
+
+- name: Insert a color bar in front of the message for visibility purposes and use the default webhook icon and name configured in Slack
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: '{{ inventory_hostname }} is alive!'
+ color: good
+ username: ''
+ icon_url: ''
+
+- name: Insert a color bar in front of the message with valid hex color value
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: 'This message uses color in hex value'
+ color: '#00aacc'
+ username: ''
+ icon_url: ''
+
+- name: Use the attachments API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ attachments:
+ - text: Display my system load on host A and B
+ color: '#ff00dd'
+ title: System load
+ fields:
+ - title: System A
+ value: "load average: 0,74, 0,66, 0,63"
+ short: true
+ - title: System B
+ value: 'load average: 5,16, 4,64, 2,43'
+ short: true
+
+- name: Use the blocks API
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ blocks:
+ - type: section
+ text:
+ type: mrkdwn
+ text: |-
+ *System load*
+ Display my system load on host A and B
+ - type: context
+ elements:
+ - type: mrkdwn
+ text: |-
+ *System A*
+ load average: 0,74, 0,66, 0,63
+ - type: mrkdwn
+ text: |-
+ *System B*
+ load average: 5,16, 4,64, 2,43
+
+- name: Send a message with a link using Slack markup
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: We sent this message using <https://www.ansible.com|Ansible>!
+
+- name: Send a message with angle brackets and ampersands
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ msg: This message has &lt;brackets&gt; &amp; ampersands in plain text.
+
+- name: Initial Threaded Slack message
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ msg: 'Starting a thread with my initial post.'
+ register: slack_response
+- name: Add more info to thread
+ community.general.slack:
+ channel: '#ansible'
+ token: xoxb-1234-56789abcdefghijklmnop
+ thread_id: "{{ slack_response['ts'] }}"
+ color: good
+ msg: 'And this is my threaded response!'
+
+- name: Send a message to be edited later on
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ channel: '#ansible'
+ msg: Deploying something...
+ register: slack_response
+- name: Edit message
+ community.general.slack:
+ token: thetoken/generatedby/slack
+ # The 'channel' option does not accept the channel name. It must use the 'channel_id',
+ # which can be retrieved for example from 'slack_response' from the previous task.
+ channel: "{{ slack_response.channel }}"
+ msg: Deployment complete!
+ message_id: "{{ slack_response.ts }}"
+"""
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+OLD_SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+SLACK_INCOMING_WEBHOOK = 'https://hooks.slack.com/services/%s'
+SLACK_POSTMESSAGE_WEBAPI = 'https://slack.com/api/chat.postMessage'
+SLACK_UPDATEMESSAGE_WEBAPI = 'https://slack.com/api/chat.update'
+SLACK_CONVERSATIONS_HISTORY_WEBAPI = 'https://slack.com/api/conversations.history'
+
+# Escaping quotes and apostrophes to avoid ending the string prematurely in the Ansible call.
+# We do not escape other characters used as Slack metacharacters (e.g. &, <, >).
+escape_table = {
+    '"': '\\"',
+    "'": "\\'",
+}
+
+
+def is_valid_hex_color(color_choice):
+ if re.match(r'^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$', color_choice):
+ return True
+ return False
+
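+# Illustrative checks against the pattern above (values chosen for demonstration):
+#   is_valid_hex_color('#00aacc') -> True   (6-digit hex)
+#   is_valid_hex_color('#0ac')    -> True   (3-digit hex)
+#   is_valid_hex_color('good')    -> False  (named colors are validated separately in main())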
+
+def escape_quotes(text):
+ """Backslash any quotes within text."""
+ return "".join(escape_table.get(c, c) for c in text)
+
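+# Minimal sketch of the mapping above: escape_quotes('say "hi"') returns
+# 'say \\"hi\\"', i.e. a backslash is inserted before each quote or apostrophe.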
+
+def recursive_escape_quotes(obj, keys):
+ """Recursively escape quotes inside supplied keys inside block kit objects"""
+ if isinstance(obj, dict):
+ escaped = {}
+ for k, v in obj.items():
+ if isinstance(v, str) and k in keys:
+ escaped[k] = escape_quotes(v)
+ else:
+ escaped[k] = recursive_escape_quotes(v, keys)
+ elif isinstance(obj, list):
+ escaped = [recursive_escape_quotes(v, keys) for v in obj]
+ else:
+ escaped = obj
+ return escaped
+
+
+def build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id, prepend_hash):
+ payload = {}
+ if color == "normal" and text is not None:
+ payload = dict(text=escape_quotes(text))
+ elif text is not None:
+ # With a custom color we have to set the message as attachment, and explicitly turn markdown parsing on for it.
+ payload = dict(attachments=[dict(text=escape_quotes(text), color=color, mrkdwn_in=["text"])])
+ if channel is not None:
+ if prepend_hash == 'auto':
+ if channel.startswith(('#', '@', 'C0', 'GF', 'G0', 'CP')):
+ payload['channel'] = channel
+ else:
+ payload['channel'] = '#' + channel
+ elif prepend_hash == 'always':
+ payload['channel'] = '#' + channel
+ elif prepend_hash == 'never':
+ payload['channel'] = channel
+ if thread_id is not None:
+ payload['thread_ts'] = thread_id
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+ if message_id is not None:
+ payload['ts'] = message_id
+
+    if attachments is not None:
+        if 'attachments' not in payload:
+            payload['attachments'] = []
+
+ attachment_keys_to_escape = [
+ 'title',
+ 'text',
+ 'author_name',
+ 'pretext',
+ 'fallback',
+ ]
+ for attachment in attachments:
+ for key in attachment_keys_to_escape:
+ if key in attachment:
+ attachment[key] = escape_quotes(attachment[key])
+
+ if 'fallback' not in attachment:
+ attachment['fallback'] = attachment['text']
+
+ payload['attachments'].append(attachment)
+
+ if blocks is not None:
+ block_keys_to_escape = [
+ 'text',
+ 'alt_text'
+ ]
+ payload['blocks'] = recursive_escape_quotes(blocks, block_keys_to_escape)
+
+ return payload
+
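+# Hedged sketch of a payload produced above for text with color='good',
+# channel='ansible' and prepend_hash='auto' (defaults such as 'username' and
+# 'icon_url' omitted for brevity):
+#   {'attachments': [{'text': '...', 'color': 'good', 'mrkdwn_in': ['text']}],
+#    'channel': '#ansible'}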
+
+def get_slack_message(module, token, channel, ts):
+ headers = {
+ 'Content-Type': 'application/json; charset=UTF-8',
+ 'Accept': 'application/json',
+ 'Authorization': 'Bearer ' + token
+ }
+ qs = urlencode({
+ 'channel': channel,
+ 'ts': ts,
+ 'limit': 1,
+ 'inclusive': 'true',
+ })
+ url = SLACK_CONVERSATIONS_HISTORY_WEBAPI + '?' + qs
+ response, info = fetch_url(module=module, url=url, headers=headers, method='GET')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to get slack message")
+ data = module.from_json(response.read())
+ if len(data['messages']) < 1:
+ module.fail_json(msg="no messages matching ts: %s" % ts)
+ if len(data['messages']) > 1:
+ module.fail_json(msg="more than 1 message matching ts: %s" % ts)
+ return data['messages'][0]
+
+
+def do_notify_slack(module, domain, token, payload):
+ use_webapi = False
+ if token.count('/') >= 2:
+ # New style webhook token
+ slack_uri = SLACK_INCOMING_WEBHOOK % token
+ elif re.match(r'^xox[abp]-\S+$', token):
+ slack_uri = SLACK_UPDATEMESSAGE_WEBAPI if 'ts' in payload else SLACK_POSTMESSAGE_WEBAPI
+ use_webapi = True
+ else:
+ if not domain:
+ module.fail_json(msg="Slack has updated its webhook API. You need to specify a token of the form "
+ "XXXX/YYYY/ZZZZ in your playbook")
+ slack_uri = OLD_SLACK_INCOMING_WEBHOOK % (domain, token)
+
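+    # Token routing above, using the example token shapes from the module docs:
+    #   'G922VJP24/D921DW937/3Ffe373sfhRE6y42Fg3rvf4GlK' -> new-style incoming webhook
+    #   'xoxb-1234-56789abcdefghijklmnop'                -> Web API (chat.update when payload has 'ts')
+    #   '3Ffe373sfhRE6y42Fg3rvf4GlK' with 'domain' set   -> legacy per-domain webhook URL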
+ headers = {
+ 'Content-Type': 'application/json; charset=UTF-8',
+ 'Accept': 'application/json',
+ }
+ if use_webapi:
+ headers['Authorization'] = 'Bearer ' + token
+
+ data = module.jsonify(payload)
+ response, info = fetch_url(module=module, url=slack_uri, headers=headers, method='POST', data=data)
+
+ if info['status'] != 200:
+ if use_webapi:
+ obscured_incoming_webhook = slack_uri
+ else:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % '[obscured]'
+ module.fail_json(msg=" failed to send %s to %s: %s" % (data, obscured_incoming_webhook, info['msg']))
+
+ # each API requires different handling
+ if use_webapi:
+ return module.from_json(response.read())
+ else:
+ return {'webhook': 'ok'}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ domain=dict(type='str'),
+ token=dict(type='str', required=True, no_log=True),
+ msg=dict(type='str'),
+ channel=dict(type='str'),
+ thread_id=dict(type='str'),
+ username=dict(type='str', default='Ansible'),
+ icon_url=dict(type='str', default='https://docs.ansible.com/favicon.ico'),
+ icon_emoji=dict(type='str'),
+ link_names=dict(type='int', default=1, choices=[0, 1]),
+ parse=dict(type='str', choices=['none', 'full']),
+ validate_certs=dict(default=True, type='bool'),
+ color=dict(type='str', default='normal'),
+ attachments=dict(type='list', elements='dict'),
+ blocks=dict(type='list', elements='dict'),
+ message_id=dict(type='str'),
+ prepend_hash=dict(type='str', default='auto', choices=['always', 'never', 'auto']),
+ ),
+ supports_check_mode=True,
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ thread_id = module.params['thread_id']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+ color = module.params['color']
+ attachments = module.params['attachments']
+ blocks = module.params['blocks']
+ message_id = module.params['message_id']
+ prepend_hash = module.params['prepend_hash']
+
+ color_choices = ['normal', 'good', 'warning', 'danger']
+ if color not in color_choices and not is_valid_hex_color(color):
+ module.fail_json(msg="Color value specified should be either one of %r "
+ "or any valid hex value with length 3 or 6." % color_choices)
+
+ changed = True
+
+ # if updating an existing message, we can check if there's anything to update
+ if message_id is not None:
+ changed = False
+ msg = get_slack_message(module, token, channel, message_id)
+ for key in ('icon_url', 'icon_emoji', 'link_names', 'color', 'attachments', 'blocks'):
+ if msg.get(key) != module.params.get(key):
+ changed = True
+ break
+ # if check mode is active, we shouldn't do anything regardless.
+ # if changed=False, we don't need to do anything, so don't do it.
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, ts=msg['ts'], channel=msg['channel'])
+ elif module.check_mode:
+ module.exit_json(changed=changed)
+
+ payload = build_payload_for_slack(text, channel, thread_id, username, icon_url, icon_emoji, link_names,
+ parse, color, attachments, blocks, message_id, prepend_hash)
+ slack_response = do_notify_slack(module, domain, token, payload)
+
+ if 'ok' in slack_response:
+ # Evaluate WebAPI response
+ if slack_response['ok']:
+ # return payload as a string for backwards compatibility
+ payload_json = module.jsonify(payload)
+ module.exit_json(changed=changed, ts=slack_response['ts'], channel=slack_response['channel'],
+ api=slack_response, payload=payload_json)
+ else:
+ module.fail_json(msg="Slack API error", error=slack_response['error'])
+ else:
+ # Exit with plain OK from WebHook, since we don't have more information
+ # If we get 200 from webhook, the only answer is OK
+ module.exit_json(msg="OK")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/slackpkg.py b/ansible_collections/community/general/plugins/modules/slackpkg.py
new file mode 100644
index 000000000..208061a4c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/slackpkg.py
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Kim Nørgaard
+# Written by Kim Nørgaard <jasen@jasen.dk>
+# Based on pkgng module written by bleader <bleader@ratonland.org>
+# that was based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
+# that was based on pacman module written by Afterburn <https://github.com/afterburn>
+# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: slackpkg
+short_description: Package manager for Slackware >= 12.2
+description:
+  - Manage binary packages for Slackware using C(slackpkg), which
+    is available in versions after 12.2.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - Name of the package(s) to install or remove.
+ required: true
+ type: list
+ elements: str
+ aliases: [pkg]
+
+ state:
+ description:
+      - State of the package. You can use C(installed) as an alias for C(present), and C(removed) as one for C(absent).
+ choices: [ 'present', 'absent', 'latest', 'installed', 'removed' ]
+ required: false
+ default: present
+ type: str
+
+ update_cache:
+ description:
+      - Update the package database first.
+ required: false
+ default: false
+ type: bool
+
+author: Kim Nørgaard (@KimNorgaard)
+requirements: [ "Slackware >= 12.2" ]
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.slackpkg:
+ name: foo
+ state: present
+
+- name: Remove packages foo and bar
+ community.general.slackpkg:
+ name: foo,bar
+ state: absent
+
+- name: Make sure that the package is the latest version
+ community.general.slackpkg:
+ name: foo
+ state: latest
+'''
+
+import os
+import platform
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, slackpkg_path, name):
+
+ machine = platform.machine()
+ # Exception for kernel-headers package on x86_64
+ if name == 'kernel-headers' and machine == 'x86_64':
+ machine = 'x86'
+ pattern = re.compile('^%s-[^-]+-(%s|noarch|fw)-[^-]+$' % (re.escape(name), re.escape(machine)))
+ packages = [f for f in os.listdir('/var/log/packages') if pattern.match(f)]
+
+ if len(packages) > 0:
+ return True
+
+ return False
+
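+# Illustrative match for the pattern above: with name='foo' on an x86_64 machine,
+# '/var/log/packages/foo-1.2.3-x86_64-1' matches, while 'foo-bar-1.2.3-x86_64-1'
+# does not, because the version field may not contain hyphens.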
+
+def remove_packages(module, slackpkg_path, packages):
+
+ remove_c = 0
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on remove %s"
+                                              % (slackpkg_path, package))
+
+ if not module.check_mode and query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to remove %s: %s" % (package, out))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, slackpkg_path, packages):
+
+ install_c = 0
+
+ for package in packages:
+ if query_package(module, slackpkg_path, package):
+ continue
+
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on install %s"
+                                              % (slackpkg_path, package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def upgrade_packages(module, slackpkg_path, packages):
+ install_c = 0
+
+ for package in packages:
+ if not module.check_mode:
+            rc, out, err = module.run_command("%s -default_answer=y -batch=on upgrade %s"
+                                              % (slackpkg_path, package))
+
+ if not module.check_mode and not query_package(module, slackpkg_path,
+ package):
+ module.fail_json(msg="failed to install %s: %s" % (package, out),
+ stderr=err)
+
+ install_c += 1
+
+ if install_c > 0:
+ module.exit_json(changed=True, msg="present %s package(s)"
+ % (install_c))
+
+ module.exit_json(changed=False, msg="package(s) already present")
+
+
+def update_cache(module, slackpkg_path):
+ rc, out, err = module.run_command("%s -batch=on update" % (slackpkg_path))
+ if rc != 0:
+ module.fail_json(msg="Could not update package cache")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default="present", choices=['installed', 'removed', 'absent', 'present', 'latest']),
+ name=dict(aliases=["pkg"], required=True, type='list', elements='str'),
+ update_cache=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True)
+
+ slackpkg_path = module.get_bin_path('slackpkg', True)
+
+ p = module.params
+
+ pkgs = p['name']
+
+ if p["update_cache"]:
+ update_cache(module, slackpkg_path)
+
+ if p['state'] == 'latest':
+ upgrade_packages(module, slackpkg_path, pkgs)
+
+ elif p['state'] in ['present', 'installed']:
+ install_packages(module, slackpkg_path, pkgs)
+
+ elif p["state"] in ['removed', 'absent']:
+ remove_packages(module, slackpkg_path, pkgs)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/smartos_image_info.py b/ansible_collections/community/general/plugins/modules/smartos_image_info.py
new file mode 100644
index 000000000..e93ffb9ac
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/smartos_image_info.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: smartos_image_info
+short_description: Get SmartOS image details
+description:
+ - Retrieve information about all installed images on SmartOS.
+ - This module was called C(smartos_image_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.smartos_image_info) module no longer returns C(ansible_facts)!
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ filters:
+ description:
+      - Criteria for selecting an image. Can be any value from the image
+        manifest, plus C(published_date), C(published), C(source), C(clones),
+        and C(size). More information can be found at U(https://smartos.org/man/1m/imgadm)
+        under C(imgadm list).
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Return information about all installed images
+ community.general.smartos_image_info:
+ register: result
+
+- name: Return all private active Linux images
+ community.general.smartos_image_info:
+ filters: "os=linux state=active public=false"
+ register: result
+
+- name: Show how many clones every image has
+ community.general.smartos_image_info:
+ register: result
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ result.smartos_images[item]['name'] }}-{{ result.smartos_images[item]['version'] }}
+ has {{ result.smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ result.smartos_images.keys() | list }}"
+
+- name: Print information
+ ansible.builtin.debug:
+ msg: "{{ smartos_images[item]['name'] }}-{{ smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+ with_items: "{{ smartos_images.keys() | list }}"
+'''
+
+RETURN = '''
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ImageFacts(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ self.filters = module.params['filters']
+
+ def return_all_installed_images(self):
+ cmd = [self.module.get_bin_path('imgadm'), 'list', '-j']
+
+ if self.filters:
+ cmd.append(self.filters)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc != 0:
+            self.module.fail_json(
+                msg='Failed to get all installed images', stderr=err)
+
+ images = json.loads(out)
+
+ result = {}
+ for image in images:
+ result[image['manifest']['uuid']] = image['manifest']
+ # Merge additional attributes with the image manifest.
+ for attrib in ['clones', 'source', 'zpool']:
+ result[image['manifest']['uuid']][attrib] = image[attrib]
+
+ return result
+
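+# Shape sketch (hedged) for return_all_installed_images(): a dict keyed by image
+# UUID, each value being the image manifest augmented with the 'clones', 'source'
+# and 'zpool' attributes, e.g. {'<uuid>': {'name': ..., 'clones': ..., ...}}.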
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+            filters=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ image_facts = ImageFacts(module)
+
+ data = dict(smartos_images=image_facts.return_all_installed_images())
+
+ module.exit_json(**data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/snap.py b/ansible_collections/community/general/plugins/modules/snap.py
new file mode 100644
index 000000000..4b798d6e2
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snap.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Alexei Znamensky (russoz) <russoz@gmail.com>
+# Copyright (c) 2021, Marcus Rickert <marcus.rickert@web.de>
+# Copyright (c) 2018, Stanislas Lange (angristan) <angristan@pm.me>
+# Copyright (c) 2018, Victor Carceler <vcarceler@iespuigcastellar.xeill.net>
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap
+short_description: Manages snaps
+description:
+ - "Manages snaps packages."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - Names of the snaps.
+ required: true
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ required: false
+ default: present
+ choices: [ absent, present, enabled, disabled ]
+ type: str
+ classic:
+ description:
+ - Confinement policy. The classic confinement allows a snap to have
+ the same level of access to the system as "classic" packages,
+        like those managed by APT. This option corresponds to the C(--classic) argument.
+ This option can only be specified if there is a single snap in the task.
+ type: bool
+ required: false
+ default: false
+ channel:
+ description:
+ - Define which release of a snap is installed and tracked for updates.
+ This option can only be specified if there is a single snap in the task.
+ type: str
+ required: false
+ default: stable
+ options:
+ description:
+ - Set options with pattern C(key=value) or C(snap:key=value). If a snap name is given, the option will be applied
+ to that snap only. If the snap name is omitted, the options will be applied to all snaps listed in I(name). Options will
+ only be applied to active snaps.
+ required: false
+ type: list
+ elements: str
+ version_added: 4.4.0
+
+author:
+ - Victor Carceler (@vcarceler) <vcarceler@iespuigcastellar.xeill.net>
+ - Stanislas Lange (@angristan) <angristan@pm.me>
+
+seealso:
+ - module: community.general.snap_alias
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snap
+- name: Install foo
+ community.general.snap:
+ name:
+ - foo
+ - bar
+
+# Install "foo" snap with options par1=A and par2=B
+- name: Install "foo" with options
+ community.general.snap:
+ name:
+ - foo
+ options:
+ - par1=A
+ - par2=B
+
+# Install "foo" and "bar" snaps with common option com=A and specific options fooPar=X and barPar=Y
+- name: Install "foo" and "bar" with options
+ community.general.snap:
+ name:
+ - foo
+ - bar
+ options:
+ - com=A
+ - foo:fooPar=X
+ - bar:barPar=Y
+
+# Remove "foo" snap
+- name: Remove foo
+ community.general.snap:
+ name: foo
+ state: absent
+
+# Install a snap with classic confinement
+- name: Install "foo" with option --classic
+ community.general.snap:
+ name: foo
+ classic: true
+
+# Install a snap from a specific channel
+- name: Install "foo" with option --channel=latest/edge
+ community.general.snap:
+ name: foo
+ channel: latest/edge
+'''
+
+RETURN = '''
+classic:
+ description: Whether or not the snaps were installed with the classic confinement
+ type: bool
+ returned: When snaps are installed
+channel:
+ description: The channel the snaps were installed from
+ type: str
+ returned: When snaps are installed
+cmd:
+ description: The command that was executed on the host
+ type: str
+ returned: When changed is true
+snaps_installed:
+ description: The list of actually installed snaps
+ type: list
+ returned: When any snaps have been installed
+snaps_removed:
+ description: The list of actually removed snaps
+ type: list
+ returned: When any snaps have been removed
+options_changed:
+ description: The list of options set/changed in format C(snap:key=value).
+ type: list
+ returned: When any options have been changed/set
+ version_added: 4.4.0
+'''
+
+import re
+import json
+import numbers
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ CmdStateModuleHelper, ArgFormat
+)
+
+
+__state_map = dict(
+ present='install',
+ absent='remove',
+ enabled='enable',
+ disabled='disable',
+ info='info', # not public
+ list='list', # not public
+ set='set', # not public
+ get='get', # not public
+)
+
+
+def _state_map(value):
+ return [__state_map[value]]
+
+
+class Snap(CmdStateModuleHelper):
+ __disable_re = re.compile(r'(?:\S+\s+){5}(?P<notes>\S+)')
+ __set_param_re = re.compile(r'(?P<snap_prefix>\S+:)?(?P<key>\S+)\s*=\s*(?P<value>.+)')
+ module = dict(
+ argument_spec={
+ 'name': dict(type='list', elements='str', required=True),
+ 'state': dict(type='str', default='present',
+ choices=['absent', 'present', 'enabled', 'disabled']),
+ 'classic': dict(type='bool', default=False),
+ 'channel': dict(type='str', default='stable'),
+ 'options': dict(type='list', elements='str'),
+ },
+ supports_check_mode=True,
+ )
+ command = "snap"
+ command_args_formats = dict(
+ actionable_snaps=dict(fmt=lambda v: v),
+ state=dict(fmt=_state_map),
+ classic=dict(fmt="--classic", style=ArgFormat.BOOLEAN),
+ channel=dict(fmt=lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)]),
+ options=dict(fmt=list),
+ json_format=dict(fmt="-d", style=ArgFormat.BOOLEAN),
+ )
+ check_rc = False
+
+ @staticmethod
+ def _first_non_zero(a):
+ for elem in a:
+ if elem != 0:
+ return elem
+
+ return 0
+
+ def _run_multiple_commands(self, commands):
+ outputs = [(c,) + self.run_command(params=c) for c in commands]
+ results = ([], [], [], [])
+ for output in outputs:
+ for i in range(4):
+ results[i].append(output[i])
+
+ return [
+ '; '.join([to_native(x) for x in results[0]]),
+ self._first_non_zero(results[1]),
+ '\n'.join(results[2]),
+ '\n'.join(results[3]),
+ ]
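+    # Aggregation sketch: for two commands this returns roughly
+    #   ['<cmd1>; <cmd2>', <first non-zero rc>, '<out1>\n<out2>', '<err1>\n<err2>'],
+    # so a single failing snap invocation marks the whole batch as failed.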
+
+ def convert_json_subtree_to_map(self, json_subtree, prefix=None):
+ option_map = {}
+
+ if not isinstance(json_subtree, dict):
+ self.do_raise("Non-dict non-leaf element encountered while parsing option map. "
+ "The output format of 'snap set' may have changed. Aborting!")
+
+ for key, value in json_subtree.items():
+ full_key = key if prefix is None else prefix + "." + key
+
+ if isinstance(value, (str, float, bool, numbers.Integral)):
+ option_map[full_key] = str(value)
+
+ else:
+ option_map.update(self.convert_json_subtree_to_map(json_subtree=value, prefix=full_key))
+
+ return option_map
+
+ def convert_json_to_map(self, json_string):
+ json_object = json.loads(json_string)
+ return self.convert_json_subtree_to_map(json_object)
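+    # Example of the flattening performed above (illustrative 'snap get -d' output):
+    #   '{"system": {"timezone": "UTC"}, "debug": true}'
+    #   -> {'system.timezone': 'UTC', 'debug': 'True'}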
+
+ def retrieve_option_map(self, snap_name):
+ params = [{'state': 'get'}, {'name': snap_name}, {'json_format': True}]
+ rc, out, err = self.run_command(params=params)
+
+ if rc != 0:
+ return {}
+
+ result = out.splitlines()
+
+ if "has no configuration" in result[0]:
+ return {}
+
+ try:
+ option_map = self.convert_json_to_map(out)
+
+ except Exception as e:
+ self.do_raise(
+ msg="Parsing option map returned by 'snap get {0}' triggers exception '{1}', output:\n'{2}'".format(snap_name, str(e), out))
+
+ return option_map
+
+ def is_snap_installed(self, snap_name):
+ return 0 == self.run_command(params=[{'state': 'list'}, {'name': snap_name}])[0]
+
+ def is_snap_enabled(self, snap_name):
+ rc, out, err = self.run_command(params=[{'state': 'list'}, {'name': snap_name}])
+ if rc != 0:
+ return None
+ result = out.splitlines()[1]
+ match = self.__disable_re.match(result)
+ if not match:
+ self.do_raise(msg="Unable to parse 'snap list {0}' output:\n{1}".format(snap_name, out))
+ notes = match.group('notes')
+ return "disabled" not in notes.split(',')
+
+ def process_actionable_snaps(self, actionable_snaps):
+ self.changed = True
+ self.vars.snaps_installed = actionable_snaps
+
+ if self.module.check_mode:
+ return
+
+ params = ['state', 'classic', 'channel'] # get base cmd parts
+ has_one_pkg_params = bool(self.vars.classic) or self.vars.channel != 'stable'
+ has_multiple_snaps = len(actionable_snaps) > 1
+
+ if has_one_pkg_params and has_multiple_snaps:
+ commands = [params + [{'actionable_snaps': [s]}] for s in actionable_snaps]
+ else:
+ commands = [params + [{'actionable_snaps': actionable_snaps}]]
+ self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
+
+ if rc == 0:
+ return
+
+ classic_snap_pattern = re.compile(r'^error: This revision of snap "(?P<package_name>\w+)"'
+ r' was published using classic confinement')
+ match = classic_snap_pattern.match(err)
+ if match:
+ err_pkg = match.group('package_name')
+ msg = "Couldn't install {name} because it requires classic confinement".format(name=err_pkg)
+ else:
+ msg = "Ooops! Snap installation failed while executing '{cmd}', please examine logs and " \
+ "error output for more details.".format(cmd=self.vars.cmd)
+ self.do_raise(msg=msg)
+
+ def state_present(self):
+
+ self.vars.meta('classic').set(output=True)
+ self.vars.meta('channel').set(output=True)
+ actionable_snaps = [s for s in self.vars.name if not self.is_snap_installed(s)]
+
+ if actionable_snaps:
+ self.process_actionable_snaps(actionable_snaps)
+
+ self.set_options()
+
+ def set_options(self):
+ if self.vars.options is None:
+ return
+
+ actionable_snaps = [s for s in self.vars.name if self.is_snap_installed(s)]
+ overall_options_changed = []
+
+ for snap_name in actionable_snaps:
+ option_map = self.retrieve_option_map(snap_name=snap_name)
+
+ options_changed = []
+
+ for option_string in self.vars.options:
+ match = self.__set_param_re.match(option_string)
+
+ if not match:
+ msg = "Cannot parse set option '{option_string}'".format(option_string=option_string)
+ self.do_raise(msg)
+
+ snap_prefix = match.group("snap_prefix")
+ selected_snap_name = snap_prefix[:-1] if snap_prefix else None
+
+ if selected_snap_name is not None and selected_snap_name not in self.vars.name:
+ msg = "Snap option '{option_string}' refers to snap which is not in the list of snap names".format(option_string=option_string)
+ self.do_raise(msg)
+
+ if selected_snap_name is None or (snap_name is not None and snap_name == selected_snap_name):
+ key = match.group("key")
+ value = match.group("value").strip()
+
+ if key not in option_map or key in option_map and option_map[key] != value:
+ option_without_prefix = key + "=" + value
+ option_with_prefix = option_string if selected_snap_name is not None else snap_name + ":" + option_string
+ options_changed.append(option_without_prefix)
+ overall_options_changed.append(option_with_prefix)
+
+ if options_changed:
+ self.changed = True
+
+ if not self.module.check_mode:
+ params = [{'state': 'set'}, {'name': snap_name}, {'options': options_changed}]
+
+ rc, out, err = self.run_command(params=params)
+
+ if rc != 0:
+ if 'has no "configure" hook' in err:
+ msg = "Snap '{snap}' does not have any configurable options".format(snap=snap_name)
+ self.do_raise(msg)
+
+ msg = "Cannot set options '{options}' for snap '{snap}': error={error}".format(
+ options=" ".join(options_changed), snap=snap_name, error=err)
+ self.do_raise(msg)
+
+ if overall_options_changed:
+ self.vars.options_changed = overall_options_changed
+
+ def _generic_state_action(self, actionable_func, actionable_var, params=None):
+ actionable_snaps = [s for s in self.vars.name if actionable_func(s)]
+ if not actionable_snaps:
+ return
+ self.changed = True
+ self.vars[actionable_var] = actionable_snaps
+ if self.module.check_mode:
+ return
+ if params is None:
+ params = ['state']
+ commands = [params + [{'actionable_snaps': actionable_snaps}]]
+ self.vars.cmd, rc, out, err = self._run_multiple_commands(commands)
+ if rc == 0:
+ return
+ msg = "Ooops! Snap operation failed while executing '{cmd}', please examine logs and " \
+ "error output for more details.".format(cmd=self.vars.cmd)
+ self.do_raise(msg=msg)
+
+ def state_absent(self):
+ self._generic_state_action(self.is_snap_installed, "snaps_removed", ['classic', 'channel', 'state'])
+
+ def state_enabled(self):
+ self._generic_state_action(lambda s: not self.is_snap_enabled(s), "snaps_enabled", ['classic', 'channel', 'state'])
+
+ def state_disabled(self):
+ self._generic_state_action(self.is_snap_enabled, "snaps_disabled", ['classic', 'channel', 'state'])
+
+
+def main():
+ Snap.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/snap_alias.py b/ansible_collections/community/general/plugins/modules/snap_alias.py
new file mode 100644
index 000000000..19fbef003
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snap_alias.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Alexei Znamensky (russoz) <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: snap_alias
+short_description: Manages snap aliases
+version_added: 4.0.0
+description:
+ - "Manages snaps aliases."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ state:
+ description:
+ - Desired state of the alias.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ name:
+ description:
+ - Name of the snap.
+ type: str
+ alias:
+ description:
+ - Aliases to be created or removed.
+ type: list
+ elements: str
+ aliases: [aliases]
+
+author:
+ - Alexei Znamensky (@russoz) <russoz@gmail.com>
+
+seealso:
+ - module: community.general.snap
+'''
+
+EXAMPLES = '''
+# Install "foo" and "bar" snap
+- name: Create snap alias
+ community.general.snap_alias:
+ name: hello-world
+ alias: hw
+
+- name: Create multiple aliases
+ community.general.snap_alias:
+ name: hello-world
+ aliases:
+ - hw
+ - hw2
+ - hw3
+ state: present # optional
+
+- name: Remove one specific alias
+  community.general.snap_alias:
+    alias: hw
+    state: absent
+
+- name: Remove all aliases for snap
+ community.general.snap_alias:
+ name: hello-world
+ state: absent
+'''
+
+RETURN = '''
+snap_aliases:
+ description: The snap aliases after execution. If called in check mode, then the list represents the state before execution.
+ type: list
+ elements: str
+ returned: always
+'''
+
+
+import re
+
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+
+_state_map = dict(
+ present='alias',
+ absent='unalias',
+ info='aliases',
+)
+
+
+class SnapAlias(StateModuleHelper):
+ _RE_ALIAS_LIST = re.compile(r"^(?P<snap>\S+)\s+(?P<alias>[\w-]+)\s+.*$")
+
+ module = dict(
+ argument_spec={
+ 'state': dict(type='str', choices=['absent', 'present'], default='present'),
+ 'name': dict(type='str'),
+ 'alias': dict(type='list', elements='str', aliases=['aliases']),
+ },
+ required_if=[
+ ('state', 'present', ['name', 'alias']),
+ ('state', 'absent', ['name', 'alias'], True),
+ ],
+ supports_check_mode=True,
+ )
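+    # Note on the spec above: with state=present both 'name' and 'alias' are
+    # required, while the trailing True in the state=absent rule makes that
+    # requirement one-of, so providing either 'name' or 'alias' is enough.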
+
+ command_args_formats = {
+ "state": cmd_runner_fmt.as_map(_state_map),
+ "name": cmd_runner_fmt.as_list(),
+ "alias": cmd_runner_fmt.as_list(),
+ }
+
+ def _aliases(self):
+ n = self.vars.name
+ return {n: self._get_aliases_for(n)} if n else self._get_aliases()
+
+ def __init_module__(self):
+ self.runner = CmdRunner(self.module, "snap", self.command_args_formats, check_rc=False)
+ self.vars.set("snap_aliases", self._aliases(), change=True, diff=True)
+
+ def __quit_module__(self):
+ self.vars.snap_aliases = self._aliases()
+
+ def _get_aliases(self):
+ def process(rc, out, err):
+ if err:
+ return {}
+ aliases = [self._RE_ALIAS_LIST.match(a.strip()) for a in out.splitlines()[1:]]
+ snap_alias_list = [(entry.group("snap"), entry.group("alias")) for entry in aliases]
+ results = {}
+ for snap, alias in snap_alias_list:
+ results[snap] = results.get(snap, []) + [alias]
+ return results
+
+ with self.runner("state name", check_rc=True, output_process=process) as ctx:
+ aliases = ctx.run(state="info")
+ if self.verbosity >= 4:
+ self.vars.get_aliases_run_info = ctx.run_info
+ return aliases
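+    # Hedged parsing example for _get_aliases(): an output line of 'snap aliases'
+    # such as 'hello-world  hw  -' is collected as {'hello-world': ['hw']};
+    # the header row is skipped by the slicing above.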
+
+ def _get_aliases_for(self, name):
+ return self._get_aliases().get(name, [])
+
+ def _has_alias(self, name=None, alias=None):
+ if name:
+ if name not in self.vars.snap_aliases:
+ return False
+ if alias is None:
+ return bool(self.vars.snap_aliases[name])
+ return alias in self.vars.snap_aliases[name]
+
+ return any(alias in aliases for aliases in self.vars.snap_aliases.values())
+
+ def state_present(self):
+ for _alias in self.vars.alias:
+ if not self._has_alias(self.vars.name, _alias):
+ self.changed = True
+ with self.runner("state name alias", check_mode_skip=True) as ctx:
+ ctx.run(alias=_alias)
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+ def state_absent(self):
+ if not self.vars.alias:
+ if self._has_alias(self.vars.name):
+ self.changed = True
+ with self.runner("state name", check_mode_skip=True) as ctx:
+ ctx.run()
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+ else:
+ for _alias in self.vars.alias:
+ if self._has_alias(self.vars.name, _alias):
+ self.changed = True
+ with self.runner("state alias", check_mode_skip=True) as ctx:
+ ctx.run(alias=_alias)
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+
+def main():
+ SnapAlias.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/snmp_facts.py b/ansible_collections/community/general/plugins/modules/snmp_facts.py
new file mode 100644
index 000000000..e54473ffa
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/snmp_facts.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Networklore's snmp library for Ansible
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: snmp_facts
+author:
+- Patrick Ogenstad (@ogenstad)
+short_description: Retrieve facts for a device using SNMP
+description:
+  - Retrieve facts for a device using SNMP. The facts will be
+    inserted into the C(ansible_facts) key.
+requirements:
+ - pysnmp
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ host:
+ description:
+      - Set to the target SNMP server (normally C({{ inventory_hostname }})).
+ type: str
+ required: true
+ version:
+ description:
+      - SNMP version to use, one of C(v2), C(v2c) or C(v3).
+ type: str
+ required: true
+ choices: [ v2, v2c, v3 ]
+ community:
+ description:
+ - The SNMP community string, required if I(version) is C(v2) or C(v2c).
+ type: str
+ level:
+ description:
+ - Authentication level.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ authNoPriv, authPriv ]
+ username:
+ description:
+ - Username for SNMPv3.
+ - Required if I(version) is C(v3).
+ type: str
+ integrity:
+ description:
+ - Hashing algorithm.
+ - Required if I(version) is C(v3).
+ type: str
+ choices: [ md5, sha ]
+ authkey:
+ description:
+ - Authentication key.
+      - Required if I(version) is C(v3).
+ type: str
+ privacy:
+ description:
+ - Encryption algorithm.
+ - Required if I(level) is C(authPriv).
+ type: str
+ choices: [ aes, des ]
+ privkey:
+ description:
+ - Encryption key.
+ - Required if I(level) is C(authPriv).
+ type: str
+ timeout:
+ description:
+ - Response timeout in seconds.
+ type: int
+ version_added: 2.3.0
+ retries:
+ description:
+      - Maximum number of request retries; 0 retries means just a single request.
+ type: int
+ version_added: 2.3.0
+'''
+
+EXAMPLES = r'''
+- name: Gather facts with SNMP version 2
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v2c
+ community: public
+  delegate_to: localhost
+
+- name: Gather facts using SNMP version 3
+ community.general.snmp_facts:
+ host: '{{ inventory_hostname }}'
+ version: v3
+ level: authPriv
+ integrity: sha
+ privacy: aes
+ username: snmp-user
+ authkey: abc12345
+ privkey: def6789
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+ansible_sysdescr:
+ description: A textual description of the entity.
+ returned: success
+ type: str
+ sample: Linux ubuntu-user 4.4.0-93-generic #116-Ubuntu SMP Fri Aug 11 21:17:51 UTC 2017 x86_64
+ansible_sysobjectid:
+ description: The vendor's authoritative identification of the network management subsystem contained in the entity.
+ returned: success
+ type: str
+ sample: 1.3.6.1.4.1.8072.3.2.10
+ansible_sysuptime:
+ description: The time (in hundredths of a second) since the network management portion of the system was last re-initialized.
+ returned: success
+ type: int
+ sample: 42388
+ansible_syscontact:
+ description: The textual identification of the contact person for this managed node, together with information on how to contact this person.
+ returned: success
+ type: str
+ sample: Me <me@example.org>
+ansible_sysname:
+ description: An administratively-assigned name for this managed node.
+ returned: success
+ type: str
+ sample: ubuntu-user
+ansible_syslocation:
+ description: The physical location of this node (e.g., C(telephone closet, 3rd floor)).
+ returned: success
+ type: str
+ sample: Sitting on the Dock of the Bay
+ansible_all_ipv4_addresses:
+ description: List of all IPv4 addresses.
+ returned: success
+ type: list
+ sample: ["127.0.0.1", "172.17.0.1"]
+ansible_interfaces:
+ description: Dictionary of each network interface and its metadata.
+ returned: success
+ type: dict
+ sample: {
+ "1": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "1",
+ "ipv4": [
+ {
+ "address": "127.0.0.1",
+ "netmask": "255.0.0.0"
+ }
+ ],
+ "mac": "",
+ "mtu": "65536",
+ "name": "lo",
+ "operstatus": "up",
+ "speed": "65536"
+ },
+ "2": {
+ "adminstatus": "up",
+ "description": "",
+ "ifindex": "2",
+ "ipv4": [
+ {
+ "address": "192.168.213.128",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "mac": "000a305a52a1",
+ "mtu": "1500",
+ "name": "Intel Corporation 82545EM Gigabit Ethernet Controller (Copper)",
+ "operstatus": "up",
+ "speed": "1500"
+ }
+ }
+'''
+
+import binascii
+from collections import defaultdict
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_text
+
+with deps.declare("pysnmp"):
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pysnmp.proto.rfc1905 import EndOfMibView
+
+
+class DefineOid(object):
+
+ def __init__(self, dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+ # From SNMPv2-MIB
+ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
+ self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
+ self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
+ self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
+ self.sysName = dp + "1.3.6.1.2.1.1.5.0"
+ self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
+
+ # From IF-MIB
+ self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
+ self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
+ self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
+ self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
+ self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
+ self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
+ self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
+ self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
+
+ # From IP-MIB
+ self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
+ self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
+ self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
+
+
+def decode_hex(hexstring):
+
+ if len(hexstring) < 3:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return to_text(binascii.unhexlify(hexstring[2:]))
+ return hexstring
+
+
+def decode_mac(hexstring):
+
+ if len(hexstring) != 14:
+ return hexstring
+ if hexstring[:2] == "0x":
+ return hexstring[2:]
+ return hexstring
+
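+# Illustrative conversions (inputs assumed): decode_hex('0x4c696e7578') -> 'Linux',
+# since pysnmp may render OctetString values as '0x...' hex strings, and
+# decode_mac('0x000a305a52a1') -> '000a305a52a1' ('0x' plus 12 hex digits).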
+
+def lookup_adminstatus(int_adminstatus):
+ adminstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing'
+ }
+ if int_adminstatus in adminstatus_options:
+ return adminstatus_options[int_adminstatus]
+ return ""
+
+
+def lookup_operstatus(int_operstatus):
+ operstatus_options = {
+ 1: 'up',
+ 2: 'down',
+ 3: 'testing',
+ 4: 'unknown',
+ 5: 'dormant',
+ 6: 'notPresent',
+ 7: 'lowerLayerDown'
+ }
+ if int_operstatus in operstatus_options:
+ return operstatus_options[int_operstatus]
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(type='str', required=True),
+ version=dict(type='str', required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(type='str'),
+ username=dict(type='str'),
+ level=dict(type='str', choices=['authNoPriv', 'authPriv']),
+ integrity=dict(type='str', choices=['md5', 'sha']),
+ privacy=dict(type='str', choices=['aes', 'des']),
+ authkey=dict(type='str', no_log=True),
+ privkey=dict(type='str', no_log=True),
+ timeout=dict(type='int'),
+ retries=dict(type='int'),
+ ),
+ required_together=(
+ ['username', 'level', 'integrity', 'authkey'],
+ ['privacy', 'privkey'],
+ ),
+ supports_check_mode=True,
+ )
+
+ m_args = module.params
+
+ deps.validate(module)
+
+ cmdGen = cmdgen.CommandGenerator()
+ transport_opts = dict((k, m_args[k]) for k in ('timeout', 'retries') if m_args[k] is not None)
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] in ("v2", "v2c"):
+ if m_args['community'] is None:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] is None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] in ("v2", "v2c"):
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ def Tree():
+ return defaultdict(Tree)
+
+ results = Tree()
+
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts),
+ cmdgen.MibVariable(p.sysDescr,),
+ cmdgen.MibVariable(p.sysObjectId,),
+ cmdgen.MibVariable(p.sysUpTime,),
+ cmdgen.MibVariable(p.sysContact,),
+ cmdgen.MibVariable(p.sysName,),
+ cmdgen.MibVariable(p.sysLocation,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if current_oid == v.sysDescr:
+ results['ansible_sysdescr'] = decode_hex(current_val)
+ elif current_oid == v.sysObjectId:
+ results['ansible_sysobjectid'] = current_val
+ elif current_oid == v.sysUpTime:
+ results['ansible_sysuptime'] = current_val
+ elif current_oid == v.sysContact:
+ results['ansible_syscontact'] = current_val
+ elif current_oid == v.sysName:
+ results['ansible_sysname'] = current_val
+ elif current_oid == v.sysLocation:
+ results['ansible_syslocation'] = current_val
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161), **transport_opts),
+ cmdgen.MibVariable(p.ifIndex,),
+ cmdgen.MibVariable(p.ifDescr,),
+ cmdgen.MibVariable(p.ifMtu,),
+ cmdgen.MibVariable(p.ifSpeed,),
+ cmdgen.MibVariable(p.ifPhysAddress,),
+ cmdgen.MibVariable(p.ifAdminStatus,),
+ cmdgen.MibVariable(p.ifOperStatus,),
+ cmdgen.MibVariable(p.ipAdEntAddr,),
+ cmdgen.MibVariable(p.ipAdEntIfIndex,),
+ cmdgen.MibVariable(p.ipAdEntNetMask,),
+
+ cmdgen.MibVariable(p.ifAlias,),
+ lookupMib=False
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication))
+
+ interface_indexes = []
+
+ all_ipv4_addresses = []
+ ipv4_networks = Tree()
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ if isinstance(val, EndOfMibView):
+ continue
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ifIndex in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
+ interface_indexes.append(ifIndex)
+ if v.ifDescr in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['name'] = current_val
+ if v.ifMtu in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mtu'] = current_val
+ if v.ifSpeed in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['speed'] = current_val
+ if v.ifPhysAddress in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
+ if v.ifAdminStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
+ if v.ifOperStatus in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
+ if v.ipAdEntAddr in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['address'] = current_val
+ all_ipv4_addresses.append(current_val)
+ if v.ipAdEntIfIndex in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['interface'] = current_val
+ if v.ipAdEntNetMask in current_oid:
+ curIPList = current_oid.rsplit('.', 4)[-4:]
+ curIP = ".".join(curIPList)
+ ipv4_networks[curIP]['netmask'] = current_val
+
+ if v.ifAlias in current_oid:
+ ifIndex = int(current_oid.rsplit('.', 1)[-1])
+ results['ansible_interfaces'][ifIndex]['description'] = current_val
+
+    interface_to_ipv4 = {}
+    for ipv4_network in ipv4_networks:
+        current_interface = ipv4_networks[ipv4_network]['interface']
+        current_network = {
+            'address': ipv4_networks[ipv4_network]['address'],
+            'netmask': ipv4_networks[ipv4_network]['netmask']
+        }
+        # setdefault creates the list on the first address seen for an interface
+        interface_to_ipv4.setdefault(current_interface, []).append(current_network)
+
+ for interface in interface_to_ipv4:
+ results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
+
+ results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
+
+ module.exit_json(ansible_facts=results)
+
+
+if __name__ == '__main__':
+ main()
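
The facts above are accumulated in an autovivifying "Tree": a defaultdict whose
default factory is itself, so nested keys such as
results['ansible_interfaces'][ifIndex]['mtu'] can be assigned without first
creating the intermediate dictionaries. A minimal standalone sketch of the pattern:

from collections import defaultdict

def Tree():
    # Every missing key materializes another Tree, so arbitrarily deep
    # assignments need no setup.
    return defaultdict(Tree)

facts = Tree()
facts['ansible_interfaces'][3]['mtu'] = '1500'
print(facts['ansible_interfaces'][3]['mtu'])  # -> 1500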
diff --git a/ansible_collections/community/general/plugins/modules/solaris_zone.py b/ansible_collections/community/general/plugins/modules/solaris_zone.py
new file mode 100644
index 000000000..0f970704e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/solaris_zone.py
@@ -0,0 +1,493 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Paul Markham <pmarkham@netrefinery.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: solaris_zone
+short_description: Manage Solaris zones
+description:
+ - Create, start, stop and delete Solaris zones.
+  - This module does not currently allow changing options for a zone that has already been created.
+author:
+ - Paul Markham (@pmarkham)
+requirements:
+ - Solaris 10 or 11
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+      - C(present), configure and install the zone.
+      - C(installed), synonym for C(present).
+      - C(running), if the zone already exists, boot it; otherwise, configure and install
+        the zone first, then boot it.
+      - C(started), synonym for C(running).
+      - C(stopped), shut down a zone.
+      - C(absent), destroy the zone.
+      - C(configured), configure the zone so that it is ready to be attached.
+      - C(attached), attach a zone, but do not boot it.
+      - C(detached), shut down and detach a zone.
+ type: str
+ choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]
+ default: present
+ name:
+ description:
+ - Zone name.
+      - A zone name must be a unique name.
+ - A zone name must begin with an alpha-numeric character.
+ - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).
+ - The name cannot be longer than 64 characters.
+ type: str
+ required: true
+ path:
+ description:
+ - The path where the zone will be created. This is required when the zone is created, but not
+ used otherwise.
+ type: str
+ sparse:
+ description:
+ - Whether to create a sparse (C(true)) or whole root (C(false)) zone.
+ type: bool
+ default: false
+ root_password:
+ description:
+ - The password hash for the root account. If not specified, the zone's root account
+ will not have a password.
+ type: str
+ config:
+ description:
+      - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
+        and syntax. Typically this is a list of options separated by semicolons or new lines, e.g.
+        "set autoboot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
+ type: str
+ default: ''
+ create_options:
+ description:
+ - 'Extra options to the zonecfg(1M) create command.'
+ type: str
+ default: ''
+ install_options:
+ description:
+ - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
+ use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
+ type: str
+ default: ''
+ attach_options:
+ description:
+ - 'Extra options to the zoneadm attach command. For example, this can be used to specify
+ whether a minimum or full update of packages is required and if any packages need to
+ be deleted. For valid values, see zoneadm(1M)'
+ type: str
+ default: ''
+ timeout:
+ description:
+ - Timeout, in seconds, for zone to boot.
+ type: int
+ default: 600
+'''
+
+EXAMPLES = '''
+- name: Create and install a zone, but don't boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: present
+ path: /zones/zone1
+ sparse: true
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Create and install a zone and boot it
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Boot an already installed zone
+ community.general.solaris_zone:
+ name: zone1
+ state: running
+
+- name: Stop a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: stopped
+
+- name: Destroy a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: absent
+
+- name: Detach a zone
+ community.general.solaris_zone:
+ name: zone1
+ state: detached
+
+- name: Configure a zone, ready to be attached
+ community.general.solaris_zone:
+ name: zone1
+ state: configured
+ path: /zones/zone1
+ root_password: Be9oX7OSwWoU.
+ config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
+
+- name: Attach zone1
+ community.general.solaris_zone:
+ name: zone1
+ state: attached
+ attach_options: -u
+'''
+
+import os
+import platform
+import re
+import tempfile
+import time
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zone(object):
+ def __init__(self, module):
+ self.changed = False
+ self.msg = []
+
+ self.module = module
+ self.path = self.module.params['path']
+ self.name = self.module.params['name']
+ self.sparse = self.module.params['sparse']
+ self.root_password = self.module.params['root_password']
+ self.timeout = self.module.params['timeout']
+ self.config = self.module.params['config']
+ self.create_options = self.module.params['create_options']
+ self.install_options = self.module.params['install_options']
+ self.attach_options = self.module.params['attach_options']
+
+ self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
+ self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
+ self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
+
+ if self.module.check_mode:
+ self.msg.append('Running in check mode')
+
+ if platform.system() != 'SunOS':
+ self.module.fail_json(msg='This module requires Solaris')
+
+ (self.os_major, self.os_minor) = platform.release().split('.')
+ if int(self.os_minor) < 10:
+ self.module.fail_json(msg='This module requires Solaris 10 or later')
+
+ match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)
+ if not match:
+ self.module.fail_json(msg="Provided zone name is not a valid zone name. "
+ "Please refer documentation for correct zone name specifications.")
+
+ def configure(self):
+ if not self.path:
+ self.module.fail_json(msg='Missing required argument: path')
+
+ if not self.module.check_mode:
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
+
+ if self.sparse:
+ t.write('create %s\n' % self.create_options)
+ self.msg.append('creating sparse-root zone')
+ else:
+ t.write('create -b %s\n' % self.create_options)
+ self.msg.append('creating whole-root zone')
+
+ t.write('set zonepath=%s\n' % self.path)
+ t.write('%s\n' % self.config)
+ t.close()
+
+ cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
+ os.unlink(t.name)
+
+ self.changed = True
+ self.msg.append('zone configured')
+
+ def install(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
+ if int(self.os_minor) == 10:
+ self.configure_sysid()
+ self.configure_password()
+ self.configure_ssh_keys()
+ self.changed = True
+ self.msg.append('zone installed')
+
+ def uninstall(self):
+ if self.is_installed():
+ if not self.module.check_mode:
+ cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone uninstalled')
+
+ def configure_sysid(self):
+ if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
+ os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
+
+ open('%s/root/noautoshutdown' % self.path, 'w').close()
+
+ node = open('%s/root/etc/nodename' % self.path, 'w')
+ node.write(self.name)
+ node.close()
+
+        sysid = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
+        sysid.write('1 # System previously configured?\n')
+        sysid.write('1 # Bootparams succeeded?\n')
+        sysid.write('1 # System is on a network?\n')
+        sysid.write('1 # Extended network information gathered?\n')
+        sysid.write('0 # Autobinder succeeded?\n')
+        sysid.write('1 # Network has subnets?\n')
+        sysid.write('1 # root password prompted for?\n')
+        sysid.write('1 # locale and term prompted for?\n')
+        sysid.write('1 # security policy in place\n')
+        sysid.write('1 # NFSv4 domain configured\n')
+        sysid.write('0 # Auto Registration Configured\n')
+        sysid.write('vt100')
+        sysid.close()
+
+ def configure_ssh_keys(self):
+ rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
+ dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
+
+ if not os.path.isfile(rsa_key_file):
+ cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
+
+ if not os.path.isfile(dsa_key_file):
+ cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
+
+ def configure_password(self):
+ shadow = '%s/root/etc/shadow' % self.path
+ if self.root_password:
+ f = open(shadow, 'r')
+ lines = f.readlines()
+ f.close()
+
+ for i in range(0, len(lines)):
+ fields = lines[i].split(':')
+ if fields[0] == 'root':
+ fields[1] = self.root_password
+ lines[i] = ':'.join(fields)
+
+ f = open(shadow, 'w')
+ for line in lines:
+ f.write(line)
+ f.close()
+
+ def boot(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
+
+ """
+ The boot command can return before the zone has fully booted. This is especially
+ true on the first boot when the zone initializes the SMF services. Unless the zone
+ has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
+ Wait until the zone's console login is running; once that's running, consider the zone booted.
+ """
+
+ elapsed = 0
+ while True:
+ if elapsed > self.timeout:
+ self.module.fail_json(msg='timed out waiting for zone to boot')
+ rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
+ if rc == 0:
+ break
+ time.sleep(10)
+ elapsed += 10
+ self.changed = True
+ self.msg.append('zone booted')
+
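
The loop in boot() is a plain poll-until-ready pattern: probe for the zone's
console ttymon, sleep, and give up once the timeout elapses. A standalone sketch
under the same assumptions (the zone name and the ps/grep probe are illustrative):

import os
import time

def wait_for_console_login(zone, timeout=600, interval=10):
    # Poll until the zone's console ttymon is up, or fail after `timeout` seconds.
    elapsed = 0
    while os.system('ps -z %s -o args | grep -q "ttymon.*-d /dev/console"' % zone) != 0:
        if elapsed > timeout:
            raise RuntimeError('timed out waiting for zone to boot')
        time.sleep(interval)
        elapsed += interval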
+ def destroy(self):
+ if self.is_running():
+ self.stop()
+ if self.is_installed():
+ self.uninstall()
+ if not self.module.check_mode:
+ cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone deleted')
+
+ def stop(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone stopped')
+
+ def detach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone detached')
+
+ def attach(self):
+ if not self.module.check_mode:
+ cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
+ self.changed = True
+ self.msg.append('zone attached')
+
+    def exists(self):
+        cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
+        (rc, out, err) = self.module.run_command(cmd)
+        return rc == 0
+
+ def is_running(self):
+ return self.status() == 'running'
+
+ def is_installed(self):
+ return self.status() == 'installed'
+
+ def is_configured(self):
+ return self.status() == 'configured'
+
+ def status(self):
+ cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
+ (rc, out, err) = self.module.run_command(cmd)
+ if rc == 0:
+ return out.split(':')[2]
+ else:
+ return 'undefined'
+
+ def state_present(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+ self.install()
+
+ def state_running(self):
+ self.state_present()
+ if self.is_running():
+ self.msg.append('zone already running')
+ else:
+ self.boot()
+
+ def state_stopped(self):
+ if self.exists():
+ self.stop()
+ else:
+ self.module.fail_json(msg='zone does not exist')
+
+ def state_absent(self):
+ if self.exists():
+ if self.is_running():
+ self.stop()
+ self.destroy()
+ else:
+ self.msg.append('zone does not exist')
+
+ def state_configured(self):
+ if self.exists():
+ self.msg.append('zone already exists')
+ else:
+ self.configure()
+
+ def state_detached(self):
+ if not self.exists():
+ self.module.fail_json(msg='zone does not exist')
+ if self.is_configured():
+ self.msg.append('zone already detached')
+ else:
+ self.stop()
+ self.detach()
+
+ def state_attached(self):
+ if not self.exists():
+ self.msg.append('zone does not exist')
+ if self.is_configured():
+ self.attach()
+ else:
+ self.msg.append('zone already attached')
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present',
+ choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),
+ path=dict(type='str'),
+ sparse=dict(type='bool', default=False),
+ root_password=dict(type='str', no_log=True),
+ timeout=dict(type='int', default=600),
+ config=dict(type='str', default=''),
+ create_options=dict(type='str', default=''),
+ install_options=dict(type='str', default=''),
+ attach_options=dict(type='str', default=''),
+ ),
+ supports_check_mode=True,
+ )
+
+ zone = Zone(module)
+
+ state = module.params['state']
+
+ if state == 'running' or state == 'started':
+ zone.state_running()
+ elif state == 'present' or state == 'installed':
+ zone.state_present()
+ elif state == 'stopped':
+ zone.state_stopped()
+ elif state == 'absent':
+ zone.state_absent()
+ elif state == 'configured':
+ zone.state_configured()
+ elif state == 'detached':
+ zone.state_detached()
+ elif state == 'attached':
+ zone.state_attached()
+ else:
+ module.fail_json(msg='Invalid state: %s' % state)
+
+ module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
+
+
+if __name__ == '__main__':
+ main()
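
configure() above drives zonecfg(1M) non-interactively by writing the commands to
a temporary file and handing it over with -f. A minimal sketch of that pattern
outside the module (the zone name, path and subprocess call are illustrative, and
assume Python 3):

import os
import subprocess
import tempfile

commands = 'create -b\nset zonepath=/zones/zone1\nset autoboot=true\n'
with tempfile.NamedTemporaryFile('wt', delete=False) as t:
    t.write(commands)
try:
    # Equivalent to: zonecfg -z zone1 -f <command file>
    subprocess.run(['zonecfg', '-z', 'zone1', '-f', t.name], check=True)
finally:
    os.unlink(t.name)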
diff --git a/ansible_collections/community/general/plugins/modules/sorcery.py b/ansible_collections/community/general/plugins/modules/sorcery.py
new file mode 100644
index 000000000..3278ce0ab
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sorcery.py
@@ -0,0 +1,653 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015-2016, Vlad Glagolev <scm@vaygr.net>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sorcery
+short_description: Package manager for Source Mage GNU/Linux
+description:
+  - Manages "spells" on Source Mage GNU/Linux using the I(sorcery) toolchain.
+author: "Vlad Glagolev (@vaygr)"
+notes:
+ - When all three components are selected, the update goes by the sequence --
+ Sorcery -> Grimoire(s) -> Spell(s); you cannot override it.
+  - Grimoire handling (i.e. add/remove, including SCM/rsync versions) is not
+    yet supported.
+requirements:
+ - bash
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the spell
+ - multiple names can be given, separated by commas
+ - special value '*' in conjunction with states C(latest) or
+ C(rebuild) will update or rebuild the whole system respectively
+ aliases: ["spell"]
+ type: list
+ elements: str
+
+ state:
+ description:
+ - Whether to cast, dispel or rebuild a package
+ - state C(cast) is an equivalent of C(present), not C(latest)
+ - state C(latest) always triggers I(update_cache=true)
+      - state C(rebuild) implies cast of all specified spells, not only
+        those that existed before
+ choices: ["present", "latest", "absent", "cast", "dispelled", "rebuild"]
+ default: "present"
+ type: str
+
+ depends:
+ description:
+ - Comma-separated list of _optional_ dependencies to build a spell
+ (or make sure it is built) with; use +/- in front of dependency
+ to turn it on/off ('+' is optional though)
+ - this option is ignored if C(name) parameter is equal to '*' or
+ contains more than one spell
+ - providers must be supplied in the form recognized by Sorcery, e.g.
+ 'openssl(SSL)'
+ type: str
+
+ update:
+ description:
+ - Whether or not to update sorcery scripts at the very first stage
+ type: bool
+ default: false
+
+ update_cache:
+ description:
+ - Whether or not to update grimoire collection before casting spells
+ type: bool
+ default: false
+ aliases: ["update_codex"]
+
+ cache_valid_time:
+ description:
+ - Time in seconds to invalidate grimoire collection on update
+ - especially useful for SCM and rsync grimoires
+      - makes sense only in combination with C(update_cache)
+ type: int
+ default: 0
+'''
+
+
+EXAMPLES = '''
+- name: Make sure spell foo is installed
+ community.general.sorcery:
+ spell: foo
+ state: present
+
+- name: Make sure spells foo, bar and baz are removed
+ community.general.sorcery:
+ spell: foo,bar,baz
+ state: absent
+
+- name: Make sure spell foo with dependencies bar and baz is installed
+ community.general.sorcery:
+ spell: foo
+ depends: bar,baz
+ state: present
+
+- name: Make sure spell foo with bar and without baz dependencies is installed
+ community.general.sorcery:
+ spell: foo
+ depends: +bar,-baz
+ state: present
+
+- name: Make sure spell foo with libressl (providing SSL) dependency is installed
+ community.general.sorcery:
+ spell: foo
+ depends: libressl(SSL)
+ state: present
+
+- name: Make sure spells with/without required dependencies (if any) are installed
+ community.general.sorcery:
+ name: "{{ item.spell }}"
+ depends: "{{ item.depends | default(None) }}"
+ state: present
+ loop:
+ - { spell: 'vifm', depends: '+file,-gtk+2' }
+ - { spell: 'fwknop', depends: 'gpgme' }
+ - { spell: 'pv,tnftp,tor' }
+
+- name: Install the latest version of spell foo using regular glossary
+ community.general.sorcery:
+ name: foo
+ state: latest
+
+- name: Rebuild spell foo
+ community.general.sorcery:
+ spell: foo
+ state: rebuild
+
+- name: Rebuild the whole system, but update Sorcery and Codex first
+ community.general.sorcery:
+ spell: '*'
+ state: rebuild
+ update: true
+ update_cache: true
+
+- name: Refresh the grimoire collection if it is older than 1 day using the native sorcerous alias
+ community.general.sorcery:
+ update_codex: true
+ cache_valid_time: 86400
+
+- name: Update only Sorcery itself
+ community.general.sorcery:
+ update: true
+'''
+
+
+RETURN = '''
+'''
+
+
+import datetime
+import fileinput
+import os
+import re
+import shutil
+import sys
+
+
+# auto-filled at module init
+SORCERY = {
+ 'sorcery': None,
+ 'scribe': None,
+ 'cast': None,
+ 'dispel': None,
+ 'gaze': None
+}
+
+SORCERY_LOG_DIR = "/var/log/sorcery"
+SORCERY_STATE_DIR = "/var/state/sorcery"
+
+
+def get_sorcery_ver(module):
+ """ Get Sorcery version. """
+
+ cmd_sorcery = "%s --version" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0 or not stdout:
+ module.fail_json(msg="unable to get Sorcery version")
+
+ return stdout.strip()
+
+
+def codex_fresh(codex, module):
+ """ Check if grimoire collection is fresh enough. """
+
+ if not module.params['cache_valid_time']:
+ return False
+
+ timedelta = datetime.timedelta(seconds=module.params['cache_valid_time'])
+
+ for grimoire in codex:
+ lastupdate_path = os.path.join(SORCERY_STATE_DIR,
+ grimoire + ".lastupdate")
+
+ try:
+ mtime = os.stat(lastupdate_path).st_mtime
+ except Exception:
+ return False
+
+ lastupdate_ts = datetime.datetime.fromtimestamp(mtime)
+
+ # if any grimoire is not fresh, we invalidate the Codex
+ if lastupdate_ts + timedelta < datetime.datetime.now():
+ return False
+
+ return True
+
+
+def codex_list(module):
+ """ List valid grimoire collection. """
+
+ codex = {}
+
+ cmd_scribe = "%s index" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to list grimoire collection, fix your Codex")
+
+ rex = re.compile(r"^\s*\[\d+\] : (?P<grim>[\w\-+.]+) : [\w\-+./]+(?: : (?P<ver>[\w\-+.]+))?\s*$")
+
+ # drop 4-line header and empty trailing line
+ for line in stdout.splitlines()[4:-1]:
+ match = rex.match(line)
+
+ if match:
+ codex[match.group('grim')] = match.group('ver')
+
+ if not codex:
+ module.fail_json(msg="no grimoires to operate on; add at least one")
+
+ return codex
+
+
+def update_sorcery(module):
+ """ Update sorcery scripts.
+
+ This runs 'sorcery update' ('sorcery -u'). Check mode always returns a
+ positive change value.
+
+ """
+
+ changed = False
+
+ if module.check_mode:
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=True, msg="would have updated Sorcery")
+ else:
+ sorcery_ver = get_sorcery_ver(module)
+
+ cmd_sorcery = "%s update" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Sorcery: " + stdout)
+
+ if sorcery_ver != get_sorcery_ver(module):
+ changed = True
+
+ if not module.params['name'] and not module.params['update_cache']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Sorcery")
+
+
+def update_codex(module):
+ """ Update grimoire collections.
+
+ This runs 'scribe update'. Check mode always returns a positive change
+ value when 'cache_valid_time' is used.
+
+ """
+
+ params = module.params
+
+ changed = False
+
+ codex = codex_list(module)
+ fresh = codex_fresh(codex, module)
+
+ if module.check_mode:
+ if not params['name']:
+ if not fresh:
+ changed = True
+
+ module.exit_json(changed=changed, msg="would have updated Codex")
+ elif not fresh or params['name'] and params['state'] == 'latest':
+ # SILENT is required as a workaround for query() in libgpg
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_scribe = "%s update" % SORCERY['scribe']
+
+ rc, stdout, stderr = module.run_command(cmd_scribe)
+
+ if rc != 0:
+ module.fail_json(msg="unable to update Codex: " + stdout)
+
+ if codex != codex_list(module):
+ changed = True
+
+ if not params['name']:
+ module.exit_json(changed=changed,
+ msg="successfully updated Codex")
+
+
+def match_depends(module):
+ """ Check for matching dependencies.
+
+ This inspects spell's dependencies with the desired states and returns
+ 'False' if a recast is needed to match them. It also adds required lines
+ to the system-wide depends file for proper recast procedure.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ depends = {}
+
+ depends_ok = True
+
+ if len(spells) > 1 or not params['depends']:
+ return depends_ok
+
+ spell = spells[0]
+
+ if module.check_mode:
+ sorcery_depends_orig = os.path.join(SORCERY_STATE_DIR, "depends")
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends.check")
+
+ try:
+ shutil.copy2(sorcery_depends_orig, sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to copy depends.check file")
+ else:
+ sorcery_depends = os.path.join(SORCERY_STATE_DIR, "depends")
+
+ rex = re.compile(r"^(?P<status>\+?|\-){1}(?P<depend>[a-z0-9]+[a-z0-9_\-\+\.]*(\([A-Z0-9_\-\+\.]+\))*)$")
+
+ for d in params['depends'].split(','):
+ match = rex.match(d)
+
+ if not match:
+ module.fail_json(msg="wrong depends line for spell '%s'" % spell)
+
+ # normalize status
+ if not match.group('status') or match.group('status') == '+':
+ status = 'on'
+ else:
+ status = 'off'
+
+ depends[match.group('depend')] = status
+
+ # drop providers spec
+ depends_list = [s.split('(')[0] for s in depends]
+
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(depends_list))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ if rc != 0:
+ module.fail_json(msg="wrong dependencies for spell '%s'" % spell)
+
+ fi = fileinput.input(sorcery_depends, inplace=True)
+
+ try:
+ try:
+ for line in fi:
+ if line.startswith(spell + ':'):
+ match = None
+
+ for d in depends:
+ # when local status is 'off' and dependency is provider,
+ # use only provider value
+ d_offset = d.find('(')
+
+ if d_offset == -1:
+ d_p = ''
+ else:
+ d_p = re.escape(d[d_offset:])
+
+ # .escape() is needed mostly for the spells like 'libsigc++'
+ rex = re.compile("%s:(?:%s|%s):(?P<lstatus>on|off):optional:" %
+ (re.escape(spell), re.escape(d), d_p))
+
+ match = rex.match(line)
+
+ # we matched the line "spell:dependency:on|off:optional:"
+ if match:
+ # if we also matched the local status, mark dependency
+ # as empty and put it back into depends file
+ if match.group('lstatus') == depends[d]:
+ depends[d] = None
+
+ sys.stdout.write(line)
+
+ # status is not that we need, so keep this dependency
+ # in the list for further reverse switching;
+ # stop and process the next line in both cases
+ break
+
+ if not match:
+ sys.stdout.write(line)
+ else:
+ sys.stdout.write(line)
+ except IOError:
+ module.fail_json(msg="I/O error on the depends file")
+ finally:
+ fi.close()
+
+ depends_new = [v for v in depends if depends[v]]
+
+ if depends_new:
+        try:
+            with open(sorcery_depends, 'a') as fl:
+                for k in depends_new:
+                    fl.write("%s:%s:%s:optional::\n" % (spell, k, depends[k]))
+        except IOError:
+            module.fail_json(msg="I/O error on the depends file")
+
+ depends_ok = False
+
+ if module.check_mode:
+ try:
+ os.remove(sorcery_depends)
+ except IOError:
+ module.fail_json(msg="failed to clean up depends.backup file")
+
+ return depends_ok
+
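
match_depends() rewrites the system-wide depends file in place through fileinput:
with inplace=True, anything written to stdout inside the loop replaces the current
line of the file. A minimal sketch of that mechanism, with a hypothetical file
name and filter:

import fileinput
import sys

# Rewrite 'depends.txt' in place, dropping the lines of a hypothetical spell.
for line in fileinput.input('depends.txt', inplace=True):
    if not line.startswith('oldspell:'):
        sys.stdout.write(line)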
+
+def manage_spells(module):
+ """ Cast or dispel spells.
+
+    This manages the whole system ('*'), a list of spells or a single spell. Command 'cast'
+    is used to install or rebuild spells, while 'dispel' takes care of their
+    removal from the system.
+
+ """
+
+ params = module.params
+ spells = params['name']
+
+ sorcery_queue = os.path.join(SORCERY_LOG_DIR, "queue/install")
+
+ if spells == '*':
+ if params['state'] == 'latest':
+ # back up original queue
+ try:
+ os.rename(sorcery_queue, sorcery_queue + ".backup")
+ except IOError:
+ module.fail_json(msg="failed to backup the update queue")
+
+ # see update_codex()
+ module.run_command_environ_update.update(dict(SILENT='1'))
+
+ cmd_sorcery = "%s queue"
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to generate the update queue")
+
+ try:
+ queue_size = os.stat(sorcery_queue).st_size
+ except Exception:
+ module.fail_json(msg="failed to read the update queue")
+
+ if queue_size != 0:
+ if module.check_mode:
+ try:
+ os.rename(sorcery_queue + ".backup", sorcery_queue)
+ except IOError:
+ module.fail_json(msg="failed to restore the update queue")
+
+ module.exit_json(changed=True, msg="would have updated the system")
+
+ cmd_cast = "%s --queue" % SORCERY['cast']
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to update the system")
+
+ module.exit_json(changed=True, msg="successfully updated the system")
+ else:
+ module.exit_json(changed=False, msg="the system is already up to date")
+ elif params['state'] == 'rebuild':
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have rebuilt the system")
+
+ cmd_sorcery = "%s rebuild" % SORCERY['sorcery']
+
+ rc, stdout, stderr = module.run_command(cmd_sorcery)
+
+ if rc != 0:
+ module.fail_json(msg="failed to rebuild the system: " + stdout)
+
+ module.exit_json(changed=True, msg="successfully rebuilt the system")
+ else:
+ module.fail_json(msg="unsupported operation on '*' name value")
+ else:
+ if params['state'] in ('present', 'latest', 'rebuild', 'absent'):
+ # extract versions from the 'gaze' command
+ cmd_gaze = "%s -q version %s" % (SORCERY['gaze'], ' '.join(spells))
+
+ rc, stdout, stderr = module.run_command(cmd_gaze)
+
+ # fail if any of spells cannot be found
+ if rc != 0:
+ module.fail_json(msg="failed to locate spell(s) in the list (%s)" %
+ ', '.join(spells))
+
+ cast_queue = []
+ dispel_queue = []
+
+ rex = re.compile(r"[^|]+\|[^|]+\|(?P<spell>[^|]+)\|(?P<grim_ver>[^|]+)\|(?P<inst_ver>[^$]+)")
+
+ # drop 2-line header and empty trailing line
+ for line in stdout.splitlines()[2:-1]:
+ match = rex.match(line)
+
+ cast = False
+
+ if params['state'] == 'present':
+ # spell is not installed..
+ if match.group('inst_ver') == '-':
+ # ..so set up depends reqs for it
+ match_depends(module)
+
+ cast = True
+ # spell is installed..
+ else:
+ # ..but does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'latest':
+ # grimoire and installed versions do not match..
+ if match.group('grim_ver') != match.group('inst_ver'):
+ # ..so check for depends reqs first and set them up
+ match_depends(module)
+
+ cast = True
+ # grimoire and installed versions match..
+ else:
+ # ..but the spell does not conform depends reqs
+ if not match_depends(module):
+ cast = True
+ elif params['state'] == 'rebuild':
+ cast = True
+ # 'absent'
+ else:
+ if match.group('inst_ver') != '-':
+ dispel_queue.append(match.group('spell'))
+
+ if cast:
+ cast_queue.append(match.group('spell'))
+
+ if cast_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have cast spell(s)")
+
+ cmd_cast = "%s -c %s" % (SORCERY['cast'], ' '.join(cast_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_cast)
+
+ if rc != 0:
+ module.fail_json(msg="failed to cast spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully cast spell(s)")
+ elif params['state'] != 'absent':
+ module.exit_json(changed=False, msg="spell(s) are already cast")
+
+ if dispel_queue:
+ if module.check_mode:
+ module.exit_json(changed=True, msg="would have dispelled spell(s)")
+
+ cmd_dispel = "%s %s" % (SORCERY['dispel'], ' '.join(dispel_queue))
+
+ rc, stdout, stderr = module.run_command(cmd_dispel)
+
+ if rc != 0:
+ module.fail_json(msg="failed to dispel spell(s): %s" + stdout)
+
+ module.exit_json(changed=True, msg="successfully dispelled spell(s)")
+ else:
+ module.exit_json(changed=False, msg="spell(s) are already dispelled")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['spell'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'latest',
+ 'absent', 'cast', 'dispelled', 'rebuild']),
+ depends=dict(default=None),
+ update=dict(default=False, type='bool'),
+ update_cache=dict(default=False, aliases=['update_codex'], type='bool'),
+ cache_valid_time=dict(default=0, type='int')
+ ),
+ required_one_of=[['name', 'update', 'update_cache']],
+ supports_check_mode=True
+ )
+
+ if os.geteuid() != 0:
+ module.fail_json(msg="root privileges are required for this operation")
+
+ for c in SORCERY:
+ SORCERY[c] = module.get_bin_path(c, True)
+
+ # prepare environment: run sorcery commands without asking questions
+ module.run_command_environ_update = dict(PROMPT_DELAY='0', VOYEUR='0')
+
+ params = module.params
+
+ # normalize 'state' parameter
+ if params['state'] in ('present', 'cast'):
+ params['state'] = 'present'
+ elif params['state'] in ('absent', 'dispelled'):
+ params['state'] = 'absent'
+
+ if params['update']:
+ update_sorcery(module)
+
+ if params['update_cache'] or params['state'] == 'latest':
+ update_codex(module)
+
+ if params['name']:
+ manage_spells(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
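
codex_fresh() above implements cache_valid_time as an mtime test on each
grimoire's .lastupdate stamp. The same check in isolation (the stamp path is
illustrative):

import datetime
import os

def is_fresh(stamp_path, max_age_seconds):
    # Fresh if the stamp exists and was touched within the window.
    try:
        mtime = os.stat(stamp_path).st_mtime
    except OSError:
        return False
    age = datetime.datetime.now() - datetime.datetime.fromtimestamp(mtime)
    return age <= datetime.timedelta(seconds=max_age_seconds)

print(is_fresh('/var/state/sorcery/stable.lastupdate', 86400))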
diff --git a/ansible_collections/community/general/plugins/modules/spectrum_device.py b/ansible_collections/community/general/plugins/modules/spectrum_device.py
new file mode 100644
index 000000000..5cfc07664
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/spectrum_device.py
@@ -0,0 +1,336 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Renato Orgito <orgito@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: spectrum_device
+short_description: Creates/deletes devices in CA Spectrum
+description:
+ - This module allows you to create and delete devices in CA Spectrum U(https://www.ca.com/us/products/ca-spectrum.html).
+  - Tested on CA Spectrum 9.4.2, 10.1.1 and 10.2.1.
+author: "Renato Orgito (@orgito)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ device:
+ type: str
+ aliases: [ host, name ]
+ required: true
+ description:
+ - IP address of the device.
+ - If a hostname is given, it will be resolved to the IP address.
+ community:
+ type: str
+ description:
+ - SNMP community used for device discovery.
+ - Required when I(state=present).
+ required: true
+ landscape:
+ type: str
+ required: true
+ description:
+ - Landscape handle of the SpectroServer to which add or remove the device.
+ state:
+ type: str
+ description:
+ - On C(present) creates the device when it does not exist.
+ - On C(absent) removes the device when it exists.
+ choices: ['present', 'absent']
+ default: 'present'
+ url:
+ type: str
+ aliases: [ oneclick_url ]
+ required: true
+ description:
+      - HTTP or HTTPS URL of the Oneclick server in the form C((http|https)://host.domain[:port]).
+ url_username:
+ type: str
+ aliases: [ oneclick_user ]
+ required: true
+ description:
+ - Oneclick user name.
+ url_password:
+ type: str
+ aliases: [ oneclick_password ]
+ required: true
+ description:
+ - Oneclick user password.
+ use_proxy:
+ description:
+      - If C(false), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
+ default: true
+ type: bool
+ validate_certs:
+ description:
+ - If C(false), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ default: true
+ type: bool
+ agentport:
+ type: int
+ required: false
+ description:
+ - UDP port used for SNMP discovery.
+ default: 161
+notes:
+ - The devices will be created inside the I(Universe) container of the specified landscape.
+ - All the operations will be performed only on the specified landscape.
+'''
+
+EXAMPLES = '''
+- name: Add device to CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ community: secret
+ landscape: '0x100000'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ state: present
+
+
+- name: Remove device from CA Spectrum
+ local_action:
+ module: spectrum_device
+ device: '{{ ansible_host }}'
+ landscape: '{{ landscape_handle }}'
+ oneclick_url: http://oneclick.example.com:8080
+ oneclick_user: username
+ oneclick_password: password
+ use_proxy: false
+ state: absent
+'''
+
+RETURN = '''
+device:
+ description: device data when state = present
+ returned: success
+ type: dict
+ sample: {'model_handle': '0x1007ab', 'landscape': '0x100000', 'address': '10.10.5.1'}
+'''
+
+from socket import gethostbyname, gaierror
+import xml.etree.ElementTree as ET
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import fetch_url
+
+
+def request(resource, xml=None, method=None):
+ headers = {
+ "Content-Type": "application/xml",
+ "Accept": "application/xml"
+ }
+
+ url = module.params['oneclick_url'] + '/spectrum/restful/' + resource
+
+ response, info = fetch_url(module, url, data=xml, method=method, headers=headers, timeout=45)
+
+ if info['status'] == 401:
+ module.fail_json(msg="failed to authenticate to Oneclick server")
+
+ if info['status'] not in (200, 201, 204):
+ module.fail_json(msg=info['msg'])
+
+ return response.read()
+
+
+def post(resource, xml=None):
+ return request(resource, xml=xml, method='POST')
+
+
+def delete(resource):
+ return request(resource, xml=None, method='DELETE')
+
+
+def get_ip():
+ try:
+ device_ip = gethostbyname(module.params.get('device'))
+ except gaierror:
+ module.fail_json(msg="failed to resolve device ip address for '%s'" % module.params.get('device'))
+
+ return device_ip
+
+
+def get_device(device_ip):
+ """Query OneClick for the device using the IP Address"""
+ resource = '/models'
+ landscape_min = "0x%x" % int(module.params.get('landscape'), 16)
+ landscape_max = "0x%x" % (int(module.params.get('landscape'), 16) + 0x100000)
+
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+ <rs:model-request throttlesize="5"
+ xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ <action-models>
+ <filtered-models>
+ <and>
+ <equals>
+ <model-type>SearchManager</model-type>
+ </equals>
+ <greater-than>
+ <attribute id="0x129fa">
+ <value>{mh_min}</value>
+ </attribute>
+ </greater-than>
+ <less-than>
+ <attribute id="0x129fa">
+ <value>{mh_max}</value>
+ </attribute>
+ </less-than>
+ </and>
+ </filtered-models>
+ <action>FIND_DEV_MODELS_BY_IP</action>
+ <attribute id="AttributeID.NETWORK_ADDRESS">
+ <value>{search_ip}</value>
+ </attribute>
+ </action-models>
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ <rs:requested-attribute id="0x12d7f" /> <!--Network Address-->
+ </rs:model-request>
+ """.format(search_ip=device_ip, mh_min=landscape_min, mh_max=landscape_max)
+
+ result = post(resource, xml=xml)
+
+ root = ET.fromstring(result)
+
+ if root.get('total-models') == '0':
+ return None
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+
+ # get the first device
+ model = root.find('ca:model-responses', namespace).find('ca:model', namespace)
+
+ if model.get('error'):
+ module.fail_json(msg="error checking device: %s" % model.get('error'))
+
+ # get the attributes
+ model_handle = model.get('mh')
+
+ model_address = model.find('./*[@id="0x12d7f"]').text
+
+ # derive the landscape handler from the model handler of the device
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=model_address,
+ landscape=model_landscape)
+
+ return device
+
+
+def add_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device:
+ module.exit_json(changed=False, device=device)
+
+ if module.check_mode:
+ device = dict(
+ model_handle=None,
+ address=device_ip,
+ landscape="0x%x" % int(module.params.get('landscape'), 16))
+ module.exit_json(changed=True, device=device)
+
+ resource = 'model?ipaddress=' + device_ip + '&commstring=' + module.params.get('community')
+ resource += '&landscapeid=' + module.params.get('landscape')
+
+ if module.params.get('agentport', None):
+ resource += '&agentport=' + str(module.params.get('agentport', 161))
+
+ result = post(resource)
+ root = ET.fromstring(result)
+
+ if root.get('error') != 'Success':
+ module.fail_json(msg=root.get('error-message'))
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ model = root.find('ca:model', namespace)
+
+ model_handle = model.get('mh')
+ model_landscape = "0x%x" % int(int(model_handle, 16) // 0x100000 * 0x100000)
+
+ device = dict(
+ model_handle=model_handle,
+ address=device_ip,
+ landscape=model_landscape,
+ )
+
+ module.exit_json(changed=True, device=device)
+
+
+def remove_device():
+ device_ip = get_ip()
+ device = get_device(device_ip)
+
+ if device is None:
+ module.exit_json(changed=False)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ resource = '/model/' + device['model_handle']
+ result = delete(resource)
+
+ root = ET.fromstring(result)
+
+ namespace = dict(ca='http://www.ca.com/spectrum/restful/schema/response')
+ error = root.find('ca:error', namespace).text
+
+ if error != 'Success':
+ error_message = root.find('ca:error-message', namespace).text
+ module.fail_json(msg="%s %s" % (error, error_message))
+
+ module.exit_json(changed=True)
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ device=dict(required=True, aliases=['host', 'name']),
+ landscape=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ community=dict(required=True, no_log=True), # @TODO remove the 'required', given the required_if ?
+ agentport=dict(type='int', default=161),
+ url=dict(required=True, aliases=['oneclick_url']),
+ url_username=dict(required=True, aliases=['oneclick_user']),
+ url_password=dict(required=True, no_log=True, aliases=['oneclick_password']),
+ use_proxy=dict(type='bool', default=True),
+ validate_certs=dict(type='bool', default=True),
+ ),
+ required_if=[('state', 'present', ['community'])],
+ supports_check_mode=True
+ )
+
+ if module.params.get('state') == 'present':
+ add_device()
+ else:
+ remove_device()
+
+
+if __name__ == '__main__':
+ main()
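
get_device() and add_device() both recover the landscape handle by flooring the
model handle to a 0x100000 boundary, since the landscape occupies the high-order
bits. As a standalone check against the RETURN sample above:

def landscape_of(model_handle):
    # Strip the low 20 bits; what remains is the landscape handle.
    mh = int(model_handle, 16)
    return "0x%x" % (mh // 0x100000 * 0x100000)

print(landscape_of('0x1007ab'))  # -> 0x100000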
diff --git a/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py
new file mode 100644
index 000000000..028ad7f9f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/spectrum_model_attrs.py
@@ -0,0 +1,536 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Tyler Gates <tgates81@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: spectrum_model_attrs
+short_description: Enforce a model's attributes in CA Spectrum
+description:
+ - This module can be used to enforce a model's attributes in CA Spectrum.
+version_added: 2.5.0
+author:
+ - Tyler Gates (@tgates81)
+notes:
+ - Tested on CA Spectrum version 10.4.2.0.189.
+ - Model creation and deletion are not possible with this module. For that use M(community.general.spectrum_device) instead.
+requirements:
+ - 'python >= 2.7'
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ url:
+ description:
+ - URL of OneClick server.
+ type: str
+ required: true
+ url_username:
+ description:
+ - OneClick username.
+ type: str
+ required: true
+ aliases: [username]
+ url_password:
+ description:
+ - OneClick password.
+ type: str
+ required: true
+ aliases: [password]
+ use_proxy:
+ description:
+      - If C(false), it will not use a proxy, even if one is defined in
+        an environment variable on the target hosts.
+ default: true
+ required: false
+ type: bool
+ name:
+ description:
+ - Model name.
+ type: str
+ required: true
+ type:
+ description:
+ - Model type.
+ type: str
+ required: true
+ validate_certs:
+ description:
+ - Validate SSL certificates. Only change this to C(false) if you can guarantee that you are talking to the correct endpoint and there is no
+ man-in-the-middle attack happening.
+ type: bool
+ default: true
+ required: false
+ attributes:
+ description:
+ - A list of attribute names and values to enforce.
+ - All values and parameters are case sensitive and must be provided as strings only.
+ required: true
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Attribute name OR hex ID.
+ - 'Currently defined names are:'
+ - ' C(App_Manufacturer) (C(0x230683))'
+ - ' C(CollectionsModelNameString) (C(0x12adb))'
+ - ' C(Condition) (C(0x1000a))'
+ - ' C(Criticality) (C(0x1290c))'
+ - ' C(DeviceType) (C(0x23000e))'
+ - ' C(isManaged) (C(0x1295d))'
+ - ' C(Model_Class) (C(0x11ee8))'
+ - ' C(Model_Handle) (C(0x129fa))'
+ - ' C(Model_Name) (C(0x1006e))'
+ - ' C(Modeltype_Handle) (C(0x10001))'
+ - ' C(Modeltype_Name) (C(0x10000))'
+ - ' C(Network_Address) (C(0x12d7f))'
+ - ' C(Notes) (C(0x11564))'
+ - ' C(ServiceDesk_Asset_ID) (C(0x12db9))'
+ - ' C(TopologyModelNameString) (C(0x129e7))'
+ - ' C(sysDescr) (C(0x10052))'
+ - ' C(sysName) (C(0x10b5b))'
+ - ' C(Vendor_Name) (C(0x11570))'
+ - ' C(Description) (C(0x230017))'
+ - Hex IDs are the direct identifiers in Spectrum and will always work.
+ - 'To lookup hex IDs go to the UI: Locator -> Devices -> By Model Name -> <enter any model> -> Attributes tab.'
+ type: str
+ required: true
+ value:
+ description:
+ - Attribute value. Empty strings should be C("") or C(null).
+ type: str
+ required: true
+'''
+
+EXAMPLES = r'''
+- name: Enforce maintenance mode for modelxyz01 with a note about why
+ community.general.spectrum_model_attrs:
+ url: "http://oneclick.url.com"
+ username: "{{ oneclick_username }}"
+ password: "{{ oneclick_password }}"
+ name: "modelxyz01"
+ type: "Host_Device"
+ validate_certs: true
+ attributes:
+ - name: "isManaged"
+ value: "false"
+ - name: "Notes"
+ value: "MM set on {{ ansible_date_time.iso8601 }} via CO {{ CO }} by {{ tower_user_name | default(ansible_user_id) }}"
+ delegate_to: localhost
+ register: spectrum_model_attrs_status
+'''
+
+RETURN = r'''
+msg:
+ description: Informational message on the job result.
+ type: str
+ returned: always
+ sample: 'Success'
+changed_attrs:
+ description: Dictionary of changed name or hex IDs (whichever was specified) to their new corresponding values.
+ type: dict
+ returned: always
+ sample: {
+ "Notes": "MM set on 2021-02-03T22:04:02Z via CO CO9999 by tgates",
+ "isManaged": "true"
+ }
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.six.moves.urllib.parse import quote
+import json
+import re
+import xml.etree.ElementTree as ET
+
+
+class spectrum_model_attrs:
+ def __init__(self, module):
+ self.module = module
+ self.url = module.params['url']
+        # If the user did not define a full path to the restful space in the
+        # url parameter, add what we believe it to be.
+ if not re.search('\\/.+', self.url.split('://')[1]):
+ self.url = "%s/spectrum/restful" % self.url.rstrip('/')
+ # Align these with what is defined in OneClick's UI under:
+ # Locator -> Devices -> By Model Name -> <enter any model> ->
+ # Attributes tab.
+ self.attr_map = dict(App_Manufacturer=hex(0x230683),
+ CollectionsModelNameString=hex(0x12adb),
+ Condition=hex(0x1000a),
+ Criticality=hex(0x1290c),
+ DeviceType=hex(0x23000e),
+ isManaged=hex(0x1295d),
+ Model_Class=hex(0x11ee8),
+ Model_Handle=hex(0x129fa),
+ Model_Name=hex(0x1006e),
+ Modeltype_Handle=hex(0x10001),
+ Modeltype_Name=hex(0x10000),
+ Network_Address=hex(0x12d7f),
+ Notes=hex(0x11564),
+ ServiceDesk_Asset_ID=hex(0x12db9),
+ TopologyModelNameString=hex(0x129e7),
+ sysDescr=hex(0x10052),
+ sysName=hex(0x10b5b),
+ Vendor_Name=hex(0x11570),
+ Description=hex(0x230017))
+ self.search_qualifiers = [
+ "and", "or", "not", "greater-than", "greater-than-or-equals",
+ "less-than", "less-than-or-equals", "equals", "equals-ignore-case",
+ "does-not-equal", "does-not-equal-ignore-case", "has-prefix",
+ "does-not-have-prefix", "has-prefix-ignore-case",
+ "does-not-have-prefix-ignore-case", "has-substring",
+ "does-not-have-substring", "has-substring-ignore-case",
+ "does-not-have-substring-ignore-case", "has-suffix",
+ "does-not-have-suffix", "has-suffix-ignore-case",
+ "does-not-have-suffix-ignore-case", "has-pcre",
+ "has-pcre-ignore-case", "has-wildcard", "has-wildcard-ignore-case",
+ "is-derived-from", "not-is-derived-from"]
+
+ self.resp_namespace = dict(ca="http://www.ca.com/spectrum/restful/schema/response")
+
+ self.result = dict(msg="", changed_attrs=dict())
+ self.success_msg = "Success"
+
+ def build_url(self, path):
+ """
+ Build a sane Spectrum restful API URL
+ :param path: The path to append to the restful base
+ :type path: str
+ :returns: Complete restful API URL
+ :rtype: str
+ """
+
+ return "%s/%s" % (self.url.rstrip('/'), path.lstrip('/'))
+
+ def attr_id(self, name):
+ """
+ Get attribute hex ID
+ :param name: The name of the attribute to retrieve the hex ID for
+ :type name: str
+ :returns: Translated hex ID of name, or None if no translation found
+ :rtype: str or None
+ """
+
+ try:
+ return self.attr_map[name]
+ except KeyError:
+ return None
+
+ def attr_name(self, _id):
+ """
+ Get attribute name from hex ID
+ :param _id: The hex ID to lookup a name for
+ :type _id: str
+ :returns: Translated name of hex ID, or None if no translation found
+ :rtype: str or None
+ """
+
+ for name, m_id in list(self.attr_map.items()):
+ if _id == m_id:
+ return name
+ return None
+
+ def urlencode(self, string):
+ """
+ URL Encode a string
+        :param string: The string to URL encode
+        :type string: str
+        :returns: URL-encoded version of the supplied string
+ :rtype: str
+ """
+
+ return quote(string, "<>%-_.!*'():?#/@&+,;=")
+
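
urlencode() above widens quote()'s safe set so that characters Spectrum expects
to see literally in attribute values are not escaped. A quick illustration using
the standard library equivalent of the six.moves import (the sample string is
made up):

from urllib.parse import quote

safe = "<>%-_.!*'():?#/@&+,;="
# Spaces are still escaped, while '/', ':' and '-' pass through untouched.
print(quote("MM set on 2021-02-03T22:04 via CO/1234", safe))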
+ def update_model(self, model_handle, attrs):
+ """
+ Update a model's attributes
+ :param model_handle: The model's handle ID
+ :type model_handle: str
+ :param attrs: Model's attributes to update. {'<name/id>': '<attr>'}
+ :type attrs: dict
+ :returns: Nothing; exits on error or updates self.results
+ :rtype: None
+ """
+
+ # Build the update URL
+ update_url = self.build_url("/model/%s?" % model_handle)
+ for name, val in list(attrs.items()):
+ if val is None:
+ # None values should be converted to empty strings
+ val = ""
+ val = self.urlencode(str(val))
+ if not update_url.endswith('?'):
+ update_url += "&"
+
+ update_url += "attr=%s&val=%s" % (self.attr_id(name) or name, val)
+
+        # PUT to /model to update the attributes, or fail.
+ resp, info = fetch_url(self.module, update_url, method="PUT",
+ headers={"Content-Type": "application/json",
+ "Accept": "application/json"},
+ use_proxy=self.module.params['use_proxy'])
+ status_code = info["status"]
+ if status_code >= 400:
+ body = info['body']
+ else:
+ body = "" if resp is None else resp.read()
+ if status_code != 200:
+ self.result['msg'] = "HTTP PUT error %s: %s: %s" % (status_code, update_url, body)
+ self.module.fail_json(**self.result)
+
+ # Load and parse the JSON response and either fail or set results.
+ json_resp = json.loads(body)
+ """
+ Example success response:
+ {'model-update-response-list':{'model-responses':{'model':{'@error':'Success','@mh':'0x1010e76','attribute':{'@error':'Success','@id':'0x1295d'}}}}}"
+ Example failure response:
+ {'model-update-response-list': {'model-responses': {'model': {'@error': 'PartialFailure', '@mh': '0x1010e76', 'attribute': {'@error-message': 'brn0vlappua001: You do not have permission to set attribute Network_Address for this model.', '@error': 'Error', '@id': '0x12d7f'}}}}}
+ """ # noqa
+ model_resp = json_resp['model-update-response-list']['model-responses']['model']
+ if model_resp['@error'] != "Success":
+ # I'm not 100% confident on the expected failure structure so just
+ # dump all of ['attribute'].
+ self.result['msg'] = str(model_resp['attribute'])
+ self.module.fail_json(**self.result)
+
+ # Should be OK if we get to here, set results.
+ self.result['msg'] = self.success_msg
+ self.result['changed_attrs'].update(attrs)
+ self.result['changed'] = True
+
+ def find_model(self, search_criteria, ret_attrs=None):
+ """
+ Search for a model in /models
+ :param search_criteria: The XML <rs:search-criteria>
+ :type search_criteria: str
+ :param ret_attrs: List of attributes by name or ID to return back
+ (default is Model_Handle)
+ :type ret_attrs: list
+        :returns: Dictionary mapping of ret_attrs to values: {ret_attr: ret_val}
+        :rtype: dict
+ """
+
+ # If no return attributes were asked for, return Model_Handle.
+ if ret_attrs is None:
+ ret_attrs = ['Model_Handle']
+
+ # Set the XML <rs:requested-attribute id=<id>> tags. If no hex ID
+ # is found for the name, assume it is already in hex. {name: hex ID}
+ rqstd_attrs = ""
+ for ra in ret_attrs:
+            _id = self.attr_id(ra) or ra
+            rqstd_attrs += '<rs:requested-attribute id="%s" />' % _id
+
+ # Build the complete XML search query for HTTP POST.
+ xml = """<?xml version="1.0" encoding="UTF-8"?>
+<rs:model-request throttlesize="5"
+xmlns:rs="http://www.ca.com/spectrum/restful/schema/request"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="http://www.ca.com/spectrum/restful/schema/request ../../../xsd/Request.xsd">
+ <rs:target-models>
+ <rs:models-search>
+ <rs:search-criteria xmlns="http://www.ca.com/spectrum/restful/schema/filter">
+ {0}
+ </rs:search-criteria>
+ </rs:models-search>
+ </rs:target-models>
+ {1}
+ </rs:model-request>
+""".format(search_criteria, rqstd_attrs)
+
+ # POST to /models and fail on errors.
+ url = self.build_url("/models")
+ resp, info = fetch_url(self.module, url, data=xml, method="POST",
+ use_proxy=self.module.params['use_proxy'],
+ headers={"Content-Type": "application/xml",
+ "Accept": "application/xml"})
+ status_code = info["status"]
+ if status_code >= 400:
+ body = info['body']
+ else:
+ body = "" if resp is None else resp.read()
+ if status_code != 200:
+ self.result['msg'] = "HTTP POST error %s: %s: %s" % (status_code, url, body)
+ self.module.fail_json(**self.result)
+
+ # Parse through the XML response and fail on any detected errors.
+ root = ET.fromstring(body)
+ total_models = int(root.attrib['total-models'])
+ error = root.attrib['error']
+ model_responses = root.find('ca:model-responses', self.resp_namespace)
+ if total_models < 1:
+ self.result['msg'] = "No models found matching search criteria `%s'" % search_criteria
+ self.module.fail_json(**self.result)
+ elif total_models > 1:
+ self.result['msg'] = "More than one model found (%s): `%s'" % (total_models, ET.tostring(model_responses,
+ encoding='unicode'))
+ self.module.fail_json(**self.result)
+ if error != "EndOfResults":
+ self.result['msg'] = "Unexpected search response `%s': %s" % (error, ET.tostring(model_responses,
+ encoding='unicode'))
+ self.module.fail_json(**self.result)
+ model = model_responses.find('ca:model', self.resp_namespace)
+ attrs = model.findall('ca:attribute', self.resp_namespace)
+ if not attrs:
+ self.result['msg'] = "No attributes returned."
+ self.module.fail_json(**self.result)
+
+ # XML response should be successful. Iterate and set each returned
+ # attribute ID/name and value for return.
+ ret = dict()
+ for attr in attrs:
+ attr_id = attr.get('id')
+ attr_name = self.attr_name(attr_id)
+ # Note: values are always returned as strings; empty values come back as None.
+ attr_val = attr.text
+ key = attr_name if attr_name in ret_attrs else attr_id
+ ret[key] = attr_val
+ ret_attrs.remove(key)
+ return ret
+
+ def find_model_by_name_type(self, mname, mtype, ret_attrs=None):
+ """
+ Find a model by name and type
+ :param mname: Model name
+ :type mname: str
+ :param mtype: Model type
+ :type mtype: str
+ :param ret_attrs: List of attributes by name or ID to return back
+ (default is Model_Handle)
+ :type ret_attrs: list
+ :returns: find_model(): Dictionary mapping of ret_attrs to values:
+ {ret_attr: ret_val}
+ :rtype: dict
+ """
+
+ # If no return attributes were asked for, return Model_Handle.
+ if ret_attrs is None:
+ ret_attrs = ['Model_Handle']
+
+ """This is basically as follows:
+ <filtered-models>
+ <and>
+ <equals>
+ <attribute id=...>
+ <value>...</value>
+ </attribute>
+ </equals>
+ <equals>
+ <attribute...>
+ </equals>
+ </and>
+ </filtered-models>
+ """
+
+ # Parent filter tag
+ filtered_models = ET.Element('filtered-models')
+ # Logically AND the conditions together
+ _and = ET.SubElement(filtered_models, 'and')
+
+ # Model Name
+ MN_equals = ET.SubElement(_and, 'equals')
+ Model_Name = ET.SubElement(MN_equals, 'attribute',
+ {'id': self.attr_map['Model_Name']})
+ MN_value = ET.SubElement(Model_Name, 'value')
+ MN_value.text = mname
+
+ # Model Type Name
+ MTN_equals = ET.SubElement(_and, 'equals')
+ Modeltype_Name = ET.SubElement(MTN_equals, 'attribute',
+ {'id': self.attr_map['Modeltype_Name']})
+ MTN_value = ET.SubElement(Modeltype_Name, 'value')
+ MTN_value.text = mtype
+
+ return self.find_model(ET.tostring(filtered_models,
+ encoding='unicode'),
+ ret_attrs)
+
+ def ensure_model_attrs(self):
+
+ # Get a list of all requested attribute names/IDs plus Model_Handle and
+ # use them to query the values currently set. Store the findings in a
+ # dictionary.
+ req_attrs = []
+ for attr in self.module.params['attributes']:
+ req_attrs.append(attr['name'])
+ if 'Model_Handle' not in req_attrs:
+ req_attrs.append('Model_Handle')
+
+ # Survey attributes currently set and store in a dict.
+ cur_attrs = self.find_model_by_name_type(self.module.params['name'],
+ self.module.params['type'],
+ req_attrs)
+
+ # Iterate through the requested attributes names/IDs values pair and
+ # compare with those currently set. If different, attempt to change.
+ Model_Handle = cur_attrs.pop("Model_Handle")
+ for attr in self.module.params['attributes']:
+ req_name = attr['name']
+ req_val = attr['value']
+ if req_val == "":
+ # The API will return None on empty string
+ req_val = None
+ if cur_attrs[req_name] != req_val:
+ if self.module.check_mode:
+ self.result['changed_attrs'][req_name] = req_val
+ self.result['msg'] = self.success_msg
+ self.result['changed'] = True
+ continue
+ self.update_model(Model_Handle, {req_name: req_val})
+
+ self.module.exit_json(**self.result)
+
+
+def run_module():
+ argument_spec = dict(
+ url=dict(type='str', required=True),
+ url_username=dict(type='str', required=True, aliases=['username']),
+ url_password=dict(type='str', required=True, aliases=['password'],
+ no_log=True),
+ validate_certs=dict(type='bool', default=True),
+ use_proxy=dict(type='bool', default=True),
+ name=dict(type='str', required=True),
+ type=dict(type='str', required=True),
+ attributes=dict(type='list',
+ required=True,
+ elements='dict',
+ options=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', required=True)
+ )),
+ )
+ module = AnsibleModule(
+ supports_check_mode=True,
+ argument_spec=argument_spec,
+ )
+
+ try:
+ sm = spectrum_model_attrs(module)
+ sm.ensure_model_attrs()
+ except Exception as e:
+ module.fail_json(msg="Failed to ensure attribute(s) on `%s' with "
+ "exception: %s" % (module.params['name'],
+ to_native(e)))
+
+
+def main():
+ run_module()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
new file mode 100644
index 000000000..02f2d3c5c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/spotinst_aws_elastigroup.py
@@ -0,0 +1,1595 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: spotinst_aws_elastigroup
+short_description: Create, update or delete Spotinst AWS Elastigroups
+author: Spotinst (@talzur)
+description:
+ - Can create, update, or delete Spotinst AWS Elastigroups.
+ - Launch configuration is part of the elastigroup configuration,
+ so no additional modules are necessary for handling the launch configuration.
+ - You will have to have a credentials file in this location - C(<home>/.spotinst/credentials).
+ - "The credentials file must contain a row that looks like this: C(token = <YOUR TOKEN>)."
+ - Full documentation available at U(https://help.spotinst.com/hc/en-us/articles/115003530285-Ansible-).
+requirements:
+ - python >= 2.7
+ - spotinst_sdk >= 1.0.38
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+
+ credentials_path:
+ description:
+ - Optional parameter that allows you to set a non-default credentials path.
+ default: ~/.spotinst/credentials
+ type: path
+
+ account_id:
+ description:
+ - Optional parameter that allows you to set an account ID inside the module configuration.
+ By default this is retrieved from the credentials path.
+ type: str
+
+ token:
+ description:
+ - A Personal API Access Token issued by Spotinst.
+ - >-
+ When not specified, the module will try to obtain it, in this order, from the C(SPOTINST_TOKEN) environment variable and then from the credentials path.
+ type: str
+
+ availability_vs_cost:
+ description:
+ - The strategy orientation.
+ - "The choices available are: C(availabilityOriented), C(costOriented), C(balanced)."
+ required: true
+ type: str
+
+ availability_zones:
+ description:
+ - A list of hash/dictionaries of Availability Zones that are configured in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ name (String),
+ subnet_id (String),
+ placement_group_name (String),
+ required: true
+ type: list
+ elements: dict
+
+ block_device_mappings:
+ description:
+ - A list of hash/dictionaries of Block Device Mappings for elastigroup instances;
+ You can specify virtual devices and EBS volumes.;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are
+ device_name (List of Strings),
+ virtual_name (String),
+ no_device (String),
+ ebs (Object, expects the following keys-
+ delete_on_termination(Boolean),
+ encrypted(Boolean),
+ iops (Integer),
+ snapshot_id(Integer),
+ volume_type(String),
+ volume_size(Integer))
+ type: list
+ elements: dict
+
+ chef:
+ description:
+ - The Chef integration configuration.;
+ Expects the following keys - chef_server (String),
+ organization (String),
+ user (String),
+ pem_key (String),
+ chef_version (String)
+ type: dict
+
+ draining_timeout:
+ description:
+ - Time for instance to be drained from incoming requests and deregistered from ELB before termination.
+ type: int
+
+ ebs_optimized:
+ description:
+ - Enable EBS optimization for supported instances that do not have it enabled by default.;
+ Note - additional charges will be applied.
+ type: bool
+
+ ebs_volume_pool:
+ description:
+ - A list of hash/dictionaries of EBS devices to reattach to the elastigroup when available;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ volume_ids (List of Strings),
+ device_name (String)
+ type: list
+ elements: dict
+
+ ecs:
+ description:
+ - The ECS integration configuration.;
+ Expects the following key -
+ cluster_name (String)
+ type: dict
+
+ elastic_ips:
+ description:
+ - List of ElasticIps Allocation Ids (Example C(eipalloc-9d4e16f8)) to associate with the group instances
+ type: list
+ elements: str
+
+ fallback_to_od:
+ description:
+ - In case of no spots available, Elastigroup will launch an On-demand instance instead
+ type: bool
+
+ health_check_grace_period:
+ description:
+ - The amount of time, in seconds, after the instance has launched to start and check its health.
+ - If not specified, it defaults to C(300).
+ type: int
+
+ health_check_unhealthy_duration_before_replacement:
+ description:
+ - Minimal amount of time an instance should be unhealthy before we consider it unhealthy.
+ type: int
+
+ health_check_type:
+ description:
+ - The service to use for the health check.
+ - "The choices available are: C(ELB), C(HCS), C(TARGET_GROUP), C(MLB), C(EC2)."
+ type: str
+
+ iam_role_name:
+ description:
+ - The instance profile iamRole name.
+ - Only use one of I(iam_role_arn) or I(iam_role_name).
+ type: str
+
+ iam_role_arn:
+ description:
+ - The instance profile iamRole arn.
+ - Only use one of I(iam_role_arn) or I(iam_role_name).
+ type: str
+
+ id:
+ description:
+ - The group id if it already exists and you want to update, or delete it.
+ This will not work unless the uniqueness_by field is set to id.
+ When this is set, and the uniqueness_by field is set, the group will either be updated or deleted, but not created.
+ type: str
+
+ image_id:
+ description:
+ - The image Id used to launch the instance.;
+ In case of conflict between Instance type and image type, an error will be returned
+ required: true
+ type: str
+
+ key_pair:
+ description:
+ - Specify a Key Pair to attach to the instances
+ type: str
+
+ kubernetes:
+ description:
+ - The Kubernetes integration configuration.
+ Expects the following keys -
+ api_server (String),
+ token (String)
+ type: dict
+
+ lifetime_period:
+ description:
+ - Lifetime period
+ type: int
+
+ load_balancers:
+ description:
+ - List of classic ELB names
+ type: list
+ elements: str
+
+ max_size:
+ description:
+ - The upper limit number of instances that you can scale up to
+ required: true
+ type: int
+
+ mesosphere:
+ description:
+ - The Mesosphere integration configuration.
+ Expects the following key -
+ api_server (String)
+ type: dict
+
+ min_size:
+ description:
+ - The lower limit number of instances that you can scale down to
+ required: true
+ type: int
+
+ monitoring:
+ description:
+ - Describes whether instance Enhanced Monitoring is enabled
+ type: str
+
+ name:
+ description:
+ - Unique name for elastigroup to be created, updated or deleted
+ required: true
+ type: str
+
+ network_interfaces:
+ description:
+ - A list of hash/dictionaries of network interfaces to add to the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ description (String),
+ device_index (Integer),
+ secondary_private_ip_address_count (Integer),
+ associate_public_ip_address (Boolean),
+ delete_on_termination (Boolean),
+ groups (List of Strings),
+ network_interface_id (String),
+ private_ip_address (String),
+ subnet_id (String),
+ associate_ipv6_address (Boolean),
+ private_ip_addresses (List of Objects, Keys are privateIpAddress (String, required) and primary (Boolean))
+ type: list
+ elements: dict
+
+ on_demand_count:
+ description:
+ - Required if risk is not set
+ - Number of on demand instances to launch. All other instances will be spot instances.;
+ Either set this parameter or the risk parameter
+ type: int
+
+ on_demand_instance_type:
+ description:
+ - On-demand instance type that will be provisioned
+ type: str
+
+ opsworks:
+ description:
+ - The elastigroup OpsWorks integration configuration.;
+ Expects the following key -
+ layer_id (String)
+ type: dict
+
+ persistence:
+ description:
+ - The Stateful elastigroup configuration.;
+ Accepts the following keys -
+ should_persist_root_device (Boolean),
+ should_persist_block_devices (Boolean),
+ should_persist_private_ip (Boolean)
+ type: dict
+
+ product:
+ description:
+ - Operating system type.
+ - "Available choices are: C(Linux/UNIX), C(SUSE Linux), C(Windows), C(Linux/UNIX (Amazon VPC)), C(SUSE Linux (Amazon VPC))."
+ required: true
+ type: str
+
+ rancher:
+ description:
+ - The Rancher integration configuration.;
+ Expects the following keys -
+ version (String),
+ access_key (String),
+ secret_key (String),
+ master_host (String)
+ type: dict
+
+ right_scale:
+ description:
+ - The Rightscale integration configuration.;
+ Expects the following keys -
+ account_id (String),
+ refresh_token (String)
+ type: dict
+
+ risk:
+ description:
+ - Required if on demand is not set. The percentage of Spot instances to launch (0 - 100).
+ type: int
+
+ roll_config:
+ description:
+ - Roll configuration.;
+ If you would like the group to roll after updating, please use this feature.
+ Accepts the following keys -
+ batch_size_percentage(Integer, Required),
+ grace_period (Integer, Required),
+ health_check_type(String, Optional)
+ type: dict
+
+ scheduled_tasks:
+ description:
+ - A list of hash/dictionaries of scheduled tasks to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ adjustment (Integer),
+ scale_target_capacity (Integer),
+ scale_min_capacity (Integer),
+ scale_max_capacity (Integer),
+ adjustment_percentage (Integer),
+ batch_size_percentage (Integer),
+ cron_expression (String),
+ frequency (String),
+ grace_period (Integer),
+ task_type (String, required),
+ is_enabled (Boolean)
+ type: list
+ elements: dict
+
+ security_group_ids:
+ description:
+ - One or more security group IDs.;
+ In case of update it will override the existing Security Group with the new given array
+ required: true
+ type: list
+ elements: str
+
+ shutdown_script:
+ description:
+ - The Base64-encoded shutdown script that executes prior to instance termination.
+ Encode before setting.
+ type: str
+
+ signals:
+ description:
+ - A list of hash/dictionaries of signals to configure in the elastigroup;
+ keys allowed are -
+ name (String, required),
+ timeout (Integer)
+ type: list
+ elements: dict
+
+ spin_up_time:
+ description:
+ - Spin up time, in seconds, for the instance
+ type: int
+
+ spot_instance_types:
+ description:
+ - Spot instance type that will be provisioned.
+ required: true
+ type: list
+ elements: str
+
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Create or delete the elastigroup
+ default: present
+ type: str
+
+ tags:
+ description:
+ - A list of tags to configure in the elastigroup. Please specify list of keys and values (key colon value);
+ type: list
+ elements: dict
+
+ target:
+ description:
+ - The number of instances to launch
+ required: true
+ type: int
+
+ target_group_arns:
+ description:
+ - List of target group arns instances should be registered to
+ type: list
+ elements: str
+
+ tenancy:
+ description:
+ - Dedicated vs shared tenancy.
+ - "The available choices are: C(default), C(dedicated)."
+ type: str
+
+ terminate_at_end_of_billing_hour:
+ description:
+ - Terminate at the end of billing hour
+ type: bool
+
+ unit:
+ description:
+ - The capacity unit to launch instances by.
+ - "The available choices are: C(instance), C(weight)."
+ type: str
+
+ up_scaling_policies:
+ description:
+ - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions (List of Objects, Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ min_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+ elements: dict
+
+ down_scaling_policies:
+ description:
+ - A list of hash/dictionaries of scaling policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ metric_name (String, required),
+ dimensions ((List of Objects), Keys allowed are name (String, required) and value (String)),
+ statistic (String, required),
+ evaluation_periods (String, required),
+ period (String, required),
+ threshold (String, required),
+ cooldown (String, required),
+ unit (String, required),
+ operator (String, required),
+ action_type (String, required),
+ adjustment (String),
+ max_target_capacity (String),
+ target (String),
+ maximum (String),
+ minimum (String)
+ type: list
+ elements: dict
+
+ target_tracking_policies:
+ description:
+ - A list of hash/dictionaries of target tracking policies to configure in the elastigroup;
+ '[{"key":"value", "key":"value"}]';
+ keys allowed are -
+ policy_name (String, required),
+ namespace (String, required),
+ source (String, required),
+ metric_name (String, required),
+ statistic (String, required),
+ unit (String, required),
+ cooldown (String, required),
+ target (String, required)
+ type: list
+ elements: dict
+
+ uniqueness_by:
+ choices:
+ - id
+ - name
+ description:
+ - If your group names are not unique, you may use this feature to update or delete a specific group.
+ Whenever this property is set to C(id), you must also set the I(id) option in order to update or delete a group, otherwise a group will be created.
+ default: name
+ type: str
+
+ user_data:
+ description:
+ - Base64-encoded MIME user data. Encode before setting the value.
+ type: str
+
+ utilize_reserved_instances:
+ description:
+ - In case of any available Reserved Instances,
+ Elastigroup will utilize your reservations before purchasing Spot instances.
+ type: bool
+
+ wait_for_instances:
+ description:
+ - Whether or not the elastigroup creation / update actions should wait for the instances to spin up.
+ type: bool
+ default: false
+
+ wait_timeout:
+ description:
+ - How long the module should wait for instances before failing the action.;
+ Only works if wait_for_instances is True.
+ type: int
+
+ do_not_update:
+ description:
+ - List of attributes to skip when updating an existing elastigroup, for example C(image_id) or C(target).
+ type: list
+ elements: str
+ default: []
+
+ multai_token:
+ description:
+ - Token used for Multai configuration.
+ type: str
+
+ multai_load_balancers:
+ description:
+ - Configuration parameters for Multai load balancers.
+ type: list
+ elements: dict
+
+ elastic_beanstalk:
+ description:
+ - Placeholder parameter for future implementation of Elastic Beanstalk configurations.
+ type: dict
+
+'''
+EXAMPLES = '''
+# Basic configuration YAML example
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: true
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup and wait 600 seconds to retrieve the instances, and use their private ips
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: true
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/sda1'
+ ebs:
+ volume_size: 100
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: true
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example, we create an elastigroup with multiple block device mappings, tags, and also an account id
+# In organizations with more than one account, it is required to specify an account_id
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ account_id: act-1a9dd2b
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ tags:
+ - Environment: someEnvValue
+ - OtherTagKey: otherValue
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 5
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: true
+ name: ansible-group-tal
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-8f4b8fe9
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ ebs:
+ volume_size: 60
+ volume_type: gp2
+ - device_name: '/dev/xvdb'
+ ebs:
+ volume_size: 120
+ volume_type: gp2
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ wait_for_instances: true
+ wait_timeout: 600
+ register: result
+
+ - name: Store private ips to file
+ ansible.builtin.shell: echo {{ item.private_ip }}\\n >> list-of-private-ips
+ with_items: "{{ result.instances }}"
+ - ansible.builtin.debug: var=result
+
+# In this example we have set up block device mapping with ephemeral devices
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ block_device_mappings:
+ - device_name: '/dev/xvda'
+ virtual_name: ephemeral0
+ - device_name: '/dev/xvdb/'
+ virtual_name: ephemeral1
+ monitoring: true
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+# In this example we create a basic group configuration with a network interface defined.
+# Each network interface must have a device index
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ network_interfaces:
+ - associate_public_ip_address: true
+ device_index: 0
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-2b68a15c
+ image_id: ami-f173cc91
+ key_pair: spotinst-oregon
+ max_size: 15
+ min_size: 0
+ target: 0
+ unit: instance
+ monitoring: true
+ name: ansible-group
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ load_balancers:
+ - test-lb-1
+ security_group_ids:
+ - sg-8f4b8fe9
+ spot_instance_types:
+ - c3.large
+ do_not_update:
+ - image_id
+ - target
+ register: result
+ - ansible.builtin.debug: var=result
+
+
+# In this example we create a basic group configuration with a target tracking scaling policy defined
+
+- hosts: localhost
+ tasks:
+ - name: Create elastigroup
+ community.general.spotinst_aws_elastigroup:
+ account_id: act-92d45673
+ state: present
+ risk: 100
+ availability_vs_cost: balanced
+ availability_zones:
+ - name: us-west-2a
+ subnet_id: subnet-79da021e
+ image_id: ami-f173cc91
+ fallback_to_od: true
+ tags:
+ - Creator: ValueOfCreatorTag
+ - Environment: ValueOfEnvironmentTag
+ key_pair: spotinst-labs-oregon
+ max_size: 10
+ min_size: 0
+ target: 2
+ unit: instance
+ monitoring: true
+ name: ansible-group-1
+ on_demand_instance_type: c3.large
+ product: Linux/UNIX
+ security_group_ids:
+ - sg-46cdc13d
+ spot_instance_types:
+ - c3.large
+ target_tracking_policies:
+ - policy_name: target-tracking-1
+ namespace: AWS/EC2
+ metric_name: CPUUtilization
+ statistic: average
+ unit: percent
+ target: 50
+ cooldown: 120
+ do_not_update:
+ - image_id
+ register: result
+ - ansible.builtin.debug: var=result
+'''
+
+RETURN = '''
+---
+instances:
+ description: List of active elastigroup instances and their details.
+ returned: success
+ type: list
+ elements: dict
+ sample: [
+ {
+ "spotInstanceRequestId": "sir-regs25zp",
+ "instanceId": "i-09640ad8678234c",
+ "instanceType": "m4.large",
+ "product": "Linux/UNIX",
+ "availabilityZone": "us-west-2b",
+ "privateIp": "180.0.2.244",
+ "createdAt": "2017-07-17T12:46:18.000Z",
+ "status": "fulfilled"
+ }
+ ]
+group_id:
+ description: Created / Updated group's ID.
+ returned: success
+ type: str
+ sample: "sig-12345"
+
+'''
+
+HAS_SPOTINST_SDK = False
+
+import os
+import time
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ import spotinst_sdk as spotinst
+ from spotinst_sdk import SpotinstClientException
+
+ HAS_SPOTINST_SDK = True
+
+except ImportError:
+ pass
+
+eni_fields = ('description',
+ 'device_index',
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address',
+ 'delete_on_termination',
+ 'groups',
+ 'network_interface_id',
+ 'private_ip_address',
+ 'subnet_id',
+ 'associate_ipv6_address')
+
+private_ip_fields = ('private_ip_address',
+ 'primary')
+
+capacity_fields = (dict(ansible_field_name='min_size',
+ spotinst_field_name='minimum'),
+ dict(ansible_field_name='max_size',
+ spotinst_field_name='maximum'),
+ 'target',
+ 'unit')
+
+lspec_fields = ('user_data',
+ 'key_pair',
+ 'tenancy',
+ 'shutdown_script',
+ 'monitoring',
+ 'ebs_optimized',
+ 'image_id',
+ 'health_check_type',
+ 'health_check_grace_period',
+ 'health_check_unhealthy_duration_before_replacement',
+ 'security_group_ids')
+
+iam_fields = (dict(ansible_field_name='iam_role_name',
+ spotinst_field_name='name'),
+ dict(ansible_field_name='iam_role_arn',
+ spotinst_field_name='arn'))
+
+scheduled_task_fields = ('adjustment',
+ 'adjustment_percentage',
+ 'batch_size_percentage',
+ 'cron_expression',
+ 'frequency',
+ 'grace_period',
+ 'task_type',
+ 'is_enabled',
+ 'scale_target_capacity',
+ 'scale_min_capacity',
+ 'scale_max_capacity')
+
+scaling_policy_fields = ('policy_name',
+ 'namespace',
+ 'metric_name',
+ 'dimensions',
+ 'statistic',
+ 'evaluation_periods',
+ 'period',
+ 'threshold',
+ 'cooldown',
+ 'unit',
+ 'operator')
+
+tracking_policy_fields = ('policy_name',
+ 'namespace',
+ 'source',
+ 'metric_name',
+ 'statistic',
+ 'unit',
+ 'cooldown',
+ 'target',
+ 'threshold')
+
+action_fields = (dict(ansible_field_name='action_type',
+ spotinst_field_name='type'),
+ 'adjustment',
+ 'min_target_capacity',
+ 'max_target_capacity',
+ 'target',
+ 'minimum',
+ 'maximum')
+
+signal_fields = ('name',
+ 'timeout')
+
+multai_lb_fields = ('balancer_id',
+ 'project_id',
+ 'target_set_id',
+ 'az_awareness',
+ 'auto_weight')
+
+persistence_fields = ('should_persist_root_device',
+ 'should_persist_block_devices',
+ 'should_persist_private_ip')
+
+strategy_fields = ('risk',
+ 'utilize_reserved_instances',
+ 'fallback_to_od',
+ 'on_demand_count',
+ 'availability_vs_cost',
+ 'draining_timeout',
+ 'spin_up_time',
+ 'lifetime_period')
+
+ebs_fields = ('delete_on_termination',
+ 'encrypted',
+ 'iops',
+ 'snapshot_id',
+ 'volume_type',
+ 'volume_size')
+
+bdm_fields = ('device_name',
+ 'virtual_name',
+ 'no_device')
+
+kubernetes_fields = ('api_server',
+ 'token')
+
+right_scale_fields = ('account_id',
+ 'refresh_token')
+
+rancher_fields = ('access_key',
+ 'secret_key',
+ 'master_host',
+ 'version')
+
+chef_fields = ('chef_server',
+ 'organization',
+ 'user',
+ 'pem_key',
+ 'chef_version')
+
+az_fields = ('name',
+ 'subnet_id',
+ 'placement_group_name')
+
+opsworks_fields = ('layer_id',)
+
+scaling_strategy_fields = ('terminate_at_end_of_billing_hour',)
+
+mesosphere_fields = ('api_server',)
+
+ecs_fields = ('cluster_name',)
+
+multai_fields = ('multai_token',)
+
+
+def handle_elastigroup(client, module):
+ has_changed = False
+ group_id = None
+ message = 'None'
+
+ name = module.params.get('name')
+ state = module.params.get('state')
+ uniqueness_by = module.params.get('uniqueness_by')
+ external_group_id = module.params.get('id')
+
+ if uniqueness_by == 'id':
+ if external_group_id is None:
+ should_create = True
+ else:
+ should_create = False
+ group_id = external_group_id
+ else:
+ groups = client.get_elastigroups()
+ should_create, group_id = find_group_with_same_name(groups, name)
+
+ if should_create is True:
+ if state == 'present':
+ eg = expand_elastigroup(module, is_update=False)
+ group = client.create_elastigroup(group=eg)
+ group_id = group['id']
+ message = 'Created group successfully.'
+ module.debug(" [INFO] " + message)
+ has_changed = True
+
+ elif state == 'absent':
+ message = 'Cannot delete non-existent group.'
+ has_changed = False
+ else:
+ eg = expand_elastigroup(module, is_update=True)
+
+ if state == 'present':
+ group = client.update_elastigroup(group_update=eg, group_id=group_id)
+ message = 'Updated group successfully.'
+
+ try:
+ roll_config = module.params.get('roll_config')
+ if roll_config:
+ eg_roll = spotinst.aws_elastigroup.Roll(
+ batch_size_percentage=roll_config.get('batch_size_percentage'),
+ grace_period=roll_config.get('grace_period'),
+ health_check_type=roll_config.get('health_check_type')
+ )
+ roll_response = client.roll_group(group_roll=eg_roll, group_id=group_id)
+ message = 'Updated and started rolling the group successfully.'
+
+ except SpotinstClientException as exc:
+ message = 'Updated group successfully, but failed to perform roll. Error: ' + str(exc)
+ has_changed = True
+
+ elif state == 'absent':
+ try:
+ client.delete_elastigroup(group_id=group_id)
+ except SpotinstClientException as exc:
+ if "GROUP_DOESNT_EXIST" in exc.message:
+ pass
+ else:
+ module.fail_json(msg="Error while attempting to delete group : " + exc.message)
+
+ message = 'Deleted group successfully.'
+ has_changed = True
+
+ return group_id, message, has_changed
+
+
+def retrieve_group_instances(client, module, group_id):
+ wait_timeout = module.params.get('wait_timeout')
+ wait_for_instances = module.params.get('wait_for_instances')
+
+ health_check_type = module.params.get('health_check_type')
+
+ if wait_timeout is None:
+ wait_timeout = 300
+
+ wait_timeout = time.time() + wait_timeout
+ target = module.params.get('target')
+ state = module.params.get('state')
+ instances = list()
+
+ if state == 'present' and group_id is not None and wait_for_instances is True:
+
+ is_amount_fulfilled = False
+ while is_amount_fulfilled is False and wait_timeout > time.time():
+ instances = list()
+ amount_of_fulfilled_instances = 0
+
+ if health_check_type is not None:
+ healthy_instances = client.get_instance_healthiness(group_id=group_id)
+
+ for healthy_instance in healthy_instances:
+ if healthy_instance.get('healthStatus') == 'HEALTHY':
+ amount_of_fulfilled_instances += 1
+ instances.append(healthy_instance)
+
+ else:
+ active_instances = client.get_elastigroup_active_instances(group_id=group_id)
+
+ for active_instance in active_instances:
+ if active_instance.get('private_ip') is not None:
+ amount_of_fulfilled_instances += 1
+ instances.append(active_instance)
+
+ if amount_of_fulfilled_instances >= target:
+ is_amount_fulfilled = True
+
+ time.sleep(10)
+
+ return instances
+
+
+def find_group_with_same_name(groups, name):
+ for group in groups:
+ if group['name'] == name:
+ return False, group.get('id')
+
+ return True, None
+
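+# Note: when several groups share a name, the first match wins; use
+# uniqueness_by=id together with the id option to disambiguate.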
+
+def expand_elastigroup(module, is_update):
+ do_not_update = module.params['do_not_update']
+ name = module.params.get('name')
+
+ eg = spotinst.aws_elastigroup.Elastigroup()
+ description = module.params.get('description')
+
+ if name is not None:
+ eg.name = name
+ if description is not None:
+ eg.description = description
+
+ # Capacity
+ expand_capacity(eg, module, is_update, do_not_update)
+ # Strategy
+ expand_strategy(eg, module)
+ # Scaling
+ expand_scaling(eg, module)
+ # Third party integrations
+ expand_integrations(eg, module)
+ # Compute
+ expand_compute(eg, module, is_update, do_not_update)
+ # Multai
+ expand_multai(eg, module)
+ # Scheduling
+ expand_scheduled_tasks(eg, module)
+
+ return eg
+
+
+def expand_compute(eg, module, is_update, do_not_update):
+ elastic_ips = module.params['elastic_ips']
+ on_demand_instance_type = module.params.get('on_demand_instance_type')
+ spot_instance_types = module.params['spot_instance_types']
+ ebs_volume_pool = module.params['ebs_volume_pool']
+ availability_zones_list = module.params['availability_zones']
+ product = module.params.get('product')
+
+ eg_compute = spotinst.aws_elastigroup.Compute()
+
+ if product is not None:
+ # Only put product on group creation
+ if is_update is not True:
+ eg_compute.product = product
+
+ if elastic_ips is not None:
+ eg_compute.elastic_ips = elastic_ips
+
+ if on_demand_instance_type is not None or spot_instance_types is not None:
+ eg_instance_types = spotinst.aws_elastigroup.InstanceTypes()
+
+ if spot_instance_types is not None:
+ eg_instance_types.spot = spot_instance_types
+ if on_demand_instance_type is not None:
+ eg_instance_types.ondemand = on_demand_instance_type
+
+ if eg_instance_types.spot is not None or eg_instance_types.ondemand is not None:
+ eg_compute.instance_types = eg_instance_types
+
+ expand_ebs_volume_pool(eg_compute, ebs_volume_pool)
+
+ eg_compute.availability_zones = expand_list(availability_zones_list, az_fields, 'AvailabilityZone')
+
+ expand_launch_spec(eg_compute, module, is_update, do_not_update)
+
+ eg.compute = eg_compute
+
+
+def expand_ebs_volume_pool(eg_compute, ebs_volumes_list):
+ if ebs_volumes_list is not None:
+ eg_volumes = []
+
+ for volume in ebs_volumes_list:
+ eg_volume = spotinst.aws_elastigroup.EbsVolume()
+
+ if volume.get('device_name') is not None:
+ eg_volume.device_name = volume.get('device_name')
+ if volume.get('volume_ids') is not None:
+ eg_volume.volume_ids = volume.get('volume_ids')
+
+ if eg_volume.device_name is not None:
+ eg_volumes.append(eg_volume)
+
+ if len(eg_volumes) > 0:
+ eg_compute.ebs_volume_pool = eg_volumes
+
+
+def expand_launch_spec(eg_compute, module, is_update, do_not_update):
+ eg_launch_spec = expand_fields(lspec_fields, module.params, 'LaunchSpecification')
+
+ if module.params['iam_role_arn'] is not None or module.params['iam_role_name'] is not None:
+ eg_launch_spec.iam_role = expand_fields(iam_fields, module.params, 'IamRole')
+
+ tags = module.params['tags']
+ load_balancers = module.params['load_balancers']
+ target_group_arns = module.params['target_group_arns']
+ block_device_mappings = module.params['block_device_mappings']
+ network_interfaces = module.params['network_interfaces']
+
+ if is_update is True:
+ if 'image_id' in do_not_update:
+ delattr(eg_launch_spec, 'image_id')
+
+ expand_tags(eg_launch_spec, tags)
+
+ expand_load_balancers(eg_launch_spec, load_balancers, target_group_arns)
+
+ expand_block_device_mappings(eg_launch_spec, block_device_mappings)
+
+ expand_network_interfaces(eg_launch_spec, network_interfaces)
+
+ eg_compute.launch_specification = eg_launch_spec
+
+
+def expand_integrations(eg, module):
+ rancher = module.params.get('rancher')
+ mesosphere = module.params.get('mesosphere')
+ ecs = module.params.get('ecs')
+ kubernetes = module.params.get('kubernetes')
+ right_scale = module.params.get('right_scale')
+ opsworks = module.params.get('opsworks')
+ chef = module.params.get('chef')
+
+ integration_exists = False
+
+ eg_integrations = spotinst.aws_elastigroup.ThirdPartyIntegrations()
+
+ if mesosphere is not None:
+ eg_integrations.mesosphere = expand_fields(mesosphere_fields, mesosphere, 'Mesosphere')
+ integration_exists = True
+
+ if ecs is not None:
+ eg_integrations.ecs = expand_fields(ecs_fields, ecs, 'EcsConfiguration')
+ integration_exists = True
+
+ if kubernetes is not None:
+ eg_integrations.kubernetes = expand_fields(kubernetes_fields, kubernetes, 'KubernetesConfiguration')
+ integration_exists = True
+
+ if right_scale is not None:
+ eg_integrations.right_scale = expand_fields(right_scale_fields, right_scale, 'RightScaleConfiguration')
+ integration_exists = True
+
+ if opsworks is not None:
+ eg_integrations.opsworks = expand_fields(opsworks_fields, opsworks, 'OpsWorksConfiguration')
+ integration_exists = True
+
+ if rancher is not None:
+ eg_integrations.rancher = expand_fields(rancher_fields, rancher, 'Rancher')
+ integration_exists = True
+
+ if chef is not None:
+ eg_integrations.chef = expand_fields(chef_fields, chef, 'ChefConfiguration')
+ integration_exists = True
+
+ if integration_exists:
+ eg.third_parties_integration = eg_integrations
+
+
+def expand_capacity(eg, module, is_update, do_not_update):
+ eg_capacity = expand_fields(capacity_fields, module.params, 'Capacity')
+
+ if is_update is True:
+ delattr(eg_capacity, 'unit')
+
+ if 'target' in do_not_update:
+ delattr(eg_capacity, 'target')
+
+ eg.capacity = eg_capacity
+
+
+def expand_strategy(eg, module):
+ persistence = module.params.get('persistence')
+ signals = module.params.get('signals')
+
+ eg_strategy = expand_fields(strategy_fields, module.params, 'Strategy')
+
+ terminate_at_end_of_billing_hour = module.params.get('terminate_at_end_of_billing_hour')
+
+ if terminate_at_end_of_billing_hour is not None:
+ eg_strategy.eg_scaling_strategy = expand_fields(scaling_strategy_fields,
+ module.params, 'ScalingStrategy')
+
+ if persistence is not None:
+ eg_strategy.persistence = expand_fields(persistence_fields, persistence, 'Persistence')
+
+ if signals is not None:
+ eg_signals = expand_list(signals, signal_fields, 'Signal')
+
+ if len(eg_signals) > 0:
+ eg_strategy.signals = eg_signals
+
+ eg.strategy = eg_strategy
+
+
+def expand_multai(eg, module):
+ multai_load_balancers = module.params.get('multai_load_balancers')
+
+ eg_multai = expand_fields(multai_fields, module.params, 'Multai')
+
+ if multai_load_balancers is not None:
+ eg_multai_load_balancers = expand_list(multai_load_balancers, multai_lb_fields, 'MultaiLoadBalancer')
+
+ if len(eg_multai_load_balancers) > 0:
+ eg_multai.balancers = eg_multai_load_balancers
+ eg.multai = eg_multai
+
+
+def expand_scheduled_tasks(eg, module):
+ scheduled_tasks = module.params.get('scheduled_tasks')
+
+ if scheduled_tasks is not None:
+ eg_scheduling = spotinst.aws_elastigroup.Scheduling()
+
+ eg_tasks = expand_list(scheduled_tasks, scheduled_task_fields, 'ScheduledTask')
+
+ if len(eg_tasks) > 0:
+ eg_scheduling.tasks = eg_tasks
+ eg.scheduling = eg_scheduling
+
+
+def expand_load_balancers(eg_launchspec, load_balancers, target_group_arns):
+ if load_balancers is not None or target_group_arns is not None:
+ eg_load_balancers_config = spotinst.aws_elastigroup.LoadBalancersConfig()
+ eg_total_lbs = []
+
+ if load_balancers is not None:
+ for elb_name in load_balancers:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if elb_name is not None:
+ eg_elb.name = elb_name
+ eg_elb.type = 'CLASSIC'
+ eg_total_lbs.append(eg_elb)
+
+ if target_group_arns is not None:
+ for target_arn in target_group_arns:
+ eg_elb = spotinst.aws_elastigroup.LoadBalancer()
+ if target_arn is not None:
+ eg_elb.arn = target_arn
+ eg_elb.type = 'TARGET_GROUP'
+ eg_total_lbs.append(eg_elb)
+
+ if len(eg_total_lbs) > 0:
+ eg_load_balancers_config.load_balancers = eg_total_lbs
+ eg_launchspec.load_balancers_config = eg_load_balancers_config
+
+
+def expand_tags(eg_launchspec, tags):
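+ # Tags arrive as a list of single-entry dicts, e.g. (from the EXAMPLES
+ # section) [{'Environment': 'someEnvValue'}]; each entry becomes a Tag
+ # with tag_key='Environment' and tag_value='someEnvValue'.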
+ if tags is not None:
+ eg_tags = []
+
+ for tag in tags:
+ eg_tag = spotinst.aws_elastigroup.Tag()
+ if tag:
+ eg_tag.tag_key, eg_tag.tag_value = list(tag.items())[0]
+
+ eg_tags.append(eg_tag)
+
+ if len(eg_tags) > 0:
+ eg_launchspec.tags = eg_tags
+
+
+def expand_block_device_mappings(eg_launchspec, bdms):
+ if bdms is not None:
+ eg_bdms = []
+
+ for bdm in bdms:
+ eg_bdm = expand_fields(bdm_fields, bdm, 'BlockDeviceMapping')
+
+ if bdm.get('ebs') is not None:
+ eg_bdm.ebs = expand_fields(ebs_fields, bdm.get('ebs'), 'EBS')
+
+ eg_bdms.append(eg_bdm)
+
+ if len(eg_bdms) > 0:
+ eg_launchspec.block_device_mappings = eg_bdms
+
+
+def expand_network_interfaces(eg_launchspec, enis):
+ if enis is not None:
+ eg_enis = []
+
+ for eni in enis:
+ eg_eni = expand_fields(eni_fields, eni, 'NetworkInterface')
+
+ eg_pias = expand_list(eni.get('private_ip_addresses'), private_ip_fields, 'PrivateIpAddress')
+
+ if eg_pias is not None:
+ eg_eni.private_ip_addresses = eg_pias
+
+ eg_enis.append(eg_eni)
+
+ if len(eg_enis) > 0:
+ eg_launchspec.network_interfaces = eg_enis
+
+
+def expand_scaling(eg, module):
+ up_scaling_policies = module.params['up_scaling_policies']
+ down_scaling_policies = module.params['down_scaling_policies']
+ target_tracking_policies = module.params['target_tracking_policies']
+
+ eg_scaling = spotinst.aws_elastigroup.Scaling()
+
+ if up_scaling_policies is not None:
+ eg_up_scaling_policies = expand_scaling_policies(up_scaling_policies)
+ if len(eg_up_scaling_policies) > 0:
+ eg_scaling.up = eg_up_scaling_policies
+
+ if down_scaling_policies is not None:
+ eg_down_scaling_policies = expand_scaling_policies(down_scaling_policies)
+ if len(eg_down_scaling_policies) > 0:
+ eg_scaling.down = eg_down_scaling_policies
+
+ if target_tracking_policies is not None:
+ eg_target_tracking_policies = expand_target_tracking_policies(target_tracking_policies)
+ if len(eg_target_tracking_policies) > 0:
+ eg_scaling.target = eg_target_tracking_policies
+
+ if eg_scaling.down is not None or eg_scaling.up is not None or eg_scaling.target is not None:
+ eg.scaling = eg_scaling
+
+
+def expand_list(items, fields, class_name):
+ if items is not None:
+ new_objects_list = []
+ for item in items:
+ new_obj = expand_fields(fields, item, class_name)
+ new_objects_list.append(new_obj)
+
+ return new_objects_list
+
+
+def expand_fields(fields, item, class_name):
+ class_ = getattr(spotinst.aws_elastigroup, class_name)
+ new_obj = class_()
+
+ # Handle primitive fields
+ if item is not None:
+ for field in fields:
+ if isinstance(field, dict):
+ ansible_field_name = field['ansible_field_name']
+ spotinst_field_name = field['spotinst_field_name']
+ else:
+ ansible_field_name = field
+ spotinst_field_name = field
+ if item.get(ansible_field_name) is not None:
+ setattr(new_obj, spotinst_field_name, item.get(ansible_field_name))
+
+ return new_obj
+
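+# A rough usage sketch of expand_fields() (field names from capacity_fields
+# above): expand_fields(capacity_fields, {'min_size': 0, 'max_size': 5,
+# 'target': 2, 'unit': 'instance'}, 'Capacity') returns a Capacity object
+# with minimum=0, maximum=5, target=2 and unit='instance'; dict entries
+# rename an Ansible field to its SDK name, plain strings map one-to-one.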
+
+def expand_scaling_policies(scaling_policies):
+ eg_scaling_policies = []
+
+ for policy in scaling_policies:
+ eg_policy = expand_fields(scaling_policy_fields, policy, 'ScalingPolicy')
+ eg_policy.action = expand_fields(action_fields, policy, 'ScalingPolicyAction')
+ eg_scaling_policies.append(eg_policy)
+
+ return eg_scaling_policies
+
+
+def expand_target_tracking_policies(tracking_policies):
+ eg_tracking_policies = []
+
+ for policy in tracking_policies:
+ eg_policy = expand_fields(tracking_policy_fields, policy, 'TargetTrackingPolicy')
+ eg_tracking_policies.append(eg_policy)
+
+ return eg_tracking_policies
+
+
+def main():
+ fields = dict(
+ account_id=dict(type='str'),
+ availability_vs_cost=dict(type='str', required=True),
+ availability_zones=dict(type='list', elements='dict', required=True),
+ block_device_mappings=dict(type='list', elements='dict'),
+ chef=dict(type='dict'),
+ credentials_path=dict(type='path', default="~/.spotinst/credentials"),
+ do_not_update=dict(default=[], type='list', elements='str'),
+ down_scaling_policies=dict(type='list', elements='dict'),
+ draining_timeout=dict(type='int'),
+ ebs_optimized=dict(type='bool'),
+ ebs_volume_pool=dict(type='list', elements='dict'),
+ ecs=dict(type='dict'),
+ elastic_beanstalk=dict(type='dict'),
+ elastic_ips=dict(type='list', elements='str'),
+ fallback_to_od=dict(type='bool'),
+ id=dict(type='str'),
+ health_check_grace_period=dict(type='int'),
+ health_check_type=dict(type='str'),
+ health_check_unhealthy_duration_before_replacement=dict(type='int'),
+ iam_role_arn=dict(type='str'),
+ iam_role_name=dict(type='str'),
+ image_id=dict(type='str', required=True),
+ key_pair=dict(type='str', no_log=False),
+ kubernetes=dict(type='dict'),
+ lifetime_period=dict(type='int'),
+ load_balancers=dict(type='list', elements='str'),
+ max_size=dict(type='int', required=True),
+ mesosphere=dict(type='dict'),
+ min_size=dict(type='int', required=True),
+ monitoring=dict(type='str'),
+ multai_load_balancers=dict(type='list', elements='dict'),
+ multai_token=dict(type='str', no_log=True),
+ name=dict(type='str', required=True),
+ network_interfaces=dict(type='list', elements='dict'),
+ on_demand_count=dict(type='int'),
+ on_demand_instance_type=dict(type='str'),
+ opsworks=dict(type='dict'),
+ persistence=dict(type='dict'),
+ product=dict(type='str', required=True),
+ rancher=dict(type='dict'),
+ right_scale=dict(type='dict'),
+ risk=dict(type='int'),
+ roll_config=dict(type='dict'),
+ scheduled_tasks=dict(type='list', elements='dict'),
+ security_group_ids=dict(type='list', elements='str', required=True),
+ shutdown_script=dict(type='str'),
+ signals=dict(type='list', elements='dict'),
+ spin_up_time=dict(type='int'),
+ spot_instance_types=dict(type='list', elements='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ tags=dict(type='list', elements='dict'),
+ target=dict(type='int', required=True),
+ target_group_arns=dict(type='list', elements='str'),
+ tenancy=dict(type='str'),
+ terminate_at_end_of_billing_hour=dict(type='bool'),
+ token=dict(type='str', no_log=True),
+ unit=dict(type='str'),
+ user_data=dict(type='str'),
+ utilize_reserved_instances=dict(type='bool'),
+ uniqueness_by=dict(default='name', choices=['name', 'id']),
+ up_scaling_policies=dict(type='list', elements='dict'),
+ target_tracking_policies=dict(type='list', elements='dict'),
+ wait_for_instances=dict(type='bool', default=False),
+ wait_timeout=dict(type='int')
+ )
+
+ module = AnsibleModule(argument_spec=fields)
+
+ if not HAS_SPOTINST_SDK:
+ module.fail_json(msg="the Spotinst SDK library is required. (pip install spotinst_sdk)")
+
+ # Retrieve creds file variables
+ creds_file_loaded_vars = dict()
+
+ credentials_path = module.params.get('credentials_path')
+
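+ # The credentials file is expected to hold simple 'key = value' lines,
+ # for example (values illustrative):
+ #   token = <YOUR TOKEN>
+ #   account = act-1a9dd2b
+ # 'account' is only consulted when no account id is supplied elsewhere.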
+ try:
+ with open(credentials_path, "r") as creds:
+ for line in creds:
+ eq_index = line.find('=')
+ if eq_index < 0:
+ # Skip blank lines and lines without a 'key = value' pair.
+ continue
+ var_name = line[:eq_index].strip()
+ string_value = line[eq_index + 1:].strip()
+ creds_file_loaded_vars[var_name] = string_value
+ except IOError:
+ pass
+ # End of creds file retrieval
+
+ token = module.params.get('token')
+ if not token:
+ token = os.environ.get('SPOTINST_TOKEN')
+ if not token:
+ token = creds_file_loaded_vars.get("token")
+
+ account = module.params.get('account_id')
+ if not account:
+ account = os.environ.get('SPOTINST_ACCOUNT_ID') or os.environ.get('ACCOUNT')
+ if not account:
+ account = creds_file_loaded_vars.get("account")
+
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False)
+
+ if account is not None:
+ client = spotinst.SpotinstClient(auth_token=token, print_output=False, account_id=account)
+
+ group_id, message, has_changed = handle_elastigroup(client=client, module=module)
+
+ instances = retrieve_group_instances(client=client, module=module, group_id=group_id)
+
+ module.exit_json(changed=has_changed, group_id=group_id, message=message, instances=instances)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py b/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
new file mode 100644
index 000000000..32c1cd443
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ss_3par_cpg.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+short_description: Manage HPE StoreServ 3PAR CPG
+author:
+ - Farhan Nomani (@farhan7500)
+ - Gautham P Hegde (@gautamphegde)
+description:
+ - Create and delete CPG on HPE 3PAR.
+module: ss_3par_cpg
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ cpg_name:
+ description:
+ - Name of the CPG.
+ type: str
+ required: true
+ disk_type:
+ choices:
+ - FC
+ - NL
+ - SSD
+ description:
+ - Specifies that physical disks must have the specified device type.
+ type: str
+ domain:
+ description:
+ - Specifies the name of the domain in which the object will reside.
+ type: str
+ growth_increment:
+ description:
+ - Specifies the growth increment (in MiB, GiB or TiB), the amount of logical disk storage
+ created on each auto-grow operation.
+ type: str
+ growth_limit:
+ description:
+ - Specifies that the autogrow operation is limited to the specified
+ storage amount, which sets the growth limit (in MiB, GiB or TiB).
+ type: str
+ growth_warning:
+ description:
+ - Specifies the threshold (in MiB, GiB or TiB) of used logical disk space that, when exceeded,
+ results in a warning alert.
+ type: str
+ high_availability:
+ choices:
+ - PORT
+ - CAGE
+ - MAG
+ description:
+ - Specifies that the layout must support the failure of one port pair,
+ one cage, or one magazine.
+ type: str
+ raid_type:
+ choices:
+ - R0
+ - R1
+ - R5
+ - R6
+ description:
+ - Specifies the RAID type for the logical disk.
+ type: str
+ set_size:
+ description:
+ - Specifies the set size in the number of chunklets.
+ type: int
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the specified CPG should exist or not.
+ required: true
+ type: str
+ secure:
+ description:
+ - Specifies whether the certificate needs to be validated while communicating.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.hpe3par
+- community.general.attributes
+
+'''
+
+
+EXAMPLES = r'''
+- name: Create CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: present
+ cpg_name: sample_cpg
+ domain: sample_domain
+ growth_increment: 32000 MiB
+ growth_limit: 64000 MiB
+ growth_warning: 48000 MiB
+ raid_type: R6
+ set_size: 8
+ high_availability: MAG
+ disk_type: FC
+ secure: false
+
+- name: Delete CPG sample_cpg
+ community.general.ss_3par_cpg:
+ storage_system_ip: 10.10.10.1
+ storage_system_username: username
+ storage_system_password: password
+ state: absent
+ cpg_name: sample_cpg
+ secure: false
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+try:
+ from hpe3par_sdk import client
+ from hpe3parclient import exceptions
+ HAS_3PARCLIENT = True
+except ImportError:
+ HAS_3PARCLIENT = False
+
+
+def validate_set_size(raid_type, set_size):
+ # raid_type and set_size are declared required_together, so when no RAID
+ # type is requested there is nothing to validate.
+ if not raid_type:
+ return True
+ set_size_array = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
+ return set_size in set_size_array
+
+
+def cpg_ldlayout_map(ldlayout_dict):
+ if ldlayout_dict['RAIDType']:
+ ldlayout_dict['RAIDType'] = client.HPE3ParClient.RAID_MAP[
+ ldlayout_dict['RAIDType']]['raid_value']
+ if ldlayout_dict['HA']:
+ ldlayout_dict['HA'] = getattr(
+ client.HPE3ParClient, ldlayout_dict['HA'])
+ return ldlayout_dict
+
+
+def create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type):
+ try:
+ if not validate_set_size(raid_type, set_size):
+ return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
+ if not client_obj.cpgExists(cpg_name):
+
+ disk_patterns = []
+ if disk_type:
+ disk_type = getattr(client.HPE3ParClient, disk_type)
+ disk_patterns = [{'diskType': disk_type}]
+ ld_layout = {
+ 'RAIDType': raid_type,
+ 'setSize': set_size,
+ 'HA': high_availability,
+ 'diskPatterns': disk_patterns}
+ ld_layout = cpg_ldlayout_map(ld_layout)
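+ # The three growth options are size strings such as '32000 MiB' (see
+ # EXAMPLES); the hpe3par helper converts them for the MiB-denominated
+ # WSAPI fields below (e.g. growthIncrementMiB).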
+ if growth_increment is not None:
+ growth_increment = hpe3par.convert_to_binary_multiple(
+ growth_increment)
+ if growth_limit is not None:
+ growth_limit = hpe3par.convert_to_binary_multiple(
+ growth_limit)
+ if growth_warning is not None:
+ growth_warning = hpe3par.convert_to_binary_multiple(
+ growth_warning)
+ optional = {
+ 'domain': domain,
+ 'growthIncrementMiB': growth_increment,
+ 'growthLimitMiB': growth_limit,
+ 'usedLDWarningAlertMiB': growth_warning,
+ 'LDLayout': ld_layout}
+ client_obj.createCPG(cpg_name, optional)
+ else:
+ return (True, False, "CPG already present")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG creation failed | %s" % (e))
+ return (True, True, "Created CPG %s successfully." % cpg_name)
+
+
+def delete_cpg(
+ client_obj,
+ cpg_name):
+ try:
+ if client_obj.cpgExists(cpg_name):
+ client_obj.deleteCPG(cpg_name)
+ else:
+ return (True, False, "CPG does not exist")
+ except exceptions.ClientException as e:
+ return (False, False, "CPG delete failed | %s" % e)
+ return (True, True, "Deleted CPG %s successfully." % cpg_name)
+
+
+def main():
+ module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+ if not HAS_3PARCLIENT:
+ module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
+
+ if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
+ module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
+
+ storage_system_ip = module.params["storage_system_ip"]
+ storage_system_username = module.params["storage_system_username"]
+ storage_system_password = module.params["storage_system_password"]
+ cpg_name = module.params["cpg_name"]
+ domain = module.params["domain"]
+ growth_increment = module.params["growth_increment"]
+ growth_limit = module.params["growth_limit"]
+ growth_warning = module.params["growth_warning"]
+ raid_type = module.params["raid_type"]
+ set_size = module.params["set_size"]
+ high_availability = module.params["high_availability"]
+ disk_type = module.params["disk_type"]
+ secure = module.params["secure"]
+
+ wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
+ try:
+ client_obj = client.HPE3ParClient(wsapi_url, secure)
+ except exceptions.SSLCertFailed:
+ module.fail_json(msg="SSL Certificate Failed")
+ except exceptions.ConnectionError:
+ module.fail_json(msg="Connection Error")
+ except exceptions.UnsupportedVersion:
+ module.fail_json(msg="Unsupported WSAPI version")
+ except Exception as e:
+ module.fail_json(msg="Initializing client failed. %s" % e)
+
+ if storage_system_username is None or storage_system_password is None:
+ module.fail_json(msg="Storage system username or password is None")
+ if cpg_name is None:
+ module.fail_json(msg="CPG Name is None")
+
+ # States
+ if module.params["state"] == "present":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = create_cpg(
+ client_obj,
+ cpg_name,
+ domain,
+ growth_increment,
+ growth_limit,
+ growth_warning,
+ raid_type,
+ set_size,
+ high_availability,
+ disk_type
+ )
+ except Exception as e:
+ module.fail_json(msg="CPG create failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ elif module.params["state"] == "absent":
+ try:
+ client_obj.login(storage_system_username, storage_system_password)
+ return_status, changed, msg = delete_cpg(
+ client_obj,
+ cpg_name
+ )
+ except Exception as e:
+            module.fail_json(msg="CPG delete failed | %s" % e)
+ finally:
+ client_obj.logout()
+
+ if return_status:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/ssh_config.py b/ansible_collections/community/general/plugins/modules/ssh_config.py
new file mode 100644
index 000000000..672ac8c47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ssh_config.py
@@ -0,0 +1,341 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Björn Andersson
+# Copyright (c) 2021, Ansible Project
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ssh_config
+short_description: Manage SSH config for user
+version_added: '2.0.0'
+description:
+ - Configures SSH hosts with special C(IdentityFile)s and hostnames.
+author:
+ - Björn Andersson (@gaqzi)
+ - Abhijeet Kasurde (@Akasurde)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Whether a host entry should exist or not.
+ default: present
+ choices: [ 'present', 'absent' ]
+ type: str
+ user:
+ description:
+ - Which user account this configuration file belongs to.
+ - If none given and I(ssh_config_file) is not specified, C(/etc/ssh/ssh_config) is used.
+ - If a user is given, C(~/.ssh/config) is used.
+ - Mutually exclusive with I(ssh_config_file).
+ type: str
+ group:
+ description:
+ - Which group this configuration file belongs to.
+ - If none given, I(user) is used.
+ type: str
+ host:
+ description:
+ - The endpoint this configuration is valid for.
+ - Can be an actual address on the internet or an alias that will
+ connect to the value of I(hostname).
+ required: true
+ type: str
+ hostname:
+ description:
+ - The actual host to connect to when connecting to the host defined.
+ type: str
+ port:
+ description:
+ - The actual port to connect to when connecting to the host defined.
+ type: str
+ remote_user:
+ description:
+ - Specifies the user to log in as.
+ type: str
+ identity_file:
+ description:
+ - The path to an identity file (SSH private key) that will be used
+ when connecting to this host.
+      - The file must exist and have mode C(0600) to be valid.
+ type: path
+ user_known_hosts_file:
+ description:
+ - Sets the user known hosts file option.
+ type: str
+ strict_host_key_checking:
+ description:
+ - Whether to strictly check the host key when doing connections to the remote host.
+ choices: [ 'yes', 'no', 'ask' ]
+ type: str
+ proxycommand:
+ description:
+ - Sets the C(ProxyCommand) option.
+ - Mutually exclusive with I(proxyjump).
+ type: str
+ proxyjump:
+ description:
+ - Sets the C(ProxyJump) option.
+ - Mutually exclusive with I(proxycommand).
+ type: str
+ version_added: 6.5.0
+ forward_agent:
+ description:
+ - Sets the C(ForwardAgent) option.
+ type: bool
+ version_added: 4.0.0
+ ssh_config_file:
+ description:
+ - SSH config file.
+ - If I(user) and this option are not specified, C(/etc/ssh/ssh_config) is used.
+ - Mutually exclusive with I(user).
+ type: path
+ host_key_algorithms:
+ description:
+ - Sets the C(HostKeyAlgorithms) option.
+ type: str
+ version_added: 6.1.0
+requirements:
+- paramiko
+'''
+
+EXAMPLES = r'''
+- name: Add a host in the configuration
+ community.general.ssh_config:
+ user: akasurde
+ host: "example.com"
+ hostname: "github.com"
+ identity_file: "/home/akasurde/.ssh/id_rsa"
+ port: '2223'
+ state: present
+
+- name: Delete a host from the configuration
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ state: absent
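+
+# A hedged sketch combining documented options; the host and bastion names
+# below are placeholders.
+- name: Add a host that connects through a jump host with agent forwarding
+  community.general.ssh_config:
+    user: akasurde
+    host: "internal.example.com"
+    proxyjump: "bastion.example.com"
+    forward_agent: true
+    state: present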
+'''
+
+RETURN = r'''
+hosts_added:
+  description: A list of hosts added.
+ returned: success
+ type: list
+ sample: ["example.com"]
+hosts_removed:
+  description: A list of hosts removed.
+ returned: success
+ type: list
+ sample: ["example.com"]
+hosts_changed:
+  description: A list of hosts changed.
+ returned: success
+ type: list
+ sample: ["example.com"]
+hosts_change_diff:
+  description: A list of diffs for changed hosts.
+ returned: on change
+ type: list
+ sample: [
+ {
+ "example.com": {
+ "new": {
+ "hostname": "github.com",
+ "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
+ "port": "2224"
+ },
+ "old": {
+ "hostname": "github.com",
+ "identityfile": ["/tmp/test_ssh_config/fake_id_rsa"],
+ "port": "2224"
+ }
+ }
+ }
+ ]
+'''
+
+import os
+
+from copy import deepcopy
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils._stormssh import ConfigParser, HAS_PARAMIKO, PARAMIKO_IMPORT_ERROR
+from ansible_collections.community.general.plugins.module_utils.ssh import determine_config_file
+
+
+class SSHConfig(object):
+ def __init__(self, module):
+ self.module = module
+ if not HAS_PARAMIKO:
+ module.fail_json(msg=missing_required_lib('PARAMIKO'), exception=PARAMIKO_IMPORT_ERROR)
+ self.params = module.params
+ self.user = self.params.get('user')
+ self.group = self.params.get('group') or self.user
+ self.host = self.params.get('host')
+ self.config_file = self.params.get('ssh_config_file')
+ self.identity_file = self.params['identity_file']
+ self.check_ssh_config_path()
+ try:
+ self.config = ConfigParser(self.config_file)
+ except FileNotFoundError:
+ self.module.fail_json(msg="Failed to find %s" % self.config_file)
+ self.config.load()
+
+ def check_ssh_config_path(self):
+ self.config_file = determine_config_file(self.user, self.config_file)
+
+ # See if the identity file exists or not, relative to the config file
+ if os.path.exists(self.config_file) and self.identity_file is not None:
+ dirname = os.path.dirname(self.config_file)
+ self.identity_file = os.path.join(dirname, self.identity_file)
+
+ if not os.path.exists(self.identity_file):
+ self.module.fail_json(msg='IdentityFile %s does not exist' % self.params['identity_file'])
+
+ def ensure_state(self):
+ hosts_result = self.config.search_host(self.host)
+ state = self.params['state']
+ args = dict(
+ hostname=self.params.get('hostname'),
+ port=self.params.get('port'),
+ identity_file=self.params.get('identity_file'),
+ user=self.params.get('remote_user'),
+ strict_host_key_checking=self.params.get('strict_host_key_checking'),
+ user_known_hosts_file=self.params.get('user_known_hosts_file'),
+ proxycommand=self.params.get('proxycommand'),
+ proxyjump=self.params.get('proxyjump'),
+ host_key_algorithms=self.params.get('host_key_algorithms'),
+ )
+
+ # Convert True / False to 'yes' / 'no' for usage in ssh_config
+ if self.params['forward_agent'] is True:
+ args['forward_agent'] = 'yes'
+ if self.params['forward_agent'] is False:
+ args['forward_agent'] = 'no'
+
+ config_changed = False
+ hosts_changed = []
+ hosts_change_diff = []
+ hosts_removed = []
+ hosts_added = []
+
+ hosts_result = [host for host in hosts_result if host['host'] == self.host]
+
+ if hosts_result:
+ for host in hosts_result:
+ if state == 'absent':
+ # Delete host from the configuration
+ config_changed = True
+ hosts_removed.append(host['host'])
+ self.config.delete_host(host['host'])
+ else:
+ # Update host in the configuration
+ changed, options = self.change_host(host['options'], **args)
+
+ if changed:
+ config_changed = True
+ self.config.update_host(host['host'], options)
+ hosts_changed.append(host['host'])
+ hosts_change_diff.append({
+ host['host']: {
+ 'old': host['options'],
+ 'new': options,
+ }
+ })
+ elif state == 'present':
+ changed, options = self.change_host(dict(), **args)
+
+ if changed:
+ config_changed = True
+ hosts_added.append(self.host)
+ self.config.add_host(self.host, options)
+
+ if config_changed and not self.module.check_mode:
+ try:
+ self.config.write_to_ssh_config()
+ except PermissionError as perm_exec:
+ self.module.fail_json(
+ msg="Failed to write to %s due to permission issue: %s" % (self.config_file, to_native(perm_exec)))
+ # Make sure we set the permission
+ perm_mode = '0600'
+ if self.config_file == '/etc/ssh/ssh_config':
+ perm_mode = '0644'
+ self.module.set_mode_if_different(self.config_file, perm_mode, False)
+ # Make sure the file is owned by the right user and group
+ self.module.set_owner_if_different(self.config_file, self.user, False)
+ self.module.set_group_if_different(self.config_file, self.group, False)
+
+ self.module.exit_json(changed=config_changed,
+ hosts_changed=hosts_changed,
+ hosts_removed=hosts_removed,
+ hosts_change_diff=hosts_change_diff,
+ hosts_added=hosts_added)
+
+ @staticmethod
+ def change_host(options, **kwargs):
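+        # Operate on a copy so the caller's dict is untouched; option keys are
+        # compared without underscores because the parsed config stores them
+        # that way (for example identity_file -> identityfile).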
+ options = deepcopy(options)
+ changed = False
+ for k, v in kwargs.items():
+ if '_' in k:
+ k = k.replace('_', '')
+
+ if not v:
+ if options.get(k):
+ del options[k]
+ changed = True
+ elif options.get(k) != v and not (isinstance(options.get(k), list) and v in options.get(k)):
+ options[k] = v
+ changed = True
+
+ return changed, options
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ group=dict(default=None, type='str'),
+ host=dict(type='str', required=True),
+ hostname=dict(type='str'),
+ host_key_algorithms=dict(type='str', no_log=False),
+ identity_file=dict(type='path'),
+ port=dict(type='str'),
+ proxycommand=dict(type='str', default=None),
+ proxyjump=dict(type='str', default=None),
+ forward_agent=dict(type='bool'),
+ remote_user=dict(type='str'),
+ ssh_config_file=dict(default=None, type='path'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ strict_host_key_checking=dict(
+ default=None,
+ choices=['yes', 'no', 'ask']
+ ),
+ user=dict(default=None, type='str'),
+ user_known_hosts_file=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['user', 'ssh_config_file'],
+ ['proxycommand', 'proxyjump'],
+ ],
+ )
+
+ ssh_config_obj = SSHConfig(module)
+ ssh_config_obj.ensure_state()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/stackdriver.py b/ansible_collections/community/general/plugins/modules/stackdriver.py
new file mode 100644
index 000000000..cf7cb2f47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/stackdriver.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: stackdriver
+short_description: Send code deploy and annotation events to Stackdriver
+description:
+  - Send code deploy and annotation events to Stackdriver.
+author: "Ben Whaley (@bwhaley)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ key:
+ type: str
+ description:
+ - API key.
+ required: true
+ event:
+ type: str
+ description:
+      - The type of event to send, either C(annotation) or C(deploy).
+ choices: ['annotation', 'deploy']
+ required: true
+ revision_id:
+ type: str
+ description:
+      - The revision of the code that was deployed. Required for deploy events.
+ deployed_by:
+ type: str
+ description:
+      - The person or robot responsible for deploying the code.
+ default: "Ansible"
+ deployed_to:
+ type: str
+ description:
+      - "The environment the code was deployed to (for example development, staging, or production)."
+ repository:
+ type: str
+ description:
+      - The repository (or project) deployed.
+ msg:
+ type: str
+ description:
+ - The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
+ annotated_by:
+ type: str
+ description:
+      - The person or robot to whom the annotation should be attributed.
+ default: "Ansible"
+ level:
+ type: str
+ description:
+      - One of C(INFO)/C(WARN)/C(ERROR); defaults to C(INFO) if not supplied. May affect display.
+ choices: ['INFO', 'WARN', 'ERROR']
+ default: 'INFO'
+ instance_id:
+ type: str
+ description:
+      - ID of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown.
+ event_epoch:
+ type: str
+ description:
+      - "Unix timestamp of where the event should appear in the timeline; defaults to now. Be careful with this."
+'''
+
+EXAMPLES = '''
+- name: Send a code deploy event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: deploy
+ deployed_to: production
+ deployed_by: leeroyjenkins
+ repository: MyWebApp
+ revision_id: abcd123
+
+- name: Send an annotation event to stackdriver
+ community.general.stackdriver:
+ key: AAAAAA
+ event: annotation
+ msg: Greetings from Ansible
+ annotated_by: leeroyjenkins
+ level: WARN
+ instance_id: i-abcd1234
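+
+# A hedged sketch: the epoch value below is a placeholder showing how to
+# back-date an annotation on the timeline.
+- name: Send a back-dated annotation event to stackdriver
+  community.general.stackdriver:
+    key: AAAAAA
+    event: annotation
+    msg: Deployed build 1234
+    annotated_by: leeroyjenkins
+    event_epoch: "1430231233"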
+'''
+
+# ===========================================
+# Stackdriver module specific support methods.
+#
+
+import json
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import fetch_url
+
+
+def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
+ """Send a deploy event to Stackdriver"""
+ deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"
+
+ params = {}
+ params['revision_id'] = revision_id
+ params['deployed_by'] = deployed_by
+ if deployed_to:
+ params['deployed_to'] = deployed_to
+ if repository:
+ params['repository'] = repository
+
+ return do_send_request(module, deploy_api, params, key)
+
+
+def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
+ """Send an annotation event to Stackdriver"""
+ annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"
+
+ params = {}
+ params['message'] = msg
+ if annotated_by:
+ params['annotated_by'] = annotated_by
+ if level:
+ params['level'] = level
+ if instance_id:
+ params['instance_id'] = instance_id
+ if event_epoch:
+ params['event_epoch'] = event_epoch
+
+ return do_send_request(module, annotation_api, params, key)
+
+
+def do_send_request(module, url, params, key):
+ data = json.dumps(params)
+ headers = {
+ 'Content-Type': 'application/json',
+ 'x-stackdriver-apikey': key
+ }
+ response, info = fetch_url(module, url, headers=headers, data=data, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="Unable to send msg: %s" % info['msg'])
+
+
+# ===========================================
+# Module execution.
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict( # @TODO add types
+ key=dict(required=True, no_log=True),
+ event=dict(required=True, choices=['deploy', 'annotation']),
+ msg=dict(),
+ revision_id=dict(),
+ annotated_by=dict(default='Ansible'),
+ level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
+ instance_id=dict(),
+ event_epoch=dict(), # @TODO int?
+ deployed_by=dict(default='Ansible'),
+ deployed_to=dict(),
+ repository=dict(),
+ ),
+ supports_check_mode=True
+ )
+
+ key = module.params["key"]
+ event = module.params["event"]
+
+ # Annotation params
+ msg = module.params["msg"]
+ annotated_by = module.params["annotated_by"]
+ level = module.params["level"]
+ instance_id = module.params["instance_id"]
+ event_epoch = module.params["event_epoch"]
+
+ # Deploy params
+ revision_id = module.params["revision_id"]
+ deployed_by = module.params["deployed_by"]
+ deployed_to = module.params["deployed_to"]
+ repository = module.params["repository"]
+
+ ##################################################################
+ # deploy requires revision_id
+ # annotation requires msg
+ # We verify these manually
+ ##################################################################
+
+ if event == 'deploy':
+ if not revision_id:
+ module.fail_json(msg="revision_id required for deploy events")
+ try:
+ send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
+ except Exception as e:
+            module.fail_json(msg="unable to send deploy event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ if event == 'annotation':
+ if not msg:
+ module.fail_json(msg="msg required for annotation events")
+ try:
+ send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
+ except Exception as e:
+            module.fail_json(msg="unable to send annotation event: %s" % to_native(e),
+ exception=traceback.format_exc())
+
+ changed = True
+ module.exit_json(changed=changed, deployed_by=deployed_by)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/stacki_host.py b/ansible_collections/community/general/plugins/modules/stacki_host.py
new file mode 100644
index 000000000..e286bc961
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/stacki_host.py
@@ -0,0 +1,303 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Hugh Ma <Hugh.Ma@flextronics.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: stacki_host
+short_description: Add or remove a host on a stacki front-end
+description:
+  - Use this module to add or remove hosts on a stacki front-end via its API.
+ - Information on stacki can be found at U(https://github.com/StackIQ/stacki).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the host to be added to Stacki.
+ required: true
+ type: str
+ stacki_user:
+ description:
+      - Username for authenticating with the Stacki API. If not specified, the environment variable C(stacki_user) is used instead.
+ required: true
+ type: str
+ stacki_password:
+ description:
+      - Password for authenticating with the Stacki API. If not
+        specified, the environment variable C(stacki_password) is used instead.
+ required: true
+ type: str
+ stacki_endpoint:
+ description:
+ - URL for the Stacki API Endpoint.
+ required: true
+ type: str
+ prim_intf_mac:
+ description:
+ - MAC Address for the primary PXE boot network interface.
+ - Currently not used by the module.
+ type: str
+ prim_intf_ip:
+ description:
+ - IP Address for the primary network interface.
+ - Currently not used by the module.
+ type: str
+ prim_intf:
+ description:
+ - Name of the primary network interface.
+ - Currently not used by the module.
+ type: str
+ force_install:
+ description:
+ - Set value to C(true) to force node into install state if it already exists in stacki.
+ type: bool
+ default: false
+ state:
+ description:
+ - Set value to the desired state for the specified host.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ appliance:
+ description:
+      - Appliance to be used in host creation.
+ - Required if I(state) is C(present) and host does not yet exist.
+ type: str
+ default: backend
+ rack:
+ description:
+ - Rack to be used in host creation.
+ - Required if I(state) is C(present) and host does not yet exist.
+ type: int
+ default: 0
+ rank:
+ description:
+ - Rank to be used in host creation.
+ - In Stacki terminology, the rank is the position of the machine in a rack.
+ - Required if I(state) is C(present) and host does not yet exist.
+ type: int
+ default: 0
+ network:
+ description:
+ - Network to be configured in the host.
+ - Currently not used by the module.
+ type: str
+ default: private
+author:
+- Hugh Ma (@bbyhuy) <Hugh.Ma@flextronics.com>
+'''
+
+EXAMPLES = '''
+- name: Add a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ prim_intf_mac: mac_addr
+ prim_intf_ip: x.x.x.x
+ prim_intf: eth0
+
+- name: Remove a host named test-1
+ community.general.stacki_host:
+ name: test-1
+ stacki_user: usr
+ stacki_password: pwd
+ stacki_endpoint: url
+ state: absent
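+
+# A hedged sketch: credentials and endpoint are placeholders, as above.
+- name: Force an existing host named test-1 back into the install state
+  community.general.stacki_host:
+    name: test-1
+    stacki_user: usr
+    stacki_password: pwd
+    stacki_endpoint: url
+    force_install: true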
+'''
+
+RETURN = '''
+changed:
+  description: Whether or not the API call completed successfully.
+ returned: always
+ type: bool
+ sample: true
+
+stdout:
+  description: The set of responses from the commands.
+ returned: always
+ type: list
+ sample: ['...', '...']
+
+stdout_lines:
+  description: The value of C(stdout) split into a list.
+ returned: always
+ type: list
+ sample: [['...', '...'], ['...'], ['...']]
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+class StackiHost(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.hostname = module.params['name']
+ self.rack = module.params['rack']
+ self.rank = module.params['rank']
+ self.appliance = module.params['appliance']
+ self.prim_intf = module.params['prim_intf']
+ self.prim_intf_ip = module.params['prim_intf_ip']
+ self.network = module.params['network']
+ self.prim_intf_mac = module.params['prim_intf_mac']
+ self.endpoint = module.params['stacki_endpoint']
+
+ auth_creds = {'USERNAME': module.params['stacki_user'],
+ 'PASSWORD': module.params['stacki_password']}
+
+ # Get Initial CSRF
+ cred_a = self.do_request(self.endpoint, method="GET")
+ cookie_a = cred_a.headers.get('Set-Cookie').split(';')
+ init_csrftoken = None
+ for c in cookie_a:
+ if "csrftoken" in c:
+ init_csrftoken = c.replace("csrftoken=", "")
+ init_csrftoken = init_csrftoken.rstrip("\r\n")
+ break
+
+ # Make Header Dictionary with initial CSRF
+ header = {'csrftoken': init_csrftoken, 'X-CSRFToken': init_csrftoken,
+ 'Content-type': 'application/x-www-form-urlencoded', 'Cookie': cred_a.headers.get('Set-Cookie')}
+
+ # Endpoint to get final authentication header
+ login_endpoint = self.endpoint + "/login"
+
+ # Get Final CSRF and Session ID
+ login_req = self.do_request(login_endpoint, headers=header, payload=urlencode(auth_creds), method='POST')
+
+ cookie_f = login_req.headers.get('Set-Cookie').split(';')
+ csrftoken = None
+ for f in cookie_f:
+ if "csrftoken" in f:
+ csrftoken = f.replace("csrftoken=", "")
+ if "sessionid" in f:
+                sessionid = f.split("sessionid=", 1)[-1]
+ sessionid = sessionid.rstrip("\r\n")
+
+ self.header = {'csrftoken': csrftoken,
+ 'X-CSRFToken': csrftoken,
+ 'sessionid': sessionid,
+ 'Content-type': 'application/json',
+ 'Cookie': login_req.headers.get('Set-Cookie')}
+
+ def do_request(self, url, payload=None, headers=None, method=None):
+ res, info = fetch_url(self.module, url, data=payload, headers=headers, method=method)
+
+ if info['status'] != 200:
+ self.module.fail_json(changed=False, msg=info['msg'])
+
+ return res
+
+ def stack_check_host(self):
+ res = self.do_request(self.endpoint, payload=json.dumps({"cmd": "list host"}), headers=self.header, method="POST")
+ return self.hostname in res.read()
+
+ def stack_sync(self):
+ self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync config"}), headers=self.header, method="POST")
+ self.do_request(self.endpoint, payload=json.dumps({"cmd": "sync host config"}), headers=self.header, method="POST")
+
+ def stack_force_install(self, result):
+ data = {'cmd': "set host boot {0} action=install".format(self.hostname)}
+ self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+ changed = True
+
+ self.stack_sync()
+
+ result['changed'] = changed
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+    def stack_add(self, result):
+        data = dict()
+
+        data['cmd'] = "add host {0} rack={1} rank={2} appliance={3}"\
+            .format(self.hostname, self.rack, self.rank, self.appliance)
+        self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+        self.stack_sync()
+
+        result['changed'] = True
+        result['stdout'] = "api call successful".rstrip("\r\n")
+
+ def stack_remove(self, result):
+ data = dict()
+
+ data['cmd'] = "remove host {0}"\
+ .format(self.hostname)
+ self.do_request(self.endpoint, payload=json.dumps(data), headers=self.header, method="POST")
+
+ self.stack_sync()
+
+ result['changed'] = True
+ result['stdout'] = "api call successful".rstrip("\r\n")
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ name=dict(type='str', required=True),
+ rack=dict(type='int', default=0),
+ rank=dict(type='int', default=0),
+ appliance=dict(type='str', default='backend'),
+ prim_intf=dict(type='str'),
+ prim_intf_ip=dict(type='str'),
+ network=dict(type='str', default='private'),
+ prim_intf_mac=dict(type='str'),
+ stacki_user=dict(type='str', required=True, fallback=(env_fallback, ['stacki_user'])),
+ stacki_password=dict(type='str', required=True, fallback=(env_fallback, ['stacki_password']), no_log=True),
+ stacki_endpoint=dict(type='str', required=True, fallback=(env_fallback, ['stacki_endpoint'])),
+ force_install=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ )
+
+ result = {'changed': False}
+ missing_params = list()
+
+ stacki = StackiHost(module)
+ host_exists = stacki.stack_check_host()
+
+    # If state is present and the host exists, the force_install flag is needed to put the host back into the install state
+    if module.params['state'] == 'present' and host_exists and module.params['force_install']:
+        stacki.stack_force_install(result)
+    # If state is present and the host exists but force_install is false, do nothing
+    elif module.params['state'] == 'present' and host_exists and not module.params['force_install']:
+        result['stdout'] = "{0} already exists. Set 'force_install' to true to bootstrap"\
+            .format(module.params['name'])
+    # Otherwise, state is present but the host does not exist; more params are required to add the host
+ elif module.params['state'] == 'present' and not host_exists:
+ for param in ['appliance', 'rack', 'rank', 'prim_intf', 'prim_intf_ip', 'network', 'prim_intf_mac']:
+ if not module.params[param]:
+ missing_params.append(param)
+ if len(missing_params) > 0: # @FIXME replace with required_if
+ module.fail_json(msg="missing required arguments: {0}".format(missing_params))
+
+ stacki.stack_add(result)
+    # If state is absent and the host exists, remove it.
+ elif module.params['state'] == 'absent' and host_exists:
+ stacki.stack_remove(result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/statsd.py b/ansible_collections/community/general/plugins/modules/statsd.py
new file mode 100644
index 000000000..65d33b709
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/statsd.py
@@ -0,0 +1,179 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: statsd
+short_description: Send metrics to StatsD
+version_added: 2.1.0
+description:
+ - The C(statsd) module sends metrics to StatsD.
+ - For more information, see U(https://statsd-metrics.readthedocs.io/en/latest/).
+ - Supported metric types are C(counter) and C(gauge).
+    Currently unsupported metric types are C(timer), C(set), and C(gaugedelta).
+author: "Mark Mercado (@mamercad)"
+requirements:
+ - statsd
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+      - State of the metric, only C(present) makes sense.
+ choices: ["present"]
+ default: present
+ host:
+ type: str
+ default: localhost
+ description:
+ - StatsD host (hostname or IP) to send metrics to.
+ port:
+ type: int
+ default: 8125
+ description:
+      - The port on C(host) that StatsD is listening on.
+ protocol:
+ type: str
+ default: udp
+ choices: ["udp", "tcp"]
+ description:
+ - The transport protocol to send metrics over.
+ timeout:
+ type: float
+ default: 1.0
+ description:
+ - Sender timeout, only applicable if C(protocol) is C(tcp).
+ metric:
+ type: str
+ required: true
+ description:
+ - The name of the metric.
+ metric_type:
+ type: str
+ required: true
+ choices: ["counter", "gauge"]
+ description:
+ - The type of metric.
+ metric_prefix:
+ type: str
+ description:
+ - The prefix to add to the metric.
+ default: ''
+ value:
+ type: int
+ required: true
+ description:
+ - The value of the metric.
+ delta:
+ type: bool
+ default: false
+ description:
+ - If the metric is of type C(gauge), change the value by C(delta).
+'''
+
+EXAMPLES = '''
+- name: Increment the metric my_counter by 1
+ community.general.statsd:
+ host: localhost
+ port: 9125
+ protocol: tcp
+ metric: my_counter
+ metric_type: counter
+ value: 1
+
+- name: Set the gauge my_gauge to 7
+ community.general.statsd:
+ host: localhost
+ port: 9125
+ protocol: tcp
+ metric: my_gauge
+ metric_type: gauge
+ value: 7
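+
+# A hedged sketch: with delta=true the gauge is changed by the value rather
+# than set to it.
+- name: Increase the gauge my_gauge by 3
+  community.general.statsd:
+    host: localhost
+    port: 9125
+    protocol: tcp
+    metric: my_gauge
+    metric_type: gauge
+    value: 3
+    delta: true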
+'''
+
+
+from ansible.module_utils.basic import (AnsibleModule, missing_required_lib)
+
+try:
+ from statsd import StatsClient, TCPStatsClient
+ HAS_STATSD = True
+except ImportError:
+ HAS_STATSD = False
+
+
+def udp_statsd_client(**client_params):
+ return StatsClient(**client_params)
+
+
+def tcp_statsd_client(**client_params):
+ return TCPStatsClient(**client_params)
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present', choices=['present']),
+ host=dict(type='str', default='localhost'),
+ port=dict(type='int', default=8125),
+ protocol=dict(type='str', default='udp', choices=['udp', 'tcp']),
+ timeout=dict(type='float', default=1.0),
+ metric=dict(type='str', required=True),
+ metric_type=dict(type='str', required=True, choices=['counter', 'gauge']),
+ metric_prefix=dict(type='str', default=''),
+ value=dict(type='int', required=True),
+ delta=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False
+ )
+
+ if not HAS_STATSD:
+ module.fail_json(msg=missing_required_lib('statsd'))
+
+ host = module.params.get('host')
+ port = module.params.get('port')
+ protocol = module.params.get('protocol')
+ timeout = module.params.get('timeout')
+ metric = module.params.get('metric')
+ metric_type = module.params.get('metric_type')
+ metric_prefix = module.params.get('metric_prefix')
+ value = module.params.get('value')
+ delta = module.params.get('delta')
+
+ if protocol == 'udp':
+ client = udp_statsd_client(host=host, port=port, prefix=metric_prefix, maxudpsize=512, ipv6=False)
+ elif protocol == 'tcp':
+ client = tcp_statsd_client(host=host, port=port, timeout=timeout, prefix=metric_prefix, ipv6=False)
+
+ metric_name = '%s/%s' % (metric_prefix, metric) if metric_prefix else metric
+ metric_display_value = '%s (delta=%s)' % (value, delta) if metric_type == 'gauge' else value
+
+ try:
+ if metric_type == 'counter':
+ client.incr(metric, value)
+ elif metric_type == 'gauge':
+ client.gauge(metric, value, delta=delta)
+
+ except Exception as exc:
+        module.fail_json(msg='Failed sending to StatsD: %s' % str(exc))
+
+ finally:
+ if protocol == 'tcp':
+ client.close()
+
+ module.exit_json(msg="Sent %s %s -> %s to StatsD" % (metric_type, metric_name, str(metric_display_value)), changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/statusio_maintenance.py b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
new file mode 100644
index 000000000..31b422453
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/statusio_maintenance.py
@@ -0,0 +1,475 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: statusio_maintenance
+short_description: Create maintenance windows for your status.io dashboard
+description:
+    - Creates a maintenance window for status.io.
+    - Deletes a maintenance window for status.io.
+notes:
+    - You can use the apiary API URL (http://docs.statusio.apiary.io/) to
+      capture API traffic.
+    - Use I(start_date) and I(start_time) with I(minutes) to set a future maintenance window.
+author: Benjamin Copeland (@bhcopeland) <ben@copeland.me.uk>
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+    title:
+        type: str
+        description:
+            - A descriptive title for the maintenance window.
+        default: "A new maintenance window"
+    desc:
+        type: str
+        description:
+            - Message describing the maintenance window.
+        default: "Created by Ansible"
+    state:
+        type: str
+        description:
+            - Desired state of the maintenance window.
+        default: "present"
+        choices: ["present", "absent"]
+    api_id:
+        type: str
+        description:
+            - Your unique API ID from status.io.
+        required: true
+    api_key:
+        type: str
+        description:
+            - Your unique API Key from status.io.
+        required: true
+    statuspage:
+        type: str
+        description:
+            - Your unique StatusPage ID from status.io.
+        required: true
+ url:
+ type: str
+ description:
+ - Status.io API URL. A private apiary can be used instead.
+ default: "https://api.status.io"
+    components:
+        type: list
+        elements: str
+        description:
+            - The given name of your component (server name).
+        aliases: ['component']
+    containers:
+        type: list
+        elements: str
+        description:
+            - The given name of your container (data center).
+        aliases: ['container']
+    all_infrastructure_affected:
+        description:
+            - If it affects all components and containers.
+        type: bool
+        default: false
+    automation:
+        description:
+            - Automatically start and end the maintenance window.
+        type: bool
+        default: false
+    maintenance_notify_now:
+        description:
+            - Notify subscribers now.
+        type: bool
+        default: false
+    maintenance_notify_72_hr:
+        description:
+            - Notify subscribers 72 hours before maintenance start time.
+        type: bool
+        default: false
+    maintenance_notify_24_hr:
+        description:
+            - Notify subscribers 24 hours before maintenance start time.
+        type: bool
+        default: false
+    maintenance_notify_1_hr:
+        description:
+            - Notify subscribers 1 hour before maintenance start time.
+        type: bool
+        default: false
+    maintenance_id:
+        type: str
+        description:
+            - The maintenance id number when deleting a maintenance window.
+    minutes:
+        type: int
+        description:
+            - The length of time in UTC that the maintenance will run
+              (starting from playbook runtime).
+        default: 10
+    start_date:
+        type: str
+        description:
+            - Date maintenance is expected to start (Month/Day/Year) (UTC).
+            - End Date is worked out from I(start_date) + I(minutes).
+    start_time:
+        type: str
+        description:
+            - Time maintenance is expected to start (Hour:Minutes) (UTC).
+            - End Time is worked out from I(start_time) + I(minutes).
+'''
+
+EXAMPLES = '''
+- name: Create a maintenance window for 10 minutes on server1, with automation to stop the maintenance
+ community.general.statusio_maintenance:
+ title: Router Upgrade from ansible
+ desc: Performing a Router Upgrade
+ components: server1.example.com
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: true
+ automation: true
+
+- name: Create a maintenance window for 60 minutes on server1 and server2
+ community.general.statusio_maintenance:
+ title: Routine maintenance
+ desc: Some security updates
+ components:
+ - server1.example.com
+ - server2.example.com
+ minutes: 60
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ maintenance_notify_1_hr: true
+ automation: true
+ delegate_to: localhost
+
+- name: Create a future maintenance window for 24 hours to all hosts inside the Primary Data Center
+ community.general.statusio_maintenance:
+ title: Data center downtime
+ desc: Performing a Upgrade to our data center
+ components: Primary Data Center
+ api_id: api_id
+ api_key: api_key
+ statuspage: statuspage_id
+ start_date: 01/01/2016
+ start_time: 12:00
+ minutes: 1440
+
+- name: Delete a maintenance window
+ community.general.statusio_maintenance:
+ title: Remove a maintenance window
+ maintenance_id: 561f90faf74bc94a4700087b
+ statuspage: statuspage_id
+ api_id: api_id
+ api_key: api_key
+ state: absent
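+
+# A hedged sketch: the container name is a placeholder; note that containers
+# and components cannot be used together.
+- name: Create a 2 hour maintenance window for a whole container (data center)
+  community.general.statusio_maintenance:
+    title: Network fabric upgrade
+    desc: Switch firmware updates
+    containers: Primary Data Center
+    minutes: 120
+    api_id: api_id
+    api_key: api_key
+    statuspage: statuspage_id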
+
+'''
+# TODO: Add RETURN documentation.
+RETURN = ''' # '''
+
+import datetime
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.urls import open_url
+
+
+def get_api_auth_headers(api_id, api_key, url, statuspage):
+
+ headers = {
+ "x-api-id": api_id,
+ "x-api-key": api_key,
+ "Content-Type": "application/json"
+ }
+
+ try:
+ response = open_url(
+ url + "/v2/component/list/" + statuspage, headers=headers)
+ data = json.loads(response.read())
+ if data['status']['message'] == 'Authentication failed':
+ return 1, None, None, "Authentication failed: " \
+ "Check api_id/api_key and statuspage id."
+ else:
+ auth_headers = headers
+ auth_content = data
+ except Exception as e:
+ return 1, None, None, to_native(e)
+ return 0, auth_headers, auth_content, None
+
+
+def get_component_ids(auth_content, components):
+ host_ids = []
+ lower_components = [x.lower() for x in components]
+ for result in auth_content["result"]:
+ if result['name'].lower() in lower_components:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_components.remove(result['name'].lower())
+ if len(lower_components):
+ # items not found in the api
+ return 1, None, lower_components
+ return 0, host_ids, None
+
+
+def get_container_ids(auth_content, containers):
+ host_ids = []
+ lower_containers = [x.lower() for x in containers]
+ for result in auth_content["result"]:
+ if result["containers"][0]["name"].lower() in lower_containers:
+ data = {
+ "component_id": result["_id"],
+ "container_id": result["containers"][0]["_id"]
+ }
+ host_ids.append(data)
+ lower_containers.remove(result["containers"][0]["name"].lower())
+
+ if len(lower_containers):
+ # items not found in the api
+ return 1, None, lower_containers
+ return 0, host_ids, None
+
+
+def get_date_time(start_date, start_time, minutes):
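+    # status.io expects dates as MM/DD/YYYY and times as HH:MM (24h, UTC);
+    # the end date/time is derived by adding 'minutes' to the start.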
+ returned_date = []
+ if start_date and start_time:
+ try:
+ datetime.datetime.strptime(start_date, '%m/%d/%Y')
+ returned_date.append(start_date)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_date format."
+ try:
+ datetime.datetime.strptime(start_time, '%H:%M')
+ returned_date.append(start_time)
+ except (NameError, ValueError):
+ return 1, None, "Not a valid start_time format."
+ try:
+ # Work out end date/time based on minutes
+ date_time_start = datetime.datetime.strptime(
+ start_time + start_date, '%H:%M%m/%d/%Y')
+ delta = date_time_start + datetime.timedelta(minutes=minutes)
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ except (NameError, ValueError):
+ return 1, None, "Couldn't work out a valid date"
+ else:
+ now = datetime.datetime.utcnow()
+ delta = now + datetime.timedelta(minutes=minutes)
+ # start_date
+ returned_date.append(now.strftime("%m/%d/%Y"))
+ returned_date.append(now.strftime("%H:%M"))
+ # end_date
+ returned_date.append(delta.strftime("%m/%d/%Y"))
+ returned_date.append(delta.strftime("%H:%M"))
+ return 0, returned_date, None
+
+
+def create_maintenance(auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation, title, desc,
+ returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr):
+ returned_dates = [[x] for x in returned_date]
+ component_id = []
+ container_id = []
+ for val in host_ids:
+ component_id.append(val['component_id'])
+ container_id.append(val['container_id'])
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "components": component_id,
+ "containers": container_id,
+ "all_infrastructure_affected": str(int(all_infrastructure_affected)),
+ "automation": str(int(automation)),
+ "maintenance_name": title,
+ "maintenance_details": desc,
+ "date_planned_start": returned_dates[0],
+ "time_planned_start": returned_dates[1],
+ "date_planned_end": returned_dates[2],
+ "time_planned_end": returned_dates[3],
+ "maintenance_notify_now": str(int(maintenance_notify_now)),
+ "maintenance_notify_72_hr": str(int(maintenance_notify_72_hr)),
+ "maintenance_notify_24_hr": str(int(maintenance_notify_24_hr)),
+ "maintenance_notify_1_hr": str(int(maintenance_notify_1_hr))
+ })
+ response = open_url(
+ url + "/v2/maintenance/schedule", data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+
+ if data["status"]["error"] == "yes":
+ return 1, None, data["status"]["message"]
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def delete_maintenance(auth_headers, url, statuspage, maintenance_id):
+ try:
+ values = json.dumps({
+ "statuspage_id": statuspage,
+ "maintenance_id": maintenance_id,
+ })
+ response = open_url(
+ url=url + "/v2/maintenance/delete",
+ data=values,
+ headers=auth_headers)
+ data = json.loads(response.read())
+ if data["status"]["error"] == "yes":
+ return 1, None, "Invalid maintenance_id"
+ except Exception as e:
+ return 1, None, to_native(e)
+ return 0, None, None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ api_id=dict(required=True),
+ api_key=dict(required=True, no_log=True),
+ statuspage=dict(required=True),
+ state=dict(required=False, default='present',
+ choices=['present', 'absent']),
+ url=dict(default='https://api.status.io', required=False),
+ components=dict(type='list', elements='str', required=False, default=None,
+ aliases=['component']),
+ containers=dict(type='list', elements='str', required=False, default=None,
+ aliases=['container']),
+ all_infrastructure_affected=dict(type='bool', default=False,
+ required=False),
+ automation=dict(type='bool', default=False, required=False),
+ title=dict(required=False, default='A new maintenance window'),
+ desc=dict(required=False, default='Created by Ansible'),
+ minutes=dict(type='int', required=False, default=10),
+ maintenance_notify_now=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_72_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_24_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_notify_1_hr=dict(type='bool', default=False,
+ required=False),
+ maintenance_id=dict(required=False, default=None),
+ start_date=dict(default=None, required=False),
+ start_time=dict(default=None, required=False)
+ ),
+ supports_check_mode=True,
+ )
+
+ api_id = module.params['api_id']
+ api_key = module.params['api_key']
+ statuspage = module.params['statuspage']
+ state = module.params['state']
+ url = module.params['url']
+ components = module.params['components']
+ containers = module.params['containers']
+ all_infrastructure_affected = module.params['all_infrastructure_affected']
+ automation = module.params['automation']
+ title = module.params['title']
+ desc = module.params['desc']
+ minutes = module.params['minutes']
+ maintenance_notify_now = module.params['maintenance_notify_now']
+ maintenance_notify_72_hr = module.params['maintenance_notify_72_hr']
+ maintenance_notify_24_hr = module.params['maintenance_notify_24_hr']
+ maintenance_notify_1_hr = module.params['maintenance_notify_1_hr']
+ maintenance_id = module.params['maintenance_id']
+ start_date = module.params['start_date']
+ start_time = module.params['start_time']
+
+ if state == "present":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+ auth_content = {}
+
+        if minutes or (start_time and start_date):
+ (rc, returned_date, error) = get_date_time(
+ start_date, start_time, minutes)
+ if rc != 0:
+ module.fail_json(msg="Failed to set date/time: %s" % error)
+
+ if not components and not containers:
+ return module.fail_json(msg="A Component or Container must be "
+ "defined")
+ elif components and containers:
+ return module.fail_json(msg="Components and containers cannot "
+ "be used together")
+ else:
+ if components:
+ (rc, host_ids, error) = get_component_ids(auth_content,
+ components)
+ if rc != 0:
+ module.fail_json(msg="Failed to find component %s" % error)
+
+ if containers:
+ (rc, host_ids, error) = get_container_ids(auth_content,
+ containers)
+ if rc != 0:
+ module.fail_json(msg="Failed to find container %s" % error)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, dummy, error) = create_maintenance(
+ auth_headers, url, statuspage, host_ids,
+ all_infrastructure_affected, automation,
+ title, desc, returned_date, maintenance_notify_now,
+ maintenance_notify_72_hr, maintenance_notify_24_hr,
+ maintenance_notify_1_hr)
+ if rc == 0:
+ module.exit_json(changed=True, result="Successfully created "
+ "maintenance")
+ else:
+ module.fail_json(msg="Failed to create maintenance: %s"
+ % error)
+
+ if state == "absent":
+
+ if api_id and api_key:
+ (rc, auth_headers, auth_content, error) = \
+ get_api_auth_headers(api_id, api_key, url, statuspage)
+ if rc != 0:
+ module.fail_json(msg="Failed to get auth keys: %s" % error)
+ else:
+ auth_headers = {}
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ (rc, dummy, error) = delete_maintenance(
+ auth_headers, url, statuspage, maintenance_id)
+ if rc == 0:
+ module.exit_json(
+ changed=True,
+ result="Successfully deleted maintenance"
+ )
+ else:
+ module.fail_json(
+ msg="Failed to delete maintenance: %s" % error)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sudoers.py b/ansible_collections/community/general/plugins/modules/sudoers.py
new file mode 100644
index 000000000..fd8289b1c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sudoers.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+
+# Copyright (c) 2019, Jon Ellis (@JonEllis) <ellis.jp@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: sudoers
+short_description: Manage sudoers files
+version_added: "4.3.0"
+description:
+ - This module allows for the manipulation of sudoers files.
+author:
+ - "Jon Ellis (@JonEllis) <ellis.jp@gmail.com>"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ commands:
+ description:
+ - The commands allowed by the sudoers rule.
+ - Multiple can be added by passing a list of commands.
+ - Use C(ALL) for all commands.
+ type: list
+ elements: str
+ group:
+ description:
+ - The name of the group for the sudoers rule.
+ - This option cannot be used in conjunction with I(user).
+ type: str
+ name:
+ required: true
+ description:
+ - The name of the sudoers rule.
+ - This will be used for the filename for the sudoers file managed by this rule.
+ type: str
+ nopassword:
+ description:
+ - Whether a password will be required to run the sudo'd command.
+ default: true
+ type: bool
+ setenv:
+ description:
+      - Whether to allow keeping the environment when the command is run with sudo.
+ default: false
+ type: bool
+ version_added: 6.3.0
+ host:
+ description:
+ - Specify the host the rule is for.
+ default: ALL
+ type: str
+ version_added: 6.2.0
+ runas:
+ description:
+ - Specify the target user the command(s) will run as.
+ type: str
+ version_added: 4.7.0
+ sudoers_path:
+ description:
+ - The path which sudoers config files will be managed in.
+ default: /etc/sudoers.d
+ type: str
+ state:
+ default: "present"
+ choices:
+ - present
+ - absent
+ description:
+ - Whether the rule should exist or not.
+ type: str
+ user:
+ description:
+ - The name of the user for the sudoers rule.
+ - This option cannot be used in conjunction with I(group).
+ type: str
+ validation:
+ description:
+ - If C(absent), the sudoers rule will be added without validation.
+ - If C(detect) and visudo is available, then the sudoers rule will be validated by visudo.
+ - If C(required), visudo must be available to validate the sudoers rule.
+ type: str
+ default: detect
+ choices: [ absent, detect, required ]
+ version_added: 5.2.0
+'''
+
+EXAMPLES = '''
+- name: Allow the backup user to sudo /usr/local/bin/backup
+ community.general.sudoers:
+ name: allow-backup
+ state: present
+ user: backup
+ commands: /usr/local/bin/backup
+
+- name: Allow the bob user to run any commands as alice with sudo -u alice
+ community.general.sudoers:
+ name: bob-do-as-alice
+ state: present
+ user: bob
+ runas: alice
+ commands: ALL
+
+- name: >-
+ Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics
+ without requiring a password on the host called webserver
+ community.general.sudoers:
+ name: monitor-app
+ group: monitoring
+ host: webserver
+ commands: /usr/local/bin/gather-app-metrics
+
+- name: >-
+ Allow the alice user to run sudo /bin/systemctl restart my-service or
+ sudo /bin/systemctl reload my-service, but a password is required
+ community.general.sudoers:
+ name: alice-service
+ user: alice
+ commands:
+ - /bin/systemctl restart my-service
+ - /bin/systemctl reload my-service
+ nopassword: false
+
+- name: Revoke the previous sudo grants given to the alice user
+ community.general.sudoers:
+ name: alice-service
+ state: absent
+
+- name: Allow alice to sudo /usr/local/bin/upload and keep env variables
+ community.general.sudoers:
+ name: allow-alice-upload
+ user: alice
+ commands: /usr/local/bin/upload
+ setenv: true
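+
+# A hedged sketch: validation=required needs visudo on the managed host and
+# refuses to write a rule that visudo rejects.
+- name: Allow the deploy user to restart nginx, validating the rule with visudo
+  community.general.sudoers:
+    name: deploy-nginx
+    user: deploy
+    commands: /bin/systemctl restart nginx
+    validation: required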
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+class Sudoers(object):
+
+ FILE_MODE = 0o440
+
+ def __init__(self, module):
+ self.module = module
+
+ self.check_mode = module.check_mode
+ self.name = module.params['name']
+ self.user = module.params['user']
+ self.group = module.params['group']
+ self.state = module.params['state']
+ self.nopassword = module.params['nopassword']
+ self.setenv = module.params['setenv']
+ self.host = module.params['host']
+ self.runas = module.params['runas']
+ self.sudoers_path = module.params['sudoers_path']
+ self.file = os.path.join(self.sudoers_path, self.name)
+ self.commands = module.params['commands']
+ self.validation = module.params['validation']
+
+ def write(self):
+ if self.check_mode:
+ return
+
+ with open(self.file, 'w') as f:
+ f.write(self.content())
+
+ os.chmod(self.file, self.FILE_MODE)
+
+ def delete(self):
+ if self.check_mode:
+ return
+
+ os.remove(self.file)
+
+ def exists(self):
+ return os.path.exists(self.file)
+
+ def matches(self):
+ with open(self.file, 'r') as f:
+ content_matches = f.read() == self.content()
+
+ current_mode = os.stat(self.file).st_mode & 0o777
+ mode_matches = current_mode == self.FILE_MODE
+
+ return content_matches and mode_matches
+
+ def content(self):
+ if self.user:
+ owner = self.user
+ elif self.group:
+ owner = '%{group}'.format(group=self.group)
+
+ commands_str = ', '.join(self.commands)
+ nopasswd_str = 'NOPASSWD:' if self.nopassword else ''
+ setenv_str = 'SETENV:' if self.setenv else ''
+ runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
+ return "{owner} {host}={runas}{nopasswd}{setenv} {commands}\n".format(
+ owner=owner,
+ host=self.host,
+ runas=runas_str,
+ nopasswd=nopasswd_str,
+ setenv=setenv_str,
+ commands=commands_str
+ )
+
+ def validate(self):
+ if self.validation == 'absent':
+ return
+
+ visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required')
+ if visudo_path is None:
+ return
+
+ check_command = [visudo_path, '-c', '-f', '-']
+ rc, stdout, stderr = self.module.run_command(check_command, data=self.content())
+
+ if rc != 0:
+ raise Exception('Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout))
+
+ def run(self):
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ return True
+ else:
+ return False
+
+ self.validate()
+
+ if self.exists() and self.matches():
+ return False
+
+ self.write()
+ return True
+
+
+def main():
+ argument_spec = {
+ 'commands': {
+ 'type': 'list',
+ 'elements': 'str',
+ },
+ 'group': {},
+ 'name': {
+ 'required': True,
+ },
+ 'nopassword': {
+ 'type': 'bool',
+ 'default': True,
+ },
+ 'setenv': {
+ 'type': 'bool',
+ 'default': False,
+ },
+ 'host': {
+ 'type': 'str',
+ 'default': 'ALL',
+ },
+ 'runas': {
+ 'type': 'str',
+ 'default': None,
+ },
+ 'sudoers_path': {
+ 'type': 'str',
+ 'default': '/etc/sudoers.d',
+ },
+ 'state': {
+ 'default': 'present',
+ 'choices': ['present', 'absent'],
+ },
+ 'user': {},
+ 'validation': {
+ 'default': 'detect',
+ 'choices': ['absent', 'detect', 'required']
+ },
+ }
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ mutually_exclusive=[['user', 'group']],
+ supports_check_mode=True,
+ required_if=[('state', 'present', ['commands'])],
+ )
+
+ sudoers = Sudoers(module)
+
+ try:
+ changed = sudoers.run()
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/supervisorctl.py b/ansible_collections/community/general/plugins/modules/supervisorctl.py
new file mode 100644
index 000000000..e9df16108
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/supervisorctl.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Matt Wright <matt@nobien.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: supervisorctl
+short_description: Manage the state of a program or group of programs running via supervisord
+description:
+  - Manage the state of a program or group of programs running via supervisord.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+ - The name of the supervisord program or group to manage.
+      - The name will be taken as a group name when it ends with a colon I(:).
+ - Group support is only available in Ansible version 1.6 or later.
+ - If I(name=all), all programs and program groups will be managed.
+ required: true
+ config:
+ type: path
+ description:
+      - The supervisor configuration file path.
+ server_url:
+ type: str
+ description:
+      - URL on which supervisord server is listening.
+ username:
+ type: str
+ description:
+      - Username to use for authentication.
+ password:
+ type: str
+ description:
+      - Password to use for authentication.
+ state:
+ type: str
+ description:
+ - The desired state of program/group.
+ required: true
+ choices: [ "present", "started", "stopped", "restarted", "absent", "signalled" ]
+ signal:
+ type: str
+ description:
+      - The signal to send to the program/group, when combined with the C(signalled) state. Required when I(state=signalled).
+ supervisorctl_path:
+ type: path
+ description:
+      - Path to supervisorctl executable.
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart).
+ - When C(state) = I(absent), the module will call C(supervisorctl reread) then C(supervisorctl remove) to remove the target program/group.
+requirements: [ "supervisorctl" ]
+author:
+ - "Matt Wright (@mattupstate)"
+ - "Aaron Wang (@inetfuture) <inetfuture@gmail.com>"
+'''
+
+EXAMPLES = '''
+- name: Manage the state of program to be in started state
+ community.general.supervisorctl:
+ name: my_app
+ state: started
+
+- name: Manage the state of program group to be in started state
+ community.general.supervisorctl:
+ name: 'my_apps:'
+ state: started
+
+- name: Restart my_app, reading supervisorctl configuration from a specified file
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ config: /var/opt/my_project/supervisord.conf
+
+- name: Restart my_app, connecting to supervisord with credentials and server URL
+ community.general.supervisorctl:
+ name: my_app
+ state: restarted
+ username: test
+ password: testpass
+ server_url: http://localhost:9001
+
+- name: Send a signal to my_app via supervisorctl
+ community.general.supervisorctl:
+ name: my_app
+ state: signalled
+ signal: USR1
+
+- name: Restart all programs and program groups
+ community.general.supervisorctl:
+ name: all
+ state: restarted
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule, is_executable
+
+
+def main():
+ arg_spec = dict(
+ name=dict(type='str', required=True),
+ config=dict(type='path'),
+ server_url=dict(type='str'),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ supervisorctl_path=dict(type='path'),
+ state=dict(type='str', required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent', 'signalled']),
+ signal=dict(type='str'),
+ )
+
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True,
+ required_if=[('state', 'signalled', ['signal'])],
+ )
+
+ name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
+ state = module.params['state']
+ config = module.params.get('config')
+ server_url = module.params.get('server_url')
+ username = module.params.get('username')
+ password = module.params.get('password')
+ supervisorctl_path = module.params.get('supervisorctl_path')
+ signal = module.params.get('signal')
+
+ # we check error message for a pattern, so we need to make sure that's in C locale
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ if supervisorctl_path:
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
+ supervisorctl_args = [supervisorctl_path]
+ else:
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ else:
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
+
+ if config:
+ supervisorctl_args.extend(['-c', config])
+ if server_url:
+ supervisorctl_args.extend(['-s', server_url])
+ if username:
+ supervisorctl_args.extend(['-u', username])
+ if password:
+ supervisorctl_args.extend(['-p', password])
+
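+    # Helper for running a single supervisorctl action. supervisorctl joins its
+    # argv back into one command line before dispatching, so a multi-word action
+    # such as "signal USR1" can be appended as a single list element.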
+ def run_supervisorctl(cmd, name=None, **kwargs):
+ args = list(supervisorctl_args) # copy the master args
+ args.append(cmd)
+ if name:
+ args.append(name)
+ return module.run_command(args, **kwargs)
+
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
+ else:
+ if process_name != name and name != "all":
+ continue
+
+ matched.append((process_name, status))
+ return matched
+
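+    # Apply `action` to every matched process whose status passes status_filter,
+    # verify supervisorctl reported `expected_result` for each one, and exit the
+    # module with the resulting changed status.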
+ def take_action_on_processes(processes, status_filter, action, expected_result):
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
+
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+ if module.check_mode:
+ module.exit_json(changed=True)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name, check_rc=True)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
+
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
+
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update', check_rc=True)
+ processes = get_matched_processes()
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
+
+ processes = get_matched_processes()
+
+ if state == 'absent':
+ if len(processes) == 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('remove', name)
+ if '%s: removed process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ run_supervisorctl('reread', check_rc=True)
+ dummy, out, dummy = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
+ module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
+
+ # from this point onwards, if there are no matching processes, module cannot go on.
+ if len(processes) == 0:
+ module.fail_json(name=name, msg="ERROR (no such process)")
+
+ if state == 'started':
+ take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started')
+
+ if state == 'stopped':
+ take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped')
+
+ if state == 'signalled':
+ take_action_on_processes(processes, lambda s: s in ('RUNNING',), "signal %s" % signal, 'signalled')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/svc.py b/ansible_collections/community/general/plugins/modules/svc.py
new file mode 100644
index 000000000..bd2eaeb22
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/svc.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2015, Brian Coca <bcoca@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: svc
+author:
+- Brian Coca (@bcoca)
+short_description: Manage daemontools services
+description:
+ - Controls daemontools services on remote hosts using the svc utility.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the service to manage.
+ type: str
+ required: true
+ state:
+ description:
+    - C(started)/C(stopped) are idempotent actions that will not run
+      commands unless necessary. C(restarted) will always bounce the
+      svc (svc -t) and C(killed) will always bounce the svc (svc -k).
+      C(reloaded) will send a SIGUSR1 (svc -1).
+      C(once) will run a normally downed svc once (svc -o), not really
+      an idempotent operation.
+ type: str
+ choices: [ killed, once, reloaded, restarted, started, stopped ]
+ downed:
+ description:
+    - Whether a C(down) file should exist or not; if it exists, it disables auto startup.
+      Defaults to no. Downed does not imply stopped.
+ type: bool
+ enabled:
+ description:
+    - Whether the service is enabled or not; if disabled, it also implies stopped.
+      Take note that a service can be enabled and downed (no auto restart).
+ type: bool
+ service_dir:
+ description:
+    - Directory C(svscan) watches for services.
+ type: str
+ default: /service
+ service_src:
+ description:
+    - Directory where services are defined, the source of symlinks to I(service_dir).
+ type: str
+ default: /etc/service
+'''
+
+EXAMPLES = '''
+- name: Start svc dnscache, if not running
+ community.general.svc:
+ name: dnscache
+ state: started
+
+- name: Stop svc dnscache, if running
+ community.general.svc:
+ name: dnscache
+ state: stopped
+
+- name: Kill svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: killed
+
+- name: Restart svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: restarted
+
+- name: Reload svc dnscache, in all cases
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+
+- name: Using alternative svc directory location
+ community.general.svc:
+ name: dnscache
+ state: reloaded
+ service_dir: /var/service
+'''
+
+import os
+import re
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def _load_dist_subclass(cls, *args, **kwargs):
+ '''
+ Used for derivative implementations
+ '''
+ subclass = None
+
+ distro = kwargs['module'].params['distro']
+
+ # get the most specific superclass for this platform
+ if distro is not None:
+ for sc in cls.__subclasses__():
+ if sc.distro is not None and sc.distro == distro:
+ subclass = sc
+ if subclass is None:
+ subclass = cls
+
+ return super(cls, subclass).__new__(subclass)
+
+
+class Svc(object):
+ """
+ Main class that handles daemontools, can be subclassed and overridden in case
+ we want to use a 'derivative' like encore, s6, etc
+ """
+
+ # def __new__(cls, *args, **kwargs):
+ # return _load_dist_subclass(cls, args, kwargs)
+
+ def __init__(self, module):
+ self.extra_paths = ['/command', '/usr/local/bin']
+ self.report_vars = ['state', 'enabled', 'downed', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.service_dir = module.params['service_dir']
+ self.service_src = module.params['service_src']
+ self.enabled = None
+ self.downed = None
+ self.full_state = None
+ self.state = None
+ self.pid = None
+ self.duration = None
+
+ self.svc_cmd = module.get_bin_path('svc', opt_dirs=self.extra_paths)
+ self.svstat_cmd = module.get_bin_path('svstat', opt_dirs=self.extra_paths)
+ self.svc_full = '/'.join([self.service_dir, self.name])
+ self.src_full = '/'.join([self.service_src, self.name])
+
+ self.enabled = os.path.lexists(self.svc_full)
+ if self.enabled:
+ self.downed = os.path.lexists('%s/down' % self.svc_full)
+ self.get_status()
+ else:
+ self.downed = os.path.lexists('%s/down' % self.src_full)
+ self.state = 'stopped'
+
+ def enable(self):
+ if os.path.exists(self.src_full):
+ try:
+ os.symlink(self.src_full, self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
+ else:
+ self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
+
+ def disable(self):
+ try:
+ os.unlink(self.svc_full)
+ except OSError as e:
+ self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
+ self.execute_command([self.svc_cmd, '-dx', self.src_full])
+
+ src_log = '%s/log' % self.src_full
+ if os.path.exists(src_log):
+ self.execute_command([self.svc_cmd, '-dx', src_log])
+
+ def get_status(self):
+ rc, out, err = self.execute_command([self.svstat_cmd, self.svc_full])
+
+        if err:
+ self.full_state = self.state = err
+ else:
+ self.full_state = out
+
+ m = re.search(r'\(pid (\d+)\)', out)
+ if m:
+ self.pid = m.group(1)
+
+ m = re.search(r'(\d+) seconds', out)
+ if m:
+ self.duration = m.group(1)
+
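+        # svstat output contains ' up ' or ' down '; store the verb stem here and
+        # complete it below: ' want ' marks a pending transition ('starting' or
+        # 'stopping'), otherwise the state is settled ('started' or 'stopped').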
+ if re.search(' up ', out):
+ self.state = 'start'
+ elif re.search(' down ', out):
+ self.state = 'stopp'
+ else:
+ self.state = 'unknown'
+ return
+
+ if re.search(' want ', out):
+ self.state += 'ing'
+ else:
+ self.state += 'ed'
+
+ def start(self):
+ return self.execute_command([self.svc_cmd, '-u', self.svc_full])
+
+ def stopp(self):
+ return self.stop()
+
+ def stop(self):
+ return self.execute_command([self.svc_cmd, '-d', self.svc_full])
+
+ def once(self):
+ return self.execute_command([self.svc_cmd, '-o', self.svc_full])
+
+ def reload(self):
+ return self.execute_command([self.svc_cmd, '-1', self.svc_full])
+
+ def restart(self):
+ return self.execute_command([self.svc_cmd, '-t', self.svc_full])
+
+ def kill(self):
+ return self.execute_command([self.svc_cmd, '-k', self.svc_full])
+
+ def execute_command(self, cmd):
+ try:
+ rc, out, err = self.module.run_command(cmd)
+ except Exception as e:
+ self.module.fail_json(msg="failed to execute: %s" % to_native(e), exception=traceback.format_exc())
+ return (rc, out, err)
+
+ def report(self):
+ self.get_status()
+ states = {}
+ for k in self.report_vars:
+ states[k] = self.__dict__[k]
+ return states
+
+
+# ===========================================
+# Main control flow
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
+ enabled=dict(type='bool'),
+ downed=dict(type='bool'),
+ service_dir=dict(type='str', default='/service'),
+ service_src=dict(type='str', default='/etc/service'),
+ ),
+ supports_check_mode=True,
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ state = module.params['state']
+ enabled = module.params['enabled']
+ downed = module.params['downed']
+
+ svc = Svc(module)
+ changed = False
+ orig_state = svc.report()
+
+ if enabled is not None and enabled != svc.enabled:
+ changed = True
+ if not module.check_mode:
+ try:
+ if enabled:
+ svc.enable()
+ else:
+ svc.disable()
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change service link: %s" % to_native(e))
+
+    if state is not None and state != svc.state:
+        changed = True
+        if not module.check_mode:
+            # Map the requested state to its action method: 'started' -> start(),
+            # 'stopped' -> stopp() (an alias for stop()), and so on. 'once' is
+            # special-cased because stripping the 'ed' suffix does not apply to it.
+            action = state if state == 'once' else state[:-2]
+            getattr(svc, action)()
+
+ if downed is not None and downed != svc.downed:
+ changed = True
+ if not module.check_mode:
+ d_file = "%s/down" % svc.svc_full
+ try:
+ if downed:
+ open(d_file, "a").close()
+ else:
+ os.unlink(d_file)
+ except (OSError, IOError) as e:
+ module.fail_json(msg="Could not change downed file: %s " % (to_native(e)))
+
+ module.exit_json(changed=changed, svc=svc.report())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/svr4pkg.py b/ansible_collections/community/general/plugins/modules/svr4pkg.py
new file mode 100644
index 000000000..e8c410482
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/svr4pkg.py
@@ -0,0 +1,270 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012, Boyd Adamson <boyd () boydadamson.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: svr4pkg
+short_description: Manage Solaris SVR4 packages
+description:
+ - Manages SVR4 packages on Solaris 10 and 11.
+ - These were the native packages on Solaris <= 10 and are available
+ as a legacy feature in Solaris 11.
+ - Note that this is a very basic packaging system. It will not enforce
+ dependencies on install or remove.
+author: "Boyd Adamson (@brontitall)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - Package name, for example C(SUNWcsr).
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether to install (C(present)), or remove (C(absent)) a package.
+ - If the package is to be installed, then I(src) is required.
+ - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
+ required: true
+ choices: ["present", "absent"]
+ type: str
+
+ src:
+ description:
+ - Specifies the location to install the package from. Required when I(state=present).
+ - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
+ - If using a file or directory, they must already be accessible by the host. See the M(ansible.builtin.copy) module for a way to get them there.
+ type: str
+ proxy:
+ description:
+ - HTTP[s] proxy to be used if I(src) is a URL.
+ type: str
+ response_file:
+ description:
+ - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
+ required: false
+ type: str
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ type: str
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+- name: Install a package from an already copied file
+ community.general.svr4pkg:
+ name: CSWcommon
+ src: /tmp/cswpkgs.pkg
+ state: present
+
+- name: Install a package directly from an http site
+ community.general.svr4pkg:
+ name: CSWpkgutil
+ src: 'http://get.opencsw.org/now'
+ state: present
+ zone: current
+
+- name: Install a package with a response file
+ community.general.svr4pkg:
+ name: CSWggrep
+ src: /tmp/third-party.pkg
+ response_file: /tmp/ggrep.response
+ state: present
+
+- name: Ensure that a package is not installed
+ community.general.svr4pkg:
+ name: SUNWgnome-sound-recorder
+ state: absent
+
+- name: Ensure that a category is not installed
+ community.general.svr4pkg:
+ name: FIREFOX
+ state: absent
+ category: true
+'''
+
+
+import os
+import tempfile
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def package_installed(module, name, category):
+ cmd = [module.get_bin_path('pkginfo', True), '-q']
+ if category:
+ cmd.append('-c')
+ cmd.append(name)
+    rc, out, err = module.run_command(cmd)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
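+# Write a temporary admin file for pkgadd(1m)/pkgrm(1m) that pre-answers every
+# prompt, so package operations never block waiting for interactive input.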
+def create_admin_file():
+ (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
+ fullauto = b'''
+mail=
+instance=unique
+partial=nocheck
+runlevel=quit
+idepend=nocheck
+rdepend=nocheck
+space=quit
+setuid=nocheck
+conflict=nocheck
+action=nocheck
+networktimeout=60
+networkretries=3
+authentication=quit
+keystore=/var/sadm/security
+proxy=
+basedir=default
+'''
+ os.write(desc, fullauto)
+ os.close(desc)
+ return filename
+
+
+def run_command(module, cmd):
+ progname = cmd[0]
+ cmd[0] = module.get_bin_path(progname, True)
+ return module.run_command(cmd)
+
+
+def package_install(module, name, src, proxy, response_file, zone, category):
+ adminfile = create_admin_file()
+ cmd = ['pkgadd', '-n']
+ if zone == 'current':
+ cmd += ['-G']
+ cmd += ['-a', adminfile, '-d', src]
+ if proxy is not None:
+ cmd += ['-x', proxy]
+ if response_file is not None:
+ cmd += ['-r', response_file]
+ if category:
+ cmd += ['-Y']
+ cmd.append(name)
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def package_uninstall(module, name, src, category):
+ adminfile = create_admin_file()
+ if category:
+ cmd = ['pkgrm', '-na', adminfile, '-Y', name]
+ else:
+ cmd = ['pkgrm', '-na', adminfile, name]
+ (rc, out, err) = run_command(module, cmd)
+ os.unlink(adminfile)
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(required=True, choices=['present', 'absent']),
+ src=dict(default=None),
+ proxy=dict(default=None),
+ response_file=dict(default=None),
+ zone=dict(required=False, default='all', choices=['current', 'all']),
+ category=dict(default=False, type='bool')
+ ),
+ supports_check_mode=True
+ )
+ state = module.params['state']
+ name = module.params['name']
+ src = module.params['src']
+ proxy = module.params['proxy']
+ response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
+ rc = None
+ out = ''
+ err = ''
+ result = {}
+ result['name'] = name
+ result['state'] = state
+
+ if state == 'present':
+ if src is None:
+ module.fail_json(name=name,
+ msg="src is required when state=present")
+ if not package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
+ # Stdout is normally empty but for some packages can be
+ # very long and is not often useful
+ if len(out) > 75:
+ out = out[:75] + '...'
+
+ elif state == 'absent':
+ if package_installed(module, name, category):
+ if module.check_mode:
+ module.exit_json(changed=True)
+ (rc, out, err) = package_uninstall(module, name, src, category)
+ out = out[:75]
+
+ # Returncodes as per pkgadd(1m)
+ # 0 Successful completion
+ # 1 Fatal error.
+ # 2 Warning.
+ # 3 Interruption.
+ # 4 Administration.
+ # 5 Administration. Interaction is required. Do not use pkgadd -n.
+ # 10 Reboot after installation of all packages.
+ # 20 Reboot after installation of this package.
+ # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
+ if rc in (0, 2, 3, 10, 20):
+ result['changed'] = True
+ # no install nor uninstall, or failed
+ else:
+ result['changed'] = False
+
+ # rc will be none when the package already was installed and no action took place
+ # Only return failed=False when the returncode is known to be good as there may be more
+ # undocumented failure return codes
+ if rc not in (None, 0, 2, 10, 20):
+ result['failed'] = True
+ else:
+ result['failed'] = False
+
+ if out:
+ result['stdout'] = out
+ if err:
+ result['stderr'] = err
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/swdepot.py b/ansible_collections/community/general/plugins/modules/swdepot.py
new file mode 100644
index 000000000..c4660c70d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/swdepot.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Raul Melo
+# Written by Raul Melo <raulmelo@gmail.com>
+# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swdepot
+short_description: Manage packages with swdepot package manager (HP-UX)
+description:
+    - Will install, upgrade and remove packages with the swdepot package manager (HP-UX).
+notes: []
+author: "Raul Melo (@melodous)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - Package name.
+ aliases: [pkg]
+ required: true
+ type: str
+ state:
+ description:
+      - Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
+ required: true
+ choices: [ 'present', 'latest', 'absent']
+ type: str
+ depot:
+ description:
+      - The source repository from which to install or upgrade a package.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Install a package
+ community.general.swdepot:
+ name: unzip-6.0
+ state: present
+ depot: 'repository:/path'
+
+- name: Install the latest version of a package
+ community.general.swdepot:
+ name: unzip
+ state: latest
+ depot: 'repository:/path'
+
+- name: Remove a package
+ community.general.swdepot:
+ name: unzip
+ state: absent
+'''
+
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import shlex_quote
+
+
+def compare_package(version1, version2):
+ """ Compare version packages.
+ Return values:
+ -1 first minor
+ 0 equal
+ 1 first greater """
+
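+    # normalize('1.2.0') -> [1, 2]: trailing '.0' groups are stripped so that
+    # '1.2' and '1.2.0' compare as equal.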
+ def normalize(v):
+ return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]
+ normalized_version1 = normalize(version1)
+ normalized_version2 = normalize(version2)
+ if normalized_version1 == normalized_version2:
+ rc = 0
+ elif normalized_version1 < normalized_version2:
+ rc = -1
+ else:
+ rc = 1
+ return rc
+
+
+def query_package(module, name, depot=None):
+ """ Returns whether a package is installed or not and version. """
+
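+    # swlist output is filtered through grep, so the command must run through a
+    # shell (use_unsafe_shell=True); user-supplied values are shlex-quoted.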
+ cmd_list = '/usr/sbin/swlist -a revision -l product'
+ if depot:
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), shlex_quote(name), shlex_quote(name)),
+ use_unsafe_shell=True)
+ else:
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, shlex_quote(name), shlex_quote(name)), use_unsafe_shell=True)
+ if rc == 0:
+ version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
+ else:
+ version = None
+
+ return rc, version
+
+
+def remove_package(module, name):
+ """ Uninstall package if installed. """
+
+ cmd_remove = '/usr/sbin/swremove'
+ rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
+
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def install_package(module, depot, name):
+ """ Install package if not already installed """
+
+ cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
+ rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
+ if rc == 0:
+ return rc, stdout
+ else:
+ return rc, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pkg'], required=True),
+ state=dict(choices=['present', 'absent', 'latest'], required=True),
+ depot=dict(default=None, required=False)
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ state = module.params['state']
+ depot = module.params['depot']
+
+ changed = False
+ msg = "No changed"
+ rc = 0
+    if state in ('present', 'latest') and depot is None:
+        output = "depot parameter is required when state is present or latest"
+        module.fail_json(name=name, msg=output, rc=rc)
+
+ # Check local version
+ rc, version_installed = query_package(module, name)
+ if not rc:
+ installed = True
+ msg = "Already installed"
+
+ else:
+ installed = False
+
+    if state in ('present', 'latest') and not installed:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ changed = True
+ msg = "Package installed"
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'latest' and installed is True:
+ # Check depot version
+ rc, version_depot = query_package(module, name, depot)
+
+ if not rc:
+ if compare_package(version_installed, version_depot) == -1:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ # Install new version
+ rc, output = install_package(module, depot, name)
+
+ if not rc:
+ msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
+ changed = True
+
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ else:
+ output = "Software package not in repository " + depot
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ elif state == 'absent' and installed is True:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, output = remove_package(module, name)
+ if not rc:
+ changed = True
+ msg = "Package removed"
+ else:
+ module.fail_json(name=name, msg=output, rc=rc)
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ module.exit_json(changed=changed, name=name, state=state, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/swupd.py b/ansible_collections/community/general/plugins/modules/swupd.py
new file mode 100644
index 000000000..efd7ca7c1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/swupd.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: swupd
+short_description: Manages updates and bundles in Clear Linux systems
+description:
+ - Manages updates and bundles with the swupd bundle manager, which is used by the
+ Clear Linux Project for Intel Architecture.
+author: Alberto Murillo (@albertomurillo)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ contenturl:
+ description:
+ - URL pointing to the contents of available bundles.
+ If not specified, the contents are retrieved from clearlinux.org.
+ type: str
+ format:
+ description:
+      - The format suffix for version file downloads, for example C(1), C(2),
+        C(3), or C(staging). If not specified, the default format is used.
+ type: str
+ manifest:
+ description:
+ - The manifest contains information about the bundles at certain version of the OS.
+ Specify a Manifest version to verify against that version or leave unspecified to
+ verify against the current version.
+ aliases: [release, version]
+ type: int
+ name:
+ description:
+      - Name of the bundle to install or remove.
+ aliases: [bundle]
+ type: str
+ state:
+ description:
+      - Indicates the desired bundle state. C(present) ensures the bundle
+        is installed while C(absent) ensures the bundle is not installed.
+ default: present
+ choices: [present, absent]
+ type: str
+ update:
+ description:
+ - Updates the OS to the latest version.
+ type: bool
+ default: false
+ url:
+ description:
+ - Overrides both I(contenturl) and I(versionurl).
+ type: str
+ verify:
+ description:
+ - Verify content for OS version.
+ type: bool
+ default: false
+ versionurl:
+ description:
+ - URL for version string download.
+ type: str
+'''
+
+EXAMPLES = '''
+- name: Update the OS to the latest version
+ community.general.swupd:
+ update: true
+
+- name: Installs the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: present
+
+- name: Removes the "foo" bundle
+ community.general.swupd:
+ name: foo
+ state: absent
+
+- name: Check integrity of filesystem
+ community.general.swupd:
+ verify: true
+
+- name: Downgrade OS to release 12920
+ community.general.swupd:
+ verify: true
+ manifest: 12920
+'''
+
+RETURN = '''
+stdout:
+ description: stdout of swupd
+ returned: always
+ type: str
+stderr:
+ description: stderr of swupd
+ returned: always
+ type: str
+'''
+
+import os
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Swupd(object):
+ FILES_NOT_MATCH = "files did not match"
+ FILES_REPLACED = "missing files were replaced"
+ FILES_FIXED = "files were fixed"
+ FILES_DELETED = "files were deleted"
+
+ def __init__(self, module):
+ # Fail if swupd is not found
+ self.module = module
+ self.swupd_cmd = module.get_bin_path("swupd", False)
+ if not self.swupd_cmd:
+ module.fail_json(msg="Could not find swupd.")
+
+ # Initialize parameters
+ for key in module.params.keys():
+ setattr(self, key, module.params[key])
+
+ # Initialize return values
+ self.changed = False
+ self.failed = False
+ self.msg = None
+ self.rc = None
+ self.stderr = ""
+ self.stdout = ""
+
+ def _run_cmd(self, cmd):
+ self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
+
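+    # Build the swupd command line; --url overrides both --contenturl and
+    # --versionurl, matching the documented behaviour of the url option.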
+ def _get_cmd(self, command):
+ cmd = "%s %s" % (self.swupd_cmd, command)
+
+ if self.format:
+ cmd += " --format=%s" % self.format
+ if self.manifest:
+ cmd += " --manifest=%s" % self.manifest
+ if self.url:
+ cmd += " --url=%s" % self.url
+ else:
+ if self.contenturl and command != "check-update":
+ cmd += " --contenturl=%s" % self.contenturl
+ if self.versionurl:
+ cmd += " --versionurl=%s" % self.versionurl
+
+ return cmd
+
+ def _is_bundle_installed(self, bundle):
+ try:
+ os.stat("/usr/share/clear/bundles/%s" % bundle)
+ except OSError:
+ return False
+
+ return True
+
+ def _needs_update(self):
+ cmd = self._get_cmd("check-update")
+ self._run_cmd(cmd)
+
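+        # swupd check-update exits 0 when an update is available and 1 when the
+        # system is already up to date; any other code is treated as a failure.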
+ if self.rc == 0:
+ return True
+
+ if self.rc == 1:
+ return False
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def _needs_verify(self):
+ cmd = self._get_cmd("verify")
+ self._run_cmd(cmd)
+
+ if self.rc != 0:
+ self.failed = True
+ self.msg = "Failed to check for filesystem inconsistencies."
+
+ if self.FILES_NOT_MATCH in self.stdout:
+ return True
+
+ return False
+
+ def install_bundle(self, bundle):
+ """Installs a bundle with `swupd bundle-add bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=not self._is_bundle_installed(bundle))
+
+ if self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s is already installed" % bundle
+ return
+
+ cmd = self._get_cmd("bundle-add %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s installed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to install bundle %s" % bundle
+
+ def remove_bundle(self, bundle):
+ """Removes a bundle with `swupd bundle-remove bundle`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._is_bundle_installed(bundle))
+
+ if not self._is_bundle_installed(bundle):
+ self.msg = "Bundle %s not installed"
+ return
+
+ cmd = self._get_cmd("bundle-remove %s" % bundle)
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Bundle %s removed" % bundle
+ return
+
+ self.failed = True
+ self.msg = "Failed to remove bundle %s" % bundle
+
+ def update_os(self):
+ """Updates the os with `swupd update`"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_update())
+
+ if not self._needs_update():
+ self.msg = "There are no updates available"
+ return
+
+ cmd = self._get_cmd("update")
+ self._run_cmd(cmd)
+
+ if self.rc == 0:
+ self.changed = True
+ self.msg = "Update successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to check for updates"
+
+ def verify_os(self):
+ """Verifies filesystem against specified or current version"""
+ if self.module.check_mode:
+ self.module.exit_json(changed=self._needs_verify())
+
+ if not self._needs_verify():
+ self.msg = "No files where changed"
+ return
+
+ cmd = self._get_cmd("verify --fix")
+ self._run_cmd(cmd)
+
+ if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
+ self.changed = True
+ self.msg = "Fix successful"
+ return
+
+ self.failed = True
+ self.msg = "Failed to verify the OS"
+
+
+def main():
+ """The main function."""
+ module = AnsibleModule(
+ argument_spec=dict(
+ contenturl=dict(type="str"),
+ format=dict(type="str"),
+ manifest=dict(aliases=["release", "version"], type="int"),
+ name=dict(aliases=["bundle"], type="str"),
+ state=dict(default="present", choices=["present", "absent"], type="str"),
+ update=dict(default=False, type="bool"),
+ url=dict(type="str"),
+ verify=dict(default=False, type="bool"),
+ versionurl=dict(type="str"),
+ ),
+ required_one_of=[["name", "update", "verify"]],
+ mutually_exclusive=[["name", "update", "verify"]],
+ supports_check_mode=True
+ )
+
+ swupd = Swupd(module)
+
+ name = module.params["name"]
+ state = module.params["state"]
+ update = module.params["update"]
+ verify = module.params["verify"]
+
+ if update:
+ swupd.update_os()
+ elif verify:
+ swupd.verify_os()
+ elif state == "present":
+ swupd.install_bundle(name)
+ elif state == "absent":
+ swupd.remove_bundle(name)
+ else:
+ swupd.failed = True
+
+ if swupd.failed:
+ module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+ else:
+ module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/syslogger.py b/ansible_collections/community/general/plugins/modules/syslogger.py
new file mode 100644
index 000000000..3a7abf4fb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/syslogger.py
@@ -0,0 +1,198 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Tim Rightnour <thegarbledone@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syslogger
+short_description: Log messages in the syslog
+description:
+ - Uses syslog to add log entries to the host.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ msg:
+ type: str
+ description:
+ - This is the message to place in syslog.
+ required: true
+ priority:
+ type: str
+ description:
+ - Set the log priority.
+ choices: [ "emerg", "alert", "crit", "err", "warning", "notice", "info", "debug" ]
+ default: "info"
+ facility:
+ type: str
+ description:
+ - Set the log facility.
+ choices: [ "kern", "user", "mail", "daemon", "auth", "lpr", "news",
+ "uucp", "cron", "syslog", "local0", "local1", "local2",
+ "local3", "local4", "local5", "local6", "local7" ]
+ default: "daemon"
+ log_pid:
+ description:
+ - Log the PID in brackets.
+ type: bool
+ default: false
+ ident:
+ description:
+      - Specify the name of the application sending the log to syslog.
+ type: str
+ default: 'ansible_syslogger'
+ version_added: '0.2.0'
+author:
+ - Tim Rightnour (@garbled1)
+'''
+
+EXAMPLES = r'''
+- name: Simple Usage
+ community.general.syslogger:
+ msg: "I will end up as daemon.info"
+
+- name: Send a log message with err priority and user facility with log_pid
+ community.general.syslogger:
+ msg: "Hello from Ansible"
+ priority: "err"
+ facility: "user"
+ log_pid: true
+
+- name: Specify the name of application which is sending log message
+ community.general.syslogger:
+ ident: "MyApp"
+ msg: "I want to believe"
+ priority: "alert"
+'''
+
+RETURN = r'''
+ident:
+ description: Name of application sending the message to log
+ returned: always
+ type: str
+ sample: "ansible_syslogger"
+ version_added: '0.2.0'
+priority:
+ description: Priority level
+ returned: always
+ type: str
+ sample: "daemon"
+facility:
+ description: Syslog facility
+ returned: always
+ type: str
+ sample: "info"
+log_pid:
+ description: Log PID status
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Message sent to syslog
+ returned: always
+ type: str
+ sample: "Hello from Ansible"
+'''
+
+import syslog
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
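+# Map the module's facility/priority strings to syslog module constants;
+# unrecognised values fall back to LOG_DAEMON and LOG_INFO respectively.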
+def get_facility(facility):
+ return {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'syslog': syslog.LOG_SYSLOG,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+ }.get(facility, syslog.LOG_DAEMON)
+
+
+def get_priority(priority):
+ return {
+ 'emerg': syslog.LOG_EMERG,
+ 'alert': syslog.LOG_ALERT,
+ 'crit': syslog.LOG_CRIT,
+ 'err': syslog.LOG_ERR,
+ 'warning': syslog.LOG_WARNING,
+ 'notice': syslog.LOG_NOTICE,
+ 'info': syslog.LOG_INFO,
+ 'debug': syslog.LOG_DEBUG
+ }.get(priority, syslog.LOG_INFO)
+
+
+def main():
+ # define the available arguments/parameters that a user can pass to
+ # the module
+ module_args = dict(
+ ident=dict(type='str', default='ansible_syslogger'),
+ msg=dict(type='str', required=True),
+ priority=dict(type='str', required=False,
+ choices=["emerg", "alert", "crit", "err", "warning",
+ "notice", "info", "debug"],
+ default='info'),
+ facility=dict(type='str', required=False,
+ choices=["kern", "user", "mail", "daemon", "auth",
+ "lpr", "news", "uucp", "cron", "syslog",
+ "local0", "local1", "local2", "local3",
+ "local4", "local5", "local6", "local7"],
+ default='daemon'),
+ log_pid=dict(type='bool', required=False, default=False)
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ )
+
+ result = dict(
+ changed=False,
+ ident=module.params['ident'],
+ priority=module.params['priority'],
+ facility=module.params['facility'],
+ log_pid=module.params['log_pid'],
+ msg=module.params['msg']
+ )
+
+ # do the logging
+ try:
+ syslog.openlog(module.params['ident'],
+ syslog.LOG_PID if module.params['log_pid'] else 0,
+ get_facility(module.params['facility']))
+ syslog.syslog(get_priority(module.params['priority']),
+ module.params['msg'])
+ syslog.closelog()
+ result['changed'] = True
+
+ except Exception as exc:
+ module.fail_json(error='Failed to write to syslog %s' % to_native(exc), exception=traceback.format_exc(), **result)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/syspatch.py b/ansible_collections/community/general/plugins/modules/syspatch.py
new file mode 100644
index 000000000..c90ef0d22
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/syspatch.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019-2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: syspatch
+
+short_description: Manage OpenBSD system patches
+
+
+description:
+ - "Manage OpenBSD system patches using syspatch."
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ revert:
+ description:
+ - Revert system patches.
+ type: str
+ choices: [ all, one ]
+
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = '''
+- name: Apply all available system patches
+ community.general.syspatch:
+
+- name: Revert last patch
+ community.general.syspatch:
+ revert: one
+
+- name: Revert all patches
+ community.general.syspatch:
+ revert: all
+
+# NOTE: You can reboot automatically if a patch requires it:
+- name: Apply all patches and store result
+ community.general.syspatch:
+ register: syspatch
+
+- name: Reboot if patch requires it
+ ansible.builtin.reboot:
+ when: syspatch.reboot_needed
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success)
+ returned: always
+ type: int
+stdout:
+ description: syspatch standard output.
+ returned: always
+ type: str
+ sample: "001_rip6cksum"
+stderr:
+ description: syspatch standard error.
+ returned: always
+ type: str
+ sample: "syspatch: need root privileges"
+reboot_needed:
+ description: Whether or not a reboot is required after an update.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def run_module():
+ # define available arguments/parameters a user can pass to the module
+ module_args = dict(
+ revert=dict(type='str', choices=['all', 'one'])
+ )
+
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=True,
+ )
+
+ result = syspatch_run(module)
+
+ module.exit_json(**result)
+
+
+def syspatch_run(module):
+ cmd = module.get_bin_path('syspatch', True)
+ changed = False
+ reboot_needed = False
+ warnings = []
+
+    # syspatch flag reference: -c lists patches that are not yet installed,
+    # -l lists installed patches, -r reverts the most recently installed patch,
+    # and -R reverts all installed patches.
+    if module.params['revert']:
+        check_flag = ['-l']
+        run_flag = ['-R'] if module.params['revert'] == 'all' else ['-r']
+    else:
+        check_flag = ['-c']
+        run_flag = []
+
+ # Run check command
+ rc, out, err = module.run_command([cmd] + check_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+
+    # Any output from the check command means changes are pending.
+    change_pending = len(out) > 0
+
+ if module.check_mode:
+ changed = change_pending
+ elif change_pending:
+ rc, out, err = module.run_command([cmd] + run_flag)
+
+ # Workaround syspatch ln bug:
+ # http://openbsd-archive.7691.n7.nabble.com/Warning-applying-latest-syspatch-td354250.html
+ if rc != 0 and err != 'ln: /usr/X11R6/bin/X: No such file or directory\n':
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('create unique kernel') >= 0:
+ # Kernel update applied
+ reboot_needed = True
+ elif out.lower().find('syspatch updated itself') >= 0:
+ warnings.append('Syspatch was updated. Please run syspatch again.')
+
+ # If no stdout, then warn user
+ if len(out) == 0:
+ warnings.append('syspatch had suggested changes, but stdout was empty.')
+
+ changed = True
+ else:
+ changed = False
+
+ return dict(
+ changed=changed,
+ reboot_needed=reboot_needed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sysrc.py b/ansible_collections/community/general/plugins/modules/sysrc.py
new file mode 100644
index 000000000..9652b629a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sysrc.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019 David Lundgren <dlundgren@syberisle.net>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - David Lundgren (@dlundgren)
+module: sysrc
+short_description: Manage FreeBSD rc.conf variables using sysrc
+version_added: '2.0.0'
+description:
+ - Manages C(/etc/rc.conf) for FreeBSD.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of variable in C(/etc/rc.conf) to manage.
+ type: str
+ required: true
+ value:
+ description:
+ - The value to set when I(state=present).
+ - The value to add when I(state=value_present).
+ - The value to remove when I(state=value_absent).
+ type: str
+ state:
+ description:
+ - Use I(present) to add the variable.
+ - Use I(absent) to remove the variable.
+ - Use I(value_present) to add the value to the existing variable.
+ - Use I(value_absent) to remove the value from the existing variable.
+ type: str
+ default: "present"
+ choices: [ absent, present, value_present, value_absent ]
+ path:
+ description:
+ - Path to file to use instead of C(/etc/rc.conf).
+ type: str
+ default: "/etc/rc.conf"
+ delim:
+ description:
+ - Delimiter to be used instead of C( ).
+ - Only used when I(state=value_present) or I(state=value_absent).
+ default: " "
+ type: str
+ jail:
+ description:
+ - Name or ID of the jail to operate on.
+ type: str
+notes:
+ - The C(name) cannot contain periods as sysrc does not support OID style names.
+'''
+
+EXAMPLES = r'''
+---
+# enable mysql in the /etc/rc.conf
+- name: Configure mysql pid file
+ community.general.sysrc:
+ name: mysql_pidfile
+ value: "/var/run/mysqld/mysqld.pid"
+
+# enable accf_http kld in the boot loader
+- name: Enable accf_http kld
+ community.general.sysrc:
+ name: accf_http_load
+ state: present
+ value: "YES"
+ path: /boot/loader.conf
+
+# add gif0 to cloned_interfaces
+- name: Add gif0 interface
+ community.general.sysrc:
+ name: cloned_interfaces
+ state: value_present
+ value: "gif0"
+
+# enable nginx on a jail
+- name: Enable nginx in test jail
+ community.general.sysrc:
+ name: nginx_enable
+ value: "YES"
+ jail: testjail
+'''
+
+RETURN = r'''
+changed:
+ description: Return changed for sysrc actions.
+ returned: always
+ type: bool
+ sample: true
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+import re
+
+
+class Sysrc(object):
+ def __init__(self, module, name, value, path, delim, jail):
+ self.module = module
+ self.name = name
+ self.changed = False
+ self.value = value
+ self.path = path
+ self.delim = delim
+ self.jail = jail
+ self.sysrc = module.get_bin_path('sysrc', True)
+
+ def has_unknown_variable(self, out, err):
+ # newer versions of sysrc use stderr instead of stdout
+ return err.find("unknown variable") > 0 or out.find("unknown variable") > 0
+
+ def exists(self):
+ # sysrc doesn't really use exit codes
+ (rc, out, err) = self.run_sysrc(self.name)
+ if self.value is None:
+ regex = "%s: " % re.escape(self.name)
+ else:
+ regex = "%s: %s$" % (re.escape(self.name), re.escape(self.value))
+
+ return not self.has_unknown_variable(out, err) and re.match(regex, out) is not None
+
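+    # The variable "contains" the value when it appears as one delim-separated
+    # token of `sysrc -n <name>` output.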
+ def contains(self):
+ (rc, out, err) = self.run_sysrc('-n', self.name)
+ if self.has_unknown_variable(out, err):
+ return False
+
+ return self.value in out.strip().split(self.delim)
+
+ def present(self):
+ if self.exists():
+ return
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ (rc, out, err) = self.run_sysrc("%s=%s" % (self.name, self.value))
+ if out.find("%s:" % self.name) == 0 and re.search("-> %s$" % re.escape(self.value), out) is not None:
+ self.changed = True
+
+ def absent(self):
+ if not self.exists():
+ return
+
+        # Only run sysrc -x when not in check mode, but fall through either way
+        # so the removal is still reported as a change.
+ if not self.module.check_mode:
+ (rc, out, err) = self.run_sysrc('-x', self.name)
+ if self.has_unknown_variable(out, err):
+ return
+
+ self.changed = True
+
+ def value_present(self):
+ if self.contains():
+ return
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ setstring = '%s+=%s%s' % (self.name, self.delim, self.value)
+ (rc, out, err) = self.run_sysrc(setstring)
+ if out.find("%s:" % self.name) == 0:
+ values = out.split(' -> ')[1].strip().split(self.delim)
+ if self.value in values:
+ self.changed = True
+
+ def value_absent(self):
+ if not self.contains():
+ return
+
+ if self.module.check_mode:
+ self.changed = True
+ return
+
+ setstring = '%s-=%s%s' % (self.name, self.delim, self.value)
+ (rc, out, err) = self.run_sysrc(setstring)
+ if out.find("%s:" % self.name) == 0:
+ values = out.split(' -> ')[1].strip().split(self.delim)
+ if self.value not in values:
+ self.changed = True
+
+ def run_sysrc(self, *args):
+ cmd = [self.sysrc, '-f', self.path]
+ if self.jail:
+ cmd += ['-j', self.jail]
+ cmd.extend(args)
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ return (rc, out, err)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ value=dict(type='str', default=None),
+ state=dict(type='str', default='present', choices=['absent', 'present', 'value_present', 'value_absent']),
+ path=dict(type='str', default='/etc/rc.conf'),
+ delim=dict(type='str', default=' '),
+ jail=dict(type='str', default=None),
+ ),
+ supports_check_mode=True,
+ )
+
+ name = module.params.pop('name')
+ # OID style names are not supported
+ if not re.match('^[a-zA-Z0-9_]+$', name):
+ module.fail_json(
+ msg="Name may only contain alpha-numeric and underscore characters"
+ )
+
+ value = module.params.pop('value')
+ state = module.params.pop('state')
+ path = module.params.pop('path')
+ delim = module.params.pop('delim')
+ jail = module.params.pop('jail')
+ result = dict(
+ name=name,
+ state=state,
+ value=value,
+ path=path,
+ delim=delim,
+ jail=jail
+ )
+
+ rc_value = Sysrc(module, name, value, path, delim, jail)
+
+ if state == 'present':
+ rc_value.present()
+ elif state == 'absent':
+ rc_value.absent()
+ elif state == 'value_present':
+ rc_value.value_present()
+ elif state == 'value_absent':
+ rc_value.value_absent()
+
+ result['changed'] = rc_value.changed
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/sysupgrade.py b/ansible_collections/community/general/plugins/modules/sysupgrade.py
new file mode 100644
index 000000000..ac80e0196
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/sysupgrade.py
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Andrew Klaus <andrewklaus@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: sysupgrade
+short_description: Manage OpenBSD system upgrades
+version_added: 1.1.0
+description:
+ - Manage OpenBSD system upgrades using sysupgrade.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ snapshot:
+ description:
+ - Apply the latest snapshot.
+      - Otherwise the latest release will be applied.
+ default: false
+ type: bool
+ force:
+ description:
+ - Force upgrade (for snapshots only).
+ default: false
+ type: bool
+ keep_files:
+ description:
+ - Keep the files under /home/_sysupgrade.
+ - By default, the files will be deleted after the upgrade.
+ default: false
+ type: bool
+ fetch_only:
+ description:
+ - Fetch and verify files and create /bsd.upgrade but do not reboot.
+ - Set to C(false) if you want sysupgrade to reboot. This will cause Ansible to error, as it expects the module to exit gracefully. See the examples.
+ default: true
+ type: bool
+ installurl:
+ description:
+ - OpenBSD mirror top-level URL for fetching an upgrade.
+ - By default, the mirror URL is pulled from /etc/installurl.
+ type: str
+author:
+ - Andrew Klaus (@precurse)
+'''
+
+EXAMPLES = r'''
+- name: Upgrade to latest release
+ community.general.sysupgrade:
+ register: sysupgrade
+
+- name: Upgrade to latest snapshot
+ community.general.sysupgrade:
+ snapshot: true
+ installurl: https://cloudflare.cdn.openbsd.org/pub/OpenBSD
+ register: sysupgrade
+
+- name: Reboot to apply upgrade if needed
+ ansible.builtin.reboot:
+ when: sysupgrade.changed
+
+# Note: Ansible will error when running this way due to how
+# the reboot is forcefully handled by sysupgrade:
+
+- name: Have sysupgrade automatically reboot
+ community.general.sysupgrade:
+ fetch_only: false
+ ignore_errors: true
+'''
+
+RETURN = r'''
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+stdout:
+ description: Sysupgrade standard output.
+ returned: always
+ type: str
+stderr:
+ description: Sysupgrade standard error.
+ returned: always
+ type: str
+ sample: "sysupgrade: need root privileges"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def sysupgrade_run(module):
+ sysupgrade_bin = module.get_bin_path('/usr/sbin/sysupgrade', required=True)
+ cmd = [sysupgrade_bin]
+ changed = False
+ warnings = []
+
+ # Setup command flags
+ if module.params['snapshot']:
+ run_flag = ['-s']
+ if module.params['force']:
+ # Force only applies to snapshots
+ run_flag.append('-f')
+ else:
+ # release flag
+ run_flag = ['-r']
+
+ if module.params['keep_files']:
+ run_flag.append('-k')
+
+ if module.params['fetch_only']:
+ run_flag.append('-n')
+
+ # installurl must be the last argument
+ if module.params['installurl']:
+ run_flag.append(module.params['installurl'])
+
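+ # Illustrative flag combinations derived from the logic above (values are
+ # hypothetical): snapshot=true with force=true and fetch_only=true builds
+ # "/usr/sbin/sysupgrade -s -f -n", while the module defaults build
+ # "/usr/sbin/sysupgrade -r -n".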
+ rc, out, err = module.run_command(cmd + run_flag)
+
+ if rc != 0:
+ module.fail_json(msg="Command %s failed rc=%d, out=%s, err=%s" % (cmd, rc, out, err))
+ elif out.lower().find('already on latest snapshot') >= 0:
+ changed = False
+ elif out.lower().find('upgrade on next reboot') >= 0:
+ changed = True
+
+ return dict(
+ changed=changed,
+ rc=rc,
+ stderr=err,
+ stdout=out,
+ warnings=warnings
+ )
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ snapshot=dict(type='bool', default=False),
+ fetch_only=dict(type='bool', default=True),
+ force=dict(type='bool', default=False),
+ keep_files=dict(type='bool', default=False),
+ installurl=dict(type='str'),
+ ),
+ supports_check_mode=False,
+ )
+ return_dict = sysupgrade_run(module)
+ module.exit_json(**return_dict)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/taiga_issue.py b/ansible_collections/community/general/plugins/modules/taiga_issue.py
new file mode 100644
index 000000000..e80ff43b8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/taiga_issue.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: taiga_issue
+short_description: Creates/deletes an issue in a Taiga Project Management Platform
+description:
+ - Creates/deletes an issue in a Taiga Project Management Platform (U(https://taiga.io)).
+ - An issue is identified by the combination of project, issue subject and issue type.
+ - This module implements the creation or deletion of issues (not the update).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ taiga_host:
+ type: str
+ description:
+ - The hostname of the Taiga instance.
+ default: https://api.taiga.io
+ project:
+ type: str
+ description:
+ - Name of the project containing the issue. Must exist previously.
+ required: true
+ subject:
+ type: str
+ description:
+ - The issue subject.
+ required: true
+ issue_type:
+ type: str
+ description:
+ - The issue type. Must exist previously.
+ required: true
+ priority:
+ type: str
+ description:
+ - The issue priority. Must exist previously.
+ default: Normal
+ status:
+ type: str
+ description:
+ - The issue status. Must exist previously.
+ default: New
+ severity:
+ type: str
+ description:
+ - The issue severity. Must exist previously.
+ default: Normal
+ description:
+ type: str
+ description:
+ - The issue description.
+ default: ""
+ attachment:
+ type: path
+ description:
+ - Path to a file to be attached to the issue.
+ attachment_description:
+ type: str
+ description:
+ - A string describing the file to be attached to the issue.
+ default: ""
+ tags:
+ type: list
+ elements: str
+ description:
+ - A list of tags to be assigned to the issue.
+ default: []
+ state:
+ type: str
+ description:
+ - Whether the issue should be present or not.
+ choices: ["present", "absent"]
+ default: present
+author: Alejandro Guirao (@lekum)
+requirements: [python-taiga]
+notes:
+- The authentication is achieved either by the environment variable C(TAIGA_TOKEN) or by the pair of environment variables C(TAIGA_USERNAME) and C(TAIGA_PASSWORD).
+'''
+
+EXAMPLES = '''
+- name: Create an issue in my hosted Taiga environment and attach an error log
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ priority: High
+ status: New
+ severity: Important
+ description: An error has been found. Please check the attached error log for details.
+ attachment: /path/to/error.log
+ attachment_description: Error log file
+ tags:
+ - Error
+ - Needs manual check
+ state: present
+
+- name: Delete the previously created issue
+ community.general.taiga_issue:
+ taiga_host: https://mytaigahost.example.com
+ project: myproject
+ subject: An error has been found
+ issue_type: Bug
+ state: absent
+'''
+
+RETURN = '''# '''
+import traceback
+
+from os import getenv
+from os.path import isfile
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+TAIGA_IMP_ERR = None
+try:
+ from taiga import TaigaAPI
+ from taiga.exceptions import TaigaException
+ TAIGA_MODULE_IMPORTED = True
+except ImportError:
+ TAIGA_IMP_ERR = traceback.format_exc()
+ TAIGA_MODULE_IMPORTED = False
+
+
+def manage_issue(taiga_host, project_name, issue_subject, issue_priority,
+ issue_status, issue_type, issue_severity, issue_description,
+ issue_attachment, issue_attachment_description,
+ issue_tags, state, check_mode=False):
+ """
+ Method that creates/deletes issues depending on whether they exist and on the desired state
+
+ The credentials should be passed via environment variables:
+ - TAIGA_TOKEN
+ - TAIGA_USERNAME and TAIGA_PASSWORD
+
+ Returns a tuple with these elements:
+ - A boolean representing the success of the operation
+ - A descriptive message
+ - A dict with the issue attributes, in case of issue creation, otherwise empty dict
+ """
+
+ changed = False
+
+ try:
+ token = getenv('TAIGA_TOKEN')
+ if token:
+ api = TaigaAPI(host=taiga_host, token=token)
+ else:
+ api = TaigaAPI(host=taiga_host)
+ username = getenv('TAIGA_USERNAME')
+ password = getenv('TAIGA_PASSWORD')
+ if not all([username, password]):
+ return False, changed, "Missing credentials", {}
+ api.auth(username=username, password=password)
+
+ user_id = api.me().id
+ project_list = list(filter(lambda x: x.name == project_name, api.projects.list(member=user_id)))
+ if len(project_list) != 1:
+ return False, changed, "Unable to find project %s" % project_name, {}
+ project = project_list[0]
+ project_id = project.id
+
+ priority_list = list(filter(lambda x: x.name == issue_priority, api.priorities.list(project=project_id)))
+ if len(priority_list) != 1:
+ return False, changed, "Unable to find issue priority %s for project %s" % (issue_priority, project_name), {}
+ priority_id = priority_list[0].id
+
+ status_list = list(filter(lambda x: x.name == issue_status, api.issue_statuses.list(project=project_id)))
+ if len(status_list) != 1:
+ return False, changed, "Unable to find issue status %s for project %s" % (issue_status, project_name), {}
+ status_id = status_list[0].id
+
+ type_list = list(filter(lambda x: x.name == issue_type, project.list_issue_types()))
+ if len(type_list) != 1:
+ return False, changed, "Unable to find issue type %s for project %s" % (issue_type, project_name), {}
+ type_id = type_list[0].id
+
+ severity_list = list(filter(lambda x: x.name == issue_severity, project.list_severities()))
+ if len(severity_list) != 1:
+ return False, changed, "Unable to find severity %s for project %s" % (issue_severity, project_name), {}
+ severity_id = severity_list[0].id
+
+ issue = {
+ "project": project_name,
+ "subject": issue_subject,
+ "priority": issue_priority,
+ "status": issue_status,
+ "type": issue_type,
+ "severity": issue_severity,
+ "description": issue_description,
+ "tags": issue_tags,
+ }
+
+ # An issue is identified by the project_name, the issue_subject and the issue_type
+ matching_issue_list = list(filter(lambda x: x.subject == issue_subject and x.type == type_id, project.list_issues()))
+ matching_issue_list_len = len(matching_issue_list)
+
+ if matching_issue_list_len == 0:
+ # The issue does not exist in the project
+ if state == "present":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Create the issue
+ new_issue = project.add_issue(issue_subject, priority_id, status_id, type_id, severity_id, tags=issue_tags,
+ description=issue_description)
+ if issue_attachment:
+ new_issue.attach(issue_attachment, description=issue_attachment_description)
+ issue["attachment"] = issue_attachment
+ issue["attachment_description"] = issue_attachment_description
+ return True, changed, "Issue created", issue
+
+ else:
+ # If it does not exist, do nothing
+ return True, changed, "Issue does not exist", {}
+
+ elif matching_issue_list_len == 1:
+ # The issue exists in the project
+ if state == "absent":
+ # This implies a change
+ changed = True
+ if not check_mode:
+ # Delete the issue
+ matching_issue_list[0].delete()
+ return True, changed, "Issue deleted", {}
+
+ else:
+ # Do nothing
+ return True, changed, "Issue already exists", {}
+
+ else:
+ # More than 1 matching issue
+ return False, changed, "More than one issue with subject %s in project %s" % (issue_subject, project_name), {}
+
+ except TaigaException as exc:
+ msg = "An exception happened: %s" % to_native(exc)
+ return False, changed, msg, {}
+
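+# A minimal, hypothetical usage sketch (not part of the module): the
+# four-element tuple returned by manage_issue() maps directly onto the
+# exit_json()/fail_json() split in main() below. Host, project, and
+# subject values here are illustrative only.
+#
+# ok, changed, msg, issue = manage_issue(
+#     "https://api.taiga.io", "myproject", "An error has been found",
+#     "Normal", "New", "Bug", "Normal", "", None, "", [], "present",
+#     check_mode=True)
+# if not ok:
+#     raise SystemExit(msg)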
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ taiga_host=dict(type='str', required=False, default="https://api.taiga.io"),
+ project=dict(type='str', required=True),
+ subject=dict(type='str', required=True),
+ issue_type=dict(type='str', required=True),
+ priority=dict(type='str', required=False, default="Normal"),
+ status=dict(type='str', required=False, default="New"),
+ severity=dict(type='str', required=False, default="Normal"),
+ description=dict(type='str', required=False, default=""),
+ attachment=dict(type='path', required=False, default=None),
+ attachment_description=dict(type='str', required=False, default=""),
+ tags=dict(required=False, default=[], type='list', elements='str'),
+ state=dict(type='str', required=False, choices=['present', 'absent'], default='present'),
+ ),
+ supports_check_mode=True
+ )
+
+ if not TAIGA_MODULE_IMPORTED:
+ module.fail_json(msg=missing_required_lib("python-taiga"), exception=TAIGA_IMP_ERR)
+
+ taiga_host = module.params['taiga_host']
+ project_name = module.params['project']
+ issue_subject = module.params['subject']
+ issue_priority = module.params['priority']
+ issue_status = module.params['status']
+ issue_type = module.params['issue_type']
+ issue_severity = module.params['severity']
+ issue_description = module.params['description']
+ issue_attachment = module.params['attachment']
+ issue_attachment_description = module.params['attachment_description']
+ if issue_attachment:
+ if not isfile(issue_attachment):
+ msg = "%s is not a file" % issue_attachment
+ module.fail_json(msg=msg)
+ issue_tags = module.params['tags']
+ state = module.params['state']
+
+ return_status, changed, msg, issue_attr_dict = manage_issue(
+ taiga_host,
+ project_name,
+ issue_subject,
+ issue_priority,
+ issue_status,
+ issue_type,
+ issue_severity,
+ issue_description,
+ issue_attachment,
+ issue_attachment_description,
+ issue_tags,
+ state,
+ check_mode=module.check_mode
+ )
+ if return_status:
+ if issue_attr_dict:
+ module.exit_json(changed=changed, msg=msg, issue=issue_attr_dict)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/telegram.py b/ansible_collections/community/general/plugins/modules/telegram.py
new file mode 100644
index 000000000..d13e90fd5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/telegram.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Artem Feofanov <artem.feofanov@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: telegram
+author:
+ - "Artem Feofanov (@tyouxa)"
+ - "Nikolai Lomov (@lomserman)"
+
+short_description: Send notifications via telegram
+
+description:
+ - Send notifications via telegram bot, to a verified group or user.
+ - You can also use any other Telegram bot API method by specifying the I(api_method) argument.
+notes:
+ - You need a Telegram account and a Telegram bot to use this module.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ token:
+ type: str
+ description:
+ - Token identifying your telegram bot.
+ required: true
+ api_method:
+ type: str
+ description:
+ - Bot API method.
+ - For reference, see U(https://core.telegram.org/bots/api).
+ default: SendMessage
+ version_added: 2.0.0
+ api_args:
+ type: dict
+ description:
+ - Any parameters for the method.
+ - For reference to default method, C(SendMessage), see U(https://core.telegram.org/bots/api#sendmessage).
+ version_added: 2.0.0
+
+'''
+
+EXAMPLES = """
+
+- name: Send notify to Telegram
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ api_args:
+ chat_id: 000000
+ parse_mode: "markdown"
+ text: "Your precious application has been deployed: https://example.com"
+ disable_web_page_preview: true
+ disable_notification: true
+
+- name: Forward message to someone
+ community.general.telegram:
+ token: '9999999:XXXXXXXXXXXXXXXXXXXXXXX'
+ api_method: forwardMessage
+ api_args:
+ chat_id: 000000
+ from_chat_id: 111111
+ disable_notification: true
+ message_id: '{{ saved_msg_id }}'
+"""
+
+RETURN = """
+
+msg:
+ description: The message you attempted to send.
+ returned: success
+ type: str
+ sample: "Ansible task finished"
+telegram_error:
+ description: Error message returned by the Telegram API.
+ returned: failure
+ type: str
+ sample: "Bad Request: message text is empty"
+"""
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+# noinspection PyUnresolvedReferences
+from ansible.module_utils.six.moves.urllib.parse import quote
+from ansible.module_utils.urls import fetch_url
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(type='str', required=True, no_log=True),
+ api_args=dict(type='dict'),
+ api_method=dict(type="str", default="SendMessage"),
+ ),
+ supports_check_mode=True
+ )
+
+ token = quote(module.params.get('token'))
+ api_args = module.params.get('api_args') or {}
+ api_method = module.params.get('api_method')
+ # fill in backward-compatibility args
+ api_args['chat_id'] = api_args.get('chat_id')
+ api_args['parse_mode'] = api_args.get('parse_mode')
+ api_args['text'] = api_args.get('text')
+
+ if api_args['parse_mode'] == 'plain':
+ del api_args['parse_mode']
+
+ url = 'https://api.telegram.org/bot{token}/{api_method}'.format(token=token, api_method=api_method)
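+ # Illustrative request (hypothetical values): with api_method="sendMessage"
+ # and api_args={"chat_id": 1, "text": "hi"}, fetch_url() below issues
+ # POST https://api.telegram.org/bot<token>/sendMessage with the JSON body
+ # {"chat_id": 1, "text": "hi", "parse_mode": null}.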
+
+ if module.check_mode:
+ module.exit_json(changed=False)
+
+ response, info = fetch_url(module, url, method="POST", data=json.dumps(api_args),
+ headers={'Content-Type': 'application/json'})
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ elif info['status'] == -1:
+ # SSL errors, connection problems, etc.
+ module.fail_json(msg="Failed to send message", info=info, response=response)
+ else:
+ body = json.loads(info['body'])
+ module.fail_json(
+ msg="Failed to send message, return status = {status}\n"
+ "url = {api_url}\n"
+ "api_args = {api_args}".format(
+ status=info['status'], api_url=url, api_args=api_args
+ ),
+ telegram_error=body['description'],
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/terraform.py b/ansible_collections/community/general/plugins/modules/terraform.py
new file mode 100644
index 000000000..f9f809220
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/terraform.py
@@ -0,0 +1,659 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: terraform
+short_description: Manages a Terraform deployment (and plans)
+description:
+ - Provides support for deploying resources with Terraform and pulling
+ resource information back into Ansible.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ choices: ['planned', 'present', 'absent']
+ description:
+ - Goal state of the given stage/project.
+ type: str
+ default: present
+ binary_path:
+ description:
+ - The path of a terraform binary to use, relative to I(project_path)
+ unless you supply an absolute path.
+ type: path
+ project_path:
+ description:
+ - The path to the root of the Terraform directory with the
+ vars.tf/main.tf/etc to use.
+ type: path
+ required: true
+ plugin_paths:
+ description:
+ - List of paths containing Terraform plugin executable files.
+ - Plugin executables can be downloaded from U(https://releases.hashicorp.com/).
+ - When set, the plugin discovery and auto-download behavior of Terraform is disabled.
+ - The directory structure in the plugin path can be tricky. The Terraform docs
+ U(https://learn.hashicorp.com/tutorials/terraform/automate-terraform#pre-installed-plugins)
+ show a simple directory of files, but actually, the directory structure
+ has to follow the same structure you would see if Terraform auto-downloaded the plugins.
+ See the examples below for a tree output of an example plugin directory.
+ type: list
+ elements: path
+ version_added: 3.0.0
+ workspace:
+ description:
+ - The terraform workspace to work with. This sets the C(TF_WORKSPACE) environmental variable
+ that is used to override workspace selection. For more information about workspaces
+ have a look at U(https://developer.hashicorp.com/terraform/language/state/workspaces).
+ type: str
+ default: default
+ purge_workspace:
+ description:
+ - Only works with I(state=absent).
+ - If true, the workspace will be deleted after the "terraform destroy" action.
+ - The 'default' workspace will not be deleted.
+ default: false
+ type: bool
+ plan_file:
+ description:
+ - The path to an existing Terraform plan file to apply. If this is not
+ specified, Ansible will build a new TF plan and execute it.
+ Note that this option is required if 'state' has the 'planned' value.
+ type: path
+ state_file:
+ description:
+ - The path to an existing Terraform state file to use when building plan.
+ If this is not specified, the default C(terraform.tfstate) will be used.
+ - This option is ignored when plan is specified.
+ type: path
+ variables_files:
+ description:
+ - The path to a variables file for Terraform to fill into the TF
+ configurations. This can accept a list of paths to multiple variables files.
+ - Up until Ansible 2.9, this option was usable as I(variables_file).
+ type: list
+ elements: path
+ aliases: [ 'variables_file' ]
+ variables:
+ description:
+ - A group of key-value pairs to override template variables or those in variables files.
+ By default, only string and number values are allowed, which are passed on unquoted.
+ - Supports complex variable structures (lists, dictionaries, numbers, and booleans) to reflect terraform variable syntax when I(complex_vars=true).
+ - Ansible integers or floats are mapped to terraform numbers.
+ - Ansible strings are mapped to terraform strings.
+ - Ansible dictionaries are mapped to terraform objects.
+ - Ansible lists are mapped to terraform lists.
+ - Ansible booleans are mapped to terraform booleans.
+ - "B(Note) passwords passed as variables will be visible in the log output. Make sure to use I(no_log=true) in production!"
+ type: dict
+ complex_vars:
+ description:
+ - Enable/disable capability to handle complex variable structures for C(terraform).
+ - If C(true) the I(variables) also accepts dictionaries, lists, and booleans to be passed to C(terraform).
+ Strings that are passed are correctly quoted.
+ - When disabled, supports only simple variables (strings, integers, and floats), and passes them on unquoted.
+ type: bool
+ default: false
+ version_added: 5.7.0
+ targets:
+ description:
+ - A list of specific resources to target in this plan/application. The
+ resources selected here will also auto-include any dependencies.
+ type: list
+ elements: str
+ default: []
+ lock:
+ description:
+ - Enable statefile locking, if you use a service that accepts locks (such
+ as S3+DynamoDB) to store your statefile.
+ type: bool
+ default: true
+ lock_timeout:
+ description:
+ - How long to maintain the lock on the statefile, if you use a service
+ that accepts locks (such as S3+DynamoDB).
+ type: int
+ force_init:
+ description:
+ - To avoid duplicating infra, if a state file can't be found this will
+ force a C(terraform init). Generally, this should be turned off unless
+ you intend to provision an entirely new Terraform deployment.
+ default: false
+ type: bool
+ overwrite_init:
+ description:
+ - Run init even if C(.terraform/terraform.tfstate) already exists in I(project_path).
+ default: true
+ type: bool
+ version_added: '3.2.0'
+ backend_config:
+ description:
+ - A group of key-value pairs to provide at init stage to the C(-backend-config) parameter.
+ type: dict
+ backend_config_files:
+ description:
+ - The path to a configuration file to provide at init stage to the C(-backend-config) parameter.
+ This can accept a list of paths to multiple configuration files.
+ type: list
+ elements: path
+ version_added: '0.2.0'
+ provider_upgrade:
+ description:
+ - Allows Terraform init to upgrade providers to versions specified in the project's version constraints.
+ default: false
+ type: bool
+ version_added: 4.8.0
+ init_reconfigure:
+ description:
+ - Forces backend reconfiguration during init.
+ default: false
+ type: bool
+ version_added: '1.3.0'
+ check_destroy:
+ description:
+ - Apply only when no resources are destroyed. Note that this only prevents "destroy" actions,
+ but not "destroy and re-create" actions. This option is ignored when I(state=absent).
+ type: bool
+ default: false
+ version_added: '3.3.0'
+ parallelism:
+ description:
+ - Restrict concurrent operations when Terraform applies the plan.
+ type: int
+ version_added: '3.8.0'
+notes:
+ - To just run a C(terraform plan), use check mode.
+requirements: [ "terraform" ]
+author: "Ryan Scott Brown (@ryansb)"
+'''
+
+EXAMPLES = """
+- name: Basic deploy of a service
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+
+- name: Define the backend configuration at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config:
+ region: "eu-west-1"
+ bucket: "some-bucket"
+ key: "random.tfstate"
+
+- name: Define the backend configuration with one or more files at init
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ backend_config_files:
+ - /path/to/backend_config_file_1
+ - /path/to/backend_config_file_2
+
+- name: Disable plugin discovery and auto-download by setting plugin_paths
+ community.general.terraform:
+ project_path: 'project/'
+ state: "{{ state }}"
+ force_init: true
+ plugin_paths:
+ - /path/to/plugins_dir_1
+ - /path/to/plugins_dir_2
+
+- name: Complex variables example
+ community.general.terraform:
+ project_path: '{{ project_dir }}'
+ state: present
+ complex_vars: true
+ variables:
+ vm_name: "{{ inventory_hostname }}"
+ vm_vcpus: 2
+ vm_mem: 2048
+ vm_additional_disks:
+ - label: "Third Disk"
+ size: 40
+ thin_provisioned: true
+ unit_number: 2
+ - label: "Fourth Disk"
+ size: 22
+ thin_provisioned: true
+ unit_number: 3
+ force_init: true
+
+### Example directory structure for plugin_paths example
+# $ tree /path/to/plugins_dir_1
+# /path/to/plugins_dir_1/
+# └── registry.terraform.io
+# └── hashicorp
+# └── vsphere
+# ├── 1.24.0
+# │ └── linux_amd64
+# │ └── terraform-provider-vsphere_v1.24.0_x4
+# └── 1.26.0
+# └── linux_amd64
+# └── terraform-provider-vsphere_v1.26.0_x4
+"""
+
+RETURN = """
+outputs:
+ type: complex
+ description: A dictionary of all the TF outputs by their assigned name. Use C(.outputs.MyOutputName.value) to access the value.
+ returned: on success
+ sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
+ contains:
+ sensitive:
+ type: bool
+ returned: always
+ description: Whether Terraform has marked this value as sensitive
+ type:
+ type: str
+ returned: always
+ description: The type of the value (string, int, etc)
+ value:
+ type: str
+ returned: always
+ description: The value of the output as interpolated by Terraform
+stdout:
+ type: str
+ description: Full C(terraform) command stdout, in case you want to display it or examine the event log
+ returned: always
+ sample: ''
+command:
+ type: str
+ description: Full C(terraform) command built by this module, in case you want to re-run the command outside the module or debug a problem.
+ returned: always
+ sample: terraform apply ...
+"""
+
+import os
+import json
+import tempfile
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.six import integer_types
+
+from ansible.module_utils.basic import AnsibleModule
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+module = None
+
+
+def get_version(bin_path):
+ extract_version = module.run_command([bin_path, 'version', '-json'])
+ terraform_version = (json.loads(extract_version[1]))['terraform_version']
+ return terraform_version
+
+
+def preflight_validation(bin_path, project_path, version, variables_args=None, plan_file=None):
+ if project_path is None or '/' not in project_path:
+ module.fail_json(msg="Path for Terraform project can not be None or ''.")
+ if not os.path.exists(bin_path):
+ module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
+ if not os.path.isdir(project_path):
+ module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
+ if LooseVersion(version) < LooseVersion('0.15.0'):
+ module.run_command([bin_path, 'validate', '-no-color'] + variables_args, check_rc=True, cwd=project_path)
+ else:
+ module.run_command([bin_path, 'validate', '-no-color'], check_rc=True, cwd=project_path)
+
+
+def _state_args(state_file):
+ if not state_file:
+ return []
+ if not os.path.exists(state_file):
+ module.warn('Could not find state_file "{0}", the process will not destroy any resources, please check your state file path.'.format(state_file))
+ return ['-state', state_file]
+
+
+def init_plugins(bin_path, project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace):
+ command = [bin_path, 'init', '-input=false', '-no-color']
+ if backend_config:
+ for key, val in backend_config.items():
+ command.extend([
+ '-backend-config',
+ shlex_quote('{0}={1}'.format(key, val))
+ ])
+ if backend_config_files:
+ for f in backend_config_files:
+ command.extend(['-backend-config', f])
+ if init_reconfigure:
+ command.extend(['-reconfigure'])
+ if provider_upgrade:
+ command.extend(['-upgrade'])
+ if plugin_paths:
+ for plugin_path in plugin_paths:
+ command.extend(['-plugin-dir', plugin_path])
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path, environ_update={"TF_WORKSPACE": workspace})
+
+
+def get_workspace_context(bin_path, project_path):
+ workspace_ctx = {"current": "default", "all": []}
+ command = [bin_path, 'workspace', 'list', '-no-color']
+ rc, out, err = module.run_command(command, cwd=project_path)
+ if rc != 0:
+ module.warn("Failed to list Terraform workspaces:\n{0}".format(err))
+ for item in out.split('\n'):
+ stripped_item = item.strip()
+ if not stripped_item:
+ continue
+ elif stripped_item.startswith('* '):
+ workspace_ctx["current"] = stripped_item.replace('* ', '')
+ workspace_ctx["all"].append(stripped_item.replace('* ', ''))
+ else:
+ workspace_ctx["all"].append(stripped_item)
+ return workspace_ctx
+
+
+def _workspace_cmd(bin_path, project_path, action, workspace):
+ command = [bin_path, 'workspace', action, workspace, '-no-color']
+ rc, out, err = module.run_command(command, check_rc=True, cwd=project_path)
+ return rc, out, err
+
+
+def create_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'new', workspace)
+
+
+def select_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'select', workspace)
+
+
+def remove_workspace(bin_path, project_path, workspace):
+ _workspace_cmd(bin_path, project_path, 'delete', workspace)
+
+
+def build_plan(command, project_path, variables_args, state_file, targets, state, apply_args, plan_path=None):
+ if plan_path is None:
+ f, plan_path = tempfile.mkstemp(suffix='.tfplan')
+
+ local_command = command[:]
+
+ plan_command = [command[0], 'plan']
+
+ if state == "planned":
+ for c in local_command[1:]:
+ plan_command.append(c)
+
+ if state == "present":
+ for a in apply_args:
+ local_command.remove(a)
+ for c in local_command[1:]:
+ plan_command.append(c)
+
+ plan_command.extend(['-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path])
+
+ for t in targets:
+ plan_command.extend(['-target', t])
+
+ plan_command.extend(_state_args(state_file))
+
+ rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path)
+
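+ # With -detailed-exitcode, `terraform plan` exits 0 when there are no
+ # changes, 1 on error, and 2 when changes are present; the branches
+ # below map these exit codes onto the returned values.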
+ if rc == 0:
+ # no changes
+ return plan_path, False, out, err, plan_command if state == 'planned' else command
+ elif rc == 1:
+ # failure to plan
+ module.fail_json(
+ msg='Terraform plan could not be created\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
+ out=out,
+ err=err,
+ cmd=' '.join(plan_command),
+ args=' '.join([shlex_quote(arg) for arg in variables_args])
+ )
+ )
+ elif rc == 2:
+ # changes, but successful
+ return plan_path, True, out, err, plan_command if state == 'planned' else command
+
+ module.fail_json(msg='Terraform plan failed with unexpected exit code {rc}.\nSTDOUT: {out}\nSTDERR: {err}\nCOMMAND: {cmd} {args}'.format(
+ rc=rc,
+ out=out,
+ err=err,
+ cmd=' '.join(plan_command),
+ args=' '.join([shlex_quote(arg) for arg in variables_args])
+ ))
+
+
+def main():
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ project_path=dict(required=True, type='path'),
+ binary_path=dict(type='path'),
+ plugin_paths=dict(type='list', elements='path'),
+ workspace=dict(type='str', default='default'),
+ purge_workspace=dict(type='bool', default=False),
+ state=dict(default='present', choices=['present', 'absent', 'planned']),
+ variables=dict(type='dict'),
+ complex_vars=dict(type='bool', default=False),
+ variables_files=dict(aliases=['variables_file'], type='list', elements='path'),
+ plan_file=dict(type='path'),
+ state_file=dict(type='path'),
+ targets=dict(type='list', elements='str', default=[]),
+ lock=dict(type='bool', default=True),
+ lock_timeout=dict(type='int',),
+ force_init=dict(type='bool', default=False),
+ backend_config=dict(type='dict'),
+ backend_config_files=dict(type='list', elements='path'),
+ init_reconfigure=dict(type='bool', default=False),
+ overwrite_init=dict(type='bool', default=True),
+ check_destroy=dict(type='bool', default=False),
+ parallelism=dict(type='int'),
+ provider_upgrade=dict(type='bool', default=False),
+ ),
+ required_if=[('state', 'planned', ['plan_file'])],
+ supports_check_mode=True,
+ )
+
+ project_path = module.params.get('project_path')
+ bin_path = module.params.get('binary_path')
+ plugin_paths = module.params.get('plugin_paths')
+ workspace = module.params.get('workspace')
+ purge_workspace = module.params.get('purge_workspace')
+ state = module.params.get('state')
+ variables = module.params.get('variables') or {}
+ complex_vars = module.params.get('complex_vars')
+ variables_files = module.params.get('variables_files')
+ plan_file = module.params.get('plan_file')
+ state_file = module.params.get('state_file')
+ force_init = module.params.get('force_init')
+ backend_config = module.params.get('backend_config')
+ backend_config_files = module.params.get('backend_config_files')
+ init_reconfigure = module.params.get('init_reconfigure')
+ overwrite_init = module.params.get('overwrite_init')
+ check_destroy = module.params.get('check_destroy')
+ provider_upgrade = module.params.get('provider_upgrade')
+
+ if bin_path is not None:
+ command = [bin_path]
+ else:
+ command = [module.get_bin_path('terraform', required=True)]
+
+ checked_version = get_version(command[0])
+
+ if LooseVersion(checked_version) < LooseVersion('0.15.0'):
+ DESTROY_ARGS = ('destroy', '-no-color', '-force')
+ APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
+ else:
+ DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
+ APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')
+
+ if force_init:
+ if overwrite_init or not os.path.isfile(os.path.join(project_path, ".terraform", "terraform.tfstate")):
+ init_plugins(command[0], project_path, backend_config, backend_config_files, init_reconfigure, provider_upgrade, plugin_paths, workspace)
+
+ workspace_ctx = get_workspace_context(command[0], project_path)
+ if workspace_ctx["current"] != workspace:
+ if workspace not in workspace_ctx["all"]:
+ create_workspace(command[0], project_path, workspace)
+ else:
+ select_workspace(command[0], project_path, workspace)
+
+ if state == 'present':
+ command.extend(APPLY_ARGS)
+ elif state == 'absent':
+ command.extend(DESTROY_ARGS)
+
+ if state == 'present' and module.params.get('parallelism') is not None:
+ command.append('-parallelism=%d' % module.params.get('parallelism'))
+
+ def format_args(vars):
+ if isinstance(vars, str):
+ return '"{string}"'.format(string=vars.replace('\\', '\\\\').replace('"', '\\"'))
+ elif isinstance(vars, bool):
+ if vars:
+ return 'true'
+ else:
+ return 'false'
+ return str(vars)
+
+ def process_complex_args(vars):
+ ret_out = []
+ if isinstance(vars, dict):
+ for k, v in vars.items():
+ if isinstance(v, dict):
+ ret_out.append('{0}={{{1}}}'.format(k, process_complex_args(v)))
+ elif isinstance(v, list):
+ ret_out.append("{0}={1}".format(k, process_complex_args(v)))
+ elif isinstance(v, (integer_types, float, str, bool)):
+ ret_out.append('{0}={1}'.format(k, format_args(v)))
+ else:
+ # only to handle anything unforeseen
+ module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
+ if isinstance(vars, list):
+ l_out = []
+ for item in vars:
+ if isinstance(item, dict):
+ l_out.append("{{{0}}}".format(process_complex_args(item)))
+ elif isinstance(item, list):
+ l_out.append("{0}".format(process_complex_args(item)))
+ elif isinstance(item, (str, integer_types, float, bool)):
+ l_out.append(format_args(item))
+ else:
+ # only to handle anything unforeseen
+ module.fail_json(msg="Supported types are, dictionaries, lists, strings, integer_types, boolean and float.")
+
+ ret_out.append("[{0}]".format(",".join(l_out)))
+ return ",".join(ret_out)
+
+ variables_args = []
+ if complex_vars:
+ for k, v in variables.items():
+ if isinstance(v, dict):
+ variables_args.extend([
+ '-var',
+ '{0}={{{1}}}'.format(k, process_complex_args(v))
+ ])
+ elif isinstance(v, list):
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, process_complex_args(v))
+ ])
+ # on the top-level we need to pass just the python string with necessary
+ # terraform string escape sequences
+ elif isinstance(v, str):
+ variables_args.extend([
+ '-var',
+ "{0}={1}".format(k, v)
+ ])
+ else:
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, format_args(v))
+ ])
+ else:
+ for k, v in variables.items():
+ variables_args.extend([
+ '-var',
+ '{0}={1}'.format(k, v)
+ ])
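+ # Illustrative result (hypothetical values): variables={"name": "vm1",
+ # "cpus": 2} with complex_vars=false yields the arguments
+ # -var name=vm1 -var cpus=2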
+
+ if variables_files:
+ for f in variables_files:
+ variables_args.extend(['-var-file', f])
+
+ preflight_validation(command[0], project_path, checked_version, variables_args)
+
+ if module.params.get('lock') is not None:
+ if module.params.get('lock'):
+ command.append('-lock=true')
+ else:
+ command.append('-lock=false')
+ if module.params.get('lock_timeout') is not None:
+ command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
+
+ for t in (module.params.get('targets') or []):
+ command.extend(['-target', t])
+
+ # we aren't sure if this plan will result in changes, so assume yes
+ needs_application, changed = True, False
+
+ out, err = '', ''
+
+ if state == 'absent':
+ command.extend(variables_args)
+ elif state == 'present' and plan_file:
+ if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
+ command.append(plan_file)
+ else:
+ module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
+ else:
+ plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
+ module.params.get('targets'), state, APPLY_ARGS, plan_file)
+ if state == 'present' and check_destroy and '- destroy' in out:
+ module.fail_json(msg="Aborting command because it would destroy some resources. "
+ "Consider switching the 'check_destroy' to false to suppress this error")
+ command.append(plan_file)
+
+ if needs_application and not module.check_mode and state != 'planned':
+ rc, out, err = module.run_command(command, check_rc=False, cwd=project_path)
+ if rc != 0:
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ module.fail_json(msg=err.rstrip(), rc=rc, stdout=out,
+ stdout_lines=out.splitlines(), stderr=err,
+ stderr_lines=err.splitlines(),
+ cmd=' '.join(command))
+ # check the output to decide whether changes were made during execution
+ if ' 0 added, 0 changed' not in out and not state == "absent" or ' 0 destroyed' not in out:
+ changed = True
+
+ outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
+ rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
+ outputs = {}
+ if rc == 1:
+ module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
+ elif rc != 0:
+ module.fail_json(
+ msg="Failure when getting Terraform outputs. "
+ "Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
+ command=' '.join(outputs_command))
+ else:
+ outputs = json.loads(outputs_text)
+
+ # Restore the Terraform workspace found when running the module
+ if workspace_ctx["current"] != workspace:
+ select_workspace(command[0], project_path, workspace_ctx["current"])
+ if state == 'absent' and workspace != 'default' and purge_workspace is True:
+ remove_workspace(command[0], project_path, workspace)
+
+ module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/timezone.py b/ansible_collections/community/general/plugins/modules/timezone.py
new file mode 100644
index 000000000..05849e4bb
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/timezone.py
@@ -0,0 +1,923 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Shinichi TAMURA (@tmshn)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: timezone
+short_description: Configure timezone setting
+description:
+ - This module configures the timezone setting, both of the system clock and of the hardware clock.
+ If you want to set up NTP, use the M(ansible.builtin.service) module.
+ - It is recommended to restart C(crond) after changing the timezone, otherwise the jobs may run at the wrong time.
+ - Several different tools are used depending on the OS/Distribution involved.
+ For Linux it can use C(timedatectl) or edit C(/etc/sysconfig/clock) or C(/etc/timezone) and C(hwclock).
+ On SmartOS C(sm-set-timezone) is used, on macOS C(systemsetup), and on BSD C(/etc/localtime) is modified.
+ On AIX, C(chtz) is used.
+ - Make sure that the zoneinfo files are installed with the appropriate OS package, like C(tzdata) (usually always installed,
+ when not using a minimal installation like Alpine Linux).
+ - As of Ansible 2.3 support was added for SmartOS and BSDs.
+ - As of Ansible 2.4 support was added for macOS.
+ - As of Ansible 2.9 support was added for AIX 6.1+.
+ - Windows and HPUX are not supported, please let us know if you find any other OS/distro in which this fails.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ name:
+ description:
+ - Name of the timezone for the system clock.
+ - Default is to keep current setting.
+ - B(At least one of name and hwclock are required.)
+ type: str
+ hwclock:
+ description:
+ - Whether the hardware clock is in UTC or in local timezone.
+ - Default is to keep current setting.
+ - Note that changing this option is not recommended and may fail
+ to apply, especially in virtual environments such as AWS.
+ - B(At least one of name and hwclock are required.)
+ - I(Only used on Linux.)
+ type: str
+ aliases: [ rtc ]
+ choices: [ local, UTC ]
+notes:
+ - On SmartOS the C(sm-set-timezone) utility (part of the smtools package) is required to set the timezone within a zone.
+ - On AIX only Olson/tz database timezones are usable (POSIX is not supported).
+ - An OS reboot is also required on AIX for the new timezone setting to take effect.
+author:
+ - Shinichi TAMURA (@tmshn)
+ - Jasper Lievisse Adriaanse (@jasperla)
+ - Indrajit Raychaudhuri (@indrajitr)
+'''
+
+RETURN = r'''
+diff:
+ description: The differences about the given arguments.
+ returned: success
+ type: complex
+ contains:
+ before:
+ description: The values before change
+ type: dict
+ after:
+ description: The values after change
+ type: dict
+'''
+
+EXAMPLES = r'''
+- name: Set timezone to Asia/Tokyo
+ community.general.timezone:
+ name: Asia/Tokyo
+'''
+
+import errno
+import os
+import platform
+import random
+import re
+import string
+import filecmp
+
+from ansible.module_utils.basic import AnsibleModule, get_distribution
+from ansible.module_utils.six import iteritems
+
+
+class Timezone(object):
+ """This is a generic Timezone manipulation class that is subclassed based on platform.
+
+ A subclass may wish to override the following action methods:
+ - get(key, phase) ... get the value from the system at `phase`
+ - set(key, value) ... set the value to the current system
+ """
+
+ def __new__(cls, module):
+ """Return the platform-specific subclass.
+
+ It does not use load_platform_subclass() because it needs to judge based
+ on whether the `timedatectl` command exists and is available.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ if platform.system() == 'Linux':
+ timedatectl = module.get_bin_path('timedatectl')
+ if timedatectl is not None:
+ rc, stdout, stderr = module.run_command(timedatectl)
+ if rc == 0:
+ return super(Timezone, SystemdTimezone).__new__(SystemdTimezone)
+ else:
+ module.debug('timedatectl command was found but not usable: %s. using other method.' % stderr)
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ else:
+ return super(Timezone, NosystemdTimezone).__new__(NosystemdTimezone)
+ elif re.match('^joyent_.*Z', platform.version()):
+ # platform.system() returns SunOS, which is too broad. So look at the
+ # platform version instead. However we have to ensure that we're not
+ # running in the global zone where changing the timezone has no effect.
+ zonename_cmd = module.get_bin_path('zonename')
+ if zonename_cmd is not None:
+ (rc, stdout, dummy) = module.run_command(zonename_cmd)
+ if rc == 0 and stdout.strip() == 'global':
+ module.fail_json(msg='Adjusting timezone is not supported in Global Zone')
+
+ return super(Timezone, SmartOSTimezone).__new__(SmartOSTimezone)
+ elif platform.system() == 'Darwin':
+ return super(Timezone, DarwinTimezone).__new__(DarwinTimezone)
+ elif re.match('^(Free|Net|Open)BSD', platform.platform()):
+ return super(Timezone, BSDTimezone).__new__(BSDTimezone)
+ elif platform.system() == 'AIX':
+ AIXoslevel = int(platform.version() + platform.release())
+ if AIXoslevel >= 61:
+ return super(Timezone, AIXTimezone).__new__(AIXTimezone)
+ else:
+ module.fail_json(msg='AIX os level must be >= 61 for timezone module (Target: %s).' % AIXoslevel)
+ else:
+ # Not supported yet
+ return super(Timezone, Timezone).__new__(Timezone)
+
+ def __init__(self, module):
+ """Initialize of the class.
+
+ Args:
+ module: The AnsibleModule.
+ """
+ super(Timezone, self).__init__()
+ self.msg = []
+ # `self.value` holds the values for each param in each phase.
+ # Initially there is only info for the "planned" phase, but
+ # `self.check()` will fill in the others.
+ self.value = dict()
+ for key in module.argument_spec:
+ value = module.params[key]
+ if value is not None:
+ self.value[key] = dict(planned=value)
+ self.module = module
+
+ def abort(self, msg):
+ """Abort the process with error message.
+
+ This is just the wrapper of module.fail_json().
+
+ Args:
+ msg: The error message.
+ """
+ error_msg = ['Error message:', msg]
+ if len(self.msg) > 0:
+ error_msg.append('Other message(s):')
+ error_msg.extend(self.msg)
+ self.module.fail_json(msg='\n'.join(error_msg))
+
+ def execute(self, *commands, **kwargs):
+ """Execute the shell command.
+
+ This is just the wrapper of module.run_command().
+
+ Args:
+ *commands: The command to execute.
+ It will be concatenated with single space.
+ **kwargs: Only 'log' key is checked.
+ If kwargs['log'] is true, record the command to self.msg.
+
+ Returns:
+ stdout: Standard output of the command.
+ """
+ command = ' '.join(commands)
+ (rc, stdout, stderr) = self.module.run_command(command, check_rc=True)
+ if kwargs.get('log', False):
+ self.msg.append('executed `%s`' % command)
+ return stdout
+
+ def diff(self, phase1='before', phase2='after'):
+ """Calculate the difference between given 2 phases.
+
+ Args:
+ phase1, phase2: The names of phase to compare.
+
+ Returns:
+ diff: The difference of value between phase1 and phase2.
+ This is in the format which can be used with the
+ `--diff` option of ansible-playbook.
+ """
+ diff = {phase1: {}, phase2: {}}
+ for key, value in iteritems(self.value):
+ diff[phase1][key] = value[phase1]
+ diff[phase2][key] = value[phase2]
+ return diff
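+ # Illustrative return value (hypothetical): diff('before', 'after') may
+ # yield {'before': {'name': 'UTC'}, 'after': {'name': 'Asia/Tokyo'}}.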
+
+ def check(self, phase):
+ """Check the state in given phase and set it to `self.value`.
+
+ Args:
+ phase: The name of the phase to check.
+
+ Returns:
+ NO RETURN VALUE
+ """
+ if phase == 'planned':
+ return
+ for key, value in iteritems(self.value):
+ value[phase] = self.get(key, phase)
+
+ def change(self):
+ """Make the changes effect based on `self.value`."""
+ for key, value in iteritems(self.value):
+ if value['before'] != value['planned']:
+ self.set(key, value['planned'])
+
+ # ===========================================
+ # Platform specific methods (must be replaced by subclass).
+
+ def get(self, key, phase):
+ """Get the value for the key at the given phase.
+
+ Called from self.check().
+
+ Args:
+ key: The key to get the value
+ phase: The phase to get the value
+
+ Return:
+ value: The value for the key at the given phase.
+ """
+ self.abort('get(key, phase) is not implemented on target platform')
+
+ def set(self, key, value):
+ """Set the value for the key (of course, for the phase 'after').
+
+ Called from self.change().
+
+ Args:
+ key: Key to set the value
+ value: Value to set
+ """
+ self.abort('set(key, value) is not implemented on target platform')
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ tzfile = '/usr/share/zoneinfo/%s' % tz
+ if not os.path.isfile(tzfile):
+ self.abort('given timezone "%s" is not available' % tz)
+ return tzfile
+
+
+class SystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for systemd-powered Linux.
+
+ It uses the `timedatectl` command to check/set all arguments.
+ """
+
+ regexps = dict(
+ hwclock=re.compile(r'^\s*RTC in local TZ\s*:\s*([^\s]+)', re.MULTILINE),
+ name=re.compile(r'^\s*Time ?zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
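+ # Illustrative `timedatectl status` lines matched by the regexps above:
+ #   Time zone: Asia/Tokyo (JST, +0900)
+ #   RTC in local TZ: no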
+
+ subcmds = dict(
+ hwclock='set-local-rtc',
+ name='set-timezone'
+ )
+
+ def __init__(self, module):
+ super(SystemdTimezone, self).__init__(module)
+ self.timedatectl = module.get_bin_path('timedatectl', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_status(self, phase):
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.timedatectl, 'status')
+ return self.status[phase]
+
+ def get(self, key, phase):
+ status = self._get_status(phase)
+ value = self.regexps[key].search(status).group(1)
+ if key == 'hwclock':
+ # For key='hwclock'; convert yes/no -> local/UTC
+ if self.module.boolean(value):
+ value = 'local'
+ else:
+ value = 'UTC'
+ return value
+
+ def set(self, key, value):
+ # For key='hwclock'; convert UTC/local -> yes/no
+ if key == 'hwclock':
+ if value == 'local':
+ value = 'yes'
+ else:
+ value = 'no'
+ self.execute(self.timedatectl, self.subcmds[key], value, log=True)
+
+
+class NosystemdTimezone(Timezone):
+ """This is a Timezone manipulation class for non systemd-powered Linux.
+
+ For timezone setting, it edits the following file and reflect changes:
+ - /etc/sysconfig/clock ... RHEL/CentOS
+ - /etc/timezone ... Debian/Ubuntu
+ For hwclock setting, it executes `hwclock --systohc` command with the
+ '--utc' or '--localtime' option.
+ """
+
+ conf_files = dict(
+ name=None, # To be set in __init__
+ hwclock=None, # To be set in __init__
+ adjtime='/etc/adjtime'
+ )
+
+ # It's fine if all three config files don't exist
+ allow_no_file = dict(
+ name=True,
+ hwclock=True,
+ adjtime=True
+ )
+
+ regexps = dict(
+ name=None, # To be set in __init__
+ hwclock=re.compile(r'^UTC\s*=\s*([^\s]+)', re.MULTILINE),
+ adjtime=re.compile(r'^(UTC|LOCAL)$', re.MULTILINE)
+ )
+
+ dist_regexps = dict(
+ SuSE=re.compile(r'^TIMEZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE),
+ redhat=re.compile(r'^ZONE\s*=\s*"?([^"\s]+)"?', re.MULTILINE)
+ )
+
+ dist_tzline_format = dict(
+ SuSE='TIMEZONE="%s"\n',
+ redhat='ZONE="%s"\n'
+ )
+
+ def __init__(self, module):
+ super(NosystemdTimezone, self).__init__(module)
+ # Validate given timezone
+ planned_tz = ''
+ if 'name' in self.value:
+ tzfile = self._verify_timezone()
+ planned_tz = self.value['name']['planned']
+ # `--remove-destination` is needed if /etc/localtime is a symlink so
+ # that it overwrites it instead of following it.
+ self.update_timezone = ['%s --remove-destination %s /etc/localtime' % (self.module.get_bin_path('cp', required=True), tzfile)]
+ self.update_hwclock = self.module.get_bin_path('hwclock', required=True)
+ distribution = get_distribution()
+ self.conf_files['name'] = '/etc/timezone'
+ self.regexps['name'] = re.compile(r'^([^\s]+)', re.MULTILINE)
+ self.tzline_format = '%s\n'
+ # Distribution-specific configurations
+ if self.module.get_bin_path('dpkg-reconfigure') is not None:
+ # Debian/Ubuntu
+ if 'name' in self.value:
+ self.update_timezone = ['%s -sf %s /etc/localtime' % (self.module.get_bin_path('ln', required=True), tzfile),
+ '%s --frontend noninteractive tzdata' % self.module.get_bin_path('dpkg-reconfigure', required=True)]
+ self.conf_files['hwclock'] = '/etc/default/rcS'
+ elif distribution == 'Alpine' or distribution == 'Gentoo':
+ self.conf_files['hwclock'] = '/etc/conf.d/hwclock'
+ if distribution == 'Alpine':
+ self.update_timezone = ['%s -z %s' % (self.module.get_bin_path('setup-timezone', required=True), planned_tz)]
+ else:
+ # RHEL/CentOS/SUSE
+ if self.module.get_bin_path('tzdata-update') is not None:
+ # tzdata-update cannot update the timezone if /etc/localtime is
+ # a symlink so we have to use cp to update the time zone which
+ # was set above.
+ if not os.path.islink('/etc/localtime'):
+ self.update_timezone = [self.module.get_bin_path('tzdata-update', required=True)]
+ # else:
+ # self.update_timezone = 'cp --remove-destination ...' <- configured above
+ self.conf_files['name'] = '/etc/sysconfig/clock'
+ self.conf_files['hwclock'] = '/etc/sysconfig/clock'
+ try:
+ f = open(self.conf_files['name'], 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, 'name'):
+ # If the config file doesn't exist detect the distribution and set regexps.
+ if distribution == 'SuSE':
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+ else:
+ self.abort('could not read configuration file "%s"' % self.conf_files['name'])
+ else:
+ # The key for timezone might be `ZONE` or `TIMEZONE`
+ # (the former is used in RHEL/CentOS and the latter is used in SUSE linux).
+ # So check the content of /etc/sysconfig/clock and decide which key to use.
+ sysconfig_clock = f.read()
+ f.close()
+ if re.search(r'^TIMEZONE\s*=', sysconfig_clock, re.MULTILINE):
+ # For SUSE
+ self.regexps['name'] = self.dist_regexps['SuSE']
+ self.tzline_format = self.dist_tzline_format['SuSE']
+ else:
+ # For RHEL/CentOS
+ self.regexps['name'] = self.dist_regexps['redhat']
+ self.tzline_format = self.dist_tzline_format['redhat']
+
+ def _allow_ioerror(self, err, key):
+ # In some cases, even if the target file does not exist,
+ # simply creating it may solve the problem.
+ # In such cases, we should continue the configuration rather than aborting.
+ if err.errno != errno.ENOENT:
+ # If the error is not ENOENT ("No such file or directory"),
+ # (e.g., permission error, etc), we should abort.
+ return False
+ return self.allow_no_file.get(key, False)
+
+ def _edit_file(self, filename, regexp, value, key):
+ """Replace the first matched line with given `value`.
+
+ If `regexp` matched more than once, other than the first line will be deleted.
+
+ Args:
+ filename: The name of the file to edit.
+ regexp: The regular expression to search with.
+ value: The line which will be inserted.
+ key: The key for which the file is being edited.
+ """
+ # Read the file
+ try:
+ file = open(filename, 'r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ lines = []
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ lines = file.readlines()
+ file.close()
+ # Find all matched lines
+ matched_indices = []
+ for i, line in enumerate(lines):
+ if regexp.search(line):
+ matched_indices.append(i)
+ if len(matched_indices) > 0:
+ insert_line = matched_indices[0]
+ else:
+ insert_line = 0
+ # Remove all matched lines
+ for i in matched_indices[::-1]:
+ del lines[i]
+ # ...and insert the value
+ lines.insert(insert_line, value)
+ # Write the changes
+ try:
+ file = open(filename, 'w')
+ except IOError:
+ self.abort('tried to configure %s using a file "%s", but could not write to it' % (key, filename))
+ else:
+ file.writelines(lines)
+ file.close()
+ self.msg.append('Added 1 line and deleted %s line(s) on %s' % (len(matched_indices), filename))
+
+ def _get_value_from_config(self, key, phase):
+ filename = self.conf_files[key]
+ try:
+ file = open(filename, mode='r')
+ except IOError as err:
+ if self._allow_ioerror(err, key):
+ if key == 'hwclock':
+ return 'n/a'
+ elif key == 'adjtime':
+ return 'UTC'
+ elif key == 'name':
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not read it' % (key, filename))
+ else:
+ status = file.read()
+ file.close()
+ try:
+ value = self.regexps[key].search(status).group(1)
+ except AttributeError:
+ if key == 'hwclock':
+ # If we cannot find UTC in the config that's fine.
+ return 'n/a'
+ elif key == 'adjtime':
+                # If we cannot find UTC/LOCAL in /etc/adjtime, that means UTC
+                # will be used by default.
+ return 'UTC'
+ elif key == 'name':
+ if phase == 'before':
+ # In 'before' phase UTC/LOCAL doesn't need to be set in
+ # the timezone config file, so we ignore this error.
+ return 'n/a'
+ else:
+ self.abort('tried to configure %s using a file "%s", but could not find a valid value in it' % (key, filename))
+ else:
+ if key == 'hwclock':
+ # convert yes/no -> UTC/local
+ if self.module.boolean(value):
+ value = 'UTC'
+ else:
+ value = 'local'
+ elif key == 'adjtime':
+ # convert LOCAL -> local
+ if value != 'UTC':
+ value = value.lower()
+ return value
+
+ def get(self, key, phase):
+ planned = self.value[key]['planned']
+ if key == 'hwclock':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+ # If the value in the config file is the same as the 'planned'
+ # value, we need to check /etc/adjtime.
+ value = self._get_value_from_config('adjtime', phase)
+ elif key == 'name':
+ value = self._get_value_from_config(key, phase)
+ if value == planned:
+            # If the planned value is the same as the one in the config file
+ # we need to check if /etc/localtime is also set to the 'planned' zone.
+ if os.path.islink('/etc/localtime'):
+ # If /etc/localtime is a symlink and is not set to the TZ we 'planned'
+ # to set, we need to return the TZ which the symlink points to.
+ if os.path.exists('/etc/localtime'):
+ # We use readlink() because on some distros zone files are symlinks
+ # to other zone files, so it's hard to get which TZ is actually set
+ # if we follow the symlink.
+ path = os.readlink('/etc/localtime')
+                    # Most Linux distributions have it in /usr/share/zoneinfo;
+                    # Alpine Linux links under /etc/zoneinfo.
+ linktz = re.search(r'(?:/(?:usr/share|etc)/zoneinfo/)(.*)', path, re.MULTILINE)
+ if linktz:
+ valuelink = linktz.group(1)
+ if valuelink != planned:
+ value = valuelink
+ else:
+ # Set current TZ to 'n/a' if the symlink points to a path
+ # which isn't a zone file.
+ value = 'n/a'
+ else:
+ # Set current TZ to 'n/a' if the symlink to the zone file is broken.
+ value = 'n/a'
+ else:
+            # If /etc/localtime is not a symlink, the best we can do is compare it with
+            # the 'planned' zone info file and return 'n/a' if they differ.
+ try:
+ if not filecmp.cmp('/etc/localtime', '/usr/share/zoneinfo/' + planned):
+ return 'n/a'
+ except Exception:
+ return 'n/a'
+ else:
+ self.abort('unknown parameter "%s"' % key)
+ return value
+
+ def set_timezone(self, value):
+ self._edit_file(filename=self.conf_files['name'],
+ regexp=self.regexps['name'],
+ value=self.tzline_format % value,
+ key='name')
+ for cmd in self.update_timezone:
+ self.execute(cmd)
+
+ def set_hwclock(self, value):
+ if value == 'local':
+ option = '--localtime'
+ utc = 'no'
+ else:
+ option = '--utc'
+ utc = 'yes'
+ if self.conf_files['hwclock'] is not None:
+ self._edit_file(filename=self.conf_files['hwclock'],
+ regexp=self.regexps['hwclock'],
+ value='UTC=%s\n' % utc,
+ key='hwclock')
+ self.execute(self.update_hwclock, '--systohc', option, log=True)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.set_timezone(value)
+ elif key == 'hwclock':
+ self.set_hwclock(value)
+ else:
+ self.abort('unknown parameter "%s"' % key)
+
+
+class SmartOSTimezone(Timezone):
+ """This is a Timezone manipulation class for SmartOS instances.
+
+ It uses the C(sm-set-timezone) utility to set the timezone, and
+ inspects C(/etc/default/init) to determine the current timezone.
+
+ NB: A zone needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(SmartOSTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('sm-set-timezone', required=False)
+ if not self.settimezone:
+ module.fail_json(msg='sm-set-timezone not found. Make sure the smtools package is installed.')
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/default/init`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ try:
+ f = open('/etc/default/init', 'r')
+ for line in f:
+ m = re.match('^TZ=(.*)$', line.strip())
+ if m:
+ return m.groups()[0]
+ except Exception:
+ self.module.fail_json(msg='Failed to read /etc/default/init')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through sm-set-timezone, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ cmd = 'sm-set-timezone %s' % value
+
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # sm-set-timezone knows no state and will always set the timezone.
+ # XXX: https://github.com/joyent/smtools/pull/2
+ m = re.match(r'^\* Changed (to)? timezone (to)? (%s).*' % value, stdout.splitlines()[1])
+ if not (m and m.groups()[-1] == value):
+ self.module.fail_json(msg='Failed to set timezone')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class DarwinTimezone(Timezone):
+ """This is the timezone implementation for Darwin which, unlike other *BSD
+    implementations, uses the `systemsetup` command to check/set
+ the timezone.
+ """
+
+ regexps = dict(
+ name=re.compile(r'^\s*Time ?Zone\s*:\s*([^\s]+)', re.MULTILINE)
+ )
+
+ def __init__(self, module):
+ super(DarwinTimezone, self).__init__(module)
+ self.systemsetup = module.get_bin_path('systemsetup', required=True)
+ self.status = dict()
+ # Validate given timezone
+ if 'name' in self.value:
+ self._verify_timezone()
+
+ def _get_current_timezone(self, phase):
+ """Lookup the current timezone via `systemsetup -gettimezone`."""
+ if phase not in self.status:
+ self.status[phase] = self.execute(self.systemsetup, '-gettimezone')
+ return self.status[phase]
+
+ def _verify_timezone(self):
+ tz = self.value['name']['planned']
+ # Lookup the list of supported timezones via `systemsetup -listtimezones`.
+ # Note: Skip the first line that contains the label 'Time Zones:'
+ out = self.execute(self.systemsetup, '-listtimezones').splitlines()[1:]
+ tz_list = list(map(lambda x: x.strip(), out))
+ if tz not in tz_list:
+ self.abort('given timezone "%s" is not available' % tz)
+ return tz
+
+ def get(self, key, phase):
+ if key == 'name':
+ status = self._get_current_timezone(phase)
+ value = self.regexps[key].search(status).group(1)
+ return value
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ self.execute(self.systemsetup, '-settimezone', value, log=True)
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class BSDTimezone(Timezone):
+ """This is the timezone implementation for *BSD which works simply through
+ updating the `/etc/localtime` symlink to point to a valid timezone name under
+ `/usr/share/zoneinfo`.
+ """
+
+ def __init__(self, module):
+ super(BSDTimezone, self).__init__(module)
+
+ def __get_timezone(self):
+ zoneinfo_dir = '/usr/share/zoneinfo/'
+ localtime_file = '/etc/localtime'
+
+ # Strategy 1:
+        # If /etc/localtime does not exist, assume the timezone is UTC.
+ if not os.path.exists(localtime_file):
+ self.module.warn('Could not read /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ # Strategy 2:
+ # Follow symlink of /etc/localtime
+ zoneinfo_file = localtime_file
+ while not zoneinfo_file.startswith(zoneinfo_dir):
+ try:
+                # Follow the chain link by link; readlink() raises OSError on a
+                # regular file, which ends the chain.
+                zoneinfo_file = os.readlink(zoneinfo_file)
+ except OSError:
+ # OSError means "end of symlink chain" or broken link.
+ break
+ else:
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 3:
+ # (If /etc/localtime is not symlinked)
+ # Check all files in /usr/share/zoneinfo and return first non-link match.
+ for dname, dummy, fnames in sorted(os.walk(zoneinfo_dir)):
+ for fname in sorted(fnames):
+ zoneinfo_file = os.path.join(dname, fname)
+ if not os.path.islink(zoneinfo_file) and filecmp.cmp(zoneinfo_file, localtime_file):
+ return zoneinfo_file.replace(zoneinfo_dir, '')
+
+ # Strategy 4:
+ # As a fall-back, return 'UTC' as default assumption.
+ self.module.warn('Could not identify timezone name from /etc/localtime. Assuming UTC.')
+ return 'UTC'
+
+ def get(self, key, phase):
+ """Lookup the current timezone by resolving `/etc/localtime`."""
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ if key == 'name':
+ # First determine if the requested timezone is valid by looking in
+ # the zoneinfo directory.
+ zonefile = '/usr/share/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to stat %s' % zonefile)
+
+            # Now (somewhat) atomically update the symlink by creating a new
+            # symlink and moving it into place. Otherwise we would have to remove
+            # the original symlink and create the new one; however, that would
+            # create a race condition in case another process tried to read
+            # /etc/localtime between removal and creation.
+ suffix = "".join([random.choice(string.ascii_letters + string.digits) for x in range(0, 10)])
+ new_localtime = '/etc/localtime.' + suffix
+
+ try:
+ os.symlink(zonefile, new_localtime)
+ os.rename(new_localtime, '/etc/localtime')
+ except Exception:
+ os.remove(new_localtime)
+ self.module.fail_json(msg='Could not update /etc/localtime')
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+class AIXTimezone(Timezone):
+ """This is a Timezone manipulation class for AIX instances.
+
+ It uses the C(chtz) utility to set the timezone, and
+ inspects C(/etc/environment) to determine the current timezone.
+
+ While AIX time zones can be set using two formats (POSIX and
+    Olson), the preferred method is Olson.
+ See the following article for more information:
+ https://developer.ibm.com/articles/au-aix-posix/
+
+ NB: AIX needs to be rebooted in order for the change to be
+ activated.
+ """
+
+ def __init__(self, module):
+ super(AIXTimezone, self).__init__(module)
+ self.settimezone = self.module.get_bin_path('chtz', required=True)
+
+ def __get_timezone(self):
+ """ Return the current value of TZ= in /etc/environment """
+ try:
+ f = open('/etc/environment', 'r')
+ etcenvironment = f.read()
+ f.close()
+ except Exception:
+ self.module.fail_json(msg='Issue reading contents of /etc/environment')
+
+ match = re.search(r'^TZ=(.*)$', etcenvironment, re.MULTILINE)
+ if match:
+ return match.group(1)
+ else:
+ return None
+
+ def get(self, key, phase):
+ """Lookup the current timezone name in `/etc/environment`. If anything else
+ is requested, or if the TZ field is not set we fail.
+ """
+ if key == 'name':
+ return self.__get_timezone()
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+ def set(self, key, value):
+ """Set the requested timezone through chtz, an invalid timezone name
+ will be rejected and we have no further input validation to perform.
+ """
+ if key == 'name':
+ # chtz seems to always return 0 on AIX 7.2, even for invalid timezone values.
+            # It will only return non-zero if the chtz command itself fails; it does not check
+            # for valid timezones. We need to perform a basic check to confirm that the timezone
+            # definition exists in /usr/share/lib/zoneinfo.
+            # This does mean that we can only support Olson for now. The commented-out regex
+            # below detects Olson-style timezone names, so in the future we could detect
+            # POSIX or Olson formats and act accordingly.
+
+ # regex_olson = re.compile('^([a-z0-9_\-\+]+\/?)+$', re.IGNORECASE)
+ # if not regex_olson.match(value):
+            # msg = 'Supplied timezone (%s) does not appear to be a valid Olson string' % value
+ # self.module.fail_json(msg=msg)
+
+ # First determine if the requested timezone is valid by looking in the zoneinfo
+ # directory.
+ zonefile = '/usr/share/lib/zoneinfo/' + value
+ try:
+ if not os.path.isfile(zonefile):
+ self.module.fail_json(msg='%s is not a recognized timezone.' % value)
+ except Exception:
+ self.module.fail_json(msg='Failed to check %s.' % zonefile)
+
+ # Now set the TZ using chtz
+ cmd = 'chtz %s' % value
+ (rc, stdout, stderr) = self.module.run_command(cmd)
+
+ if rc != 0:
+ self.module.fail_json(msg=stderr)
+
+ # The best condition check we can do is to check the value of TZ after making the
+ # change.
+ TZ = self.__get_timezone()
+ if TZ != value:
+ msg = 'TZ value does not match post-change (Actual: %s, Expected: %s).' % (TZ, value)
+ self.module.fail_json(msg=msg)
+
+ else:
+ self.module.fail_json(msg='%s is not a supported option on target platform' % key)
+
+
+def main():
+ # Construct 'module' and 'tz'
+ module = AnsibleModule(
+ argument_spec=dict(
+ hwclock=dict(type='str', choices=['local', 'UTC'], aliases=['rtc']),
+ name=dict(type='str'),
+ ),
+ required_one_of=[
+ ['hwclock', 'name']
+ ],
+ supports_check_mode=True,
+ )
+ tz = Timezone(module)
+
+ # Check the current state
+ tz.check(phase='before')
+ if module.check_mode:
+ diff = tz.diff('before', 'planned')
+ # In check mode, 'planned' state is treated as 'after' state
+ diff['after'] = diff.pop('planned')
+ else:
+ # Make change
+ tz.change()
+ # Check the current state
+ tz.check(phase='after')
+ # Examine if the current state matches planned state
+ (after, planned) = tz.diff('after', 'planned').values()
+ if after != planned:
+        tz.abort('still not in the desired state, though changes have been made - '
+ 'planned: %s, after: %s' % (str(planned), str(after)))
+ diff = tz.diff('before', 'after')
+
+ changed = (diff['before'] != diff['after'])
+ if len(tz.msg) > 0:
+ module.exit_json(changed=changed, diff=diff, msg='\n'.join(tz.msg))
+ else:
+ module.exit_json(changed=changed, diff=diff)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/twilio.py b/ansible_collections/community/general/plugins/modules/twilio.py
new file mode 100644
index 000000000..270320c46
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/twilio.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Matt Makai <matthew.makai@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio
+description:
+ - Sends a text message to a phone number through the Twilio messaging API.
+notes:
+  - This module is non-idempotent because it sends a text message through the
+    external API. It is idempotent only in the case that the module fails.
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ account_sid:
+ type: str
+ description:
+      - User's Twilio account SID, found on the account page.
+ required: true
+ auth_token:
+ type: str
+ description:
+ - User's Twilio authentication token.
+ required: true
+ msg:
+ type: str
+ description:
+ - The body of the text message.
+ required: true
+ to_numbers:
+ type: list
+ elements: str
+ description:
+ - One or more phone numbers to send the text message to, format C(+15551112222).
+ required: true
+ aliases: [ to_number ]
+ from_number:
+ type: str
+ description:
+ - The Twilio number to send the text message from, format C(+15551112222).
+ required: true
+ media_url:
+ type: str
+ description:
+ - A URL with a picture, video or sound clip to send with an MMS
+ (multimedia message) instead of a plain SMS.
+ required: false
+
+author: "Matt Makai (@makaimc)"
+'''
+
+EXAMPLES = '''
+# send an SMS about the build status to (555) 303 5681
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: All servers with webserver role are now configured.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ delegate_to: localhost
+
+# send an SMS to multiple phone numbers about the deployment
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: This server configuration is now complete.
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15553258899
+ to_numbers:
+ - +15551113232
+ - +12025551235
+ - +19735559010
+ delegate_to: localhost
+
+# send an MMS to a single recipient with an update on the deployment
+# and an image of the results
+# note: replace account_sid and auth_token values with your credentials
+# and you have to have the 'from_number' on your Twilio account
+- name: Send a text message to a mobile phone through Twilio
+ community.general.twilio:
+ msg: Deployment complete!
+ account_sid: ACXXXXXXXXXXXXXXXXX
+ auth_token: ACXXXXXXXXXXXXXXXXX
+ from_number: +15552014545
+ to_number: +15553035681
+ media_url: https://demo.twilio.com/logo.png
+ delegate_to: localhost
+'''
+
+# =======================================
+# twilio module support methods
+#
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+
+
+def post_twilio_api(module, account_sid, auth_token, msg, from_number,
+ to_number, media_url=None):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible"
+
+ data = {'From': from_number, 'To': to_number, 'Body': msg}
+ if media_url:
+ data['MediaUrl'] = media_url
+ encoded_data = urlencode(data)
+
+ headers = {'User-Agent': AGENT,
+ 'Content-type': 'application/x-www-form-urlencoded',
+ 'Accept': 'application/json',
+ }
+
+ # Hack module params to have the Basic auth params that fetch_url expects
+ module.params['url_username'] = account_sid.replace('\n', '')
+ module.params['url_password'] = auth_token.replace('\n', '')
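+    # fetch_url() reads url_username/url_password from module.params and sends
+    # them as HTTP Basic auth, which is how the Twilio REST API authenticates.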
+
+ return fetch_url(module, URI, data=encoded_data, headers=headers)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True, no_log=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_numbers=dict(required=True, aliases=['to_number'], type='list', elements='str'),
+ media_url=dict(default=None, required=False),
+ ),
+ supports_check_mode=True
+ )
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_numbers = module.params['to_numbers']
+ media_url = module.params['media_url']
+
+ for number in to_numbers:
+ r, info = post_twilio_api(module, account_sid, auth_token, msg,
+ from_number, number, media_url)
+ if info['status'] not in [200, 201]:
+ body_message = "unknown error"
+ if 'body' in info:
+ body = module.from_json(info['body'])
+ body_message = body['message']
+ module.fail_json(msg="unable to send message to %s: %s" % (number, body_message))
+
+ module.exit_json(msg=msg, changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/typetalk.py b/ansible_collections/community/general/plugins/modules/typetalk.py
new file mode 100644
index 000000000..ddf9f3560
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/typetalk.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: typetalk
+short_description: Send a message to Typetalk
+description:
+  - Send a message to Typetalk using the Typetalk API.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ client_id:
+ type: str
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ type: str
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ type: int
+ description:
+      - Topic ID to post the message to.
+ required: true
+ msg:
+ type: str
+ description:
+      - Message body.
+ required: true
+requirements: [ json ]
+author: "Takashi Someda (@tksmd)"
+'''
+
+EXAMPLES = '''
+- name: Send a message to typetalk
+ community.general.typetalk:
+ client_id: 12345
+ client_secret: 12345
+ topic: 1
+ msg: install completed
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url, ConnectionError
+
+
+def do_request(module, url, params, headers=None):
+ data = urlencode(params)
+ if headers is None:
+ headers = dict()
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ r, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ exc = ConnectionError(info['msg'])
+ exc.code = info['status']
+ raise exc
+ return r
+
+
+def get_access_token(module, client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
+ res = do_request(module, 'https://typetalk.com/oauth2/access_token', params)
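+    # The response body is assumed to be a standard OAuth2 token response,
+    # for example {"access_token": "...", "token_type": "Bearer"}.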
+ return json.load(res)['access_token']
+
+
+def send_message(module, client_id, client_secret, topic, msg):
+ """
+    Send a message to Typetalk.
+ """
+ try:
+ access_token = get_access_token(module, client_id, client_secret)
+ url = 'https://typetalk.com/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(module, url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except ConnectionError as e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True, no_log=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not json:
+ module.fail_json(msg="json module is required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(module, client_id, client_secret, topic, msg)
+ if not res:
+        module.fail_json(msg='failed to send message with response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_record.py b/ansible_collections/community/general/plugins/modules/udm_dns_record.py
new file mode 100644
index 000000000..849c84a2d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/udm_dns_record.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_record
+author:
+ - Tobias Rüetschi (@keachi)
+short_description: Manage DNS entries on a Univention Corporate Server
+description:
+    - "This module allows managing DNS records on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+requirements:
+ - Univention
+ - ipaddress (for I(type=ptr_record))
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+options:
+ state:
+ type: str
+ default: "present"
+ choices: [ present, absent ]
+ description:
+      - Whether the DNS record is present or not.
+ name:
+ type: str
+ required: true
+ description:
+ - "Name of the record, this is also the DNS record. E.g. www for
+ www.example.com."
+ - For PTR records this has to be the IP address.
+ zone:
+ type: str
+ required: true
+ description:
+ - Corresponding DNS zone for this record, e.g. example.com.
+ - For PTR records this has to be the full reverse zone (for example C(1.1.192.in-addr.arpa)).
+ type:
+ type: str
+ required: true
+ description:
+ - "Define the record type. C(host_record) is a A or AAAA record,
+ C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
+ is a SRV record and C(txt_record) is a TXT record."
+ - "The available choices are: C(host_record), C(alias), C(ptr_record), C(srv_record), C(txt_record)."
+ data:
+ type: dict
+ default: {}
+ description:
+ - "Additional data for this record, e.g. ['a': '192.0.2.1'].
+ Required if I(state=present)."
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS record on a UCS
+ community.general.udm_dns_record:
+ name: www
+ zone: example.com
+ type: host_record
+ data:
+ a:
+ - 192.0.2.1
+ - 2001:0db8::42
+
+- name: Create a DNS v4 PTR record on a UCS
+ community.general.udm_dns_record:
+ name: 192.0.2.1
+ zone: 2.0.192.in-addr.arpa
+ type: ptr_record
+ data:
+ ptr_record: "www.example.com."
+
+- name: Create a DNS v6 PTR record on a UCS
+ community.general.udm_dns_record:
+ name: 2001:db8:0:0:0:ff00:42:8329
+ zone: 2.4.0.0.0.0.f.f.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa
+ type: ptr_record
+ data:
+ ptr_record: "www.example.com."
+'''
+
+
+RETURN = '''#'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils import deps
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+ config,
+ uldap,
+)
+
+
+with deps.declare("univention", msg="This module requires univention python bindings"):
+ from univention.admin.handlers.dns import (
+ forward_zone,
+ reverse_zone,
+ )
+
+with deps.declare("ipaddress"):
+ import ipaddress
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True, type='str'),
+ zone=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ data=dict(default={}, type='dict'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['data'])
+ ])
+ )
+
+ deps.validate(module, "univention")
+
+ type = module.params['type']
+ zone = module.params['zone']
+ name = module.params['name']
+ data = module.params['data']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ workname = name
+ if type == 'ptr_record':
+ deps.validate(module, "ipaddress")
+
+ try:
+ if 'arpa' not in zone:
+ raise Exception("Zone must be reversed zone for ptr_record. (e.g. 1.1.192.in-addr.arpa)")
+ ipaddr_rev = ipaddress.ip_address(name).reverse_pointer
+ subnet_offset = ipaddr_rev.find(zone)
+ if subnet_offset == -1:
+ raise Exception("reversed IP address {0} is not part of zone.".format(ipaddr_rev))
+ workname = ipaddr_rev[0:subnet_offset - 1]
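+            # Example: for name 192.0.2.1 and zone 2.0.192.in-addr.arpa, the
+            # reverse pointer is 1.2.0.192.in-addr.arpa, so workname becomes '1'.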
+ except Exception as e:
+ module.fail_json(
+ msg='handling PTR record for {0} in zone {1} failed: {2}'.format(name, zone, e)
+ )
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0})(relativeDomainName={1}))'.format(zone, workname),
+ attr=['dNSZone']
+ ))
+ exists = bool(len(obj))
+ container = 'zoneName={0},cn=dns,{1}'.format(zone, base_dn())
+ dn = 'relativeDomainName={0},{1}'.format(workname, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ so = forward_zone.lookup(
+ config(),
+ uldap(),
+ '(zone={0})'.format(zone),
+ scope='domain',
+ ) or reverse_zone.lookup(
+ config(),
+ uldap(),
+ '(zoneName={0})'.format(zone),
+ scope='domain',
+ )
+                # lookup() returns a list of zone objects; an empty list means
+                # the zone was not found.
+                if len(so) == 0:
+ raise Exception("Did not find zone '{0}' in Univention".format(zone))
+ obj = umc_module_for_add('dns/{0}'.format(type), container, superordinate=so[0])
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+
+ if type == 'ptr_record':
+ obj['ip'] = name
+ obj['address'] = workname
+ else:
+ obj['name'] = name
+
+ for k, v in data.items():
+ obj[k] = v
+ diff = obj.diff()
+ changed = obj.diff() != []
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ else:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns entry {0} in {1} failed: {2}'.format(name, container, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/udm_dns_zone.py b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
new file mode 100644
index 000000000..19f24fa1c
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/udm_dns_zone.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_dns_zone
+author:
+ - Tobias Rüetschi (@keachi)
+short_description: Manage DNS zones on a Univention Corporate Server
+description:
+    - "This module allows managing DNS zones on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+options:
+ state:
+ type: str
+ default: "present"
+ choices: [ present, absent ]
+ description:
+      - Whether the DNS zone is present or not.
+ type:
+ type: str
+ required: true
+ description:
+ - Define if the zone is a forward or reverse DNS zone.
+ - "The available choices are: C(forward_zone), C(reverse_zone)."
+ zone:
+ type: str
+ required: true
+ description:
+ - DNS zone name, e.g. C(example.com).
+ aliases: [name]
+ nameserver:
+ type: list
+ elements: str
+ default: []
+ description:
+ - List of appropriate name servers. Required if I(state=present).
+ interfaces:
+ type: list
+ elements: str
+ default: []
+ description:
+      - List of interface IP addresses on which the server should
+        respond for this zone. Required if I(state=present).
+
+ refresh:
+ type: int
+ default: 3600
+ description:
+ - Interval before the zone should be refreshed.
+ retry:
+ type: int
+ default: 1800
+ description:
+ - Interval that should elapse before a failed refresh should be retried.
+ expire:
+ type: int
+ default: 604800
+ description:
+ - Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
+ ttl:
+ type: int
+ default: 600
+ description:
+ - Minimum TTL field that should be exported with any RR from this zone.
+
+ contact:
+ type: str
+ default: ''
+ description:
+ - Contact person in the SOA record.
+ mx:
+ type: list
+ elements: str
+ default: []
+ description:
+      - List of MX servers. (These must be declared as A or AAAA records.)
+'''
+
+
+EXAMPLES = '''
+- name: Create a DNS zone on a UCS
+ community.general.udm_dns_zone:
+ zone: example.com
+ type: forward_zone
+ nameserver:
+ - ucs.example.com
+ interfaces:
+ - 192.0.2.1
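+
+# A reverse zone is created the same way; this sketch assumes the zone name is
+# the full in-addr.arpa name and has not been verified against a live UCS.
+- name: Create a reverse DNS zone on a UCS
+  community.general.udm_dns_zone:
+    zone: 2.0.192.in-addr.arpa
+    type: reverse_zone
+    nameserver:
+      - ucs.example.com
+    interfaces:
+      - 192.0.2.1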
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def convert_time(time):
+ """Convert a time in seconds into the biggest unit"""
+ units = [
+ (24 * 60 * 60, 'days'),
+ (60 * 60, 'hours'),
+ (60, 'minutes'),
+ (1, 'seconds'),
+ ]
+
+ if time == 0:
+ return ('0', 'seconds')
+ for unit in units:
+ if time >= unit[0]:
+ return ('{0}'.format(time // unit[0]), unit[1])
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ type=dict(required=True,
+ type='str'),
+ zone=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ nameserver=dict(default=[],
+ type='list',
+ elements='str'),
+ interfaces=dict(default=[],
+ type='list',
+ elements='str'),
+ refresh=dict(default=3600,
+ type='int'),
+ retry=dict(default=1800,
+ type='int'),
+ expire=dict(default=604800,
+ type='int'),
+ ttl=dict(default=600,
+ type='int'),
+ contact=dict(default='',
+ type='str'),
+ mx=dict(default=[],
+ type='list',
+ elements='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['nameserver', 'interfaces'])
+ ])
+ )
+ type = module.params['type']
+ zone = module.params['zone']
+ nameserver = module.params['nameserver']
+ interfaces = module.params['interfaces']
+ refresh = module.params['refresh']
+ retry = module.params['retry']
+ expire = module.params['expire']
+ ttl = module.params['ttl']
+ contact = module.params['contact']
+ mx = module.params['mx']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=dNSZone)(zoneName={0}))'.format(zone),
+ attr=['dNSZone']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=dns,{0}'.format(base_dn())
+ dn = 'zoneName={0},{1}'.format(zone, container)
+ if contact == '':
+ contact = 'root@{0}.'.format(zone)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('dns/{0}'.format(type), container)
+ else:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ obj['zone'] = zone
+ obj['nameserver'] = nameserver
+ obj['a'] = interfaces
+ obj['refresh'] = convert_time(refresh)
+ obj['retry'] = convert_time(retry)
+ obj['expire'] = convert_time(expire)
+ obj['ttl'] = convert_time(ttl)
+ obj['contact'] = contact
+ obj['mx'] = mx
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as e:
+ module.fail_json(
+ msg='Creating/editing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('dns/{0}'.format(type), dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as e:
+ module.fail_json(
+ msg='Removing dns zone {0} failed: {1}'.format(zone, e)
+ )
+
+ module.exit_json(
+ changed=changed,
+ diff=diff,
+ zone=zone
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/udm_group.py b/ansible_collections/community/general/plugins/modules/udm_group.py
new file mode 100644
index 000000000..5fe2422f8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/udm_group.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_group
+author:
+ - Tobias Rüetschi (@keachi)
+short_description: Manage POSIX groups
+description:
+    - "This module allows managing user groups on a Univention Corporate Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+options:
+ state:
+ required: false
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the group is present or not.
+ type: str
+ name:
+ required: true
+ description:
+ - Name of the posix group.
+ type: str
+ description:
+ required: false
+ description:
+ - Group description.
+ type: str
+ position:
+ required: false
+ description:
+      - Define the whole LDAP position of the group, e.g.
+ C(cn=g123m-1A,cn=classes,cn=schueler,cn=groups,ou=schule,dc=example,dc=com).
+ type: str
+ default: ''
+ ou:
+ required: false
+ description:
+ - LDAP OU, e.g. school for LDAP OU C(ou=school,dc=example,dc=com).
+ type: str
+ default: ''
+ subpath:
+ required: false
+ description:
+ - Subpath inside the OU, e.g. C(cn=classes,cn=students,cn=groups).
+ type: str
+ default: "cn=groups"
+'''
+
+
+EXAMPLES = '''
+- name: Create a POSIX group
+ community.general.udm_group:
+ name: g123m-1A
+
+# Create a POSIX group with the exact DN
+# C(cn=g123m-1A,cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com)
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ subpath: 'cn=classes,cn=students,cn=groups'
+ ou: school
+
+# or
+- name: Create a POSIX group with a DN
+ community.general.udm_group:
+ name: g123m-1A
+ position: 'cn=classes,cn=students,cn=groups,ou=school,dc=school,dc=example,dc=com'
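+
+# Removal is assumed to work with the same addressing (a sketch):
+- name: Remove a POSIX group
+  community.general.udm_group:
+    name: g123m-1A
+    state: absent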
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ description=dict(type='str'),
+ position=dict(default='',
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=groups',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True
+ )
+ name = module.params['name']
+ description = module.params['description']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ groups = list(ldap_search(
+ '(&(objectClass=posixGroup)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
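+    # Example: with ou=school and the default subpath, the group DN becomes
+    # cn=<name>,cn=groups,ou=school,<base dn>.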
+ group_dn = 'cn={0},{1}'.format(name, container)
+
+ exists = bool(len(groups))
+
+ if state == 'present':
+ try:
+ if not exists:
+ grp = umc_module_for_add('groups/group', container)
+ else:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ grp['name'] = name
+ grp['description'] = description
+ diff = grp.diff()
+ changed = grp.diff() != []
+ if not module.check_mode:
+ if not exists:
+ grp.create()
+ else:
+ grp.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing group {0} in {1} failed".format(name, container)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ grp = umc_module_for_edit('groups/group', group_dn)
+ if not module.check_mode:
+ grp.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing group {0} failed".format(name)
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/udm_share.py b/ansible_collections/community/general/plugins/modules/udm_share.py
new file mode 100644
index 000000000..274391335
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/udm_share.py
@@ -0,0 +1,579 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: udm_share
+author:
+ - Tobias Rüetschi (@keachi)
+short_description: Manage Samba shares on a Univention Corporate Server
+description:
+    - "This module allows managing Samba shares on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the share is present or not.
+ type: str
+ name:
+ required: true
+ description:
+      - Name of the share.
+ type: str
+ host:
+ required: false
+ description:
+ - Host FQDN (server which provides the share), e.g. C({{
+ ansible_fqdn }}). Required if I(state=present).
+ type: str
+ path:
+ required: false
+ description:
+ - Directory on the providing server, e.g. C(/home). Required if I(state=present).
+ type: path
+ sambaName:
+ required: false
+ description:
+ - Windows name. Required if I(state=present).
+ type: str
+ aliases: [ samba_name ]
+ ou:
+ required: true
+ description:
+ - Organisational unit, inside the LDAP Base DN.
+ type: str
+ owner:
+ default: '0'
+ description:
+ - Directory owner of the share's root directory.
+ type: str
+ group:
+ default: '0'
+ description:
+ - Directory owner group of the share's root directory.
+ type: str
+ directorymode:
+ default: '00755'
+ description:
+ - Permissions for the share's root directory.
+ type: str
+ root_squash:
+ default: true
+ description:
+ - Modify user ID for root user (root squashing).
+ type: bool
+ subtree_checking:
+ default: true
+ description:
+ - Subtree checking.
+ type: bool
+ sync:
+ default: 'sync'
+ description:
+ - NFS synchronisation.
+ type: str
+ writeable:
+ default: true
+ description:
+ - NFS write access.
+ type: bool
+ sambaBlockSize:
+ description:
+      - Block size.
+ type: str
+ aliases: [ samba_block_size ]
+ sambaBlockingLocks:
+ default: true
+ description:
+ - Blocking locks.
+ type: bool
+ aliases: [ samba_blocking_locks ]
+ sambaBrowseable:
+ description:
+ - Show in Windows network environment.
+ type: bool
+ default: true
+ aliases: [ samba_browsable ]
+ sambaCreateMode:
+ default: '0744'
+ description:
+ - File mode.
+ type: str
+ aliases: [ samba_create_mode ]
+ sambaCscPolicy:
+ default: 'manual'
+ description:
+ - Client-side caching policy.
+ type: str
+ aliases: [ samba_csc_policy ]
+ sambaCustomSettings:
+ default: []
+ description:
+ - Option name in smb.conf and its value.
+ type: list
+ elements: dict
+ aliases: [ samba_custom_settings ]
+ sambaDirectoryMode:
+ default: '0755'
+ description:
+ - Directory mode.
+ type: str
+ aliases: [ samba_directory_mode ]
+ sambaDirectorySecurityMode:
+ default: '0777'
+ description:
+ - Directory security mode.
+ type: str
+ aliases: [ samba_directory_security_mode ]
+ sambaDosFilemode:
+ default: false
+ description:
+ - Users with write access may modify permissions.
+ type: bool
+ aliases: [ samba_dos_filemode ]
+ sambaFakeOplocks:
+ default: false
+ description:
+ - Fake oplocks.
+ type: bool
+ aliases: [ samba_fake_oplocks ]
+ sambaForceCreateMode:
+ default: false
+ description:
+ - Force file mode.
+ type: bool
+ aliases: [ samba_force_create_mode ]
+ sambaForceDirectoryMode:
+ default: false
+ description:
+ - Force directory mode.
+ type: bool
+ aliases: [ samba_force_directory_mode ]
+ sambaForceDirectorySecurityMode:
+ default: false
+ description:
+ - Force directory security mode.
+ type: bool
+ aliases: [ samba_force_directory_security_mode ]
+ sambaForceGroup:
+ description:
+ - Force group.
+ type: str
+ aliases: [ samba_force_group ]
+ sambaForceSecurityMode:
+ default: false
+ description:
+ - Force security mode.
+ type: bool
+ aliases: [ samba_force_security_mode ]
+ sambaForceUser:
+ description:
+ - Force user.
+ type: str
+ aliases: [ samba_force_user ]
+ sambaHideFiles:
+ description:
+ - Hide files.
+ type: str
+ aliases: [ samba_hide_files ]
+ sambaHideUnreadable:
+ default: false
+ description:
+ - Hide unreadable files/directories.
+ type: bool
+ aliases: [ samba_hide_unreadable ]
+ sambaHostsAllow:
+ default: []
+ description:
+ - Allowed host/network.
+ type: list
+ elements: str
+ aliases: [ samba_hosts_allow ]
+ sambaHostsDeny:
+ default: []
+ description:
+ - Denied host/network.
+ type: list
+ elements: str
+ aliases: [ samba_hosts_deny ]
+ sambaInheritAcls:
+ default: true
+ description:
+ - Inherit ACLs.
+ type: bool
+ aliases: [ samba_inherit_acls ]
+ sambaInheritOwner:
+ default: false
+ description:
+ - Create files/directories with the owner of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_owner ]
+ sambaInheritPermissions:
+ default: false
+ description:
+ - Create files/directories with permissions of the parent directory.
+ type: bool
+ aliases: [ samba_inherit_permissions ]
+ sambaInvalidUsers:
+ description:
+ - Invalid users or groups.
+ type: str
+ aliases: [ samba_invalid_users ]
+ sambaLevel2Oplocks:
+ default: true
+ description:
+ - Level 2 oplocks.
+ type: bool
+ aliases: [ samba_level_2_oplocks ]
+ sambaLocking:
+ default: true
+ description:
+ - Locking.
+ type: bool
+ aliases: [ samba_locking ]
+ sambaMSDFSRoot:
+ default: false
+ description:
+ - MSDFS root.
+ type: bool
+ aliases: [ samba_msdfs_root ]
+ sambaNtAclSupport:
+ default: true
+ description:
+ - NT ACL support.
+ type: bool
+ aliases: [ samba_nt_acl_support ]
+ sambaOplocks:
+ default: true
+ description:
+ - Oplocks.
+ type: bool
+ aliases: [ samba_oplocks ]
+ sambaPostexec:
+ description:
+ - Postexec script.
+ type: str
+ aliases: [ samba_postexec ]
+ sambaPreexec:
+ description:
+ - Preexec script.
+ type: str
+ aliases: [ samba_preexec ]
+ sambaPublic:
+ default: false
+ description:
+ - Allow anonymous read-only access with a guest user.
+ type: bool
+ aliases: [ samba_public ]
+ sambaSecurityMode:
+ default: '0777'
+ description:
+ - Security mode.
+ type: str
+ aliases: [ samba_security_mode ]
+ sambaStrictLocking:
+ default: 'Auto'
+ description:
+ - Strict locking.
+ type: str
+ aliases: [ samba_strict_locking ]
+ sambaVFSObjects:
+ description:
+ - VFS objects.
+ type: str
+ aliases: [ samba_vfs_objects ]
+ sambaValidUsers:
+ description:
+ - Valid users or groups.
+ type: str
+ aliases: [ samba_valid_users ]
+ sambaWriteList:
+ description:
+ - Restrict write access to these users/groups.
+ type: str
+ aliases: [ samba_write_list ]
+ sambaWriteable:
+ default: true
+ description:
+ - Samba write access.
+ type: bool
+ aliases: [ samba_writeable ]
+ nfs_hosts:
+ default: []
+ description:
+      - Only allow access for these hosts, IP addresses, or networks.
+ type: list
+ elements: str
+ nfsCustomSettings:
+ default: []
+ description:
+      - Option name in the exports file.
+ type: list
+ elements: str
+ aliases: [ nfs_custom_settings ]
+'''
+
+
+EXAMPLES = '''
+- name: Create a share named home on the server ucs.example.com with the path /home
+ community.general.udm_share:
+ name: home
+ path: /home
+ host: ucs.example.com
+ sambaName: Home
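+
+# Removing a share is assumed to need only the name and the ou (a sketch):
+- name: Remove the home share
+  community.general.udm_share:
+    name: home
+    ou: school
+    state: absent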
+'''
+
+
+RETURN = '''# '''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True,
+ type='str'),
+ ou=dict(required=True,
+ type='str'),
+ owner=dict(type='str',
+ default='0'),
+ group=dict(type='str',
+ default='0'),
+ path=dict(type='path'),
+ directorymode=dict(type='str',
+ default='00755'),
+ host=dict(type='str'),
+ root_squash=dict(type='bool',
+ default=True),
+ subtree_checking=dict(type='bool',
+ default=True),
+ sync=dict(type='str',
+ default='sync'),
+ writeable=dict(type='bool',
+ default=True),
+ sambaBlockSize=dict(type='str',
+ aliases=['samba_block_size']),
+ sambaBlockingLocks=dict(type='bool',
+ aliases=['samba_blocking_locks'],
+ default=True),
+ sambaBrowseable=dict(type='bool',
+ aliases=['samba_browsable'],
+ default=True),
+ sambaCreateMode=dict(type='str',
+ aliases=['samba_create_mode'],
+ default='0744'),
+ sambaCscPolicy=dict(type='str',
+ aliases=['samba_csc_policy'],
+ default='manual'),
+ sambaCustomSettings=dict(type='list',
+ elements='dict',
+ aliases=['samba_custom_settings'],
+ default=[]),
+ sambaDirectoryMode=dict(type='str',
+ aliases=['samba_directory_mode'],
+ default='0755'),
+ sambaDirectorySecurityMode=dict(type='str',
+ aliases=['samba_directory_security_mode'],
+ default='0777'),
+ sambaDosFilemode=dict(type='bool',
+ aliases=['samba_dos_filemode'],
+ default=False),
+ sambaFakeOplocks=dict(type='bool',
+ aliases=['samba_fake_oplocks'],
+ default=False),
+ sambaForceCreateMode=dict(type='bool',
+ aliases=['samba_force_create_mode'],
+ default=False),
+ sambaForceDirectoryMode=dict(type='bool',
+ aliases=['samba_force_directory_mode'],
+ default=False),
+ sambaForceDirectorySecurityMode=dict(type='bool',
+ aliases=['samba_force_directory_security_mode'],
+ default=False),
+ sambaForceGroup=dict(type='str',
+ aliases=['samba_force_group']),
+ sambaForceSecurityMode=dict(type='bool',
+ aliases=['samba_force_security_mode'],
+ default=False),
+ sambaForceUser=dict(type='str',
+ aliases=['samba_force_user']),
+ sambaHideFiles=dict(type='str',
+ aliases=['samba_hide_files']),
+ sambaHideUnreadable=dict(type='bool',
+ aliases=['samba_hide_unreadable'],
+ default=False),
+ sambaHostsAllow=dict(type='list',
+ elements='str',
+ aliases=['samba_hosts_allow'],
+ default=[]),
+ sambaHostsDeny=dict(type='list',
+ elements='str',
+ aliases=['samba_hosts_deny'],
+ default=[]),
+ sambaInheritAcls=dict(type='bool',
+ aliases=['samba_inherit_acls'],
+ default=True),
+ sambaInheritOwner=dict(type='bool',
+ aliases=['samba_inherit_owner'],
+ default=False),
+ sambaInheritPermissions=dict(type='bool',
+ aliases=['samba_inherit_permissions'],
+ default=False),
+ sambaInvalidUsers=dict(type='str',
+ aliases=['samba_invalid_users']),
+ sambaLevel2Oplocks=dict(type='bool',
+ aliases=['samba_level_2_oplocks'],
+ default=True),
+ sambaLocking=dict(type='bool',
+ aliases=['samba_locking'],
+ default=True),
+ sambaMSDFSRoot=dict(type='bool',
+ aliases=['samba_msdfs_root'],
+ default=False),
+ sambaName=dict(type='str',
+ aliases=['samba_name']),
+ sambaNtAclSupport=dict(type='bool',
+ aliases=['samba_nt_acl_support'],
+ default=True),
+ sambaOplocks=dict(type='bool',
+ aliases=['samba_oplocks'],
+ default=True),
+ sambaPostexec=dict(type='str',
+ aliases=['samba_postexec']),
+ sambaPreexec=dict(type='str',
+ aliases=['samba_preexec']),
+ sambaPublic=dict(type='bool',
+ aliases=['samba_public'],
+ default=False),
+ sambaSecurityMode=dict(type='str',
+ aliases=['samba_security_mode'],
+ default='0777'),
+ sambaStrictLocking=dict(type='str',
+ aliases=['samba_strict_locking'],
+ default='Auto'),
+ sambaVFSObjects=dict(type='str',
+ aliases=['samba_vfs_objects']),
+ sambaValidUsers=dict(type='str',
+ aliases=['samba_valid_users']),
+ sambaWriteList=dict(type='str',
+ aliases=['samba_write_list']),
+ sambaWriteable=dict(type='bool',
+ aliases=['samba_writeable'],
+ default=True),
+ nfs_hosts=dict(type='list',
+ elements='str',
+ default=[]),
+ nfsCustomSettings=dict(type='list',
+ elements='str',
+ aliases=['nfs_custom_settings'],
+ default=[]),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['path', 'host', 'sambaName'])
+ ])
+ )
+ name = module.params['name']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ obj = list(ldap_search(
+ '(&(objectClass=univentionShare)(cn={0}))'.format(name),
+ attr=['cn']
+ ))
+
+ exists = bool(len(obj))
+ container = 'cn=shares,ou={0},{1}'.format(module.params['ou'], base_dn())
+ dn = 'cn={0},{1}'.format(name, container)
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('shares/share', container)
+ else:
+ obj = umc_module_for_edit('shares/share', dn)
+
+ module.params['printablename'] = '{0} ({1})'.format(name, module.params['host'])
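+            # Convert boolean module parameters to the '1'/'0' strings that UDM
+            # expects before copying them onto the UDM object.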
+ for k in obj.keys():
+ if module.params[k] is True:
+ module.params[k] = '1'
+ elif module.params[k] is False:
+ module.params[k] = '0'
+ obj[k] = module.params[k]
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception as err:
+ module.fail_json(
+ msg='Creating/editing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('shares/share', dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception as err:
+ module.fail_json(
+ msg='Removing share {0} in {1} failed: {2}'.format(
+ name,
+ container,
+ err,
+ )
+ )
+
+ module.exit_json(
+ changed=changed,
+ name=name,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/udm_user.py b/ansible_collections/community/general/plugins/modules/udm_user.py
new file mode 100644
index 000000000..05c5ad359
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/udm_user.py
@@ -0,0 +1,573 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adfinis SyGroup AG
+# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: udm_user
+author:
+ - Tobias Rüetschi (@keachi)
+short_description: Manage POSIX users on a Univention Corporate Server
+description:
+    - "This module allows managing POSIX users on a Univention Corporate
+       Server (UCS).
+       It uses the Python API of the UCS to create a new object or edit it."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: partial
+options:
+ state:
+ default: "present"
+ choices: [ present, absent ]
+ description:
+ - Whether the user is present or not.
+ type: str
+ username:
+ required: true
+ description:
+ - User name
+ aliases: ['name']
+ type: str
+ firstname:
+ description:
+ - First name. Required if I(state=present).
+ type: str
+ lastname:
+ description:
+ - Last name. Required if I(state=present).
+ type: str
+ password:
+ description:
+ - Password. Required if I(state=present).
+ type: str
+ birthday:
+ description:
+ - Birthday
+ type: str
+ city:
+ description:
+      - City of the user's business address.
+ type: str
+ country:
+ description:
+      - Country of the user's business address.
+ type: str
+ department_number:
+ description:
+      - Department number of the user's business address.
+ aliases: [ departmentNumber ]
+ type: str
+ description:
+ description:
+ - Description (not gecos)
+ type: str
+ display_name:
+ description:
+ - Display name (not gecos)
+ aliases: [ displayName ]
+ type: str
+ email:
+ default: ['']
+ description:
+ - A list of e-mail addresses.
+ type: list
+ elements: str
+ employee_number:
+ description:
+ - Employee number
+ aliases: [ employeeNumber ]
+ type: str
+ employee_type:
+ description:
+ - Employee type
+ aliases: [ employeeType ]
+ type: str
+ gecos:
+ description:
+ - GECOS
+ type: str
+ groups:
+ default: []
+ description:
+ - "POSIX groups, the LDAP DNs of the groups will be found with the
+ LDAP filter for each group as $GROUP:
+ C((&(objectClass=posixGroup)(cn=$GROUP)))."
+ type: list
+ elements: str
+ home_share:
+ description:
+ - "Home NFS share. Must be a LDAP DN, e.g.
+ C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
+ aliases: [ homeShare ]
+ type: str
+ home_share_path:
+ description:
+ - Path to home NFS share, inside the homeShare.
+ aliases: [ homeSharePath ]
+ type: str
+ home_telephone_number:
+ default: []
+ description:
+ - List of private telephone numbers.
+ aliases: [ homeTelephoneNumber ]
+ type: list
+ elements: str
+ homedrive:
+ description:
+ - Windows home drive, e.g. C("H:").
+ type: str
+ mail_alternative_address:
+ default: []
+ description:
+ - List of alternative e-mail addresses.
+ aliases: [ mailAlternativeAddress ]
+ type: list
+ elements: str
+ mail_home_server:
+ description:
+ - FQDN of the mail server.
+ aliases: [ mailHomeServer ]
+ type: str
+ mail_primary_address:
+ description:
+ - Primary e-mail address.
+ aliases: [ mailPrimaryAddress ]
+ type: str
+ mobile_telephone_number:
+ default: []
+ description:
+ - List of mobile telephone numbers.
+ aliases: [ mobileTelephoneNumber ]
+ type: list
+ elements: str
+ organisation:
+ description:
+ - Organisation.
+ aliases: [ organization ]
+ type: str
+ overridePWHistory:
+ type: bool
+ default: false
+ description:
+ - Override password history.
+ aliases: [ override_pw_history ]
+ overridePWLength:
+ type: bool
+ default: false
+ description:
+ - Override password check.
+ aliases: [ override_pw_length ]
+ pager_telephonenumber:
+ default: []
+ description:
+ - List of pager telephone numbers.
+ aliases: [ pagerTelephonenumber ]
+ type: list
+ elements: str
+ phone:
+ description:
+ - List of telephone numbers.
+ type: list
+ elements: str
+ default: []
+ postcode:
+ description:
+ - Postal code of the user's business address.
+ type: str
+ primary_group:
+ description:
+ - Primary group. This must be the group LDAP DN.
+ - If not specified, it defaults to C(cn=Domain Users,cn=groups,$LDAP_BASE_DN).
+ aliases: [ primaryGroup ]
+ type: str
+ profilepath:
+ description:
+ - Windows profile directory.
+ type: str
+ pwd_change_next_login:
+ choices: [ '0', '1' ]
+ description:
+ - Change password on next login.
+ aliases: [ pwdChangeNextLogin ]
+ type: str
+ room_number:
+ description:
+ - Room number of the user's business address.
+ aliases: [ roomNumber ]
+ type: str
+ samba_privileges:
+ description:
+ - "Samba privilege, like allow printer administration, do domain
+ join."
+ aliases: [ sambaPrivileges ]
+ type: list
+ elements: str
+ default: []
+ samba_user_workstations:
+ description:
+ - Allow authentication only on these Microsoft Windows hosts.
+ aliases: [ sambaUserWorkstations ]
+ type: list
+ elements: str
+ default: []
+ sambahome:
+ description:
+ - Windows home path, e.g. C('\\$FQDN\$USERNAME').
+ type: str
+ scriptpath:
+ description:
+ - Windows logon script.
+ type: str
+ secretary:
+ default: []
+ description:
+ - A list of superiors as LDAP DNs.
+ type: list
+ elements: str
+ serviceprovider:
+ default: ['']
+ description:
+ - Enable user for the following service providers.
+ type: list
+ elements: str
+ shell:
+ default: '/bin/bash'
+ description:
+ - Login shell.
+ type: str
+ street:
+ description:
+ - Street of the user's business address.
+ type: str
+ title:
+ description:
+ - Title, e.g. C(Prof.).
+ type: str
+ unixhome:
+ description:
+ - Unix home directory.
+ - If not specified, it defaults to C(/home/$USERNAME).
+ type: str
+ userexpiry:
+ description:
+ - Account expiry date, e.g. C(1999-12-31).
+ - If not specified, it defaults to the current day plus one year.
+ type: str
+ position:
+ default: ''
+ description:
+ - "Define the whole position of users object inside the LDAP tree,
+ e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
+ type: str
+ update_password:
+ default: always
+ choices: [ always, on_create ]
+ description:
+ - "C(always) will update passwords if they differ.
+ C(on_create) will only set the password for newly created users."
+ type: str
+ ou:
+ default: ''
+ description:
+ - "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
+ LDAP OU C(ou=school,dc=example,dc=com)."
+ type: str
+ subpath:
+ default: 'cn=users'
+ description:
+ - "LDAP subpath inside the organizational unit, e.g.
+ C(cn=teachers,cn=users) for LDAP container
+ C(cn=teachers,cn=users,dc=example,dc=com)."
+ type: str
+'''
+
+
+EXAMPLES = '''
+- name: Create a user on a UCS
+ community.general.udm_user:
+ name: FooBar
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ ou: school
+ subpath: 'cn=teachers,cn=users'
+
+# or define the position
+- name: Create a user with the DN C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
+ community.general.udm_user:
+ name: foo
+ password: secure_password
+ firstname: Foo
+ lastname: Bar
+ position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
+'''
+
+
+RETURN = '''# '''
+
+import crypt
+from datetime import date, timedelta
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.univention_umc import (
+ umc_module_for_add,
+ umc_module_for_edit,
+ ldap_search,
+ base_dn,
+)
+
+
+def main():
+ expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
+ module = AnsibleModule(
+ argument_spec=dict(
+ birthday=dict(type='str'),
+ city=dict(type='str'),
+ country=dict(type='str'),
+ department_number=dict(type='str',
+ aliases=['departmentNumber']),
+ description=dict(type='str'),
+ display_name=dict(type='str',
+ aliases=['displayName']),
+ email=dict(default=[''],
+ type='list',
+ elements='str'),
+ employee_number=dict(type='str',
+ aliases=['employeeNumber']),
+ employee_type=dict(type='str',
+ aliases=['employeeType']),
+ firstname=dict(type='str'),
+ gecos=dict(type='str'),
+ groups=dict(default=[],
+ type='list',
+ elements='str'),
+ home_share=dict(type='str',
+ aliases=['homeShare']),
+ home_share_path=dict(type='str',
+ aliases=['homeSharePath']),
+ home_telephone_number=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['homeTelephoneNumber']),
+ homedrive=dict(type='str'),
+ lastname=dict(type='str'),
+ mail_alternative_address=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['mailAlternativeAddress']),
+ mail_home_server=dict(type='str',
+ aliases=['mailHomeServer']),
+ mail_primary_address=dict(type='str',
+ aliases=['mailPrimaryAddress']),
+ mobile_telephone_number=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['mobileTelephoneNumber']),
+ organisation=dict(type='str',
+ aliases=['organization']),
+ overridePWHistory=dict(default=False,
+ type='bool',
+ aliases=['override_pw_history']),
+ overridePWLength=dict(default=False,
+ type='bool',
+ aliases=['override_pw_length']),
+ pager_telephonenumber=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['pagerTelephonenumber']),
+ password=dict(type='str',
+ no_log=True),
+ phone=dict(default=[],
+ type='list',
+ elements='str'),
+ postcode=dict(type='str'),
+ primary_group=dict(type='str',
+ aliases=['primaryGroup']),
+ profilepath=dict(type='str'),
+ pwd_change_next_login=dict(type='str',
+ choices=['0', '1'],
+ aliases=['pwdChangeNextLogin']),
+ room_number=dict(type='str',
+ aliases=['roomNumber']),
+ samba_privileges=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['sambaPrivileges']),
+ samba_user_workstations=dict(default=[],
+ type='list',
+ elements='str',
+ aliases=['sambaUserWorkstations']),
+ sambahome=dict(type='str'),
+ scriptpath=dict(type='str'),
+ secretary=dict(default=[],
+ type='list',
+ elements='str'),
+ serviceprovider=dict(default=[''],
+ type='list',
+ elements='str'),
+ shell=dict(default='/bin/bash',
+ type='str'),
+ street=dict(type='str'),
+ title=dict(type='str'),
+ unixhome=dict(type='str'),
+ userexpiry=dict(type='str'),
+ username=dict(required=True,
+ aliases=['name'],
+ type='str'),
+ position=dict(default='',
+ type='str'),
+ update_password=dict(default='always',
+ choices=['always', 'on_create'],
+ type='str'),
+ ou=dict(default='',
+ type='str'),
+ subpath=dict(default='cn=users',
+ type='str'),
+ state=dict(default='present',
+ choices=['present', 'absent'],
+ type='str')
+ ),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'present', ['firstname', 'lastname', 'password'])
+ ])
+ )
+ username = module.params['username']
+ position = module.params['position']
+ ou = module.params['ou']
+ subpath = module.params['subpath']
+ state = module.params['state']
+ changed = False
+ diff = None
+
+ users = list(ldap_search(
+ '(&(objectClass=posixAccount)(uid={0}))'.format(username),
+ attr=['uid']
+ ))
+ if position != '':
+ container = position
+ else:
+ if ou != '':
+ ou = 'ou={0},'.format(ou)
+ if subpath != '':
+ subpath = '{0},'.format(subpath)
+ container = '{0}{1}{2}'.format(subpath, ou, base_dn())
+ user_dn = 'uid={0},{1}'.format(username, container)
+
+ exists = bool(len(users))
+
+ if state == 'present':
+ try:
+ if not exists:
+ obj = umc_module_for_add('users/user', container)
+ else:
+ obj = umc_module_for_edit('users/user', user_dn)
+
+ if module.params['displayName'] is None:
+ module.params['displayName'] = '{0} {1}'.format(
+ module.params['firstname'],
+ module.params['lastname']
+ )
+ if module.params['unixhome'] is None:
+ module.params['unixhome'] = '/home/{0}'.format(
+ module.params['username']
+ )
+ for k in obj.keys():
+ if (k != 'password' and
+ k != 'groups' and
+ k != 'overridePWHistory' and
+ k in module.params and
+ module.params[k] is not None):
+ obj[k] = module.params[k]
+ # handle some special values
+ obj['e-mail'] = module.params['email']
+ if 'userexpiry' in obj and obj.get('userexpiry') is None:
+ obj['userexpiry'] = expiry
+ password = module.params['password']
+ if obj['password'] is None:
+ obj['password'] = password
+ if module.params['update_password'] == 'always':
+ old_password = obj['password'].split('}', 2)[1]
+ if crypt.crypt(password, old_password) != old_password:
+ obj['overridePWHistory'] = module.params['overridePWHistory']
+ obj['overridePWLength'] = module.params['overridePWLength']
+ obj['password'] = password
+
+ diff = obj.diff()
+ if exists:
+ for k in obj.keys():
+ if obj.hasChanged(k):
+ changed = True
+ else:
+ changed = True
+ if not module.check_mode:
+ if not exists:
+ obj.create()
+ elif changed:
+ obj.modify()
+ except Exception:
+ module.fail_json(
+ msg="Creating/editing user {0} in {1} failed".format(
+ username,
+ container
+ )
+ )
+ try:
+ groups = module.params['groups']
+ if groups:
+ filter = '(&(objectClass=posixGroup)(|(cn={0})))'.format(
+ ')(cn='.join(groups)
+ )
+ group_dns = list(ldap_search(filter, attr=['dn']))
+ for dn in group_dns:
+ grp = umc_module_for_edit('groups/group', dn[0])
+ if user_dn not in grp['users']:
+ grp['users'].append(user_dn)
+ if not module.check_mode:
+ grp.modify()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Adding groups to user {0} failed".format(username)
+ )
+
+ if state == 'absent' and exists:
+ try:
+ obj = umc_module_for_edit('users/user', user_dn)
+ if not module.check_mode:
+ obj.remove()
+ changed = True
+ except Exception:
+ module.fail_json(
+ msg="Removing user {0} failed".format(username)
+ )
+
+ module.exit_json(
+ changed=changed,
+ username=username,
+ diff=diff,
+ container=container
+ )
+
+
+if __name__ == '__main__':
+ main()
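
Note: the update_password=always branch above avoids needless writes by re-hashing the supplied password with the stored hash's own salt. A minimal sketch of that check, runnable on Python versions that still ship the crypt module (deprecated in 3.11, removed in 3.13); the '{crypt}' prefix format is assumed from the split above:

    import crypt

    # Build a stand-in for the stored UDM value: '{crypt}$6$salt$hash'
    stored = '{crypt}' + crypt.crypt('secure_password',
                                     crypt.mksalt(crypt.METHOD_SHA512))
    old_password = stored.split('}', 2)[1]  # strip the '{crypt}' prefix

    def password_differs(candidate):
        # crypt.crypt(text, full_hash) reuses the hash's salt, so equal
        # output means the candidate matches the stored password.
        return crypt.crypt(candidate, old_password) != old_password

    print(password_differs('secure_password'))  # False -> skip update
    print(password_differs('changed'))          # True  -> rewrite password
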
diff --git a/ansible_collections/community/general/plugins/modules/ufw.py b/ansible_collections/community/general/plugins/modules/ufw.py
new file mode 100644
index 000000000..45c98fd63
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/ufw.py
@@ -0,0 +1,606 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Ahti Kitsik <ak@ahtik.com>
+# Copyright (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
+# Copyright (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
+# Copyright (c) 2013, James Martin <jmartin@basho.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+author:
+ - Aleksey Ovcharenko (@ovcharenko)
+ - Jarno Keskikangas (@pyykkis)
+ - Ahti Kitsik (@ahtik)
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ type: str
+ choices: [ disabled, enabled, reloaded, reset ]
+ default:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ type: str
+ choices: [ allow, deny, reject ]
+ aliases: [ policy ]
+ direction:
+ description:
+ - Select direction for a rule or default policy command. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ choices: [ in, incoming, out, outgoing, routed ]
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ type: str
+ choices: [ 'on', 'off', low, medium, high, full ]
+ insert:
+ description:
+ - Insert the corresponding rule as rule number NUM.
+ - Note that ufw numbers rules starting with 1.
+ - If I(delete=true) and a value is provided for I(insert),
+ then I(insert) is ignored.
+ type: int
+ insert_relative_to:
+ description:
+ - Allows interpreting the index in I(insert) relative to a position.
+ - C(zero) interprets the rule number as an absolute index (i.e. 1 is
+ the first rule).
+ - C(first-ipv4) interprets the rule number relative to the index of the
+ first IPv4 rule, or relative to the position where the first IPv4 rule
+ would be if there is currently none.
+ - C(last-ipv4) interprets the rule number relative to the index of the
+ last IPv4 rule, or relative to the position where the last IPv4 rule
+ would be if there is currently none.
+ - C(first-ipv6) interprets the rule number relative to the index of the
+ first IPv6 rule, or relative to the position where the first IPv6 rule
+ would be if there is currently none.
+ - C(last-ipv6) interprets the rule number relative to the index of the
+ last IPv6 rule, or relative to the position where the last IPv6 rule
+ would be if there is currently none.
+ type: str
+ choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
+ default: zero
+ rule:
+ description:
+ - Add firewall rule.
+ type: str
+ choices: [ allow, deny, limit, reject ]
+ log:
+ description:
+ - Log new connections matched to this rule.
+ type: bool
+ default: false
+ from_ip:
+ description:
+ - Source IP address.
+ type: str
+ default: any
+ aliases: [ from, src ]
+ from_port:
+ description:
+ - Source port.
+ type: str
+ to_ip:
+ description:
+ - Destination IP address.
+ type: str
+ default: any
+ aliases: [ dest, to ]
+ to_port:
+ description:
+ - Destination port.
+ type: str
+ aliases: [ port ]
+ proto:
+ description:
+ - TCP/IP protocol.
+ type: str
+ choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
+ aliases: [ protocol ]
+ name:
+ description:
+ - Use profile located in C(/etc/ufw/applications.d).
+ type: str
+ aliases: [ app ]
+ delete:
+ description:
+ - Delete rule.
+ - If I(delete=true) and a value is provided for I(insert),
+ then I(insert) is ignored.
+ type: bool
+ default: false
+ interface:
+ description:
+ - Specify interface for the rule. The direction (in or out) used
+ for the interface depends on the value of I(direction). See
+ I(interface_in) and I(interface_out) for routed rules that need
+ to supply both an input and output interface. Mutually
+ exclusive with I(interface_in) and I(interface_out).
+ type: str
+ aliases: [ if ]
+ interface_in:
+ description:
+ - Specify input interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_out) for routed rules.
+ type: str
+ aliases: [ if_in ]
+ version_added: '0.2.0'
+ interface_out:
+ description:
+ - Specify output interface for the rule. This is mutually
+ exclusive with I(direction) and I(interface). However, it is
+ compatible with I(interface_in) for routed rules.
+ type: str
+ aliases: [ if_out ]
+ version_added: '0.2.0'
+ route:
+ description:
+ - Apply the rule to routed/forwarded packets.
+ type: bool
+ default: false
+ comment:
+ description:
+ - Add a comment to the rule. Requires UFW version >=0.35.
+ type: str
+'''
+
+EXAMPLES = r'''
+- name: Allow everything and enable UFW
+ community.general.ufw:
+ state: enabled
+ policy: allow
+
+- name: Set logging
+ community.general.ufw:
+ logging: 'on'
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+- community.general.ufw:
+ rule: reject
+ port: auth
+ log: true
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+- community.general.ufw:
+ rule: limit
+ port: ssh
+ proto: tcp
+
+# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
+# a rule=allow task can leave those ports exposed. Either use delete=true
+# or a separate state=reset task)
+- community.general.ufw:
+ rule: allow
+ name: OpenSSH
+
+- name: Delete OpenSSH rule
+ community.general.ufw:
+ rule: allow
+ name: OpenSSH
+ delete: true
+
+- name: Deny all access to port 53
+ community.general.ufw:
+ rule: deny
+ port: '53'
+
+- name: Allow port range 60000-61000
+ community.general.ufw:
+ rule: allow
+ port: 60000:61000
+ proto: tcp
+
+- name: Allow all access to tcp port 80
+ community.general.ufw:
+ rule: allow
+ port: '80'
+ proto: tcp
+
+- name: Allow all access from RFC1918 networks to this host
+ community.general.ufw:
+ rule: allow
+ src: '{{ item }}'
+ loop:
+ - 10.0.0.0/8
+ - 172.16.0.0/12
+ - 192.168.0.0/16
+
+- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
+ community.general.ufw:
+ rule: deny
+ proto: udp
+ src: 1.2.3.4
+ port: '514'
+ comment: Block syslog
+
+- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ community.general.ufw:
+ rule: allow
+ interface: eth0
+ direction: in
+ proto: udp
+ src: 1.2.3.5
+ from_port: '5469'
+ dest: 1.2.3.4
+ to_port: '5469'
+
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ src: 2001:db8::/32
+ port: '25'
+
+- name: Deny all IPv6 traffic to tcp port 20 on this host
+ # this should be the first IPv6 rule
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+
+- name: Deny all IPv4 traffic to tcp port 20 on this host
+ # This should be the third to last IPv4 rule
+ # (insert: -1 addresses the second to last IPv4 rule;
+ # so the new rule will be inserted before the second
+ # to last IPv4 rule, and will become the third to last
+ # IPv4 rule.)
+ community.general.ufw:
+ rule: deny
+ proto: tcp
+ port: '20'
+ to_ip: "::"
+ insert: -1
+ insert_relative_to: last-ipv4
+
+# Can be used to further restrict a global FORWARD policy set to allow
+- name: Deny forwarded/routed traffic from subnet 192.0.2.0/24 to subnet 198.51.100.0/24
+ community.general.ufw:
+ rule: deny
+ route: true
+ src: 192.0.2.0/24
+ dest: 198.51.100.0/24
+'''
+
+import re
+
+from operator import itemgetter
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def compile_ipv4_regexp():
+ r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
+ r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
+ return re.compile(r)
+
+
+def compile_ipv6_regexp():
+ """
+ validation pattern provided by :
+ https://stackoverflow.com/questions/53497/regular-expression-that-matches-
+ valid-ipv6-addresses#answer-17871737
+ """
+ r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
+ r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
+ r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
+ r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
+ r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
+ r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
+ r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
+ r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
+ r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
+ return re.compile(r)
+
+
+def main():
+ command_keys = ['state', 'default', 'rule', 'logging']
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
+ direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
+ delete=dict(type='bool', default=False),
+ route=dict(type='bool', default=False),
+ insert=dict(type='int'),
+ insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
+ rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
+ interface=dict(type='str', aliases=['if']),
+ interface_in=dict(type='str', aliases=['if_in']),
+ interface_out=dict(type='str', aliases=['if_out']),
+ log=dict(type='bool', default=False),
+ from_ip=dict(type='str', default='any', aliases=['from', 'src']),
+ from_port=dict(type='str'),
+ to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
+ to_port=dict(type='str', aliases=['port']),
+ proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
+ name=dict(type='str', aliases=['app']),
+ comment=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ mutually_exclusive=[
+ ['name', 'proto', 'logging'],
+ # Mutual exclusivity with `interface` implied by `required_by`.
+ ['direction', 'interface_in'],
+ ['direction', 'interface_out'],
+ ],
+ required_one_of=([command_keys]),
+ required_by=dict(
+ interface=('direction', ),
+ ),
+ )
+
+ cmds = []
+
+ ipv4_regexp = compile_ipv4_regexp()
+ ipv6_regexp = compile_ipv6_regexp()
+
+ def filter_line_that_not_start_with(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if not line.startswith(pattern)])
+
+ def filter_line_that_contains(pattern, content):
+ return [line for line in content.splitlines(True) if pattern in line]
+
+ def filter_line_that_not_contains(pattern, content):
+ return ''.join([line for line in content.splitlines(True) if pattern not in line])
+
+ def filter_line_that_match_func(match_func, content):
+ return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])
+
+ def filter_line_that_contains_ipv4(content):
+ return filter_line_that_match_func(ipv4_regexp.search, content)
+
+ def filter_line_that_contains_ipv6(content):
+ return filter_line_that_match_func(ipv6_regexp.search, content)
+
+ def is_starting_by_ipv4(ip):
+ return ipv4_regexp.match(ip) is not None
+
+ def is_starting_by_ipv6(ip):
+ return ipv6_regexp.match(ip) is not None
+
+ def execute(cmd, ignore_error=False):
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})
+
+ if rc != 0 and not ignore_error:
+ module.fail_json(msg=err or out, commands=cmds)
+
+ return out
+
+ def get_current_rules():
+ user_rules_files = ["/lib/ufw/user.rules",
+ "/lib/ufw/user6.rules",
+ "/etc/ufw/user.rules",
+ "/etc/ufw/user6.rules",
+ "/var/lib/ufw/user.rules",
+ "/var/lib/ufw/user6.rules"]
+
+ cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]
+
+ cmd.extend([[f] for f in user_rules_files])
+ return execute(cmd, ignore_error=True)
+
+ def ufw_version():
+ """
+ Returns the major and minor version of ufw installed on the system.
+ """
+ out = execute([[ufw_bin], ["--version"]])
+
+ lines = [x for x in out.split('\n') if x.strip() != '']
+ if len(lines) == 0:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
+ if matches is None:
+ module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)
+
+ # Convert version to numbers
+ major = int(matches.group(1))
+ minor = int(matches.group(2))
+ rev = 0
+ if matches.group(3) is not None:
+ rev = int(matches.group(3))
+
+ return major, minor, rev
+
+ params = module.params
+
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+ grep_bin = module.get_bin_path('grep', True)
+
+ # Save the pre state and rules in order to recognize changes
+ pre_state = execute([[ufw_bin], ['status verbose']])
+ pre_rules = get_current_rules()
+
+ changed = False
+
+ # Execute filter
+ for (command, value) in commands.items():
+
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = {'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset'}
+
+ if value in ['reloaded', 'reset']:
+ changed = True
+
+ if module.check_mode:
+ # "active" would also match "inactive", hence the space
+ ufw_enabled = pre_state.find(" active") != -1
+ if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
+ changed = True
+ else:
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
+ if extract:
+ current_level = extract.group(2)
+ current_on_off_value = extract.group(1)
+ if value != "off":
+ if current_on_off_value == "off":
+ changed = True
+ elif value != "on" and value != current_level:
+ changed = True
+ elif current_on_off_value != "off":
+ changed = True
+ else:
+ changed = True
+
+ if not module.check_mode:
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
+ module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
+ if module.check_mode:
+ regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
+ extract = re.search(regexp, pre_state)
+ if extract is not None:
+ current_default_values = {}
+ current_default_values["incoming"] = extract.group(1)
+ current_default_values["outgoing"] = extract.group(2)
+ current_default_values["routed"] = extract.group(3)
+ v = current_default_values[params['direction'] or 'incoming']
+ if v not in (value, 'disabled'):
+ changed = True
+ else:
+ changed = True
+ else:
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ if params['direction'] not in ['in', 'out', None]:
+ module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
+ if not params['route'] and params['interface_in'] and params['interface_out']:
+ module.fail_json(msg='Only route rules can combine '
+ 'interface_in and interface_out')
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [route] [delete | insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application] [comment COMMENT]
+ cmd.append([module.boolean(params['route']), 'route'])
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ if params['insert'] is not None and not params['delete']:
+ relative_to_cmd = params['insert_relative_to']
+ if relative_to_cmd == 'zero':
+ insert_to = params['insert']
+ else:
+ (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
+ numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
+ lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
+ lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
+ last_number = max([no for (no, ipv6) in lines]) if lines else 0
+ has_ipv4 = any(not ipv6 for (no, ipv6) in lines)
+ has_ipv6 = any(ipv6 for (no, ipv6) in lines)
+ if relative_to_cmd == 'first-ipv4':
+ relative_to = 1
+ elif relative_to_cmd == 'last-ipv4':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
+ elif relative_to_cmd == 'first-ipv6':
+ relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
+ elif relative_to_cmd == 'last-ipv6':
+ relative_to = last_number if has_ipv6 else last_number + 1
+ insert_to = params['insert'] + relative_to
+ if insert_to > last_number:
+ # ufw does not like it when the insert number is larger than the
+ # maximal rule number for IPv4/IPv6.
+ insert_to = None
+ cmd.append([insert_to is not None, "insert %s" % insert_to])
+ cmd.append([value])
+ cmd.append([params['direction'], "%s" % params['direction']])
+ cmd.append([params['interface'], "on %s" % params['interface']])
+ cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
+ cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
+ ('to_ip', "to %s"), ('to_port', "port %s"),
+ ('proto', "proto %s"), ('name', "app '%s'")]:
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ ufw_major, ufw_minor, dummy = ufw_version()
+ # comment is supported only in ufw versions 0.35 and later
+ if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
+ cmd.append([params['comment'], "comment '%s'" % params['comment']])
+
+ rules_dry = execute(cmd)
+
+ if module.check_mode:
+
+ nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))
+
+ if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):
+
+ rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
+ # ufw dry-run doesn't send all rules so have to compare ipv4 or ipv6 rules
+ if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
+ if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
+ changed = True
+ elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
+ if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
+ changed = True
+ elif pre_rules != rules_dry:
+ changed = True
+
+ # Get the new state
+ if module.check_mode:
+ return module.exit_json(changed=changed, commands=cmds)
+ else:
+ post_state = execute([[ufw_bin], ['status'], ['verbose']])
+ if not changed:
+ post_rules = get_current_rules()
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+
+if __name__ == '__main__':
+ main()
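
Note: the insert_relative_to handling above turns a relative rule index into the absolute number ufw expects by parsing "ufw status numbered". A standalone sketch of that arithmetic against a captured listing (the sample output below is invented for illustration):

    import re

    # Invented sample of "ufw status numbered" output (two IPv4 rules, one IPv6):
    numbered_state = (
        "[ 1] 22/tcp  ALLOW IN  Anywhere\n"
        "[ 2] 80/tcp  ALLOW IN  Anywhere\n"
        "[ 3] 22/tcp (v6)  ALLOW IN  Anywhere (v6)\n"
    )
    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
    lines = [(numbered_line_re.match(line), '(v6)' in line)
             for line in numbered_state.splitlines()]
    lines = [(int(m.group(1)), ipv6) for (m, ipv6) in lines if m]
    has_ipv4 = any(not ipv6 for (_, ipv6) in lines)

    def absolute_insert(insert, relative_to_cmd):
        # Mirrors the module's mapping of a relative index to "insert NUM".
        if relative_to_cmd == 'zero':
            return insert
        if relative_to_cmd == 'first-ipv4':
            relative_to = 1
        elif relative_to_cmd == 'first-ipv6':
            relative_to = (max(no for (no, ipv6) in lines if not ipv6) + 1
                           if has_ipv4 else 1)
        else:
            raise ValueError('branch not shown in this sketch')
        return insert + relative_to

    print(absolute_insert(0, 'first-ipv6'))  # -> 3, before the current rule 3
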
diff --git a/ansible_collections/community/general/plugins/modules/uptimerobot.py b/ansible_collections/community/general/plugins/modules/uptimerobot.py
new file mode 100644
index 000000000..c1894e90a
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/uptimerobot.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: uptimerobot
+short_description: Pause and start Uptime Robot monitoring
+description:
+ - This module will let you start and pause Uptime Robot monitoring.
+author: "Nate Kingsley (@nate-kingsley)"
+requirements:
+ - Valid Uptime Robot API Key
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ state:
+ type: str
+ description:
+ - Define whether the monitor should be running or paused.
+ required: true
+ choices: [ "started", "paused" ]
+ monitorid:
+ type: str
+ description:
+ - ID of the monitor to check.
+ required: true
+ apikey:
+ type: str
+ description:
+ - Uptime Robot API key.
+ required: true
+notes:
+ - Support for adding and removing monitors and alert contacts has not yet been implemented.
+'''
+
+EXAMPLES = '''
+- name: Pause the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: paused
+
+- name: Start the monitor with an ID of 12345
+ community.general.uptimerobot:
+ monitorid: 12345
+ apikey: 12345-1234512345
+ state: started
+'''
+
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+
+
+API_BASE = "https://api.uptimerobot.com/"
+
+API_ACTIONS = dict(
+ status='getMonitors?',
+ editMonitor='editMonitor?'
+)
+
+API_FORMAT = 'json'
+API_NOJSONCALLBACK = 1
+CHANGED_STATE = False
+SUPPORTS_CHECK_MODE = False
+
+
+def checkID(module, params):
+
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['status'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult
+
+
+def startMonitor(module, params):
+
+ params['monitorStatus'] = 1
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def pauseMonitor(module, params):
+
+ params['monitorStatus'] = 0
+ data = urlencode(params)
+ full_uri = API_BASE + API_ACTIONS['editMonitor'] + data
+ req, info = fetch_url(module, full_uri)
+ result = to_text(req.read())
+ jsonresult = json.loads(result)
+ req.close()
+ return jsonresult['stat']
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(required=True, choices=['started', 'paused']),
+ apikey=dict(required=True, no_log=True),
+ monitorid=dict(required=True)
+ ),
+ supports_check_mode=SUPPORTS_CHECK_MODE
+ )
+
+ params = dict(
+ apiKey=module.params['apikey'],
+ monitors=module.params['monitorid'],
+ monitorID=module.params['monitorid'],
+ format=API_FORMAT,
+ noJsonCallback=API_NOJSONCALLBACK
+ )
+
+ check_result = checkID(module, params)
+
+ if check_result['stat'] != "ok":
+ module.fail_json(
+ msg="failed",
+ result=check_result['message']
+ )
+
+ if module.params['state'] == 'started':
+ monitor_result = startMonitor(module, params)
+ else:
+ monitor_result = pauseMonitor(module, params)
+
+ module.exit_json(
+ msg="success",
+ result=monitor_result
+ )
+
+
+if __name__ == '__main__':
+ main()
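
Note: all three helpers above build the same style of GET request against the legacy v1 API. A sketch of just the URL construction (no network call; the key is a placeholder, and the endpoint name is taken from API_ACTIONS above):

    try:
        from urllib.parse import urlencode   # Python 3
    except ImportError:
        from urllib import urlencode         # Python 2

    params = dict(apiKey='PLACEHOLDER-KEY', monitorID='12345',
                  monitorStatus=0, format='json', noJsonCallback=1)
    print('https://api.uptimerobot.com/editMonitor?' + urlencode(params))
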
diff --git a/ansible_collections/community/general/plugins/modules/urpmi.py b/ansible_collections/community/general/plugins/modules/urpmi.py
new file mode 100644
index 000000000..34e099e4d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/urpmi.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Philippe Makowski
+# Written by Philippe Makowski <philippem@mageia.org>
+# Based on apt module written by Matthew Williams <matthew@flowroute.com>
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: urpmi
+short_description: Urpmi manager
+description:
+ - Manages packages with I(urpmi) (such as for Mageia or Mandriva).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - A list of package names to install, upgrade or remove.
+ required: true
+ aliases: [ package, pkg ]
+ type: list
+ elements: str
+ state:
+ description:
+ - Indicates the desired package state.
+ choices: [ absent, present, installed, removed ]
+ default: present
+ type: str
+ update_cache:
+ description:
+ - Update the package database first (C(urpmi.update -a)).
+ type: bool
+ default: false
+ no_recommends:
+ description:
+ - Corresponds to the C(--no-recommends) option for I(urpmi).
+ type: bool
+ default: true
+ force:
+ description:
+ - Assume "yes" is the answer to any question urpmi has to ask.
+ Corresponds to the C(--force) option for I(urpmi).
+ type: bool
+ default: true
+ root:
+ description:
+ - Specifies an alternative install root, relative to which all packages will be installed.
+ Corresponds to the C(--root) option for I(urpmi).
+ aliases: [ installroot ]
+ type: str
+author:
+- Philippe Makowski (@pmakowski)
+'''
+
+EXAMPLES = '''
+- name: Install package foo
+ community.general.urpmi:
+ pkg: foo
+ state: present
+
+- name: Remove package foo
+ community.general.urpmi:
+ pkg: foo
+ state: absent
+
+- name: Remove packages foo and bar
+ community.general.urpmi:
+ pkg: foo,bar
+ state: absent
+
+- name: Update the package database (urpmi.update -a -q) and install bar (bar will be updated if a newer version exists)
+ community.general.urpmi:
+ name: bar
+ state: present
+ update_cache: true
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def query_package(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ return True
+ else:
+ return False
+
+
+def query_package_provides(module, name, root):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rpm_path = module.get_bin_path("rpm", True)
+ cmd = "%s -q --whatprovides %s %s" % (rpm_path, name, root_option(root))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc == 0
+
+
+def update_package_db(module):
+
+ urpmiupdate_path = module.get_bin_path("urpmi.update", True)
+ cmd = "%s -a -q" % (urpmiupdate_path,)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+
+def remove_packages(module, packages, root):
+
+ remove_c = 0
+ # Use a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package, root):
+ continue
+
+ urpme_path = module.get_bin_path("urpme", True)
+ cmd = "%s --auto %s %s" % (urpme_path, root_option(root), package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec, root, force=True, no_recommends=True):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+ if no_recommends:
+ no_recommends_yes = '--no-recommends'
+ else:
+ no_recommends_yes = ''
+
+ if force:
+ force_yes = '--force'
+ else:
+ force_yes = ''
+
+ urpmi_path = module.get_bin_path("urpmi", True)
+ cmd = ("%s --auto %s --quiet %s %s %s" % (urpmi_path, force_yes,
+ no_recommends_yes,
+ root_option(root),
+ packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ for package in pkgspec:
+ if not query_package_provides(module, package, root):
+ module.fail_json(msg="'urpmi %s' failed: %s" % (package, err))
+
+ # urpmi always has exit code 0 when --force is used
+ if rc:
+ module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err))
+ else:
+ module.exit_json(changed=True, msg="%s present(s)" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def root_option(root):
+ if root:
+ return "--root=%s" % root
+ else:
+ return ""
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(type='str', default='present',
+ choices=['absent', 'installed', 'present', 'removed']),
+ update_cache=dict(type='bool', default=False),
+ force=dict(type='bool', default=True),
+ no_recommends=dict(type='bool', default=True),
+ name=dict(type='list', elements='str', required=True, aliases=['package', 'pkg']),
+ root=dict(type='str', aliases=['installroot']),
+ ),
+ )
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ if p['state'] in ['installed', 'present']:
+ install_packages(module, p['name'], p['root'], p['force'], p['no_recommends'])
+
+ elif p['state'] in ['removed', 'absent']:
+ remove_packages(module, p['name'], p['root'])
+
+
+if __name__ == '__main__':
+ main()
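
Note: because install_packages() cannot rely on urpmi's exit code when --force is in effect, it re-queries rpm afterwards. A standalone sketch of that verification step (assumes an rpm-based system with the rpm binary on PATH; package names are hypothetical):

    import subprocess

    def provides_installed(name):
        # rpm exits 0 when an installed package provides `name`
        return subprocess.call(['rpm', '-q', '--whatprovides', name]) == 0

    requested = ['foo', 'bar']  # hypothetical package names
    missing = [p for p in requested if not provides_installed(p)]
    if missing:
        raise SystemExit('urpmi reported success but missing: %s' % missing)
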
diff --git a/ansible_collections/community/general/plugins/modules/utm_aaa_group.py b/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
new file mode 100644
index 000000000..9c595284d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_aaa_group.py
@@ -0,0 +1,239 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy an aaa group object in Sophos UTM
+
+description:
+ - Create, update or destroy an aaa group object in Sophos UTM.
+ - This module needs the REST ability of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ type: str
+ required: true
+ adirectory_groups:
+ description:
+ - List of adirectory group strings.
+ type: list
+ elements: str
+ default: []
+ adirectory_groups_sids:
+ description:
+ - Dictionary of group sids.
+ type: dict
+ default: {}
+ backend_match:
+ description:
+ - The backend for the group.
+ type: str
+ choices:
+ - none
+ - adirectory
+ - edirectory
+ - radius
+ - tacacs
+ - ldap
+ default: none
+ comment:
+ description:
+ - Comment that describes the AAA group.
+ type: str
+ default: ''
+ dynamic:
+ description:
+ - Group type. The group is static if C(none) is selected.
+ type: str
+ default: none
+ choices:
+ - none
+ - ipsec_dn
+ - directory_groups
+ edirectory_groups:
+ description:
+ - List of edirectory group strings.
+ type: list
+ elements: str
+ default: []
+ ipsec_dn:
+ description:
+ - The ipsec dn string.
+ type: str
+ default: ''
+ ldap_attribute:
+ description:
+ - The ldap attribute to check against.
+ type: str
+ default: ''
+ ldap_attribute_value:
+ description:
+ - The ldap attribute value to check against.
+ type: str
+ default: ''
+ members:
+ description:
+ - A list of user ref names (aaa/user).
+ type: list
+ elements: str
+ default: []
+ network:
+ description:
+ - The network reference name. The object contains the known IP addresses for the authentication object (network/aaa).
+ type: str
+ default: ""
+ radius_groups:
+ description:
+ - A list of radius group strings.
+ type: list
+ elements: str
+ default: []
+ tacacs_groups:
+ description:
+ - A list of tacacs group strings.
+ type: list
+ elements: str
+ default: []
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ backend_match: ldap
+ dynamic: directory_groups
+ ldap_attribute: memberof
+ ldap_attribute_value: "cn=groupname,ou=Groups,dc=mydomain,dc=com"
+ network: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM aaa_group
+ community.general.utm_aaa_group:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created.
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object.
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked.
+ type: bool
+ _type:
+ description: The type of the object.
+ type: str
+ name:
+ description: The name of the object.
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups.
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS.
+ type: list
+ backend_match:
+ description: The backend to use.
+ type: str
+ comment:
+ description: The comment string.
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group.
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups.
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match.
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against.
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against.
+ type: str
+ members:
+ description: List of member identifiers of the group.
+ type: list
+ network:
+ description: The identifier of the network (network/aaa).
+ type: str
+ radius_group:
+ description: The radius group identifier.
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier.
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = ["comment", "adirectory_groups", "adirectory_groups_sids", "backend_match", "dynamic",
+ "edirectory_groups", "ipsec_dn", "ldap_attribute", "ldap_attribute_value", "members",
+ "network", "radius_groups", "tacacs_groups"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ adirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ adirectory_groups_sids=dict(type='dict', required=False, default={}),
+ backend_match=dict(type='str', required=False, default="none",
+ choices=["none", "adirectory", "edirectory", "radius", "tacacs", "ldap"]),
+ comment=dict(type='str', required=False, default=""),
+ dynamic=dict(type='str', required=False, default="none", choices=["none", "ipsec_dn", "directory_groups"]),
+ edirectory_groups=dict(type='list', elements='str', required=False, default=[]),
+ ipsec_dn=dict(type='str', required=False, default=""),
+ ldap_attribute=dict(type='str', required=False, default=""),
+ ldap_attribute_value=dict(type='str', required=False, default=""),
+ members=dict(type='list', elements='str', required=False, default=[]),
+ network=dict(type='str', required=False, default=""),
+ radius_groups=dict(type='list', elements='str', required=False, default=[]),
+ tacacs_groups=dict(type='list', elements='str', required=False, default=[]),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
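
Note: the key_to_check_for_changes list above is what the shared UTM helper is assumed to diff between the live REST object and the desired parameters before deciding on an update. A stubbed sketch of that comparison (no UTM connection; the data below is invented):

    key_to_check_for_changes = ["comment", "backend_match", "dynamic"]

    existing = {"_ref": "REF_AaaGroup1", "comment": "",
                "backend_match": "none", "dynamic": "none"}
    desired = {"name": "TestAAAGroupEntry", "comment": "",
               "backend_match": "ldap", "dynamic": "directory_groups"}

    changed = [k for k in key_to_check_for_changes
               if k in desired and existing.get(k) != desired[k]]
    print(changed)  # ['backend_match', 'dynamic'] -> update required
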
diff --git a/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py b/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
new file mode 100644
index 000000000..37e01c736
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_aaa_group_info.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_aaa_group_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for an aaa group entry in Sophos UTM
+
+description:
+ - Get info for an aaa group entry in SOPHOS UTM.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+
+extends_documentation_fragment:
+ - community.general.utm
+ - community.general.attributes
+ - community.general.attributes.info_module
+
+'''
+
+EXAMPLES = """
+- name: Get info for UTM aaa_group
+ community.general.utm_aaa_group_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAAAGroupEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ adirectory_groups:
+ description: List of Active Directory Groups
+ type: str
+ adirectory_groups_sids:
+ description: List of Active Directory Groups SIDS
+ type: list
+ backend_match:
+ description: The backend to use
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ dynamic:
+ description: Whether the group match is ipsec_dn or directory_group
+ type: str
+ edirectory_groups:
+ description: List of eDirectory Groups
+ type: str
+ ipsec_dn:
+ description: ipsec_dn identifier to match
+ type: str
+ ldap_attribute:
+ description: The LDAP Attribute to match against
+ type: str
+ ldap_attribute_value:
+ description: The LDAP Attribute Value to match against
+ type: str
+ members:
+ description: List of member identifiers of the group
+ type: list
+ network:
+ description: The identifier of the network (network/aaa)
+ type: str
+ radius_group:
+ description: The radius group identifier
+ type: str
+ tacacs_group:
+ description: The tacacs group identifier
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "aaa/group"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ ),
+ supports_check_mode=True,
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
new file mode 100644
index 000000000..b944e8312
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Create, update or destroy ca host_key_cert entry in Sophos UTM
+
+description:
+ - Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
+ - This module needs the REST ability of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ ca:
+ description:
+ - A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ required: true
+ type: str
+ meta:
+ description:
+ - A reference to an existing utm_ca_meta_x509 object.
+ required: true
+ type: str
+ certificate:
+ description:
+ - The certificate in PEM format.
+ required: true
+ type: str
+ comment:
+ description:
+ - Optional comment string.
+ type: str
+ encrypted:
+ description:
+ - Optionally enable encryption.
+ default: false
+ type: bool
+ key:
+ description:
+ - Optional private key in PEM format.
+ type: str
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ ca: REF_ca/signing_ca_OBJECT_STRING
+ meta: REF_ca/meta_x509_OBJECT_STRING
+ certificate: |
+ --- BEGIN CERTIFICATE ---
+ . . .
+ . . .
+ . . .
+ --- END CERTIFICATE ---
+ state: present
+
+- name: Remove a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: absent
+
+- name: Read a ca_host_key_cert entry
+ community.general.utm_ca_host_key_cert:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ca=dict(type='str', required=True),
+ meta=dict(type='str', required=True),
+ certificate=dict(type='str', required=True),
+ comment=dict(type='str', required=False),
+ encrypted=dict(type='bool', required=False, default=False),
+ key=dict(type='str', required=False, no_log=True),
+ )
+ )
+ try:
+ # This is needed because the bool value only accepts int values in the backend
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
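All of the UTM modules in this change share one thin pattern: UTMModule presumably extends AnsibleModule with the shared connection arguments documented by the community.general.utm fragment (utm_host, utm_token, and so on), and the UTM helper performs the REST calls against the given endpoint, comparing the fields listed in key_to_check_for_changes to decide whether the existing object must be updated. The sketch below illustrates that change-detection idea in isolation; it is a simplified stand-in written for this review, not the actual utm_utils implementation, and the function name needs_update is hypothetical.

  # Hypothetical sketch of the key_to_check_for_changes idea; not the real utm_utils code.
  def needs_update(existing, desired, keys_to_check):
      """Return True if any watched field differs between the object on the
      UTM and the desired module parameters."""
      return any(existing.get(key) != desired.get(key) for key in keys_to_check)

  existing = {"comment": "", "certificate": "--- OLD ---"}
  desired = {"comment": "", "certificate": "--- NEW ---"}
  print(needs_update(existing, desired, ["ca", "certificate", "comment"]))  # True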
diff --git a/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
new file mode 100644
index 000000000..d81eede69
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_ca_host_key_cert_info.py
@@ -0,0 +1,109 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_ca_host_key_cert_info
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Get info for a ca host_key_cert entry in Sophos UTM
+
+description:
+ - Get info for a ca host_key_cert entry in SOPHOS UTM.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+
+extends_documentation_fragment:
+ - community.general.utm
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get info for a ca host_key_cert entry
+ community.general.utm_ca_host_key_cert_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestHostKeyCertEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ ca:
+ description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
+ type: str
+ meta:
+ description: A reference to an existing utm_ca_meta_x509 object.
+ type: str
+ certificate:
+ description: The certificate in PEM format
+ type: str
+ comment:
+ description: Comment string (may be empty string)
+ type: str
+ encrypted:
+ description: If encryption is enabled
+ type: bool
+ key:
+ description: Private key in PEM format (may be empty string)
+ type: str
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "ca/host_key_cert"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ ),
+ supports_check_mode=True,
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
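Because the info module documents its payload under result, including the _ref reference name, a play can register the lookup and feed the reference into other tasks. A short sketch of that chaining (the registered variable name is arbitrary):

  - name: Look up the host key cert
    community.general.utm_ca_host_key_cert_info:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestHostKeyCertEntry
    register: cert_info

  - name: Show the reference name returned by the lookup
    ansible.builtin.debug:
      msg: "{{ cert_info.result._ref }}"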
diff --git a/ansible_collections/community/general/plugins/modules/utm_dns_host.py b/ansible_collections/community/general/plugins/modules/utm_dns_host.py
new file mode 100644
index 000000000..6b3725557
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_dns_host.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_dns_host
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy a DNS entry in Sophos UTM
+
+description:
+ - Create, update or destroy a DNS entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ address:
+ type: str
+ description:
+ - The IPv4 address of the entry. Can be left empty for automatic resolving.
+ default: 0.0.0.0
+ address6:
+ type: str
+ description:
+ - The IPv6 address of the entry. Can be left empty for automatic resolving.
+ default: "::"
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the DNS host object.
+ default: ''
+ hostname:
+ type: str
+ description:
+ - The hostname for the DNS host object.
+ interface:
+ type: str
+ description:
+ - The reference name of the interface to use. If not provided, the default interface will be used.
+ default: ''
+ resolved:
+ description:
+ - Whether the hostname's IPv4 address is already resolved or not.
+ default: false
+ type: bool
+ resolved6:
+ description:
+ - Whether the hostname's IPv6 address is already resolved or not.
+ default: false
+ type: bool
+ timeout:
+ type: int
+ description:
+ - The timeout for the UTM to resolve the IP address for the hostname again.
+ default: 0
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ hostname: testentry.some.tld
+ state: present
+
+- name: Remove UTM dns host entry
+ community.general.utm_dns_host:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestDNSEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The IPv4 address of the object
+ type: str
+ address6:
+ description: The IPv6 address of the object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ hostname:
+ description: The hostname of the object
+ type: str
+ interface:
+ description: The reference name of the interface the object is associated with
+ type: str
+ resolved:
+ description: Whether the IPv4 address is resolved or not
+ type: bool
+ resolved6:
+ description: Whether the IPv6 address is resolved or not
+ type: bool
+ timeout:
+ description: The timeout until a new resolving will be attempted
+ type: int
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "network/dns_host"
+ key_to_check_for_changes = ["comment", "hostname", "interface"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=False, default='0.0.0.0'),
+ address6=dict(type='str', required=False, default='::'),
+ comment=dict(type='str', required=False, default=""),
+ hostname=dict(type='str', required=False),
+ interface=dict(type='str', required=False, default=""),
+ resolved=dict(type='bool', required=False, default=False),
+ resolved6=dict(type='bool', required=False, default=False),
+ timeout=dict(type='int', required=False, default=0),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
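Since address and address6 default to 0.0.0.0 and '::' and the resolved flags default to false, leaving the addresses unset asks the UTM to resolve the hostname itself. A hedged variant of the create example above (the interface reference is an illustrative placeholder, and the unit of timeout is not documented here):

  - name: Create a DNS host entry that the UTM resolves itself
    community.general.utm_dns_host:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestDNSEntry
      hostname: testentry.some.tld
      interface: REF_INTERFACE_OBJECT_STRING  # illustrative reference name
      timeout: 3600                           # re-resolve timeout; unit per UTM API
      state: present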
diff --git a/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py b/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
new file mode 100644
index 000000000..a85a46aea
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_network_interface_address.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Create, update or destroy network/interface_address object
+
+description:
+ - Create, update or destroy a network/interface_address object in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ address:
+ type: str
+ description:
+ - The IPv4 address of the network/interface_address object.
+ required: true
+ address6:
+ type: str
+ description:
+ - The IPv6 address of the network/interface_address object.
+ required: false
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object.
+ default: ''
+ resolved:
+ type: bool
+ description:
+ - Whether or not the IPv4 address of the object is resolved.
+ resolved6:
+ type: bool
+ description:
+ - Whether or not the IPv6 address of the object is resolved.
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: present
+
+- name: Remove a network interface address
+ community.general.utm_network_interface_address:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+ address: 0.0.0.0
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The IPv4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The IPv6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the IPv4 address of the object is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the IPv6 address of the object is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = ["comment", "address"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ address=dict(type='str', required=True),
+ comment=dict(type='str', required=False, default=""),
+ address6=dict(type='str', required=False),
+ resolved=dict(type='bool', required=False),
+ resolved6=dict(type='bool', required=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py b/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
new file mode 100644
index 000000000..9dc08ad09
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_network_interface_address_info.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Juergen Wiebe <wiebe@e-spirit.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_network_interface_address_info
+
+author:
+ - Juergen Wiebe (@steamx)
+
+short_description: Get info for a network/interface_address object
+
+description:
+ - Get info for a network/interface_address object in SOPHOS UTM.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+
+extends_documentation_fragment:
+ - community.general.utm
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get network interface address info
+ community.general.utm_network_interface_address_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestNetworkInterfaceAddress
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ address:
+ description: The IPv4 address of the network/interface_address object
+ type: str
+ address6:
+ description: The IPv6 address of the network/interface_address object
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ resolved:
+ description: Whether or not the IPv4 address of the object is resolved
+ type: bool
+ resolved6:
+ description: Whether or not the IPv6 address of the object is resolved
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "network/interface_address"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True)
+ ),
+ supports_check_mode=True,
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py b/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
new file mode 100644
index 000000000..3b482483b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_auth_profile.py
@@ -0,0 +1,355 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Stephan Schwarz <stearz@gmx.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_auth_profile
+
+author:
+ - Stephan Schwarz (@stearz)
+
+short_description: Create, update or destroy reverse_proxy auth_profile entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy auth_profile entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ aaa:
+ type: list
+ elements: str
+ description:
+ - List of references to utm_aaa objects (allowed users or groups)
+ required: true
+ basic_prompt:
+ type: str
+ description:
+ - The message in the basic authentication prompt
+ required: true
+ backend_mode:
+ type: str
+ description:
+ - Specifies if the backend server needs authentication ([Basic|None])
+ default: None
+ choices:
+ - Basic
+ - None
+ backend_strip_basic_auth:
+ description:
+ - Whether the login data should be stripped when proxying the request to the backend host.
+ type: bool
+ default: true
+ backend_user_prefix:
+ type: str
+ description:
+ - Prefix string to prepend to the username for backend authentication
+ default: ""
+ backend_user_suffix:
+ type: str
+ description:
+ - Suffix string to append to the username for backend authentication
+ default: ""
+ comment:
+ type: str
+ description:
+ - Optional comment string
+ default: ""
+ frontend_cookie:
+ type: str
+ description:
+ - Frontend cookie name
+ frontend_cookie_secret:
+ type: str
+ description:
+ - Frontend cookie secret
+ frontend_form:
+ type: str
+ description:
+ - Frontend authentication form name
+ frontend_form_template:
+ type: str
+ description:
+ - Frontend authentication form template
+ default: ""
+ frontend_login:
+ type: str
+ description:
+ - Frontend login name
+ frontend_logout:
+ type: str
+ description:
+ - Frontend logout name
+ frontend_mode:
+ type: str
+ description:
+ - Frontend authentication mode (Form|Basic)
+ default: Basic
+ choices:
+ - Basic
+ - Form
+ frontend_realm:
+ type: str
+ description:
+ - Frontend authentication realm
+ frontend_session_allow_persistency:
+ description:
+ - Allow session persistency
+ type: bool
+ default: false
+ frontend_session_lifetime:
+ type: int
+ description:
+ - The session lifetime, in the unit set by frontend_session_lifetime_scope.
+ required: true
+ frontend_session_lifetime_limited:
+ description:
+ - Specifies if limitation of session lifetime is active
+ type: bool
+ default: true
+ frontend_session_lifetime_scope:
+ type: str
+ description:
+ - The scope for frontend_session_lifetime (days|hours|minutes).
+ default: hours
+ choices:
+ - days
+ - hours
+ - minutes
+ frontend_session_timeout:
+ type: int
+ description:
+ - The session timeout, in the unit set by frontend_session_timeout_scope.
+ required: true
+ frontend_session_timeout_enabled:
+ description:
+ - Specifies if session timeout is active
+ type: bool
+ default: true
+ frontend_session_timeout_scope:
+ type: str
+ description:
+ - The scope for frontend_session_timeout (days|hours|minutes).
+ default: minutes
+ choices:
+ - days
+ - hours
+ - minutes
+ logout_delegation_urls:
+ type: list
+ elements: str
+ description:
+ - List of logout URLs that logouts are delegated to
+ default: []
+ logout_mode:
+ type: str
+ description:
+ - Mode of logout (None|Delegation)
+ default: None
+ choices:
+ - None
+ - Delegation
+ redirect_to_requested_url:
+ description:
+ - Whether a redirect to the requested URL should be made.
+ type: bool
+ default: false
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ aaa: [REF_OBJECT_STRING,REF_ANOTHEROBJECT_STRING]
+ basic_prompt: "Authentication required: Please login"
+ frontend_session_lifetime: 1
+ frontend_session_timeout: 1
+ state: present
+
+- name: Remove UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: absent
+
+- name: Read UTM proxy_auth_profile
+ community.general.utm_proxy_auth_profile:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestAuthProfileEntry
+ state: info
+
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ aaa:
+ description: List of references to utm_aaa objects (allowed users or groups)
+ type: list
+ basic_prompt:
+ description: The message in the basic authentication prompt
+ type: str
+ backend_mode:
+ description: Specifies if the backend server needs authentication ([Basic|None])
+ type: str
+ backend_strip_basic_auth:
+ description: Whether the login data should be stripped when proxying the request to the backend host
+ type: bool
+ backend_user_prefix:
+ description: Prefix string to prepend to the username for backend authentication
+ type: str
+ backend_user_suffix:
+ description: Suffix string to append to the username for backend authentication
+ type: str
+ comment:
+ description: Optional comment string
+ type: str
+ frontend_cookie:
+ description: Frontend cookie name
+ type: str
+ frontend_form:
+ description: Frontend authentication form name
+ type: str
+ frontend_form_template:
+ description: Frontend authentication form template
+ type: str
+ frontend_login:
+ description: Frontend login name
+ type: str
+ frontend_logout:
+ description: Frontend logout name
+ type: str
+ frontend_mode:
+ description: Frontend authentication mode (Form|Basic)
+ type: str
+ frontend_realm:
+ description: Frontend authentication realm
+ type: str
+ frontend_session_allow_persistency:
+ description: Allow session persistency
+ type: bool
+ frontend_session_lifetime:
+ description: The session lifetime
+ type: int
+ frontend_session_lifetime_limited:
+ description: Specifies if limitation of session lifetime is active
+ type: bool
+ frontend_session_lifetime_scope:
+ description: The scope for frontend_session_lifetime (days|hours|minutes)
+ type: str
+ frontend_session_timeout:
+ description: The session timeout
+ type: int
+ frontend_session_timeout_enabled:
+ description: Specifies if session timeout is active
+ type: bool
+ frontend_session_timeout_scope:
+ description: The scope for frontend_session_timeout (days|hours|minutes)
+ type: str
+ logout_delegation_urls:
+ description: List of logout URLs that logouts are delegated to
+ type: list
+ logout_mode:
+ description: Mode of logout (None|Delegation)
+ type: str
+ redirect_to_requested_url:
+ description: Whether a redirect to the requested URL should be made
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/auth_profile"
+ key_to_check_for_changes = ["aaa", "basic_prompt", "backend_mode", "backend_strip_basic_auth",
+ "backend_user_prefix", "backend_user_suffix", "comment", "frontend_cookie",
+ "frontend_cookie_secret", "frontend_form", "frontend_form_template",
+ "frontend_login", "frontend_logout", "frontend_mode", "frontend_realm",
+ "frontend_session_allow_persistency", "frontend_session_lifetime",
+ "frontend_session_lifetime_limited", "frontend_session_lifetime_scope",
+ "frontend_session_timeout", "frontend_session_timeout_enabled",
+ "frontend_session_timeout_scope", "logout_delegation_urls", "logout_mode",
+ "redirect_to_requested_url"]
+
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ aaa=dict(type='list', elements='str', required=True),
+ basic_prompt=dict(type='str', required=True),
+ backend_mode=dict(type='str', required=False, default="None", choices=['Basic', 'None']),
+ backend_strip_basic_auth=dict(type='bool', required=False, default=True),
+ backend_user_prefix=dict(type='str', required=False, default=""),
+ backend_user_suffix=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ frontend_cookie=dict(type='str', required=False),
+ frontend_cookie_secret=dict(type='str', required=False, no_log=True),
+ frontend_form=dict(type='str', required=False),
+ frontend_form_template=dict(type='str', required=False, default=""),
+ frontend_login=dict(type='str', required=False),
+ frontend_logout=dict(type='str', required=False),
+ frontend_mode=dict(type='str', required=False, default="Basic", choices=['Basic', 'Form']),
+ frontend_realm=dict(type='str', required=False),
+ frontend_session_allow_persistency=dict(type='bool', required=False, default=False),
+ frontend_session_lifetime=dict(type='int', required=True),
+ frontend_session_lifetime_limited=dict(type='bool', required=False, default=True),
+ frontend_session_lifetime_scope=dict(type='str', required=False, default="hours", choices=['days', 'hours', 'minutes']),
+ frontend_session_timeout=dict(type='int', required=True),
+ frontend_session_timeout_enabled=dict(type='bool', required=False, default=True),
+ frontend_session_timeout_scope=dict(type='str', required=False, default="minutes", choices=['days', 'hours', 'minutes']),
+ logout_delegation_urls=dict(type='list', elements='str', required=False, default=[]),
+ logout_mode=dict(type='str', required=False, default="None", choices=['None', 'Delegation']),
+ redirect_to_requested_url=dict(type='bool', required=False, default=False)
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
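Form-based login is driven entirely by the frontend_* options above. A hedged example of a Form-mode profile (the cookie, secret, and form names are illustrative placeholders, not values mandated by the UTM):

  - name: Create a form-based UTM proxy_auth_profile
    community.general.utm_proxy_auth_profile:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestFormAuthProfile
      aaa: [REF_OBJECT_STRING]
      basic_prompt: "Authentication required: Please login"
      frontend_mode: Form
      frontend_cookie: SessionCookie        # illustrative
      frontend_cookie_secret: some_secret   # illustrative
      frontend_form: LoginForm              # illustrative
      frontend_session_lifetime: 8
      frontend_session_lifetime_scope: hours
      frontend_session_timeout: 30
      frontend_session_timeout_scope: minutes
      state: present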
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py b/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
new file mode 100644
index 000000000..a0a3f85b5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_exception.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_exception
+
+author:
+ - Sebastian Schenzel (@RickS-C137)
+
+short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ type: str
+ op:
+ description:
+ - The logical operator (AND or OR) used to combine the entries of the path parameter
+ default: 'AND'
+ choices:
+ - 'AND'
+ - 'OR'
+ required: false
+ type: str
+ path:
+ description:
+ - The paths for which the exception in the reverse proxy is defined
+ type: list
+ elements: str
+ default: []
+ required: false
+ skip_custom_threats_filters:
+ description:
+ - A list of threats to be skipped
+ type: list
+ elements: str
+ default: []
+ required: false
+ skip_threats_filter_categories:
+ description:
+ - Define which categories of threats are skipped
+ type: list
+ elements: str
+ default: []
+ required: false
+ skipav:
+ description:
+ - Skip the Antivirus Scanning
+ default: false
+ type: bool
+ required: false
+ skipbadclients:
+ description:
+ - Skip blocking of clients with bad reputation
+ default: false
+ type: bool
+ required: false
+ skipcookie:
+ description:
+ - Skip the Cookie Signing check
+ default: false
+ type: bool
+ required: false
+ skipform:
+ description:
+ - Skip the form hardening check
+ default: false
+ type: bool
+ required: false
+ skipform_missingtoken:
+ description:
+ - Skip the form hardening check for requests with missing tokens
+ default: false
+ type: bool
+ required: false
+ skiphtmlrewrite:
+ description:
+ - Skip HTML rewriting
+ default: false
+ type: bool
+ required: false
+ skiptft:
+ description:
+ - Skip the true file type control check
+ default: false
+ type: bool
+ required: false
+ skipurl:
+ description:
+ - Skip the static URL hardening check
+ default: false
+ type: bool
+ required: false
+ source:
+ description:
+ - A list of source network references the exception applies to
+ type: list
+ elements: str
+ default: []
+ required: false
+ status:
+ description:
+ - Status of the exception rule set
+ default: true
+ type: bool
+ required: false
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ path:
+ - /app/
+ state: present
+
+- name: Remove UTM proxy_exception
+ community.general.utm_proxy_exception:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestExceptionEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ comment:
+ description: The optional comment string
+ type: str
+ op:
+ description: The logical operator used to combine the entries of the path parameter
+ type: str
+ path:
+ description: The paths for which the exception in the reverse proxy is defined
+ type: list
+ skip_custom_threats_filters:
+ description: A list of threats to be skipped
+ type: list
+ skip_threats_filter_categories:
+ description: Define which categories of threats are skipped
+ type: list
+ skipav:
+ description: Skip the Antivirus Scanning
+ type: bool
+ skipbadclients:
+ description: Skip blocking of clients with bad reputation
+ type: bool
+ skipcookie:
+ description: Skip the Cookie Signing check
+ type: bool
+ skipform:
+ description: Skip the form hardening check
+ type: bool
+ skipform_missingtoken:
+ description: Skip the form hardening check for requests with missing tokens
+ type: bool
+ skiphtmlrewrite:
+ description: Skip HTML rewriting
+ type: bool
+ skiptft:
+ description: Skip the true file type control check
+ type: bool
+ skipurl:
+ description: Skip the static URL hardening check
+ type: bool
+ source:
+ description: A list of source network references the exception applies to
+ type: list
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/exception"
+ key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
+ "comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
+ "skiphtmlrewrite", "skiptft", "skipurl", "source"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
+ path=dict(type='list', elements='str', required=False, default=[]),
+ skip_custom_threats_filters=dict(type='list', elements='str', required=False, default=[]),
+ skip_threats_filter_categories=dict(type='list', elements='str', required=False, default=[]),
+ skipav=dict(type='bool', required=False, default=False),
+ skipbadclients=dict(type='bool', required=False, default=False),
+ skipcookie=dict(type='bool', required=False, default=False),
+ skipform=dict(type='bool', required=False, default=False),
+ skipform_missingtoken=dict(type='bool', required=False, default=False),
+ skiphtmlrewrite=dict(type='bool', required=False, default=False),
+ skiptft=dict(type='bool', required=False, default=False),
+ skipurl=dict(type='bool', required=False, default=False),
+ source=dict(type='list', elements='str', required=False, default=[]),
+ status=dict(type='bool', required=False, default=True),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
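The op option combines the path entries, so AND requires every path pattern to match while OR matches any of them. A hedged example that skips antivirus scanning and static URL hardening for two paths (the path values are illustrative):

  - name: Skip AV and URL hardening for two paths
    community.general.utm_proxy_exception:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestExceptionEntry
      op: OR
      path:
        - /api/
        - /static/
      skipav: true
      skipurl: true
      state: present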
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
new file mode 100644
index 000000000..22a773fef
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ add_content_type_header:
+ description:
+ - Whether to add the content type header or not
+ type: bool
+ default: false
+ address:
+ type: str
+ description:
+ - The reference name of the network/interface_address object.
+ default: REF_DefaultInternalAddress
+ allowed_networks:
+ type: list
+ elements: str
+ description:
+ - A list of reference names for the allowed networks.
+ default: ['REF_NetworkAny']
+ certificate:
+ type: str
+ description:
+ - The reference name of the ca/host_key_cert object.
+ default: ""
+ comment:
+ type: str
+ description:
+ - An optional comment to add to the object
+ default: ""
+ disable_compression:
+ description:
+ - Whether to disable compression
+ type: bool
+ default: false
+ domain:
+ type: list
+ elements: str
+ description:
+ - A list of domain names for the frontend object
+ exceptions:
+ type: list
+ elements: str
+ description:
+ - A list of exception ref names (reverse_proxy/exception)
+ default: []
+ htmlrewrite:
+ description:
+ - Whether to enable html rewrite or not
+ type: bool
+ default: false
+ htmlrewrite_cookies:
+ description:
+ - Whether to enable html rewrite cookie or not
+ type: bool
+ default: false
+ implicitredirect:
+ description:
+ - Whether to enable implicit redirection or not
+ type: bool
+ default: false
+ lbmethod:
+ type: str
+ description:
+ - Which load balancer method should be used
+ choices:
+ - ""
+ - bybusyness
+ - bytraffic
+ - byrequests
+ default: bybusyness
+ locations:
+ type: list
+ elements: str
+ description:
+ - A list of location ref names (reverse_proxy/location)
+ default: []
+ port:
+ type: int
+ description:
+ - The frontend http port
+ default: 80
+ preservehost:
+ description:
+ - Whether to preserve host header
+ type: bool
+ default: false
+ profile:
+ type: str
+ description:
+ - The reference string of the reverse_proxy/profile
+ default: ""
+ status:
+ description:
+ - Whether to activate the frontend entry or not
+ type: bool
+ default: true
+ type:
+ type: str
+ description:
+ - Which protocol should be used
+ choices:
+ - http
+ - https
+ default: http
+ xheaders:
+ description:
+ - Whether to pass the host header or not
+ type: bool
+ default: false
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ domain:
+ - testfrontend.some.tld
+ state: present
+
+- name: Remove utm proxy_frontend
+ community.general.utm_proxy_frontend:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The load balancer method in use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
+ "comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
+ "htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
+ "port", "preservehost", "profile", "status", "type", "xheaders"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ add_content_type_header=dict(type='bool', required=False, default=False),
+ address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
+ allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
+ certificate=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ disable_compression=dict(type='bool', required=False, default=False),
+ domain=dict(type='list', elements='str', required=False),
+ exceptions=dict(type='list', elements='str', required=False, default=[]),
+ htmlrewrite=dict(type='bool', required=False, default=False),
+ htmlrewrite_cookies=dict(type='bool', required=False, default=False),
+ implicitredirect=dict(type='bool', required=False, default=False),
+ lbmethod=dict(type='str', required=False, default="bybusyness",
+ choices=['bybusyness', 'bytraffic', 'byrequests', '']),
+ locations=dict(type='list', elements='str', required=False, default=[]),
+ port=dict(type='int', required=False, default=80),
+ preservehost=dict(type='bool', required=False, default=False),
+ profile=dict(type='str', required=False, default=""),
+ status=dict(type='bool', required=False, default=True),
+ type=dict(type='str', required=False, default="http", choices=['http', 'https']),
+ xheaders=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
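The certificate option takes the reference name of a ca/host_key_cert object, so an HTTPS frontend can be composed from the utm_ca_host_key_cert module earlier in this change. A hedged sketch (all REF_* placeholders are illustrative):

  - name: Create an HTTPS frontend backed by a host key cert
    community.general.utm_proxy_frontend:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestHttpsFrontendEntry
      type: https
      port: 443
      certificate: REF_ca/host_key_cert_OBJECT_STRING
      domain:
        - testfrontend.some.tld
      locations:
        - REF_reverse_proxy/location_OBJECT_STRING
      state: present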
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
new file mode 100644
index 000000000..0435ef949
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_frontend_info.py
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_frontend_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy frontend entry in Sophos UTM
+
+description:
+ - Get info for a reverse_proxy frontend entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+
+extends_documentation_fragment:
+ - community.general.utm
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get utm proxy_frontend
+ community.general.utm_proxy_frontend_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestFrontendEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ add_content_type_header:
+ description: Whether to add the content type header
+ type: bool
+ address:
+ description: The reference name of the address
+ type: str
+ allowed_networks:
+ description: List of reference names of networks associated
+ type: list
+ certificate:
+ description: Reference name of certificate (ca/host_key_cert)
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ disable_compression:
+ description: State of compression support
+ type: bool
+ domain:
+ description: List of hostnames
+ type: list
+ exceptions:
+ description: List of associated proxy exceptions
+ type: list
+ htmlrewrite:
+ description: State of html rewrite
+ type: bool
+ htmlrewrite_cookies:
+ description: Whether the html rewrite cookie will be set
+ type: bool
+ implicitredirect:
+ description: Whether to use implicit redirection
+ type: bool
+ lbmethod:
+ description: The method of loadbalancer to use
+ type: str
+ locations:
+ description: The reference names of reverse_proxy/locations associated with the object
+ type: list
+ port:
+ description: The port of the frontend connection
+ type: int
+ preservehost:
+ description: Preserve host header
+ type: bool
+ profile:
+ description: The associated reverse_proxy/profile
+ type: str
+ status:
+ description: Whether the frontend object is active or not
+ type: bool
+ type:
+ description: The connection type
+ type: str
+ xheaders:
+ description: The xheaders state
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/frontend"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ),
+ supports_check_mode=True,
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
new file mode 100644
index 000000000..c22de7b92
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Create, update or destroy reverse_proxy location entry in Sophos UTM
+
+description:
+ - Create, update or destroy a reverse_proxy location entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+ access_control:
+ description:
+ - Whether to activate the access control for the location
+ type: str
+ default: '0'
+ choices:
+ - '0'
+ - '1'
+ allowed_networks:
+ description:
+ - A list of allowed networks
+ type: list
+ elements: str
+ default:
+ - REF_NetworkAny
+ auth_profile:
+ type: str
+ description:
+ - The reference name of the auth profile
+ default: ''
+ backend:
+ type: list
+ elements: str
+ description:
+ - A list of backends that are connected with this location declaration
+ default: []
+ be_path:
+ type: str
+ description:
+ - The path of the backend
+ default: ''
+ comment:
+ type: str
+ description:
+ - The optional comment string
+ default: ''
+ denied_networks:
+ type: list
+ elements: str
+ description:
+ - A list of denied network references
+ default: []
+ hot_standby:
+ description:
+ - Activate hot standby mode
+ type: bool
+ default: false
+ path:
+ type: str
+ description:
+ - The path of the location
+ default: "/"
+ status:
+ description:
+ - Whether the location is active or not
+ type: bool
+ default: true
+ stickysession_id:
+ type: str
+ description:
+ - The stickysession id
+ default: ROUTEID
+ stickysession_status:
+ description:
+ - Enable the stickysession
+ type: bool
+ default: false
+ websocket_passthrough:
+ description:
+ - Enable the websocket passthrough
+ type: bool
+ default: false
+
+extends_documentation_fragment:
+- community.general.utm
+- community.general.attributes
+
+'''
+
+EXAMPLES = """
+- name: Create UTM proxy_location
+ community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ backend: REF_OBJECT_STRING
+ state: present
+
+- name: Remove UTM proxy_location
+ community.general.utm_proxy_location:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+ state: absent
+"""
+
+RETURN = """
+result:
+ description: The utm object that was created
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: Whether to use access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The list of backend reference names
+ type: list
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+ description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = ["access_control", "allowed_networks", "auth_profile", "backend", "be_path", "comment",
+ "denied_networks", "hot_standby", "path", "status", "stickysession_id",
+ "stickysession_status", "websocket_passthrough"]
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ access_control=dict(type='str', required=False, default="0", choices=['0', '1']),
+ allowed_networks=dict(type='list', elements='str', required=False, default=['REF_NetworkAny']),
+ auth_profile=dict(type='str', required=False, default=""),
+ backend=dict(type='list', elements='str', required=False, default=[]),
+ be_path=dict(type='str', required=False, default=""),
+ comment=dict(type='str', required=False, default=""),
+ denied_networks=dict(type='list', elements='str', required=False, default=[]),
+ hot_standby=dict(type='bool', required=False, default=False),
+ path=dict(type='str', required=False, default="/"),
+ status=dict(type='bool', required=False, default=True),
+ stickysession_id=dict(type='str', required=False, default='ROUTEID'),
+ stickysession_status=dict(type='bool', required=False, default=False),
+ websocket_passthrough=dict(type='bool', required=False, default=False),
+ )
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
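A location ties one or more reverse_proxy backend references to a path and can pin clients to a single backend via sticky sessions. A hedged example (the backend reference names are illustrative placeholders):

  - name: Create a location with two backends and sticky sessions
    community.general.utm_proxy_location:
      utm_host: sophos.host.name
      utm_token: abcdefghijklmno1234
      name: TestLocationEntry
      path: /app
      backend:
        - REF_BACKEND_A_OBJECT_STRING
        - REF_BACKEND_B_OBJECT_STRING
      stickysession_status: true
      stickysession_id: ROUTEID
      state: present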
diff --git a/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
new file mode 100644
index 000000000..58a32107b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/utm_proxy_location_info.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: utm_proxy_location_info
+
+author:
+ - Johannes Brunswicker (@MatrixCrawler)
+
+short_description: Get info for a reverse_proxy location entry in Sophos UTM
+
+description:
+ - Get info for a reverse_proxy location entry in SOPHOS UTM.
+ - This module requires the REST API of the UTM to be activated.
+
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+
+options:
+ name:
+ type: str
+ description:
+ - The name of the object. Will be used to identify the entry.
+ required: true
+
+extends_documentation_fragment:
+ - community.general.utm
+ - community.general.attributes
+ - community.general.attributes.info_module
+'''
+
+EXAMPLES = """
+- name: Get info for UTM proxy_location
+ community.general.utm_proxy_location_info:
+ utm_host: sophos.host.name
+ utm_token: abcdefghijklmno1234
+ name: TestLocationEntry
+"""
+
+RETURN = """
+result:
+ description: The utm object that was found
+ returned: success
+ type: complex
+ contains:
+ _ref:
+ description: The reference name of the object
+ type: str
+ _locked:
+ description: Whether or not the object is currently locked
+ type: bool
+ _type:
+ description: The type of the object
+ type: str
+ name:
+ description: The name of the object
+ type: str
+ access_control:
+ description: Whether to use access control state
+ type: str
+ allowed_networks:
+ description: List of allowed network reference names
+ type: list
+ auth_profile:
+ description: The auth profile reference name
+ type: str
+ backend:
+ description: The list of backend reference names
+ type: list
+ be_path:
+ description: The backend path
+ type: str
+ comment:
+ description: The comment string
+ type: str
+ denied_networks:
+ description: The list of the denied network names
+ type: list
+ hot_standby:
+ description: Use hot standby
+ type: bool
+ path:
+ description: Path name
+ type: str
+ status:
+ description: Whether the object is active or not
+ type: bool
+ stickysession_id:
+ description: The identifier of the stickysession
+ type: str
+ stickysession_status:
+ description: Whether to use stickysession or not
+ type: bool
+ websocket_passthrough:
+ description: Whether websocket passthrough will be used or not
+ type: bool
+"""
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def main():
+ endpoint = "reverse_proxy/location"
+ key_to_check_for_changes = []
+ module = UTMModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ ),
+ supports_check_mode=True,
+ )
+ try:
+ UTM(module, endpoint, key_to_check_for_changes, info_only=True).execute()
+ except Exception as e:
+ module.fail_json(msg=to_native(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vdo.py b/ansible_collections/community/general/plugins/modules/vdo.py
new file mode 100644
index 000000000..f1ea40e2e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vdo.py
@@ -0,0 +1,781 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Red Hat, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+author:
+ - Bryan Gurney (@bgurney-rh)
+
+module: vdo
+
+short_description: Module to control VDO
+
+description:
+ - This module controls the VDO dedupe and compression device.
+ - VDO, or Virtual Data Optimizer, is a device-mapper target that
+ provides inline block-level deduplication, compression, and
+ thin provisioning capabilities to primary storage.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - The name of the VDO volume.
+ type: str
+ required: true
+ state:
+ description:
+ - Whether this VDO volume should be "present" or "absent".
+ If a "present" VDO volume does not exist, it will be
+ created. If a "present" VDO volume already exists, it
+ will be modified, by updating the configuration, which
+ will take effect when the VDO volume is restarted.
+ Not all parameters of an existing VDO volume can be
+ modified; the "statusparamkeys" list contains the
+ parameters that can be modified after creation. If an
+ "absent" VDO volume does not exist, it will not be
+ removed.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ activated:
+ description:
+ - The "activate" status for a VDO volume. If this is set
+ to C(false), the VDO volume cannot be started, and it will
+ not start on system startup. However, on initial
+ creation, a VDO volume with "activated" set to "off"
+ will be running, until stopped. This is the default
+ behavior of the "vdo create" command; it provides the
+ user an opportunity to write a base amount of metadata
+ (filesystem, LVM headers, etc.) to the VDO volume prior
+ to stopping the volume, and leaving it deactivated
+ until ready to use.
+ type: bool
+ running:
+ description:
+ - Whether this VDO volume is running.
+ - A VDO volume must be activated in order to be started.
+ type: bool
+ device:
+ description:
+ - The full path of the device to use for VDO storage.
+ - This is required if "state" is "present".
+ type: str
+ logicalsize:
+ description:
+ - The logical size of the VDO volume (in megabytes, or
+ LVM suffix format). If not specified for a new volume,
+ this defaults to the same size as the underlying storage
+ device, which is specified in the 'device' parameter.
+ Existing volumes will maintain their size if the
+ logicalsize parameter is not specified, or is smaller
+ than or identical to the current size. If the specified
+ size is larger than the current size, a growlogical
+ operation will be performed.
+ type: str
+ deduplication:
+ description:
+ - Configures whether deduplication is enabled. The
+ default for a created volume is 'enabled'. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ compression:
+ description:
+ - Configures whether compression is enabled. The default
+ for a created volume is 'enabled'. Existing volumes
+ will maintain their previously configured setting unless
+ a different value is specified in the playbook.
+ type: str
+ choices: [ disabled, enabled ]
+ blockmapcachesize:
+ description:
+ - The amount of memory allocated for caching block map
+ pages, in megabytes (or may be issued with an LVM-style
+ suffix of K, M, G, or T). The default (and minimum)
+ value is 128M. The value specifies the size of the
+ cache; there is a 15% memory usage overhead. Each 1.25G
+ of block map covers 1T of logical blocks, therefore a
+ small amount of block map cache memory can cache a
+ significantly large amount of block map data. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ type: str
+ readcache:
+ description:
+ - Enables or disables the read cache. The default is
+ 'disabled'. Choosing 'enabled' enables a read cache
+ which may improve performance for workloads of high
+ deduplication, read workloads with a high level of
+ compression, or on hard disk storage. Existing
+ volumes will maintain their previously configured
+ setting unless a different value is specified in the
+ playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ choices: [ disabled, enabled ]
+ readcachesize:
+ description:
+ - Specifies the extra VDO device read cache size in
+ megabytes. This is in addition to a system-defined
+ minimum. Using a value with a suffix of K, M, G, or T
+ is optional. The default value is 0. 1.125 MB of
+ memory per bio thread will be used per 1 MB of read
+ cache specified (for example, a VDO volume configured
+ with 4 bio threads will have a read cache memory usage
+ overhead of 4.5 MB per 1 MB of read cache specified).
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ - The read cache feature is available in VDO 6.1 and older.
+ type: str
+ emulate512:
+ description:
+ - Enables 512-byte emulation mode, allowing drivers or
+ filesystems to access the VDO volume at 512-byte
+ granularity, instead of the default 4096-byte granularity.
+ Default is 'disabled'; only recommended when a driver
+ or filesystem requires 512-byte sector level access to
+ a device. This option is only available when creating
+ a new volume, and cannot be changed for an existing
+ volume.
+ type: bool
+ default: false
+ growphysical:
+ description:
+ - Specifies whether to attempt to execute a growphysical
+ operation, if there is enough unused space on the
+ device. A growphysical operation will be executed if
+ there is at least 64 GB of free space, relative to the
+ previous physical size of the affected VDO volume.
+ type: bool
+ default: false
+ slabsize:
+ description:
+ - The size of the increment by which the physical size of
+ a VDO volume is grown, in megabytes (or may be issued
+ with an LVM-style suffix of K, M, G, or T). Must be a
+ power of two between 128M and 32G. The default is 2G,
+ which supports volumes having a physical size up to 16T.
+ The maximum, 32G, supports a physical size of up to 256T.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ writepolicy:
+ description:
+ - Specifies the write policy of the VDO volume. The
+ 'sync' mode acknowledges writes only after data is on
+ stable storage. The 'async' mode acknowledges writes
+ when data has been cached for writing to stable
+ storage. The default (and highly recommended) 'auto'
+ mode checks the storage device to determine whether it
+ supports flushes. Devices that support flushes will
+ result in a VDO volume in 'async' mode, while devices
+ that do not support flushes will run in 'sync' mode.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is
+ specified in the playbook.
+ type: str
+ choices: [ async, auto, sync ]
+ indexmem:
+ description:
+ - Specifies the amount of index memory in gigabytes. The
+ default is 0.25. The special decimal values 0.25, 0.5,
+ and 0.75 can be used, as can any positive integer.
+ This option is only available when creating a new
+ volume, and cannot be changed for an existing volume.
+ type: str
+ indexmode:
+ description:
+ - Specifies the index mode of the Albireo index. The
+ default is 'dense', which has a deduplication window of
+ 1 GB of index memory per 1 TB of incoming data,
+ requiring 10 GB of index data on persistent storage.
+ The 'sparse' mode has a deduplication window of 1 GB of
+ index memory per 10 TB of incoming data, but requires
+ 100 GB of index data on persistent storage. This option
+ is only available when creating a new volume, and cannot
+ be changed for an existing volume.
+ type: str
+ choices: [ dense, sparse ]
+ ackthreads:
+ description:
+ - Specifies the number of threads to use for
+ acknowledging completion of requested VDO I/O operations.
+ Valid values are integer values from 1 to 100 (lower
+ numbers are preferable due to overhead). The default is
+ 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ biothreads:
+ description:
+ - Specifies the number of threads to use for submitting I/O
+ operations to the storage device. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 4.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ cputhreads:
+ description:
+ - Specifies the number of threads to use for CPU-intensive
+ work such as hashing or compression. Valid values are
+ integer values from 1 to 100 (lower numbers are
+ preferable due to overhead). The default is 2.
+ Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ logicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on logical
+ block addresses. Valid values are integer values from
+ 1 to 100 (lower numbers are preferable due to overhead).
+ The default is 1. Existing volumes will maintain their
+ previously configured setting unless a different value
+ is specified in the playbook.
+ type: str
+ physicalthreads:
+ description:
+ - Specifies the number of threads across which to
+ subdivide parts of the VDO processing based on physical
+ block addresses. Valid values are integer values from
+ 1 to 16 (lower numbers are preferable due to overhead).
+ The physical space used by the VDO volume must be
+ larger than (slabsize * physicalthreads). The default
+ is 1. Existing volumes will maintain their previously
+ configured setting unless a different value is specified
+ in the playbook.
+ type: str
+ force:
+ description:
+ - When creating a volume, ignores any existing file system
+ or VDO signature already present in the storage device.
+ When stopping or removing a VDO volume, first unmounts
+ the file system stored on the device if mounted.
+ - "B(Warning:) Since this parameter removes all safety
+ checks it is important to make sure that all parameters
+ provided are accurate and intentional."
+ type: bool
+ default: false
+ version_added: 2.4.0
+notes:
+ - In general, the default thread configuration should be used.
+requirements:
+ - PyYAML
+ - kmod-kvdo
+ - vdo
+'''
+
+EXAMPLES = r'''
+- name: Create 2 TB VDO volume vdo1 on device /dev/md0
+ community.general.vdo:
+ name: vdo1
+ state: present
+ device: /dev/md0
+ logicalsize: 2T
+
+- name: Remove VDO volume vdo1
+ community.general.vdo:
+ name: vdo1
+ state: absent
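+
+# An additional, illustrative example (the device name and sizes are
+# placeholders) combining several documented creation options:
+- name: Create a VDO volume with deduplication disabled and sync write policy
+ community.general.vdo:
+ name: vdo2
+ state: present
+ device: /dev/sdb
+ logicalsize: 1T
+ deduplication: disabled
+ writepolicy: sync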
+'''
+
+RETURN = r'''# '''
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+import re
+import traceback
+
+YAML_IMP_ERR = None
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ YAML_IMP_ERR = traceback.format_exc()
+ HAS_YAML = False
+
+
+# Generate a list of VDO volumes, whether they are running or stopped.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return vdolist A list of currently created VDO volumes.
+def inventory_vdos(module, vdocmd):
+ rc, vdostatusout, err = module.run_command([vdocmd, "status"])
+
+ vdolist = []
+
+ if rc == 2 and re.findall(r"vdoconf\.yml does not exist", err, re.MULTILINE):
+ # If there is no /etc/vdoconf.yml file, assume there are no
+ # VDO volumes. Return an empty list of VDO volumes.
+ return vdolist
+
+ if rc != 0:
+ module.fail_json(msg="Inventorying VDOs failed: %s" % vdostatusout, rc=rc, err=err)
+
+ vdostatusyaml = yaml.safe_load(vdostatusout)
+ if vdostatusyaml is None:
+ return vdolist
+
+ vdoyamls = vdostatusyaml['VDOs']
+
+ if vdoyamls is not None:
+ vdolist = list(vdoyamls.keys())
+
+ return vdolist
+
+
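+# Generate a list of currently running VDO volumes.
+#
+# @param module The AnsibleModule object.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return runningvdolist A list of currently running VDO volumes.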
+def list_running_vdos(module, vdocmd):
+ rc, vdolistout, err = module.run_command([vdocmd, "list"])
+ runningvdolist = list(filter(None, vdolistout.split('\n')))  # a list, not an iterator: membership is tested more than once
+ return runningvdolist
+
+
+# Start a VDO volume.
+#
+# @param module The AnsibleModule object.
+# @param vdoname The name of the VDO volume to start.
+# @param vdocmd The path of the 'vdo' command.
+#
+# @return rc The return code of the 'vdo start' command.
+def start_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command([vdocmd, "start", "--name=%s" % vdoname])
+ if rc == 0:
+ module.log("started VDO volume %s" % vdoname)
+ return rc
+
+
+def stop_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command([vdocmd, "stop", "--name=%s" % vdoname])
+ if rc == 0:
+ module.log("stopped VDO volume %s" % vdoname)
+ return rc
+
+
+def activate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command([vdocmd, "activate", "--name=%s" % vdoname])
+ if rc == 0:
+ module.log("activated VDO volume %s" % vdoname)
+ return rc
+
+
+def deactivate_vdo(module, vdoname, vdocmd):
+ rc, out, err = module.run_command([vdocmd, "deactivate", "--name=%s" % vdoname])
+ if rc == 0:
+ module.log("deactivated VDO volume %s" % vdoname)
+ return rc
+
+
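+# Generate a list of options to pass to the 'vdo' command.
+# Note that a 'create' operation will pass more options than a
+# 'modify' operation.
+#
+# @param params A dictionary of parameters and their values
+# (values of None and/or nonexistent values are ignored).
+#
+# @return options A list of option strings for a 'vdo <action>' command.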
+def add_vdooptions(params):
+ options = []
+
+ if params.get('logicalsize') is not None:
+ options.append("--vdoLogicalSize=" + params['logicalsize'])
+
+ if params.get('blockmapcachesize') is not None:
+ options.append("--blockMapCacheSize=" + params['blockmapcachesize'])
+
+ if params.get('readcache') == 'enabled':
+ options.append("--readCache=enabled")
+
+ if params.get('readcachesize') is not None:
+ options.append("--readCacheSize=" + params['readcachesize'])
+
+ if params.get('slabsize') is not None:
+ options.append("--vdoSlabSize=" + params['slabsize'])
+
+ if params.get('emulate512'):
+ options.append("--emulate512=enabled")
+
+ if params.get('indexmem') is not None:
+ options.append("--indexMem=" + params['indexmem'])
+
+ if params.get('indexmode') == 'sparse':
+ options.append("--sparseIndex=enabled")
+
+ if params.get('force'):
+ options.append("--force")
+
+ # Entering an invalid thread config results in a cryptic
+ # 'Could not set up device mapper for %s' error from the 'vdo'
+ # command execution. The dmsetup module on the system will
+ # output a more helpful message, but one would have to log
+ # onto that system to read the error. For now, heed the thread
+ # limit warnings in the DOCUMENTATION section above.
+ if params.get('ackthreads') is not None:
+ options.append("--vdoAckThreads=" + params['ackthreads'])
+
+ if params.get('biothreads') is not None:
+ options.append("--vdoBioThreads=" + params['biothreads'])
+
+ if params.get('cputhreads') is not None:
+ options.append("--vdoCpuThreads=" + params['cputhreads'])
+
+ if params.get('logicalthreads') is not None:
+ options.append("--vdoLogicalThreads=" + params['logicalthreads'])
+
+ if params.get('physicalthreads') is not None:
+ options.append("--vdoPhysicalThreads=" + params['physicalthreads'])
+
+ return options
+
+
+def run_module():
+
+ # Define the available arguments/parameters that a user can pass to
+ # the module.
+ # Defaults for VDO parameters are None, in order to facilitate
+ # the detection of parameters passed from the playbook.
+ # Creation param defaults are determined by the creation section.
+
+ module_args = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ activated=dict(type='bool'),
+ running=dict(type='bool'),
+ growphysical=dict(type='bool', default=False),
+ device=dict(type='str'),
+ logicalsize=dict(type='str'),
+ deduplication=dict(type='str', choices=['disabled', 'enabled']),
+ compression=dict(type='str', choices=['disabled', 'enabled']),
+ blockmapcachesize=dict(type='str'),
+ readcache=dict(type='str', choices=['disabled', 'enabled']),
+ readcachesize=dict(type='str'),
+ emulate512=dict(type='bool', default=False),
+ slabsize=dict(type='str'),
+ writepolicy=dict(type='str', choices=['async', 'auto', 'sync']),
+ indexmem=dict(type='str'),
+ indexmode=dict(type='str', choices=['dense', 'sparse']),
+ ackthreads=dict(type='str'),
+ biothreads=dict(type='str'),
+ cputhreads=dict(type='str'),
+ logicalthreads=dict(type='str'),
+ physicalthreads=dict(type='str'),
+ force=dict(type='bool', default=False),
+ )
+
+ # Seed the result dictionary in the object. There will be an
+ # 'invocation' dictionary added with 'module_args' (arguments
+ # given).
+ result = dict(
+ changed=False,
+ )
+
+ # the AnsibleModule object will be our abstraction working with Ansible
+ # this includes instantiation, a couple of common attr would be the
+ # args/params passed to the execution, as well as if the module
+ # supports check mode
+ module = AnsibleModule(
+ argument_spec=module_args,
+ supports_check_mode=False,
+ )
+
+ if not HAS_YAML:
+ module.fail_json(msg=missing_required_lib('PyYAML'), exception=YAML_IMP_ERR)
+
+ vdocmd = module.get_bin_path("vdo", required=True)
+
+ # Print a pre-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+
+ runningvdolist = list_running_vdos(module, vdocmd)
+
+ # Collect the name of the desired VDO volume, and its state. These will
+ # determine what to do.
+ desiredvdo = module.params['name']
+ state = module.params['state']
+
+ # Create a desired VDO volume that doesn't exist yet.
+ if (desiredvdo not in vdolist) and (state == 'present'):
+ device = module.params['device']
+ if device is None:
+ module.fail_json(msg="Creating a VDO volume requires specifying "
+ "a 'device' in the playbook.")
+
+ # Create a dictionary of the options from the AnsibleModule
+ # parameters, compile the vdo command options, and run "vdo create"
+ # with those options.
+ # Since this is the creation of a new VDO volume, it will contain all
+ # of the parameters given by the playbook; the rest will
+ # assume default values.
+ vdocmdoptions = add_vdooptions(module.params)
+ rc, out, err = module.run_command(
+ [vdocmd, "create", "--name=%s" % desiredvdo, "--device=%s" % device] + vdocmdoptions)
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Creating VDO %s failed." % desiredvdo, rc=rc, err=err)
+
+ if module.params['compression'] == 'disabled':
+ rc, out, err = module.run_command([vdocmd, "disableCompression", "--name=%s" % desiredvdo])
+
+ if module.params['deduplication'] == 'disabled':
+ rc, out, err = module.run_command([vdocmd, "disableDeduplication", "--name=%s" % desiredvdo])
+
+ if module.params['activated'] is False:
+ deactivate_vdo(module, desiredvdo, vdocmd)
+
+ if module.params['running'] is False:
+ stop_vdo(module, desiredvdo, vdocmd)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("created VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # Modify the current parameters of a VDO that exists.
+ if desiredvdo in vdolist and state == 'present':
+ rc, vdostatusoutput, err = module.run_command([vdocmd, "status"])
+ vdostatusyaml = yaml.safe_load(vdostatusoutput)
+
+ # An empty dictionary to contain dictionaries of VDO statistics
+ processedvdos = {}
+
+ vdoyamls = vdostatusyaml['VDOs']
+ if vdoyamls is not None:
+ processedvdos = vdoyamls
+
+ # The 'vdo status' keys that are currently modifiable.
+ statusparamkeys = ['Acknowledgement threads',
+ 'Bio submission threads',
+ 'Block map cache size',
+ 'CPU-work threads',
+ 'Logical threads',
+ 'Physical threads',
+ 'Read cache',
+ 'Read cache size',
+ 'Configured write policy',
+ 'Compression',
+ 'Deduplication']
+
+ # A key translation table from 'vdo status' output to Ansible
+ # module parameters. This covers all of the 'vdo status'
+ # parameter keys that could be modified with the 'vdo'
+ # command.
+ vdokeytrans = {
+ 'Logical size': 'logicalsize',
+ 'Compression': 'compression',
+ 'Deduplication': 'deduplication',
+ 'Block map cache size': 'blockmapcachesize',
+ 'Read cache': 'readcache',
+ 'Read cache size': 'readcachesize',
+ 'Configured write policy': 'writepolicy',
+ 'Acknowledgement threads': 'ackthreads',
+ 'Bio submission threads': 'biothreads',
+ 'CPU-work threads': 'cputhreads',
+ 'Logical threads': 'logicalthreads',
+ 'Physical threads': 'physicalthreads'
+ }
+
+ # Build a dictionary of the current VDO status parameters, with
+ # the keys used by VDO. (These keys will be converted later.)
+ currentvdoparams = {}
+
+ # Build a "lookup table" dictionary containing a translation table
+ # of the parameters that can be modified
+ modtrans = {}
+
+ for statfield in statusparamkeys:
+ if statfield in processedvdos[desiredvdo]:
+ currentvdoparams[statfield] = processedvdos[desiredvdo][statfield]
+
+ modtrans[statfield] = vdokeytrans[statfield]
+
+ # Build a dictionary of current parameters formatted with the
+ # same keys as the AnsibleModule parameters.
+ currentparams = {}
+ for paramkey in modtrans.keys():
+ currentparams[modtrans[paramkey]] = currentvdoparams[paramkey]
+
+ diffparams = {}
+
+ # Check for differences between the playbook parameters and the
+ # current parameters. Since AnsibleModule params are all strings,
+ # compare them as strings (skipping any parameter that is None).
+ for key in currentparams.keys():
+ if module.params[key] is not None:
+ if str(currentparams[key]) != module.params[key]:
+ diffparams[key] = module.params[key]
+
+ if diffparams:
+ vdocmdoptions = add_vdooptions(diffparams)
+ if vdocmdoptions:
+ rc, out, err = module.run_command([vdocmd, "modify", "--name=%s" % desiredvdo] + vdocmdoptions)
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Modifying VDO %s failed."
+ % desiredvdo, rc=rc, err=err)
+
+ if 'deduplication' in diffparams.keys():
+ dedupemod = diffparams['deduplication']
+ dedupeparam = "disableDeduplication" if dedupemod == 'disabled' else "enableDeduplication"
+ rc, out, err = module.run_command([vdocmd, dedupeparam, "--name=%s" % desiredvdo])
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing deduplication on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+
+ if 'compression' in diffparams.keys():
+ compressmod = diffparams['compression']
+ compressparam = "disableCompression" if compressmod == 'disabled' else "enableCompression"
+ rc, out, err = module.run_command([vdocmd, compressparam, "--name=%s" % desiredvdo])
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing compression on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+
+ if 'writepolicy' in diffparams.keys():
+ writepolmod = diffparams['writepolicy']
+ rc, out, err = module.run_command([
+ vdocmd,
+ "changeWritePolicy",
+ "--name=%s" % desiredvdo,
+ "--writePolicy=%s" % writepolmod,
+ ])
+
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Changing write policy on VDO volume %s failed." % desiredvdo, rc=rc, err=err)
+
+ # Process the size parameters, to determine whether a growPhysical or
+ # growLogical operation needs to occur.
+ sizeparamkeys = ['Logical size', ]
+
+ currentsizeparams = {}
+ sizetrans = {}
+ for statfield in sizeparamkeys:
+ currentsizeparams[statfield] = processedvdos[desiredvdo][statfield]
+ sizetrans[statfield] = vdokeytrans[statfield]
+
+ sizeparams = {}
+ for paramkey in currentsizeparams.keys():
+ sizeparams[sizetrans[paramkey]] = currentsizeparams[paramkey]
+
+ diffsizeparams = {}
+ for key in sizeparams.keys():
+ if module.params[key] is not None and str(sizeparams[key]) != module.params[key]:
+ diffsizeparams[key] = module.params[key]
+
+ if module.params['growphysical']:
+ physdevice = module.params['device']
+ rc, devsectors, err = module.run_command([module.get_bin_path("blockdev"), "--getsz", physdevice])
+ devblocks = int(devsectors) // 8  # 512-byte sectors -> 4 KB blocks
+ dmvdoname = ('/dev/mapper/' + desiredvdo)
+ currentvdostats = processedvdos[desiredvdo]['VDO statistics'][dmvdoname]
+ currentphysblocks = currentvdostats['physical blocks']
+
+ # Set a growPhysical threshold to grow only when there is
+ # guaranteed to be more than 2 slabs worth of unallocated
+ # space on the device to use. For now, set to device
+ # size + 64 GB, since 32 GB is the largest possible
+ # slab size.
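+ # (64 GB corresponds to 16777216 blocks of 4 KB.)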
+ growthresh = devblocks + 16777216
+
+ if currentphysblocks > growthresh:
+ result['changed'] = True
+ rc, out, err = module.run_command([vdocmd, "growPhysical", "--name=%s" % desiredvdo])
+
+ if 'logicalsize' in diffsizeparams.keys():
+ result['changed'] = True
+ rc, out, err = module.run_command([vdocmd, "growLogical", "--name=%s" % desiredvdo, "--vdoLogicalSize=%s" % diffsizeparams['logicalsize']])
+
+ vdoactivatestatus = processedvdos[desiredvdo]['Activate']
+
+ if module.params['activated'] is False and vdoactivatestatus == 'enabled':
+ deactivate_vdo(module, desiredvdo, vdocmd)
+ result['changed'] = True
+
+ if module.params['activated'] and vdoactivatestatus == 'disabled':
+ activate_vdo(module, desiredvdo, vdocmd)
+ result['changed'] = True
+
+ if module.params['running'] is False and desiredvdo in runningvdolist:
+ stop_vdo(module, desiredvdo, vdocmd)
+ result['changed'] = True
+
+ # Note that a disabled VDO volume cannot be started by the
+ # 'vdo start' command, by design. To accurately track changed
+ # status, don't try to start a disabled VDO volume.
+ # If the playbook contains 'activated: true', assume that
+ # the activate_vdo() operation succeeded, as 'vdoactivatestatus'
+ # will have the activated status prior to the activate_vdo()
+ # call.
+ if (vdoactivatestatus == 'enabled' or module.params['activated']) and module.params['running'] and desiredvdo not in runningvdolist:
+ start_vdo(module, desiredvdo, vdocmd)
+ result['changed'] = True
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ if diffparams:
+ module.log("modified parameters of VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+ # Remove a desired VDO that currently exists.
+ if desiredvdo in vdolist and state == 'absent':
+ rc, out, err = module.run_command([vdocmd, "remove", "--name=%s" % desiredvdo])
+ if rc == 0:
+ result['changed'] = True
+ else:
+ module.fail_json(msg="Removing VDO %s failed." % desiredvdo, rc=rc, err=err)
+
+ # Print a post-run list of VDO volumes in the result object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("removed VDO volume %s" % desiredvdo)
+ module.exit_json(**result)
+
+ # fall through
+ # The state for the desired VDO volume was absent, and it does
+ # not exist. Print a post-run list of VDO volumes in the result
+ # object.
+ vdolist = inventory_vdos(module, vdocmd)
+ module.log("received request to remove non-existent VDO volume %s" % desiredvdo)
+
+ module.exit_json(**result)
+
+
+def main():
+ run_module()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_configuration.py b/ansible_collections/community/general/plugins/modules/vertica_configuration.py
new file mode 100644
index 000000000..09b80df3d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_configuration.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_configuration
+short_description: Updates Vertica configuration parameters
+description:
+ - Updates Vertica configuration parameters.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ parameter:
+ description:
+ - Name of the parameter to update.
+ required: true
+ aliases: [name]
+ type: str
+ value:
+ description:
+ - Value of the parameter to be set.
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
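+# The unixODBC configuration described in the notes above amounts to two short
+# ini snippets (paths assume a default Vertica installation under /opt/vertica):
+#
+#   /etc/odbcinst.ini (or $HOME/.odbcinst.ini):
+#     [Vertica]
+#     Driver = /opt/vertica/lib64/libverticaodbc.so
+#
+#   /etc/vertica.ini (or $HOME/.vertica.ini):
+#     [Driver]
+#     ErrorMessagesPath = /opt/vertica/lib64
+#     DriverManagerEncoding = UTF-16
+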
+EXAMPLES = """
+- name: Updating the failovertostandbyafter configuration parameter
+ community.general.vertica_configuration:
+ name: failovertostandbyafter
+ value: '8 hours'
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_configuration_facts(cursor, parameter_name=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter_name, parameter_name)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
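+# Return True if the parameter already has the requested value (compared
+# case-insensitively), meaning no change is needed.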
+def check(configuration_facts, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ return False
+ return True
+
+
+def present(configuration_facts, cursor, parameter_name, current_value):
+ parameter_key = parameter_name.lower()
+ changed = False
+ if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
+ cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
+ changed = True
+ if changed:
+ configuration_facts.update(get_configuration_facts(cursor, parameter_name))
+ return changed
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ parameter=dict(required=True, aliases=['name']),
+ value=dict(default=None),
+ db=dict(default=None),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ parameter_name = module.params['parameter']
+ current_value = module.params['value']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)),
+ exception=traceback.format_exc())
+
+ try:
+ configuration_facts = get_configuration_facts(cursor)
+ if module.check_mode:
+ changed = not check(configuration_facts, parameter_name, current_value)
+ else:
+ try:
+ changed = present(configuration_facts, cursor, parameter_name, current_value)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_configuration': configuration_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_info.py b/ansible_collections/community/general/plugins/modules/vertica_info.py
new file mode 100644
index 000000000..3106be3b3
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_info.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_info
+short_description: Gathers Vertica database facts
+description:
+ - Gathers Vertica database information.
+ - This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
+ Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Database port to connect to.
+ default: '5433'
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Gathering vertica information
+ community.general.vertica_info:
+ db: db_name
+ register: result
+
+- name: Print schemas
+ ansible.builtin.debug:
+ msg: "{{ result.vertica_schemas }}"
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+# module specific functions
+
+
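+# Build a dictionary of non-system schemas, then attach to each schema the
+# roles holding usage and create privileges on it.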
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee = r.name and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
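+# Collect all non-superuser accounts, joining password_auditor for password
+# and expiration details, and split role lists into Python lists.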
+def get_user_facts(cursor, user=''):
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
+def get_configuration_facts(cursor, parameter=''):
+ facts = {}
+ cursor.execute("""
+ select c.parameter_name, c.current_value, c.default_value
+ from configuration_parameters c
+ where c.node_name = 'ALL'
+ and (? = '' or c.parameter_name ilike ?)
+ """, parameter, parameter)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.parameter_name.lower()] = {
+ 'parameter_name': row.parameter_name,
+ 'current_value': row.current_value,
+ 'default_value': row.default_value}
+ return facts
+
+
+def get_node_facts(cursor):
+ facts = {}
+ cursor.execute("""
+ select node_name, node_address, export_address, node_state, node_type,
+ catalog_path
+ from nodes
+ """)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.node_address] = {
+ 'node_name': row.node_name,
+ 'export_address': row.export_address,
+ 'node_state': row.node_state,
+ 'node_type': row.node_type,
+ 'catalog_path': row.catalog_path}
+ return facts
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ db=dict(default=None),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(default=None, no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server=%s;"
+ "Port=%s;"
+ "Database=%s;"
+ "User=%s;"
+ "Password=%s;"
+ "ConnectionLoadBalance=%s"
+ ) % (module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ user_facts = get_user_facts(cursor)
+ role_facts = get_role_facts(cursor)
+ configuration_facts = get_configuration_facts(cursor)
+ node_facts = get_node_facts(cursor)
+
+ module.exit_json(changed=False,
+ vertica_schemas=schema_facts,
+ vertica_users=user_facts,
+ vertica_roles=role_facts,
+ vertica_configuration=configuration_facts,
+ vertica_nodes=node_facts)
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_role.py b/ansible_collections/community/general/plugins/modules/vertica_role.py
new file mode 100644
index 000000000..704594a12
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_role.py
@@ -0,0 +1,254 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_role
+short_description: Adds or removes Vertica database roles and assigns roles to them
+description:
+ - Adds or removes a Vertica database role and, optionally, assigns other roles to it.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ role:
+ description:
+ - Name of the role to add or remove.
+ required: true
+ type: str
+ aliases: ['name']
+ assigned_roles:
+ description:
+ - Comma separated list of roles to assign to the role.
+ aliases: ['assigned_role']
+ type: str
+ state:
+ description:
+ - Whether to create C(present) or drop C(absent) a role.
+ choices: ['present', 'absent']
+ default: present
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica role
+ community.general.vertica_role:
+ name: role_name
+ db: db_name
+ state: present
+
+- name: Creating a new vertica role with other role assigned
+ community.general.vertica_role:
+ name: role_name
+ assigned_role: other_role_name
+ state: present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_role_facts(cursor, role=''):
+ facts = {}
+ cursor.execute("""
+ select r.name, r.assigned_roles
+ from roles r
+ where (? = '' or r.name ilike ?)
+ """, role, role)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ role_key = row.name.lower()
+ facts[role_key] = {
+ 'name': row.name,
+ 'assigned_roles': []}
+ if row.assigned_roles:
+ facts[role_key]['assigned_roles'] = row.assigned_roles.replace(' ', '').split(',')
+ return facts
+
+
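+# Revoke assigned roles that are no longer required, and grant roles that are
+# newly required.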
+def update_roles(role_facts, cursor, role,
+ existing, required):
+ for assigned_role in set(existing) - set(required):
+ cursor.execute("revoke {0} from {1}".format(assigned_role, role))
+ for assigned_role in set(required) - set(existing):
+ cursor.execute("grant {0} to {1}".format(assigned_role, role))
+
+
+def check(role_facts, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ return False
+ if assigned_roles and sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles']):
+ return False
+ return True
+
+
+def present(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key not in role_facts:
+ cursor.execute("create role {0}".format(role))
+ update_roles(role_facts, cursor, role, [], assigned_roles)
+ role_facts.update(get_role_facts(cursor, role))
+ return True
+ else:
+ changed = False
+ if assigned_roles and (sorted(assigned_roles) != sorted(role_facts[role_key]['assigned_roles'])):
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], assigned_roles)
+ changed = True
+ if changed:
+ role_facts.update(get_role_facts(cursor, role))
+ return changed
+
+
+def absent(role_facts, cursor, role, assigned_roles):
+ role_key = role.lower()
+ if role_key in role_facts:
+ update_roles(role_facts, cursor, role,
+ role_facts[role_key]['assigned_roles'], [])
+ cursor.execute("drop role {0} cascade".format(role_facts[role_key]['name']))
+ del role_facts[role_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ role=dict(required=True, aliases=['name']),
+ assigned_roles=dict(default=None, aliases=['assigned_role']),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ role = module.params['role']
+ assigned_roles = []
+ if module.params['assigned_roles']:
+ assigned_roles = module.params['assigned_roles'].split(',')
+ assigned_roles = list(filter(None, assigned_roles))  # a list, not an iterator: the result is sorted and reused
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ role_facts = get_role_facts(cursor)
+ if module.check_mode:
+ changed = not check(role_facts, role, assigned_roles)
+ elif state == 'absent':
+ try:
+ changed = absent(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(role_facts, cursor, role, assigned_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_roles': role_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, role=role, ansible_facts={'vertica_roles': role_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_schema.py b/ansible_collections/community/general/plugins/modules/vertica_schema.py
new file mode 100644
index 000000000..01f8f721e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_schema.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_schema
+short_description: Adds or removes Vertica database schema and roles
+description:
+ - Adds or removes a Vertica database schema and, optionally, roles
+ with schema access privileges.
+ - A schema will not be removed until all of its objects have been dropped.
+ - In such a situation, if the module tries to remove the schema it
+ will fail and only remove the roles created for the schema if they have
+ no dependencies.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ schema:
+ description:
+ - Name of the schema to add or remove.
+ required: true
+ aliases: ['name']
+ type: str
+ usage_roles:
+ description:
+ - Comma separated list of roles to create and grant usage access to the schema.
+ aliases: ['usage_role']
+ type: str
+ create_roles:
+ description:
+ - Comma separated list of roles to create and grant usage and create access to the schema.
+ aliases: ['create_role']
+ type: str
+ owner:
+ description:
+ - Name of the user to set as owner of the schema.
+ type: str
+ state:
+ description:
+ - Whether to create C(present) or drop C(absent) a schema.
+ default: present
+ choices: ['present', 'absent']
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+ that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica schema
+ community.general.vertica_schema:
+ name: schema_name
+ db: db_name
+ state: present
+
+- name: Creating a new schema with specific schema owner
+ community.general.vertica_schema:
+ name: schema_name
+ owner: dbowner
+ db: db_name
+ state: present
+
+- name: Creating a new schema with roles
+ community.general.vertica_schema:
+ name: schema_name
+ create_roles: schema_name_all
+ usage_roles: schema_name_ro,schema_name_rw
+ db: db_name
+ state: present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
+
+
+def get_schema_facts(cursor, schema=''):
+ facts = {}
+ cursor.execute("""
+ select schema_name, schema_owner, create_time
+ from schemata
+ where not is_system_schema and schema_name not in ('public', 'TxtIndex')
+ and (? = '' or schema_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ facts[row.schema_name.lower()] = {
+ 'name': row.schema_name,
+ 'owner': row.schema_owner,
+ 'create_time': str(row.create_time),
+ 'usage_roles': [],
+ 'create_roles': []}
+ cursor.execute("""
+ select g.object_name as schema_name, r.name as role_name,
+ lower(g.privileges_description) privileges_description
+ from roles r join grants g
+ on g.grantee_id = r.role_id and g.object_type='SCHEMA'
+ and g.privileges_description like '%USAGE%'
+ and g.grantee not in ('public', 'dbadmin')
+ and (? = '' or g.object_name ilike ?)
+ """, schema, schema)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ schema_key = row.schema_name.lower()
+ if 'create' in row.privileges_description:
+ facts[schema_key]['create_roles'].append(row.role_name)
+ else:
+ facts[schema_key]['usage_roles'].append(row.role_name)
+ return facts
+
+
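+# Reconcile the roles granted on a schema: drop roles that are no longer
+# required at all, revoke create where it is no longer required, create and
+# grant usage to newly required roles, and grant create where newly required.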
+def update_roles(schema_facts, cursor, schema,
+ existing, required,
+ create_existing, create_required):
+ for role in set(existing + create_existing) - set(required + create_required):
+ cursor.execute("drop role {0} cascade".format(role))
+ for role in set(create_existing) - set(create_required):
+ cursor.execute("revoke create on schema {0} from {1}".format(schema, role))
+ for role in set(required + create_required) - set(existing + create_existing):
+ cursor.execute("create role {0}".format(role))
+ cursor.execute("grant usage on schema {0} to {1}".format(schema, role))
+ for role in set(create_required) - set(create_existing):
+ cursor.execute("grant create on schema {0} to {1}".format(schema, role))
+
+
+def check(schema_facts, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ return False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ return False
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']):
+ return False
+ if sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+ return False
+ return True
+
+
+def present(schema_facts, cursor, schema, usage_roles, create_roles, owner):
+ schema_key = schema.lower()
+ if schema_key not in schema_facts:
+ query_fragments = ["create schema {0}".format(schema)]
+ if owner:
+ query_fragments.append("authorization {0}".format(owner))
+ cursor.execute(' '.join(query_fragments))
+ update_roles(schema_facts, cursor, schema, [], usage_roles, [], create_roles)
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return True
+ else:
+ changed = False
+ if owner and owner.lower() != schema_facts[schema_key]['owner'].lower():
+ raise NotSupportedError((
+ "Changing schema owner is not supported. "
+ "Current owner: {0}."
+ ).format(schema_facts[schema_key]['owner']))
+ if sorted(usage_roles) != sorted(schema_facts[schema_key]['usage_roles']) or \
+ sorted(create_roles) != sorted(schema_facts[schema_key]['create_roles']):
+
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], usage_roles,
+ schema_facts[schema_key]['create_roles'], create_roles)
+ changed = True
+ if changed:
+ schema_facts.update(get_schema_facts(cursor, schema))
+ return changed
+
+
+def absent(schema_facts, cursor, schema, usage_roles, create_roles):
+ schema_key = schema.lower()
+ if schema_key in schema_facts:
+ update_roles(schema_facts, cursor, schema,
+ schema_facts[schema_key]['usage_roles'], [], schema_facts[schema_key]['create_roles'], [])
+ try:
+ cursor.execute("drop schema {0} restrict".format(schema_facts[schema_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping schema failed due to dependencies.")
+ del schema_facts[schema_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ schema=dict(required=True, aliases=['name']),
+ usage_roles=dict(aliases=['usage_role']),
+ create_roles=dict(aliases=['create_role']),
+ owner=dict(),
+ state=dict(default='present', choices=['absent', 'present']),
+ db=dict(),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ schema = module.params['schema']
+ usage_roles = []
+ if module.params['usage_roles']:
+ usage_roles = module.params['usage_roles'].split(',')
+ usage_roles = list(filter(None, usage_roles))  # a list, not an iterator: the result is sorted and reused
+ create_roles = []
+ if module.params['create_roles']:
+ create_roles = module.params['create_roles'].split(',')
+ create_roles = list(filter(None, create_roles))  # a list, not an iterator: the result is sorted and reused
+ owner = module.params['owner']
+ state = module.params['state']
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+ module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ schema_facts = get_schema_facts(cursor)
+ if module.check_mode:
+ changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
+ elif state == 'absent':
+ try:
+ changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state == 'present':
+ try:
+ changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_schemas': schema_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vertica_user.py b/ansible_collections/community/general/plugins/modules/vertica_user.py
new file mode 100644
index 000000000..a6a5b5951
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vertica_user.py
@@ -0,0 +1,393 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vertica_user
+short_description: Adds or removes Vertica database users and assigns roles
+description:
+  - Adds or removes a Vertica database user and, optionally, assigns roles.
+  - A user will not be removed until all of its dependencies have been dropped.
+  - In such a situation, if the module tries to remove the user, it
+    will fail and only remove the roles granted to the user.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ user:
+ description:
+ - Name of the user to add or remove.
+ required: true
+ type: str
+ aliases: ['name']
+ profile:
+ description:
+ - Sets the user's profile.
+ type: str
+ resource_pool:
+ description:
+ - Sets the user's resource pool.
+ type: str
+ password:
+ description:
+ - The user's password encrypted by the MD5 algorithm.
+      - The password must be generated with the format C("md5" + md5[password + username]),
+        resulting in a total of 35 characters. An easy way to do this is by querying
+        the Vertica database with C(select 'md5'||md5('<user_password><user_name>')).
+ type: str
+ expired:
+ description:
+ - Sets the user's password expiration.
+ type: bool
+ ldap:
+ description:
+ - Set to true if users are authenticated via LDAP.
+      - The user will be created with password expired and set to C($ldap$).
+ type: bool
+ roles:
+ description:
+ - Comma separated list of roles to assign to the user.
+ aliases: ['role']
+ type: str
+ state:
+ description:
+      - Whether to create (C(present)), drop (C(absent)), or lock (C(locked)) a user.
+ choices: ['present', 'absent', 'locked']
+ default: present
+ type: str
+ db:
+ description:
+ - Name of the Vertica database.
+ type: str
+ cluster:
+ description:
+ - Name of the Vertica cluster.
+ default: localhost
+ type: str
+ port:
+ description:
+ - Vertica cluster port to connect to.
+ default: '5433'
+ type: str
+ login_user:
+ description:
+ - The username used to authenticate with.
+ default: dbadmin
+ type: str
+ login_password:
+ description:
+ - The password used to authenticate with.
+ type: str
+notes:
+ - The default authentication assumes that you are either logging in as or sudo'ing
+ to the C(dbadmin) account on the host.
+ - This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
+    that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
+ - Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
+ to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
+ and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
+ to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
+requirements: [ 'unixODBC', 'pyodbc' ]
+author: "Dariusz Owczarek (@dareko)"
+'''
+
+EXAMPLES = """
+- name: Creating a new vertica user with password
+  community.general.vertica_user:
+    name: user_name
+    password: md5<encrypted_password>
+    db: db_name
+    state: present
+
+- name: Creating a new vertica user authenticated via ldap with roles assigned
+  community.general.vertica_user:
+    name: user_name
+    ldap: true
+    db: db_name
+    roles: schema_name_ro
+    state: present
+"""
+import traceback
+
+PYODBC_IMP_ERR = None
+try:
+ import pyodbc
+except ImportError:
+ PYODBC_IMP_ERR = traceback.format_exc()
+ pyodbc_found = False
+else:
+ pyodbc_found = True
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_native
+
+
+class NotSupportedError(Exception):
+ pass
+
+
+class CannotDropError(Exception):
+ pass
+
+# module specific functions
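+
+
+# Illustrative sketch (not called by the module): the I(password) option
+# expects a pre-hashed value of the form "md5" + md5(<password><username>).
+# Assuming only the Python standard library, it could be computed like this:
+def _example_vertica_md5_password(password, username):
+    import hashlib
+    # "md5" prefix followed by the hex digest of password concatenated with username
+    digest = hashlib.md5((password + username).encode('utf-8')).hexdigest()
+    return 'md5' + digest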
+
+
+def get_user_facts(cursor, user=''):
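+    """Return a dict of facts for matching users, keyed by lowercased user name."""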
+ facts = {}
+ cursor.execute("""
+ select u.user_name, u.is_locked, u.lock_time,
+ p.password, p.acctexpired as is_expired,
+ u.profile_name, u.resource_pool,
+ u.all_roles, u.default_roles
+ from users u join password_auditor p on p.user_id = u.user_id
+ where not u.is_super_user
+ and (? = '' or u.user_name ilike ?)
+ """, user, user)
+ while True:
+ rows = cursor.fetchmany(100)
+ if not rows:
+ break
+ for row in rows:
+ user_key = row.user_name.lower()
+ facts[user_key] = {
+ 'name': row.user_name,
+ 'locked': str(row.is_locked),
+ 'password': row.password,
+ 'expired': str(row.is_expired),
+ 'profile': row.profile_name,
+ 'resource_pool': row.resource_pool,
+ 'roles': [],
+ 'default_roles': []}
+ if row.is_locked:
+ facts[user_key]['locked_time'] = str(row.lock_time)
+ if row.all_roles:
+ facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
+ if row.default_roles:
+ facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
+ return facts
+
+
+def update_roles(user_facts, cursor, user,
+ existing_all, existing_default, required):
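+    """Revoke roles that are no longer required, grant newly required ones,
+    and reset the default role set."""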
+ del_roles = list(set(existing_all) - set(required))
+ if del_roles:
+ cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
+ new_roles = list(set(required) - set(existing_all))
+ if new_roles:
+ cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
+ if required:
+ cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
+
+
+def check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
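+    """Return True if the user already matches the requested state."""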
+ user_key = user.lower()
+ if user_key not in user_facts:
+ return False
+ if profile and profile != user_facts[user_key]['profile']:
+ return False
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ return False
+ if locked != (user_facts[user_key]['locked'] == 'True'):
+ return False
+ if password and password != user_facts[user_key]['password']:
+ return False
+ if (expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or
+ ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True')):
+ return False
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ return False
+ return True
+
+
+def present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles):
+ user_key = user.lower()
+ if user_key not in user_facts:
+ query_fragments = ["create user {0}".format(user)]
+ if locked:
+ query_fragments.append("account lock")
+ if password or ldap:
+ if password:
+ query_fragments.append("identified by '{0}'".format(password))
+ else:
+ query_fragments.append("identified by '$ldap$'")
+ if expired or ldap:
+ query_fragments.append("password expire")
+ if profile:
+ query_fragments.append("profile {0}".format(profile))
+ if resource_pool:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ cursor.execute(' '.join(query_fragments))
+ if resource_pool and resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ update_roles(user_facts, cursor, user, [], [], roles)
+ user_facts.update(get_user_facts(cursor, user))
+ return True
+ else:
+ changed = False
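+        # Collect the attribute changes into a single "alter user" statement,
+        # executed once after all comparisons are done.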
+ query_fragments = ["alter user {0}".format(user)]
+ if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
+ if locked:
+ state = 'lock'
+ else:
+ state = 'unlock'
+ query_fragments.append("account {0}".format(state))
+ changed = True
+ if password and password != user_facts[user_key]['password']:
+ query_fragments.append("identified by '{0}'".format(password))
+ changed = True
+ if ldap:
+ if ldap != (user_facts[user_key]['expired'] == 'True'):
+ query_fragments.append("password expire")
+ changed = True
+ elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
+ if expired:
+ query_fragments.append("password expire")
+ changed = True
+ else:
+ raise NotSupportedError("Unexpiring user password is not supported.")
+ if profile and profile != user_facts[user_key]['profile']:
+ query_fragments.append("profile {0}".format(profile))
+ changed = True
+ if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
+ query_fragments.append("resource pool {0}".format(resource_pool))
+ if user_facts[user_key]['resource_pool'] != 'general':
+ cursor.execute("revoke usage on resource pool {0} from {1}".format(
+ user_facts[user_key]['resource_pool'], user))
+ if resource_pool != 'general':
+ cursor.execute("grant usage on resource pool {0} to {1}".format(
+ resource_pool, user))
+ changed = True
+ if changed:
+ cursor.execute(' '.join(query_fragments))
+ if roles and (sorted(roles) != sorted(user_facts[user_key]['roles']) or
+ sorted(roles) != sorted(user_facts[user_key]['default_roles'])):
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
+ changed = True
+ if changed:
+ user_facts.update(get_user_facts(cursor, user))
+ return changed
+
+
+def absent(user_facts, cursor, user, roles):
+ user_key = user.lower()
+ if user_key in user_facts:
+ update_roles(user_facts, cursor, user,
+ user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
+ try:
+ cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
+ except pyodbc.Error:
+ raise CannotDropError("Dropping user failed due to dependencies.")
+ del user_facts[user_key]
+ return True
+ else:
+ return False
+
+# module logic
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ user=dict(required=True, aliases=['name']),
+ profile=dict(),
+ resource_pool=dict(),
+ password=dict(no_log=True),
+ expired=dict(type='bool'),
+ ldap=dict(type='bool'),
+ roles=dict(aliases=['role']),
+ state=dict(default='present', choices=['absent', 'present', 'locked']),
+ db=dict(),
+ cluster=dict(default='localhost'),
+ port=dict(default='5433'),
+ login_user=dict(default='dbadmin'),
+ login_password=dict(no_log=True),
+ ), supports_check_mode=True)
+
+ if not pyodbc_found:
+ module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)
+
+ user = module.params['user']
+ profile = module.params['profile']
+ if profile:
+ profile = profile.lower()
+ resource_pool = module.params['resource_pool']
+ if resource_pool:
+ resource_pool = resource_pool.lower()
+ password = module.params['password']
+ expired = module.params['expired']
+ ldap = module.params['ldap']
+ roles = []
+ if module.params['roles']:
+ roles = module.params['roles'].split(',')
+        roles = list(filter(None, roles))
+ state = module.params['state']
+ if state == 'locked':
+ locked = True
+ else:
+ locked = False
+ db = ''
+ if module.params['db']:
+ db = module.params['db']
+
+ changed = False
+
+ try:
+ dsn = (
+ "Driver=Vertica;"
+ "Server={0};"
+ "Port={1};"
+ "Database={2};"
+ "User={3};"
+ "Password={4};"
+ "ConnectionLoadBalance={5}"
+ ).format(module.params['cluster'], module.params['port'], db,
+ module.params['login_user'], module.params['login_password'], 'true')
+ db_conn = pyodbc.connect(dsn, autocommit=True)
+ cursor = db_conn.cursor()
+ except Exception as e:
+        module.fail_json(msg="Unable to connect to database: {0}.".format(to_native(e)))
+
+ try:
+ user_facts = get_user_facts(cursor)
+ if module.check_mode:
+ changed = not check(user_facts, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ elif state == 'absent':
+ try:
+ changed = absent(user_facts, cursor, user, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ elif state in ['present', 'locked']:
+ try:
+ changed = present(user_facts, cursor, user, profile, resource_pool,
+ locked, password, expired, ldap, roles)
+ except pyodbc.Error as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+ except NotSupportedError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except CannotDropError as e:
+ module.fail_json(msg=to_native(e), ansible_facts={'vertica_users': user_facts})
+ except SystemExit:
+ # avoid catching this on python 2.4
+ raise
+ except Exception as e:
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vexata_eg.py b/ansible_collections/community/general/plugins/modules/vexata_eg.py
new file mode 100644
index 000000000..457d1fa9e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vexata_eg.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_eg
+short_description: Manage export groups on Vexata VX100 storage arrays
+description:
+ - Create or delete export groups on a Vexata VX100 array.
+ - An export group is a tuple of a volume group, initiator group and port
+ group that allows a set of volumes to be exposed to one or more hosts
+ through specific array ports.
+author:
+ - Sandeep Kasargod (@vexata)
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Export group name.
+ required: true
+ type: str
+ state:
+ description:
+      - Creates the export group when C(present) or deletes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ vg:
+ description:
+ - Volume group name.
+ type: str
+ ig:
+ description:
+ - Initiator group name.
+ type: str
+ pg:
+ description:
+ - Port group name.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create export group named db_export.
+ community.general.vexata_eg:
+ name: db_export
+ vg: dbvols
+ ig: dbhosts
+ pg: pg1
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete export group named db_export
+ community.general.vexata_eg:
+ name: db_export
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together)
+
+
+def get_eg(module, array):
+    """Retrieve a named eg if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ egs = array.list_egs()
+        eg = list(filter(lambda eg: eg['name'] == name, egs))
+ if len(eg) == 1:
+ return eg[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve export groups.')
+
+
+def get_vg_id(module, array):
+ """Retrieve a named vg's id if it exists, error if absent."""
+ name = module.params['vg']
+ try:
+ vgs = array.list_vgs()
+        vg = list(filter(lambda vg: vg['name'] == name, vgs))
+ if len(vg) == 1:
+ return vg[0]['id']
+ else:
+ module.fail_json(msg='Volume group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volume groups.')
+
+
+def get_ig_id(module, array):
+ """Retrieve a named ig's id if it exists, error if absent."""
+ name = module.params['ig']
+ try:
+ igs = array.list_igs()
+        ig = list(filter(lambda ig: ig['name'] == name, igs))
+ if len(ig) == 1:
+ return ig[0]['id']
+ else:
+ module.fail_json(msg='Initiator group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve initiator groups.')
+
+
+def get_pg_id(module, array):
+ """Retrieve a named pg's id if it exists, error if absent."""
+ name = module.params['pg']
+ try:
+ pgs = array.list_pgs()
+        pg = list(filter(lambda pg: pg['name'] == name, pgs))
+ if len(pg) == 1:
+ return pg[0]['id']
+ else:
+ module.fail_json(msg='Port group {0} was not found.'.format(name))
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve port groups.')
+
+
+def create_eg(module, array):
+    """Create a new export group."""
+ changed = False
+ eg_name = module.params['name']
+ vg_id = get_vg_id(module, array)
+ ig_id = get_ig_id(module, array)
+ pg_id = get_pg_id(module, array)
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
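+    # An export group is defined by the (volume group, initiator group,
+    # port group) id tuple resolved above.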
+ try:
+ eg = array.create_eg(
+ eg_name,
+ 'Ansible export group',
+ (vg_id, ig_id, pg_id))
+ if eg:
+ module.log(msg='Created export group {0}'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} create failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def delete_eg(module, array, eg):
+ changed = False
+ eg_name = eg['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_eg(
+ eg['id'])
+ if ok:
+ module.log(msg='Export group {0} deleted.'.format(eg_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ module.fail_json(msg='Export group {0} delete failed.'.format(eg_name))
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ vg=dict(type='str'),
+ ig=dict(type='str'),
+ pg=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ eg = get_eg(module, array)
+
+ if state == 'present' and not eg:
+ create_eg(module, array)
+ elif state == 'absent' and eg:
+ delete_eg(module, array, eg)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vexata_volume.py b/ansible_collections/community/general/plugins/modules/vexata_volume.py
new file mode 100644
index 000000000..7fdfc7e5f
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vexata_volume.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: vexata_volume
+short_description: Manage volumes on Vexata VX100 storage arrays
+description:
+  - Create, delete, or extend volumes on a Vexata VX100 array.
+author:
+- Sandeep Kasargod (@vexata)
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Volume name.
+ required: true
+ type: str
+ state:
+ description:
+      - Creates or modifies the volume when C(present), or removes it when C(absent).
+ default: present
+ choices: [ present, absent ]
+ type: str
+ size:
+ description:
+ - Volume size in M, G, T units. M=2^20, G=2^30, T=2^40 bytes.
+ type: str
+extends_documentation_fragment:
+- community.general.vexata.vx100
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create new 2 TiB volume named foo
+ community.general.vexata_volume:
+ name: foo
+ size: 2T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Expand volume named foo to 4 TiB
+ community.general.vexata_volume:
+ name: foo
+ size: 4T
+ state: present
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+
+- name: Delete volume named foo
+ community.general.vexata_volume:
+ name: foo
+ state: absent
+ array: vx100_ultra.test.com
+ user: admin
+ password: secret
+'''
+
+RETURN = r'''
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.vexata import (
+ argument_spec, get_array, required_together, size_to_MiB)
+
+
+def get_volume(module, array):
+ """Retrieve a named volume if it exists, None if absent."""
+ name = module.params['name']
+ try:
+ vols = array.list_volumes()
+        vol = list(filter(lambda v: v['name'] == name, vols))
+ if len(vol) == 1:
+ return vol[0]
+ else:
+ return None
+ except Exception:
+ module.fail_json(msg='Error while attempting to retrieve volumes.')
+
+
+def validate_size(module, err_msg):
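+    """Return the requested size in MiB, failing the module if it is missing or invalid."""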
+ size = module.params.get('size', False)
+ if not size:
+ module.fail_json(msg=err_msg)
+ size = size_to_MiB(size)
+ if size <= 0:
+ module.fail_json(msg='Invalid volume size, must be <integer>[MGT].')
+ return size
+
+
+def create_volume(module, array):
+    """Create a new volume."""
+ changed = False
+ size = validate_size(module, err_msg='Size is required to create volume.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.create_volume(
+ module.params['name'],
+ 'Ansible volume',
+ size)
+ if vol:
+ module.log(msg='Created volume {0}'.format(vol['id']))
+ changed = True
+ else:
+ module.fail_json(msg='Volume create failed.')
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def update_volume(module, array, volume):
+ """Expand the volume size."""
+ changed = False
+    size = validate_size(module, err_msg='Size is required to update volume.')
+ prev_size = volume['volSize']
+ if size <= prev_size:
+ module.log(msg='Volume expanded size needs to be larger '
+ 'than current size.')
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ vol = array.grow_volume(
+ volume['name'],
+ volume['description'],
+ volume['id'],
+ size)
+ if vol:
+ changed = True
+ except Exception:
+ pass
+
+ module.exit_json(changed=changed)
+
+
+def delete_volume(module, array, volume):
+ changed = False
+ vol_name = volume['name']
+ if module.check_mode:
+ module.exit_json(changed=changed)
+
+ try:
+ ok = array.delete_volume(
+ volume['id'])
+ if ok:
+ module.log(msg='Volume {0} deleted.'.format(vol_name))
+ changed = True
+ else:
+ raise Exception
+ except Exception:
+ pass
+ module.exit_json(changed=changed)
+
+
+def main():
+ arg_spec = argument_spec()
+ arg_spec.update(
+ dict(
+ name=dict(type='str', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ size=dict(type='str')
+ )
+ )
+
+ module = AnsibleModule(arg_spec,
+ supports_check_mode=True,
+ required_together=required_together())
+
+ state = module.params['state']
+ array = get_array(module)
+ volume = get_volume(module, array)
+
+ if state == 'present':
+ if not volume:
+ create_volume(module, array)
+ else:
+ update_volume(module, array, volume)
+ elif state == 'absent' and volume:
+ delete_volume(module, array, volume)
+ else:
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/vmadm.py b/ansible_collections/community/general/plugins/modules/vmadm.py
new file mode 100644
index 000000000..56ade17e4
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/vmadm.py
@@ -0,0 +1,790 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Jasper Lievisse Adriaanse <j@jasper.la>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: vmadm
+short_description: Manage SmartOS virtual machines and zones
+description:
+ - Manage SmartOS virtual machines through vmadm(1M).
+author: Jasper Lievisse Adriaanse (@jasperla)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ archive_on_delete:
+ required: false
+ description:
+ - When enabled, the zone dataset will be mounted on C(/zones/archive)
+ upon removal.
+ type: bool
+ autoboot:
+ required: false
+ description:
+ - Whether or not a VM is booted when the system is rebooted.
+ type: bool
+ brand:
+ choices: [ joyent, joyent-minimal, lx, kvm, bhyve ]
+ default: joyent
+ description:
+ - Type of virtual machine. The C(bhyve) option was added in community.general 0.2.0.
+ type: str
+ boot:
+ required: false
+ description:
+ - Set the boot order for KVM VMs.
+ type: str
+ cpu_cap:
+ required: false
+ description:
+ - Sets a limit on the amount of CPU time that can be used by a VM.
+ Use C(0) for no cap.
+ type: int
+ cpu_shares:
+ required: false
+ description:
+ - Sets a limit on the number of fair share scheduler (FSS) CPU shares for
+ a VM. This limit is relative to all other VMs on the system.
+ type: int
+ cpu_type:
+ required: false
+ choices: [ qemu64, host ]
+ default: qemu64
+ description:
+ - Control the type of virtual CPU exposed to KVM VMs.
+ type: str
+ customer_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM; this contains
+        customer-modifiable keys.
+ type: dict
+ delegate_dataset:
+ required: false
+ description:
+ - Whether to delegate a ZFS dataset to an OS VM.
+ type: bool
+ disk_driver:
+ required: false
+ description:
+ - Default value for a virtual disk model for KVM guests.
+ type: str
+ disks:
+ required: false
+ description:
+ - A list of disks to add, valid properties are documented in vmadm(1M).
+ type: list
+ elements: dict
+ dns_domain:
+ required: false
+ description:
+ - Domain value for C(/etc/hosts).
+ type: str
+ docker:
+ required: false
+ description:
+ - Docker images need this flag enabled along with the I(brand) set to C(lx).
+ type: bool
+ filesystems:
+ required: false
+ description:
+ - Mount additional filesystems into an OS VM.
+ type: list
+ elements: dict
+ firewall_enabled:
+ required: false
+ description:
+ - Enables the firewall, allowing fwadm(1M) rules to be applied.
+ type: bool
+ force:
+ required: false
+ description:
+ - Force a particular action (i.e. stop or delete a VM).
+ type: bool
+ fs_allowed:
+ required: false
+ description:
+ - Comma separated list of filesystem types this zone is allowed to mount.
+ type: str
+ hostname:
+ required: false
+ description:
+ - Zone/VM hostname.
+ type: str
+ image_uuid:
+ required: false
+ description:
+ - Image UUID.
+ type: str
+ indestructible_delegated:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to delegated datasets.
+ type: bool
+ indestructible_zoneroot:
+ required: false
+ description:
+ - Adds an C(@indestructible) snapshot to zoneroot.
+ type: bool
+ internal_metadata:
+ required: false
+ description:
+      - Metadata to be set and associated with this VM; this contains
+        operator-generated keys.
+ type: dict
+ internal_metadata_namespace:
+ required: false
+ description:
+ - List of namespaces to be set as I(internal_metadata-only); these namespaces
+ will come from I(internal_metadata) rather than I(customer_metadata).
+ type: str
+ kernel_version:
+ required: false
+ description:
+ - Kernel version to emulate for LX VMs.
+ type: str
+ limit_priv:
+ required: false
+ description:
+ - Set (comma separated) list of privileges the zone is allowed to use.
+ type: str
+ maintain_resolvers:
+ required: false
+ description:
+ - Resolvers in C(/etc/resolv.conf) will be updated when updating
+ the I(resolvers) property.
+ type: bool
+ max_locked_memory:
+ required: false
+ description:
+ - Total amount of memory (in MiBs) on the host that can be locked by this VM.
+ type: int
+ max_lwps:
+ required: false
+ description:
+ - Maximum number of lightweight processes this VM is allowed to have running.
+ type: int
+ max_physical_memory:
+ required: false
+ description:
+ - Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
+ type: int
+ max_swap:
+ required: false
+ description:
+ - Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
+ type: int
+ mdata_exec_timeout:
+ required: false
+ description:
+ - Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
+ that runs user-scripts in the zone.
+ type: int
+ name:
+ required: false
+ aliases: [ alias ]
+ description:
+ - Name of the VM. vmadm(1M) uses this as an optional name.
+ type: str
+ nic_driver:
+ required: false
+ description:
+ - Default value for a virtual NIC model for KVM guests.
+ type: str
+ nics:
+ required: false
+ description:
+ - A list of nics to add, valid properties are documented in vmadm(1M).
+ type: list
+ elements: dict
+ nowait:
+ required: false
+ description:
+ - Consider the provisioning complete when the VM first starts, rather than
+ when the VM has rebooted.
+ type: bool
+ qemu_opts:
+ required: false
+ description:
+ - Additional qemu arguments for KVM guests. This overwrites the default arguments
+ provided by vmadm(1M) and should only be used for debugging.
+ type: str
+ qemu_extra_opts:
+ required: false
+ description:
+ - Additional qemu cmdline arguments for KVM guests.
+ type: str
+ quota:
+ required: false
+ description:
+ - Quota on zone filesystems (in MiBs).
+ type: int
+ ram:
+ required: false
+ description:
+ - Amount of virtual RAM for a KVM guest (in MiBs).
+ type: int
+ resolvers:
+ required: false
+ description:
+ - List of resolvers to be put into C(/etc/resolv.conf).
+ type: list
+ elements: str
+ routes:
+ required: false
+ description:
+      - Dictionary that maps destinations to gateways; these will be set as
+        static routes in the VM.
+ type: dict
+ spice_opts:
+ required: false
+ description:
+      - Additional options for SPICE-enabled KVM VMs.
+ type: str
+ spice_password:
+ required: false
+ description:
+ - Password required to connect to SPICE. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ state:
+ choices: [ present, running, absent, deleted, stopped, created, restarted, rebooted ]
+ default: running
+ description:
+ - States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
+ operate on a VM that is currently provisioned. C(present) means that the VM will be
+ created if it was absent, and that it will be in a running state. C(absent) will
+ shutdown the zone before removing it.
+ C(stopped) means the zone will be created if it doesn't exist already, before shutting
+ it down.
+ type: str
+ tmpfs:
+ required: false
+ description:
+ - Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
+ type: int
+ uuid:
+ required: false
+ description:
+ - UUID of the VM. Can either be a full UUID or C(*) for all VMs.
+ type: str
+ vcpus:
+ required: false
+ description:
+ - Number of virtual CPUs for a KVM guest.
+ type: int
+ vga:
+ required: false
+ description:
+ - Specify VGA emulation used by KVM VMs.
+ type: str
+ virtio_txburst:
+ required: false
+ description:
+ - Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
+ type: int
+ virtio_txtimer:
+ required: false
+ description:
+ - Timeout (in nanoseconds) for the TX timer of virtio NICs.
+ type: int
+ vnc_password:
+ required: false
+ description:
+ - Password required to connect to VNC. By default no password is set.
+ Please note this can be read from the Global Zone.
+ type: str
+ vnc_port:
+ required: false
+ description:
+      - TCP port for the VNC server to listen on. Set C(0) for a random port,
+        or C(-1) to disable.
+ type: int
+ zfs_data_compression:
+ required: false
+ description:
+      - Specifies compression algorithm used for this VM's data dataset. This option
+ only has effect on delegated datasets.
+ type: str
+ zfs_data_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the delegated dataset's filesystem.
+ type: int
+ zfs_filesystem_limit:
+ required: false
+ description:
+ - Maximum number of filesystems the VM can have.
+ type: int
+ zfs_io_priority:
+ required: false
+ description:
+ - IO throttle priority value relative to other VMs.
+ type: int
+ zfs_root_compression:
+ required: false
+ description:
+      - Specifies compression algorithm used for this VM's root dataset. This option
+ only has effect on the zoneroot dataset.
+ type: str
+ zfs_root_recsize:
+ required: false
+ description:
+ - Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
+ type: int
+ zfs_snapshot_limit:
+ required: false
+ description:
+ - Number of snapshots the VM can have.
+ type: int
+ zpool:
+ required: false
+ description:
+ - ZFS pool the VM's zone dataset will be created in.
+ type: str
+requirements:
+ - python >= 2.6
+'''
+
+EXAMPLES = '''
+- name: Create SmartOS zone
+ community.general.vmadm:
+ brand: joyent
+ state: present
+ alias: fw_zone
+ image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
+ firewall_enabled: true
+ indestructible_zoneroot: true
+ nics:
+ - nic_tag: admin
+ ip: dhcp
+ primary: true
+ internal_metadata:
+ root_pw: 'secret'
+ quota: 1
+
+- name: Delete a zone
+ community.general.vmadm:
+ alias: test_zone
+ state: deleted
+
+- name: Stop all zones
+ community.general.vmadm:
+ uuid: '*'
+ state: stopped
+'''
+
+RETURN = '''
+uuid:
+ description: UUID of the managed VM.
+ returned: always
+ type: str
+ sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
+alias:
+ description: Alias of the managed VM.
+  returned: when addressing a VM by alias
+ type: str
+ sample: 'dns-zone'
+state:
+ description: State of the target, after execution.
+ returned: success
+ type: str
+ sample: 'running'
+'''
+
+import json
+import os
+import re
+import tempfile
+import traceback
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+# While vmadm(1M) supports a -E option to return any errors in JSON, the
+# generated JSON does not play well with the JSON parsers of Python.
+# The returned message contains '\n' as part of the stacktrace,
+# which breaks the parsers.
+
+
+def get_vm_prop(module, uuid, prop):
+ # Lookup a property for the given VM.
+ # Returns the property, or None if not found.
+ cmd = [module.vmadm, 'lookup', '-j', '-o', prop, 'uuid={0}'.format(uuid)]
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(prop),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if stdout_json:
+ return stdout_json[0].get(prop)
+
+
+def get_vm_uuid(module, alias):
+ # Lookup the uuid that goes with the given alias.
+ # Returns the uuid or '' if not found.
+ cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid', 'alias={0}'.format(alias)]
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(
+ msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
+
+ # If no VM was found matching the given alias, we get back an empty array.
+    # That is not an error condition as we might be explicitly checking its
+    # absence.
+ try:
+ stdout_json = json.loads(stdout)
+ except Exception as e:
+ module.fail_json(
+ msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
+ details=to_native(e), exception=traceback.format_exc())
+
+ if stdout_json:
+ return stdout_json[0].get('uuid')
+
+
+def get_all_vm_uuids(module):
+ # Retrieve the UUIDs for all VMs.
+ cmd = [module.vmadm, 'lookup', '-j', '-o', 'uuid']
+
+ (rc, stdout, stderr) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg='Failed to get VMs list', exception=stderr)
+
+ try:
+ stdout_json = json.loads(stdout)
+ return [v['uuid'] for v in stdout_json]
+ except Exception as e:
+ module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e),
+ exception=traceback.format_exc())
+
+
+def new_vm(module, uuid, vm_state):
+ payload_file = create_payload(module, uuid)
+
+ (rc, dummy, stderr) = vmadm_create_vm(module, payload_file)
+
+ if rc != 0:
+ changed = False
+ module.fail_json(msg='Could not create VM', exception=stderr)
+ else:
+ changed = True
+ # 'vmadm create' returns all output to stderr...
+ match = re.match('Successfully created VM (.*)', stderr)
+ if match:
+ vm_uuid = match.groups()[0]
+ if not is_valid_uuid(vm_uuid):
+ module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
+ else:
+ module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
+
+ # Now that the VM is created, ensure it is in the desired state (if not 'running')
+ if vm_state != 'running':
+ ret = set_vm_state(module, vm_uuid, vm_state)
+ if not ret:
+ module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
+
+ try:
+ os.unlink(payload_file)
+ except Exception as e:
+ # Since the payload may contain sensitive information, fail hard
+ # if we cannot remove the file so the operator knows about it.
+ module.fail_json(msg='Could not remove temporary JSON payload file {0}: {1}'.format(payload_file, to_native(e)),
+ exception=traceback.format_exc())
+
+ return changed, vm_uuid
+
+
+def vmadm_create_vm(module, payload_file):
+ # Create a new VM using the provided payload.
+ cmd = [module.vmadm, 'create', '-f', payload_file]
+
+ return module.run_command(cmd)
+
+
+def set_vm_state(module, vm_uuid, vm_state):
+ p = module.params
+
+ # Check if the VM is already in the desired state.
+ state = get_vm_prop(module, vm_uuid, 'state')
+ if state and (state == vm_state):
+ return None
+
+ # Lookup table for the state to be in, and which command to use for that.
+    # vm_state: [vmadm command, forceable?]
+ cmds = {
+ 'stopped': ['stop', True],
+ 'running': ['start', False],
+ 'deleted': ['delete', True],
+ 'rebooted': ['reboot', False]
+ }
+
+ command, forceable = cmds[vm_state]
+ force = ['-F'] if p['force'] and forceable else []
+
+ cmd = [module.vmadm, command] + force + [vm_uuid]
+
+ (dummy, dummy, stderr) = module.run_command(cmd)
+
+ match = re.match('^Successfully.*', stderr)
+ return match is not None
+
+
+def create_payload(module, uuid):
+ # Create the JSON payload (vmdef) and return the filename.
+
+ # Filter out the few options that are not valid VM properties.
+ module_options = ['force', 'state']
+ # @TODO make this a simple {} comprehension as soon as py2 is ditched
+ # @TODO {k: v for k, v in p.items() if k not in module_options}
+ vmdef = dict([(k, v) for k, v in module.params.items() if k not in module_options and v])
+
+ try:
+ vmdef_json = json.dumps(vmdef)
+ except Exception as e:
+ module.fail_json(
+ msg='Could not create valid JSON payload', exception=traceback.format_exc())
+
+    # Create the temporary file that contains our payload, and set tight
+    # permissions as it may contain sensitive information.
+ try:
+ # XXX: When there's a way to get the current ansible temporary directory
+ # drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
+ # the payload (thus removing the `save_payload` option).
+ fname = tempfile.mkstemp()[1]
+ os.chmod(fname, 0o400)
+ with open(fname, 'w') as fh:
+ fh.write(vmdef_json)
+ except Exception as e:
+ module.fail_json(msg='Could not save JSON payload: %s' % to_native(e), exception=traceback.format_exc())
+
+ return fname
+
+
+def vm_state_transition(module, uuid, vm_state):
+ ret = set_vm_state(module, uuid, vm_state)
+
+ # Whether the VM changed state.
+ if ret is None:
+ return False
+ elif ret:
+ return True
+ else:
+ module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
+
+
+def is_valid_uuid(uuid):
+ return re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE) is not None
+
+
+def validate_uuids(module):
+ failed = [
+ name
+ for name, pvalue in [(x, module.params[x]) for x in ['uuid', 'image_uuid']]
+ if pvalue and pvalue != '*' and not is_valid_uuid(pvalue)
+ ]
+
+ if failed:
+ module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
+
+
+def manage_all_vms(module, vm_state):
+ # Handle operations for all VMs, which can by definition only
+ # be state transitions.
+ state = module.params['state']
+
+ if state == 'created':
+ module.fail_json(msg='State "created" is only valid for tasks with a single VM')
+
+ # If any of the VMs has a change, the task as a whole has a change.
+ any_changed = False
+
+ # First get all VM uuids and for each check their state, and adjust it if needed.
+ for uuid in get_all_vm_uuids(module):
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+ if not current_vm_state and vm_state == 'deleted':
+ any_changed = False
+ else:
+ if module.check_mode:
+                if (not current_vm_state) or (current_vm_state != vm_state):
+ any_changed = True
+ else:
+ any_changed = vm_state_transition(module, uuid, vm_state) or any_changed
+
+ return any_changed
+
+
+def main():
+ # In order to reduce the clutter and boilerplate for trivial options,
+ # abstract the vmadm properties and build the dict of arguments later.
+ # Dict of all options that are simple to define based on their type.
+ # They're not required and have a default of None.
+ properties = {
+ 'str': [
+ 'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
+ 'image_uuid', 'internal_metadata_namespace', 'kernel_version',
+ 'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
+ 'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
+ 'zfs_root_compression', 'zpool'
+ ],
+ 'bool': [
+ 'archive_on_delete', 'autoboot', 'delegate_dataset',
+ 'docker', 'firewall_enabled', 'force', 'indestructible_delegated',
+ 'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
+ ],
+ 'int': [
+ 'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
+ 'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
+ 'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
+ 'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
+ 'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
+ 'zfs_snapshot_limit'
+ ],
+ 'dict': ['customer_metadata', 'internal_metadata', 'routes'],
+ }
+
+ # Start with the options that are not as trivial as those above.
+ options = dict(
+ state=dict(
+ default='running',
+ type='str',
+ choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
+ ),
+ name=dict(
+ type='str',
+ aliases=['alias']
+ ),
+ brand=dict(
+ default='joyent',
+ type='str',
+ choices=['joyent', 'joyent-minimal', 'lx', 'kvm', 'bhyve']
+ ),
+ cpu_type=dict(
+ default='qemu64',
+ type='str',
+ choices=['host', 'qemu64']
+ ),
+ # Regular strings, however these require additional options.
+ spice_password=dict(type='str', no_log=True),
+ vnc_password=dict(type='str', no_log=True),
+ disks=dict(type='list', elements='dict'),
+ nics=dict(type='list', elements='dict'),
+ resolvers=dict(type='list', elements='str'),
+ filesystems=dict(type='list', elements='dict'),
+ )
+
+ # Add our 'simple' options to options dict.
+ for type in properties:
+ for p in properties[type]:
+ option = dict(type=type)
+ options[p] = option
+
+ module = AnsibleModule(
+ argument_spec=options,
+ supports_check_mode=True,
+ required_one_of=[['name', 'uuid']]
+ )
+
+ module.vmadm = module.get_bin_path('vmadm', required=True)
+
+ p = module.params
+ uuid = p['uuid']
+ state = p['state']
+
+ # Translate the state parameter into something we can use later on.
+ if state in ['present', 'running']:
+ vm_state = 'running'
+ elif state in ['stopped', 'created']:
+ vm_state = 'stopped'
+ elif state in ['absent', 'deleted']:
+ vm_state = 'deleted'
+ elif state in ['restarted', 'rebooted']:
+ vm_state = 'rebooted'
+
+ result = {'state': state}
+
+    # While it's possible to refer to a given VM by its `alias`, it's easier
+    # to operate on VMs by their UUID. So if we're not given a `uuid`, look
+    # it up.
+ if not uuid:
+ uuid = get_vm_uuid(module, p['name'])
+    # Bit of a chicken and egg problem here for VMs with state == deleted.
+    # If they're going to be removed in this play, we have to look up the
+    # uuid. If they're already deleted there's nothing to look up.
+    # So if state == deleted and get_vm_uuid() returned '', the VM is already
+    # deleted and there's nothing else to do.
+ if uuid is None and vm_state == 'deleted':
+ result['name'] = p['name']
+ module.exit_json(**result)
+
+ validate_uuids(module)
+
+ if p['name']:
+ result['name'] = p['name']
+ result['uuid'] = uuid
+
+ if uuid == '*':
+ result['changed'] = manage_all_vms(module, vm_state)
+ module.exit_json(**result)
+
+ # The general flow is as follows:
+    # - first the current state of the VM is obtained by its UUID.
+    # - If the state was not found and the desired state is 'deleted', return.
+    # - If the state was not found, it means the VM has to be created.
+    #   Subsequently the VM will be set to the desired state (i.e. stopped)
+    # - Otherwise, it means the VM exists already and we operate on its
+    #   state (i.e. reboot it).
+ #
+ # In the future it should be possible to query the VM for a particular
+ # property as a valid state (i.e. queried) so the result can be
+ # registered.
+ # Also, VMs should be able to get their properties updated.
+ # Managing VM snapshots should be part of a standalone module.
+
+ # First obtain the VM state to determine what needs to be done with it.
+ current_vm_state = get_vm_prop(module, uuid, 'state')
+
+ # First handle the case where the VM should be deleted and is not present.
+ if not current_vm_state and vm_state == 'deleted':
+ result['changed'] = False
+ elif module.check_mode:
+ # Shortcut for check mode, if there is no VM yet, it will need to be created.
+ # Or, if the VM is not in the desired state yet, it needs to transition.
+        result['changed'] = (not current_vm_state) or (current_vm_state != vm_state)
+ elif not current_vm_state:
+ # No VM was found that matched the given ID (alias or uuid), so we create it.
+ result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
+ else:
+ # VM was found, operate on its state directly.
+ result['changed'] = vm_state_transition(module, uuid, vm_state)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/wakeonlan.py b/ansible_collections/community/general/plugins/modules/wakeonlan.py
new file mode 100644
index 000000000..6d7e09452
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/wakeonlan.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: wakeonlan
+short_description: Send a magic Wake-on-LAN (WoL) broadcast packet
+description:
+ - The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ mac:
+ description:
+ - MAC address to send Wake-on-LAN broadcast packet for.
+ required: true
+ type: str
+ broadcast:
+ description:
+ - Network broadcast address to use for broadcasting magic Wake-on-LAN packet.
+ default: 255.255.255.255
+ type: str
+ port:
+ description:
+ - UDP port to use for magic Wake-on-LAN packet.
+ default: 7
+ type: int
+todo:
+ - Add arping support to check whether the system is up (before and after)
+ - Enable check-mode support (when we have arping support)
+ - Does not have SecureOn password support
+notes:
+  - This module sends a magic packet, without knowing whether it worked.
+  - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS).
+ - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. PXE first).
+seealso:
+- module: community.windows.win_wakeonlan
+author:
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ community.general.wakeonlan:
+ mac: '00:00:5E:00:53:66'
+ broadcast: 192.0.2.23
+ delegate_to: localhost
+
+- community.general.wakeonlan:
+ mac: 00:00:5E:00:53:66
+ port: 9
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+# Default return values
+'''
+import socket
+import struct
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def wakeonlan(module, mac, broadcast, port):
+ """ Send a magic Wake-on-LAN packet. """
+
+ mac_orig = mac
+
+    # Remove the separator from the MAC address: a 17-character address
+    # (12 hex digits plus 5 separators) is assumed to use a single separator
+    # character, which appears at index 2.
+    if len(mac) == 12 + 5:
+        mac = mac.replace(mac[2], '')
+
+ # If we don't end up with 12 hexadecimal characters, fail
+ if len(mac) != 12:
+ module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig)
+
+ # Test if it converts to an integer, otherwise fail
+ try:
+ int(mac, 16)
+ except ValueError:
+ module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig)
+
+ # Create payload for magic packet
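+    # A magic packet is six 0xFF bytes followed by the target MAC repeated at
+    # least 16 times; extra repetitions (mac * 20 below) are generally ignored
+    # by receivers.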
+ data = b''
+ padding = ''.join(['FFFFFFFFFFFF', mac * 20])
+ for i in range(0, len(padding), 2):
+ data = b''.join([data, struct.pack('B', int(padding[i: i + 2], 16))])
+
+ # Broadcast payload to network
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ if not module.check_mode:
+
+ try:
+ sock.sendto(data, (broadcast, port))
+ except socket.error as e:
+ sock.close()
+ module.fail_json(msg=to_native(e), exception=traceback.format_exc())
+
+ sock.close()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ mac=dict(type='str', required=True),
+ broadcast=dict(type='str', default='255.255.255.255'),
+ port=dict(type='int', default=7),
+ ),
+ supports_check_mode=True,
+ )
+
+ mac = module.params['mac']
+ broadcast = module.params['broadcast']
+ port = module.params['port']
+
+ wakeonlan(module, mac, broadcast, port)
+
+ module.exit_json(changed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py
new file mode 100644
index 000000000..a51d454d9
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_command.py
@@ -0,0 +1,345 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: wdc_redfish_command
+short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
+version_added: 5.4.0
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ perform an action.
+ - Manages OOB controller firmware. For example, Firmware Activate, Update and Activate.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ description:
+ - Base URI of OOB controller. Must include this or I(ioms).
+ type: str
+ ioms:
+ description:
+ - List of IOM FQDNs for the enclosure. Must include this or I(baseuri).
+ type: list
+ elements: str
+ username:
+ description:
+ - User for authentication with OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authentication with OOB controller.
+ type: str
+ auth_token:
+ description:
+ - Security token for authentication with OOB controller.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller.
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - ID of the component to modify, such as C(Enclosure), C(IOModuleAFRU), C(PowerSupplyBFRU), C(FanExternalFRU3), or C(FanInternalFRU).
+ type: str
+ version_added: 5.4.0
+ update_image_uri:
+ required: false
+ description:
+ - The URI of the image for the update.
+ type: str
+ update_creds:
+ required: false
+ description:
+ - The credentials for retrieving the update image.
+ type: dict
+ suboptions:
+ username:
+ required: false
+ description:
+ - The username for retrieving the update image.
+ type: str
+ password:
+ required: false
+ description:
+ - The password for retrieving the update image.
+ type: str
+notes:
+  - In the inventory, you can specify I(baseuri) or I(ioms). See the EXAMPLES section.
+  - I(ioms) is a list of FQDNs for the enclosure's IOMs.
+
+
+author: Mike Moerk (@mikemoerk)
+'''
+
+EXAMPLES = '''
+- name: Firmware Activate (required after SimpleUpdate to apply the new firmware)
+ community.general.wdc_redfish_command:
+ category: Update
+ command: FWActivate
+ ioms: "{{ ioms }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Firmware Activate with individual IOMs specified
+ community.general.wdc_redfish_command:
+ category: Update
+ command: FWActivate
+ ioms:
+ - iom1.wdc.com
+ - iom2.wdc.com
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Firmware Activate with baseuri specified
+ community.general.wdc_redfish_command:
+ category: Update
+ command: FWActivate
+ baseuri: "iom1.wdc.com"
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+
+- name: Update and Activate (orchestrates firmware update and activation with a single command)
+ community.general.wdc_redfish_command:
+ category: Update
+ command: UpdateAndActivate
+ ioms: "{{ ioms }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ update_image_uri: "{{ update_image_uri }}"
+ update_creds:
+ username: operator
+ password: supersecretpwd
+
+- name: Turn on enclosure indicator LED
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: Enclosure
+ command: IndicatorLedOn
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Turn off IOM A indicator LED
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: IOModuleAFRU
+ command: IndicatorLedOff
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Turn on Power Supply B indicator LED
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: PowerSupplyBFRU
+ command: IndicatorLedOn
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Turn on External Fan 3 indicator LED
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: FanExternalFRU3
+ command: IndicatorLedOn
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Turn on Internal Fan indicator LED
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: FanInternalFRU
+ command: IndicatorLedOn
+ username: "{{ username }}"
+ password: "{{ password }}"
+
+- name: Set chassis to Low Power Mode
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: Enclosure
+ command: PowerModeLow
+
+- name: Set chassis to Normal Power Mode
+ community.general.wdc_redfish_command:
+ category: Chassis
+ resource_id: Enclosure
+ command: PowerModeNormal
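+
+# A hedged addition (not from the original docs): I(auth_token) can stand in
+# for the username/password pair, since the module makes the two mutually
+# exclusive; the token variable name is a placeholder.
+- name: Firmware Activate using a session token
+  community.general.wdc_redfish_command:
+    category: Update
+    command: FWActivate
+    ioms: "{{ ioms }}"
+    auth_token: "{{ wdc_session_token }}"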
+
+'''
+
+RETURN = '''
+msg:
+ description: Message with action result or error description
+ returned: always
+ type: str
+ sample: "Action was successful"
+'''
+
+from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+CATEGORY_COMMANDS_ALL = {
+ "Update": [
+ "FWActivate",
+ "UpdateAndActivate"
+ ],
+ "Chassis": [
+ "IndicatorLedOn",
+ "IndicatorLedOff",
+ "PowerModeLow",
+ "PowerModeNormal",
+ ]
+}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ ioms=dict(type='list', elements='str'),
+ baseuri=dict(),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ update_creds=dict(
+ type='dict',
+ options=dict(
+ username=dict(),
+ password=dict(no_log=True)
+ )
+ ),
+ resource_id=dict(),
+ update_image_uri=dict(),
+ timeout=dict(type='int', default=10)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ('baseuri', 'ioms')
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=True
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Resource to modify
+ resource_id = module.params['resource_id']
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Build root URI(s)
+ if module.params.get("baseuri") is not None:
+ root_uris = ["https://" + module.params['baseuri']]
+ else:
+ root_uris = [
+ "https://" + iom for iom in module.params['ioms']
+ ]
+ rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module,
+ resource_id=resource_id, data_modification=True)
+
+ # Organize by Categories / Commands
+
+ if category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+ # update options
+ update_opts = {
+ 'update_creds': module.params['update_creds']
+ }
+ for command in command_list:
+ if command == "FWActivate":
+ if module.check_mode:
+ result = {
+ 'ret': True,
+ 'changed': True,
+ 'msg': 'FWActivate not performed in check mode.'
+ }
+ else:
+ result = rf_utils.firmware_activate(update_opts)
+ elif command == "UpdateAndActivate":
+ update_opts["update_image_uri"] = module.params['update_image_uri']
+ result = rf_utils.update_and_activate(update_opts)
+
+ elif category == "Chassis":
+ result = rf_utils._find_chassis_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ led_commands = ["IndicatorLedOn", "IndicatorLedOff"]
+
+ # Check if more than one led_command is present
+    num_led_commands = sum(command in led_commands for command in command_list)
+ if num_led_commands > 1:
+ result = {'ret': False, 'msg': "Only one IndicatorLed command should be sent at a time."}
+ else:
+ for command in command_list:
+ if command.startswith("IndicatorLed"):
+ result = rf_utils.manage_chassis_indicator_led(command)
+ elif command.startswith("PowerMode"):
+ result = rf_utils.manage_chassis_power_mode(command)
+
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+ else:
+ del result['ret']
+ changed = result.get('changed', True)
+ session = result.get('session', dict())
+ module.exit_json(changed=changed,
+ session=session,
+ msg='Action was successful' if not module.check_mode else result.get(
+ 'msg', "No action performed in check mode."
+ ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py
new file mode 100644
index 000000000..038e1a72d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/wdc_redfish_info.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2022 Western Digital Corporation
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: wdc_redfish_info
+short_description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs
+version_added: 5.4.0
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+ get information back.
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ description:
+    - Base URI of OOB controller. Either this or I(ioms) must be specified.
+ type: str
+ ioms:
+ description:
+    - List of IOM FQDNs for the enclosure. Either this or I(baseuri) must be specified.
+ type: list
+ elements: str
+ username:
+ description:
+ - User for authentication with OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authentication with OOB controller.
+ type: str
+ auth_token:
+ description:
+ - Security token for authentication with OOB controller.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller.
+ default: 10
+ type: int
+
+notes:
+ - In the inventory, you can specify I(baseuri) or I(ioms). See the EXAMPLES section.
+ - I(ioms) is a list of FQDNs for the enclosure's IOMs.
+
+author: Mike Moerk (@mikemoerk)
+'''
+
+EXAMPLES = '''
+- name: Get Simple Update Status with individual IOMs specified
+ community.general.wdc_redfish_info:
+ category: Update
+ command: SimpleUpdateStatus
+ ioms:
+ - iom1.wdc.com
+ - iom2.wdc.com
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+- name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}"
+
+- name: Get Simple Update Status with baseuri specified
+ community.general.wdc_redfish_info:
+ category: Update
+ command: SimpleUpdateStatus
+ baseuri: "iom1.wdc.com"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+- name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.simple_update_status.entries | to_nice_json }}"
+'''
+
+RETURN = '''
+Description:
+ description: Firmware update status description.
+ returned: always
+ type: str
+ sample: Ready for FW update
+ErrorCode:
+ description: Numeric error code for firmware update status. Non-zero indicates an error condition.
+ returned: always
+ type: int
+ sample: 0
+EstimatedRemainingMinutes:
+ description: Estimated number of minutes remaining in firmware update operation.
+ returned: always
+ type: int
+ sample: 20
+StatusCode:
+ description: Firmware update status code.
+ returned: always
+ type: int
+ sample: 2
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils import WdcRedfishUtils
+
+CATEGORY_COMMANDS_ALL = {
+ "Update": ["SimpleUpdateStatus"]
+}
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ ioms=dict(type='list', elements='str'),
+ baseuri=dict(),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10)
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ('baseuri', 'ioms')
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=True
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, sorted(CATEGORY_COMMANDS_ALL.keys()))))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Build root URI(s)
+ if module.params.get("baseuri") is not None:
+ root_uris = ["https://" + module.params['baseuri']]
+ else:
+ root_uris = [
+ "https://" + iom for iom in module.params['ioms']
+ ]
+ rf_utils = WdcRedfishUtils(creds, root_uris, timeout, module,
+ resource_id=None,
+ data_modification=False
+ )
+
+ # Organize by Categories / Commands
+
+ if category == "Update":
+ # execute only if we find UpdateService resources
+ resource = rf_utils._find_updateservice_resource()
+ if resource['ret'] is False:
+ module.fail_json(msg=resource['msg'])
+ for command in command_list:
+ if command == "SimpleUpdateStatus":
+ simple_update_status_result = rf_utils.get_simple_update_status()
+ if simple_update_status_result['ret'] is False:
+                module.fail_json(msg=to_native(simple_update_status_result['msg']))
+ else:
+ del simple_update_status_result['ret']
+ result["simple_update_status"] = simple_update_status_result
+ module.exit_json(changed=False, redfish_facts=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_app.py b/ansible_collections/community/general/plugins/modules/webfaction_app.py
new file mode 100644
index 000000000..7a4702675
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_app.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# https://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether the application should exist
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at U(https://docs.webfaction.com/xmlrpc-api/apps.html) for a list.
+ required: true
+ type: str
+
+ autostart:
+ description:
+ - Whether the app should restart with an C(autostart.cgi) script
+ type: bool
+ default: false
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ default: ''
+ type: str
+
+ port_open:
+ description:
+      - If the port should be opened
+ type: bool
+ default: false
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+ type: str
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+ type: str
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ type: str
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ community.general.webfaction_app:
+ name: "my_wsgi_app1"
+ state: present
+ type: mod_wsgi35-python27
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
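+# Module-level XML-RPC proxy for the Webfaction API; every call below passes
+# the session_id obtained from webfaction.login().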
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ type=dict(required=True),
+ autostart=dict(type='bool', default=False),
+ extra_info=dict(default=""),
+ port_open=dict(type='bool', default=False),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(),
+ ),
+ supports_check_mode=True
+ )
+ app_name = module.params['name']
+ app_type = module.params['type']
+ app_state = module.params['state']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ app_list = webfaction.list_apps(session_id)
+ app_map = dict([(i['name'], i) for i in app_list])
+ existing_app = app_map.get(app_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if app_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_app:
+ if existing_app['type'] != app_type:
+ module.fail_json(msg="App already exists with different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change it
+ # Should check other parameters.
+ module.exit_json(
+ changed=False,
+ result=existing_app,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ result.update(
+ webfaction.create_app(
+ session_id, app_name, app_type,
+ module.boolean(module.params['autostart']),
+ module.params['extra_info'],
+ module.boolean(module.params['port_open'])
+ )
+ )
+
+ elif app_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_app:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_app(session_id, app_name)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(app_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_db.py b/ansible_collections/community/general/plugins/modules/webfaction_db.py
new file mode 100644
index 000000000..c4742cb21
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_db.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create a webfaction database using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+  - Add or remove a database on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether the database should exist
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+ type: str
+
+ password:
+ description:
+ - The password for the new database user.
+ type: str
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+ type: str
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+ type: str
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ type: str
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ community.general.webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
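+ # A hedged companion example (not from the original docs) showing that
+ # deletion behaviour in action:
+ - name: Delete the database and its matching default user
+   community.general.webfaction_db:
+     name: "{{webfaction_user}}_db1"
+     state: absent
+     type: mysql
+     login_name: "{{webfaction_user}}"
+     login_password: "{{webfaction_passwd}}"
+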
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+            type=dict(required=True, choices=['mysql', 'postgresql']),
+ password=dict(no_log=True),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ machine=dict(),
+ ),
+ supports_check_mode=True
+ )
+ db_name = module.params['name']
+ db_state = module.params['state']
+ db_type = module.params['type']
+ db_passwd = module.params['password']
+
+ if module.params['machine']:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password'],
+ module.params['machine']
+ )
+ else:
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ db_list = webfaction.list_dbs(session_id)
+ db_map = dict([(i['name'], i) for i in db_list])
+ existing_db = db_map.get(db_name)
+
+ user_list = webfaction.list_db_users(session_id)
+ user_map = dict([(i['username'], i) for i in user_list])
+ existing_user = user_map.get(db_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if db_state == 'present':
+
+ # Does a database with this name already exist?
+ if existing_db:
+ # Yes, but of a different type - fail
+ if existing_db['db_type'] != db_type:
+ module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")
+
+ # If it exists with the right type, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the db
+ # and default user.
+ result.update(
+ webfaction.create_db(
+ session_id, db_name, db_type, db_passwd
+ )
+ )
+
+ elif db_state == 'absent':
+
+ # If this isn't a dry run...
+ if not module.check_mode:
+
+ if not (existing_db or existing_user):
+ module.exit_json(changed=False,)
+
+ if existing_db:
+ # Delete the db if it exists
+ result.update(
+ webfaction.delete_db(session_id, db_name, db_type)
+ )
+
+ if existing_user:
+ # Delete the default db user if it exists
+ result.update(
+ webfaction.delete_db_user(session_id, db_name, db_type)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(db_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_domain.py b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
new file mode 100644
index 000000000..9bffec3cd
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_domain.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+  - Add or remove domains or subdomains on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+  - When I(deleting) domains with I(state=absent), note that if you specify subdomains, only those particular subdomains are deleted.
+    If you do not specify subdomains, the whole domain is deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether the domain should exist
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ default: []
+ type: list
+ elements: str
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+ type: str
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ community.general.webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ subdomains=dict(default=[], type='list', elements='str'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ domain_name = module.params['name']
+ domain_state = module.params['state']
+ domain_subdomains = module.params['subdomains']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ domain_list = webfaction.list_domains(session_id)
+ domain_map = dict([(i['domain'], i) for i in domain_list])
+ existing_domain = domain_map.get(domain_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if domain_state == 'present':
+
+ # Does an app with this name already exist?
+ if existing_domain:
+
+ if set(existing_domain['subdomains']) >= set(domain_subdomains):
+ # If it exists with the right subdomains, we don't change anything.
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the app
+ # print positional_args
+ result.update(
+ webfaction.create_domain(
+ *positional_args
+ )
+ )
+
+ elif domain_state == 'absent':
+
+ # If the app's already not there, nothing changed.
+ if not existing_domain:
+ module.exit_json(
+ changed=False,
+ )
+
+ positional_args = [session_id, domain_name] + domain_subdomains
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the app
+ result.update(
+ webfaction.delete_domain(*positional_args)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(domain_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
new file mode 100644
index 000000000..2b543c5b1
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_mailbox.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser and Andy Baker
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create webfaction mailbox using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+  - Add or remove mailboxes on a Webfaction account. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+ type: str
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+ type: str
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+ - name: Create a mailbox
+ community.general.webfaction_mailbox:
+ mailbox_name="mybox"
+ mailbox_password="myboxpw"
+ state=present
+ login_name={{webfaction_user}}
+ login_password={{webfaction_passwd}}
+'''
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ mailbox_name=dict(required=True),
+ mailbox_password=dict(required=True, no_log=True),
+ state=dict(required=False, choices=['present', 'absent'], default='present'),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+
+ mailbox_name = module.params['mailbox_name']
+ site_state = module.params['state']
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
+ existing_mailbox = mailbox_name in mailbox_list
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a mailbox with this name already exist?
+ if existing_mailbox:
+ module.exit_json(changed=False,)
+
+ positional_args = [session_id, mailbox_name]
+
+ if not module.check_mode:
+ # If this isn't a dry run, create the mailbox
+ result.update(webfaction.create_mailbox(*positional_args))
+
+ elif site_state == 'absent':
+
+ # If the mailbox is already not there, nothing changed.
+ if not existing_mailbox:
+ module.exit_json(changed=False)
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the mailbox
+ result.update(webfaction.delete_mailbox(session_id, mailbox_name))
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(changed=True, result=result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/webfaction_site.py b/ansible_collections/community/general/plugins/modules/webfaction_site.py
new file mode 100644
index 000000000..385f55211
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/webfaction_site.py
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Quentin Stafford-Fraser
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create Webfaction website using Ansible and the Webfaction API
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+  - Add or remove a website on a Webfaction host. Further documentation at U(https://github.com/quentinsf/ansible-webfaction).
+author: Quentin Stafford-Fraser (@quentinsf)
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
+ - See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+ type: str
+
+ state:
+ description:
+ - Whether the website should exist
+ choices: ['present', 'absent']
+ default: "present"
+ type: str
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+ type: str
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ type: bool
+ default: false
+
+ site_apps:
+ description:
+      - A list of two-element lists mapping apps to URL paths, for example C(['testapp1', '/']).
+ default: []
+ type: list
+ elements: list
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ default: []
+ type: list
+ elements: str
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+ type: str
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+ type: str
+'''
+
+EXAMPLES = '''
+ - name: Create website
+ community.general.webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: false
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+'''
+
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves import xmlrpc_client
+
+
+webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(choices=['present', 'absent'], default='present'),
+ # You can specify an IP address or hostname.
+ host=dict(required=True),
+ https=dict(required=False, type='bool', default=False),
+ subdomains=dict(type='list', elements='str', default=[]),
+ site_apps=dict(type='list', elements='list', default=[]),
+ login_name=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ ),
+ supports_check_mode=True
+ )
+ site_name = module.params['name']
+ site_state = module.params['state']
+ site_host = module.params['host']
+ site_ip = socket.gethostbyname(site_host)
+
+ session_id, account = webfaction.login(
+ module.params['login_name'],
+ module.params['login_password']
+ )
+
+ site_list = webfaction.list_websites(session_id)
+ site_map = dict([(i['name'], i) for i in site_list])
+ existing_site = site_map.get(site_name)
+
+ result = {}
+
+ # Here's where the real stuff happens
+
+ if site_state == 'present':
+
+ # Does a site with this name already exist?
+ if existing_site:
+
+ # If yes, but it's on a different IP address, then fail.
+ # If we wanted to allow relocation, we could add a 'relocate=true' option
+ # which would get the existing IP address, delete the site there, and create it
+ # at the new address. A bit dangerous, perhaps, so for now we'll require manual
+ # deletion if it's on another host.
+
+ if existing_site['ip'] != site_ip:
+ module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
+
+ # If it's on this host and the key parameters are the same, nothing needs to be done.
+
+ if (existing_site['https'] == module.boolean(module.params['https'])) and \
+ (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
+ (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
+ module.exit_json(
+ changed=False
+ )
+
+ positional_args = [
+ session_id, site_name, site_ip,
+ module.boolean(module.params['https']),
+ module.params['subdomains'],
+ ]
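+    # Each site_apps entry is an [app_name, url_path] pair (see EXAMPLES);
+    # convert each to a tuple for the XML-RPC call.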
+ for a in module.params['site_apps']:
+ positional_args.append((a[0], a[1]))
+
+ if not module.check_mode:
+ # If this isn't a dry run, create or modify the site
+ result.update(
+ webfaction.create_website(
+ *positional_args
+ ) if not existing_site else webfaction.update_website(
+ *positional_args
+ )
+ )
+
+ elif site_state == 'absent':
+
+ # If the site's already not there, nothing changed.
+ if not existing_site:
+ module.exit_json(
+ changed=False,
+ )
+
+ if not module.check_mode:
+ # If this isn't a dry run, delete the site
+ result.update(
+ webfaction.delete_website(session_id, site_name, site_ip)
+ )
+
+ else:
+ module.fail_json(msg="Unknown state specified: {0}".format(site_state))
+
+ module.exit_json(
+ changed=True,
+ result=result
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xattr.py b/ansible_collections/community/general/plugins/modules/xattr.py
new file mode 100644
index 000000000..0b44fdaad
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xattr.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xattr
+short_description: Manage user-defined extended attributes
+description:
+  - Manages filesystem user-defined extended attributes.
+ - Requires that extended attributes are enabled on the target filesystem
+ and that the setfattr/getfattr utilities are present.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of.
+ - Before 2.3 this option was only usable as I(name).
+ type: path
+ required: true
+ aliases: [ name ]
+ namespace:
+ description:
+    - Namespace of the name/key.
+ type: str
+ default: user
+ key:
+ description:
+ - The name of a specific Extended attribute key to set/retrieve.
+ type: str
+ value:
+ description:
+    - The value to set the name/key to. Setting it automatically sets I(state) to C(present).
+ type: str
+ state:
+ description:
+    - Defines which action you want to take.
+    - C(read) retrieves the current value for a I(key) (default).
+    - C(present) sets I(path) to C(value); this is the default if I(value) is set.
+    - C(all) dumps all data.
+    - C(keys) retrieves all keys.
+    - C(absent) deletes the key.
+ type: str
+ choices: [ absent, all, keys, present, read ]
+ default: read
+ follow:
+ description:
+    - If C(true), dereferences symlinks and sets/gets attributes on the symlink target;
+      otherwise acts on the symlink itself.
+ type: bool
+ default: true
+notes:
+  - As of Ansible 2.3, the I(name) option has been renamed to I(path), but I(name) still works as well.
+author:
+ - Brian Coca (@bcoca)
+'''
+
+EXAMPLES = '''
+- name: Obtain the extended attributes of /etc/foo.conf
+ community.general.xattr:
+ path: /etc/foo.conf
+
+- name: Set the key 'user.foo' to value 'bar'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ value: bar
+
+- name: Set the key 'trusted.glusterfs.volume-id' to value '0x817b94343f164f199e5b573b4ea1f914'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ value: "0x817b94343f164f199e5b573b4ea1f914"
+
+- name: Remove the key 'user.foo'
+ community.general.xattr:
+ path: /etc/foo.conf
+ key: foo
+ state: absent
+
+- name: Remove the key 'trusted.glusterfs.volume-id'
+ community.general.xattr:
+ path: /mnt/bricks/brick1
+ namespace: trusted
+ key: glusterfs.volume-id
+ state: absent
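+
+# A hedged addition (not in the original docs): list the keys present on a
+# file and print them from the module's C(xattr) return value.
+- name: List extended attribute keys on /etc/foo.conf
+  community.general.xattr:
+    path: /etc/foo.conf
+    state: keys
+  register: xattr_keys
+
+- name: Show the keys
+  ansible.builtin.debug:
+    var: xattr_keys.xattr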
+'''
+
+import os
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+
+
+def get_xattr_keys(module, path, follow):
+ cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
+
+ if not follow:
+ cmd.append('-h')
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def get_xattr(module, path, key, follow):
+ cmd = [module.get_bin_path('getfattr', True), '--absolute-names']
+
+ if not follow:
+ cmd.append('-h')
+ if key is None:
+ cmd.append('-d')
+ else:
+ cmd.append('-n')
+ cmd.append(key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def set_xattr(module, path, key, value, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-n')
+ cmd.append(key)
+ cmd.append('-v')
+ cmd.append(value)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd)
+
+
+def rm_xattr(module, path, key, follow):
+
+ cmd = [module.get_bin_path('setfattr', True)]
+ if not follow:
+ cmd.append('-h')
+ cmd.append('-x')
+ cmd.append(key)
+ cmd.append(path)
+
+ return _run_xattr(module, cmd, False)
+
+
+def _run_xattr(module, cmd, check_rc=True):
+
+ try:
+ (rc, out, err) = module.run_command(cmd, check_rc=check_rc)
+ except Exception as e:
+ module.fail_json(msg="%s!" % to_native(e))
+
+ # result = {'raw': out}
+ result = {}
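+    # Parse getfattr output: '# file: ...' headers and blank lines are
+    # skipped, key="value" lines are split and unquoted, and bare key-only
+    # lines are recorded with an empty value.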
+ for line in out.splitlines():
+ if line.startswith('#') or line == '':
+ pass
+ elif '=' in line:
+ (key, val) = line.split('=', 1)
+ result[key] = val.strip('"')
+ else:
+ result[line] = ''
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', required=True, aliases=['name']),
+ namespace=dict(type='str', default='user'),
+ key=dict(type='str', no_log=False),
+ value=dict(type='str'),
+ state=dict(type='str', default='read', choices=['absent', 'all', 'keys', 'present', 'read']),
+ follow=dict(type='bool', default=True),
+ ),
+ supports_check_mode=True,
+ )
+ path = module.params.get('path')
+ namespace = module.params.get('namespace')
+ key = module.params.get('key')
+ value = module.params.get('value')
+ state = module.params.get('state')
+ follow = module.params.get('follow')
+
+ if not os.path.exists(path):
+ module.fail_json(msg="path not found or not accessible!")
+
+ changed = False
+ msg = ""
+ res = {}
+
+ if key is None and state in ['absent', 'present']:
+ module.fail_json(msg="%s needs a key parameter" % state)
+
+ # Prepend the key with the namespace if defined
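+    # e.g. with the default namespace, key 'foo' becomes 'user.foo'; a key
+    # already starting with 'user.' is left untouched.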
+ if (
+ key is not None and
+ namespace is not None and
+ len(namespace) > 0 and
+ not (namespace == 'user' and key.startswith('user.'))):
+ key = '%s.%s' % (namespace, key)
+
+ if (state == 'present' or value is not None):
+ current = get_xattr(module, path, key, follow)
+ if current is None or key not in current or value != current[key]:
+ if not module.check_mode:
+ res = set_xattr(module, path, key, value, follow)
+ changed = True
+ res = current
+ msg = "%s set to %s" % (key, value)
+ elif state == 'absent':
+ current = get_xattr(module, path, key, follow)
+ if current is not None and key in current:
+ if not module.check_mode:
+ res = rm_xattr(module, path, key, follow)
+ changed = True
+ res = current
+ msg = "%s removed" % (key)
+ elif state == 'keys':
+ res = get_xattr_keys(module, path, follow)
+ msg = "returning all keys"
+ elif state == 'all':
+ res = get_xattr(module, path, None, follow)
+ msg = "dumping all"
+ else:
+ res = get_xattr(module, path, key, follow)
+ msg = "returning %s" % key
+
+ module.exit_json(changed=changed, msg=msg, xattr=res)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xbps.py b/ansible_collections/community/general/plugins/modules/xbps.py
new file mode 100644
index 000000000..1fea5b384
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xbps.py
@@ -0,0 +1,351 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2016 Dino Occhialini <dino.occhialini@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xbps
+short_description: Manage packages with XBPS
+description:
+ - Manage packages with the XBPS package manager.
+author:
+ - "Dino Occhialini (@dinoocch)"
+ - "Michael Aldridge (@the-maldridge)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Name of the package to install, upgrade, or remove.
+ aliases: [pkg,package]
+ type: list
+ elements: str
+ state:
+ description:
+ - Desired state of the package.
+ default: "present"
+ choices: ["present", "absent", "latest", "installed", "removed"]
+ type: str
+ recurse:
+ description:
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
+ type: bool
+ default: false
+ update_cache:
+ description:
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
+ type: bool
+ default: true
+ upgrade:
+ description:
+      - Whether or not to upgrade the whole system.
+ type: bool
+ default: false
+ upgrade_xbps:
+ description:
+ - Whether or not to upgrade the xbps package when necessary.
+ Before installing new packages,
+ xbps requires the user to update the xbps package itself.
+ Thus when this option is set to C(false),
+ upgrades and installations will fail when xbps is not up to date.
+ type: bool
+ default: true
+ version_added: '0.2.0'
+'''
+
+EXAMPLES = '''
+- name: Install package foo (automatically updating the xbps package if needed)
+ community.general.xbps:
+ name: foo
+ state: present
+
+- name: Upgrade package foo
+ community.general.xbps:
+ name: foo
+ state: latest
+ update_cache: true
+
+- name: Remove packages foo and bar
+ community.general.xbps:
+ name:
+ - foo
+ - bar
+ state: absent
+
+- name: Recursively remove package foo
+ community.general.xbps:
+ name: foo
+ state: absent
+ recurse: true
+
+- name: Update package cache
+ community.general.xbps:
+ update_cache: true
+
+- name: Upgrade packages
+ community.general.xbps:
+ upgrade: true
+
+- name: Install a package, failing if the xbps package is out of date
+ community.general.xbps:
+ name: foo
+ state: present
+ upgrade_xbps: false
+'''
+
+RETURN = '''
+msg:
+ description: Message about results
+ returned: success
+ type: str
+ sample: "System Upgraded"
+packages:
+ description: Packages that are affected/would be affected
+ type: list
+ sample: ["ansible"]
+ returned: success
+'''
+
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def is_installed(xbps_output):
+ """Returns package install state"""
+    return bool(xbps_output)
+
+
+def query_package(module, xbps_path, name, state="present"):
+ """Returns Package info"""
+ if state == "present":
+ lcmd = "%s %s" % (xbps_path['query'], name)
+ lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
+ if not is_installed(lstdout):
+ # package is not installed locally
+ return False, False
+
+ rcmd = "%s -Sun" % (xbps_path['install'])
+ rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
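+    # An exit code of 17 from this dry-run is treated like 0 here; rc 16
+    # (xbps itself out of date) is handled by the install/upgrade paths.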
+ if rrc == 0 or rrc == 17:
+ """Return True to indicate that the package is installed locally,
+ and the result of the version number comparison to determine if the
+ package is up-to-date"""
+ return True, name not in rstdout
+
+ return False, False
+
+
+def update_package_db(module, xbps_path):
+ """Returns True if update_package_db changed"""
+ cmd = "%s -S" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="Could not update package db")
+ if "avg rate" in stdout:
+ return True
+ else:
+ return False
+
+
+def upgrade_xbps(module, xbps_path):
+ cmdupgradexbps = "%s -uy xbps" % (xbps_path['install'])
+ rc, stdout, stderr = module.run_command(cmdupgradexbps, check_rc=False)
+ if rc != 0:
+ module.fail_json(msg='Could not upgrade xbps itself')
+
+
+def upgrade(module, xbps_path):
+ """Returns true is full upgrade succeeds"""
+ cmdupgrade = "%s -uy" % (xbps_path['install'])
+ cmdneedupgrade = "%s -un" % (xbps_path['install'])
+
+ rc, stdout, stderr = module.run_command(cmdneedupgrade, check_rc=False)
+ if rc == 0:
+ if len(stdout.splitlines()) == 0:
+ module.exit_json(changed=False, msg='Nothing to upgrade')
+ elif module.check_mode:
+ module.exit_json(changed=True, msg='Would have performed upgrade')
+ else:
+ rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
+ if rc == 0:
+ module.exit_json(changed=True, msg='System upgraded')
+ elif rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-upgrade again
+ module.params['upgrade_xbps'] = False
+ upgrade(module, xbps_path)
+ else:
+ module.fail_json(msg="Could not upgrade")
+ else:
+ module.fail_json(msg="Could not upgrade")
+
+
+def remove_packages(module, xbps_path, packages):
+ """Returns true if package removal succeeds"""
+ changed_packages = []
+ # Using a for loop in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ installed, updated = query_package(module, xbps_path, package)
+ if not installed:
+ continue
+
+ cmd = "%s -y %s" % (xbps_path['remove'], package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ changed_packages.append(package)
+
+ if len(changed_packages) > 0:
+
+ module.exit_json(changed=True, msg="removed %s package(s)" %
+ len(changed_packages), packages=changed_packages)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, xbps_path, state, packages):
+ """Returns true if package install succeeds."""
+ toInstall = []
+    for package in packages:
+        # If the package is installed and state == present, or state == latest
+        # and it is up to date, then skip it.
+ installed, updated = query_package(module, xbps_path, package)
+ if installed and (state == 'present' or
+ (state == 'latest' and updated)):
+ continue
+
+ toInstall.append(package)
+
+ if len(toInstall) == 0:
+ module.exit_json(changed=False, msg="Nothing to Install")
+
+ cmd = "%s -y %s" % (xbps_path['install'], " ".join(toInstall))
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+
+ if rc == 16 and module.params['upgrade_xbps']:
+ upgrade_xbps(module, xbps_path)
+ # avoid loops by not trying self-update again
+ module.params['upgrade_xbps'] = False
+ install_packages(module, xbps_path, state, packages)
+ elif rc != 0 and not (state == 'latest' and rc == 17):
+ module.fail_json(msg="failed to install %s packages(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+ module.exit_json(changed=True, msg="installed %s package(s)"
+ % (len(toInstall)),
+ packages=toInstall)
+
+
+def check_packages(module, xbps_path, packages, state):
+ """Returns change status of command"""
+ would_be_changed = []
+ for package in packages:
+ installed, updated = query_package(module, xbps_path, package)
+ if ((state in ["present", "latest"] and not installed) or
+ (state == "absent" and installed) or
+ (state == "latest" and not updated)):
+ would_be_changed.append(package)
+ if would_be_changed:
+ if state == "absent":
+ state = "removed"
+ module.exit_json(changed=True, msg="%s package(s) would be %s" % (
+ len(would_be_changed), state),
+ packages=would_be_changed)
+ else:
+ module.exit_json(changed=False, msg="package(s) already %s" % state,
+ packages=[])
+
+
+def update_cache(module, xbps_path, upgrade_planned):
+ """Update package cache"""
+ if module.check_mode:
+ if upgrade_planned:
+ return
+ module.exit_json(
+ changed=True, msg='Would have updated the package cache'
+ )
+ changed = update_package_db(module, xbps_path)
+ if not upgrade_planned:
+ module.exit_json(changed=changed, msg=(
+ 'Updated the package master lists' if changed
+ else 'Package list already up to date'
+ ))
+
+
+def main():
+ """Returns, calling appropriate command"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(default=None, aliases=['pkg', 'package'], type='list', elements='str'),
+ state=dict(default='present', choices=['present', 'installed',
+ 'latest', 'absent',
+ 'removed']),
+ recurse=dict(default=False, type='bool'),
+ upgrade=dict(default=False, type='bool'),
+ update_cache=dict(default=True, type='bool'),
+ upgrade_xbps=dict(default=True, type='bool'),
+ ),
+ required_one_of=[['name', 'update_cache', 'upgrade']],
+ supports_check_mode=True)
+
+ xbps_path = dict()
+ xbps_path['install'] = module.get_bin_path('xbps-install', True)
+ xbps_path['query'] = module.get_bin_path('xbps-query', True)
+ xbps_path['remove'] = module.get_bin_path('xbps-remove', True)
+
+ if not os.path.exists(xbps_path['install']):
+ module.fail_json(msg="cannot find xbps, in path %s"
+ % (xbps_path['install']))
+
+ p = module.params
+
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
+ if p['update_cache']:
+ update_cache(module, xbps_path, (p['name'] or p['upgrade']))
+
+ if p['upgrade']:
+ upgrade(module, xbps_path)
+
+ if p['name']:
+ pkgs = p['name']
+
+ if module.check_mode:
+ check_packages(module, xbps_path, pkgs, p['state'])
+
+ if p['state'] in ['present', 'latest']:
+ install_packages(module, xbps_path, p['state'], pkgs)
+ elif p['state'] == 'absent':
+ remove_packages(module, xbps_path, pkgs)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
new file mode 100644
index 000000000..494ea061e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xcc_redfish_command.py
@@ -0,0 +1,795 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: xcc_redfish_command
+short_description: Manages Lenovo Out-Of-Band controllers using Redfish APIs
+version_added: 2.4.0
+description:
+ - Builds Redfish URIs locally and sends them to remote OOB controllers to
+    perform an action, get information back, or update a configuration attribute.
+ - Manages virtual media.
+ - Supports getting information back via GET method.
+ - Supports updating a configuration attribute via PATCH method.
+ - Supports performing an action via POST method.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ category:
+ required: true
+ description:
+ - Category to execute on OOB controller.
+ type: str
+ command:
+ required: true
+ description:
+ - List of commands to execute on OOB controller.
+ type: list
+ elements: str
+ baseuri:
+ required: true
+ description:
+ - Base URI of OOB controller.
+ type: str
+ username:
+ description:
+ - Username for authentication with OOB controller.
+ type: str
+ password:
+ description:
+ - Password for authentication with OOB controller.
+ type: str
+ auth_token:
+ description:
+      - Security token for authentication with OOB controller.
+ type: str
+ timeout:
+ description:
+ - Timeout in seconds for URL requests to OOB controller.
+ default: 10
+ type: int
+ resource_id:
+ required: false
+ description:
+ - The ID of the System, Manager or Chassis to modify.
+ type: str
+ virtual_media:
+ required: false
+ description:
+ - The options for VirtualMedia commands.
+ type: dict
+ suboptions:
+ media_types:
+ description:
+ - The list of media types appropriate for the image.
+ type: list
+ elements: str
+ default: []
+ image_url:
+ description:
+ - The URL of the image to insert or eject.
+ type: str
+ inserted:
+ description:
+ - Indicates if the image is treated as inserted on command completion.
+ type: bool
+ default: true
+ write_protected:
+ description:
+ - Indicates if the media is treated as write-protected.
+ type: bool
+ default: true
+ username:
+ description:
+ - The username for accessing the image URL.
+ type: str
+ password:
+ description:
+ - The password for accessing the image URL.
+ type: str
+ transfer_protocol_type:
+ description:
+ - The network protocol to use with the image.
+ type: str
+ transfer_method:
+ description:
+ - The transfer method to use with the image.
+ type: str
+ resource_uri:
+ required: false
+ description:
+      - The resource URI to GET, PATCH, or POST.
+ type: str
+ request_body:
+ required: false
+ description:
+      - The request body to PATCH or POST.
+ type: dict
+
+author: "Yuyan Pan (@panyy3)"
+'''
+
+EXAMPLES = '''
+ - name: Insert Virtual Media
+ community.general.xcc_redfish_command:
+ category: Manager
+ command: VirtualMediaInsert
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: "http://example.com/images/SomeLinux-current.iso"
+ media_types:
+ - CD
+ - DVD
+ resource_id: "1"
+
+ - name: Eject Virtual Media
+ community.general.xcc_redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ virtual_media:
+ image_url: "http://example.com/images/SomeLinux-current.iso"
+ resource_id: "1"
+
+ - name: Eject all Virtual Media
+ community.general.xcc_redfish_command:
+ category: Manager
+ command: VirtualMediaEject
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_id: "1"
+
+ - name: Get ComputeSystem Oem property SystemStatus via GetResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: GetResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Systems/1"
+ register: result
+ - ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.data.Oem.Lenovo.SystemStatus }}"
+
+ - name: Get Oem DNS setting via GetResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: GetResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.data }}"
+
+ - name: Get Lenovo FoD key collection resource via GetCollectionResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: GetCollectionResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Managers/1/Oem/Lenovo/FoD/Keys"
+ register: result
+
+ - name: Print fetched information
+ ansible.builtin.debug:
+ msg: "{{ result.redfish_facts.data_list }}"
+
+ - name: Update ComputeSystem property AssetTag via PatchResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: PatchResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Systems/1"
+ request_body:
+ AssetTag: "new_asset_tag"
+
+ - name: Perform BootToBIOSSetup action via PostResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: PostResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.BootToBIOSSetup"
+ request_body: {}
+
+ - name: Perform SecureBoot.ResetKeys action via PostResource command
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: PostResource
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ resource_uri: "/redfish/v1/Systems/1/SecureBoot/Actions/SecureBoot.ResetKeys"
+ request_body:
+ ResetKeysType: DeleteAllKeys
+
+ - name: Create session
+ community.general.redfish_command:
+ category: Sessions
+ command: CreateSession
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result
+
+ - name: Update Manager DateTimeLocalOffset property using security token for auth
+ community.general.xcc_redfish_command:
+ category: Raw
+ command: PatchResource
+ baseuri: "{{ baseuri }}"
+ auth_token: "{{ result.session.token }}"
+ resource_uri: "/redfish/v1/Managers/1"
+ request_body:
+ DateTimeLocalOffset: "+08:00"
+
  - name: Delete session using security token created by CreateSession above
+ community.general.redfish_command:
+ category: Sessions
+ command: DeleteSession
+ baseuri: "{{ baseuri }}"
+ auth_token: "{{ result.session.token }}"
+ session_uri: "{{ result.session.uri }}"
+'''
+
+RETURN = '''
+msg:
+ description: A message related to the performed action(s).
+ returned: when failure or action/update success
+ type: str
+ sample: "Action was successful"
+redfish_facts:
+ description: Resource content.
+ returned: when command == GetResource or command == GetCollectionResource
+ type: dict
+ sample: '{
+ "redfish_facts": {
+ "data": {
+ "@odata.etag": "\"3179bf00d69f25a8b3c\"",
+ "@odata.id": "/redfish/v1/Managers/1/NetworkProtocol/Oem/Lenovo/DNS",
+ "@odata.type": "#LenovoDNS.v1_0_0.LenovoDNS",
+ "DDNS": [
+ {
+ "DDNSEnable": true,
+ "DomainName": "",
+ "DomainNameSource": "DHCP"
+ }
+ ],
+ "DNSEnable": true,
+ "Description": "This resource is used to represent a DNS resource for a Redfish implementation.",
+ "IPv4Address1": "10.103.62.178",
+ "IPv4Address2": "0.0.0.0",
+ "IPv4Address3": "0.0.0.0",
+ "IPv6Address1": "::",
+ "IPv6Address2": "::",
+ "IPv6Address3": "::",
+ "Id": "LenovoDNS",
+ "PreferredAddresstype": "IPv4"
+ },
+ "ret": true
+ }
+ }'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils
+
+
+class XCCRedfishUtils(RedfishUtils):
+ @staticmethod
+ def _find_empty_virt_media_slot(resources, media_types,
+ media_match_strict=True):
+ for uri, data in resources.items():
+ # check MediaTypes
+ if 'MediaTypes' in data and media_types:
+ if not set(media_types).intersection(set(data['MediaTypes'])):
+ continue
+ else:
+ if media_match_strict:
+ continue
+ if 'RDOC' in uri:
+ continue
+ if 'Remote' in uri:
+ continue
+ # if ejected, 'Inserted' should be False and 'ImageName' cleared
+ if (not data.get('Inserted', False) and
+ not data.get('ImageName')):
+ return uri, data
+ return None, None
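+
+    # Illustrative note (hypothetical data shapes): given resources such as
+    #   {'/redfish/v1/Managers/1/VirtualMedia/EXT1':
+    #       {'MediaTypes': ['CD', 'DVD'], 'Inserted': False, 'ImageName': ''}}
+    # and media_types=['CD'], the method above returns that URI/data pair:
+    # the media types intersect, the URI is not an RDOC/Remote slot, and the
+    # slot is empty (neither inserted nor holding an image name).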
+
+ def virtual_media_eject_one(self, image_url):
+ # read the VirtualMedia resources from systems
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ # read the VirtualMedia resources from manager
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # find the VirtualMedia resource to eject
+ uri, data, eject = self._find_virt_media_to_eject(resources, image_url)
+ if uri and eject:
+ if ('Actions' not in data or
+ '#VirtualMedia.EjectMedia' not in data['Actions']):
+ # try to eject via PATCH if no EjectMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.EjectMedia'}
+ return self.virtual_media_eject_via_patch(uri)
+ else:
+ # POST to the EjectMedia Action
+ action = data['Actions']['#VirtualMedia.EjectMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI property missing from Action "
+ "#VirtualMedia.EjectMedia"}
+ action_uri = action['target']
+ # empty payload for Eject action
+ payload = {}
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri,
+ payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia ejected"}
+ elif uri and not eject:
+ # already ejected: return success but changed=False
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia image '%s' already ejected" %
+ image_url}
+ else:
+ # return failure (no resources matching image_url found)
+ return {'ret': False, 'changed': False,
+ 'msg': "No VirtualMedia resource found with image '%s' "
+ "inserted" % image_url}
+
+ def virtual_media_eject(self, options):
+ if options:
+ image_url = options.get('image_url')
+ if image_url: # eject specified one media
+ return self.virtual_media_eject_one(image_url)
+
+ # eject all inserted media when no image_url specified
+ # read the VirtualMedia resources from systems
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ # read the VirtualMedia resources from manager
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ # read all the VirtualMedia resources
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # eject all inserted media one by one
+ ejected_media_list = []
+ for uri, data in resources.items():
+ if data.get('Image') and data.get('Inserted', True):
+ returndict = self.virtual_media_eject_one(data.get('Image'))
+ if not returndict['ret']:
+ return returndict
+ ejected_media_list.append(data.get('Image'))
+
+ if len(ejected_media_list) == 0:
+ # no media inserted: return success but changed=False
+ return {'ret': True, 'changed': False,
+ 'msg': "No VirtualMedia image inserted"}
+ else:
+ return {'ret': True, 'changed': True,
+ 'msg': "VirtualMedia %s ejected" % str(ejected_media_list)}
+
+ def virtual_media_insert(self, options):
+ param_map = {
+ 'Inserted': 'inserted',
+ 'WriteProtected': 'write_protected',
+ 'UserName': 'username',
+ 'Password': 'password',
+ 'TransferProtocolType': 'transfer_protocol_type',
+ 'TransferMethod': 'transfer_method'
+ }
+ image_url = options.get('image_url')
+ if not image_url:
+ return {'ret': False,
+ 'msg': "image_url option required for VirtualMediaInsert"}
+ media_types = options.get('media_types')
+
+ # read the VirtualMedia resources from systems
+ response = self.get_request(self.root_uri + self.systems_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ # read the VirtualMedia resources from manager
+ response = self.get_request(self.root_uri + self.manager_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ if 'VirtualMedia' not in data:
+ return {'ret': False, 'msg': "VirtualMedia resource not found"}
+ virt_media_uri = data["VirtualMedia"]["@odata.id"]
+ response = self.get_request(self.root_uri + virt_media_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ virt_media_list = []
+ for member in data[u'Members']:
+ virt_media_list.append(member[u'@odata.id'])
+ resources, headers = self._read_virt_media_resources(virt_media_list)
+
+ # see if image already inserted; if so, nothing to do
+ if self._virt_media_image_inserted(resources, image_url):
+ return {'ret': True, 'changed': False,
+ 'msg': "VirtualMedia '%s' already inserted" % image_url}
+
+ # find an empty slot to insert the media
+ # try first with strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=True)
+ if not uri:
+ # if not found, try without strict media_type matching
+ uri, data = self._find_empty_virt_media_slot(
+ resources, media_types, media_match_strict=False)
+ if not uri:
+ return {'ret': False,
+ 'msg': "Unable to find an available VirtualMedia resource "
+ "%s" % ('supporting ' + str(media_types)
+ if media_types else '')}
+
+ # confirm InsertMedia action found
+ if ('Actions' not in data or
+ '#VirtualMedia.InsertMedia' not in data['Actions']):
+ # try to insert via PATCH if no InsertMedia action found
+ h = headers[uri]
+ if 'allow' in h:
+ methods = [m.strip() for m in h.get('allow').split(',')]
+ if 'PATCH' not in methods:
+ # if Allow header present and PATCH missing, return error
+ return {'ret': False,
+ 'msg': "%s action not found and PATCH not allowed"
+ % '#VirtualMedia.InsertMedia'}
+ return self.virtual_media_insert_via_patch(options, param_map,
+ uri, data)
+
+ # get the action property
+ action = data['Actions']['#VirtualMedia.InsertMedia']
+ if 'target' not in action:
+ return {'ret': False,
+ 'msg': "target URI missing from Action "
+ "#VirtualMedia.InsertMedia"}
+ action_uri = action['target']
+ # get ActionInfo or AllowableValues
+ ai = self._get_all_action_info_values(action)
+ # construct payload
+ payload = self._insert_virt_media_payload(options, param_map, data, ai)
+ # POST to action
+ response = self.post_request(self.root_uri + action_uri, payload)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True, 'msg': "VirtualMedia inserted"}
+
+ def raw_get_resource(self, resource_uri):
+ if resource_uri is None:
+ return {'ret': False, 'msg': "resource_uri is missing"}
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ return {'ret': True, 'data': data}
+
+ def raw_get_collection_resource(self, resource_uri):
+ if resource_uri is None:
+ return {'ret': False, 'msg': "resource_uri is missing"}
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ if 'Members' not in response['data']:
+ return {'ret': False, 'msg': "Specified resource_uri doesn't have Members property"}
+ member_list = [i['@odata.id'] for i in response['data'].get('Members', [])]
+
+ # get member resource one by one
+ data_list = []
+ for member_uri in member_list:
+ uri = self.root_uri + member_uri
+ response = self.get_request(uri)
+ if response['ret'] is False:
+ return response
+ data = response['data']
+ data_list.append(data)
+
+ return {'ret': True, 'data_list': data_list}
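+
+    # Note: each entry of Members is fetched with its own GET, so data_list
+    # contains the full body of every member resource (for example, every
+    # key resource under a FoD Keys collection), not just '@odata.id' links.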
+
+ def raw_patch_resource(self, resource_uri, request_body):
+ if resource_uri is None:
+ return {'ret': False, 'msg': "resource_uri is missing"}
+ if request_body is None:
+ return {'ret': False, 'msg': "request_body is missing"}
+        # check whether the resource_uri exists
+ response = self.get_request(self.root_uri + resource_uri)
+ if response['ret'] is False:
+ return response
+ original_etag = response['data']['@odata.etag']
+
+ # check validity of keys in request_body
+ data = response['data']
+ for key in request_body.keys():
+ if key not in data:
+ return {'ret': False, 'msg': "Key %s not found. Supported key list: %s" % (key, str(data.keys()))}
+
+ # perform patch
+ response = self.patch_request(self.root_uri + resource_uri, request_body)
+ if response['ret'] is False:
+ return response
+
+ # check whether changed or not
+ current_etag = ''
+ if 'data' in response and '@odata.etag' in response['data']:
+ current_etag = response['data']['@odata.etag']
+ if current_etag != original_etag:
+ return {'ret': True, 'changed': True}
+ else:
+ return {'ret': True, 'changed': False}
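+
+    # Illustrative note: change detection relies on '@odata.etag'. The etag
+    # carried by the PATCH response (when present) is compared with the one
+    # read before patching; a differing etag reports changed=True. If the
+    # PATCH response carries no etag, changed=True is reported as well.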
+
+ def raw_post_resource(self, resource_uri, request_body):
+ if resource_uri is None:
+ return {'ret': False, 'msg': "resource_uri is missing"}
+ if '/Actions/' not in resource_uri:
+ return {'ret': False, 'msg': "Bad uri %s. Keyword /Actions/ should be included in uri" % resource_uri}
+ if request_body is None:
+ return {'ret': False, 'msg': "request_body is missing"}
+ # get action base uri data for further checking
+ action_base_uri = resource_uri.split('/Actions/')[0]
+ response = self.get_request(self.root_uri + action_base_uri)
+ if response['ret'] is False:
+ return response
+ if 'Actions' not in response['data']:
+ return {'ret': False, 'msg': "Actions property not found in %s" % action_base_uri}
+
+        # check resource_uri against the target URIs found in the action base URI data
+ action_found = False
+ action_info_uri = None
+ action_target_uri_list = []
+ for key in response['data']['Actions'].keys():
+ if action_found:
+ break
+ if not key.startswith('#'):
+ continue
+ if 'target' in response['data']['Actions'][key]:
+ if resource_uri == response['data']['Actions'][key]['target']:
+ action_found = True
+ if '@Redfish.ActionInfo' in response['data']['Actions'][key]:
+ action_info_uri = response['data']['Actions'][key]['@Redfish.ActionInfo']
+ else:
+ action_target_uri_list.append(response['data']['Actions'][key]['target'])
+ if not action_found and 'Oem' in response['data']['Actions']:
+ for key in response['data']['Actions']['Oem'].keys():
+ if action_found:
+ break
+ if not key.startswith('#'):
+ continue
+ if 'target' in response['data']['Actions']['Oem'][key]:
+ if resource_uri == response['data']['Actions']['Oem'][key]['target']:
+ action_found = True
+ if '@Redfish.ActionInfo' in response['data']['Actions']['Oem'][key]:
+ action_info_uri = response['data']['Actions']['Oem'][key]['@Redfish.ActionInfo']
+ else:
+ action_target_uri_list.append(response['data']['Actions']['Oem'][key]['target'])
+
+ if not action_found:
+ return {'ret': False,
+                    'msg': 'Specified resource_uri is not a supported action target URI. Please specify a supported target URI instead. Supported URIs: %s'
+ % (str(action_target_uri_list))}
+
+ # check request_body with parameter name defined by @Redfish.ActionInfo
+ if action_info_uri is not None:
+ response = self.get_request(self.root_uri + action_info_uri)
+ if response['ret'] is False:
+ return response
+ for key in request_body.keys():
+ key_found = False
+ for para in response['data']['Parameters']:
+ if key == para['Name']:
+ key_found = True
+ break
+ if not key_found:
+ return {'ret': False,
+ 'msg': 'Invalid property %s found in request_body. Please refer to @Redfish.ActionInfo Parameters: %s'
+ % (key, str(response['data']['Parameters']))}
+
+ # perform post
+ response = self.post_request(self.root_uri + resource_uri, request_body)
+ if response['ret'] is False:
+ return response
+ return {'ret': True, 'changed': True}
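+
+    # Illustrative walk-through (hypothetical URI): for resource_uri
+    # '/redfish/v1/Systems/1/Actions/ComputerSystem.Reset', the base URI
+    # '/redfish/v1/Systems/1' is fetched and its Actions (and Actions Oem)
+    # entries are scanned for a matching 'target'. If the matched action
+    # advertises '@Redfish.ActionInfo', every request_body key must match a
+    # parameter 'Name' defined there before the POST is attempted.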
+
+
+# More will be added as module features are expanded
+CATEGORY_COMMANDS_ALL = {
+ "Manager": ["VirtualMediaInsert",
+ "VirtualMediaEject"],
+ "Raw": ["GetResource",
+ "GetCollectionResource",
+ "PatchResource",
+ "PostResource"]
+}
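+
+# For example, category=Raw accepts command=[GetResource], while
+# category=Manager only accepts the two VirtualMedia commands; anything else
+# fails validation in main() below.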
+
+
+def main():
+ result = {}
+ module = AnsibleModule(
+ argument_spec=dict(
+ category=dict(required=True),
+ command=dict(required=True, type='list', elements='str'),
+ baseuri=dict(required=True),
+ username=dict(),
+ password=dict(no_log=True),
+ auth_token=dict(no_log=True),
+ timeout=dict(type='int', default=10),
+ resource_id=dict(),
+ virtual_media=dict(
+ type='dict',
+ options=dict(
+ media_types=dict(type='list', elements='str', default=[]),
+ image_url=dict(),
+ inserted=dict(type='bool', default=True),
+ write_protected=dict(type='bool', default=True),
+ username=dict(),
+ password=dict(no_log=True),
+ transfer_protocol_type=dict(),
+ transfer_method=dict(),
+ )
+ ),
+ resource_uri=dict(),
+ request_body=dict(
+ type='dict',
+ ),
+ ),
+ required_together=[
+ ('username', 'password'),
+ ],
+ required_one_of=[
+ ('username', 'auth_token'),
+ ],
+ mutually_exclusive=[
+ ('username', 'auth_token'),
+ ],
+ supports_check_mode=False
+ )
+
+ category = module.params['category']
+ command_list = module.params['command']
+
+ # admin credentials used for authentication
+ creds = {'user': module.params['username'],
+ 'pswd': module.params['password'],
+ 'token': module.params['auth_token']}
+
+ # timeout
+ timeout = module.params['timeout']
+
+ # System, Manager or Chassis ID to modify
+ resource_id = module.params['resource_id']
+
+ # VirtualMedia options
+ virtual_media = module.params['virtual_media']
+
+ # resource_uri
+ resource_uri = module.params['resource_uri']
+
+ # request_body
+ request_body = module.params['request_body']
+
+ # Build root URI
+ root_uri = "https://" + module.params['baseuri']
+ rf_utils = XCCRedfishUtils(creds, root_uri, timeout, module, resource_id=resource_id, data_modification=True)
+
+ # Check that Category is valid
+ if category not in CATEGORY_COMMANDS_ALL:
+ module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
+
+ # Check that all commands are valid
+ for cmd in command_list:
+ # Fail if even one command given is invalid
+ if cmd not in CATEGORY_COMMANDS_ALL[category]:
+ module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
+
+ # Organize by Categories / Commands
+ if category == "Manager":
+        # The virtual media resource may be located on the Systems service
+ result = rf_utils._find_systems_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+        # The virtual media resource may be located on the Managers service
+ result = rf_utils._find_managers_resource()
+ if result['ret'] is False:
+ module.fail_json(msg=to_native(result['msg']))
+
+ for command in command_list:
+ if command == 'VirtualMediaInsert':
+ result = rf_utils.virtual_media_insert(virtual_media)
+ elif command == 'VirtualMediaEject':
+ result = rf_utils.virtual_media_eject(virtual_media)
+ elif category == "Raw":
+ for command in command_list:
+ if command == 'GetResource':
+ result = rf_utils.raw_get_resource(resource_uri)
+ elif command == 'GetCollectionResource':
+ result = rf_utils.raw_get_collection_resource(resource_uri)
+ elif command == 'PatchResource':
+ result = rf_utils.raw_patch_resource(resource_uri, request_body)
+ elif command == 'PostResource':
+ result = rf_utils.raw_post_resource(resource_uri, request_body)
+
+    # Return data or fail with a proper message
+ if result['ret'] is True:
+        if command in ('GetResource', 'GetCollectionResource'):
+ module.exit_json(redfish_facts=result)
+ else:
+ changed = result.get('changed', True)
+ msg = result.get('msg', 'Action was successful')
+ module.exit_json(changed=changed, msg=msg)
+ else:
+ module.fail_json(msg=to_native(result['msg']))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_facts.py b/ansible_collections/community/general/plugins/modules/xenserver_facts.py
new file mode 100644
index 000000000..9924c4a9e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xenserver_facts.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+short_description: Get facts reported on XenServer
+description:
+  - Reads data out of XenAPI; can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp (@caphrim007)
+ - Robin Lee (@cheese)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options: {}
+'''
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+ community.general.xenserver_facts:
+
+- name: Print running VMs
+ ansible.builtin.debug:
+ msg: "{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+# Which will print:
+#
+# TASK: [Print running VMs] ***********************************************************
+# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+# "item": "Control domain on host: 10.0.13.22",
+# "msg": "Control domain on host: 10.0.13.22"
+# }
+'''
+
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils import distro
+from ansible.module_utils.basic import AnsibleModule
+
+
+class XenServerFacts:
+ def __init__(self):
+ self.codes = {
+ '5.5.0': 'george',
+ '5.6.100': 'oxford',
+ '6.0.0': 'boston',
+ '6.1.0': 'tampa',
+ '6.2.0': 'clearwater'
+ }
+
+ @property
+ def version(self):
+ result = distro.linux_distribution()[1]
+ return result
+
+ @property
+ def codename(self):
+ if self.version in self.codes:
+ result = self.codes[self.version]
+ else:
+ result = None
+
+ return result
+
+
+def get_xenapi_session():
+ session = XenAPI.xapi_local()
+ session.xenapi.login_with_password('', '')
+ return session
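+
+
+# Note: XenAPI.xapi_local() talks to the local xapi socket on the XenServer
+# host itself, which is why empty credentials are accepted by the login
+# above; the module is expected to run on the host being inspected.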
+
+
+def get_networks(session):
+ recs = session.xenapi.network.get_all_records()
+ networks = change_keys(recs, key='name_label')
+ return networks
+
+
+def get_pifs(session):
+ recs = session.xenapi.PIF.get_all_records()
+ pifs = change_keys(recs, key='uuid')
+ xs_pifs = {}
+ devicenums = range(0, 7)
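+    # range(0, 7) covers device numbers 0..6, i.e. eth0..eth6 / bond0..bond6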
+ for pif in pifs.values():
+ for eth in devicenums:
+ interface_name = "eth%s" % (eth)
+ bond_name = interface_name.replace('eth', 'bond')
+ if pif['device'] == interface_name:
+ xs_pifs[interface_name] = pif
+ elif pif['device'] == bond_name:
+ xs_pifs[bond_name] = pif
+ return xs_pifs
+
+
+def get_vlans(session):
+ recs = session.xenapi.VLAN.get_all_records()
+ return change_keys(recs, key='tag')
+
+
+def change_keys(recs, key='uuid', filter_func=None):
+ """
+    Take a XAPI dict and re-key it by the value of recs[ref][key].
+
+    Preserves the original ref in rec['ref'].
+
+ """
+ new_recs = {}
+
+ for ref, rec in recs.items():
+ if filter_func is not None and not filter_func(rec):
+ continue
+
+ for param_name, param_value in rec.items():
+ # param_value may be of type xmlrpc.client.DateTime,
+            # which is not simply convertible to str.
+            # Use the 'value' attribute to get the str value,
+            # following the example in the xmlrpc.client.DateTime documentation.
+ if hasattr(param_value, "value"):
+ rec[param_name] = param_value.value
+ new_recs[rec[key]] = rec
+ new_recs[rec[key]]['ref'] = ref
+
+ return new_recs
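+
+
+# Illustrative note (hypothetical records): with
+#   recs = {'OpaqueRef:1': {'uuid': 'u-1', 'name_label': 'net0'}}
+# change_keys(recs, key='name_label') returns
+#   {'net0': {'uuid': 'u-1', 'name_label': 'net0', 'ref': 'OpaqueRef:1'}}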
+
+
+def get_host(session):
+ """Get the host"""
+ host_recs = session.xenapi.host.get_all()
+ # We only have one host, so just return its entry
+ return session.xenapi.host.get_record(host_recs[0])
+
+
+def get_vms(session):
+ recs = session.xenapi.VM.get_all_records()
+ if not recs:
+ return None
+ vms = change_keys(recs, key='name_label')
+ return vms
+
+
+def get_srs(session):
+ recs = session.xenapi.SR.get_all_records()
+ if not recs:
+ return None
+ srs = change_keys(recs, key='name_label')
+ return srs
+
+
+def main():
+ module = AnsibleModule({}, supports_check_mode=True)
+
+ if not HAVE_XENAPI:
+        module.fail_json(changed=False, msg="the XenAPI Python library is required for this module")
+
+ obj = XenServerFacts()
+ try:
+ session = get_xenapi_session()
+ except XenAPI.Failure as e:
+ module.fail_json(msg='%s' % e)
+
+ data = {
+ 'xenserver_version': obj.version,
+ 'xenserver_codename': obj.codename
+ }
+
+ xs_networks = get_networks(session)
+ xs_pifs = get_pifs(session)
+ xs_vlans = get_vlans(session)
+ xs_vms = get_vms(session)
+ xs_srs = get_srs(session)
+
+ if xs_vlans:
+ data['xs_vlans'] = xs_vlans
+ if xs_pifs:
+ data['xs_pifs'] = xs_pifs
+ if xs_networks:
+ data['xs_networks'] = xs_networks
+
+ if xs_vms:
+ data['xs_vms'] = xs_vms
+
+ if xs_srs:
+ data['xs_srs'] = xs_srs
+
+ module.exit_json(ansible_facts=data)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest.py b/ansible_collections/community/general/plugins/modules/xenserver_guest.py
new file mode 100644
index 000000000..7659ee2ae
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest.py
@@ -0,0 +1,2033 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest
+short_description: Manages virtual machines running on a Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to create new virtual machines from templates or other virtual machines,
+ modify various virtual machine components like network and disk, rename a virtual machine and
+ remove a virtual machine with associated components.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- The minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire the XenAPI Python library, run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside the
+  Citrix Hypervisor/XenServer SDK (downloadable from the Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+  Ansible Control Node to use it. The latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in I(hostname), the module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+  accessing the XenServer host in a trusted environment or use the C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for I(hostname) you have to either import host certificate to your OS certificate store or use I(validate_certs): C(false)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- 'Network configuration inside a guest OS, by using the I(networks.type), I(networks.ip), I(networks.gateway) etc. parameters, is supported on
+  XenServer 7.0 or newer for Windows guests by using the official XenServer Guest agent support for network configuration. The module will try to
+  detect if such support is available and utilize it; otherwise it will use a custom method of configuration via xenstore. Since the XenServer Guest
+  agent only supports None and Static types of network configuration, where None means a DHCP configured interface, the I(networks.type) and I(networks.type6)
+  values C(none) and C(dhcp) have the same effect. More info here:
+ U(https://www.citrix.com/community/citrix-developer/citrix-hypervisor-developer/citrix-hypervisor-developing-products/citrix-hypervisor-staticip.html)'
+- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to the xenstore
+  C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using the C(xenstore ls) and C(xenstore read) tools on \*nix guests or through
+  the WMI interface on Windows guests. They can also be found in the VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
+  to implement boot-time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
+  Take note that for xenstore data to become available inside a guest, a VM restart is needed; hence the module will require a VM restart if any
+  parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
+  useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones. More info here:
+ U(https://support.citrix.com/article/CTX226713)'
+requirements:
+- python >= 2.6
+- XenAPI
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If I(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
+ - If I(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
+ - If I(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
+ - If I(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
+ type: str
+ default: present
+ choices: [ present, absent, poweredon ]
+ name:
+ description:
+ - Name of the VM to work with.
+    - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+    - In case of multiple VMs with the same name, use I(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - VM description.
+ type: str
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ - Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
+ type: str
+ template:
+ description:
+ - Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
+    - Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with the same name are found.
+    - In case of multiple templates/VMs/snapshots with the same name, use I(template_uuid) to uniquely specify the source template.
+ - If VM already exists, this setting will be ignored.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ template_src ]
+ template_uuid:
+ description:
+ - UUID of a template, an existing VM or a snapshot that should be used to create VM.
+ - It is required if template name is not unique.
+ type: str
+ is_template:
+ description:
+ - Convert VM to template.
+ type: bool
+ default: false
+ folder:
+ description:
+ - Destination folder for VM.
+ - This parameter is case sensitive.
+ - 'Example:'
+ - ' folder: /folder1/folder2'
+ type: str
+ hardware:
+ description:
+ - Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
+ type: dict
+ suboptions:
+ num_cpus:
+ description:
+ - Number of CPUs.
+ type: int
+ num_cpu_cores_per_socket:
+ description:
+ - Number of Cores Per Socket. I(num_cpus) has to be a multiple of I(num_cpu_cores_per_socket).
+ type: int
+ memory_mb:
+ description:
+ - Amount of memory in MB.
+ type: int
+ disks:
+ description:
+ - A list of disks to add to VM.
+ - All parameters are case sensitive.
+      - Removing or detaching existing disks of a VM is not supported.
+ - New disks are required to have either a I(size) or one of I(size_[tb,gb,mb,kb,b]) parameters specified.
+ - VM needs to be shut down to reconfigure disk size.
+ type: list
+ elements: dict
+ aliases: [ disk ]
+ suboptions:
+ size:
+ description:
+ - 'Disk size with unit. Unit must be: C(b), C(kb), C(mb), C(gb), C(tb). VM needs to be shut down to reconfigure this parameter.'
+ - If no unit is specified, size is assumed to be in bytes.
+ type: str
+ size_b:
+ description:
+ - Disk size in bytes.
+ type: str
+ size_kb:
+ description:
+ - Disk size in kilobytes.
+ type: str
+ size_mb:
+ description:
+ - Disk size in megabytes.
+ type: str
+ size_gb:
+ description:
+ - Disk size in gigabytes.
+ type: str
+ size_tb:
+ description:
+ - Disk size in terabytes.
+ type: str
+ name:
+ description:
+ - Disk name.
+ type: str
+ aliases: [ name_label ]
+ name_desc:
+ description:
+ - Disk description.
+ type: str
+ sr:
+ description:
+          - Storage Repository to create the disk on. If not specified, the default SR will be used. Cannot be used to move a disk to another SR.
+ type: str
+ sr_uuid:
+ description:
+ - UUID of a SR to create disk on. Use if SR name is not unique.
+ type: str
+ cdrom:
+ description:
+ - A CD-ROM configuration for the VM.
+ - All parameters are case sensitive.
+ type: dict
+ suboptions:
+ type:
+ description:
+ - The type of CD-ROM. With C(none) the CD-ROM device will be present but empty.
+ type: str
+ choices: [ none, iso ]
+ iso_name:
+ description:
+ - 'The file name of an ISO image from one of the XenServer ISO Libraries (implies I(type): C(iso)).'
+ - Required if I(type) is set to C(iso).
+ type: str
+ networks:
+ description:
+ - A list of networks (in the order of the NICs).
+ - All parameters are case sensitive.
+ - Name is required for new NICs. Other parameters are optional in all cases.
+ type: list
+ elements: dict
+ aliases: [ network ]
+ suboptions:
+ name:
+ description:
+ - Name of a XenServer network to attach the network interface to.
+ type: str
+ aliases: [ name_label ]
+ mac:
+ description:
+ - Customize MAC address of the interface.
+ type: str
+ type:
+ description:
+ - Type of IPv4 assignment. Value C(none) means whatever is default for OS.
+          - On some operating systems it could be a DHCP configured interface (e.g. Windows) or an unconfigured interface (e.g. Linux).
+ type: str
+ choices: [ none, dhcp, static ]
+ ip:
+ description:
+ - 'Static IPv4 address (implies I(type): C(static)). Can include prefix in format C(<IPv4 address>/<prefix>) instead of using C(netmask).'
+ type: str
+ netmask:
+ description:
+ - Static IPv4 netmask required for I(ip) if prefix is not specified.
+ type: str
+ gateway:
+ description:
+ - Static IPv4 gateway.
+ type: str
+ type6:
+ description:
+ - Type of IPv6 assignment. Value C(none) means whatever is default for OS.
+ type: str
+ choices: [ none, dhcp, static ]
+ ip6:
+ description:
+ - 'Static IPv6 address (implies I(type6): C(static)) with prefix in format C(<IPv6 address>/<prefix>).'
+ type: str
+ gateway6:
+ description:
+ - Static IPv6 gateway.
+ type: str
+ home_server:
+ description:
+ - Name of a XenServer host that will be a Home Server for the VM.
+ - This parameter is case sensitive.
+ type: str
+ custom_params:
+ description:
+ - Define a list of custom VM params to set on VM.
+      - Useful for advanced users familiar with managing VM params through the xe CLI.
+ - A custom value object takes two fields I(key) and I(value) (see example below).
+ type: list
+ elements: dict
+ suboptions:
+ key:
+ description:
+ - VM param name.
+ type: str
+ required: true
+ value:
+ description:
+ - VM param value.
+ type: raw
+ required: true
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM. If I(state) is set to C(absent), this parameter is ignored.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: false
+ state_change_timeout:
+ description:
+      - 'By default, the module will wait indefinitely for the VM to acquire an IP address if I(wait_for_ip_address): C(true).'
+      - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+      - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+ linked_clone:
+ description:
+      - Whether to create a Linked Clone from the template, existing VM or snapshot. If not, a full copy will be created.
+ - This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
+ type: bool
+ default: false
+ force:
+ description:
+ - Ignore warnings and complete the actions.
+      - This parameter is useful for removing a VM in running state or reconfiguring VM params that require the VM to be shut down.
+ type: bool
+ default: false
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Create a VM from a template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: false
+ folder: /testvms
+ name: testvm_2
+ state: poweredon
+ template: CentOS 7
+ disks:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ num_cpus: 6
+ num_cpu_cores_per_socket: 3
+ memory_mb: 512
+ cdrom:
+ type: iso
+ iso_name: guest-tools.iso
+ networks:
+ - name: VM Network
+ mac: aa:bb:dd:aa:00:14
+ wait_for_ip_address: true
+ delegate_to: localhost
+ register: deploy
+
+- name: Create a VM template
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ validate_certs: false
+ folder: /testvms
+ name: testvm_6
+ is_template: true
+ disk:
+ - size_gb: 10
+ sr: my_sr
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ delegate_to: localhost
+ register: deploy
+
+- name: Rename a VM (requires the VM's UUID)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ name: new_name
+ state: present
+ delegate_to: localhost
+
+- name: Remove a VM by UUID
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: 421e4592-c069-924d-ce20-7e7533fab926
+ state: absent
+ delegate_to: localhost
+
+- name: Modify custom params (boot order)
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_8
+ state: present
+ custom_params:
+ - key: HVM_boot_params
+ value: { "order": "ndc" }
+ delegate_to: localhost
+
+- name: Customize network parameters
+ community.general.xenserver_guest:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_10
+ networks:
+ - name: VM Network
+ ip: 192.168.1.100/24
+ gateway: 192.168.1.1
+ - type: dhcp
+ delegate_to: localhost
+'''
+
+RETURN = r'''
+instance:
+  description: Metadata about the VM.
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+changes:
+  description: Changes detected or made to the VM.
+ returned: always
+ type: list
+ sample: [
+ {
+ "hardware": [
+ "num_cpus"
+ ]
+ },
+ {
+ "disks_changed": [
+ [],
+ [
+ "size"
+ ]
+ ]
+ },
+ {
+ "disks_new": [
+ {
+ "name": "new-disk",
+ "name_desc": "",
+ "position": 2,
+ "size_gb": "4",
+ "vbd_userdevice": "2"
+ }
+ ]
+ },
+ {
+ "cdrom": [
+ "type",
+ "iso_name"
+ ]
+ },
+ {
+ "networks_changed": [
+ [
+ "mac"
+ ],
+ ]
+ },
+ {
+ "networks_new": [
+ {
+ "name": "Pool-wide network associated with eth2",
+ "position": 1,
+ "vif_device": "1"
+ }
+ ]
+ },
+ "need_poweredoff"
+ ]
+'''
+
+import re
+
+HAS_XENAPI = False
+try:
+ import XenAPI
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.network import is_mac
+from ansible.module_utils import six
+from ansible_collections.community.general.plugins.module_utils.xenserver import (
+ xenserver_common_argument_spec, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address, is_valid_ip_addr, is_valid_ip_netmask,
+ is_valid_ip_prefix, ip_prefix_to_netmask, ip_netmask_to_prefix,
+ is_valid_ip6_addr, is_valid_ip6_prefix)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def exists(self):
+ """Returns True if VM exists, else False."""
+        return self.vm_ref is not None
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+ def deploy(self):
+ """Deploys new VM from template."""
+ # Safety check.
+ if self.exists():
+ self.module.fail_json(msg="Called deploy on existing VM!")
+
+ try:
+ templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
+ msg_prefix="VM deploy: ")
+
+ # Is this an existing running VM?
+ if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
+ self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
+
+ # Find a SR we can use for VM.copy(). We use SR of the first disk
+ # if specified or default SR if not specified.
+ disk_params_list = self.module.params['disks']
+
+ sr_ref = None
+
+ if disk_params_list:
+ disk_params = disk_params_list[0]
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM deploy disks[0]: ")
+
+ if not sr_ref:
+ if self.default_sr_ref != "OpaqueRef:NULL":
+ sr_ref = self.default_sr_ref
+ else:
+ self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
+
+            # The VM name must not be an empty string.
+ if self.module.params['name'] is not None and not self.module.params['name']:
+ self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Now we can instantiate VM. We use VM.clone for linked_clone and
+ # VM.copy for non linked_clone.
+ if self.module.params['linked_clone']:
+ self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
+ else:
+ self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
+
+ # Description is copied over from template so we reset it.
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
+
+ # If template is one of built-in XenServer templates, we have to
+ # do some additional steps.
+ # Note: VM.get_is_default_template() is supported from XenServer 7.2
+ # onward so we use an alternative way.
+ templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
+
+ if "default_template" in templ_other_config and templ_other_config['default_template']:
+                # other_config of built-in XenServer templates has a key called
+ # 'disks' with the following content:
+ # disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
+                # This value of other_config is copied to the cloned or copied VM and
+                # it prevents provisioning of the VM because sr is not specified and
+ # XAPI returns an error. To get around this, we remove the
+ # 'disks' key and add disks to VM later ourselves.
+ vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
+
+ if "disks" in vm_other_config:
+ del vm_other_config['disks']
+
+ self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
+
+ # At this point we have VM ready for provisioning.
+ self.xapi_session.xenapi.VM.provision(self.vm_ref)
+
+ # After provisioning we can prepare vm_params for reconfigure().
+ self.gather_params()
+
+ # VM is almost ready. We just need to reconfigure it...
+ self.reconfigure()
+
+ # Power on VM if needed.
+ if self.module.params['state'] == "poweredon":
+ self.set_power_state("poweredon")
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def reconfigure(self):
+ """Reconfigures an existing VM.
+
+ Returns:
+ list: parameters that were reconfigured.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called reconfigure on non existing VM!")
+
+ config_changes = self.get_changes()
+
+ vm_power_state_save = self.vm_params['power_state'].lower()
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return config_changes
+
+ if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
+ self.set_power_state("shutdownguest")
+
+ try:
+ for change in config_changes:
+ if isinstance(change, six.string_types):
+ if change == "name":
+ self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
+ elif change == "name_desc":
+ self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
+ elif change == "folder":
+ self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
+
+ if self.module.params['folder']:
+ self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
+ elif change == "home_server":
+ if self.module.params['home_server']:
+ host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
+ else:
+ host_ref = "OpaqueRef:NULL"
+
+ self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
+ elif isinstance(change, dict):
+ if change.get('hardware'):
+ for hardware_change in change['hardware']:
+ if hardware_change == "num_cpus":
+ num_cpus = int(self.module.params['hardware']['num_cpus'])
+
+ if num_cpus < int(self.vm_params['VCPUs_at_startup']):
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ else:
+ self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
+ self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
+ elif hardware_change == "num_cpu_cores_per_socket":
+ self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
+ num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
+
+ if num_cpu_cores_per_socket > 1:
+ self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
+ elif hardware_change == "memory_mb":
+ memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
+ vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
+
+ self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
+ elif change.get('disks_changed'):
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+ position = 0
+
+ for disk_change_list in change['disks_changed']:
+ for disk_change in disk_change_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
+
+ if disk_change == "name":
+ self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
+ elif disk_change == "name_desc":
+ self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
+ elif disk_change == "size":
+ self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
+ "VM reconfigure disks[%s]: " % position)))
+
+ position += 1
+ elif change.get('disks_new'):
+ for position, disk_userdevice in change['disks_new']:
+ disk_params = self.module.params['disks'][position]
+
+ disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
+ disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
+
+ if disk_params.get('sr_uuid'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
+ elif disk_params.get('sr'):
+ sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
+ else:
+ sr_ref = self.default_sr_ref
+
+ disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
+
+ new_disk_vdi = {
+ "name_label": disk_name,
+ "name_description": disk_name_desc,
+ "SR": sr_ref,
+ "virtual_size": disk_size,
+ "type": "user",
+ "sharable": False,
+ "read_only": False,
+ "other_config": {},
+ }
+
+ new_disk_vbd = {
+ "VM": self.vm_ref,
+ "VDI": None,
+ "userdevice": disk_userdevice,
+ "bootable": False,
+ "mode": "RW",
+ "type": "Disk",
+ "empty": False,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
+ vbd_ref_new = self.xapi_session.xenapi.VBD.create(new_disk_vbd)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VBD.plug(vbd_ref_new)
+
+ elif change.get('cdrom'):
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If there is no CD present, we have to create one.
+ if not vm_cdrom_params_list:
+                        # We will try to place the cdrom at userdevice position
+                        # 3 (which is the default) if it is not already occupied;
+                        # otherwise we will place it at the first allowed position.
+ cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if "3" in cdrom_userdevices_allowed:
+ cdrom_userdevice = "3"
+ else:
+ cdrom_userdevice = cdrom_userdevices_allowed[0]
+
+ cdrom_vbd = {
+ "VM": self.vm_ref,
+ "VDI": "OpaqueRef:NULL",
+ "userdevice": cdrom_userdevice,
+ "bootable": False,
+ "mode": "RO",
+ "type": "CD",
+ "empty": True,
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
+ else:
+ cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
+
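+ # An "empty" CD VBD represents a virtual drive with no ISO currently inserted.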
+ cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
+
+ for cdrom_change in change['cdrom']:
+ if cdrom_change == "type":
+ cdrom_type = self.module.params['cdrom']['type']
+
+ if cdrom_type == "none" and not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+ elif cdrom_type == "host":
+ # Unimplemented!
+ pass
+
+ elif cdrom_change == "iso_name":
+ if not cdrom_is_empty:
+ self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
+
+ cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
+ self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
+ elif change.get('networks_changed'):
+ position = 0
+
+ for network_change_list in change['networks_changed']:
+ if network_change_list:
+ vm_vif_params = self.vm_params['VIFs'][position]
+ network_params = self.module.params['networks'][position]
+
+ vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
+ network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
+
+ vif_recreated = False
+
+ if "name" in network_change_list or "mac" in network_change_list:
+ # To change network or MAC, we destroy old
+ # VIF and then create a new one with changed
+ # parameters. That's how XenCenter does it.
+
+ # Copy all old parameters to new VIF record.
+ vif = {
+ "device": vm_vif_params['device'],
+ "network": network_ref,
+ "VM": vm_vif_params['VM'],
+ "MAC": vm_vif_params['MAC'],
+ "MTU": vm_vif_params['MTU'],
+ "other_config": vm_vif_params['other_config'],
+ "qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
+ "qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
+ "locking_mode": vm_vif_params['locking_mode'],
+ "ipv4_allowed": vm_vif_params['ipv4_allowed'],
+ "ipv6_allowed": vm_vif_params['ipv6_allowed'],
+ }
+
+ if "name" in network_change_list:
+ network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+ vif['network'] = network_ref_new
+ vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
+
+ if "mac" in network_change_list:
+ vif['MAC'] = network_params['mac'].lower()
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.unplug(vif_ref)
+
+ self.xapi_session.xenapi.VIF.destroy(vif_ref)
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ vif_ref = vif_ref_new
+ vif_recreated = True
+
+ if self.vm_params['customization_agent'] == "native":
+ vif_reconfigure_needed = False
+
+ if "type" in network_change_list:
+ network_type = network_params['type'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type = vm_vif_params['ipv4_configuration_mode']
+
+ if "ip" in network_change_list:
+ network_ip = network_params['ip']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses']:
+ network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
+ else:
+ network_ip = ""
+
+ if "prefix" in network_change_list:
+ network_prefix = "/%s" % network_params['prefix']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
+ network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
+ else:
+ network_prefix = ""
+
+ if "gateway" in network_change_list:
+ network_gateway = network_params['gateway']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway = vm_vif_params['ipv4_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
+ "%s%s" % (network_ip, network_prefix), network_gateway)
+
+ vif_reconfigure_needed = False
+
+ if "type6" in network_change_list:
+ network_type6 = network_params['type6'].capitalize()
+ vif_reconfigure_needed = True
+ else:
+ network_type6 = vm_vif_params['ipv6_configuration_mode']
+
+ if "ip6" in network_change_list:
+ network_ip6 = network_params['ip6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses']:
+ network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
+ else:
+ network_ip6 = ""
+
+ if "prefix6" in network_change_list:
+ network_prefix6 = "/%s" % network_params['prefix6']
+ vif_reconfigure_needed = True
+ elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
+ network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
+ else:
+ network_prefix6 = ""
+
+ if "gateway6" in network_change_list:
+ network_gateway6 = network_params['gateway6']
+ vif_reconfigure_needed = True
+ else:
+ network_gateway6 = vm_vif_params['ipv6_gateway']
+
+ if vif_recreated or vif_reconfigure_needed:
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
+ "%s%s" % (network_ip6, network_prefix6), network_gateway6)
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vif_device = vm_vif_params['device']
+
+ # A user could have manually changed the network
+ # or MAC, e.g. through XenCenter, and then also
+ # made those changes in the playbook manually.
+ # In that case, the module will not detect any
+ # changes and the info in xenstore_data will
+ # become stale. For that reason we always
+ # update name and mac in xenstore_data.
+
+ # Since we handle name and mac differently,
+ # we have to remove them from
+ # network_change_list.
+ network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
+
+ for network_change in network_change_list_tmp + ['name', 'mac']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change))
+
+ if network_params.get('name'):
+ network_name = network_params['name']
+ else:
+ network_name = vm_vif_params['network']['name_label']
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
+
+ if network_params.get('mac'):
+ network_mac = network_params['mac'].lower()
+ else:
+ network_mac = vm_vif_params['MAC'].lower()
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
+
+ for network_change in network_change_list_tmp:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/%s" % (vif_device, network_change),
+ network_params[network_change])
+
+ position += 1
+ elif change.get('networks_new'):
+ for position, vif_device in change['networks_new']:
+ network_params = self.module.params['networks'][position]
+
+ network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
+
+ network_name = network_params['name']
+ network_mac = network_params['mac'] if network_params.get('mac') else ""
+ network_type = network_params.get('type')
+ network_ip = network_params['ip'] if network_params.get('ip') else ""
+ network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
+ network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
+ network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
+ network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
+ network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
+
+ vif = {
+ "device": vif_device,
+ "network": network_ref,
+ "VM": self.vm_ref,
+ "MAC": network_mac,
+ "MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
+ "other_config": {},
+ "qos_algorithm_type": "",
+ "qos_algorithm_params": {},
+ }
+
+ vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
+
+ if self.vm_params['power_state'].lower() == "running":
+ self.xapi_session.xenapi.VIF.plug(vif_ref_new)
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
+ "%s/%s" % (network_ip, network_prefix), network_gateway)
+
+ if network_type6 and network_type6 == "static":
+ self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
+ "%s/%s" % (network_ip6, network_prefix6), network_gateway6)
+ elif self.vm_params['customization_agent'] == "custom":
+ # We first have to remove any existing data
+ # from xenstore_data because there could be
+ # some old leftover data from an interface
+ # that once occupied the same device location
+ # as our new interface.
+ for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
+
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
+
+ # We get MAC from VIF itself instead of
+ # networks.mac because it could be
+ # autogenerated.
+ vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
+
+ if network_type:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
+
+ if network_type == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip" % vif_device, network_ip)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix" % vif_device, network_prefix)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/netmask" % vif_device, network_netmask)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway" % vif_device, network_gateway)
+
+ if network_type6:
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
+
+ if network_type6 == "static":
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/ip6" % vif_device, network_ip6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
+ self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
+ "vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
+
+ elif change.get('custom_params'):
+ for position in change['custom_params']:
+ custom_param_key = self.module.params['custom_params'][position]['key']
+ custom_param_value = self.module.params['custom_params'][position]['value']
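+ # Dynamically invoke the matching low-level XAPI setter, e.g. key "name_label" results in a VM.set_name_label call.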
+ self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
+
+ if self.module.params['is_template']:
+ self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
+ elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
+ self.set_power_state("poweredon")
+
+ # Gather new params after reconfiguration.
+ self.gather_params()
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ return config_changes
+
+ def destroy(self):
+ """Removes an existing VM with associated disks"""
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called destroy on a non-existing VM!")
+
+ if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
+ self.module.fail_json(msg="VM destroy: VM has to be in the powered-off state to be destroyed but force was not specified!")
+
+ # Support for Ansible check mode.
+ if self.module.check_mode:
+ return
+
+ # Make sure that the VM is powered off before we destroy it.
+ self.set_power_state("poweredoff")
+
+ try:
+ # Destroy VM!
+ self.xapi_session.xenapi.VM.destroy(self.vm_ref)
+
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # Destroy all VDIs associated with VM!
+ for vm_disk_params in vm_disk_params_list:
+ vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
+
+ self.xapi_session.xenapi.VDI.destroy(vdi_ref)
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_changes(self):
+ """Finds VM parameters that differ from specified ones.
+
+ This method builds a list (with a hierarchy of nested change
+ dictionaries) of VM parameters that differ from those specified
+ in module parameters.
+
+ Returns:
+ list: VM parameters that differ from those specified in
+ module parameters.
+ """
+ # Safety check.
+ if not self.exists():
+ self.module.fail_json(msg="Called get_changes on a non-existing VM!")
+
+ need_poweredoff = False
+
+ if self.module.params['is_template']:
+ need_poweredoff = True
+
+ try:
+ # This VM could be a template or a snapshot. In that case we fail
+ # because we can't reconfigure them or it would just be too
+ # dangerous.
+ if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
+
+ if self.vm_params['is_a_snapshot']:
+ self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
+
+ # Let's build a list of parameters that changed.
+ config_changes = []
+
+ # Name could only differ if we found an existing VM by uuid.
+ if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
+ if self.module.params['name']:
+ config_changes.append('name')
+ else:
+ self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
+
+ if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
+ config_changes.append('name_desc')
+
+ # Folder parameter is found in other_config.
+ vm_other_config = self.vm_params['other_config']
+ vm_folder = vm_other_config.get('folder', '')
+
+ if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
+ config_changes.append('folder')
+
+ if self.module.params['home_server'] is not None:
+ if (self.module.params['home_server'] and
+ (not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
+
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
+ msg_prefix="VM check home_server: ")
+
+ config_changes.append('home_server')
+ elif not self.module.params['home_server'] and self.vm_params['affinity']:
+ config_changes.append('home_server')
+
+ config_changes_hardware = []
+
+ if self.module.params['hardware']:
+ num_cpus = self.module.params['hardware'].get('num_cpus')
+
+ if num_cpus is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpus = int(num_cpus)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
+
+ if num_cpus < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
+
+ # We can use VCPUs_at_startup or VCPUs_max parameter. I'd
+ # say the former is the way to go but this needs
+ # confirmation and testing.
+ if num_cpus != int(self.vm_params['VCPUs_at_startup']):
+ config_changes_hardware.append('num_cpus')
+ # For now, we don't support hotplugging so the VM has
+ # to be in the powered-off state to reconfigure.
+ need_poweredoff = True
+
+ num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
+
+ if num_cpu_cores_per_socket is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
+
+ if num_cpu_cores_per_socket < 1:
+ self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
+
+ if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
+ self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
+
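+ # cores-per-socket lives in the VM's platform dict; absence means one core per socket.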
+ vm_platform = self.vm_params['platform']
+ vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
+
+ if num_cpu_cores_per_socket != vm_cores_per_socket:
+ config_changes_hardware.append('num_cpu_cores_per_socket')
+ # For now, we don't support hotplugging so the VM has
+ # to be in the powered-off state to reconfigure.
+ need_poweredoff = True
+
+ memory_mb = self.module.params['hardware'].get('memory_mb')
+
+ if memory_mb is not None:
+ # Kept for compatibility with older Ansible versions that
+ # do not support subargument specs.
+ try:
+ memory_mb = int(memory_mb)
+ except ValueError:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
+
+ if memory_mb < 1:
+ self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
+
+ # There are multiple memory parameters:
+ # - memory_dynamic_max
+ # - memory_dynamic_min
+ # - memory_static_max
+ # - memory_static_min
+ # - memory_target
+ #
+ # memory_target seems like a good candidate but it returns 0 for
+ # halted VMs so we can't use it.
+ #
+ # I decided to use memory_dynamic_max and memory_static_max
+ # and use whichever is larger. This strategy needs validation
+ # and testing.
+ #
+ # XenServer stores memory size in bytes so we need to divide
+ # it by 1024*1024 = 1048576.
+ if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
+ config_changes_hardware.append('memory_mb')
+ # For now, we don't support hotplugging so the VM has
+ # to be in the powered-off state to reconfigure.
+ need_poweredoff = True
+
+ if config_changes_hardware:
+ config_changes.append({"hardware": config_changes_hardware})
+
+ config_changes_disks = []
+ config_new_disks = []
+
+ # Find allowed userdevices.
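+ # get_allowed_VBD_devices returns the currently free device slots as a list of strings.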
+ vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
+
+ if self.module.params['disks']:
+ # Get the list of all disks. Filter out any CDs found.
+ vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
+
+ # The number of disks defined in module params has to be
+ # the same as or higher than the number of existing disks
+ # attached to the VM. We don't support removal or detachment of disks.
+ if len(self.module.params['disks']) < len(vm_disk_params_list):
+ self.module.fail_json(msg="VM check disks: provided disks configuration has fewer disks than the target VM (%d < %d)!" %
+ (len(self.module.params['disks']), len(vm_disk_params_list)))
+
+ # Find the highest userdevice occupied by a disk.
+ if not vm_disk_params_list:
+ vm_disk_userdevice_highest = "-1"
+ else:
+ vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
+
+ for position in range(len(self.module.params['disks'])):
+ if position < len(vm_disk_params_list):
+ vm_disk_params = vm_disk_params_list[position]
+ else:
+ vm_disk_params = None
+
+ disk_params = self.module.params['disks'][position]
+
+ disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
+
+ disk_name = disk_params.get('name')
+
+ if disk_name is not None and not disk_name:
+ self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
+
+ # If this is an existing disk.
+ if vm_disk_params and vm_disk_params['VDI']:
+ disk_changes = []
+
+ if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
+ disk_changes.append('name')
+
+ disk_name_desc = disk_params.get('name_desc')
+
+ if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
+ disk_changes.append('name_desc')
+
+ if disk_size:
+ if disk_size > int(vm_disk_params['VDI']['virtual_size']):
+ disk_changes.append('size')
+ need_poweredoff = True
+ elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
+ self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
+ "Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
+
+ config_changes_disks.append(disk_changes)
+ # If this is a new disk.
+ else:
+ if not disk_size:
+ self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
+
+ disk_sr_uuid = disk_params.get('sr_uuid')
+ disk_sr = disk_params.get('sr')
+
+ if disk_sr_uuid is not None or disk_sr is not None:
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
+ msg_prefix="VM check disks[%s]: " % position)
+ elif self.default_sr_ref == 'OpaqueRef:NULL':
+ self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
+
+ if not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
+
+ disk_userdevice = None
+
+ # We need to place a new disk right above the highest
+ # placed existing disk to maintain relative disk
+ # positions pairable with disk specifications in
+ # module params. That place must not be occupied by
+ # some other device such as a CD-ROM.
+ for userdevice in vbd_userdevices_allowed:
+ if int(userdevice) > int(vm_disk_userdevice_highest):
+ disk_userdevice = userdevice
+ vbd_userdevices_allowed.remove(userdevice)
+ vm_disk_userdevice_highest = userdevice
+ break
+
+ # If no place was found.
+ if disk_userdevice is None:
+ # The highest occupied place could be a CD-ROM device,
+ # so we have to include all devices regardless of
+ # type when calculating the out-of-bounds position.
+ disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
+ self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
+
+ # For new disks we only track their position.
+ config_new_disks.append((position, disk_userdevice))
+
+ # We should append config_changes_disks to config_changes only
+ # if there is at least one changed disk, else skip.
+ for disk_change in config_changes_disks:
+ if disk_change:
+ config_changes.append({"disks_changed": config_changes_disks})
+ break
+
+ if config_new_disks:
+ config_changes.append({"disks_new": config_new_disks})
+
+ config_changes_cdrom = []
+
+ if self.module.params['cdrom']:
+ # Get the list of all CD-ROMs. Filter out any regular disks
+ # found. If no existing CD-ROM is found, we will create one
+ # later; otherwise we take the first one found.
+ vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
+
+ # If no existing CD-ROM is found, we will need to add one.
+ # We need to check if there is any userdevice allowed.
+ if not vm_cdrom_params_list and not vbd_userdevices_allowed:
+ self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
+
+ cdrom_type = self.module.params['cdrom'].get('type')
+ cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
+
+ # If cdrom.iso_name is specified but cdrom.type is not,
+ # then set cdrom.type to 'iso', unless cdrom.iso_name is
+ # an empty string; in that case set cdrom.type to 'none'.
+ if not cdrom_type:
+ if cdrom_iso_name:
+ cdrom_type = "iso"
+ elif cdrom_iso_name is not None:
+ cdrom_type = "none"
+
+ self.module.params['cdrom']['type'] = cdrom_type
+
+ # If type changed.
+ if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
+ config_changes_cdrom.append('type')
+
+ if cdrom_type == "iso":
+ # Check if ISO exists.
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
+ msg_prefix="VM check cdrom.iso_name: ")
+
+ # Has the ISO image changed?
+ if (cdrom_iso_name and
+ (not vm_cdrom_params_list or
+ not vm_cdrom_params_list[0]['VDI'] or
+ cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
+ config_changes_cdrom.append('iso_name')
+
+ if config_changes_cdrom:
+ config_changes.append({"cdrom": config_changes_cdrom})
+
+ config_changes_networks = []
+ config_new_networks = []
+
+ # Find allowed devices.
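+ # get_allowed_VIF_devices likewise returns the free VIF device slots as a list of strings.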
+ vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
+
+ if self.module.params['networks']:
+ # The number of VIFs defined in module params has to be
+ # the same as or higher than the number of existing VIFs
+ # attached to the VM. We don't support removal of VIFs.
+ if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
+ self.module.fail_json(msg="VM check networks: provided networks configuration has fewer interfaces than the target VM (%d < %d)!" %
+ (len(self.module.params['networks']), len(self.vm_params['VIFs'])))
+
+ # Find the highest occupied device.
+ if not self.vm_params['VIFs']:
+ vif_device_highest = "-1"
+ else:
+ vif_device_highest = self.vm_params['VIFs'][-1]['device']
+
+ for position in range(len(self.module.params['networks'])):
+ if position < len(self.vm_params['VIFs']):
+ vm_vif_params = self.vm_params['VIFs'][position]
+ else:
+ vm_vif_params = None
+
+ network_params = self.module.params['networks'][position]
+
+ network_name = network_params.get('name')
+
+ if network_name is not None and not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
+
+ if network_name:
+ # Check existence only. Ignore return value.
+ get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
+ msg_prefix="VM check networks[%s]: " % position)
+
+ network_mac = network_params.get('mac')
+
+ if network_mac is not None:
+ network_mac = network_mac.lower()
+
+ if not is_mac(network_mac):
+ self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
+
+ # IPv4 reconfiguration.
+ network_type = network_params.get('type')
+ network_ip = network_params.get('ip')
+ network_netmask = network_params.get('netmask')
+ network_prefix = None
+
+ # If networks.ip is specified and networks.type is not,
+ # then set networks.type to 'static'.
+ if not network_type and network_ip:
+ network_type = "static"
+
+ # XenServer natively supports only the 'none' and 'static'
+ # types, with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
+ network_type = "none"
+
+ if network_type and network_type == "static":
+ if network_ip is not None:
+ network_ip_split = network_ip.split('/')
+ network_ip = network_ip_split[0]
+
+ if network_ip and not is_valid_ip_addr(network_ip):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
+
+ if len(network_ip_split) > 1:
+ network_prefix = network_ip_split[1]
+
+ if not is_valid_ip_prefix(network_prefix):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
+
+ if network_netmask is not None:
+ if not is_valid_ip_netmask(network_netmask):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
+
+ network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
+ elif network_prefix is not None:
+ network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
+
+ # If any parameter is overridden at this point, update it.
+ if network_type:
+ network_params['type'] = network_type
+
+ if network_ip:
+ network_params['ip'] = network_ip
+
+ if network_netmask:
+ network_params['netmask'] = network_netmask
+
+ if network_prefix:
+ network_params['prefix'] = network_prefix
+
+ network_gateway = network_params.get('gateway')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway and not is_valid_ip_addr(network_gateway):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
+
+ # IPv6 reconfiguration.
+ network_type6 = network_params.get('type6')
+ network_ip6 = network_params.get('ip6')
+ network_prefix6 = None
+
+ # If networks.ip6 is specified and networks.type6 is not,
+ # then set networks.type6 to 'static'.
+ if not network_type6 and network_ip6:
+ network_type6 = "static"
+
+ # XenServer natively supports only the 'none' and 'static'
+ # types, with 'none' being the same as 'dhcp'.
+ if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
+ network_type6 = "none"
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 is not None:
+ network_ip6_split = network_ip6.split('/')
+ network_ip6 = network_ip6_split[0]
+
+ if network_ip6 and not is_valid_ip6_addr(network_ip6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
+
+ if len(network_ip6_split) > 1:
+ network_prefix6 = network_ip6_split[1]
+
+ if not is_valid_ip6_prefix(network_prefix6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
+
+ # If any parameter is overridden at this point, update it.
+ if network_type6:
+ network_params['type6'] = network_type6
+
+ if network_ip6:
+ network_params['ip6'] = network_ip6
+
+ if network_prefix6:
+ network_params['prefix6'] = network_prefix6
+
+ network_gateway6 = network_params.get('gateway6')
+
+ # Gateway can be an empty string (when removing gateway
+ # configuration) but if it is not, it should be validated.
+ if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
+ self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
+
+ # If this is an existing VIF.
+ if vm_vif_params and vm_vif_params['network']:
+ network_changes = []
+
+ if network_name and network_name != vm_vif_params['network']['name_label']:
+ network_changes.append('name')
+
+ if network_mac and network_mac != vm_vif_params['MAC'].lower():
+ network_changes.append('mac')
+
+ if self.vm_params['customization_agent'] == "native":
+ if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
+ network_changes.append('type')
+
+ if network_type and network_type == "static":
+ if network_ip and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
+ network_changes.append('ip')
+
+ if network_prefix and (not vm_vif_params['ipv4_addresses'] or
+ not vm_vif_params['ipv4_addresses'][0] or
+ network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+
+ if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
+ network_changes.append('gateway')
+
+ if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
+ network_changes.append('type6')
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
+ network_changes.append('ip6')
+
+ if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
+ not vm_vif_params['ipv6_addresses'][0] or
+ network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
+ network_changes.append('prefix6')
+
+ if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
+ network_changes.append('gateway6')
+
+ elif self.vm_params['customization_agent'] == "custom":
+ vm_xenstore_data = self.vm_params['xenstore_data']
+
+ if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
+ network_changes.append('type')
+ need_poweredoff = True
+
+ if network_type and network_type == "static":
+ if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
+ network_changes.append('ip')
+ need_poweredoff = True
+
+ if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
+ network_changes.append('prefix')
+ network_changes.append('netmask')
+ need_poweredoff = True
+
+ if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
+ network_changes.append('type6')
+ need_poweredoff = True
+
+ if network_type6 and network_type6 == "static":
+ if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
+ network_changes.append('ip6')
+ need_poweredoff = True
+
+ if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
+ network_changes.append('prefix6')
+ need_poweredoff = True
+
+ if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
+ vm_vif_params['device'], ""):
+ network_changes.append('gateway6')
+ need_poweredoff = True
+
+ config_changes_networks.append(network_changes)
+ # If this is a new VIF.
+ else:
+ if not network_name:
+ self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
+
+ if network_type and network_type == "static" and network_ip and not network_netmask:
+ self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
+
+ if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
+ self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
+
+ # A restart is needed if we are adding a new network
+ # interface with IP/gateway parameters specified
+ # and the custom agent is used.
+ if self.vm_params['customization_agent'] == "custom":
+ for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
+ if network_params.get(parameter):
+ need_poweredoff = True
+ break
+
+ if not vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
+
+ # We need to place a new network interface right above the
+ # highest placed existing interface to maintain relative
+ # positions pairable with network interface specifications
+ # in module params.
+ vif_device = str(int(vif_device_highest) + 1)
+
+ if vif_device not in vif_devices_allowed:
+ self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
+
+ vif_devices_allowed.remove(vif_device)
+ vif_device_highest = vif_device
+
+ # For new VIFs we only track their position.
+ config_new_networks.append((position, vif_device))
+
+ # We should append config_changes_networks to config_changes only
+ # if there is at least one changed network, else skip.
+ for network_change in config_changes_networks:
+ if network_change:
+ config_changes.append({"networks_changed": config_changes_networks})
+ break
+
+ if config_new_networks:
+ config_changes.append({"networks_new": config_new_networks})
+
+ config_changes_custom_params = []
+
+ if self.module.params['custom_params']:
+ for position in range(len(self.module.params['custom_params'])):
+ custom_param = self.module.params['custom_params'][position]
+
+ custom_param_key = custom_param['key']
+ custom_param_value = custom_param['value']
+
+ if custom_param_key not in self.vm_params:
+ self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
+
+ if custom_param_value != self.vm_params[custom_param_key]:
+ # We only need to track custom param position.
+ config_changes_custom_params.append(position)
+
+ if config_changes_custom_params:
+ config_changes.append({"custom_params": config_changes_custom_params})
+
+ if need_poweredoff:
+ config_changes.append('need_poweredoff')
+
+ return config_changes
+
+ except XenAPI.Failure as f:
+ self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
+
+ def get_normalized_disk_size(self, disk_params, msg_prefix=""):
+ """Parses disk size parameters and returns disk size in bytes.
+
+ This method tries to parse disk size module parameters. It fails
+ with an error message if size cannot be parsed.
+
+ Args:
+ disk_params (dict): A dictionary with disk parameters.
+ msg_prefix (str): A string error messages should be prefixed
+ with (default: "").
+
+ Returns:
+ int: disk size in bytes if disk size is successfully parsed or
+ None if no disk size parameters were found.
+ """
+ # There should be only a single size spec, but we make a list of all
+ # size specs just in case. Priority is given to 'size'; if it is not
+ # found, we check for 'size_tb', 'size_gb', 'size_mb', etc. and use
+ # the first one found.
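+ # For example, a disk spec of {"size": "10gb"} parses to 10 * 1024**3 =
+ # 10737418240 bytes, while {"size_mb": "512"} parses to 512 * 1024**2 =
+ # 536870912 bytes.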
+ disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
+
+ if disk_size_spec:
+ try:
+ # size
+ if "size" in disk_size_spec:
+ size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
+ disk_size_m = size_regex.match(disk_params['size'])
+
+ if disk_size_m:
+ size = disk_size_m.group(1)
+ unit = disk_size_m.group(2)
+ else:
+ raise ValueError
+ # size_tb, size_gb, size_mb, size_kb, size_b
+ else:
+ size = disk_params[disk_size_spec[0]]
+ unit = disk_size_spec[0].split('_')[-1]
+
+ if not unit:
+ unit = "b"
+ else:
+ unit = unit.lower()
+
+ if re.match(r'\d+\.\d+', size):
+ # We found float value in string, let's typecast it.
+ if unit == "b":
+ # If we found float but unit is bytes, we get the integer part only.
+ size = int(float(size))
+ else:
+ size = float(size)
+ else:
+ # We found int value in string, let's typecast it.
+ size = int(size)
+
+ if not size or size < 0:
+ raise ValueError
+
+ except (TypeError, ValueError, NameError):
+ # Common failure
+ self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
+
+ disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
+
+ if unit in disk_units:
+ return int(size * (1024 ** disk_units[unit]))
+ else:
+ self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
+ (msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
+ else:
+ return None
+
+ @staticmethod
+ def get_cdrom_type(vm_cdrom_params):
+ """Returns VM CD-ROM type."""
+ # TODO: implement support for detecting type 'host'. No server to
+ # test this on at the moment.
+ if vm_cdrom_params['empty']:
+ return "none"
+ else:
+ return "iso"
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['present', 'absent', 'poweredon']),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ uuid=dict(type='str'),
+ template=dict(type='str', aliases=['template_src']),
+ template_uuid=dict(type='str'),
+ is_template=dict(type='bool', default=False),
+ folder=dict(type='str'),
+ hardware=dict(
+ type='dict',
+ options=dict(
+ num_cpus=dict(type='int'),
+ num_cpu_cores_per_socket=dict(type='int'),
+ memory_mb=dict(type='int'),
+ ),
+ ),
+ disks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ size=dict(type='str'),
+ size_tb=dict(type='str'),
+ size_gb=dict(type='str'),
+ size_mb=dict(type='str'),
+ size_kb=dict(type='str'),
+ size_b=dict(type='str'),
+ name=dict(type='str', aliases=['name_label']),
+ name_desc=dict(type='str'),
+ sr=dict(type='str'),
+ sr_uuid=dict(type='str'),
+ ),
+ aliases=['disk'],
+ mutually_exclusive=[
+ ['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
+ ['sr', 'sr_uuid'],
+ ],
+ ),
+ cdrom=dict(
+ type='dict',
+ options=dict(
+ type=dict(type='str', choices=['none', 'iso']),
+ iso_name=dict(type='str'),
+ ),
+ required_if=[
+ ['type', 'iso', ['iso_name']],
+ ],
+ ),
+ networks=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ name=dict(type='str', aliases=['name_label']),
+ mac=dict(type='str'),
+ type=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip=dict(type='str'),
+ netmask=dict(type='str'),
+ gateway=dict(type='str'),
+ type6=dict(type='str', choices=['none', 'dhcp', 'static']),
+ ip6=dict(type='str'),
+ gateway6=dict(type='str'),
+ ),
+ aliases=['network'],
+ required_if=[
+ ['type', 'static', ['ip']],
+ ['type6', 'static', ['ip6']],
+ ],
+ ),
+ home_server=dict(type='str'),
+ custom_params=dict(
+ type='list',
+ elements='dict',
+ options=dict(
+ key=dict(type='str', required=True, no_log=False),
+ value=dict(type='raw', required=True),
+ ),
+ ),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ linked_clone=dict(type='bool', default=False),
+ force=dict(type='bool', default=False),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ mutually_exclusive=[
+ ['template', 'template_uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ vm = XenServerVM(module)
+
+ # Find existing VM
+ if vm.exists():
+ if module.params['state'] == "absent":
+ vm.destroy()
+ result['changed'] = True
+ elif module.params['state'] == "present":
+ config_changes = vm.reconfigure()
+
+ if config_changes:
+ result['changed'] = True
+
+ # Make new disk and network changes more user-friendly
+ # and informative.
+ for change in config_changes:
+ if isinstance(change, dict):
+ if change.get('disks_new'):
+ disks_new = []
+
+ for position, userdevice in change['disks_new']:
+ disk_new_params = {"position": position, "vbd_userdevice": userdevice}
+ disk_params = module.params['disks'][position]
+
+ for k in disk_params.keys():
+ if disk_params[k] is not None:
+ disk_new_params[k] = disk_params[k]
+
+ disks_new.append(disk_new_params)
+
+ if disks_new:
+ change['disks_new'] = disks_new
+
+ elif change.get('networks_new'):
+ networks_new = []
+
+ for position, device in change['networks_new']:
+ network_new_params = {"position": position, "vif_device": device}
+ network_params = module.params['networks'][position]
+
+ for k in network_params.keys():
+ if network_params[k] is not None:
+ network_new_params[k] = network_params[k]
+
+ networks_new.append(network_new_params)
+
+ if networks_new:
+ change['networks_new'] = networks_new
+
+ result['changes'] = config_changes
+
+ elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
+ result['changed'] = vm.set_power_state(module.params['state'])
+ elif module.params['state'] != "absent":
+ vm.deploy()
+ result['changed'] = True
+
+ if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
new file mode 100644
index 000000000..dd28cf7d0
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_info.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_info
+short_description: Gathers information for virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to gather essential VM facts.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+- This module was called C(xenserver_guest_facts) before Ansible 2.9. The usage did not change.
+requirements:
+- python >= 2.6
+- XenAPI
+options:
+ name:
+ description:
+ - Name of the VM to gather facts from.
+ - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+ - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to gather facts of. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+- community.general.attributes
+- community.general.attributes.info_module
+'''
+
+EXAMPLES = r'''
+- name: Gather facts
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ delegate_to: localhost
+ register: facts
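+
+# A hypothetical variant (sketch): when VM names are not unique, address the
+# VM by its UUID instead. The UUID value below is illustrative only.
+- name: Gather facts by UUID
+ community.general.xenserver_guest_info:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ uuid: e3c0b2d5-5f05-424e-479c-d3df8b3e7cda
+ delegate_to: localhost
+ register: facts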
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "testvm_11-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "testvm_11-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "testvm_11",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI # noqa: F401, pylint: disable=unused-import
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to AnsibleModule object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Gather facts.
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
new file mode 100644
index 000000000..ba88bbf1d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xenserver_guest_powerstate.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xenserver_guest_powerstate
+short_description: Manages power states of virtual machines running on Citrix Hypervisor/XenServer host or pool
+description: >
+ This module can be used to power on, power off, restart or suspend a virtual machine and gracefully reboot or shut down the guest OS of a virtual machine.
+author:
+- Bojan Vitnik (@bvitnik) <bvitnik@mainstream.rs>
+notes:
+- Minimal supported version of XenServer is 5.6.
+- Module was tested with XenServer 6.5, 7.1, 7.2, 7.6, Citrix Hypervisor 8.0, XCP-ng 7.6 and 8.0.
+- 'To acquire XenAPI Python library, just run C(pip install XenAPI) on your Ansible Control Node. The library can also be found inside
+ Citrix Hypervisor/XenServer SDK (downloadable from Citrix website). Copy the XenAPI.py file from the SDK to your Python site-packages on your
+ Ansible Control Node to use it. Latest version of the library can also be acquired from GitHub:
+ U(https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI/XenAPI.py)'
+- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
+ accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
+- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
+ which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
+requirements:
+- python >= 2.6
+- XenAPI
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ state:
+ description:
+ - Specify the state VM should be in.
+ - If C(state) is set to a value other than C(present), then the VM is transitioned into the required state and facts are returned.
+ - If C(state) is set to C(present), then the VM is just checked for existence and facts are returned.
+ type: str
+ default: present
+ choices: [ powered-on, powered-off, restarted, shutdown-guest, reboot-guest, suspended, present ]
+ name:
+ description:
+ - Name of the VM to manage.
+ - VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with the same name are found.
+ - In case of multiple VMs with the same name, use C(uuid) to uniquely specify the VM to manage.
+ - This parameter is case sensitive.
+ type: str
+ aliases: [ name_label ]
+ uuid:
+ description:
+ - UUID of the VM to manage if known. This is XenServer's unique identifier.
+ - It is required if name is not unique.
+ type: str
+ wait_for_ip_address:
+ description:
+ - Wait until XenServer detects an IP address for the VM.
+ - This requires XenServer Tools to be preinstalled on the VM to work properly.
+ type: bool
+ default: false
+ state_change_timeout:
+ description:
+ - 'By default, the module will wait indefinitely for the VM to change state or acquire an IP address if C(wait_for_ip_address: true).'
+ - If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
+ - In case of timeout, the module will generate an error message.
+ type: int
+ default: 0
+extends_documentation_fragment:
+- community.general.xenserver.documentation
+- community.general.attributes
+
+'''
+
+EXAMPLES = r'''
+- name: Power on VM
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: powered-on
+ delegate_to: localhost
+ register: facts
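+
+# A hypothetical variant (sketch): gracefully shut down the guest OS, failing
+# if the state change does not complete within 120 seconds.
+- name: Shut down guest OS with a timeout
+ community.general.xenserver_guest_powerstate:
+ hostname: "{{ xenserver_hostname }}"
+ username: "{{ xenserver_username }}"
+ password: "{{ xenserver_password }}"
+ name: testvm_11
+ state: shutdown-guest
+ state_change_timeout: 120
+ delegate_to: localhost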
+'''
+
+RETURN = r'''
+instance:
+ description: Metadata about the VM
+ returned: always
+ type: dict
+ sample: {
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "windows-template-testing-0",
+ "name_desc": "",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "windows-template-testing-1",
+ "name_desc": "",
+ "os_device": "xvdb",
+ "size": 42949672960,
+ "sr": "Local storage",
+ "sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "56",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 8192,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 4
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "windows-template-testing",
+ "name_desc": "",
+ "networks": [
+ {
+ "gateway": "192.168.0.254",
+ "gateway6": "fc00::fffe",
+ "ip": "192.168.0.200",
+ "ip6": [
+ "fe80:0000:0000:0000:e9cb:625a:32c5:c291",
+ "fc00:0000:0000:0000:0000:0000:0000:0001"
+ ],
+ "mac": "ba:91:3a:48:20:76",
+ "mtu": "1500",
+ "name": "Pool-wide network associated with eth1",
+ "netmask": "255.255.255.128",
+ "prefix": "25",
+ "prefix6": "64",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-25200",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+'''
+
+HAS_XENAPI = False
+try:
+ import XenAPI # noqa: F401, pylint: disable=unused-import
+ HAS_XENAPI = True
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.xenserver import (xenserver_common_argument_spec, XenServerObject, get_object_ref,
+ gather_vm_params, gather_vm_facts, set_vm_power_state,
+ wait_for_vm_ip_address)
+
+
+class XenServerVM(XenServerObject):
+ """Class for managing XenServer VM.
+
+ Attributes:
+ vm_ref (str): XAPI reference to VM.
+ vm_params (dict): A dictionary with VM parameters as returned
+ by gather_vm_params() function.
+ """
+
+ def __init__(self, module):
+ """Inits XenServerVM using module parameters.
+
+ Args:
+ module: Reference to Ansible module object.
+ """
+ super(XenServerVM, self).__init__(module)
+
+ self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=True, msg_prefix="VM search: ")
+ self.gather_params()
+
+ def gather_params(self):
+ """Gathers all VM parameters available in XAPI database."""
+ self.vm_params = gather_vm_params(self.module, self.vm_ref)
+
+ def gather_facts(self):
+ """Gathers and returns VM facts."""
+ return gather_vm_facts(self.module, self.vm_params)
+
+ def set_power_state(self, power_state):
+ """Controls VM power state."""
+ state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
+
+ # If state has changed, update vm_params.
+ if state_changed:
+ self.vm_params['power_state'] = current_state.capitalize()
+
+ return state_changed
+
+ def wait_for_ip_address(self):
+ """Waits for VM to acquire an IP address."""
+ self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
+
+
+def main():
+ argument_spec = xenserver_common_argument_spec()
+ argument_spec.update(
+ state=dict(type='str', default='present',
+ choices=['powered-on', 'powered-off', 'restarted', 'shutdown-guest', 'reboot-guest', 'suspended', 'present']),
+ name=dict(type='str', aliases=['name_label']),
+ uuid=dict(type='str'),
+ wait_for_ip_address=dict(type='bool', default=False),
+ state_change_timeout=dict(type='int', default=0),
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_one_of=[
+ ['name', 'uuid'],
+ ],
+ )
+
+ result = {'failed': False, 'changed': False}
+
+ # Module will exit with an error message if no VM is found.
+ vm = XenServerVM(module)
+
+ # Set VM power state.
+ if module.params['state'] != "present":
+ result['changed'] = vm.set_power_state(module.params['state'])
+
+ if module.params['wait_for_ip_address']:
+ vm.wait_for_ip_address()
+
+ result['instance'] = vm.gather_facts()
+
+ if result['failed']:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xfconf.py b/ansible_collections/community/general/plugins/modules/xfconf.py
new file mode 100644
index 000000000..567117d40
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xfconf.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017, Joseph Benden <joe@benden.us>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf
+author:
+ - "Joseph Benden (@jbenden)"
+ - "Alexei Znamensky (@russoz)"
+short_description: Edit XFCE4 configurations
+description:
+ - This module allows for the manipulation of Xfce 4 configuration with the help of
+ C(xfconf-query). Please see the xfconf-query(1) man page for more details.
+seealso:
+ - name: xfconf-query(1) man page
+ description: Manual page of the C(xfconf-query) tool at the XFCE documentation site.
+ link: 'https://docs.xfce.org/xfce/xfconf/xfconf-query'
+
+ - name: xfconf - Configuration Storage System
+ description: XFCE documentation for the Xfconf configuration system.
+ link: 'https://docs.xfce.org/xfce/xfconf/start'
+
+extends_documentation_fragment:
+ - community.general.attributes
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ channel:
+ description:
+ - An Xfconf preference channel is a top-level tree key inside the
+ Xfconf repository, corresponding to the location where all
+ application properties/keys are stored. See man xfconf-query(1).
+ required: true
+ type: str
+ property:
+ description:
+ - An Xfce preference key is an element in the Xfconf repository
+ that corresponds to an application preference. See man xfconf-query(1).
+ required: true
+ type: str
+ value:
+ description:
+ - Preference properties typically have simple values such as strings,
+ integers, or lists of strings and integers. See man xfconf-query(1).
+ type: list
+ elements: raw
+ value_type:
+ description:
+ - The type of value being set.
+ - When providing more than one I(value_type), the length of the list must
+ be equal to the length of I(value).
+ - If only one I(value_type) is provided, but I(value) contains more than
+ one element, that I(value_type) will be applied to all elements of I(value).
+ - If the I(property) being set is an array and it can possibly have only one
+ element in the array, then I(force_array=true) must be used to ensure
+ that C(xfconf-query) will interpret the value as an array rather than a
+ scalar.
+ - Support for C(uchar), C(char), C(uint64), and C(int64) has been added in community.general 4.8.0.
+ type: list
+ elements: str
+ choices: [ string, int, double, bool, uint, uchar, char, uint64, int64, float ]
+ state:
+ type: str
+ description:
+ - The action to take upon the property/value.
+ - The state C(get) has been removed in community.general 5.0.0. Please use the module M(community.general.xfconf_info) instead.
+ choices: [ present, absent ]
+ default: "present"
+ force_array:
+ description:
+ - Force array even if only one element.
+ type: bool
+ default: false
+ aliases: ['array']
+ version_added: 1.0.0
+ disable_facts:
+ description:
+ - The value C(false) is no longer allowed since community.general 4.0.0.
+ - This option is deprecated, and will be removed in community.general 8.0.0.
+ type: bool
+ default: true
+ version_added: 2.1.0
+'''
+
+EXAMPLES = """
+- name: Change the DPI to "192"
+ xfconf:
+ channel: "xsettings"
+ property: "/Xft/DPI"
+ value_type: "int"
+ value: "192"
+
+- name: Set workspace names (4)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main', 'Work1', 'Work2', 'Tmp']
+
+- name: Set workspace names (1)
+ xfconf:
+ channel: xfwm4
+ property: /general/workspace_names
+ value_type: string
+ value: ['Main']
+ force_array: true
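+
+# An additional illustrative example (not part of the original module docs):
+# one value_type entry per element of value, as described under value_type.
+# The property path used here is hypothetical.
+- name: Set an array property with mixed element types
+ xfconf:
+ channel: xfwm4
+ property: /general/hypothetical_margins
+ value_type: [int, int, string]
+ value: [10, 20, auto]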
+"""
+
+RETURN = '''
+ channel:
+ description: The channel specified in the module parameters
+ returned: success
+ type: str
+ sample: "xsettings"
+ property:
+ description: The property specified in the module parameters
+ returned: success
+ type: str
+ sample: "/Xft/DPI"
+ value_type:
+ description:
+ - The type of the value that was changed (C(none) when the property was
+ reset, that is when I(state=absent)). Either a single string value or a
+ list of strings for array types.
+ - This is a string or a list of strings.
+ returned: success
+ type: any
+ sample: '"int" or ["str", "str", "str"]'
+ value:
+ description:
+ - The value of the preference key after executing the module. Either a
+ single string value or a list of strings for array types.
+ - This is a string or a list of strings.
+ returned: success
+ type: any
+ sample: '"192" or ["orange", "yellow", "violet"]'
+ previous_value:
+ description:
+ - The value of the preference key before executing the module.
+ Either a single string value or a list of strings for array types.
+ - This is a string or a list of strings.
+ returned: success
+ type: any
+ sample: '"96" or ["red", "blue", "green"]'
+ cmd:
+ description:
+ - A list with the resulting C(xfconf-query) command executed by the module.
+ returned: success
+ type: list
+ elements: str
+ version_added: 5.4.0
+ sample:
+ - /usr/bin/xfconf-query
+ - --channel
+ - xfce4-panel
+ - --property
+ - /plugins/plugin-19/timezone
+ - --create
+ - --type
+ - string
+ - --set
+ - Pacific/Auckland
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner
+
+
+class XFConfProperty(StateModuleHelper):
+ change_params = ('value', )
+ diff_params = ('value', )
+ output_params = ('property', 'channel', 'value')
+ facts_params = ('property', 'channel', 'value')
+ module = dict(
+ argument_spec=dict(
+ state=dict(type='str', choices=("present", "absent"), default="present"),
+ channel=dict(type='str', required=True),
+ property=dict(type='str', required=True),
+ value_type=dict(type='list', elements='str',
+ choices=('string', 'int', 'double', 'bool', 'uint', 'uchar', 'char', 'uint64', 'int64', 'float')),
+ value=dict(type='list', elements='raw'),
+ force_array=dict(type='bool', default=False, aliases=['array']),
+ disable_facts=dict(
+ type='bool', default=True,
+ removed_in_version='8.0.0',
+ removed_from_collection='community.general'
+ ),
+ ),
+ required_if=[('state', 'present', ['value', 'value_type'])],
+ required_together=[('value', 'value_type')],
+ supports_check_mode=True,
+ )
+
+ default_state = 'present'
+
+ def update_xfconf_output(self, **kwargs):
+ self.update_vars(meta={"output": True, "fact": True}, **kwargs)
+
+ def __init_module__(self):
+ self.runner = xfconf_runner(self.module)
+ self.does_not = 'Property "{0}" does not exist on channel "{1}".'.format(self.vars.property,
+ self.vars.channel)
+ self.vars.set('previous_value', self._get(), fact=True)
+ self.vars.set('type', self.vars.value_type, fact=True)
+ self.vars.meta('value').set(initial_value=self.vars.previous_value)
+
+ if self.vars.disable_facts is False:
+ self.do_raise('Returning results as facts has been removed. Stop using disable_facts=false.')
+
+ def process_command_output(self, rc, out, err):
+ if err.rstrip() == self.does_not:
+ return None
+ if rc or len(err):
+ self.do_raise('xfconf-query failed with error (rc={0}): {1}'.format(rc, err))
+
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
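+ # Based on observed xfconf-query output: the first line is the
+ # "Value is an array with N items:" header and the second is blank,
+ # so both are dropped to keep only the array elements.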
+ result.pop(0)
+ result.pop(0)
+
+ return result
+
+ def _get(self):
+ with self.runner('channel property', output_process=self.process_command_output) as ctx:
+ return ctx.run()
+
+ def state_absent(self):
+ with self.runner('channel property reset', check_mode_skip=True) as ctx:
+ ctx.run(reset=True)
+ self.vars.stdout = ctx.results_out
+ self.vars.stderr = ctx.results_err
+ self.vars.cmd = ctx.cmd
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+ self.vars.value = None
+
+ def state_present(self):
+ # Stringify all values - they are passed to the CLI as strings anyway,
+ # and doing it here keeps the rest of the code agnostic to the original types.
+ self.vars.value = [str(v) for v in self.vars.value]
+ value_type = self.vars.value_type
+
+ values_len = len(self.vars.value)
+ types_len = len(value_type)
+
+ if types_len == 1:
+ # use one single type for the entire list
+ value_type = value_type * values_len
+ elif types_len != values_len:
+ # or complain if lists' lengths are different
+ self.do_raise('Number of elements in "value" and "value_type" must be the same')
+
+ # Decide whether to treat the value as an array: forced via force_array,
+ # previously stored as an array, or containing more than one element.
+ self.vars.is_array = \
+ bool(self.vars.force_array) or \
+ isinstance(self.vars.previous_value, list) or \
+ values_len > 1
+
+ with self.runner('channel property create force_array values_and_types', check_mode_skip=True) as ctx:
+ ctx.run(create=True, force_array=self.vars.is_array, values_and_types=(self.vars.value, value_type))
+ self.vars.stdout = ctx.results_out
+ self.vars.stderr = ctx.results_err
+ self.vars.cmd = ctx.cmd
+ if self.verbosity >= 4:
+ self.vars.run_info = ctx.run_info
+
+ if not self.vars.is_array:
+ self.vars.value = self.vars.value[0]
+ self.vars.type = value_type[0]
+ else:
+ self.vars.type = value_type
+
+
+def main():
+ XFConfProperty.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xfconf_info.py b/ansible_collections/community/general/plugins/modules/xfconf_info.py
new file mode 100644
index 000000000..0a99201ef
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xfconf_info.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: xfconf_info
+author:
+ - "Alexei Znamensky (@russoz)"
+short_description: Retrieve XFCE4 configurations
+version_added: 3.5.0
+description:
+ - This module allows retrieving Xfce 4 configurations with the help of C(xfconf-query).
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.info_module
+attributes:
+ check_mode:
+ version_added: 3.3.0
+ # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix
+options:
+ channel:
+ description:
+ - >
+ An Xfconf preference channel is a top-level tree key inside the
+ Xfconf repository, corresponding to the location where all
+ application properties/keys are stored.
+ - If not provided, the module will list all available channels.
+ type: str
+ property:
+ description:
+ - >
+ An Xfce preference key is an element in the Xfconf repository
+ that corresponds to an application preference.
+ - If provided, then I(channel) is required.
+ - If not provided and a I(channel) is provided, then the module will list all available properties in that I(channel).
+ type: str
+notes:
+ - See man xfconf-query(1) for more details.
+'''
+
+EXAMPLES = """
+- name: Get list of all available channels
+ community.general.xfconf_info: {}
+ register: result
+
+- name: Get list of all properties in a specific channel
+ community.general.xfconf_info:
+ channel: xsettings
+ register: result
+
+- name: Retrieve the DPI value
+ community.general.xfconf_info:
+ channel: xsettings
+ property: /Xft/DPI
+ register: result
+
+- name: Get workspace names (4)
+ community.general.xfconf_info:
+ channel: xfwm4
+ property: /general/workspace_names
+ register: result
+"""
+
+RETURN = '''
+ channels:
+ description:
+ - List of available channels.
+ - Returned when the module receives no parameter at all.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - xfce4-desktop
+ - displays
+ - xsettings
+ - xfwm4
+ properties:
+ description:
+ - List of available properties for a specific channel.
+ - Returned by passing only the I(channel) parameter to the module.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - /Gdk/WindowScalingFactor
+ - /Gtk/ButtonImages
+ - /Gtk/CursorThemeSize
+ - /Gtk/DecorationLayout
+ - /Gtk/FontName
+ - /Gtk/MenuImages
+ - /Gtk/MonospaceFontName
+ - /Net/DoubleClickTime
+ - /Net/IconThemeName
+ - /Net/ThemeName
+ - /Xft/Antialias
+ - /Xft/Hinting
+ - /Xft/HintStyle
+ - /Xft/RGBA
+ is_array:
+ description:
+ - Flag indicating whether the property is an array or not.
+ returned: success
+ type: bool
+ value:
+ description:
+ - The value of the property. Empty if the property is of array type.
+ returned: success
+ type: str
+ sample: Monospace 10
+ value_array:
+ description:
+ - The array value of the property. Empty if the property is not of array type.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - Main
+ - Work
+ - Tmp
+'''
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.xfconf import xfconf_runner
+
+
+class XFConfInfo(ModuleHelper):
+ module = dict(
+ argument_spec=dict(
+ channel=dict(type='str'),
+ property=dict(type='str'),
+ ),
+ required_by=dict(
+ property=['channel']
+ ),
+ supports_check_mode=True,
+ )
+
+ def __init_module__(self):
+ self.runner = xfconf_runner(self.module, check_rc=True)
+ self.vars.set("list_arg", False, output=False)
+ self.vars.set("is_array", False)
+
+ def process_command_output(self, rc, out, err):
+ result = out.rstrip()
+ if "Value is an array with" in result:
+ result = result.split("\n")
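+ # As in the xfconf module above: drop the "Value is an array with
+ # N items:" header line and the blank line that follows it.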
+ result.pop(0)
+ result.pop(0)
+ self.vars.is_array = True
+
+ return result
+
+ def _process_list_properties(self, rc, out, err):
+ return out.splitlines()
+
+ def _process_list_channels(self, rc, out, err):
+ lines = out.splitlines()
+ lines.pop(0)
+ lines = [s.lstrip() for s in lines]
+ return lines
+
+ def __run__(self):
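+ # Decide what to return based on the parameters given: no channel means
+ # list all channels, a channel without a property means list that
+ # channel's properties, and both means fetch the value (reported as
+ # value_array when xfconf-query indicates an array).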
+ self.vars.list_arg = not (bool(self.vars.channel) and bool(self.vars.property))
+ output = 'value'
+ proc = self.process_command_output
+ if self.vars.channel is None:
+ output = 'channels'
+ proc = self._process_list_channels
+ elif self.vars.property is None:
+ output = 'properties'
+ proc = self._process_list_properties
+
+ with self.runner.context('list_arg channel property', output_process=proc) as ctx:
+ result = ctx.run(**self.vars)
+
+ if not self.vars.list_arg and self.vars.is_array:
+ output = "value_array"
+ self.vars.set(output, result)
+
+
+def main():
+ XFConfInfo.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xfs_quota.py b/ansible_collections/community/general/plugins/modules/xfs_quota.py
new file mode 100644
index 000000000..6d0521990
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xfs_quota.py
@@ -0,0 +1,504 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Emmanouil Kampitakis <info@kampitakis.de>
+# Copyright (c) 2018, William Leemans <willie@elaba.net>
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: xfs_quota
+short_description: Manage quotas on XFS filesystems
+description:
+ - Configure quotas on XFS filesystems.
+ - Before using this module, /etc/projects and /etc/projid need to be configured.
+author:
+ - William Leemans (@bushvin)
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ type:
+ description:
+ - The XFS quota type.
+ type: str
+ required: true
+ choices:
+ - user
+ - group
+ - project
+ name:
+ description:
+ - The name of the user, group or project to apply the quota to, if other than default.
+ type: str
+ mountpoint:
+ description:
+ - The mount point on which to apply the quotas.
+ type: str
+ required: true
+ bhard:
+ description:
+ - Hard blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ bsoft:
+ description:
+ - Soft blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ ihard:
+ description:
+ - Hard inodes quota limit.
+ type: int
+ isoft:
+ description:
+ - Soft inodes quota limit.
+ type: int
+ rtbhard:
+ description:
+ - Hard realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ rtbsoft:
+ description:
+ - Soft realtime blocks quota limit.
+ - This argument supports human readable sizes.
+ type: str
+ state:
+ description:
+ - Whether to apply the limits or remove them.
+ - When removing limits, they are set to 0 rather than actually removed.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+
+requirements:
+ - xfsprogs
+"""
+
+EXAMPLES = r"""
+- name: Set default project soft and hard limit on /opt of 1g
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ bsoft: 1g
+ bhard: 1g
+ state: present
+
+- name: Remove the default limits on /opt
+ community.general.xfs_quota:
+ type: project
+ mountpoint: /opt
+ state: absent
+
+- name: Set default soft user inode limits on /home of 1024 inodes and hard of 2048
+ community.general.xfs_quota:
+ type: user
+ mountpoint: /home
+ isoft: 1024
+ ihard: 2048
+
+"""
+
+RETURN = r"""
+bhard:
+ description: The current bhard setting in bytes.
+ returned: always
+ type: int
+ sample: 1024
+bsoft:
+ description: The current bsoft setting in bytes.
+ returned: always
+ type: int
+ sample: 1024
+ihard:
+ description: The current ihard setting in inodes.
+ returned: always
+ type: int
+ sample: 100
+isoft:
+ description: The current isoft setting in inodes.
+ returned: always
+ type: int
+ sample: 100
+rtbhard:
+ description: The current rtbhard setting in bytes.
+ returned: always
+ type: int
+ sample: 1024
+rtbsoft:
+ description: The current rtbsoft setting in bytes.
+ returned: always
+ type: int
+ sample: 1024
+"""
+
+import grp
+import os
+import pwd
+
+from ansible.module_utils.basic import AnsibleModule, human_to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ bhard=dict(type="str"),
+ bsoft=dict(type="str"),
+ ihard=dict(type="int"),
+ isoft=dict(type="int"),
+ mountpoint=dict(type="str", required=True),
+ name=dict(type="str"),
+ rtbhard=dict(type="str"),
+ rtbsoft=dict(type="str"),
+ state=dict(type="str", default="present", choices=["absent", "present"]),
+ type=dict(type="str", required=True, choices=["group", "project", "user"]),
+ ),
+ supports_check_mode=True,
+ )
+
+ quota_type = module.params["type"]
+ name = module.params["name"]
+ mountpoint = module.params["mountpoint"]
+ bhard = module.params["bhard"]
+ bsoft = module.params["bsoft"]
+ ihard = module.params["ihard"]
+ isoft = module.params["isoft"]
+ rtbhard = module.params["rtbhard"]
+ rtbsoft = module.params["rtbsoft"]
+ state = module.params["state"]
+
+ xfs_quota_bin = module.get_bin_path("xfs_quota", True)
+
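+ # Block limits may be given in human readable form (for example "1g");
+ # normalize them to bytes so they can be compared against the current
+ # limits reported by quota_report().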
+ if bhard is not None:
+ bhard = human_to_bytes(bhard)
+
+ if bsoft is not None:
+ bsoft = human_to_bytes(bsoft)
+
+ if rtbhard is not None:
+ rtbhard = human_to_bytes(rtbhard)
+
+ if rtbsoft is not None:
+ rtbsoft = human_to_bytes(rtbsoft)
+
+ result = dict(
+ changed=False,
+ )
+
+ if not os.path.ismount(mountpoint):
+ module.fail_json(msg="Path '%s' is not a mount point" % mountpoint, **result)
+
+ mp = get_fs_by_mountpoint(mountpoint)
+ if mp is None:
+ module.fail_json(
+ msg="Path '%s' is not a mount point or not located on an xfs file system."
+ % mountpoint,
+ **result
+ )
+
+ if quota_type == "user":
+ type_arg = "-u"
+ quota_default = "root"
+ if name is None:
+ name = quota_default
+
+ if (
+ "uquota" not in mp["mntopts"]
+ and "usrquota" not in mp["mntopts"]
+ and "quota" not in mp["mntopts"]
+ and "uqnoenforce" not in mp["mntopts"]
+ and "qnoenforce" not in mp["mntopts"]
+ ):
+ module.fail_json(
+ msg="Path '%s' is not mounted with the uquota/usrquota/quota/uqnoenforce/qnoenforce option."
+ % mountpoint,
+ **result
+ )
+ try:
+ pwd.getpwnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == "group":
+ type_arg = "-g"
+ quota_default = "root"
+ if name is None:
+ name = quota_default
+
+ if (
+ "gquota" not in mp["mntopts"]
+ and "grpquota" not in mp["mntopts"]
+ and "gqnoenforce" not in mp["mntopts"]
+ ):
+ module.fail_json(
+ msg="Path '%s' is not mounted with the gquota/grpquota/gqnoenforce option. (current options: %s)"
+ % (mountpoint, mp["mntopts"]),
+ **result
+ )
+ try:
+ grp.getgrnam(name)
+ except KeyError as e:
+ module.fail_json(msg="User '%s' does not exist." % name, **result)
+
+ elif quota_type == "project":
+ type_arg = "-p"
+ quota_default = "#0"
+ if name is None:
+ name = quota_default
+
+ if (
+ "pquota" not in mp["mntopts"]
+ and "prjquota" not in mp["mntopts"]
+ and "pqnoenforce" not in mp["mntopts"]
+ ):
+ module.fail_json(
+ msg="Path '%s' is not mounted with the pquota/prjquota/pqnoenforce option."
+ % mountpoint,
+ **result
+ )
+
+ if name != quota_default and not os.path.isfile("/etc/projects"):
+ module.fail_json(msg="Path '/etc/projects' does not exist.", **result)
+
+ if name != quota_default and not os.path.isfile("/etc/projid"):
+ module.fail_json(msg="Path '/etc/projid' does not exist.", **result)
+
+ if name != quota_default and name is not None and get_project_id(name) is None:
+ module.fail_json(
+ msg="Entry '%s' has not been defined in /etc/projid." % name, **result
+ )
+
+ prj_set = True
+ if name != quota_default:
+ cmd = "project %s" % name
+ rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+ if rc != 0:
+ result["cmd"] = cmd
+ result["rc"] = rc
+ result["stdout"] = stdout
+ result["stderr"] = stderr
+ module.fail_json(msg="Could not get project state.", **result)
+ else:
+ for line in stdout.split("\n"):
+ if (
+ "Project Id '%s' - is not set." in line
+ or "project identifier is not set" in line
+ ):
+ prj_set = False
+ break
+
+ if state == "present" and not prj_set:
+ if not module.check_mode:
+ cmd = "project -s %s" % name
+ rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+ if rc != 0:
+ result["cmd"] = cmd
+ result["rc"] = rc
+ result["stdout"] = stdout
+ result["stderr"] = stderr
+ module.fail_json(
+ msg="Could not get quota realtime block report.", **result
+ )
+
+ result["changed"] = True
+
+ elif state == "absent" and prj_set and name != quota_default:
+ if not module.check_mode:
+ cmd = "project -C %s" % name
+ rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+ if rc != 0:
+ result["cmd"] = cmd
+ result["rc"] = rc
+ result["stdout"] = stdout
+ result["stderr"] = stderr
+ module.fail_json(
+ msg="Failed to clear managed tree from project quota control.", **result
+ )
+
+ result["changed"] = True
+
+ current_bsoft, current_bhard = quota_report(
+ module, xfs_quota_bin, mountpoint, name, quota_type, "b"
+ )
+ current_isoft, current_ihard = quota_report(
+ module, xfs_quota_bin, mountpoint, name, quota_type, "i"
+ )
+ current_rtbsoft, current_rtbhard = quota_report(
+ module, xfs_quota_bin, mountpoint, name, quota_type, "rtb"
+ )
+
+ # Set limits
+ if state == "absent":
+ bhard = 0
+ bsoft = 0
+ ihard = 0
+ isoft = 0
+ rtbhard = 0
+ rtbsoft = 0
+
+ # Ensure that a non-existing quota does not trigger a change
+ current_bsoft = current_bsoft if current_bsoft is not None else 0
+ current_bhard = current_bhard if current_bhard is not None else 0
+ current_isoft = current_isoft if current_isoft is not None else 0
+ current_ihard = current_ihard if current_ihard is not None else 0
+ current_rtbsoft = current_rtbsoft if current_rtbsoft is not None else 0
+ current_rtbhard = current_rtbhard if current_rtbhard is not None else 0
+
+ result["xfs_quota"] = dict(
+ bsoft=current_bsoft,
+ bhard=current_bhard,
+ isoft=current_isoft,
+ ihard=current_ihard,
+ rtbsoft=current_rtbsoft,
+ rtbhard=current_rtbhard,
+ )
+
+ limit = []
+ if bsoft is not None and int(bsoft) != current_bsoft:
+ limit.append("bsoft=%s" % bsoft)
+ result["bsoft"] = int(bsoft)
+
+ if bhard is not None and int(bhard) != current_bhard:
+ limit.append("bhard=%s" % bhard)
+ result["bhard"] = int(bhard)
+
+ if isoft is not None and isoft != current_isoft:
+ limit.append("isoft=%s" % isoft)
+ result["isoft"] = isoft
+
+ if ihard is not None and ihard != current_ihard:
+ limit.append("ihard=%s" % ihard)
+ result["ihard"] = ihard
+
+ if rtbsoft is not None and int(rtbsoft) != current_rtbsoft:
+ limit.append("rtbsoft=%s" % rtbsoft)
+ result["rtbsoft"] = int(rtbsoft)
+
+ if rtbhard is not None and int(rtbhard) != current_rtbhard:
+ limit.append("rtbhard=%s" % rtbhard)
+ result["rtbhard"] = int(rtbhard)
+
+ if len(limit) > 0:
+ if not module.check_mode:
+ if name == quota_default:
+ cmd = "limit %s -d %s" % (type_arg, " ".join(limit))
+ else:
+ cmd = "limit %s %s %s" % (type_arg, " ".join(limit), name)
+
+ rc, stdout, stderr = exec_quota(module, xfs_quota_bin, cmd, mountpoint)
+ if rc != 0:
+ result["cmd"] = cmd
+ result["rc"] = rc
+ result["stdout"] = stdout
+ result["stderr"] = stderr
+ module.fail_json(msg="Could not set limits.", **result)
+
+ result["changed"] = True
+
+ module.exit_json(**result)
+
+
+def quota_report(module, xfs_quota_bin, mountpoint, name, quota_type, used_type):
+ soft = None
+ hard = None
+
+ if quota_type == "project":
+ type_arg = "-p"
+ elif quota_type == "user":
+ type_arg = "-u"
+ elif quota_type == "group":
+ type_arg = "-g"
+
+ if used_type == "b":
+ used_arg = "-b"
+ used_name = "blocks"
+ factor = 1024
+ elif used_type == "i":
+ used_arg = "-i"
+ used_name = "inodes"
+ factor = 1
+ elif used_type == "rtb":
+ used_arg = "-r"
+ used_name = "realtime blocks"
+ factor = 1024
+
+ rc, stdout, stderr = exec_quota(
+ module, xfs_quota_bin, "report %s %s" % (type_arg, used_arg), mountpoint
+ )
+
+ if rc != 0:
+ result = dict(
+ changed=False,
+ rc=rc,
+ stdout=stdout,
+ stderr=stderr,
+ )
+ module.fail_json(msg="Could not get quota report for %s." % used_name, **result)
+
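+ # Layout assumption based on xfs_quota report output: each data row reads
+ # "name used soft hard ...", so columns 2 and 3 hold the soft and hard
+ # limits. Block counts are reported in KiB, hence factor 1024 to get bytes.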
+ for line in stdout.split("\n"):
+ line = line.strip().split()
+ if len(line) > 3 and line[0] == name:
+ soft = int(line[2]) * factor
+ hard = int(line[3]) * factor
+ break
+
+ return soft, hard
+
+
+def exec_quota(module, xfs_quota_bin, cmd, mountpoint):
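+ # Invoke xfs_quota in expert mode (-x), passing the command string via -c
+ # and targeting the given mount point.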
+ cmd = [xfs_quota_bin, "-x", "-c"] + [cmd, mountpoint]
+ (rc, stdout, stderr) = module.run_command(cmd, use_unsafe_shell=True)
+ if (
+ "XFS_GETQUOTA: Operation not permitted" in stderr.split("\n")
+ or rc == 1
+ and "xfs_quota: cannot set limits: Operation not permitted"
+ in stderr.split("\n")
+ ):
+ module.fail_json(
+ msg="You need to be root or have CAP_SYS_ADMIN capability to perform this operation"
+ )
+
+ return rc, stdout, stderr
+
+
+def get_fs_by_mountpoint(mountpoint):
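+ # Scan /proc/mounts for an xfs entry whose mount point matches. Each line
+ # has six fields (spec, file, vfstype, mntopts, freq, passno); mntopts is
+ # split into a list for the mount option checks in main().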
+ mpr = None
+ with open("/proc/mounts", "r") as s:
+ for line in s.readlines():
+ mp = line.strip().split()
+ if len(mp) == 6 and mp[1] == mountpoint and mp[2] == "xfs":
+ mpr = dict(
+ zip(["spec", "file", "vfstype", "mntopts", "freq", "passno"], mp)
+ )
+ mpr["mntopts"] = mpr["mntopts"].split(",")
+ break
+ return mpr
+
+
+def get_project_id(name):
+ prjid = None
+ with open("/etc/projid", "r") as s:
+ for line in s.readlines():
+ line = line.strip().partition(":")
+ if line[0] == name:
+ prjid = line[2]
+ break
+
+ return prjid
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/xml.py b/ansible_collections/community/general/plugins/modules/xml.py
new file mode 100644
index 000000000..5b9bba355
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/xml.py
@@ -0,0 +1,996 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2014, Red Hat, Inc.
+# Copyright (c) 2014, Tim Bielawa <tbielawa@redhat.com>
+# Copyright (c) 2014, Magnus Hedemark <mhedemar@redhat.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: xml
+short_description: Manage bits and pieces of XML files or strings
+description:
+ - A CRUD-like interface to managing bits of XML files.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ path:
+ description:
+ - Path to the file to operate on.
+ - This file must exist ahead of time.
+ - This parameter is required, unless I(xmlstring) is given.
+ type: path
+ aliases: [ dest, file ]
+ xmlstring:
+ description:
+ - A string containing XML on which to operate.
+ - This parameter is required, unless I(path) is given.
+ type: str
+ xpath:
+ description:
+ - A valid XPath expression describing the item(s) you want to manipulate.
+ - Operates on the document root, C(/), by default.
+ type: str
+ namespaces:
+ description:
+ - The namespace C(prefix:uri) mapping for the XPath expression.
+ - Needs to be a C(dict), not a C(list) of items.
+ type: dict
+ default: {}
+ state:
+ description:
+ - Set or remove an xpath selection (node(s), attribute(s)).
+ type: str
+ choices: [ absent, present ]
+ default: present
+ aliases: [ ensure ]
+ attribute:
+ description:
+ - The attribute to select when using parameter I(value).
+ - This is a string, not prepended with C(@).
+ type: raw
+ value:
+ description:
+ - Desired state of the selected attribute.
+ - Either a string, or to unset a value, the Python C(None) keyword (YAML equivalent: C(null)).
+ - Elements default to no value (but present).
+ - Attributes default to an empty string.
+ type: raw
+ add_children:
+ description:
+ - Add additional child-element(s) to a selected element for a given I(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires I(xpath) to be set.
+ type: list
+ elements: raw
+ set_children:
+ description:
+ - Set the child-element(s) of a selected element for a given I(xpath).
+ - Removes any existing children.
+ - Child elements must be specified as in I(add_children).
+ - This parameter requires I(xpath) to be set.
+ type: list
+ elements: raw
+ count:
+ description:
+ - Search for a given I(xpath) and provide the count of any matches.
+ - This parameter requires I(xpath) to be set.
+ type: bool
+ default: false
+ print_match:
+ description:
+ - Search for a given I(xpath) and print out any matches.
+ - This parameter requires I(xpath) to be set.
+ type: bool
+ default: false
+ pretty_print:
+ description:
+ - Pretty print XML output.
+ type: bool
+ default: false
+ content:
+ description:
+ - Search for a given I(xpath) and get content.
+ - This parameter requires I(xpath) to be set.
+ type: str
+ choices: [ attribute, text ]
+ input_type:
+ description:
+ - Type of input for I(add_children) and I(set_children).
+ type: str
+ choices: [ xml, yaml ]
+ default: yaml
+ backup:
+ description:
+ - Create a backup file including the timestamp information so you can get
+ the original file back if you somehow clobbered it incorrectly.
+ type: bool
+ default: false
+ strip_cdata_tags:
+ description:
+ - Remove CDATA tags surrounding text values.
+ - Note that this might break your XML file if text values contain characters that could be interpreted as XML.
+ type: bool
+ default: false
+ insertbefore:
+ description:
+ - Add additional child-element(s) before the first selected element for a given I(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires I(xpath) to be set.
+ type: bool
+ default: false
+ insertafter:
+ description:
+ - Add additional child-element(s) after the last selected element for a given I(xpath).
+ - Child elements must be given in a list and each item may be either a string
+ (e.g. C(children=ansible) to add an empty C(<ansible/>) child element),
+ or a hash where the key is an element name and the value is the element value.
+ - This parameter requires I(xpath) to be set.
+ type: bool
+ default: false
+requirements:
+- lxml >= 2.3.0
+notes:
+- Use the C(--check) and C(--diff) options when testing your expressions.
+- The diff output is automatically pretty-printed, so may not reflect the actual file content, only the file structure.
+- This module does not handle complicated xpath expressions, so limit xpath selectors to simple expressions.
+- Beware that in case your XML elements are namespaced, you need to use the I(namespaces) parameter, see the examples.
+- A namespace prefix should be used for all children of an element where a namespace is defined, unless another namespace is defined for them.
+seealso:
+- name: Xml module development community wiki
+ description: More information related to the development of this xml module.
+ link: https://github.com/ansible/community/wiki/Module:-xml
+- name: Introduction to XPath
+ description: A brief tutorial on XPath (w3schools.com).
+ link: https://www.w3schools.com/xml/xpath_intro.asp
+- name: XPath Reference document
+ description: The reference documentation on XSLT/XPath (developer.mozilla.org).
+ link: https://developer.mozilla.org/en-US/docs/Web/XPath
+author:
+- Tim Bielawa (@tbielawa)
+- Magnus Hedemark (@magnus919)
+- Dag Wieers (@dagwieers)
+'''
+
+EXAMPLES = r'''
+# Consider the following XML file:
+#
+# <business type="bar">
+# <name>Tasty Beverage Co.</name>
+# <beers>
+# <beer>Rochefort 10</beer>
+# <beer>St. Bernardus Abbot 12</beer>
+# <beer>Schlitz</beer>
+# </beers>
+# <rating subjective="true">10</rating>
+# <website>
+# <mobilefriendly/>
+# <address>http://tastybeverageco.com</address>
+# </website>
+# </business>
+
+- name: Remove the 'subjective' attribute of the 'rating' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating/@subjective
+ state: absent
+
+- name: Set the rating to '11'
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/rating
+ value: 11
+
+# Retrieve and display the number of nodes
+- name: Get count of 'beers' nodes
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers/beer
+ count: true
+ register: hits
+
+- ansible.builtin.debug:
+ var: hits.count
+
+# Example where parent XML nodes are created automatically
+- name: Add a 'phonenumber' element to the 'business' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add several more beers to the 'beers' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+- name: Add several more beers to the 'beers' element and add them before the 'Rochefort 10' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: '/business/beers/beer[text()="Rochefort 10"]'
+ insertbefore: true
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+
+# NOTE: The 'state' defaults to 'present' and 'value' defaults to 'null' for elements
+- name: Add a 'validxhtml' element to the 'website' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty 'validatedon' attribute to the 'validxhtml' element
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml/@validatedon
+
+- name: Add or modify an attribute, add element if needed
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ attribute: validatedon
+ value: 1976-08-05
+
+# How to read an attribute value and access it in Ansible
+- name: Read an element's attribute values
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/validxhtml
+ content: attribute
+ register: xmlresp
+
+- name: Show an attribute value
+ ansible.builtin.debug:
+ var: xmlresp.matches[0].validxhtml.validatedon
+
+- name: Remove all children from the 'website' element (option 1)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website/*
+ state: absent
+
+- name: Remove all children from the 'website' element (option 2)
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business/website
+ set_children: []
+
+# In case of namespaces, like in below XML, they have to be explicitly stated.
+#
+# <foo xmlns="http://x.test" xmlns:attr="http://z.test">
+# <bar>
+# <baz xmlns="http://y.test" attr:my_namespaced_attribute="true" />
+# </bar>
+# </foo>
+
+# NOTE: There is the prefix 'x' in front of the 'bar' element, too.
+- name: Set namespaced '/x:foo/x:bar/y:baz/@z:my_namespaced_attribute' to 'false'
+ community.general.xml:
+ path: foo.xml
+ xpath: /x:foo/x:bar/y:baz
+ namespaces:
+ x: http://x.test
+ y: http://y.test
+ z: http://z.test
+ attribute: z:my_namespaced_attribute
+ value: 'false'
+
+- name: Adding building nodes with floor subnodes from a YAML variable
+ community.general.xml:
+ path: /foo/bar.xml
+ xpath: /business
+ add_children:
+ - building:
+ # Attributes
+ name: Scumm bar
+ location: Monkey island
+ # Subnodes
+ _:
+ - floor: Pirate hall
+ - floor: Grog storage
+ - construction_date: "1990" # Only strings are valid
+ - building: Grog factory
+
+# Consider this XML for following example -
+#
+# <config>
+# <element name="test1">
+# <text>part to remove</text>
+# </element>
+# <element name="test2">
+# <text>part to keep</text>
+# </element>
+# </config>
+
+- name: Delete element node based upon attribute
+ community.general.xml:
+ path: bar.xml
+ xpath: /config/element[@name='test1']
+ state: absent
+'''
+
+RETURN = r'''
+actions:
+ description: A dictionary with the original xpath, namespaces and state.
+ type: dict
+ returned: success
+ sample: {xpath: xpath, namespaces: [namespace1, namespace2], state: present}
+backup_file:
+ description: The name of the backup file that was created
+ type: str
+ returned: when I(backup=true)
+ sample: /path/to/file.xml.1942.2017-08-24@14:16:01~
+count:
+ description: The count of xpath matches.
+ type: int
+ returned: when parameter 'count' is set
+ sample: 2
+matches:
+ description: The xpath matches found.
+ type: list
+ returned: when parameter 'print_match' is set
+msg:
+ description: A message related to the performed action(s).
+ type: str
+ returned: always
+xmlstring:
+ description: An XML string of the resulting output.
+ type: str
+ returned: when parameter 'xmlstring' is set
+'''
+
+import copy
+import json
+import os
+import re
+import traceback
+
+from io import BytesIO
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+LXML_IMP_ERR = None
+try:
+ from lxml import etree, objectify
+ HAS_LXML = True
+except ImportError:
+ LXML_IMP_ERR = traceback.format_exc()
+ HAS_LXML = False
+
+from ansible.module_utils.basic import AnsibleModule, json_dict_bytes_to_unicode, missing_required_lib
+from ansible.module_utils.six import iteritems, string_types
+from ansible.module_utils.common.text.converters import to_bytes, to_native
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+_IDENT = r"[a-zA-Z-][a-zA-Z0-9_\-\.]*"
+_NSIDENT = _IDENT + "|" + _IDENT + ":" + _IDENT
+# Note: we can't reasonably support the 'if you need to put both ' and " in a string, concatenate
+# strings wrapped by the other delimiter' XPath trick, especially as simple XPath.
+_XPSTR = "('(?:.*)'|\"(?:.*)\")"
+
+_RE_SPLITSIMPLELAST = re.compile("^(.*)/(" + _NSIDENT + ")$")
+_RE_SPLITSIMPLELASTEQVALUE = re.compile("^(.*)/(" + _NSIDENT + ")/text\\(\\)=" + _XPSTR + "$")
+_RE_SPLITSIMPLEATTRLAST = re.compile("^(.*)/(@(?:" + _NSIDENT + "))$")
+_RE_SPLITSIMPLEATTRLASTEQVALUE = re.compile("^(.*)/(@(?:" + _NSIDENT + "))=" + _XPSTR + "$")
+_RE_SPLITSUBLAST = re.compile("^(.*)/(" + _NSIDENT + ")\\[(.*)\\]$")
+_RE_SPLITONLYEQVALUE = re.compile("^(.*)/text\\(\\)=" + _XPSTR + "$")
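+
+# For example, _RE_SPLITSIMPLELAST splits "/foo/bar/baz" into the groups
+# ("/foo/bar", "baz"), which split_xpath_last() below returns as
+# ("/foo/bar", [("baz", None)]).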
+
+
+def has_changed(doc):
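+ # Compare the serialized forms of the module-level `orig_doc` snapshot
+ # (the document as originally loaded) and the current tree.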
+ orig_obj = etree.tostring(objectify.fromstring(etree.tostring(orig_doc)))
+ obj = etree.tostring(objectify.fromstring(etree.tostring(doc)))
+ return (orig_obj != obj)
+
+
+def do_print_match(module, tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ match_xpaths = []
+ for m in match:
+ match_xpaths.append(tree.getpath(m))
+ match_str = json.dumps(match_xpaths)
+ msg = "selector '%s' match: %s" % (xpath, match_str)
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg)
+
+
+def count_nodes(module, tree, xpath, namespaces):
+ """ Return the count of nodes matching the xpath """
+ hits = tree.xpath("count(/%s)" % xpath, namespaces=namespaces)
+ msg = "found %d nodes" % hits
+ finish(module, tree, xpath, namespaces, changed=False, msg=msg, hitcount=int(hits))
+
+
+def is_node(tree, xpath, namespaces):
+ """ Test if a given xpath matches anything and if that match is a node.
+
+ For now we just assume you're only searching for one specific thing."""
+ if xpath_matches(tree, xpath, namespaces):
+ # OK, it found something
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._Element):
+ return True
+
+ return False
+
+
+def is_attribute(tree, xpath, namespaces):
+ """ Test if a given xpath matches and that match is an attribute
+
+ An xpath attribute search will only match one item"""
+ if xpath_matches(tree, xpath, namespaces):
+ match = tree.xpath(xpath, namespaces=namespaces)
+ if isinstance(match[0], etree._ElementStringResult):
+ return True
+ elif isinstance(match[0], etree._ElementUnicodeResult):
+ return True
+ return False
+
+
+def xpath_matches(tree, xpath, namespaces):
+ """ Test if a node exists """
+ if tree.xpath(xpath, namespaces=namespaces):
+ return True
+ return False
+
+
+def delete_xpath_target(module, tree, xpath, namespaces):
+ """ Delete an attribute or element from a tree """
+ changed = False
+ try:
+ for result in tree.xpath(xpath, namespaces=namespaces):
+ changed = True
+ # Get the xpath for this result
+ if is_attribute(tree, xpath, namespaces):
+ # Delete an attribute
+ parent = result.getparent()
+ # Pop this attribute match out of the parent
+ # node's 'attrib' dict by using this match's
+ # 'attrname' attribute for the key
+ parent.attrib.pop(result.attrname)
+ elif is_node(tree, xpath, namespaces):
+ # Delete an element
+ result.getparent().remove(result)
+ else:
+ raise Exception("Impossible error")
+ except Exception as e:
+ module.fail_json(msg="Couldn't delete xpath target: %s (%s)" % (xpath, e))
+ else:
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def replace_children_of(children, match):
+ for element in list(match):
+ match.remove(element)
+ match.extend(children)
+
+
+def set_target_children_inner(module, tree, xpath, namespaces, children, in_type):
+ matches = tree.xpath(xpath, namespaces=namespaces)
+
+ # Create a list of our new children
+ children = children_to_nodes(module, children, in_type)
+ children_as_string = [etree.tostring(c) for c in children]
+
+ changed = False
+
+ # xpaths always return matches as a list, so....
+ for match in matches:
+ # Check if elements differ
+ if len(list(match)) == len(children):
+ for idx, element in enumerate(list(match)):
+ if etree.tostring(element) != children_as_string[idx]:
+ replace_children_of(children, match)
+ changed = True
+ break
+ else:
+ replace_children_of(children, match)
+ changed = True
+
+ return changed
+
+
+def set_target_children(module, tree, xpath, namespaces, children, in_type):
+ changed = set_target_children_inner(module, tree, xpath, namespaces, children, in_type)
+ # Write it out
+ finish(module, tree, xpath, namespaces, changed=changed)
+
+
+def add_target_children(module, tree, xpath, namespaces, children, in_type, insertbefore, insertafter):
+ if is_node(tree, xpath, namespaces):
+ new_kids = children_to_nodes(module, children, in_type)
+ if insertbefore or insertafter:
+ insert_target_children(tree, xpath, namespaces, new_kids, insertbefore, insertafter)
+ else:
+ for node in tree.xpath(xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ finish(module, tree, xpath, namespaces, changed=True)
+ else:
+ finish(module, tree, xpath, namespaces)
+
+
+def insert_target_children(tree, xpath, namespaces, children, insertbefore, insertafter):
+ """
+ Insert the given children before or after the given xpath. If insertbefore is True, it is inserted before the
+ first xpath hit, with insertafter, it is inserted after the last xpath hit.
+ """
+ insert_target = tree.xpath(xpath, namespaces=namespaces)
+ loc_index = 0 if insertbefore else -1
+ index_in_parent = insert_target[loc_index].getparent().index(insert_target[loc_index])
+ parent = insert_target[0].getparent()
+ if insertafter:
+ index_in_parent += 1
+ for child in children:
+ parent.insert(index_in_parent, child)
+ index_in_parent += 1
+
+
+def _extract_xpstr(g):
+ return g[1:-1]
+
+
+def split_xpath_last(xpath):
+ """split an XPath of the form /foo/bar/baz into /foo/bar and baz"""
+ xpath = xpath.strip()
+ m = _RE_SPLITSIMPLELAST.match(xpath)
+ if m:
+ # requesting an element to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLELASTEQVALUE.match(xpath)
+ if m:
+ # requesting an element to exist with an inner text
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSIMPLEATTRLAST.match(xpath)
+ if m:
+ # requesting an attribute to exist
+ return (m.group(1), [(m.group(2), None)])
+ m = _RE_SPLITSIMPLEATTRLASTEQVALUE.match(xpath)
+ if m:
+ # requesting an attribute to exist with a value
+ return (m.group(1), [(m.group(2), _extract_xpstr(m.group(3)))])
+
+ m = _RE_SPLITSUBLAST.match(xpath)
+ if m:
+ content = [x.strip() for x in m.group(3).split(" and ")]
+ return (m.group(1), [('/' + m.group(2), content)])
+
+ m = _RE_SPLITONLYEQVALUE.match(xpath)
+ if m:
+ # requesting a change of inner text
+ return (m.group(1), [("", _extract_xpstr(m.group(2)))])
+ return (xpath, [])
+
+
+def nsnameToClark(name, namespaces):
+ if ":" in name:
+ (nsname, rawname) = name.split(":")
+ # return "{{%s}}%s" % (namespaces[nsname], rawname)
+ return "{{{0}}}{1}".format(namespaces[nsname], rawname)
+
+ # no namespace name here
+ return name
+
+
+def check_or_make_target(module, tree, xpath, namespaces):
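+ # Recursively ensure the inner part of the xpath exists, then apply the
+ # trailing element/attribute/text change parsed out by split_xpath_last().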
+ (inner_xpath, changes) = split_xpath_last(xpath)
+ if (inner_xpath == xpath) or (changes is None):
+ module.fail_json(msg="Can't process Xpath %s in order to spawn nodes! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+ return False
+
+ changed = False
+
+ if not is_node(tree, inner_xpath, namespaces):
+ changed = check_or_make_target(module, tree, inner_xpath, namespaces)
+
+ # we test again after calling check_or_make_target
+ if is_node(tree, inner_xpath, namespaces) and changes:
+ for (eoa, eoa_value) in changes:
+ if eoa and eoa[0] != '@' and eoa[0] != '/':
+ # implicitly creating an element
+ new_kids = children_to_nodes(module, [nsnameToClark(eoa, namespaces)], "yaml")
+ if eoa_value:
+ for nk in new_kids:
+ nk.text = eoa_value
+
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ changed = True
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa and eoa[0] == '/':
+ element = eoa[1:]
+ new_kids = children_to_nodes(module, [nsnameToClark(element, namespaces)], "yaml")
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ node.extend(new_kids)
+ for nk in new_kids:
+ for subexpr in eoa_value:
+ # module.fail_json(msg="element=%s subexpr=%s node=%s now tree=%s" %
+ # (element, subexpr, etree.tostring(node, pretty_print=True), etree.tostring(tree, pretty_print=True))
+ check_or_make_target(module, nk, "./" + subexpr, namespaces)
+ changed = True
+
+ # module.fail_json(msg="now tree=%s" % etree.tostring(tree, pretty_print=True))
+ elif eoa == "":
+ for node in tree.xpath(inner_xpath, namespaces=namespaces):
+ if (node.text != eoa_value):
+ node.text = eoa_value
+ changed = True
+
+ elif eoa and eoa[0] == '@':
+ attribute = nsnameToClark(eoa[1:], namespaces)
+
+ for element in tree.xpath(inner_xpath, namespaces=namespaces):
+ changing = (attribute not in element.attrib or element.attrib[attribute] != eoa_value)
+
+ if changing:
+ changed = changed or changing
+ if eoa_value is None:
+ value = ""
+ else:
+ value = eoa_value
+ element.attrib[attribute] = value
+
+ # module.fail_json(msg="arf %s changing=%s as curval=%s changed tree=%s" %
+ # (xpath, changing, etree.tostring(tree, changing, element[attribute], pretty_print=True)))
+
+ else:
+ module.fail_json(msg="unknown tree transformation=%s" % etree.tostring(tree, pretty_print=True))
+
+ return changed
+
+
+def ensure_xpath_exists(module, tree, xpath, namespaces):
+ changed = False
+
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def set_target_inner(module, tree, xpath, namespaces, attribute, value):
+ changed = False
+
+ try:
+ if not is_node(tree, xpath, namespaces):
+ changed = check_or_make_target(module, tree, xpath, namespaces)
+ except Exception as e:
+ missing_namespace = ""
+ # NOTE: This checks only the namespaces defined in root element!
+ # TODO: Implement a more robust check to check for child namespaces' existence
+ if tree.getroot().nsmap and ":" not in xpath:
+ missing_namespace = "XML document has namespace(s) defined, but no namespace prefix(es) used in xpath!\n"
+ module.fail_json(msg="%sXpath %s causes a failure: %s\n -- tree is %s" %
+ (missing_namespace, xpath, e, etree.tostring(tree, pretty_print=True)), exception=traceback.format_exc())
+
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node! tree is %s" %
+ (xpath, etree.tostring(tree, pretty_print=True)))
+
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ if not attribute:
+ changed = changed or (element.text != value)
+ if element.text != value:
+ element.text = value
+ else:
+ changed = changed or (element.get(attribute) != value)
+ if ":" in attribute:
+ attr_ns, attr_name = attribute.split(":")
+ # attribute = "{{%s}}%s" % (namespaces[attr_ns], attr_name)
+ attribute = "{{{0}}}{1}".format(namespaces[attr_ns], attr_name)
+ if element.get(attribute) != value:
+ element.set(attribute, value)
+
+ return changed
+
+
+def set_target(module, tree, xpath, namespaces, attribute, value):
+ changed = set_target_inner(module, tree, xpath, namespaces, attribute, value)
+ finish(module, tree, xpath, namespaces, changed)
+
+
+def get_element_text(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ elements.append({element.tag: element.text})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def get_element_attr(module, tree, xpath, namespaces):
+ if not is_node(tree, xpath, namespaces):
+ module.fail_json(msg="Xpath %s does not reference a node!" % xpath)
+
+ elements = []
+ for element in tree.xpath(xpath, namespaces=namespaces):
+ child = {}
+ for key in element.keys():
+ value = element.get(key)
+ child.update({key: value})
+ elements.append({element.tag: child})
+
+ finish(module, tree, xpath, namespaces, changed=False, msg=len(elements), hitcount=len(elements), matches=elements)
+
+
+def child_to_element(module, child, in_type):
+ if in_type == 'xml':
+ infile = BytesIO(to_bytes(child, errors='surrogate_or_strict'))
+
+ try:
+ parser = etree.XMLParser()
+ node = etree.parse(infile, parser)
+ return node.getroot()
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing child element: %s" % e)
+ elif in_type == 'yaml':
+ if isinstance(child, string_types):
+ return etree.Element(child)
+ elif isinstance(child, MutableMapping):
+ if len(child) > 1:
+ module.fail_json(msg="Can only create children from hashes with one key")
+
+ (key, value) = next(iteritems(child))
+ if isinstance(value, MutableMapping):
+ children = value.pop('_', None)
+
+ node = etree.Element(key, value)
+
+ if children is not None:
+ if not isinstance(children, list):
+ module.fail_json(msg="Invalid children type: %s, must be list." % type(children))
+
+ subnodes = children_to_nodes(module, children)
+ node.extend(subnodes)
+ else:
+ node = etree.Element(key)
+ node.text = value
+ return node
+ else:
+ module.fail_json(msg="Invalid child type: %s. Children must be either strings or hashes." % type(child))
+ else:
+ module.fail_json(msg="Invalid child input type: %s. Type must be either xml or yaml." % in_type)
+
+
+def children_to_nodes(module=None, children=None, type='yaml'):
+ """turn a str/hash/list of str&hash into a list of elements"""
+ children = [] if children is None else children
+
+ return [child_to_element(module, child, type) for child in children]
+
+
+def make_pretty(module, tree):
+ xml_string = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ result = dict(
+ changed=False,
+ )
+
+ if module.params['path']:
+ xml_file = module.params['path']
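+        # Compare the freshly serialized document against the on-disk bytes to
+        # decide whether pretty-printing would change the file.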
+ with open(xml_file, 'rb') as xml_content:
+ if xml_string != xml_content.read():
+ result['changed'] = True
+ if not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(xml_file, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ elif module.params['xmlstring']:
+ result['xmlstring'] = xml_string
+        # NOTE: Only report a change when the serialized string differs from the input.
+ if xml_string != module.params['xmlstring']:
+ result['changed'] = True
+
+ module.exit_json(**result)
+
+
+def finish(module, tree, xpath, namespaces, changed=False, msg='', hitcount=0, matches=tuple()):
+
+ result = dict(
+ actions=dict(
+ xpath=xpath,
+ namespaces=namespaces,
+ state=module.params['state']
+ ),
+ changed=has_changed(tree),
+ )
+
+ if module.params['count'] or hitcount:
+ result['count'] = hitcount
+
+ if module.params['print_match'] or matches:
+ result['matches'] = matches
+
+ if msg:
+ result['msg'] = msg
+
+ if result['changed']:
+ if module._diff:
+ result['diff'] = dict(
+ before=etree.tostring(orig_doc, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ after=etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=True),
+ )
+
+ if module.params['path'] and not module.check_mode:
+ if module.params['backup']:
+ result['backup_file'] = module.backup_local(module.params['path'])
+ tree.write(module.params['path'], xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ if module.params['xmlstring']:
+ result['xmlstring'] = etree.tostring(tree, xml_declaration=True, encoding='UTF-8', pretty_print=module.params['pretty_print'])
+
+ module.exit_json(**result)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='path', aliases=['dest', 'file']),
+ xmlstring=dict(type='str'),
+ xpath=dict(type='str'),
+ namespaces=dict(type='dict', default={}),
+ state=dict(type='str', default='present', choices=['absent', 'present'], aliases=['ensure']),
+ value=dict(type='raw'),
+ attribute=dict(type='raw'),
+ add_children=dict(type='list', elements='raw'),
+ set_children=dict(type='list', elements='raw'),
+ count=dict(type='bool', default=False),
+ print_match=dict(type='bool', default=False),
+ pretty_print=dict(type='bool', default=False),
+ content=dict(type='str', choices=['attribute', 'text']),
+ input_type=dict(type='str', default='yaml', choices=['xml', 'yaml']),
+ backup=dict(type='bool', default=False),
+ strip_cdata_tags=dict(type='bool', default=False),
+ insertbefore=dict(type='bool', default=False),
+ insertafter=dict(type='bool', default=False),
+ ),
+ supports_check_mode=True,
+ required_by=dict(
+ add_children=['xpath'],
+ attribute=['value'],
+ content=['xpath'],
+ set_children=['xpath'],
+ value=['xpath'],
+ ),
+ required_if=[
+ ['count', True, ['xpath']],
+ ['print_match', True, ['xpath']],
+ ['insertbefore', True, ['xpath']],
+ ['insertafter', True, ['xpath']],
+ ],
+ required_one_of=[
+ ['path', 'xmlstring'],
+ ['add_children', 'content', 'count', 'pretty_print', 'print_match', 'set_children', 'value'],
+ ],
+ mutually_exclusive=[
+ ['add_children', 'content', 'count', 'print_match', 'set_children', 'value'],
+ ['path', 'xmlstring'],
+ ['insertbefore', 'insertafter'],
+ ],
+ )
+
+ xml_file = module.params['path']
+ xml_string = module.params['xmlstring']
+ xpath = module.params['xpath']
+ namespaces = module.params['namespaces']
+ state = module.params['state']
+ value = json_dict_bytes_to_unicode(module.params['value'])
+ attribute = module.params['attribute']
+ set_children = json_dict_bytes_to_unicode(module.params['set_children'])
+ add_children = json_dict_bytes_to_unicode(module.params['add_children'])
+ pretty_print = module.params['pretty_print']
+ content = module.params['content']
+ input_type = module.params['input_type']
+ print_match = module.params['print_match']
+ count = module.params['count']
+ backup = module.params['backup']
+ strip_cdata_tags = module.params['strip_cdata_tags']
+ insertbefore = module.params['insertbefore']
+ insertafter = module.params['insertafter']
+
+ # Check if we have lxml 2.3.0 or newer installed
+ if not HAS_LXML:
+ module.fail_json(msg=missing_required_lib("lxml"), exception=LXML_IMP_ERR)
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('2.3.0'):
+ module.fail_json(msg='The xml ansible module requires lxml 2.3.0 or newer installed on the managed machine')
+ elif LooseVersion('.'.join(to_native(f) for f in etree.LXML_VERSION)) < LooseVersion('3.0.0'):
+ module.warn('Using lxml version lower than 3.0.0 does not guarantee predictable element attribute order.')
+
+ # Check if the file exists
+ if xml_string:
+ infile = BytesIO(to_bytes(xml_string, errors='surrogate_or_strict'))
+ elif os.path.isfile(xml_file):
+ infile = open(xml_file, 'rb')
+ else:
+ module.fail_json(msg="The target XML source '%s' does not exist." % xml_file)
+
+ # Parse and evaluate xpath expression
+ if xpath is not None:
+ try:
+ etree.XPath(xpath)
+ except etree.XPathSyntaxError as e:
+ module.fail_json(msg="Syntax error in xpath expression: %s (%s)" % (xpath, e))
+ except etree.XPathEvalError as e:
+ module.fail_json(msg="Evaluation error in xpath expression: %s (%s)" % (xpath, e))
+
+ # Try to parse in the target XML file
+ try:
+ parser = etree.XMLParser(remove_blank_text=pretty_print, strip_cdata=strip_cdata_tags)
+ doc = etree.parse(infile, parser)
+ except etree.XMLSyntaxError as e:
+ module.fail_json(msg="Error while parsing document: %s (%s)" % (xml_file or 'xml_string', e))
+
+ # Ensure we have the original copy to compare
+ global orig_doc
+ orig_doc = copy.deepcopy(doc)
+
+ if print_match:
+ do_print_match(module, doc, xpath, namespaces)
+
+ if count:
+ count_nodes(module, doc, xpath, namespaces)
+
+ if content == 'attribute':
+ get_element_attr(module, doc, xpath, namespaces)
+ elif content == 'text':
+ get_element_text(module, doc, xpath, namespaces)
+
+ # File exists:
+ if state == 'absent':
+ # - absent: delete xpath target
+ delete_xpath_target(module, doc, xpath, namespaces)
+
+ # - present: carry on
+
+ # children && value both set?: should have already aborted by now
+ # add_children && set_children both set?: should have already aborted by now
+
+ # set_children set?
+ if set_children is not None:
+ set_target_children(module, doc, xpath, namespaces, set_children, input_type)
+
+ # add_children set?
+ if add_children:
+ add_target_children(module, doc, xpath, namespaces, add_children, input_type, insertbefore, insertafter)
+
+ # No?: Carry on
+
+ # Is the xpath target an attribute selector?
+ if value is not None:
+ set_target(module, doc, xpath, namespaces, attribute, value)
+
+ # If an xpath was provided, we need to do something with the data
+ if xpath is not None:
+ ensure_xpath_exists(module, doc, xpath, namespaces)
+
+ # Otherwise only reformat the xml data?
+ if pretty_print:
+ make_pretty(module, doc)
+
+ module.fail_json(msg="Don't know what to do")
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/yarn.py b/ansible_collections/community/general/plugins/modules/yarn.py
new file mode 100644
index 000000000..c278951d5
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/yarn.py
@@ -0,0 +1,408 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017 David Gunter <david.gunter@tivix.com>
+# Copyright (c) 2017 Chris Hoffman <christopher.hoffman@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: yarn
+short_description: Manage node.js packages with Yarn
+description:
+  - Manage node.js packages with the Yarn package manager (https://yarnpkg.com/).
+author:
+ - "David Gunter (@verkaufer)"
+ - "Chris Hoffman (@chrishoffman), creator of NPM Ansible module)"
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ type: str
+ description:
+      - The name of a node.js library to install.
+      - If omitted, all packages in package.json are installed.
+      - To globally install from a local node.js library, prepend "file:" to the path of the library.
+ required: false
+ path:
+ type: path
+ description:
+ - The base path where Node.js libraries will be installed.
+ - This is where the node_modules folder lives.
+ required: false
+ version:
+ type: str
+ description:
+ - The version of the library to be installed.
+      - Must be in semver format. If "latest" is desired, use the "state" arg instead.
+ required: false
+ global:
+ description:
+      - Install the node.js library globally.
+ required: false
+ default: false
+ type: bool
+ executable:
+ type: path
+ description:
+ - The executable location for yarn.
+ required: false
+ ignore_scripts:
+ description:
+ - Use the --ignore-scripts flag when installing.
+ required: false
+ type: bool
+ default: false
+ production:
+ description:
+ - Install dependencies in production mode.
+      - Yarn will ignore any dependencies under devDependencies in package.json.
+ required: false
+ type: bool
+ default: false
+ registry:
+ type: str
+ description:
+ - The registry to install modules from.
+ required: false
+ state:
+ type: str
+ description:
+      - Installation state of the named node.js library.
+      - If absent is selected, a name option must be provided.
+ required: false
+ default: present
+ choices: [ "present", "absent", "latest" ]
+requirements:
+  - Yarn installed in bin path (typically /usr/local/bin).
+'''
+
+EXAMPLES = '''
+- name: Install "imagemin" node.js package.
+ community.general.yarn:
+ name: imagemin
+ path: /app/location
+
+- name: Install "imagemin" node.js package on version 5.3.1
+ community.general.yarn:
+ name: imagemin
+ version: '5.3.1'
+ path: /app/location
+
+- name: Install "imagemin" node.js package globally.
+ community.general.yarn:
+ name: imagemin
+ global: true
+
+- name: Remove the globally-installed package "imagemin".
+ community.general.yarn:
+ name: imagemin
+ global: true
+ state: absent
+
+- name: Install "imagemin" node.js package from custom registry.
+ community.general.yarn:
+ name: imagemin
+ registry: 'http://registry.mysite.com'
+
+- name: Install packages based on package.json.
+ community.general.yarn:
+ path: /app/location
+
+- name: Update all packages in package.json to their latest version.
+ community.general.yarn:
+ path: /app/location
+ state: latest
+'''
+
+RETURN = '''
+changed:
+ description: Whether Yarn changed any package data
+ returned: always
+ type: bool
+ sample: true
+msg:
+ description: Provides an error message if Yarn syntax was incorrect
+ returned: failure
+ type: str
+ sample: "Package must be explicitly named when uninstalling."
+invocation:
+ description: Parameters and values used during execution
+ returned: success
+ type: dict
+ sample: {
+ "module_args": {
+ "executable": null,
+ "globally": false,
+ "ignore_scripts": false,
+ "name": null,
+ "path": "/some/path/folder",
+ "production": false,
+ "registry": null,
+ "state": "present",
+ "version": null
+ }
+ }
+out:
+ description: Output generated from Yarn.
+ returned: always
+ type: str
+ sample: "yarn add v0.16.1[1/4] Resolving packages...[2/4] Fetching packages...[3/4] Linking dependencies...[4/4]
+ Building fresh packages...success Saved lockfile.success Saved 1 new dependency..left-pad@1.1.3 Done in 0.59s."
+'''
+
+import os
+import json
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Yarn(object):
+
+ def __init__(self, module, **kwargs):
+ self.module = module
+ self.globally = kwargs['globally']
+ self.name = kwargs['name']
+ self.version = kwargs['version']
+ self.path = kwargs['path']
+ self.registry = kwargs['registry']
+ self.production = kwargs['production']
+ self.ignore_scripts = kwargs['ignore_scripts']
+ self.executable = kwargs['executable']
+
+ # Specify a version of package if version arg passed in
+ self.name_version = None
+
+ if kwargs['version'] and self.name is not None:
+ self.name_version = self.name + '@' + str(self.version)
+ elif self.name is not None:
+ self.name_version = self.name
+
+ def _exec(self, args, run_in_check_mode=False, check_rc=True, unsupported_with_global=False):
+ if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
+
+ with_global_arg = self.globally and not unsupported_with_global
+
+ if with_global_arg:
+ # Yarn global arg is inserted before the command (e.g. `yarn global {some-command}`)
+ args.insert(0, 'global')
+
+ cmd = self.executable + args
+
+ if self.production:
+ cmd.append('--production')
+ if self.ignore_scripts:
+ cmd.append('--ignore-scripts')
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
+
+ # If path is specified, cd into that path and run the command.
+ cwd = None
+ if self.path and not with_global_arg:
+ if not os.path.exists(self.path):
+                # The module creates the directory if it does not exist.
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="Path provided %s is not a directory" % self.path)
+ cwd = self.path
+
+ if not os.path.isfile(os.path.join(self.path, 'package.json')):
+ self.module.fail_json(msg="Package.json does not exist in provided path.")
+
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
+ return out, err
+
+ return None, None
+
+ def _process_yarn_error(self, err):
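+        # With --json, Yarn emits newline-delimited JSON on stderr, e.g.
+        #   {"type":"warning","data":"..."}  (non-fatal)
+        #   {"type":"error","data":"..."}    (fatal)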
+ try:
+ # We need to filter for errors, since Yarn warnings are included in stderr
+ for line in err.splitlines():
+ if json.loads(line)['type'] == 'error':
+ self.module.fail_json(msg=err)
+ except Exception:
+ self.module.fail_json(msg="Unexpected stderr output from Yarn: %s" % err, stderr=err)
+
+ def list(self):
+ cmd = ['list', '--depth=0', '--json']
+
+ installed = list()
+ missing = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ missing.append(self.name)
+ return installed, missing
+
+ # `yarn global list` should be treated as "unsupported with global" even though it exists,
+        # because it only lists binaries, but `yarn global add` can install libraries too.
+ result, error = self._exec(cmd, run_in_check_mode=True, check_rc=False, unsupported_with_global=True)
+
+ self._process_yarn_error(error)
+
+ for json_line in result.strip().split('\n'):
+ data = json.loads(json_line)
+ if data['type'] == 'tree':
+ dependencies = data['data']['trees']
+
+ for dep in dependencies:
+ name, version = dep['name'].rsplit('@', 1)
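+            # rsplit from the right keeps scoped packages intact,
+            # e.g. "@types/node@18.0.0" -> ("@types/node", "18.0.0").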
+ installed.append(name)
+
+ if self.name not in installed:
+ missing.append(self.name)
+
+ return installed, missing
+
+ def install(self):
+ if self.name_version:
+ # Yarn has a separate command for installing packages by name...
+ return self._exec(['add', self.name_version])
+ # And one for installing all packages in package.json
+ return self._exec(['install', '--non-interactive'])
+
+ def update(self):
+ return self._exec(['upgrade', '--latest'])
+
+ def uninstall(self):
+ return self._exec(['remove', self.name])
+
+ def list_outdated(self):
+ outdated = list()
+
+ if not os.path.isfile(os.path.join(self.path, 'yarn.lock')):
+ return outdated
+
+ cmd_result, err = self._exec(['outdated', '--json'], True, False, unsupported_with_global=True)
+
+ # the package.json in the global dir is missing a license field, so warnings are expected on stderr
+ self._process_yarn_error(err)
+
+ if not cmd_result:
+ return outdated
+
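+        # `yarn outdated --json` emits newline-delimited JSON; the table payload that
+        # lists outdated packages is expected on the second line.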
+ outdated_packages_data = cmd_result.splitlines()[1]
+
+ data = json.loads(outdated_packages_data)
+
+ try:
+ outdated_dependencies = data['data']['body']
+ except KeyError:
+ return outdated
+
+ for dep in outdated_dependencies:
+            # Outdated dependencies are returned as a list of lists, where the
+            # item at index 0 is the name of the dependency.
+ outdated.append(dep[0])
+ return outdated
+
+
+def main():
+ arg_spec = dict(
+ name=dict(default=None),
+ path=dict(default=None, type='path'),
+ version=dict(default=None),
+ production=dict(default=False, type='bool'),
+ executable=dict(default=None, type='path'),
+ registry=dict(default=None),
+ state=dict(default='present', choices=['present', 'absent', 'latest']),
+ ignore_scripts=dict(default=False, type='bool'),
+ )
+ arg_spec['global'] = dict(default=False, type='bool')
+ module = AnsibleModule(
+ argument_spec=arg_spec,
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ path = module.params['path']
+ version = module.params['version']
+ globally = module.params['global']
+ production = module.params['production']
+ registry = module.params['registry']
+ state = module.params['state']
+ ignore_scripts = module.params['ignore_scripts']
+
+ # When installing globally, users should not be able to define a path for installation.
+ # Require a path if global is False, though!
+ if path is None and globally is False:
+ module.fail_json(msg='Path must be specified when not using global arg')
+ elif path and globally is True:
+ module.fail_json(msg='Cannot specify path if doing global installation')
+
+ if state == 'absent' and not name:
+ module.fail_json(msg='Package must be explicitly named when uninstalling.')
+ if state == 'latest':
+ version = 'latest'
+
+ if module.params['executable']:
+ executable = module.params['executable'].split(' ')
+ else:
+ executable = [module.get_bin_path('yarn', True)]
+
+ # When installing globally, use the defined path for global node_modules
+ if globally:
+ _rc, out, _err = module.run_command(executable + ['global', 'dir'], check_rc=True)
+ path = out.strip()
+
+ yarn = Yarn(module,
+ name=name,
+ path=path,
+ version=version,
+ globally=globally,
+ production=production,
+ executable=executable,
+ registry=registry,
+ ignore_scripts=ignore_scripts)
+
+ changed = False
+ out = ''
+ err = ''
+ if state == 'present':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+
+ elif state == 'latest':
+
+ if not name:
+ changed = True
+ out, err = yarn.install()
+ else:
+ installed, missing = yarn.list()
+ outdated = yarn.list_outdated()
+ if len(missing):
+ changed = True
+ out, err = yarn.install()
+ if len(outdated):
+ changed = True
+ out, err = yarn.update()
+ else:
+ # state == absent
+ installed, missing = yarn.list()
+ if name in installed:
+ changed = True
+ out, err = yarn.uninstall()
+
+ module.exit_json(changed=changed, out=out, err=err)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/yum_versionlock.py b/ansible_collections/community/general/plugins/modules/yum_versionlock.py
new file mode 100644
index 000000000..e5d32dc77
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/yum_versionlock.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Florian Paul Azim Hoberg <florian.hoberg@credativ.de>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: yum_versionlock
+version_added: 2.0.0
+short_description: Locks / unlocks installed packages from being updated by the yum package manager
+description:
+ - This module adds installed packages to yum versionlock to prevent the package(s) from being updated.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Package name or a list of package names with optional wildcards.
+ type: list
+ required: true
+ elements: str
+ state:
+ description:
+ - If state is C(present), package(s) will be added to yum versionlock list.
+ - If state is C(absent), package(s) will be removed from yum versionlock list.
+ choices: [ 'absent', 'present' ]
+ type: str
+ default: present
+notes:
+ - Requires yum-plugin-versionlock package on the remote node.
+requirements:
+- yum
+- yum-versionlock
+author:
+ - Florian Paul Azim Hoberg (@gyptazy)
+ - Amin Vakil (@aminvakil)
+'''
+
+EXAMPLES = r'''
+- name: Prevent Apache / httpd from being updated
+ community.general.yum_versionlock:
+ state: present
+ name: httpd
+
+- name: Prevent multiple packages from being updated
+ community.general.yum_versionlock:
+ state: present
+ name:
+ - httpd
+ - nginx
+ - haproxy
+ - curl
+
+- name: Remove lock from Apache / httpd to be updated again
+ community.general.yum_versionlock:
+ state: absent
+ name: httpd
+'''
+
+RETURN = r'''
+packages:
+ description: A list of package(s) in versionlock list.
+ returned: success
+ type: list
+ elements: str
+ sample: [ 'httpd' ]
+state:
+ description: State of package(s).
+ returned: success
+ type: str
+ sample: present
+'''
+
+import re
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.text.converters import to_native
+from fnmatch import fnmatch
+
+# on DNF-based distros, yum is a symlink to dnf, so we try to handle their different entry formats.
+NEVRA_RE_YUM = re.compile(r'^(?P<exclude>!)?(?P<epoch>\d+):(?P<name>.+)-'
+ r'(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$')
+NEVRA_RE_DNF = re.compile(r"^(?P<exclude>!)?(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-"
+ r"(?P<release>.+)\.(?P<arch>.+)$")
+
+
+class YumVersionLock:
+ def __init__(self, module):
+ self.module = module
+ self.params = module.params
+ self.yum_bin = module.get_bin_path('yum', required=True)
+
+ def get_versionlock_packages(self):
+ """ Get an overview of all packages on yum versionlock """
+ rc, out, err = self.module.run_command([self.yum_bin, "versionlock", "list"])
+ if rc == 0:
+ return out
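+        # 'o such command:' matches both "No such command:" and "no such command:".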
+ elif rc == 1 and 'o such command:' in err:
+ self.module.fail_json(msg="Error: Please install rpm package yum-plugin-versionlock : " + to_native(err) + to_native(out))
+ self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
+
+ def ensure_state(self, packages, command):
+ """ Ensure packages state """
+ rc, out, err = self.module.run_command([self.yum_bin, "-q", "versionlock", command] + packages)
+ if rc == 0:
+ return True
+ self.module.fail_json(msg="Error: " + to_native(err) + to_native(out))
+
+
+def match(entry, name):
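+    # e.g. match("0:httpd-2.4.6-93.el7.x86_64", "httpd*") is True: the wildcard is
+    # applied to the extracted package name only, via fnmatch.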
+ m = NEVRA_RE_YUM.match(entry)
+ if not m:
+ m = NEVRA_RE_DNF.match(entry)
+ if not m:
+ return False
+ return fnmatch(m.group("name"), name)
+
+
+def main():
+ """ start main program to add/remove a package to yum versionlock"""
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', choices=['present', 'absent']),
+ name=dict(required=True, type='list', elements='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ state = module.params['state']
+ packages = module.params['name']
+ changed = False
+
+ yum_v = YumVersionLock(module)
+
+ # Get an overview of all packages that have a version lock
+ versionlock_packages = yum_v.get_versionlock_packages()
+
+ # Ensure versionlock state of packages
+ packages_list = []
+ if state in ('present', ):
+ command = 'add'
+ for single_pkg in packages:
+ if not any(match(pkg, single_pkg) for pkg in versionlock_packages.split()):
+ packages_list.append(single_pkg)
+ if packages_list:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = yum_v.ensure_state(packages_list, command)
+ elif state in ('absent', ):
+ command = 'delete'
+ for single_pkg in packages:
+ if any(match(pkg, single_pkg) for pkg in versionlock_packages.split()):
+ packages_list.append(single_pkg)
+ if packages_list:
+ if module.check_mode:
+ changed = True
+ else:
+ changed = yum_v.ensure_state(packages_list, command)
+
+ module.exit_json(
+ changed=changed,
+ meta={
+ "packages": packages,
+ "state": state
+ }
+ )
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zfs.py b/ansible_collections/community/general/plugins/modules/zfs.py
new file mode 100644
index 000000000..4cd79c36e
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zfs.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
+# Copyright (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: zfs
+short_description: Manage zfs
+description:
+  - Manages ZFS file systems, volumes, clones and snapshots.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - In certain situations it may report a task as changed that will not be reported
+ as changed when C(check_mode) is disabled.
+ - For example, this might occur when the zpool C(altroot) option is set or when
+ a size is written using human-readable notation, such as C(1M) or C(1024K),
+ instead of as an unqualified byte count, such as C(1048576).
+ diff_mode:
+ support: full
+options:
+ name:
+ description:
+      - File system, snapshot or volume name, for example C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to create (C(present)), or remove (C(absent)) a
+ file system, snapshot or volume. All parents/children
+ will be created/destroyed as needed to reach the desired state.
+ choices: [ absent, present ]
+ required: true
+ type: str
+ origin:
+ description:
+ - Snapshot from which to create a clone.
+ type: str
+ extra_zfs_properties:
+ description:
+ - A dictionary of zfs properties to be set.
+ - See the zfs(8) man page for more information.
+ type: dict
+ default: {}
+author:
+- Johan Wiren (@johanwiren)
+'''
+
+EXAMPLES = '''
+- name: Create a new file system called myfs in pool rpool with the setuid property turned off
+ community.general.zfs:
+ name: rpool/myfs
+ state: present
+ extra_zfs_properties:
+ setuid: 'off'
+
+- name: Create a new volume called myvol in pool rpool.
+ community.general.zfs:
+ name: rpool/myvol
+ state: present
+ extra_zfs_properties:
+ volsize: 10M
+
+- name: Create a snapshot of rpool/myfs file system.
+ community.general.zfs:
+ name: rpool/myfs@mysnapshot
+ state: present
+
+- name: Create a new file system called myfs2 with snapdir enabled
+ community.general.zfs:
+ name: rpool/myfs2
+ state: present
+ extra_zfs_properties:
+ snapdir: enabled
+
+- name: Create a new file system by cloning a snapshot
+ community.general.zfs:
+ name: rpool/cloned_fs
+ state: present
+ origin: rpool/myfs@mysnapshot
+
+- name: Destroy a filesystem
+ community.general.zfs:
+ name: rpool/myfs
+ state: absent
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Zfs(object):
+
+ def __init__(self, module, name, properties):
+ self.module = module
+ self.name = name
+ self.properties = properties
+ self.changed = False
+ self.zfs_cmd = module.get_bin_path('zfs', True)
+ self.zpool_cmd = module.get_bin_path('zpool', True)
+ self.pool = name.split('/')[0].split('@')[0]
+ self.is_solaris = os.uname()[0] == 'SunOS'
+ self.is_openzfs = self.check_openzfs()
+ self.enhanced_sharing = self.check_enhanced_sharing()
+
+ def check_openzfs(self):
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
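+        # Feature-flag pools report version '-' or the sentinel 5000, both meaning
+        # OpenZFS; e.g. the last line of `zpool get version rpool` can read
+        # "rpool  version  -  default".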
+ if version == '-':
+ return True
+ if int(version) == 5000:
+ return True
+ return False
+
+ def check_enhanced_sharing(self):
+ if self.is_solaris and not self.is_openzfs:
+ cmd = [self.zpool_cmd]
+ cmd.extend(['get', 'version'])
+ cmd.append(self.pool)
+ (rc, out, err) = self.module.run_command(cmd, check_rc=True)
+ version = out.splitlines()[-1].split()[2]
+ if int(version) >= 34:
+ return True
+ return False
+
+ def exists(self):
+ cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
+ rc, dummy, dummy = self.module.run_command(cmd)
+ return rc == 0
+
+ def create(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ properties = self.properties
+ origin = self.module.params.get('origin')
+ cmd = [self.zfs_cmd]
+
+ if "@" in self.name:
+ action = 'snapshot'
+ elif origin:
+ action = 'clone'
+ else:
+ action = 'create'
+
+ cmd.append(action)
+
+ if action in ['create', 'clone']:
+ cmd += ['-p']
+
+ if properties:
+ for prop, value in properties.items():
+ if prop == 'volsize':
+ cmd += ['-V', value]
+ elif prop == 'volblocksize':
+ cmd += ['-b', value]
+ else:
+ cmd += ['-o', '%s=%s' % (prop, value)]
+ if origin and action == 'clone':
+ cmd.append(origin)
+ cmd.append(self.name)
+ self.module.run_command(cmd, check_rc=True)
+ self.changed = True
+
+ def destroy(self):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
+ self.module.run_command(cmd, check_rc=True)
+ self.changed = True
+
+ def set_property(self, prop, value):
+ if self.module.check_mode:
+ self.changed = True
+ return
+ cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
+ self.module.run_command(cmd, check_rc=True)
+
+ def set_properties_if_changed(self):
+ diff = {'before': {'extra_zfs_properties': {}}, 'after': {'extra_zfs_properties': {}}}
+ current_properties = self.get_current_properties()
+ for prop, value in self.properties.items():
+ current_value = current_properties.get(prop, None)
+ if current_value != value:
+ self.set_property(prop, value)
+ diff['before']['extra_zfs_properties'][prop] = current_value
+ diff['after']['extra_zfs_properties'][prop] = value
+ if self.module.check_mode:
+ return diff
+ updated_properties = self.get_current_properties()
+ for prop in self.properties:
+ value = updated_properties.get(prop, None)
+ if value is None:
+ self.module.fail_json(msg="zfsprop was not present after being successfully set: %s" % prop)
+ if current_properties.get(prop, None) != value:
+ self.changed = True
+ if prop in diff['after']['extra_zfs_properties']:
+ diff['after']['extra_zfs_properties'][prop] = value
+ return diff
+
+ def get_current_properties(self):
+ cmd = [self.zfs_cmd, 'get', '-H', '-p', '-o', "property,value,source"]
+ if self.enhanced_sharing:
+ cmd += ['-e']
+ cmd += ['all', self.name]
+ rc, out, err = self.module.run_command(cmd)
+ properties = dict()
+ for line in out.splitlines():
+ prop, value, source = line.split('\t')
+            # include source '-' so that creation-only properties are not removed,
+            # which avoids errors when the dataset already exists and the property is unchanged;
+            # this scenario is most likely when the same playbook is run more than once
+ if source in ('local', 'received', '-'):
+ properties[prop] = value
+ # Add alias for enhanced sharing properties
+ if self.enhanced_sharing:
+ properties['sharenfs'] = properties.get('share.nfs', None)
+ properties['sharesmb'] = properties.get('share.smb', None)
+ return properties
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ origin=dict(type='str'),
+ extra_zfs_properties=dict(type='dict', default={}),
+ ),
+ supports_check_mode=True,
+ )
+
+ state = module.params.get('state')
+ name = module.params.get('name')
+
+ if module.params.get('origin') and '@' in name:
+ module.fail_json(msg='cannot specify origin when operating on a snapshot')
+
+ # Reverse the boolification of zfs properties
+ for prop, value in module.params['extra_zfs_properties'].items():
+ if isinstance(value, bool):
+ if value is True:
+ module.params['extra_zfs_properties'][prop] = 'on'
+ else:
+ module.params['extra_zfs_properties'][prop] = 'off'
+ else:
+ module.params['extra_zfs_properties'][prop] = value
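+    # e.g. a playbook value of `setuid: false` (parsed by YAML as a boolean) reaches
+    # zfs as the string 'off'.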
+
+ result = dict(
+ name=name,
+ state=state,
+ )
+
+ zfs = Zfs(module, name, module.params['extra_zfs_properties'])
+
+ if state == 'present':
+ if zfs.exists():
+ result['diff'] = zfs.set_properties_if_changed()
+ else:
+ zfs.create()
+ result['diff'] = {'before': {'state': 'absent'}, 'after': {'state': state}}
+
+ elif state == 'absent':
+ if zfs.exists():
+ zfs.destroy()
+ result['diff'] = {'before': {'state': 'present'}, 'after': {'state': state}}
+ else:
+ result['diff'] = {}
+
+ result['diff']['before_header'] = name
+ result['diff']['after_header'] = name
+
+ result.update(zfs.properties)
+ result['changed'] = zfs.changed
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
new file mode 100644
index 000000000..0536f1a28
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zfs_delegate_admin.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2015, Nate Coraor <nate@coraor.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: zfs_delegate_admin
+short_description: Manage ZFS delegated administration (user admin privileges)
+description:
+ - Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
+ operations normally restricted to the superuser.
+ - See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
+ - This module attempts to adhere to the behavior of the command line tool as much as possible.
+requirements:
+ - "A ZFS/OpenZFS implementation that supports delegation with C(zfs allow), including: Solaris >= 10, illumos (all
+ versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+      - File system or volume name, for example C(rpool/myfs).
+ required: true
+ type: str
+ state:
+ description:
+ - Whether to allow (C(present)), or unallow (C(absent)) a permission.
+      - When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) is required.
+ - When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
+ choices: [ absent, present ]
+ default: present
+ type: str
+ users:
+ description:
+ - List of users to whom permission(s) should be granted.
+ type: list
+ elements: str
+ groups:
+ description:
+ - List of groups to whom permission(s) should be granted.
+ type: list
+ elements: str
+ everyone:
+ description:
+ - Apply permissions to everyone.
+ type: bool
+ default: false
+ permissions:
+ description:
+ - The list of permission(s) to delegate (required if C(state) is C(present)).
+ - Supported permissions depend on the ZFS version in use. See for example
+ U(https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html) for OpenZFS.
+ type: list
+ elements: str
+ local:
+ description:
+ - Apply permissions to C(name) locally (C(zfs allow -l)).
+ type: bool
+ descendents:
+ description:
+ - Apply permissions to C(name)'s descendents (C(zfs allow -d)).
+ type: bool
+ recursive:
+ description:
+ - Unallow permissions recursively (ignored when C(state) is C(present)).
+ type: bool
+ default: false
+author:
+- Nate Coraor (@natefoo)
+'''
+
+EXAMPLES = r'''
+- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: adm
+ permissions: allow,unallow
+
+- name: Grant `zfs send` to everyone, plus the group `backup`
+ community.general.zfs_delegate_admin:
+ name: rpool/myvol
+ groups: backup
+ everyone: true
+ permissions: send
+
+- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ users: foo,bar
+ permissions: send,receive
+ local: true
+
+- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
+ community.general.zfs_delegate_admin:
+ name: rpool/myfs
+ everyone: true
+ state: absent
+'''
+
+# This module does not return anything other than the standard
+# changed/state/msg/stdout
+RETURN = '''
+'''
+
+from itertools import product
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZfsDelegateAdmin(object):
+ def __init__(self, module):
+ self.module = module
+ self.name = module.params.get('name')
+ self.state = module.params.get('state')
+ self.users = module.params.get('users')
+ self.groups = module.params.get('groups')
+ self.everyone = module.params.get('everyone')
+ self.perms = module.params.get('permissions')
+ self.scope = None
+ self.changed = False
+ self.initial_perms = None
+ self.subcommand = 'allow'
+ self.recursive_opt = []
+ self.run_method = self.update
+
+ self.setup(module)
+
+ def setup(self, module):
+ """ Validate params and set up for run.
+ """
+ if self.state == 'absent':
+ self.subcommand = 'unallow'
+ if module.params.get('recursive'):
+ self.recursive_opt = ['-r']
+
+ local = module.params.get('local')
+ descendents = module.params.get('descendents')
+ if (local and descendents) or (not local and not descendents):
+ self.scope = 'ld'
+ elif local:
+ self.scope = 'l'
+ elif descendents:
+ self.scope = 'd'
+ else:
+ self.module.fail_json(msg='Impossible value for local and descendents')
+
+ if not (self.users or self.groups or self.everyone):
+ if self.state == 'present':
+ self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
+ elif self.state == 'absent':
+ self.run_method = self.clear
+ # ansible ensures the else cannot happen here
+
+ self.zfs_path = module.get_bin_path('zfs', True)
+
+ @property
+ def current_perms(self):
+ """ Parse the output of `zfs allow <name>` to retrieve current permissions.
+ """
+ out = self.run_zfs_raw(subcommand='allow')
+ perms = {
+ 'l': {'u': {}, 'g': {}, 'e': []},
+ 'd': {'u': {}, 'g': {}, 'e': []},
+ 'ld': {'u': {}, 'g': {}, 'e': []},
+ }
+ linemap = {
+ 'Local permissions:': 'l',
+ 'Descendent permissions:': 'd',
+ 'Local+Descendent permissions:': 'ld',
+ }
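+        # Typical `zfs allow <name>` output parsed below (entries are tab-indented):
+        #   Local+Descendent permissions:
+        #           user adm allow,unallow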
+ scope = None
+ for line in out.splitlines():
+ scope = linemap.get(line, scope)
+ if not scope:
+ continue
+ if ' (unknown: ' in line:
+ line = line.replace('(unknown: ', '', 1)
+ line = line.replace(')', '', 1)
+ try:
+ if line.startswith('\tuser ') or line.startswith('\tgroup '):
+ ent_type, ent, cur_perms = line.split()
+ perms[scope][ent_type[0]][ent] = cur_perms.split(',')
+ elif line.startswith('\teveryone '):
+ perms[scope]['e'] = line.split()[1].split(',')
+ except ValueError:
+ self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
+ return perms
+
+ def run_zfs_raw(self, subcommand=None, args=None):
+ """ Run a raw zfs command, fail on error.
+ """
+ cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
+ rc, out, err = self.module.run_command(cmd)
+ if rc:
+ self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
+ return out
+
+ def run_zfs(self, args):
+ """ Run zfs allow/unallow with appropriate options as per module arguments.
+ """
+ args = self.recursive_opt + ['-' + self.scope] + args
+ if self.perms:
+ args.append(','.join(self.perms))
+ return self.run_zfs_raw(args=args)
+
+ def clear(self):
+ """ Called by run() to clear all permissions.
+ """
+ changed = False
+ stdout = ''
+ for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
+ for ent in self.initial_perms[scope][ent_type].keys():
+ stdout += self.run_zfs(['-%s' % ent_type, ent])
+ changed = True
+ for scope in ('ld', 'l', 'd'):
+ if self.initial_perms[scope]['e']:
+ stdout += self.run_zfs(['-e'])
+ changed = True
+ return (changed, stdout)
+
+ def update(self):
+ """ Update permissions as per module arguments.
+ """
+ stdout = ''
+ for ent_type, entities in (('u', self.users), ('g', self.groups)):
+ if entities:
+ stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
+ if self.everyone:
+ stdout += self.run_zfs(['-e'])
+ return (self.initial_perms != self.current_perms, stdout)
+
+ def run(self):
+ """ Run an operation, return results for Ansible.
+ """
+ exit_args = {'state': self.state}
+ self.initial_perms = self.current_perms
+ exit_args['changed'], stdout = self.run_method()
+ if exit_args['changed']:
+ exit_args['msg'] = 'ZFS delegated admin permissions updated'
+ exit_args['stdout'] = stdout
+ self.module.exit_json(**exit_args)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ users=dict(type='list', elements='str'),
+ groups=dict(type='list', elements='str'),
+ everyone=dict(type='bool', default=False),
+ permissions=dict(type='list', elements='str'),
+ local=dict(type='bool'),
+ descendents=dict(type='bool'),
+ recursive=dict(type='bool', default=False),
+ ),
+ supports_check_mode=False,
+ required_if=[('state', 'present', ['permissions'])],
+ )
+ zfs_delegate_admin = ZfsDelegateAdmin(module)
+ zfs_delegate_admin.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zfs_facts.py b/ansible_collections/community/general/plugins/modules/zfs_facts.py
new file mode 100644
index 000000000..bb4530c47
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zfs_facts.py
@@ -0,0 +1,260 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zfs_facts
+short_description: Gather facts about ZFS datasets
+description:
+ - Gather facts from ZFS dataset properties.
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ name:
+ description:
+ - ZFS dataset name.
+ required: true
+ aliases: [ "ds", "dataset" ]
+ type: str
+ recurse:
+ description:
+ - Specifies if properties for any children should be recursively
+ displayed.
+ type: bool
+ default: false
+ parsable:
+ description:
+      - Specifies if property values should be displayed in machine-friendly
+        format.
+ type: bool
+ default: false
+ properties:
+ description:
+ - Specifies which dataset properties should be queried in comma-separated format.
+        For more information about dataset properties, check the zfs(1M) man page.
+ default: all
+ type: str
+ type:
+ description:
+ - Specifies which datasets types to display. Multiple values have to be
+ provided in comma-separated form.
+ choices: [ 'all', 'filesystem', 'volume', 'snapshot', 'bookmark' ]
+ default: all
+ type: str
+ depth:
+ description:
+ - Specifies recursion depth.
+ type: int
+ default: 0
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS dataset rpool/export/home
+ community.general.zfs_facts:
+ dataset: rpool/export/home
+
+- name: Report space usage on ZFS filesystems under data/home
+ community.general.zfs_facts:
+ name: data/home
+ recurse: true
+ type: filesystem
+
+- ansible.builtin.debug:
+ msg: 'ZFS dataset {{ item.name }} consumes {{ item.used }} of disk space.'
+ with_items: '{{ ansible_zfs_datasets }}'
+'''
+
+RETURN = '''
+name:
+ description: ZFS dataset name
+ returned: always
+ type: str
+ sample: rpool/var/spool
+parsable:
+  description: Whether parsable output should be provided in machine-friendly format.
+ returned: if 'parsable' is set to True
+ type: bool
+ sample: true
+recurse:
+  description: Whether to recurse over the ZFS dataset.
+ returned: if 'recurse' is set to True
+ type: bool
+ sample: true
+zfs_datasets:
+ description: ZFS dataset facts
+ returned: always
+ type: str
+ sample:
+ {
+ "aclinherit": "restricted",
+ "aclmode": "discard",
+ "atime": "on",
+ "available": "43.8G",
+ "canmount": "on",
+ "casesensitivity": "sensitive",
+ "checksum": "on",
+ "compression": "off",
+ "compressratio": "1.00x",
+ "copies": "1",
+ "creation": "Thu Jun 16 11:37 2016",
+ "dedup": "off",
+ "devices": "on",
+ "exec": "on",
+ "filesystem_count": "none",
+ "filesystem_limit": "none",
+ "logbias": "latency",
+ "logicalreferenced": "18.5K",
+ "logicalused": "3.45G",
+ "mlslabel": "none",
+ "mounted": "yes",
+ "mountpoint": "/rpool",
+ "name": "rpool",
+ "nbmand": "off",
+ "normalization": "none",
+ "org.openindiana.caiman:install": "ready",
+ "primarycache": "all",
+ "quota": "none",
+ "readonly": "off",
+ "recordsize": "128K",
+ "redundant_metadata": "all",
+ "refcompressratio": "1.00x",
+ "referenced": "29.5K",
+ "refquota": "none",
+ "refreservation": "none",
+ "reservation": "none",
+ "secondarycache": "all",
+ "setuid": "on",
+ "sharenfs": "off",
+ "sharesmb": "off",
+ "snapdir": "hidden",
+ "snapshot_count": "none",
+ "snapshot_limit": "none",
+ "sync": "standard",
+ "type": "filesystem",
+ "used": "4.41G",
+ "usedbychildren": "4.41G",
+ "usedbydataset": "29.5K",
+ "usedbyrefreservation": "0",
+ "usedbysnapshots": "0",
+ "utf8only": "off",
+ "version": "5",
+ "vscan": "off",
+ "written": "29.5K",
+ "xattr": "on",
+ "zoned": "off"
+ }
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import iteritems
+
+
+SUPPORTED_TYPES = ['all', 'filesystem', 'volume', 'snapshot', 'bookmark']
+
+
+class ZFSFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+
+ self.name = module.params['name']
+ self.recurse = module.params['recurse']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self.type = module.params['type']
+ self.depth = module.params['depth']
+
+ self._datasets = defaultdict(dict)
+ self.facts = []
+
+ def dataset_exists(self):
+ cmd = [self.module.get_bin_path('zfs'), 'list', self.name]
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ return True
+ else:
+ return False
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zfs'), 'get', '-H']
+ if self.parsable:
+ cmd.append('-p')
+ if self.recurse:
+ cmd.append('-r')
+ if int(self.depth) != 0:
+ cmd.append('-d')
+ cmd.append('%s' % self.depth)
+ if self.type:
+ cmd.append('-t')
+ cmd.append(self.type)
+ cmd.extend(['-o', 'name,property,value', self.properties, self.name])
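+        # e.g. with recurse=true, depth=2, type=filesystem this builds:
+        #   zfs get -H -r -d 2 -t filesystem -o name,property,value all data/home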
+
+ (rc, out, err) = self.module.run_command(cmd)
+
+ if rc == 0:
+ for line in out.splitlines():
+ dataset, property, value = line.split('\t')
+
+ self._datasets[dataset].update({property: value})
+
+ for k, v in iteritems(self._datasets):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_datasets': self.facts}
+ else:
+ self.module.fail_json(msg='Error while trying to get facts about ZFS dataset: %s' % self.name,
+ stderr=err,
+ rc=rc)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['ds', 'dataset'], type='str'),
+ recurse=dict(required=False, default=False, type='bool'),
+ parsable=dict(required=False, default=False, type='bool'),
+ properties=dict(required=False, default='all', type='str'),
+ type=dict(required=False, default='all', type='str', choices=SUPPORTED_TYPES),
+ depth=dict(required=False, default=0, type='int')
+ ),
+ supports_check_mode=True
+ )
+
+ zfs_facts = ZFSFacts(module)
+
+ result = {}
+ result['changed'] = False
+ result['name'] = zfs_facts.name
+
+ if zfs_facts.parsable:
+ result['parsable'] = zfs_facts.parsable
+
+ if zfs_facts.recurse:
+ result['recurse'] = zfs_facts.recurse
+
+ if zfs_facts.dataset_exists():
+ result['ansible_facts'] = zfs_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS dataset %s does not exist!' % zfs_facts.name)
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/znode.py b/ansible_collections/community/general/plugins/modules/znode.py
new file mode 100644
index 000000000..f5aa54ef8
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/znode.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2015 WP Engine, Inc. All rights reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: znode
+short_description: Create, delete, retrieve, and update znodes using ZooKeeper
+description:
+ - Create, delete, retrieve, and update znodes using ZooKeeper.
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+extends_documentation_fragment:
+ - community.general.attributes
+options:
+ hosts:
+ description:
+ - A list of ZooKeeper servers (format '[server]:[port]').
+ required: true
+ type: str
+ name:
+ description:
+ - The path of the znode.
+ required: true
+ type: str
+ value:
+ description:
+ - The value assigned to the znode.
+ type: str
+ op:
+ description:
+ - An operation to perform. Mutually exclusive with state.
+ choices: [ get, wait, list ]
+ type: str
+ state:
+ description:
+ - The state to enforce. Mutually exclusive with op.
+ choices: [ present, absent ]
+ type: str
+ timeout:
+ description:
+ - The amount of time to wait for a node to appear.
+ default: 300
+ type: int
+ recursive:
+ description:
+ - Recursively delete node and all its children.
+ type: bool
+ default: false
+ auth_scheme:
+ description:
+ - 'Authentication scheme.'
+ choices: [ digest, sasl ]
+ type: str
+ default: "digest"
+ required: false
+ version_added: 5.8.0
+ auth_credential:
+ description:
+ - The authentication credential value. Depends on I(auth_scheme).
+ - The format for I(auth_scheme=digest) is C(user:password),
+ and the format for I(auth_scheme=sasl) is C(user:password).
+ type: str
+ required: false
+ version_added: 5.8.0
+ use_tls:
+ description:
+      - Whether to use TLS/SSL.
+ type: bool
+ default: false
+ required: false
+ version_added: '6.5.0'
+requirements:
+ - kazoo >= 2.1
+ - python >= 2.6
+author: "Trey Perry (@treyperry)"
+'''
+
+EXAMPLES = """
+- name: Creating or updating a znode with a given value
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+
+- name: Getting the value and stat structure for a znode
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: get
+
+- name: Getting the value and stat structure for a znode using digest authentication
+ community.general.znode:
+ hosts: 'localhost:2181'
+ auth_credential: 'user1:s3cr3t'
+ name: /secretmypath
+ op: get
+
+- name: Listing a particular znode's children
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /zookeeper
+ op: list
+
+- name: Waiting 20 seconds for a znode to appear at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ op: wait
+ timeout: 20
+
+- name: Deleting a znode at path /mypath
+ community.general.znode:
+ hosts: 'localhost:2181'
+ name: /mypath
+ state: absent
+
+- name: Creating or updating a znode with a given value on a remote Zookeeper
+ community.general.znode:
+ hosts: 'my-zookeeper-node:2181'
+ name: /mypath
+ value: myvalue
+ state: present
+ delegate_to: 127.0.0.1
+"""
+
+import time
+import traceback
+
+KAZOO_IMP_ERR = None
+try:
+ from kazoo.client import KazooClient
+ from kazoo.handlers.threading import KazooTimeoutError
+ KAZOO_INSTALLED = True
+except ImportError:
+ KAZOO_IMP_ERR = traceback.format_exc()
+ KAZOO_INSTALLED = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ hosts=dict(required=True, type='str'),
+ name=dict(required=True, type='str'),
+ value=dict(type='str'),
+ op=dict(choices=['get', 'wait', 'list']),
+ state=dict(choices=['present', 'absent']),
+ timeout=dict(default=300, type='int'),
+ recursive=dict(default=False, type='bool'),
+ auth_scheme=dict(default='digest', choices=['digest', 'sasl']),
+ auth_credential=dict(type='str', no_log=True),
+ use_tls=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=False
+ )
+
+ if not KAZOO_INSTALLED:
+ module.fail_json(msg=missing_required_lib('kazoo >= 2.1'), exception=KAZOO_IMP_ERR)
+
+ check = check_params(module.params)
+ if not check['success']:
+ module.fail_json(msg=check['msg'])
+
+ zoo = KazooCommandProxy(module)
+ try:
+ zoo.start()
+ except KazooTimeoutError:
+ module.fail_json(msg='The connection to the ZooKeeper ensemble timed out.')
+
+ command_dict = {
+ 'op': {
+ 'get': zoo.get,
+ 'list': zoo.list,
+ 'wait': zoo.wait
+ },
+ 'state': {
+ 'present': zoo.present,
+ 'absent': zoo.absent
+ }
+ }
+
+ command_type = 'op' if 'op' in module.params and module.params['op'] is not None else 'state'
+ method = module.params[command_type]
+ result, result_dict = command_dict[command_type][method]()
+ zoo.shutdown()
+
+ if result:
+ module.exit_json(**result_dict)
+ else:
+ module.fail_json(**result_dict)
+
+
+def check_params(params):
+ if not params['state'] and not params['op']:
+ return {'success': False, 'msg': 'Please define an operation (op) or a state.'}
+
+ if params['state'] and params['op']:
+ return {'success': False, 'msg': 'Please choose an operation (op) or a state, but not both.'}
+
+ return {'success': True}
+
+
+class KazooCommandProxy():
+ def __init__(self, module):
+ self.module = module
+ self.zk = KazooClient(module.params['hosts'], use_ssl=module.params['use_tls'])
+
+ def absent(self):
+ return self._absent(self.module.params['name'])
+
+ def exists(self, znode):
+ return self.zk.exists(znode)
+
+ def list(self):
+ children = self.zk.get_children(self.module.params['name'])
+ return True, {'count': len(children), 'items': children, 'msg': 'Retrieved znodes in path.',
+ 'znode': self.module.params['name']}
+
+ def present(self):
+ return self._present(self.module.params['name'], self.module.params['value'])
+
+ def get(self):
+ return self._get(self.module.params['name'])
+
+ def shutdown(self):
+ self.zk.stop()
+ self.zk.close()
+
+ def start(self):
+ self.zk.start()
+ if self.module.params['auth_credential']:
+ self.zk.add_auth(self.module.params['auth_scheme'], self.module.params['auth_credential'])
+
+ def wait(self):
+ return self._wait(self.module.params['name'], self.module.params['timeout'])
+
+ def _absent(self, znode):
+ if self.exists(znode):
+ self.zk.delete(znode, recursive=self.module.params['recursive'])
+ return True, {'changed': True, 'msg': 'The znode was deleted.'}
+ else:
+ return True, {'changed': False, 'msg': 'The znode does not exist.'}
+
+ def _get(self, path):
+ if self.exists(path):
+ value, zstat = self.zk.get(path)
+ stat_dict = {}
+ for i in dir(zstat):
+ if not i.startswith('_'):
+ attr = getattr(zstat, i)
+ if isinstance(attr, (int, str)):
+ stat_dict[i] = attr
+ result = True, {'msg': 'The node was retrieved.', 'znode': path, 'value': value,
+ 'stat': stat_dict}
+ else:
+ result = False, {'msg': 'The requested node does not exist.'}
+
+ return result
+
+ def _present(self, path, value):
+ if self.exists(path):
+            (current_value, zstat) = self.zk.get(path)
+            # zk.get() returns bytes; compare against the encoded value so a matching
+            # string does not register as a spurious change on Python 3.
+            if to_bytes(value) != current_value:
+                self.zk.set(path, to_bytes(value))
+ return True, {'changed': True, 'msg': 'Updated the znode value.', 'znode': path,
+ 'value': value}
+ else:
+ return True, {'changed': False, 'msg': 'No changes were necessary.', 'znode': path, 'value': value}
+ else:
+ self.zk.create(path, to_bytes(value), makepath=True)
+ return True, {'changed': True, 'msg': 'Created a new znode.', 'znode': path, 'value': value}
+
+ def _wait(self, path, timeout, interval=5):
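+        # Poll every `interval` seconds until the znode appears or `timeout` elapses.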
+ lim = time.time() + timeout
+
+ while time.time() < lim:
+ if self.exists(path):
+ return True, {'msg': 'The node appeared before the configured timeout.',
+ 'znode': path, 'timeout': timeout}
+ else:
+ time.sleep(interval)
+
+ return False, {'msg': 'The node did not appear before the operation timed out.', 'timeout': timeout,
+ 'znode': path}
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zpool_facts.py b/ansible_collections/community/general/plugins/modules/zpool_facts.py
new file mode 100644
index 000000000..2477a920b
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zpool_facts.py
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2016, Adam Števko <adam.stevko@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zpool_facts
+short_description: Gather facts about ZFS pools
+description:
+ - Gather facts from ZFS pool properties.
+author: Adam Števko (@xen0l)
+extends_documentation_fragment:
+ - community.general.attributes
+ - community.general.attributes.facts
+ - community.general.attributes.facts_module
+options:
+ name:
+ description:
+ - ZFS pool name.
+ type: str
+ aliases: [ "pool", "zpool" ]
+ required: false
+ parsable:
+ description:
+      - Specifies whether property values should be displayed in
+        machine-friendly format.
+ type: bool
+ default: false
+ required: false
+ properties:
+ description:
+      - Specifies which pool properties should be queried, in comma-separated format.
+        For more information about pool properties, check the zpool(1M) man page.
+ type: str
+ default: all
+ required: false
+'''
+
+EXAMPLES = '''
+- name: Gather facts about ZFS pool rpool
+  community.general.zpool_facts:
+    pool: rpool
+
+- name: Gather space usage about all imported ZFS pools
+  community.general.zpool_facts:
+    properties: 'free,size'
+
+- name: Print gathered information
+ ansible.builtin.debug:
+ msg: 'ZFS pool {{ item.name }} has {{ item.free }} free space out of {{ item.size }}.'
+ with_items: '{{ ansible_zfs_pools }}'
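+
+# An illustrative variant (not from the original docs): query selected
+# properties in machine-friendly (parsable) units
+- name: Gather parsable facts about ZFS pool rpool
+  community.general.zpool_facts:
+    pool: rpool
+    properties: 'free,size'
+    parsable: true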
+'''
+
+RETURN = '''
+ansible_facts:
+  description: Dictionary containing the detailed information about the ZFS pool facts.
+ returned: always
+ type: complex
+ contains:
+ ansible_zfs_pools:
+      description: List of dictionaries, one per pool, with the requested properties.
+      returned: always
+      type: list
+ sample:
+ {
+ "allocated": "3.46G",
+ "altroot": "-",
+ "autoexpand": "off",
+ "autoreplace": "off",
+ "bootfs": "rpool/ROOT/openindiana",
+ "cachefile": "-",
+ "capacity": "6%",
+ "comment": "-",
+ "dedupditto": "0",
+ "dedupratio": "1.00x",
+ "delegation": "on",
+ "expandsize": "-",
+ "failmode": "wait",
+ "feature@async_destroy": "enabled",
+ "feature@bookmarks": "enabled",
+ "feature@edonr": "enabled",
+ "feature@embedded_data": "active",
+ "feature@empty_bpobj": "active",
+ "feature@enabled_txg": "active",
+ "feature@extensible_dataset": "enabled",
+ "feature@filesystem_limits": "enabled",
+ "feature@hole_birth": "active",
+ "feature@large_blocks": "enabled",
+ "feature@lz4_compress": "active",
+ "feature@multi_vdev_crash_dump": "enabled",
+ "feature@sha512": "enabled",
+ "feature@skein": "enabled",
+ "feature@spacemap_histogram": "active",
+ "fragmentation": "3%",
+ "free": "46.3G",
+ "freeing": "0",
+ "guid": "15729052870819522408",
+ "health": "ONLINE",
+ "leaked": "0",
+ "listsnapshots": "off",
+ "name": "rpool",
+ "readonly": "off",
+ "size": "49.8G",
+ "version": "-"
+ }
+name:
+ description: ZFS pool name
+ returned: always
+ type: str
+ sample: rpool
+parsable:
+  description: Whether parsable output is provided in machine-friendly format.
+  returned: if C(parsable) is set to C(true)
+ type: bool
+ sample: true
+'''
+
+from collections import defaultdict
+
+from ansible.module_utils.six import iteritems
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ZPoolFacts(object):
+ def __init__(self, module):
+
+ self.module = module
+ self.name = module.params['name']
+ self.parsable = module.params['parsable']
+ self.properties = module.params['properties']
+ self._pools = defaultdict(dict)
+ self.facts = []
+
+ def pool_exists(self):
+ cmd = [self.module.get_bin_path('zpool'), 'list', self.name]
+ rc, dummy, dummy = self.module.run_command(cmd)
+ return rc == 0
+
+ def get_facts(self):
+ cmd = [self.module.get_bin_path('zpool'), 'get', '-H']
+ if self.parsable:
+ cmd.append('-p')
+ cmd.append('-o')
+ cmd.append('name,property,value')
+ cmd.append(self.properties)
+ if self.name:
+ cmd.append(self.name)
+
+ rc, out, err = self.module.run_command(cmd, check_rc=True)
+
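+        # 'zpool get -H -o name,property,value' prints one tab-separated
+        # (pool, property, value) triple per line; fold them into a dict per pool.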
+ for line in out.splitlines():
+ pool, prop, value = line.split('\t')
+
+ self._pools[pool].update({prop: value})
+
+ for k, v in iteritems(self._pools):
+ v.update({'name': k})
+ self.facts.append(v)
+
+ return {'ansible_zfs_pools': self.facts}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['pool', 'zpool'], type='str'),
+ parsable=dict(default=False, type='bool'),
+ properties=dict(default='all', type='str'),
+ ),
+ supports_check_mode=True
+ )
+
+ zpool_facts = ZPoolFacts(module)
+
+ result = {
+ 'changed': False,
+ 'name': zpool_facts.name,
+ }
+ if zpool_facts.parsable:
+ result['parsable'] = zpool_facts.parsable
+
+ if zpool_facts.name is not None:
+ if zpool_facts.pool_exists():
+ result['ansible_facts'] = zpool_facts.get_facts()
+ else:
+ module.fail_json(msg='ZFS pool %s does not exist!' % zpool_facts.name)
+ else:
+ result['ansible_facts'] = zpool_facts.get_facts()
+
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zypper.py b/ansible_collections/community/general/plugins/modules/zypper.py
new file mode 100644
index 000000000..b47131d3d
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zypper.py
@@ -0,0 +1,607 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
+# based on
+# openbsd_pkg
+# Copyright (c) 2013
+# Patrik Lundin <patrik.lundin.swe@gmail.com>
+#
+# yum
+# Copyright (c) 2012, Red Hat, Inc
+# Written by Seth Vidal <skvidal at fedoraproject.org>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper
+author:
+ - "Patrick Callahan (@dirtyharrycallahan)"
+ - "Alexander Gubin (@alxgu)"
+ - "Thomas O'Donnell (@andytom)"
+ - "Robin Roth (@robinro)"
+ - "Andrii Radyk (@AnderEnder)"
+short_description: Manage packages on SUSE and openSUSE
+description:
+ - Manage packages on SUSE and openSUSE using the zypper and rpm tools.
+ - Also supports transactional updates, by running zypper inside C(/sbin/transactional-update --continue --drop-if-no-change --quiet run).
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+options:
+ name:
+ description:
+ - Package name C(name) or package specifier or a list of either.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
+      - You can also pass a URL or a local path to an RPM file.
+ - When using I(state=latest), this can be '*', which updates all installed packages.
+ required: true
+ aliases: [ 'pkg' ]
+ type: list
+ elements: str
+ state:
+ description:
+ - C(present) will make sure the package is installed.
+ C(latest) will make sure the latest version of the package is installed.
+ C(absent) will make sure the specified package is not installed.
+ C(dist-upgrade) will make sure the latest version of all installed packages from all enabled repositories is installed.
+ - When using C(dist-upgrade), I(name) should be C('*').
+ required: false
+ choices: [ present, latest, absent, dist-upgrade, installed, removed ]
+ default: "present"
+ type: str
+ type:
+ description:
+ - The type of package to be operated on.
+ required: false
+ choices: [ package, patch, pattern, product, srcpackage, application ]
+ default: "package"
+ type: str
+ extra_args_precommand:
+ required: false
+ description:
+ - Add additional global target options to C(zypper).
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ disable_gpg_check:
+ description:
+      - Whether to disable the GPG signature checking of the package
+        being installed. Has an effect only if state is
+        I(present) or I(latest).
+ required: false
+ default: false
+ type: bool
+ disable_recommends:
+ description:
+      - Corresponds to the C(--no-recommends) option for I(zypper). The default (C(true)) modifies zypper's default behavior by not
+        installing recommended packages; C(false) does install them.
+ required: false
+ default: true
+ type: bool
+ force:
+ description:
+      - Adds C(--force) option to I(zypper). Allows downgrading packages and changing vendor or architecture.
+ required: false
+ default: false
+ type: bool
+ force_resolution:
+ description:
+      - Adds C(--force-resolution) option to I(zypper). Allows installing or removing packages with conflicting requirements (the resolver will choose a solution).
+ required: false
+ default: false
+ type: bool
+ version_added: '0.2.0'
+ update_cache:
+ description:
+ - Run the equivalent of C(zypper refresh) before the operation. Disabled in check mode.
+ required: false
+ default: false
+ type: bool
+ aliases: [ "refresh" ]
+ oldpackage:
+ description:
+      - Adds C(--oldpackage) option to I(zypper). Allows downgrading packages with fewer side effects than C(force). This is implied as soon as a
+        version is specified as part of the package name.
+ required: false
+ default: false
+ type: bool
+ extra_args:
+ required: false
+ description:
+ - Add additional options to C(zypper) command.
+ - Options should be supplied in a single line as if given in the command line.
+ type: str
+ allow_vendor_change:
+ type: bool
+ required: false
+ default: false
+ description:
+      - Adds C(--allow-vendor-change) option to I(zypper) dist-upgrade command.
+ version_added: '0.2.0'
+ replacefiles:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--replacefiles) option to I(zypper) install/update command.
+ version_added: '0.2.0'
+ clean_deps:
+ type: bool
+ required: false
+ default: false
+ description:
+ - Adds C(--clean-deps) option to I(zypper) remove command.
+ version_added: '4.6.0'
+notes:
+  - When used with a C(loop:), each package will be processed individually;
+    it is much more efficient to pass the list directly to the I(name) option.
+# informational: requirements for nodes
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+ - rpm
+'''
+
+EXAMPLES = '''
+- name: Install nmap
+ community.general.zypper:
+ name: nmap
+ state: present
+
+- name: Install apache2 with recommended packages
+ community.general.zypper:
+ name: apache2
+ state: present
+ disable_recommends: false
+
+- name: Apply a given patch
+ community.general.zypper:
+ name: openSUSE-2016-128
+ state: present
+ type: patch
+
+- name: Remove the nmap package
+ community.general.zypper:
+ name: nmap
+ state: absent
+
+- name: Install the nginx rpm from a remote repo
+ community.general.zypper:
+ name: 'http://nginx.org/packages/sles/12/x86_64/RPMS/nginx-1.8.0-1.sles12.ngx.x86_64.rpm'
+ state: present
+
+- name: Install local rpm file
+ community.general.zypper:
+ name: /tmp/fancy-software.rpm
+ state: present
+
+- name: Update all packages
+ community.general.zypper:
+ name: '*'
+ state: latest
+
+- name: Apply all available patches
+ community.general.zypper:
+ name: '*'
+ state: latest
+ type: patch
+
+- name: Perform a dist-upgrade with additional arguments
+ community.general.zypper:
+ name: '*'
+ state: dist-upgrade
+ allow_vendor_change: true
+ extra_args: '--allow-arch-change'
+
+- name: Perform an installation of nmap with the install option replacefiles
+ community.general.zypper:
+ name: 'nmap'
+ state: latest
+ replacefiles: true
+
+- name: Refresh repositories and update package openssl
+ community.general.zypper:
+ name: openssl
+ state: present
+ update_cache: true
+
+- name: "Install specific version (possible comparisons: <, >, <=, >=, =)"
+ community.general.zypper:
+ name: 'docker>=1.10'
+ state: present
+
+- name: Wait 20 seconds to acquire the lock before failing
+ community.general.zypper:
+ name: mosh
+ state: present
+ environment:
+ ZYPP_LOCK_TIMEOUT: 20
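+
+# An illustrative example (not from the original docs): remove a package
+# together with dependencies that were only installed to satisfy it
+- name: Remove nmap and clean up unused dependencies
+  community.general.zypper:
+    name: nmap
+    state: absent
+    clean_deps: true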
+'''
+
+import os.path
+import xml.parsers.expat
+import re
+from xml.dom.minidom import parseString as parseXML
+from ansible.module_utils.common.text.converters import to_native
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+
+class Package:
+ def __init__(self, name, prefix, version):
+ self.name = name
+ self.prefix = prefix
+ self.version = version
+ self.shouldinstall = (prefix == '+')
+
+ def __str__(self):
+ return self.prefix + self.name + self.version
+
+
+def split_name_version(name):
+ """splits of the package name and desired version
+
+ example formats:
+ - docker>=1.10
+ - apache=2.4
+
+ Allowed version specifiers: <, >, <=, >=, =
+ Allowed version format: [0-9.-]*
+
+ Also allows a prefix indicating remove "-", "~" or install "+"
+ """
+
+ prefix = ''
+ if name[0] in ['-', '~', '+']:
+ prefix = name[0]
+ name = name[1:]
+ if prefix == '~':
+ prefix = '-'
+
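+    # Lazy '(.*?)' plus backtracking lets two-character operators like '>='
+    # win even though '>' appears first in the alternation below.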
+ version_check = re.compile('^(.*?)((?:<|>|<=|>=|=)[0-9.-]*)?$')
+ try:
+ reres = version_check.match(name)
+ name, version = reres.groups()
+ if version is None:
+ version = ''
+ return prefix, name, version
+ except Exception:
+ return prefix, name, ''
+
+
+def get_want_state(names, remove=False):
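+    """Split the requested names into Package objects and URL/RPM-file entries.
+
+    Entries containing '://' or ending in '.rpm' are treated as URLs/files;
+    names without an explicit '+'/'-' prefix get one based on the remove flag.
+    """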
+ packages = []
+ urls = []
+ for name in names:
+ if '://' in name or name.endswith('.rpm'):
+ urls.append(name)
+ else:
+ prefix, pname, version = split_name_version(name)
+ if prefix not in ['-', '+']:
+ if remove:
+ prefix = '-'
+ else:
+ prefix = '+'
+ packages.append(Package(pname, prefix, version))
+ return packages, urls
+
+
+def get_installed_state(m, packages):
+ "get installed state of packages"
+
+ cmd = get_cmd(m, 'search')
+ cmd.extend(['--match-exact', '--details', '--installed-only'])
+ cmd.extend([p.name for p in packages])
+ return parse_zypper_xml(m, cmd, fail_not_found=False)[0]
+
+
+def parse_zypper_xml(m, cmd, fail_not_found=True, packages=None):
+ rc, stdout, stderr = m.run_command(cmd, check_rc=False)
+
+ try:
+ dom = parseXML(stdout)
+ except xml.parsers.expat.ExpatError as exc:
+ m.fail_json(msg="Failed to parse zypper xml output: %s" % to_native(exc),
+ rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+ if rc == 104:
+ # exit code 104 is ZYPPER_EXIT_INF_CAP_NOT_FOUND (no packages found)
+ if fail_not_found:
+ errmsg = dom.getElementsByTagName('message')[-1].childNodes[0].data
+ m.fail_json(msg=errmsg, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+ else:
+ return {}, rc, stdout, stderr
+ elif rc in [0, 102, 103, 106]:
+ # zypper exit codes
+ # 0: success
+ # 106: signature verification failed
+        # 102: ZYPPER_EXIT_INF_REBOOT_NEEDED - returned after a successful installation of a patch which requires a reboot.
+ # 103: zypper was upgraded, run same command again
+ if packages is None:
+ firstrun = True
+ packages = {}
+ else:
+ firstrun = False
+ solvable_list = dom.getElementsByTagName('solvable')
+ for solvable in solvable_list:
+ name = solvable.getAttribute('name')
+ packages[name] = {}
+ packages[name]['version'] = solvable.getAttribute('edition')
+ packages[name]['oldversion'] = solvable.getAttribute('edition-old')
+ status = solvable.getAttribute('status')
+ packages[name]['installed'] = status == "installed"
+ packages[name]['group'] = solvable.parentNode.nodeName
+ if rc == 103 and firstrun:
+ # if this was the first run and it failed with 103
+ # run zypper again with the same command to complete update
+ return parse_zypper_xml(m, cmd, fail_not_found=fail_not_found, packages=packages)
+
+ return packages, rc, stdout, stderr
+ m.fail_json(msg='Zypper run command failed with return code %s.' % rc, rc=rc, stdout=stdout, stderr=stderr, cmd=cmd)
+
+
+def get_cmd(m, subcommand):
+ "puts together the basic zypper command arguments with those passed to the module"
+ is_install = subcommand in ['install', 'update', 'patch', 'dist-upgrade']
+ is_refresh = subcommand == 'refresh'
+ cmd = [m.get_bin_path('zypper', required=True), '--quiet', '--non-interactive', '--xmlout']
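+    # On read-only btrfs systems with transactional-update available, run the
+    # whole zypper invocation inside a transactional-update snapshot.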
+ if transactional_updates():
+ cmd = [m.get_bin_path('transactional-update', required=True), '--continue', '--drop-if-no-change', '--quiet', 'run'] + cmd
+ if m.params['extra_args_precommand']:
+ args_list = m.params['extra_args_precommand'].split()
+ cmd.extend(args_list)
+ # add global options before zypper command
+ if (is_install or is_refresh) and m.params['disable_gpg_check']:
+ cmd.append('--no-gpg-checks')
+
+ if subcommand == 'search':
+ cmd.append('--disable-repositories')
+
+ cmd.append(subcommand)
+ if subcommand not in ['patch', 'dist-upgrade'] and not is_refresh:
+ cmd.extend(['--type', m.params['type']])
+ if m.check_mode and subcommand != 'search':
+ cmd.append('--dry-run')
+ if is_install:
+ cmd.append('--auto-agree-with-licenses')
+ if m.params['disable_recommends']:
+ cmd.append('--no-recommends')
+ if m.params['force']:
+ cmd.append('--force')
+ if m.params['force_resolution']:
+ cmd.append('--force-resolution')
+ if m.params['oldpackage']:
+ cmd.append('--oldpackage')
+ if m.params['replacefiles']:
+ cmd.append('--replacefiles')
+ if subcommand == 'remove':
+ if m.params['clean_deps']:
+ cmd.append('--clean-deps')
+ if subcommand == 'dist-upgrade' and m.params['allow_vendor_change']:
+ cmd.append('--allow-vendor-change')
+ if m.params['extra_args']:
+ args_list = m.params['extra_args'].split(' ')
+ cmd.extend(args_list)
+
+ return cmd
+
+
+def set_diff(m, retvals, result):
+ # TODO: if there is only one package, set before/after to version numbers
+ packages = {'installed': [], 'removed': [], 'upgraded': []}
+ if result:
+ for p in result:
+ group = result[p]['group']
+ if group == 'to-upgrade':
+ versions = ' (' + result[p]['oldversion'] + ' => ' + result[p]['version'] + ')'
+ packages['upgraded'].append(p + versions)
+ elif group == 'to-install':
+ packages['installed'].append(p)
+ elif group == 'to-remove':
+ packages['removed'].append(p)
+
+ output = ''
+ for state in packages:
+ if packages[state]:
+ output += state + ': ' + ', '.join(packages[state]) + '\n'
+ if 'diff' not in retvals:
+ retvals['diff'] = {}
+ if 'prepared' not in retvals['diff']:
+ retvals['diff']['prepared'] = output
+ else:
+ retvals['diff']['prepared'] += '\n' + output
+
+
+def package_present(m, name, want_latest):
+ "install and update (if want_latest) the packages in name_install, while removing the packages in name_remove"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ packages, urls = get_want_state(name)
+
+ # add oldpackage flag when a version is given to allow downgrades
+ if any(p.version for p in packages):
+ m.params['oldpackage'] = True
+
+ if not want_latest:
+ # for state=present: filter out already installed packages
+ # if a version is given leave the package in to let zypper handle the version
+ # resolution
+ packageswithoutversion = [p for p in packages if not p.version]
+ prerun_state = {}
+ if packageswithoutversion:
+ prerun_state = get_installed_state(m, packageswithoutversion)
+ # generate lists of packages to install or remove
+ packages = [p for p in packages if p.shouldinstall != (p.name in prerun_state)]
+
+ if not packages and not urls:
+ # nothing to install/remove and nothing to update
+ return None, retvals
+
+ # zypper install also updates packages
+ cmd = get_cmd(m, 'install')
+ cmd.append('--')
+ cmd.extend(urls)
+ # pass packages to zypper
+ # allow for + or - prefixes in install/remove lists
+ # also add version specifier if given
+ # do this in one zypper run to allow for dependency-resolution
+ # for example "-exim postfix" runs without removing packages depending on mailserver
+ cmd.extend([str(p) for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return result, retvals
+
+
+def package_update_all(m):
+ "run update or patch on all available packages"
+
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ if m.params['type'] == 'patch':
+ cmdname = 'patch'
+ elif m.params['state'] == 'dist-upgrade':
+ cmdname = 'dist-upgrade'
+ else:
+ cmdname = 'update'
+
+ cmd = get_cmd(m, cmdname)
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def package_absent(m, name):
+ "remove the packages in name"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+ # Get package state
+ packages, urls = get_want_state(name, remove=True)
+ if any(p.prefix == '+' for p in packages):
+ m.fail_json(msg="Can not combine '+' prefix with state=remove/absent.")
+ if urls:
+ m.fail_json(msg="Can not remove via URL.")
+ if m.params['type'] == 'patch':
+ m.fail_json(msg="Can not remove patches.")
+ prerun_state = get_installed_state(m, packages)
+ packages = [p for p in packages if p.name in prerun_state]
+
+ if not packages:
+ return None, retvals
+
+ cmd = get_cmd(m, 'remove')
+ cmd.extend([p.name + p.version for p in packages])
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+ return result, retvals
+
+
+def repo_refresh(m):
+ "update the repositories"
+ retvals = {'rc': 0, 'stdout': '', 'stderr': ''}
+
+ cmd = get_cmd(m, 'refresh')
+
+ retvals['cmd'] = cmd
+ result, retvals['rc'], retvals['stdout'], retvals['stderr'] = parse_zypper_xml(m, cmd)
+
+ return retvals
+
+
+def get_fs_type_and_readonly_state(mount_point):
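+    """Return (fstype, is_readonly) for mount_point from /proc/mounts, or None if not found."""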
+ with open('/proc/mounts', 'r') as file:
+ for line in file.readlines():
+ fields = line.split()
+ path = fields[1]
+ if path == mount_point:
+ fs = fields[2]
+ opts = fields[3]
+ return fs, 'ro' in opts.split(',')
+ return None
+
+
+def transactional_updates():
+ return os.path.exists('/usr/sbin/transactional-update') and get_fs_type_and_readonly_state('/') == ('btrfs', True)
+
+
+# ===========================================
+# Main control flow
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='list', elements='str'),
+ state=dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed', 'dist-upgrade']),
+ type=dict(required=False, default='package', choices=['package', 'patch', 'pattern', 'product', 'srcpackage', 'application']),
+ extra_args_precommand=dict(required=False, default=None),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ disable_recommends=dict(required=False, default=True, type='bool'),
+ force=dict(required=False, default=False, type='bool'),
+ force_resolution=dict(required=False, default=False, type='bool'),
+ update_cache=dict(required=False, aliases=['refresh'], default=False, type='bool'),
+ oldpackage=dict(required=False, default=False, type='bool'),
+ extra_args=dict(required=False, default=None),
+ allow_vendor_change=dict(required=False, default=False, type='bool'),
+ replacefiles=dict(required=False, default=False, type='bool'),
+ clean_deps=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
+
+ name = module.params['name']
+ state = module.params['state']
+ update_cache = module.params['update_cache']
+
+ # remove empty strings from package list
+ name = list(filter(None, name))
+
+ # Refresh repositories
+ if update_cache and not module.check_mode:
+ retvals = repo_refresh(module)
+
+ if retvals['rc'] != 0:
+ module.fail_json(msg="Zypper refresh run failed.", **retvals)
+
+ # Perform requested action
+ if name == ['*'] and state in ['latest', 'dist-upgrade']:
+ packages_changed, retvals = package_update_all(module)
+ elif name != ['*'] and state == 'dist-upgrade':
+ module.fail_json(msg="Can not dist-upgrade specific packages.")
+ else:
+ if state in ['absent', 'removed']:
+ packages_changed, retvals = package_absent(module, name)
+ elif state in ['installed', 'present', 'latest']:
+ packages_changed, retvals = package_present(module, name, state == 'latest')
+
+ retvals['changed'] = retvals['rc'] in [0, 102] and bool(packages_changed)
+
+ if module._diff:
+ set_diff(module, retvals, packages_changed)
+
+ if retvals['rc'] not in [0, 102]:
+ module.fail_json(msg="Zypper run failed.", **retvals)
+
+ if not retvals['changed']:
+ del retvals['stdout']
+ del retvals['stderr']
+
+ module.exit_json(name=name, state=state, update_cache=update_cache, **retvals)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/general/plugins/modules/zypper_repository.py b/ansible_collections/community/general/plugins/modules/zypper_repository.py
new file mode 100644
index 000000000..cccd9c579
--- /dev/null
+++ b/ansible_collections/community/general/plugins/modules/zypper_repository.py
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
+# Copyright (c) 2014, Justin Lecher <jlec@gentoo.org>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: zypper_repository
+author: "Matthias Vogelgesang (@matze)"
+short_description: Add and remove Zypper repositories
+description:
+  - Add or remove Zypper repositories on SUSE and openSUSE.
+extends_documentation_fragment:
+ - community.general.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - A name for the repository. Not required when adding repofiles.
+ type: str
+ repo:
+ description:
+ - URI of the repository or .repo file. Required when state=present.
+ type: str
+ state:
+ description:
+      - Whether the repository should be present or absent.
+ choices: [ "absent", "present" ]
+ default: "present"
+ type: str
+ description:
+ description:
+      - A description of the repository.
+ type: str
+ disable_gpg_check:
+ description:
+ - Whether to disable GPG signature checking of
+ all packages. Has an effect only if state is
+ I(present).
+ - Needs zypper version >= 1.6.2.
+ type: bool
+ default: false
+ autorefresh:
+ description:
+ - Enable autorefresh of the repository.
+ type: bool
+ default: true
+ aliases: [ "refresh" ]
+ priority:
+ description:
+ - Set priority of repository. Packages will always be installed
+ from the repository with the smallest priority number.
+ - Needs zypper version >= 1.12.25.
+ type: int
+ overwrite_multiple:
+ description:
+      - Overwrite multiple repository entries, if more than one repository
+        matching the name or URL already exists.
+ type: bool
+ default: false
+ auto_import_keys:
+ description:
+ - Automatically import the gpg signing key of the new or changed repository.
+ - Has an effect only if state is I(present). Has no effect on existing (unchanged) repositories or in combination with I(absent).
+ - Implies runrefresh.
+      - Only works with C(.repo) files if I(name) is given explicitly.
+ type: bool
+ default: false
+ runrefresh:
+ description:
+ - Refresh the package list of the given repository.
+ - Can be used with repo=* to refresh all repositories.
+ type: bool
+ default: false
+ enabled:
+ description:
+ - Set repository to enabled (or disabled).
+ type: bool
+ default: true
+
+
+requirements:
+ - "zypper >= 1.0 # included in openSUSE >= 11.1 or SUSE Linux Enterprise Server/Desktop >= 11.0"
+ - python-xml
+'''
+
+EXAMPLES = '''
+- name: Add NVIDIA repository for graphics drivers
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: present
+
+- name: Remove NVIDIA repository
+ community.general.zypper_repository:
+ name: nvidia-repo
+ repo: 'ftp://download.nvidia.com/opensuse/12.2'
+ state: absent
+
+- name: Add python development repository
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo'
+
+- name: Refresh all repos
+ community.general.zypper_repository:
+ repo: '*'
+ runrefresh: true
+
+- name: Add a repo and add its gpg key
+ community.general.zypper_repository:
+ repo: 'http://download.opensuse.org/repositories/systemsmanagement/openSUSE_Leap_42.1/'
+ auto_import_keys: true
+
+- name: Force refresh of a repository
+ community.general.zypper_repository:
+ repo: 'http://my_internal_ci_repo/repo'
+ name: my_ci_repo
+ state: present
+ runrefresh: true
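+
+# An illustrative example (not from the original docs; hypothetical URL):
+# pin a repository to a priority and skip GPG checking
+- name: Add a low-priority repository without GPG checking
+  community.general.zypper_repository:
+    repo: 'http://example.com/repo'
+    name: example-repo
+    priority: 120
+    disable_gpg_check: true
+    state: present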
+'''
+
+import traceback
+
+XML_IMP_ERR = None
+try:
+ from xml.dom.minidom import parseString as parseXML
+ HAS_XML = True
+except ImportError:
+ XML_IMP_ERR = traceback.format_exc()
+ HAS_XML = False
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+
+from ansible.module_utils.urls import fetch_url
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.six.moves import configparser, StringIO
+from io import open
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck']
+
+
+def _get_cmd(module, *args):
+ """Combines the non-interactive zypper command with arguments/subcommands"""
+ cmd = [module.get_bin_path('zypper', required=True), '--quiet', '--non-interactive']
+ cmd.extend(args)
+
+ return cmd
+
+
+def _parse_repos(module):
+ """parses the output of zypper --xmlout repos and return a parse repo dictionary"""
+ cmd = _get_cmd(module, '--xmlout', 'repos')
+
+ if not HAS_XML:
+ module.fail_json(msg=missing_required_lib("python-xml"), exception=XML_IMP_ERR)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ if rc == 0:
+ repos = []
+ dom = parseXML(stdout)
+ repo_list = dom.getElementsByTagName('repo')
+ for repo in repo_list:
+ opts = {}
+ for o in REPO_OPTS:
+ opts[o] = repo.getAttribute(o)
+ opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data
+ # A repo can be uniquely identified by an alias + url
+ repos.append(opts)
+ return repos
+ # exit code 6 is ZYPPER_EXIT_NO_REPOS (no repositories defined)
+ elif rc == 6:
+ return []
+ else:
+ module.fail_json(msg='Failed to execute "%s"' % " ".join(cmd), rc=rc, stdout=stdout, stderr=stderr)
+
+
+def _repo_changes(module, realrepo, repocmp):
+ "Check whether the 2 given repos have different settings."
+ for k in repocmp:
+ if repocmp[k] and k not in realrepo:
+ return True
+
+ for k, v in realrepo.items():
+ if k in repocmp and repocmp[k]:
+ valold = str(repocmp[k] or "")
+ valnew = v or ""
+ if k == "url":
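+                # Expand zypper's repo variables before comparing URLs, using
+                # the version/arch of the package that owns /etc/os-release.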
+ if '$releasever' in valold or '$releasever' in valnew:
+ cmd = ['rpm', '-q', '--qf', '%{version}', '-f', '/etc/os-release']
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ valnew = valnew.replace('$releasever', stdout)
+ valold = valold.replace('$releasever', stdout)
+ if '$basearch' in valold or '$basearch' in valnew:
+ cmd = ['rpm', '-q', '--qf', '%{arch}', '-f', '/etc/os-release']
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ valnew = valnew.replace('$basearch', stdout)
+ valold = valold.replace('$basearch', stdout)
+ valold, valnew = valold.rstrip("/"), valnew.rstrip("/")
+ if valold != valnew:
+ return True
+ return False
+
+
+def repo_exists(module, repodata, overwrite_multiple):
+ """Check whether the repository already exists.
+
+ returns (exists, mod, old_repos)
+ exists: whether a matching (name, URL) repo exists
+ mod: whether there are changes compared to the existing repo
+ old_repos: list of matching repos
+ """
+ existing_repos = _parse_repos(module)
+
+ # look for repos that have matching alias or url to the one searched
+ repos = []
+ for kw in ['alias', 'url']:
+ name = repodata[kw]
+ for oldr in existing_repos:
+ if repodata[kw] == oldr[kw] and oldr not in repos:
+ repos.append(oldr)
+
+ if len(repos) == 0:
+ # Repo does not exist yet
+ return (False, False, None)
+ elif len(repos) == 1:
+ # Found an existing repo, look for changes
+ has_changes = _repo_changes(module, repos[0], repodata)
+ return (True, has_changes, repos)
+ elif len(repos) >= 2:
+ if overwrite_multiple:
+ # Found two repos and want to overwrite_multiple
+ return (True, True, repos)
+ else:
+ errmsg = 'More than one repo matched "%s": "%s".' % (name, repos)
+ errmsg += ' Use overwrite_multiple to allow more than one repo to be overwritten'
+ module.fail_json(msg=errmsg)
+
+
+def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
+ "Adds the repo, removes old repos before, that would conflict."
+ repo = repodata['url']
+ cmd = _get_cmd(module, 'addrepo', '--check')
+ if repodata['name']:
+ cmd.extend(['--name', repodata['name']])
+
+ # priority on addrepo available since 1.12.25
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
+ if repodata['priority']:
+ if zypper_version >= LooseVersion('1.12.25'):
+ cmd.extend(['--priority', str(repodata['priority'])])
+ else:
+ warnings.append("Setting priority only available for zypper >= 1.12.25. Ignoring priority argument.")
+
+ if repodata['enabled'] == '0':
+ cmd.append('--disable')
+
+ # gpgcheck available since 1.6.2
+ # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
+ # the default changed in the past, so don't assume a default here and show warning for old zypper versions
+ if zypper_version >= LooseVersion('1.6.2'):
+ if repodata['gpgcheck'] == '1':
+ cmd.append('--gpgcheck')
+ else:
+ cmd.append('--no-gpgcheck')
+ else:
+ warnings.append("Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value.")
+
+ if repodata['autorefresh'] == '1':
+ cmd.append('--refresh')
+
+ cmd.append(repo)
+
+ if not repo.endswith('.repo'):
+ cmd.append(repodata['alias'])
+
+ if old_repos is not None:
+ for oldrepo in old_repos:
+ remove_repo(module, oldrepo['url'])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
+ return rc, stdout, stderr
+
+
+def remove_repo(module, repo):
+ "Removes the repo."
+ cmd = _get_cmd(module, 'removerepo', repo)
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def get_zypper_version(module):
+ rc, stdout, stderr = module.run_command([module.get_bin_path('zypper', required=True), '--version'])
+ if rc != 0 or not stdout.startswith('zypper '):
+ return LooseVersion('1.0')
+ return LooseVersion(stdout.split()[1])
+
+
+def runrefreshrepo(module, auto_import_keys=False, shortname=None):
+ "Forces zypper to refresh repo metadata."
+ if auto_import_keys:
+ cmd = _get_cmd(module, '--gpg-auto-import-keys', 'refresh', '--force')
+ else:
+ cmd = _get_cmd(module, 'refresh', '--force')
+ if shortname is not None:
+ cmd.extend(['-r', shortname])
+
+ rc, stdout, stderr = module.run_command(cmd, check_rc=True)
+ return rc, stdout, stderr
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(required=False),
+ repo=dict(required=False),
+ state=dict(choices=['present', 'absent'], default='present'),
+ runrefresh=dict(required=False, default=False, type='bool'),
+ description=dict(required=False),
+ disable_gpg_check=dict(required=False, default=False, type='bool'),
+ autorefresh=dict(required=False, default=True, type='bool', aliases=['refresh']),
+ priority=dict(required=False, type='int'),
+ enabled=dict(required=False, default=True, type='bool'),
+ overwrite_multiple=dict(required=False, default=False, type='bool'),
+ auto_import_keys=dict(required=False, default=False, type='bool'),
+ ),
+ supports_check_mode=False,
+ required_one_of=[['state', 'runrefresh']],
+ )
+
+ repo = module.params['repo']
+ alias = module.params['name']
+ state = module.params['state']
+ overwrite_multiple = module.params['overwrite_multiple']
+ auto_import_keys = module.params['auto_import_keys']
+ runrefresh = module.params['runrefresh']
+
+ zypper_version = get_zypper_version(module)
+ warnings = [] # collect warning messages for final output
+
+ repodata = {
+ 'url': repo,
+ 'alias': alias,
+ 'name': module.params['description'],
+ 'priority': module.params['priority'],
+ }
+ # rewrite bools in the language that zypper lr -x provides for easier comparison
+ if module.params['enabled']:
+ repodata['enabled'] = '1'
+ else:
+ repodata['enabled'] = '0'
+ if module.params['disable_gpg_check']:
+ repodata['gpgcheck'] = '0'
+ else:
+ repodata['gpgcheck'] = '1'
+ if module.params['autorefresh']:
+ repodata['autorefresh'] = '1'
+ else:
+ repodata['autorefresh'] = '0'
+
+ def exit_unchanged():
+ module.exit_json(changed=False, repodata=repodata, state=state)
+
+ # Check run-time module parameters
+ if repo == '*' or alias == '*':
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys)
+ module.exit_json(changed=False, runrefresh=True)
+ else:
+ module.fail_json(msg='repo=* can only be used with the runrefresh option.')
+
+ if state == 'present' and not repo:
+ module.fail_json(msg='Module option state=present requires repo')
+ if state == 'absent' and not repo and not alias:
+ module.fail_json(msg='Alias or repo parameter required when state=absent')
+
+ if repo and repo.endswith('.repo'):
+ if alias:
+ module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding .repo files')
+ else:
+ if not alias and state == "present":
+ module.fail_json(msg='Name required when adding non-repo files.')
+
+ # Download / Open and parse .repo file to ensure idempotency
+ if repo and repo.endswith('.repo'):
+ if repo.startswith(('http://', 'https://')):
+ response, info = fetch_url(module=module, url=repo, force=True)
+ if not response or info['status'] != 200:
+ module.fail_json(msg='Error downloading .repo file from provided URL')
+ repofile_text = to_text(response.read(), errors='surrogate_or_strict')
+ else:
+ try:
+ with open(repo, encoding='utf-8') as file:
+ repofile_text = file.read()
+ except IOError:
+ module.fail_json(msg='Error opening .repo file from provided path')
+
+ repofile = configparser.ConfigParser()
+ try:
+ repofile.readfp(StringIO(repofile_text))
+ except configparser.Error:
+ module.fail_json(msg='Invalid format, .repo file could not be parsed')
+
+ # No support for .repo file with zero or more than one repository
+ if len(repofile.sections()) != 1:
+ err = "Invalid format, .repo file contains %s repositories, expected 1" % len(repofile.sections())
+ module.fail_json(msg=err)
+
+ section = repofile.sections()[0]
+ repofile_items = dict(repofile.items(section))
+ # Only proceed if at least baseurl is available
+ if 'baseurl' not in repofile_items:
+ module.fail_json(msg='No baseurl found in .repo file')
+
+ # Set alias (name) and url based on values from .repo file
+ alias = section
+ repodata['alias'] = section
+ repodata['url'] = repofile_items['baseurl']
+
+ # If gpgkey is part of the .repo file, auto import key
+ if 'gpgkey' in repofile_items:
+ auto_import_keys = True
+
+ # Map additional values, if available
+ if 'name' in repofile_items:
+ repodata['name'] = repofile_items['name']
+ if 'enabled' in repofile_items:
+ repodata['enabled'] = repofile_items['enabled']
+ if 'autorefresh' in repofile_items:
+ repodata['autorefresh'] = repofile_items['autorefresh']
+ if 'gpgcheck' in repofile_items:
+ repodata['gpgcheck'] = repofile_items['gpgcheck']
+
+ exists, mod, old_repos = repo_exists(module, repodata, overwrite_multiple)
+
+ if alias:
+ shortname = alias
+ else:
+ shortname = repo
+
+ if state == 'present':
+ if exists and not mod:
+ if runrefresh:
+ runrefreshrepo(module, auto_import_keys, shortname)
+ exit_unchanged()
+ rc, stdout, stderr = addmodify_repo(module, repodata, old_repos, zypper_version, warnings)
+ if rc == 0 and (runrefresh or auto_import_keys):
+ runrefreshrepo(module, auto_import_keys, shortname)
+ elif state == 'absent':
+ if not exists:
+ exit_unchanged()
+ rc, stdout, stderr = remove_repo(module, shortname)
+
+ if rc == 0:
+ module.exit_json(changed=True, repodata=repodata, state=state, warnings=warnings)
+ else:
+ module.fail_json(msg="Zypper failed with rc %s" % rc, rc=rc, stdout=stdout, stderr=stderr, repodata=repodata, state=state, warnings=warnings)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/plugins/test/a_module.py b/ansible_collections/community/general/plugins/test/a_module.py
new file mode 100644
index 000000000..0d6cecac6
--- /dev/null
+++ b/ansible_collections/community/general/plugins/test/a_module.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: a_module
+ short_description: Test whether a given string refers to an existing module or action plugin
+ version_added: 4.0.0
+ author: Felix Fontein (@felixfontein)
+ description:
+ - Test whether a given string refers to an existing module or action plugin.
+    - This can be useful in roles to ensure that required modules are present ahead of time.
+ options:
+ _input:
+ description: A string denoting a fully qualified collection name (FQCN) of a module or action plugin.
+ type: string
+ required: true
+'''
+
+EXAMPLES = '''
+- name: Make sure that community.aws.route53 is available
+ ansible.builtin.assert:
+ that:
+ - >
+ 'community.aws.route53' is community.general.a_module
+
+- name: Make sure that community.general.does_not_exist is not a module or action plugin
+ ansible.builtin.assert:
+ that:
+ - "'community.general.does_not_exist' is not community.general.a_module"
+'''
+
+RETURN = '''
+ _value:
+ description: Whether the module or action plugin denoted by the input exists.
+ type: boolean
+'''
+
+from ansible.plugins.loader import action_loader, module_loader
+
+try:
+ from ansible.errors import AnsiblePluginRemovedError
+except ImportError:
+ AnsiblePluginRemovedError = Exception
+
+
+def a_module(term):
+ """
+ Example:
+ - 'community.general.ufw' is community.general.a_module
+ - 'community.general.does_not_exist' is not community.general.a_module
+ """
+ try:
+ for loader in (action_loader, module_loader):
+ data = loader.find_plugin(term)
+ if data is not None:
+ return True
+ return False
+ except AnsiblePluginRemovedError:
+ return False
+
+
+class TestModule(object):
+ ''' Ansible jinja2 tests '''
+
+ def tests(self):
+ return {
+ 'a_module': a_module,
+ }
diff --git a/ansible_collections/community/general/tests/.gitignore b/ansible_collections/community/general/tests/.gitignore
new file mode 100644
index 000000000..6edf5dc10
--- /dev/null
+++ b/ansible_collections/community/general/tests/.gitignore
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+output/
diff --git a/ansible_collections/community/general/tests/config.yml b/ansible_collections/community/general/tests/config.yml
new file mode 100644
index 000000000..38590f2e4
--- /dev/null
+++ b/ansible_collections/community/general/tests/config.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# See template for more information:
+# https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml
+modules:
+ python_requires: default
diff --git a/ansible_collections/community/general/tests/integration/requirements.yml b/ansible_collections/community/general/tests/integration/requirements.yml
new file mode 100644
index 000000000..b772fc82d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/requirements.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+collections:
+- ansible.posix
+- community.crypto
+- community.docker
diff --git a/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases b/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases
new file mode 100644
index 000000000..8d841c56b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/aix_devices/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# No AIX LPAR available
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml
new file mode 100644
index 000000000..284f46c33
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/aix_devices/tasks/main.yml
@@ -0,0 +1,81 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Scan new devices.
+ aix_devices:
+ device: all
+ state: present
+
+- name: Scan new virtual devices (vio0).
+ aix_devices:
+ device: vio0
+ state: present
+
+- name: Removing IP alias to en0
+ aix_devices:
+ device: en0
+ attributes:
+ delalias4: 10.0.0.100,255.255.255.0
+
+- name: Removes ent2.
+ aix_devices:
+ device: ent2
+ state: absent
+
+- name: Put device en2 in Defined
+ aix_devices:
+ device: en2
+ state: defined
+
+- name: Removes ent4 (nonexistent).
+ aix_devices:
+ device: ent4
+ state: absent
+
+- name: Put device en4 in Defined (nonexistent)
+ aix_devices:
+ device: en4
+ state: defined
+
+- name: Put vscsi1 and children devices in Defined state.
+ aix_devices:
+ device: vscsi1
+ recursive: true
+ state: defined
+
+- name: Removes vscsi1 and children devices.
+ aix_devices:
+ device: vscsi1
+ recursive: true
+ state: absent
+
+- name: Changes en1 mtu to 9000 and disables arp.
+ aix_devices:
+ device: en1
+ attributes:
+      mtu: 9000
+ arp: 'off'
+ state: present
+
+- name: Configure IP, netmask and set en1 up.
+ aix_devices:
+ device: en1
+ attributes:
+ netaddr: 192.168.0.100
+ netmask: 255.255.255.0
+ state: up
+ state: present
+
+- name: Adding IP alias to en0
+ aix_devices:
+ device: en0
+ attributes:
+ alias4: 10.0.0.100,255.255.255.0
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases b/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/aix_filesystem/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml
new file mode 100644
index 000000000..25146062d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/aix_filesystem/tasks/main.yml
@@ -0,0 +1,130 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Unmounting /testfs
+ aix_filesystem:
+ filesystem: /testfs
+ state: unmounted
+
+- name: Removing /testfs
+ aix_filesystem:
+ filesystem: /testfs
+ state: absent
+
+- name: Creating a new file system
+ aix_filesystem:
+ filesystem: /newfs
+ size: 1G
+ state: present
+ vg: datavg
+
+# It requires a host (nfshost) exporting the NFS
+- name: Creating NFS filesystem from nfshost (Linux NFS server)
+ aix_filesystem:
+ device: /home/ftp
+ nfs_server: nfshost
+ filesystem: /nfs/ftp
+ state: present
+
+# It requires a volume group named datavg (next three actions)
+- name: Creating a logical volume testlv (aix_lvol module)
+ aix_lvol:
+ vg: datavg
+ lv: testlv
+ size: 2G
+ state: present
+
+- name: Create filesystem in a previously defined logical volume
+ aix_filesystem:
+ device: testlv
+ filesystem: /testfs
+ state: present
+
+- name: Create an already existing filesystem using existing logical volume.
+ aix_filesystem:
+ vg: datavg
+ device: mksysblv
+ filesystem: /mksysb
+ state: present
+
+- name: Create a filesystem in a non-existing VG
+ aix_filesystem:
+ vg: nonexistvg
+ filesystem: /newlv
+ state: present
+
+- name: Resizing /mksysb to 1G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 1G
+ state: present
+
+- name: Resizing /mksysb to +512M
+ aix_filesystem:
+ filesystem: /mksysb
+ size: +512M
+ state: present
+
+- name: Resizing /mksysb to 11G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to 11G (already done)
+ aix_filesystem:
+ filesystem: /mksysb
+ size: 11G
+ state: present
+
+- name: Resizing /mksysb to -2G
+ aix_filesystem:
+ filesystem: /mksysb
+ size: -2G
+ state: present
+
+- name: Resizing /mksysb by +100G (not enough space)
+ aix_filesystem:
+ filesystem: /mksysb
+ size: +100G
+ state: present
+
+- name: Unmount filesystem /home/ftp
+ aix_filesystem:
+ filesystem: /home/ftp
+ state: unmounted
+
+- name: Remove NFS filesystem /home/ftp
+ aix_filesystem:
+ filesystem: /home/ftp
+ rm_mount_point: true
+ state: absent
+
+- name: Mount filesystem /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ state: mounted
+
+- name: Remove mounted /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: true
+ state: absent
+
+- name: Unmount /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ state: unmounted
+
+- name: Remove /newfs
+ aix_filesystem:
+ filesystem: /newfs
+ rm_mount_point: true
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/alerta_customer/aliases b/ansible_collections/community/general/tests/integration/targets/alerta_customer/aliases
new file mode 100644
index 000000000..d163e8d9c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alerta_customer/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/alerta_customer/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/alerta_customer/defaults/main.yml
new file mode 100644
index 000000000..3d4877b41
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alerta_customer/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+alerta_url: http://localhost:8080/
+alerta_user: admin@example.com
+alerta_password: password
+alerta_key: demo-key
diff --git a/ansible_collections/community/general/tests/integration/targets/alerta_customer/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/alerta_customer/tasks/main.yml
new file mode 100644
index 000000000..b91c24b53
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alerta_customer/tasks/main.yml
@@ -0,0 +1,156 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create customer (check mode)
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ check_mode: true
+ register: result
+
+- name: Check result (check mode)
+ assert:
+ that:
+ - result is changed
+
+- name: Create customer
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ register: result
+
+- name: Check customer creation
+ assert:
+ that:
+ - result is changed
+
+- name: Test customer creation idempotency
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ register: result
+
+- name: Check customer creation idempotency
+ assert:
+ that:
+ - result is not changed
+
+- name: Delete customer (check mode)
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ state: absent
+ check_mode: true
+ register: result
+
+- name: Check customer deletion (check mode)
+ assert:
+ that:
+ - result is changed
+
+- name: Delete customer
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ state: absent
+ register: result
+
+- name: Check customer deletion
+ assert:
+ that:
+ - result is changed
+
+- name: Test customer deletion idempotency
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ state: absent
+ register: result
+
+- name: Check customer deletion idempotency
+ assert:
+ that:
+ - result is not changed
+
+- name: Delete non-existing customer (check mode)
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_username: "{{ alerta_user }}"
+ api_password: "{{ alerta_password }}"
+ customer: customer1
+ match: admin@admin.admin
+ state: absent
+ check_mode: true
+ register: result
+
+- name: Check non-existing customer deletion (check mode)
+ assert:
+ that:
+ - result is not changed
+
+- name: Create customer with api key
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_key: "{{ alerta_key }}"
+ customer: customer1
+ match: admin@admin.admin
+ register: result
+
+- name: Check customer creation with api key
+ assert:
+ that:
+ - result is changed
+
+- name: Delete customer with api key
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_key: "{{ alerta_key }}"
+ customer: customer1
+ match: admin@admin.admin
+ state: absent
+ register: result
+
+- name: Check customer deletion with api key
+ assert:
+ that:
+ - result is changed
+
+- name: Use wrong api key
+ alerta_customer:
+ alerta_url: "{{ alerta_url }}"
+ api_key: wrong_key
+ customer: customer1
+ match: admin@admin.admin
+ register: result
+ ignore_errors: true
+
+- name: Check failed customer creation with wrong api key
+ assert:
+ that:
+ - result is not changed
+ - result is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/aliases b/ansible_collections/community/general/tests/integration/targets/alternatives/aliases
new file mode 100644
index 000000000..f360ac626
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+needs/root
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml
new file mode 100644
index 000000000..81d6a7b0d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/main.yml
@@ -0,0 +1,93 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis.bonicoli@libregerbil.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'setup: create a dummy alternative'
+ block:
+ - import_tasks: setup.yml
+
+ ##############
+ # Test parameters:
+ # link parameter present / absent ('with_link' variable)
+ # with / without alternatives defined in alternatives file ('with_alternatives' variable)
+ # auto / manual ('mode' variable)
+
+ - include_tasks: tests.yml
+ with_nested:
+ - [ true, false ] # with_link
+ - [ true, false ] # with_alternatives
+ - [ 'auto', 'manual' ] # mode
+ loop_control:
+ loop_var: test_conf
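+ # For reference, the nested loop above expands to eight test_conf values
+ # (an illustrative enumeration, not executed here):
+ #   [true, true, 'auto'],   [true, true, 'manual'],
+ #   [true, false, 'auto'],  [true, false, 'manual'],
+ #   [false, true, 'auto'],  [false, true, 'manual'],
+ #   [false, false, 'auto'], [false, false, 'manual']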
+
+ ##########
+ # Priority
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: true
+ mode: auto
+
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations again
+ - include_tasks: tests_set_priority.yml
+ with_sequence: start=3 end=4
+ vars:
+ with_alternatives: false
+ mode: auto
+
+ # Test that path is checked: alternatives must fail when path is nonexistent
+ - import_tasks: path_is_checked.yml
+
+ # Test that subcommands work
+ - import_tasks: subcommands.yml
+
+ # Test operation of the 'state' parameter
+ - block:
+ - include_tasks: remove_links.yml
+ - include_tasks: tests_state.yml
+
+ # Cleanup
+ always:
+ - include_tasks: remove_links.yml
+
+ - file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ alternatives_dir }}/dummy'
+ - '{{ alternatives_dir }}/dummymain'
+ - '{{ alternatives_dir }}/dummysubcmd'
+
+ - file:
+ path: '/usr/bin/dummy{{ item }}'
+ state: absent
+ with_sequence: start=1 end=4
+
+ # *Disable tests on Fedora 24*
+ # Shippable Fedora 24 image provides chkconfig-1.7-2.fc24.x86_64 but not the
+ # latest available version (chkconfig-1.8-1.fc24.x86_64). update-alternatives
+ # in chkconfig-1.7-2 fails when /etc/alternatives/dummy link is missing,
+ # the error is: 'failed to read link /usr/bin/dummy: No such file or directory'.
+ # Moreover, Fedora 24 is no longer maintained.
+ #
+ # *Disable tests on Arch Linux*
+ # TODO: figure out whether there is an alternatives tool for Arch Linux
+ #
+ # *Disable tests on Alpine*
+ # TODO: figure out whether there is an alternatives tool for Alpine
+ when:
+ - ansible_distribution != 'Fedora' or ansible_distribution_major_version|int > 24
+ - ansible_distribution != 'Archlinux'
+ - ansible_distribution != 'Alpine'
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml
new file mode 100644
index 000000000..0bc435889
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/path_is_checked.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Try with nonexistent path
+ alternatives:
+ name: dummy
+ path: '/non/existent/path/there'
+ link: '/usr/bin/dummy'
+ ignore_errors: true
+ register: alternative
+
+- name: Check previous task failed
+ assert:
+ that:
+ - 'alternative is failed'
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml
new file mode 100644
index 000000000..de25b02cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/remove_links.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: remove links
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - "{{ alternatives_dir }}/dummy"
+ - /etc/alternatives/dummy
+ - /usr/bin/dummy
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml
new file mode 100644
index 000000000..ab2c39852
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_os_family }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
+ paths: ../vars
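+# first_found returns the first existing file in the order listed above;
+# e.g. on a hypothetical Debian 11 host it would try ../vars/Debian-11.yml,
+# then ../vars/Debian.yml, then ../vars/default.yml.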
+- template:
+ src: dummy_command
+ dest: /usr/bin/dummy{{ item }}
+ owner: root
+ group: root
+ mode: '0755'
+ with_sequence: start=1 end=4
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml
new file mode 100644
index 000000000..77279c67f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/setup_test.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- template:
+ src: dummy_alternative
+ dest: '{{ alternatives_dir }}/dummy'
+ owner: root
+ group: root
+ mode: '0644'
+ when: with_alternatives or ansible_os_family != 'RedHat'
+- file:
+ path: '{{ alternatives_dir }}/dummy'
+ state: absent
+ when: not with_alternatives and ansible_os_family == 'RedHat'
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/subcommands.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/subcommands.yml
new file mode 100644
index 000000000..678bbe68f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/subcommands.yml
@@ -0,0 +1,222 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
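+# Descriptive note (assumed mapping): the 'subcommands' option corresponds to
+# update-alternatives slave links, which follow the master link ('dummymain')
+# whenever the selection changes.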
+- name: Try with subcommands
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy1'
+ link: '/usr/bin/dummymain'
+ subcommands:
+ - name: dummysubcmd
+ path: '/usr/bin/dummy2'
+ link: '/usr/bin/dummysubcmd'
+ register: alternative
+
+- name: Check that the alternative was changed
+ assert:
+ that:
+ - 'alternative is changed'
+
+- name: Execute the current dummymain command
+ command: dummymain
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy1"
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy2"
+
+- name: Get dummymain alternatives output
+ command:
+ cmd: '{{ alternatives_command }} --display dummymain'
+ register: result
+
+- name: Print result
+ debug:
+ var: result.stdout_lines
+
+- name: Subcommands are not removed if not specified
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy1'
+ link: '/usr/bin/dummymain'
+ register: alternative
+
+- name: Check that the alternative was not changed
+ assert:
+ that:
+ - 'alternative is not changed'
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy2"
+
+- name: Subcommands are removed if set to an empty list
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy1'
+ link: '/usr/bin/dummymain'
+ subcommands: []
+ register: alternative
+
+- name: Check that the alternative was changed
+ assert:
+ that:
+ - 'alternative is changed'
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+ ignore_errors: true
+
+- name: Ensure that the subcommand is gone
+ assert:
+ that:
+ - cmd.rc == 2
+ - '"No such file" in cmd.msg'
+
+- name: Get dummymain alternatives output
+ command:
+ cmd: '{{ alternatives_command }} --display dummymain'
+ register: result
+
+- name: Print result
+ debug:
+ var: result.stdout_lines
+
+- name: Install other alternative with subcommands
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy3'
+ link: '/usr/bin/dummymain'
+ subcommands:
+ - name: dummysubcmd
+ path: '/usr/bin/dummy4'
+ link: '/usr/bin/dummysubcmd'
+ register: alternative
+
+- name: Check that the alternative was changed
+ assert:
+ that:
+ - 'alternative is changed'
+
+- name: Execute the current dummymain command
+ command: dummymain
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy3"
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy4"
+
+- name: Get dummymain alternatives output
+ command:
+ cmd: '{{ alternatives_command }} --display dummymain'
+ register: result
+
+- name: Print result
+ debug:
+ var: result.stdout_lines
+
+- name: Switch to first alternative
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy1'
+ register: alternative
+
+- name: Check that the alternative was changed
+ assert:
+ that:
+ - 'alternative is changed'
+
+- name: Execute the current dummymain command
+ command: dummymain
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy1"
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+ ignore_errors: true
+
+- name: Ensure that the subcommand is gone
+ assert:
+ that:
+ - cmd.rc == 2
+ - '"No such file" in cmd.msg'
+
+- name: Get dummymain alternatives output
+ command:
+ cmd: '{{ alternatives_command }} --display dummymain'
+ register: result
+
+- name: Print result
+ debug:
+ var: result.stdout_lines
+
+- name: Switch to second alternative
+ alternatives:
+ name: dummymain
+ path: '/usr/bin/dummy3'
+ register: alternative
+
+- name: Check that the alternative was changed
+ assert:
+ that:
+ - 'alternative is changed'
+
+- name: Execute the current dummymain command
+ command: dummymain
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy3"
+
+- name: Execute the current dummysubcmd command
+ command: dummysubcmd
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy4"
+
+- name: Get dummymain alternatives output
+ command:
+ cmd: '{{ alternatives_command }} --display dummymain'
+ register: result
+
+- name: Print result
+ debug:
+ var: result.stdout_lines
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml
new file mode 100644
index 000000000..ca59a4b55
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/test.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: ' with_alternatives: {{ with_alternatives }}, mode: {{ mode }}'
+
+- block:
+ - name: set alternative (using link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: '/usr/bin/dummy'
+ register: alternative
+
+ - name: check that the alternative was set
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
+ when: with_link
+
+- block:
+ - name: set alternative (without link parameter)
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ register: alternative
+
+ - name: check that the alternative was set
+ assert:
+ that:
+ - 'alternative is successful'
+ - 'alternative is changed'
+ when: not with_link
+
+- name: execute dummy command
+ shell: dummy
+ register: cmd
+
+- name: check expected command was executed
+ assert:
+ that:
+ - 'cmd.stdout == "dummy" ~ item'
+
+- name: 'check mode (manual: alternatives file existed, it has been updated)'
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+ when: ansible_os_family != 'RedHat' or with_alternatives or item != 1
+
+- name: 'check mode (auto: alternatives file didn''t exist, it has been created)'
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"'
+ when: ansible_os_family == 'RedHat' and not with_alternatives and item == 1
+
+- name: check that alternative has been updated
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n' '{{ alternatives_dir }}/dummy'"
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml
new file mode 100644
index 000000000..75e30cabe
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - include_tasks: remove_links.yml
+ - include_tasks: setup_test.yml
+ # at least two iterations:
+ # - the first run hits the 'link currently absent' code path,
+ # - the second run hits 'link currently points to'
+ - include_tasks: test.yml
+ with_sequence: start=1 end=2
+ vars:
+ with_link: '{{ test_conf[0] }}'
+ with_alternatives: '{{ test_conf[1] }}'
+ mode: '{{ test_conf[2] }}'
+ # update-alternatives included in Fedora 26 (1.10) & Red Hat 7.4 (1.8) doesn't provide
+ # '--query' switch, 'link' is mandatory for these distributions.
+ when: ansible_os_family != 'RedHat' or test_conf[0]
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml
new file mode 100644
index 000000000..46cf48e59
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_set_priority.yml
@@ -0,0 +1,54 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
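+# 'item' comes from the caller's with_sequence (start=3 end=4 in main.yml);
+# e.g. '60 + item|int' below evaluates to priorities 63 and 64.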
+- name: update dummy alternative
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: /usr/bin/dummy
+ priority: '{{ 60 + item|int }}'
+ register: alternative
+
+- name: execute dummy command
+ shell: dummy
+ register: cmd
+
+- name: check if link group is in manual mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+
+- name: check the alternative was updated and the expected command executed
+ assert:
+ that:
+ - 'alternative is changed'
+ - 'cmd.stdout == "dummy{{ item }}"'
+
+- name: check that alternative has been updated
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 60 + item|int }}' '{{ alternatives_dir }}/dummy'"
+
+- name: update dummy priority
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: /usr/bin/dummy
+ priority: '{{ 70 + item|int }}'
+ register: alternative
+
+- name: check that alternative priority has been updated
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 70 + item|int }}' '{{ alternatives_dir }}/dummy'"
+
+- name: no change without priority
+ alternatives:
+ name: dummy
+ path: '/usr/bin/dummy{{ item }}'
+ link: /usr/bin/dummy
+ register: alternative
+
+- name: check no change was triggered without priority
+ assert:
+ that:
+ - 'alternative is not changed'
+
+- name: check that alternative priority has not been changed
+ command: "grep -Pzq '/bin/dummy{{ item }}\\n{{ 70 + item|int }}' '{{ alternatives_dir }}/dummy'"
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_state.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_state.yml
new file mode 100644
index 000000000..92c8078c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/tasks/tests_state.yml
@@ -0,0 +1,120 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Add a few dummy alternatives with state = present and make sure that the
+# group is in 'auto' mode and the highest priority alternative is selected.
+- name: Add some dummy alternatives with state = present
+ alternatives:
+ name: dummy
+ path: "/usr/bin/dummy{{ item.n }}"
+ link: /usr/bin/dummy
+ priority: "{{ item.priority }}"
+ state: present
+ loop:
+ - { n: 1, priority: 50 }
+ - { n: 2, priority: 70 }
+ - { n: 3, priority: 25 }
+
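+# With the priorities above, /usr/bin/dummy2 (priority 70) should be selected
+# automatically; the tasks below verify both the mode and the selection.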
+- name: Ensure that the link group is in auto mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"'
+
+# Execute current selected 'dummy' and ensure it's the alternative we expect
+- name: Execute the current dummy command
+ shell: dummy
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy2"
+
+# Add another alternative with state = 'selected' and make sure that
+# this change results in the group being set to manual mode, and the
+# new alternative being the selected one.
+- name: Add another dummy alternative with state = selected
+ alternatives:
+ name: dummy
+ path: /usr/bin/dummy4
+ link: /usr/bin/dummy
+ priority: 10
+ state: selected
+
+- name: Ensure that the link group is in manual mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+
+- name: Execute the current dummy command
+ shell: dummy
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy4"
+
+# Set the currently selected alternative to state = 'present' (was previously
+# selected), and ensure that this results in the group not being set to 'auto'
+# mode, and the alternative is still selected.
+- name: Set current selected dummy to state = present
+ alternatives:
+ name: dummy
+ path: /usr/bin/dummy4
+ link: /usr/bin/dummy
+ state: present
+
+- name: Ensure that the link group is still in manual mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^manual$"'
+
+- name: Execute the current dummy command
+ shell: dummy
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy4"
+
+# Set the currently selected alternative to state = 'auto' (was previously
+# selected), and ensure that this results in the group being set to 'auto'
+# mode, and the highest priority alternative is selected.
+- name: Set current selected dummy to state = auto
+ alternatives:
+ name: dummy
+ path: /usr/bin/dummy4
+ link: /usr/bin/dummy
+ state: auto
+
+- name: Ensure that the link group is in auto mode
+ shell: 'head -n1 {{ alternatives_dir }}/dummy | grep "^auto$"'
+
+- name: Execute the current dummy command
+ shell: dummy
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy2"
+
+# Remove an alternative with state = 'absent' and make sure that
+# this change results in the alternative being removed.
+- name: Remove best dummy alternative with state = absent
+ alternatives:
+ name: dummy
+ path: /usr/bin/dummy2
+ state: absent
+
+- name: Ensure that the dummy2 alternative was removed
+ shell: 'grep "/usr/bin/dummy2" {{ alternatives_dir }}/dummy'
+ register: cmd
+ failed_when: cmd.rc == 0
+
+- name: Execute the current dummy command
+ shell: dummy
+ register: cmd
+
+- name: Ensure that the expected command was executed
+ assert:
+ that:
+ - cmd.stdout == "dummy1"
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative b/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative
new file mode 100644
index 000000000..9b7136d56
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_alternative
@@ -0,0 +1,17 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
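+{#
+Template note: this mirrors the layout these tests expect in an
+update-alternatives admin file (simplified): the mode on the first line, the
+master link next, then a path/priority pair per registered alternative.
+#}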
+{{ mode }}
+/usr/bin/dummy
+
+{% if with_alternatives %}
+/usr/bin/dummy1
+40
+/usr/bin/dummy2
+30
+
+{% else %}
+
+{% endif %}
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command b/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command
new file mode 100644
index 000000000..afd80e673
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/templates/dummy_command
@@ -0,0 +1,6 @@
+#!/bin/sh
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+echo dummy{{ item }}
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml
new file mode 100644
index 000000000..e7f87c59d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Debian.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+alternatives_dir: /var/lib/dpkg/alternatives/
+alternatives_command: update-alternatives
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml
new file mode 100644
index 000000000..0d5a9cfec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/Suse-42.3.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+alternatives_dir: /var/lib/rpm/alternatives/
+alternatives_command: update-alternatives
diff --git a/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml
new file mode 100644
index 000000000..68e1feafa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/alternatives/vars/default.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+alternatives_dir: /var/lib/alternatives/
+alternatives_command: update-alternatives
diff --git a/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/aliases b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/aliases
new file mode 100644
index 000000000..13655b194
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/python2.6
+context/controller # While this is not really true, this module is mainly run on the controller *and* needs access to the ansible-galaxy CLI tool
diff --git a/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/files/test.yml b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/files/test.yml
new file mode 100644
index 000000000..877b5fca4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/files/test.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+roles:
+ # Install a role from Ansible Galaxy.
+ - name: geerlingguy.java
+ version: 1.9.6
+
+collections:
+ # Install a collection from Ansible Galaxy.
+ - name: geerlingguy.php_roles
+ version: 0.9.3
+ source: https://galaxy.ansible.com
diff --git a/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/tasks/main.yml
new file mode 100644
index 000000000..1ecd9980d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ansible_galaxy_install/tasks/main.yml
@@ -0,0 +1,88 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
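+# Flow: install a collection, a role, and a combined requirements file; each
+# install is run twice to verify the module reports no change on the second pass.
+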
+###################################################
+- name: Install collection netbox.netbox
+ community.general.ansible_galaxy_install:
+ type: collection
+ name: netbox.netbox
+ register: install_c0
+
+- name: Assert collection netbox.netbox was installed
+ assert:
+ that:
+ - install_c0 is changed
+ - '"netbox.netbox" in install_c0.new_collections'
+
+- name: Install collection netbox.netbox (again)
+ community.general.ansible_galaxy_install:
+ type: collection
+ name: netbox.netbox
+ register: install_c1
+
+- name: Assert collection was not installed
+ assert:
+ that:
+ - install_c1 is not changed
+
+###################################################
+- name: Install role ansistrano.deploy
+ community.general.ansible_galaxy_install:
+ type: role
+ name: ansistrano.deploy
+ register: install_r0
+
+- name: Assert role ansistrano.deploy was installed
+ assert:
+ that:
+ - install_r0 is changed
+ - '"ansistrano.deploy" in install_r0.new_roles'
+
+- name: Install role ansistrano.deploy (again)
+ community.general.ansible_galaxy_install:
+ type: role
+ name: ansistrano.deploy
+ register: install_r1
+
+- name: Assert role was not installed again
+ assert:
+ that:
+ - install_r1 is not changed
+
+###################################################
+- name: Set requirements file path
+ set_fact:
+ reqs_file: '{{ remote_tmp_dir }}/reqs.yaml'
+
+- name: Copy requirements file
+ copy:
+ src: 'files/test.yml'
+ dest: '{{ reqs_file }}'
+
+- name: Install from requirements file
+ community.general.ansible_galaxy_install:
+ type: both
+ requirements_file: "{{ reqs_file }}"
+ register: install_rq0
+ ignore_errors: true
+
+- name: Assert requirements from file were installed
+ assert:
+ that:
+ - install_rq0 is changed
+ - '"geerlingguy.java" in install_rq0.new_roles'
+ - '"geerlingguy.php_roles" in install_rq0.new_collections'
+
+- name: Install from requirements file (again)
+ community.general.ansible_galaxy_install:
+ type: both
+ requirements_file: "{{ reqs_file }}"
+ register: install_rq1
+ ignore_errors: true
+
+- name: Assert requirements from file were not installed again
+ assert:
+ that:
+ - install_rq1 is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases b/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases
new file mode 100644
index 000000000..0d1324b22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/apache2_module/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml
new file mode 100644
index 000000000..5d93a9d30
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/635-apache2-misleading-warning.yml
@@ -0,0 +1,47 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+# This test represents the misleading warning behavior reported in the following issue: https://github.com/ansible-collections/community.general/issues/635
+- name: Disable mpm_event and enable mpm_prefork
+ apache2_module:
+ name: "{{ item.module }}"
+ state: "{{ item.state }}"
+ ignore_configcheck: true
+ register: disable_mpm_modules
+ with_items:
+ - { module: mpm_event, state: absent }
+ - { module: mpm_prefork, state: present }
+
+- assert:
+ that:
+ - "'warnings' in disable_mpm_modules"
+ - disable_mpm_modules["warnings"] == [
+ "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately.",
+ "No MPM module loaded! apache2 reload AND other module actions will fail if no MPM module is loaded immediately."
+ ]
+
+- name: Enable MPM event module - Revert previous change
+ apache2_module:
+ name: "{{ item.module}}"
+ state: "{{ item.state}}"
+ ignore_configcheck: true
+ register: disable_mpm_modules
+ with_items:
+ - { module: mpm_prefork, state: absent }
+ - { module: mpm_event, state: present }
+
+- name: Disable mpm_event and enable mpm_prefork without MPM warnings
+ apache2_module:
+ name: "{{ item.module }}"
+ state: "{{ item.state }}"
+ ignore_configcheck: true
+ warn_mpm_absent: false
+ register: disable_mpm_modules
+ with_items:
+ - { module: mpm_event, state: absent }
+ - { module: mpm_prefork, state: present }
+
+- assert:
+ that:
+ - "'warnings' not in disable_mpm_modules"
diff --git a/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml
new file mode 100644
index 000000000..3301a16b1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/actualtest.yml
@@ -0,0 +1,207 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: disable userdir module
+ community.general.apache2_module:
+ name: userdir
+ state: absent
+ register: userdir_first_disable
+
+- name: disable userdir module, second run
+ community.general.apache2_module:
+ name: userdir
+ state: absent
+ register: disable
+
+- name: ensure community.general.apache2_module is idempotent
+ assert:
+ that:
+ - disable is not changed
+
+- name: enable userdir module
+ community.general.apache2_module:
+ name: userdir
+ state: present
+ register: enable
+
+- name: ensure changed on successful enable
+ assert:
+ that:
+ - enable is changed
+
+- name: enable userdir module, second run
+ community.general.apache2_module:
+ name: userdir
+ state: present
+ register: enabletwo
+
+- name: ensure community.general.apache2_module is idempotent
+ assert:
+ that:
+ - 'not enabletwo.changed'
+
+- name: disable userdir module, final run
+ community.general.apache2_module:
+ name: userdir
+ state: absent
+ register: disablefinal
+
+- name: ensure changed on successful disable
+ assert:
+ that:
+ - 'disablefinal.changed'
+
+- name: set userdir to original state
+ community.general.apache2_module:
+ name: userdir
+ state: present
+ when: userdir_first_disable is changed
+
+- name: ensure autoindex enabled
+ community.general.apache2_module:
+ name: autoindex
+ state: present
+
+- name: Debian/Ubuntu specific tests
+ when: "ansible_os_family == 'Debian'"
+ block:
+ - name: force disable of autoindex # bug #2499
+ community.general.apache2_module:
+ name: autoindex
+ state: absent
+ force: true
+
+ - name: reenable autoindex
+ community.general.apache2_module:
+ name: autoindex
+ state: present
+
+ # mod_evasive is enabled by default on installation, so disable it first and re-enable it afterwards to preserve the config
+ - name: disable evasive module
+ community.general.apache2_module:
+ name: evasive
+ state: absent
+
+ - name: enable evasive module, test https://github.com/ansible/ansible/issues/22635
+ community.general.apache2_module:
+ name: evasive
+ state: present
+
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ community.general.apache2_module:
+ name: dump_io
+ state: present
+ ignore_errors: true
+ register: enable_dumpio_wrong
+
+ - name: disable dump_io
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
+
+ - name: use identifier to enable module, fix for https://github.com/ansible/ansible/issues/33669
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_1
+
+ - name: ensure idempotency with identifier
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: present
+ register: enable_dumpio_correct_2
+
+ - name: disable dump_io
+ community.general.apache2_module:
+ name: dump_io
+ identifier: dumpio_module
+ state: absent
+
+ - assert:
+ that:
+ - enable_dumpio_wrong is failed
+ - enable_dumpio_correct_1 is changed
+ - enable_dumpio_correct_2 is not changed
+
+ - name: disable mpm modules
+ community.general.apache2_module:
+ name: "{{ item }}"
+ state: absent
+ ignore_configcheck: true
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+ - name: enabled mpm_event
+ community.general.apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: true
+ register: enabledmpmevent
+
+ - name: ensure changed mpm_event
+ assert:
+ that:
+ - 'enabledmpmevent.changed'
+
+ - name: switch between mpm_event and mpm_worker
+ community.general.apache2_module:
+ name: "{{ item.name }}"
+ state: "{{ item.state }}"
+ ignore_configcheck: true
+ with_items:
+ - name: mpm_event
+ state: absent
+ - name: mpm_worker
+ state: present
+
+ - name: ensure mpm_worker is already enabled
+ community.general.apache2_module:
+ name: mpm_worker
+ state: present
+ register: enabledmpmworker
+
+ - name: ensure mpm_worker unchanged
+ assert:
+ that:
+ - 'not enabledmpmworker.changed'
+
+ - name: try to disable all mpm modules with configcheck
+ community.general.apache2_module:
+ name: "{{item}}"
+ state: absent
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+ ignore_errors: true
+ register: remove_with_configcheck
+
+ - name: ensure configcheck fails the task when run without mpm modules
+ assert:
+ that:
+ - "{{ item.failed }}"
+ with_items: "{{ remove_with_configcheck.results }}"
+
+ - name: try to disable all mpm modules without configcheck
+ community.general.apache2_module:
+ name: "{{item}}"
+ state: absent
+ ignore_configcheck: true
+ with_items:
+ - mpm_worker
+ - mpm_event
+ - mpm_prefork
+
+ - name: enabled mpm_event to restore previous state
+ community.general.apache2_module:
+ name: mpm_event
+ state: present
+ ignore_configcheck: true
+ register: enabledmpmevent
diff --git a/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml
new file mode 100644
index 000000000..6f2f718ad
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/apache2_module/tasks/main.yml
@@ -0,0 +1,52 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install apache via apt
+ apt:
+ name: "{{item}}"
+ state: present
+ when: "ansible_os_family == 'Debian'"
+ with_items:
+ - apache2
+ - libapache2-mod-evasive
+
+- name: install apache via zypper
+ community.general.zypper:
+ name: apache2
+ state: present
+ when: "ansible_os_family == 'Suse'"
+
+- name: test apache2_module
+ block:
+ - name: get list of enabled modules
+ shell: apache2ctl -M | sort
+ register: modules_before
+ - name: include only on supported systems
+ include_tasks: actualtest.yml
+ always:
+ - name: get list of enabled modules
+ shell: apache2ctl -M | sort
+ register: modules_after
+ - name: modules_before
+ debug:
+ var: modules_before
+ - name: modules_after
+ debug:
+ var: modules_after
+ - name: ensure that all test modules are disabled again
+ assert:
+ that: modules_before.stdout == modules_after.stdout
+ when: ansible_os_family in ['Debian', 'Suse']
+ # CentOS/RHEL does not have a2enmod/a2dismod
+
+- name: include misleading warning test
+ include_tasks: 635-apache2-misleading-warning.yml
+ when: ansible_os_family in ['Debian']
+ # Suse has mpm_event module compiled within the base apache2
\ No newline at end of file
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/aliases b/ansible_collections/community/general/tests/integration/targets/archive/aliases
new file mode 100644
index 000000000..88b15a24f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+needs/root
+destructive
+skip/aix
+skip/osx # FIXME
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt b/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt
new file mode 100644
index 000000000..32276adb8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/files/bar.txt
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+bar.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt b/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/files/empty.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt b/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt
new file mode 100644
index 000000000..a40d2f008
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/files/foo.txt
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+foo.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/files/sub/subfile.txt b/ansible_collections/community/general/tests/integration/targets/archive/files/sub/subfile.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/files/sub/subfile.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml
new file mode 100644
index 000000000..4ca41e254
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tasks/main.yml
@@ -0,0 +1,145 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the archive module.
+# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make sure we start fresh
+
+# Test setup
+- name: prep our files
+ copy: src={{ item }} dest={{remote_tmp_dir}}/{{ item }}
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+ - sub
+ - sub/subfile.txt
+
+# Run twice without lzma backport installed, to make sure it does not crash
+- name: Archive - pre-test - first run
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_pretest_1.tar"
+ format: "tar"
+ register: pretest_1
+
+- name: Archive - pre-test - second run
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_pretest_1.tar"
+ format: "tar"
+ register: pretest_2
+
+- name: Archive - validate pre-test
+ assert:
+ that:
+ - pretest_1 is changed
+ - pretest_2 is not changed
+
+# Install dependencies
+- name: Ensure zip is present to create test archive (yum)
+ yum: name=zip state=latest
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: Ensure zip is present to create test archive (apt)
+ apt: name=zip state=latest
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: Install prerequisites for backports.lzma when using python2 (non OSX)
+ block:
+ - name: Set liblzma package name depending on the OS
+ set_fact:
+ liblzma_dev_package:
+ Debian: liblzma-dev
+ RedHat: xz-devel
+ Suse: xz-devel
+ - name: Ensure liblzma-dev is present to install backports-lzma
+ package: name={{ liblzma_dev_package[ansible_os_family] }} state=latest
+ when: ansible_os_family in liblzma_dev_package.keys()
+ when:
+ - ansible_python_version.split('.')[0] == '2'
+ - ansible_os_family != 'Darwin'
+
+- name: Install prerequisites for backports.lzma when using python2 (OSX)
+ block:
+ - name: Find brew binary
+ command: which brew
+ register: brew_which
+ - name: Get owner of brew binary
+ stat: path="{{ brew_which.stdout }}"
+ register: brew_stat
+ - name: "Install package"
+ homebrew:
+ name: xz
+ state: present
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile a package which takes a long time. Do not upgrade homebrew until a
+ # proper solution can be found
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "True"
+ when:
+ - ansible_python_version.split('.')[0] == '2'
+ - ansible_os_family == 'Darwin'
+
+- name: Ensure backports.lzma is present to create test archive (pip)
+ pip: name=backports.lzma state=latest
+ when: ansible_python_version.split('.')[0] == '2'
+ register: backports_lzma_pip
+
+- name: Define formats to test
+ set_fact:
+ formats:
+ - tar
+ - zip
+ - gz
+ - bz2
+ - xz
+
+# Run tests
+- name: Run core tests
+ include_tasks:
+ file: ../tests/core.yml
+ loop: "{{ formats }}"
+ loop_control:
+ loop_var: format
+
+- name: Run exclusions tests
+ include_tasks:
+ file: ../tests/exclusions.yml
+ loop: "{{ formats }}"
+ loop_control:
+ loop_var: format
+
+- name: Run remove tests
+ include_tasks:
+ file: ../tests/remove.yml
+ loop: "{{ formats }}"
+ loop_control:
+ loop_var: format
+
+- name: Run broken link tests
+ include_tasks:
+ file: ../tests/broken-link.yml
+ loop: "{{ formats }}"
+ loop_control:
+ loop_var: format
+
+- name: Run Idempotency tests
+ include_tasks:
+ file: ../tests/idempotency.yml
+ loop: "{{ formats }}"
+ loop_control:
+ loop_var: format
+
+# Test cleanup
+- name: Remove backports.lzma if previously installed (pip)
+ pip: name=backports.lzma state=absent
+ when: backports_lzma_pip is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tests/broken-link.yml b/ansible_collections/community/general/tests/integration/targets/archive/tests/broken-link.yml
new file mode 100644
index 000000000..7c6444371
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tests/broken-link.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Create link - broken link ({{ format }})
+ file:
+ src: /nowhere
+ dest: "{{ remote_tmp_dir }}/nowhere.txt"
+ state: link
+ force: true
+
+ - name: Archive - broken link ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
+ format: "{{ format }}"
+
+ - name: Verify archive exists - broken link ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
+ state: file
+
+ - name: Remove archive - broken link ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_broken_link.{{ format }}"
+ state: absent
+
+ - name: Remove link - broken link ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/nowhere.txt"
+ state: absent
+ # 'zip' does not support symlinks
+ when: format != 'zip'
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tests/core.yml b/ansible_collections/community/general/tests/integration/targets/archive/tests/core.yml
new file mode 100644
index 000000000..1c4f4d1aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tests/core.yml
@@ -0,0 +1,177 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the archive module.
+# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make sure we start fresh
+
+# Core functionality tests
+- name: Archive - no options ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_no_opts.{{ format }}"
+ format: "{{ format }}"
+ register: archive_no_options
+
+- name: Verify that archive exists - no options ({{ format }})
+ file:
+ path: "{{remote_tmp_dir}}/archive_no_opts.{{ format }}"
+ state: file
+
+- name: Verify that archive result is changed and includes all files - no options ({{ format }})
+ assert:
+ that:
+ - archive_no_options is changed
+ - "archive_no_options.dest_state == 'archive'"
+ - "{{ archive_no_options.archived | length }} == 3"
+
+- name: Remove the archive - no options ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_no_options.{{ format }}"
+ state: absent
+
+- name: Archive - file options ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}"
+ format: "{{ format }}"
+ mode: "u+rwX,g-rwx,o-rwx"
+ register: archive_file_options
+
+- name: Retrieve archive file information - file options ({{ format }})
+ stat:
+ path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}"
+ register: archive_file_options_stat
+
+- name: Test that the file modes were changed
+ assert:
+ that:
+ - archive_file_options_stat is not changed
+ - "archive_file_options.mode == '0600'"
+ - "{{ archive_file_options.archived | length }} == 3"
+
+- name: Remove the archive - file options ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_file_options.{{ format }}"
+ state: absent
+
+- name: Archive - non-ascii ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}"
+ format: "{{ format }}"
+ register: archive_nonascii
+
+- name: Retrieve archive file information - non-ascii ({{ format }})
+ stat:
+ path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}"
+ register: archive_nonascii_stat
+
+- name: Test that archive exists - non-ascii ({{ format }})
+ assert:
+ that:
+ - archive_nonascii is changed
+ - archive_nonascii_stat.stat.exists
+
+- name: Remove the archive - non-ascii ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_nonascii_くらとみ.{{ format }}"
+ state: absent
+
+- name: Archive - single target ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/foo.txt"
+ dest: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}"
+ format: "{{ format }}"
+ register: archive_single_target
+
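+# gz, bz2 and xz are plain compressors, so a single file is expected to yield
+# dest_state 'compress', while the tar and zip containers report 'archive'.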
+- name: Assert archive has correct state - single target ({{ format }})
+ assert:
+ that:
+ - archive_single_target.dest_state == state_map[format]
+ vars:
+ state_map:
+ tar: archive
+ zip: archive
+ gz: compress
+ bz2: compress
+ xz: compress
+
+- block:
+ - name: Retrieve contents of archive - single target ({{ format }})
+ ansible.builtin.unarchive:
+ src: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}"
+ dest: .
+ list_files: true
+ check_mode: true
+ ignore_errors: true
+ register: archive_single_target_contents
+
+ - name: Assert that file names are preserved - single target ({{ format }})
+ assert:
+ that:
+ - "'oo.txt' not in archive_single_target_contents.files"
+ - "'foo.txt' in archive_single_target_contents.files"
+ # ``unarchive`` fails for RHEL and FreeBSD on ansible 2.x
+ when: archive_single_target_contents is success and archive_single_target_contents is not skipped
+ when: "format == 'zip'"
+
+- name: Remove archive - single target ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_single_target.{{ format }}"
+ state: absent
+
+- name: Archive - path list ({{ format }})
+ archive:
+ path:
+ - "{{ remote_tmp_dir }}/empty.txt"
+ - "{{ remote_tmp_dir }}/foo.txt"
+ - "{{ remote_tmp_dir }}/bar.txt"
+ dest: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}"
+ format: "{{ format }}"
+ register: archive_path_list
+
+- name: Verify that archive exists - path list ({{ format }})
+ file:
+ path: "{{remote_tmp_dir}}/archive_path_list.{{ format }}"
+ state: file
+
+- name: Assert that archive contains all files - path list ({{ format }})
+ assert:
+ that:
+ - archive_path_list is changed
+ - "{{ archive_path_list.archived | length }} == 3"
+
+- name: Remove archive - path list ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_path_list.{{ format }}"
+ state: absent
+
+- name: Archive - missing paths ({{ format }})
+ archive:
+ path:
+ - "{{ remote_tmp_dir }}/*.txt"
+ - "{{ remote_tmp_dir }}/dne.txt"
+ exclude_path: "{{ remote_tmp_dir }}/foo.txt"
+ dest: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}"
+ format: "{{ format }}"
+ register: archive_missing_paths
+
+- name: Assert that incomplete archive has incomplete state - missing paths ({{ format }})
+ assert:
+ that:
+ - archive_missing_paths is changed
+ - "archive_missing_paths.dest_state == 'incomplete'"
+ - "'{{ remote_tmp_dir }}/dne.txt' in archive_missing_paths.missing"
+ - "'{{ remote_tmp_dir }}/foo.txt' not in archive_missing_paths.missing"
+
+- name: Remove archive - missing paths ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_missing_paths.{{ format }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tests/exclusions.yml b/ansible_collections/community/general/tests/integration/targets/archive/tests/exclusions.yml
new file mode 100644
index 000000000..3c5f1fc5c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tests/exclusions.yml
@@ -0,0 +1,44 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Archive - exclusion patterns ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}"
+ format: "{{ format }}"
+ exclusion_patterns: b?r.*
+ register: archive_exclusion_patterns
+
+- name: Assert that only included files are archived - exclusion patterns ({{ format }})
+ assert:
+ that:
+ - archive_exclusion_patterns is changed
+ - "'bar.txt' not in archive_exclusion_patterns.archived"
+
+- name: Remove archive - exclusion patterns ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_exclusion_patterns.{{ format }}"
+ state: absent
+
+- name: Archive - exclude path ({{ format }})
+ archive:
+ path:
+ - "{{ remote_tmp_dir }}/sub/subfile.txt"
+ - "{{ remote_tmp_dir }}"
+ exclude_path:
+ - "{{ remote_tmp_dir }}"
+ dest: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}"
+ format: "{{ format }}"
+ register: archive_excluded_paths
+
+- name: Assert that excluded paths do not influence archive root - exclude path ({{ format }})
+ assert:
+ that:
+ - archive_excluded_paths.arcroot != remote_tmp_dir
+
+- name: Remove archive - exclude path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_exclude_paths.{{ format }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tests/idempotency.yml b/ansible_collections/community/general/tests/integration/targets/archive/tests/idempotency.yml
new file mode 100644
index 000000000..32f20a656
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tests/idempotency.yml
@@ -0,0 +1,144 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
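+# Each scenario archives a source, mutates it (content or file name), archives
+# again, and asserts whether the second run reports a change for the format.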
+- name: Archive - file content idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: file_content_idempotency_before
+
+- name: Modify file - file content idempotency ({{ format }})
+ lineinfile:
+ line: bar.txt
+ regexp: "^foo.txt$"
+ path: "{{ remote_tmp_dir }}/foo.txt"
+
+- name: Archive second time - file content idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: file_content_idempotency_after
+
+- name: Assert task status is changed - file content idempotency ({{ format }})
+ assert:
+ that:
+ - file_content_idempotency_after is changed
+ # Only ``zip`` archives are guaranteed to compare file content checksums rather than header checksums
+ when: "format == 'zip'"
+
+- name: Remove archive - file content idempotency ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_file_content_idempotency.{{ format }}"
+ state: absent
+
+- name: Modify file back - file content idempotency ({{ format }})
+ lineinfile:
+ line: foo.txt
+ regexp: "^bar.txt$"
+ path: "{{ remote_tmp_dir }}/foo.txt"
+
+- name: Archive - file name idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: file_name_idempotency_before
+
+- name: Rename file - file name idempotency ({{ format }})
+ command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt"
+
+- name: Archive again - file name idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: file_name_idempotency_after
+
+- name: Check task status - file name idempotency ({{ format }})
+ assert:
+ that:
+ - file_name_idempotency_after is changed
+
+- name: Remove archive - file name idempotency ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_file_name_idempotency.{{ format }}"
+ state: absent
+
+- name: Rename file back - file name idempotency ({{ format }})
+ command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt"
+
+- name: Archive - single file content idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/foo.txt"
+ dest: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: single_file_content_idempotency_before
+
+- name: Modify file - single file content idempotency ({{ format }})
+ lineinfile:
+ line: bar.txt
+ regexp: "^foo.txt$"
+ path: "{{ remote_tmp_dir }}/foo.txt"
+
+- name: Archive second time - single file content idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/foo.txt"
+ dest: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: single_file_content_idempotency_after
+
+- name: Assert task status is changed - single file content idempotency ({{ format }})
+ assert:
+ that:
+ - single_file_content_idempotency_after is changed
+ # ``tar`` archives are not guaranteed to identify changes to file content if the file meta properties are unchanged.
+ when: "format != 'tar'"
+
+- name: Remove archive - single file content idempotency ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_single_file_content_idempotency.{{ format }}"
+ state: absent
+
+- name: Modify file back - single file content idempotency ({{ format }})
+ lineinfile:
+ line: foo.txt
+ regexp: "^bar.txt$"
+ path: "{{ remote_tmp_dir }}/foo.txt"
+
+- name: Archive - single file name idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/foo.txt"
+ dest: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: single_file_name_idempotency_before
+
+- name: Rename file - single file name idempotency ({{ format }})
+ command: "mv {{ remote_tmp_dir }}/foo.txt {{ remote_tmp_dir }}/fii.txt"
+
+- name: Archive again - single file name idempotency ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/fii.txt"
+ dest: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}"
+ format: "{{ format }}"
+ register: single_file_name_idempotency_after
+
+# The gz, bz2, and xz formats do not store the original file name
+# so it is not possible to identify a change in this scenario.
+- name: Check task status - single file name idempotency ({{ format }})
+ assert:
+ that:
+ - single_file_name_idempotency_after is changed
+ when: "format in ('tar', 'zip')"
+
+- name: Remove archive - single file name idempotency ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_single_file_name_idempotency.{{ format }}"
+ state: absent
+
+- name: Rename file back - single file name idempotency ({{ format }})
+ command: "mv {{ remote_tmp_dir }}/fii.txt {{ remote_tmp_dir }}/foo.txt"
diff --git a/ansible_collections/community/general/tests/integration/targets/archive/tests/remove.yml b/ansible_collections/community/general/tests/integration/targets/archive/tests/remove.yml
new file mode 100644
index 000000000..8f0b8cff8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/archive/tests/remove.yml
@@ -0,0 +1,211 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
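+# These scenarios verify that ``remove: true`` deletes the archived source
+# files and directories while paths matched by ``exclude_path`` are kept.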
+- name: Archive - remove source files ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/*.txt"
+ dest: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}"
+ format: "{{ format }}"
+ remove: true
+ register: archive_remove_source_files
+
+- name: Verify archive exists - remove source files ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}"
+ state: file
+
+- name: Verify all files were archived - remove source files ({{ format }})
+ assert:
+ that:
+ - archive_remove_source_files is changed
+ - "{{ archive_remove_source_files.archived | length }} == 3"
+
+- name: Remove archive - remove source files ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_files.{{ format }}"
+ state: absent
+
+- name: Assert that source files were removed - remove source files ({{ format }})
+ assert:
+ that:
+ - "'{{ remote_tmp_dir }}/{{ item }}' is not exists"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+
+- name: Copy source files - remove source directory ({{ format }})
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item }}"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+
+- name: Create temporary directory - remove source directory ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/tmpdir"
+ state: directory
+
+- name: Copy source files to temporary directory - remove source directory ({{ format }})
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+
+- name: Archive - remove source directory ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/tmpdir"
+ dest: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}"
+ format: "{{ format }}"
+ remove: true
+ register: archive_remove_source_directory
+
+- name: Verify archive exists - remove source directory ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}"
+ state: file
+
+- name: Verify archive contains all files - remove source directory ({{ format }})
+ assert:
+ that:
+ - archive_remove_source_directory is changed
+ - "{{ archive_remove_source_directory.archived | length }} == 3"
+
+- name: Remove archive - remove source directory ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_directory.{{ format }}"
+ state: absent
+
+- name: Verify source directory was removed - remove source directory ({{ format }})
+ assert:
+ that:
+ - "'{{ remote_tmp_dir }}/tmpdir' is not exists"
+
+- name: Create temporary directory - remove source excluding path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/tmpdir"
+ state: directory
+
+- name: Copy source files to temporary directory - remove source excluding path ({{ format }})
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+
+- name: Archive - remove source excluding path ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/tmpdir/*"
+ dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}"
+ format: "{{ format }}"
+ remove: true
+ exclude_path: "{{ remote_tmp_dir }}/tmpdir/empty.txt"
+ register: archive_remove_source_excluding_path
+
+- name: Verify archive exists - remove source excluding path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}"
+ state: file
+
+- name: Verify all files except excluded are archived - remove source excluding path ({{ format }})
+ assert:
+ that:
+ - archive_remove_source_excluding_path is changed
+ - "{{ archive_remove_source_excluding_path.archived | length }} == 2"
+
+- name: Remove archive - remove source excluding path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_path.{{ format }}"
+ state: absent
+
+- name: Verify that excluded file still exists - remove source excluding path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/tmpdir/empty.txt"
+ state: file
+
+- name: Copy source files to temporary directory - remove source excluding sub path ({{ format }})
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+ - sub
+ - sub/subfile.txt
+
+- name: Archive - remove source excluding sub path ({{ format }})
+ archive:
+ path:
+ - "{{ remote_tmp_dir }}/tmpdir/*.txt"
+ - "{{ remote_tmp_dir }}/tmpdir/sub/*"
+ dest: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}"
+ format: "{{ format }}"
+ remove: true
+ exclude_path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt"
+ register: archive_remove_source_excluding_sub_path
+
+- name: Verify archive exists - remove source excluding sub path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}"
+ state: file
+
+- name: Remove archive - remove source excluding sub path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_excluding_sub_path.{{ format }}"
+ state: absent
+
+- name: Verify that sub path still exists - remove source excluding sub path ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/tmpdir/sub/subfile.txt"
+ state: file
+
+- name: Copy source files to temporary directory - remove source with nested paths ({{ format }})
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/tmpdir/{{ item }}"
+ with_items:
+ - foo.txt
+ - bar.txt
+ - empty.txt
+ - sub
+ - sub/subfile.txt
+
+- name: Archive - remove source with nested paths ({{ format }})
+ archive:
+ path: "{{ remote_tmp_dir }}/tmpdir/"
+ dest: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}"
+ format: "{{ format }}"
+ remove: true
+ register: archive_remove_nested_paths
+
+- name: Verify archive exists - remove source with nested paths ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}"
+ state: file
+
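+# If the module already removed the source tree, deleting it again is a no-op,
+# so the follow-up file task must report no change.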
+- name: Verify source files were removed - remove source with nested paths ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/tmpdir"
+ state: absent
+ register: archive_remove_nested_paths_status
+
+- name: Assert task statuses - remove source with nested paths ({{ format }})
+ assert:
+ that:
+ - archive_remove_nested_paths is success
+ - archive_remove_nested_paths_status is not changed
+
+- name: Remove archive - remove source with nested paths ({{ format }})
+ file:
+ path: "{{ remote_tmp_dir }}/archive_remove_source_nested_paths.{{ format }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/aliases b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/aliases
new file mode 100644
index 000000000..914c36ad3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+azp/posix/vm
+destructive
+needs/privileged
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/defaults/main.yml
new file mode 100644
index 000000000..52c88d5de
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
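+# File-backed loop devices used to build the test filesystems: one
+# single-device filesystem and one multi-device (raid0) filesystem.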
+btrfs_subvolume_single_configs:
+- file: "/tmp/disks0.img"
+ loop: "/dev/loop95"
+btrfs_subvolume_multiple_configs:
+- file: "/tmp/diskm0.img"
+ loop: "/dev/loop97"
+- file: "/tmp/diskm1.img"
+ loop: "/dev/loop98"
+- file: "/tmp/diskm2.img"
+ loop: "/dev/loop99"
+btrfs_subvolume_configs: "{{ btrfs_subvolume_single_configs + btrfs_subvolume_multiple_configs }}"
+btrfs_subvolume_single_devices: "{{ btrfs_subvolume_single_configs | map(attribute='loop') }}"
+btrfs_subvolume_single_label: "single"
+btrfs_subvolume_multiple_devices: "{{ btrfs_subvolume_multiple_configs | map(attribute='loop') }}"
+btrfs_subvolume_multiple_label: "multiple"
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/main.yml
new file mode 100644
index 000000000..d47270440
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/main.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
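+# If the required packages cannot be installed on this platform, the install
+# task fails and the test scenarios below are skipped via the when condition.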
+- name: Install required packages
+ ansible.builtin.package:
+ name:
+ - btrfs-progs # btrfs userspace
+ - util-linux # losetup
+ ignore_errors: True
+ register: btrfs_installed
+
+- name: Execute integration tests
+ block:
+ - ansible.builtin.include_tasks: 'setup.yml'
+
+ - name: "Execute test scenario for single device filesystem"
+ ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
+ vars:
+ btrfs_subvolume_target_device: "{{ btrfs_subvolume_single_devices | first }}"
+ btrfs_subvolume_target_label: "{{ btrfs_subvolume_single_label }}"
+
+ - name: "Execute test scenario for multiple device configuration"
+ ansible.builtin.include_tasks: 'run_filesystem_tests.yml'
+ vars:
+ btrfs_subvolume_target_device: "{{ btrfs_subvolume_multiple_devices | first }}"
+ btrfs_subvolume_target_label: "{{ btrfs_subvolume_multiple_label }}"
+ when: btrfs_installed is success
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_common_tests.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_common_tests.yml
new file mode 100644
index 000000000..013ec50bf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_common_tests.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- ansible.builtin.include_tasks: 'test_subvolume_simple.yml'
+- ansible.builtin.include_tasks: 'test_subvolume_nested.yml'
+- ansible.builtin.include_tasks: 'test_subvolume_recursive.yml'
+- ansible.builtin.include_tasks: 'test_subvolume_default.yml'
+
+- ansible.builtin.include_tasks: 'test_snapshot_skip.yml'
+- ansible.builtin.include_tasks: 'test_snapshot_clobber.yml'
+- ansible.builtin.include_tasks: 'test_snapshot_error.yml'
+
+- ansible.builtin.include_tasks: 'test_subvolume_whitespace.yml'
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml
new file mode 100644
index 000000000..0ea3fa666
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/run_filesystem_tests.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- ansible.builtin.include_tasks: 'test_filesystem_matching.yml'
+
+- name: "Execute all test scenario for unmounted filesystem"
+ ansible.builtin.include_tasks: 'run_common_tests.yml'
+
+- name: "Execute test scenarios where non-root subvolume is mounted"
+ block:
+ - name: Create subvolume '/nonroot'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ name: "/nonroot"
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ state: "present"
+ register: nonroot
+ - name: "Mount subvolume '/nonroot'"
+ ansible.posix.mount:
+ src: "{{ nonroot.filesystem.devices | first }}"
+ path: /mnt
+ opts: "subvolid={{ nonroot.target_subvolume_id }}"
+ fstype: btrfs
+ state: mounted
+ - name: "Run tests for explicit, mounted single device configuration"
+ ansible.builtin.include_tasks: 'run_common_tests.yml'
+ - name: "Unmount subvolume /nonroot"
+ ansible.posix.mount:
+ path: /mnt
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/setup.yml
new file mode 100644
index 000000000..f5bbdf9c5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/setup.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
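+# Provision file-backed loop devices, then format them as the single and
+# multiple device btrfs filesystems used by the test scenarios.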
+- name: "Create file {{ item.file }} to back loop device {{ item.loop }}"
+ ansible.builtin.command:
+ cmd: "dd if=/dev/zero of={{ item.file }} bs=1M count=200" ## minimum count 109
+ creates: "{{ item.file }}"
+ with_items: "{{ btrfs_subvolume_configs }}"
+
+- name: "Setup loop device {{ item.loop }}"
+ ansible.builtin.command:
+ cmd: "losetup {{ item.loop }} {{ item.file }}"
+ creates: "{{ item.loop }}"
+ with_items: "{{ btrfs_subvolume_configs }}"
+
+- name: Create single device btrfs filesystem
+ ansible.builtin.command:
+ cmd: "mkfs.btrfs --label {{ btrfs_subvolume_single_label }} -f {{ btrfs_subvolume_single_devices | first }}"
+ changed_when: True
+
+- name: Create multiple device btrfs filesystem
+ ansible.builtin.command:
+ cmd: "mkfs.btrfs --label {{ btrfs_subvolume_multiple_label }} -f -d raid0 {{ btrfs_subvolume_multiple_devices | join(' ') }}"
+ changed_when: True
+
+# Typically created by udev, but apparently missing on Alpine
+- name: Create btrfs control device node
+ ansible.builtin.command:
+ cmd: "mknod /dev/btrfs-control c 10 234"
+ creates: "/dev/btrfs-control"
+
+- name: Force rescan to ensure all devices are detected
+ ansible.builtin.command:
+ cmd: "btrfs device scan"
+ changed_when: True
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml
new file mode 100644
index 000000000..2455eeacf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_filesystem_matching.yml
@@ -0,0 +1,80 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Match targeted filesystem by label"
+ block:
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by label
+ community.general.btrfs_subvolume:
+ automount: Yes
+ name: "/match_label"
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ state: "present"
+ register: result
+
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
+
+- name: "Match targeted filesystem by uuid"
+ block:
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by uuid
+ community.general.btrfs_subvolume:
+ automount: Yes
+ name: "/match_uuid"
+ filesystem_uuid: "{{ result.filesystem.uuid }}"
+ state: "present"
+ register: result
+
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
+
+- name: "Match targeted filesystem by devices"
+ block:
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem by device
+ community.general.btrfs_subvolume:
+ automount: Yes
+ name: "/match_device"
+ filesystem_device: "{{ result.filesystem.devices | first }}"
+ state: "present"
+ register: result
+
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
+
+- name: "Match only mounted filesystem"
+ block:
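+ # Mounting with subvolid=5 selects the btrfs filesystem root.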
+ - name: "Mount filesystem '{{ btrfs_subvolume_target_label }}'"
+ ansible.posix.mount:
+ src: "{{ result.filesystem.devices | first }}"
+ path: /mnt
+ opts: "subvolid={{ 5 }}"
+ fstype: btrfs
+ state: mounted
+
+ - name: Print current status
+ community.general.btrfs_info:
+
+ - name: Match '{{ btrfs_subvolume_target_label }}' filesystem when it is the only one mounted
+ community.general.btrfs_subvolume:
+ automount: Yes
+ name: "/match_only_mounted"
+ state: "present"
+ register: result
+
+ - name: "Unmount filesystem '{{ btrfs_subvolume_target_label }}'"
+ ansible.posix.mount:
+ path: /mnt
+ state: absent
+
+ - name: Validate the '{{ btrfs_subvolume_target_label }}' filesystem was chosen
+ ansible.builtin.assert:
+ that:
+ - result.filesystem.label == btrfs_subvolume_target_label
+ when: False # TODO don't attempt this if the host already has a pre-existing btrfs filesystem
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml
new file mode 100644
index 000000000..ce25a999b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_clobber.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a snapshot, overwriting if one already exists at path
+ block:
+ - name: Create a snapshot named 'snapshot_clobber'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_clobber"
+ snapshot_source: "/"
+ snapshot_conflict: "clobber"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_clobber' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Create a snapshot named 'snapshot_clobber' (no idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_clobber"
+ snapshot_source: "/"
+ snapshot_conflict: "clobber"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_clobber' created (no idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+- name: Cleanup created snapshot
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_clobber"
+ state: "absent"
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml
new file mode 100644
index 000000000..49d928b74
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_error.yml
@@ -0,0 +1,42 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a snapshot, erroring if one already exists at path
+ block:
+ - name: Create a snapshot named 'snapshot_error'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_error"
+ snapshot_source: "/"
+ snapshot_conflict: "error"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_error' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Create a snapshot named 'snapshot_error' (no idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_error"
+ snapshot_source: "/"
+ snapshot_conflict: "error"
+ state: "present"
+ register: result
+ ignore_errors: true
+ - name: Snapshot 'snapshot_error' created (no idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Cleanup created snapshot
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_error"
+ state: "absent"
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml
new file mode 100644
index 000000000..07e65b133
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_snapshot_skip.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a snapshot if one does not already exist at path
+ block:
+ - name: Create a snapshot named 'snapshot_skip'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_skip"
+ snapshot_source: "/"
+ snapshot_conflict: "skip"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_skip' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Create a snapshot named 'snapshot_skip' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_skip"
+ snapshot_source: "/"
+ snapshot_conflict: "skip"
+ state: "present"
+ register: result
+ - name: Snapshot 'snapshot_skip' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Cleanup created snapshot
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/snapshot_skip"
+ state: "absent"
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml
new file mode 100644
index 000000000..f6eed9387
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_default.yml
@@ -0,0 +1,99 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Change the default subvolume
+ block:
+ - name: Update filesystem default subvolume to '@'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ default: True
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Update filesystem default subvolume to '@' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ default: True
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Revert the default subvolume
+ block:
+ - name: Revert filesystem default subvolume to '/'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ default: True
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/"
+ state: "present"
+ register: result
+ - name: Subvolume '/' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Revert filesystem default subvolume to '/' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ default: True
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/"
+ state: "present"
+ register: result
+ - name: Subvolume '/' set to default (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Change the default subvolume again
+ block:
+ - name: Update filesystem default subvolume to '@'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ default: True
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "present"
+ register: result
+ - name: Subvolume '@' set to default
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+- name: Revert custom default subvolume to fs_tree root when deleted
+ block:
+ - name: Delete custom default subvolume '@'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "absent"
+ register: result
+ - name: Subvolume '@' deleted
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Delete custom default subvolume '@' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/@"
+ state: "absent"
+ register: result
+ - name: Subvolume '@' deleted (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml
new file mode 100644
index 000000000..b706bf72a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_nested.yml
@@ -0,0 +1,61 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create parent subvolume 'container'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container"
+ state: "present"
+
+- name: Create a nested subvolume
+ block:
+ - name: Create a subvolume named 'nested' inside 'container'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "present"
+ register: result
+ - name: Subvolume 'container/nested' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'nested' inside 'container' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "present"
+ register: result
+ - name: Subvolume 'container/nested' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Remove a nested subvolume
+ block:
+ - name: Remove a subvolume named 'nested' inside 'container'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "absent"
+ register: result
+ - name: Subvolume 'container/nested' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Remove a subvolume named 'nested' inside 'container' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/nested"
+ state: "absent"
+ register: result
+ - name: Subvolume 'container/nested' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml
new file mode 100644
index 000000000..7e9f99007
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_recursive.yml
@@ -0,0 +1,86 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Recursively create subvolumes
+ block:
+ - name: Create a subvolume named '/recursive/son/grandson'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/son/grandson"
+ recursive: Yes
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/son/grandson' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Create a subvolume named '/recursive/son/grandson' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/son/grandson"
+ recursive: Yes
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/son/grandson' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+ - name: Create a subvolume named '/recursive/daughter/granddaughter'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/daughter/granddaughter"
+ recursive: Yes
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/daughter/granddaughter' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Create a subvolume named '/recursive/daughter/granddaughter' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive/daughter/granddaughter"
+ recursive: Yes
+ state: "present"
+ register: result
+ - name: Subvolume named '/recursive/daughter/granddaughter' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Recursively remove subvolumes
+ block:
+ - name: Remove subvolume '/recursive' and all descendants
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive"
+ recursive: Yes
+ state: "absent"
+ register: result
+ - name: Subvolume '/recursive' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Remove subvolume '/recursive' and all descendants (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/recursive"
+ recursive: Yes
+ state: "absent"
+ register: result
+ - name: Subvolume '/recursive' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml
new file mode 100644
index 000000000..6cd214e74
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_simple.yml
@@ -0,0 +1,54 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a simple subvolume
+ block:
+ - name: Create a subvolume named 'simple'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "present"
+ register: result
+ - name: Subvolume named 'simple' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'simple' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "present"
+ register: result
+ - name: Subvolume named 'simple' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Remove a simple subvolume
+ block:
+ - name: Remove a subvolume named 'simple'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'simple' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Remove a subvolume named 'simple' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/simple"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'simple' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
new file mode 100644
index 000000000..6a0147af6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/btrfs_subvolume/tasks/test_subvolume_whitespace.yml
@@ -0,0 +1,62 @@
+---
+# Copyright (c) 2022, Gregory Furlong <gnfzdz@fzdz.io>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a subvolume named 'container'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container"
+ state: "present"
+
+- name: Create a subvolume with whitespace in the name
+ block:
+ - name: Create a subvolume named 'container/my data'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "present"
+ register: result
+ - name: Subvolume named 'container/my data' created
+ ansible.builtin.assert:
+ that:
+ - result is changed
+ - name: Create a subvolume named 'container/my data' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "present"
+ register: result
+ - name: Subvolume named 'container/my data' created (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
+
+- name: Remove a subvolume with whitespace in the name
+ block:
+ - name: Remove a subvolume named 'container/my data'
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'container/my data' removed
+ ansible.builtin.assert:
+ that:
+ - result is changed
+
+ - name: Remove a subvolume named 'container/my data' (idempotency)
+ community.general.btrfs_subvolume:
+ automount: Yes
+ filesystem_label: "{{ btrfs_subvolume_target_label }}"
+ name: "/container/my data"
+ state: "absent"
+ register: result
+ - name: Subvolume named 'container/my data' removed (idempotency)
+ ansible.builtin.assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml b/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml
new file mode 100644
index 000000000..8e1a47e9a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback/inventory.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+all:
+ hosts:
+ testhost:
+ ansible_connection: local
diff --git a/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml
new file mode 100644
index 000000000..827217a53
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback/tasks/main.yml
@@ -0,0 +1,100 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Create temporary playbook files
+ tempfile:
+ state: file
+ suffix: temp
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: temporary_playbook_files
+
+ - name: Set temporary playbook file content
+ copy:
+ content: "{{ test.playbook }}"
+ dest: "{{ temporary_playbook_files.results[test_idx].path }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ index_var: test_idx
+ label: "{{ test.name }}"
+
+ - name: Collect outputs
+ command: "ansible-playbook -i {{ inventory }} {{ playbook }}"
+ environment: "{{ test.environment }}"
+ loop: "{{ tests }}"
+ loop_control:
+ loop_var: test
+ label: "{{ test.name }}"
+ register: outputs
+ changed_when: false
+ vars:
+ inventory: "{{ role_path }}/inventory.yml"
+ playbook: "
+ {%- for result in temporary_playbook_files.results -%}
+ {%- if result.test.name == test.name -%}
+ {{- result.path -}}
+ {%- endif -%}
+ {%- endfor -%}"
+
+ - name: Assert test output equals expected output
+ assert:
+ that: result.output.differences | length == 0
+ loop: "{{ results }}"
+ loop_control:
+ loop_var: result
+ label: "{{ result.name }}"
+ register: assertions
+ vars:
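+ # Diff captured stdout against expected_output line by line; a nested list
+ # in expected_output means any one of the listed lines is acceptable.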
+ results: >-
+ {%- set results = [] -%}
+ {%- for result in outputs.results -%}
+ {%- set differences = [] -%}
+ {%- for i in range([result.test.expected_output | count, result.stdout_lines | count] | max) -%}
+ {%- set line = "line_%s" | format(i+1) -%}
+ {%- set test_line = result.stdout_lines[i] | default(none) -%}
+ {%- set expected_lines = result.test.expected_output[i] | default(none) -%}
+ {%- if expected_lines is not string and expected_lines is not none -%}
+ {%- if test_line not in expected_lines -%}
+ {{- differences.append({
+ line: {
+ 'expected_one_of': expected_lines,
+ 'got': test_line }}) -}}
+ {%- endif -%}
+ {%- else -%}
+ {%- if expected_lines != test_line -%}
+ {{- differences.append({
+ line: {
+ 'expected': expected_lines,
+ 'got': test_line }}) -}}
+ {%- endif -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {{- results.append({
+ 'name': result.test.name,
+ 'output': {
+ 'differences': differences,
+ 'expected': result.test.expected_output,
+ 'got': result.stdout_lines }}) -}}
+ {%- endfor -%}
+ {{- results -}}
+
+ always:
+ - name: Remove temporary playbooks
+ file:
+ path: "{{ temporary_file.path }}"
+ state: absent
+ loop: "{{ temporary_playbook_files.results }}"
+ loop_control:
+ loop_var: temporary_file
+ label: "{{ temporary_file.test.name }}: {{ temporary_file.path }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases b/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases
new file mode 100644
index 000000000..3e2dd244c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_diy/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+needs/target/callback
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml
new file mode 100644
index 000000000..fa468b52b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_diy/tasks/main.yml
@@ -0,0 +1,462 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
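+# Each test case supplies environment variables for the diy callback, an
+# inline playbook, and the exact stdout lines the callback should produce.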
+- name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: Not using diy callback options
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_start_msg callback using environment variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG: "Sample output Sample playbook message"
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "Sample output Sample playbook message",
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_play_start_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - name: Sample play name
+ hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_play_start_msg: Sample output {{ ansible_callback_diy.play.name }}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "Sample output Sample play name",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_task_start_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "Sample output Sample task name",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_task_start_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: Sample output {{ ansible_callback_diy.task.name }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "Sample output Sample task name",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_failed_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ failed_when: true
+ ignore_errors: true
+ vars:
+ ansible_callback_diy_runner_on_failed_msg: Sample output Sample failure message
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Sample failure message",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
+ ]
+
+ - name: Set runner_on_skipped_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ when: false
+ vars:
+ ansible_callback_diy_runner_on_skipped_msg: Sample output Skipped {{ ansible_callback_diy.task.name }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Skipped Sample task name",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=0 changed=0 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_item_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ vars:
+ ansible_callback_diy_runner_item_on_ok_msg: Sample output Looping {{ ansible_callback_diy.result.output.msg }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output Looping sample debug msg sample item 1",
+ "Sample output Looping sample debug msg sample item 2",
+ "Sample output Looping sample debug msg sample item 3",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_item_on_failed_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ failed_when: item == 'sample item 2'
+ ignore_errors: true
+ vars:
+ ansible_callback_diy_runner_item_on_failed_msg: Sample output Looping sample failure message
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => (item=sample item 1) => {",
+ " \"msg\": \"sample debug msg sample item 1\"",
+ "}",
+ "Sample output Looping sample failure message",
+ "ok: [testhost] => (item=sample item 3) => {",
+ " \"msg\": \"sample debug msg sample item 3\"",
+ "}",
+ [
+ # Either message is accepted: older Ansible versions reported "All items
+ # completed", newer versions report "One or more items failed".
+ "fatal: [testhost]: FAILED! => {\"msg\": \"All items completed\"}",
+ "fatal: [testhost]: FAILED! => {\"msg\": \"One or more items failed\"}",
+ ],
+ "...ignoring",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=1 "
+ ]
+
+ - name: Set runner_item_on_skipped_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg {{ item }}
+ loop:
+ - sample item 1
+ - sample item 2
+ - sample item 3
+ when: item != 'sample item 2'
+ vars:
+ ansible_callback_diy_runner_item_on_skipped_msg: Sample output Looping Skipped {{ ansible_callback_diy.result.output.item }}
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => (item=sample item 1) => {",
+ " \"msg\": \"sample debug msg sample item 1\"",
+ "}",
+ "Sample output Looping Skipped sample item 2",
+ "ok: [testhost] => (item=sample item 3) => {",
+ " \"msg\": \"sample debug msg sample item 3\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set playbook_on_stats_msg callback using play variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ ansible_callback_diy_playbook_on_stats_msg: |+2
+ Sample output stats
+ ===============================
+ {% for key in ansible_callback_diy.stats | sort %}
+ {% set color_one = "" %}
+ {% set color_two = "" %}
+ {% if ansible_callback_diy.stats[key] %}
+ {% if key == 'ok' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'changed' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'processed' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% elif key == 'skipped' %}
+ {% set prefix = ' ' %}
+ {% set suffix = ' ' %}
+ {% else %}
+ {% set prefix = "" %}
+ {% set suffix = "" %}
+ {% endif %}
+ {{ color_one }}{{ "%s%s%s" | format(prefix,key,suffix) }}{{ color_two }}: {{ ansible_callback_diy.stats[key] | to_nice_yaml }}
+ {% endif %}
+ {% endfor %}
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ " Sample output stats",
+ "===============================",
+ " ok : testhost: 1",
+ "",
+ " processed : testhost: 1"
+ ]
+
+ - name: Suppress output on playbook_on_task_start_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_playbook_on_task_start_msg: ''
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "ok: [testhost] => {",
+ " \"msg\": \"sample debug msg\"",
+ "}",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Suppress output on runner_on_ok_msg callback using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: ''
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+
+ - name: Set runner_on_ok_msg_color using task variable
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.diy
+ playbook: !unsafe |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ vars:
+ ansible_callback_diy_runner_on_ok_msg: Sample output {{ ansible_callback_diy.result.output.msg }}
+ ansible_callback_diy_runner_on_ok_msg_color: blue
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "Sample output sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
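A note beyond these fixtures: every option exercised above is an ordinary variable, so the DIY messages can also be supplied ad hoc without editing the playbook. A minimal sketch, assuming the variable names documented by the plugin (the playbook name and message text are illustrative only):

    ANSIBLE_STDOUT_CALLBACK=community.general.diy \
    ansible-playbook site.yml \
      -e ansible_callback_diy_runner_on_ok_msg='ok -> {{ ansible_callback_diy.task.name }}'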
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases
new file mode 100644
index 000000000..343f119da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml
new file mode 100644
index 000000000..24f35f899
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/ping_log.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - ping:
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh
new file mode 100755
index 000000000..88eea1626
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_log_plays/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# ANSIBLE_CALLBACK_WHITELIST was deprecated in ansible-base 2.11 in favour of ANSIBLE_CALLBACKS_ENABLED; set both so the test works on either side of the change
+export ANSIBLE_CALLBACK_WHITELIST="community.general.log_plays,${ANSIBLE_CALLBACK_WHITELIST:-}"
+export ANSIBLE_CALLBACKS_ENABLED="community.general.log_plays,${ANSIBLE_CALLBACKS_ENABLED:-}"
+
+# run play, should create log and dir if needed
+export ANSIBLE_LOG_FOLDER="logit"
+ansible-playbook ping_log.yml -v "$@"
+[[ -f "${ANSIBLE_LOG_FOLDER}/localhost" ]]
+
+# now force it to fail
+export ANSIBLE_LOG_FOLDER="logit.file"
+touch "${ANSIBLE_LOG_FOLDER}"
+ansible-playbook ping_log.yml -v "$@" 2>&1| grep 'Failure using method (v2_runner_on_ok) in callback plugin'
+[[ ! -f "${ANSIBLE_LOG_FOLDER}/localhost" ]]
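For reference, the behaviour this script asserts can be reproduced outside the test matrix: the log_plays callback writes one log file per host into ANSIBLE_LOG_FOLDER, and errors out if that path is not a usable directory. A minimal sketch (the folder path is illustrative):

    export ANSIBLE_CALLBACKS_ENABLED=community.general.log_plays
    export ANSIBLE_LOG_FOLDER=/tmp/ansible-host-logs
    ansible-playbook site.yml
    ls "${ANSIBLE_LOG_FOLDER}"   # expect one file per host, e.g. 'localhost'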
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases b/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases
new file mode 100644
index 000000000..a27cf0e26
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_yaml/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/target/callback
diff --git a/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml
new file mode 100644
index 000000000..f3c36663d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/callback_yaml/tasks/main.yml
@@ -0,0 +1,101 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Run tests
+ include_role:
+ name: callback
+ vars:
+ tests:
+ - name: Basic run
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Sample task name
+ debug:
+ msg: sample debug msg
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Sample task name] ********************************************************",
+ "ok: [testhost] => ",
+ " msg: sample debug msg",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+ - name: Test umlauts in multiline
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ tasks:
+ - name: Umlaut output
+ debug:
+ msg: "äöü\néêè\nßï☺"
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Umlaut output] ***********************************************************",
+ "ok: [testhost] => ",
+ " msg: |-",
+ " äöü",
+ " éêè",
+ " ßï☺",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
+ - name: Test to_yaml
+ environment:
+ ANSIBLE_NOCOLOR: 'true'
+ ANSIBLE_FORCE_COLOR: 'false'
+ ANSIBLE_STDOUT_CALLBACK: community.general.yaml
+ playbook: |
+ - hosts: testhost
+ gather_facts: false
+ vars:
+ data: |
+ line 1
+ line 2
+ line 3
+ tasks:
+ - name: Test to_yaml
+ debug:
+ msg: "{{ '{{' }}'{{ '{{' }}'{{ '}}' }} data | to_yaml {{ '{{' }}'{{ '}}' }}'{{ '}}' }}"
+ # The above should be: msg: "{{ data | to_yaml }}"
+          # Unfortunately, because of the way Ansible templates these test playbooks, we need this 'escaping' trick...
+ expected_output: [
+ "",
+ "PLAY [testhost] ****************************************************************",
+ "",
+ "TASK [Test to_yaml] ************************************************************",
+ "ok: [testhost] => ",
+ " msg: |-",
+ " 'line 1",
+ " ",
+ " line 2",
+ " ",
+ " line 3",
+ " ",
+ " '",
+ "",
+ "PLAY RECAP *********************************************************************",
+ "testhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 "
+ ]
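The escaping dance in the to_yaml test is easier to read once rendered: the test harness templates the playbook text one extra time before running it, so each literal '{{' has to be smuggled in as {{ '{{' }}. After that single extra pass, the task above reduces to the following sketch of the rendered form:

    - name: Test to_yaml
      debug:
        msg: "{{ data | to_yaml }}"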
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/aliases b/ansible_collections/community/general/tests/integration/targets/cargo/aliases
new file mode 100644
index 000000000..9c7febe24
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/cargo/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/main.yml
new file mode 100644
index 000000000..bb22e27c0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup.yml
+- name: Set default environment
+ set_fact:
+ cargo_environment: {}
+- name: Set special environment to work around cargo bugs
+ set_fact:
+ cargo_environment:
+ # See https://github.com/rust-lang/cargo/issues/10230#issuecomment-1201662729:
+ CARGO_NET_GIT_FETCH_WITH_CLI: "true"
+ when: has_cargo | default(false) and ansible_distribution == 'Alpine'
+- block:
+ - import_tasks: test_general.yml
+ - import_tasks: test_version.yml
+ environment: "{{ cargo_environment }}"
+ when: has_cargo | default(false)
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/setup.yml
new file mode 100644
index 000000000..232658ab4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/setup.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Install cargo
+ package:
+ name: cargo
+ state: present
+ - set_fact:
+ has_cargo: true
+ when:
+ - ansible_system != 'FreeBSD'
+ - ansible_distribution != 'MacOSX'
+ - ansible_distribution != 'RedHat' or ansible_distribution_version is version('8.0', '>=')
+ - ansible_distribution != 'CentOS' or ansible_distribution_version is version('7.0', '>=')
+ - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('18', '>=')
+
+- block:
+ - name: Install rust (containing cargo)
+ package:
+ name: rust
+ state: present
+ - set_fact:
+ has_cargo: true
+ when:
+ - ansible_system == 'FreeBSD' and ansible_distribution_version is version('13.0', '>')
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_general.yml b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_general.yml
new file mode 100644
index 000000000..2bffa08f0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_general.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure application helloworld is uninstalled
+ community.general.cargo:
+ state: absent
+ name: helloworld
+ register: uninstall_absent_helloworld
+
+- name: Install application helloworld
+ community.general.cargo:
+ name: helloworld
+ register: install_absent_helloworld
+
+- name: Install application helloworld again
+ community.general.cargo:
+ name: helloworld
+ register: install_present_helloworld
+ ignore_errors: true
+
+- name: Uninstall application helloworld
+ community.general.cargo:
+ state: absent
+ name: helloworld
+ register: uninstall_present_helloworld
+
+- name: Check assertions helloworld
+ assert:
+ that:
+ - uninstall_absent_helloworld is not changed
+ - install_absent_helloworld is changed
+ - install_present_helloworld is not changed
+ - uninstall_present_helloworld is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_version.yml b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_version.yml
new file mode 100644
index 000000000..c1ab8e198
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cargo/tasks/test_version.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install application helloworld-yliu 0.1.0
+ community.general.cargo:
+ name: helloworld-yliu
+ version: 0.1.0
+ register: install_helloworld_010
+
+- name: Install application helloworld-yliu 0.1.0 (idempotent)
+ community.general.cargo:
+ name: helloworld-yliu
+ version: 0.1.0
+ register: install_helloworld_010_idem
+
+- name: Upgrade helloworld-yliu 0.1.0
+ community.general.cargo:
+ name: helloworld-yliu
+ state: latest
+ register: upgrade_helloworld_010
+
+- name: Upgrade helloworld-yliu 0.1.0 (idempotent)
+ community.general.cargo:
+ name: helloworld-yliu
+ state: latest
+ register: upgrade_helloworld_010_idem
+
+- name: Downgrade helloworld-yliu 0.1.0
+ community.general.cargo:
+ name: helloworld-yliu
+ version: 0.1.0
+ register: downgrade_helloworld_010
+
+- name: Downgrade helloworld-yliu 0.1.0 (idempotent)
+ community.general.cargo:
+ name: helloworld-yliu
+ version: 0.1.0
+ register: downgrade_helloworld_010_idem
+
+- name: Check assertions helloworld-yliu
+ assert:
+ that:
+ - install_helloworld_010 is changed
+ - install_helloworld_010_idem is not changed
+ - upgrade_helloworld_010 is changed
+ - upgrade_helloworld_010_idem is not changed
+ - downgrade_helloworld_010 is changed
+ - downgrade_helloworld_010_idem is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases
new file mode 100644
index 000000000..bec4d21af
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
new file mode 100644
index 000000000..40e762d68
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cloud_init_data_facts/tasks/main.yml
@@ -0,0 +1,68 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test cloud-init
+ # TODO: check for a workaround
+  # install 'cloud-init' failed: dpkg-divert: error: `diversion of /etc/init/ureadahead.conf
+ # to /etc/init/ureadahead.conf.disabled by cloud-init' clashes with `local diversion of
+ # /etc/init/ureadahead.conf to /etc/init/ureadahead.conf.distrib
+ # https://bugs.launchpad.net/ubuntu/+source/ureadahead/+bug/997838
+  # Will also have to skip on openSUSE when running on Python 2 on newer Leap versions
+  # (!= 42 and >= 15), as cloud-init will install the Python 3 package, breaking our build on py2.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_major_version|int == 14)
+ - not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and ansible_python.version.major != 3)
+ - not (ansible_distribution == "CentOS" and ansible_distribution_major_version|int == 8) # TODO: cannot start service
+ - not (ansible_distribution == 'Archlinux') # TODO: package seems to be broken, cannot be downloaded from mirrors?
+ - not (ansible_distribution == 'Alpine') # TODO: not sure what's wrong here, the module doesn't return what the tests expect
+ block:
+ - name: setup install cloud-init
+ package:
+ name:
+ - cloud-init
+ - udev
+
+ - name: Ensure systemd-network user exists
+ user:
+ name: systemd-network
+ state: present
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version|int >= 37
+
+ - name: setup run cloud-init
+ service:
+ name: cloud-init-local
+ state: restarted
+
+ - name: test gather cloud-init facts in check mode
+ cloud_init_data_facts:
+ check_mode: true
+ register: result
+ - name: verify test gather cloud-init facts in check mode
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
+
+ - name: test gather cloud-init facts
+ cloud_init_data_facts:
+ register: result
+ - name: verify test gather cloud-init facts
+ assert:
+ that:
+ - result.cloud_init_data_facts.status.v1 is defined
+ - result.cloud_init_data_facts.status.v1.stage is defined
+ - not result.cloud_init_data_facts.status.v1.stage
+ - cloud_init_data_facts.status.v1 is defined
+ - cloud_init_data_facts.status.v1.stage is defined
+ - not cloud_init_data_facts.status.v1.stage
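Outside the assertions above, note that the module returns Ansible facts, which is why both result.cloud_init_data_facts and the bare cloud_init_data_facts variable are checked. A minimal standalone sketch; the filter option (narrowing output to the status data) is taken from the module documentation, not from this test:

    - name: Gather only cloud-init status data
      community.general.cloud_init_data_facts:
        filter: status
      register: ci

    - ansible.builtin.debug:
        msg: "cloud-init stage: {{ cloud_init_data_facts.status.v1.stage }}"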
diff --git a/ansible_collections/community/general/tests/integration/targets/cmd_runner/aliases b/ansible_collections/community/general/tests/integration/targets/cmd_runner/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cmd_runner/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/cmd_runner/library/cmd_echo.py b/ansible_collections/community/general/tests/integration/targets/cmd_runner/library/cmd_echo.py
new file mode 100644
index 000000000..cd8766264
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cmd_runner/library/cmd_echo.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = ""
+
+EXAMPLES = ""
+
+RETURN = ""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ arg_formats=dict(type="dict", default={}),
+ arg_order=dict(type="raw", required=True),
+ arg_values=dict(type="dict", default={}),
+ check_mode_skip=dict(type="bool", default=False),
+ aa=dict(type="raw"),
+ ),
+ supports_check_mode=True,
+ )
+ p = module.params
+
+ info = None
+
+    # resolve each textual format spec into a cmd_runner_fmt callable
+    arg_formats = {}
+    for arg, fmt_spec in p['arg_formats'].items():
+        func = getattr(fmt, fmt_spec['func'])
+        args = fmt_spec.get("args", [])
+
+        arg_formats[arg] = func(*args)
+
+    # echo the rendered arguments back, so tests can assert on the exact command line
+    runner = CmdRunner(module, ['echo', '--'], arg_formats=arg_formats)
+
+    with runner.context(p['arg_order'], check_mode_skip=p['check_mode_skip']) as ctx:
+        result = ctx.run(**p['arg_values'])
+        info = ctx.run_info
+    # result is None when check_mode_skip prevented the command from running
+    rc, out, err = result if result is not None else (None, None, None)
+
+ module.exit_json(rc=rc, out=out, err=err, info=info)
+
+
+if __name__ == '__main__':
+ main()
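To make the vars/main.yml fixtures below easier to follow, here is what the two format helpers they reference appear to produce, inferred from the expected 'echo' output rather than from the library internals (a sketch, not a contract):

    # each cmd_runner_fmt helper returns a callable mapping a value to CLI tokens
    from ansible_collections.community.general.plugins.module_utils.cmd_runner import (
        cmd_runner_fmt as fmt,
    )

    render_aa = fmt.as_opt_eq_val('--answer')
    render_bb = fmt.as_bool('--bb-here')
    print(render_aa(11))    # expected: ['--answer=11']
    print(render_bb(True))  # expected: ['--bb-here']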
diff --git a/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/main.yml
new file mode 100644
index 000000000..36ab039f0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/main.yml
@@ -0,0 +1,8 @@
+# Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: parameterized test cmd_echo
+ ansible.builtin.include_tasks:
+ file: test_cmd_echo.yml
+ loop: "{{ cmd_echo_tests }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml b/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml
new file mode 100644
index 000000000..1c2caf2b5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cmd_runner/tasks/test_cmd_echo.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test cmd_echo [{{ item.name }}]
+ cmd_echo:
+ arg_formats: "{{ item.arg_formats|default(omit) }}"
+ arg_order: "{{ item.arg_order }}"
+ arg_values: "{{ item.arg_values|default(omit) }}"
+ check_mode_skip: "{{ item.check_mode_skip|default(omit) }}"
+ aa: "{{ item.aa|default(omit) }}"
+ register: test_result
+ check_mode: "{{ item.check_mode|default(omit) }}"
+ ignore_errors: "{{ item.expect_error|default(omit) }}"
+
+- name: check results [{{ item.name }}]
+ assert:
+ that: "{{ item.assertions }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/cmd_runner/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/cmd_runner/vars/main.yml
new file mode 100644
index 000000000..7f0027d49
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cmd_runner/vars/main.yml
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cmd_echo_tests:
+ - name: set aa and bb value
+ arg_formats:
+ aa:
+ func: as_opt_eq_val
+ args: [--answer]
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: 'aa bb'
+ arg_values:
+ bb: true
+ aa: 11
+ assertions:
+ - test_result.rc == 0
+ - test_result.out == "-- --answer=11 --bb-here\n"
+ - test_result.err == ""
+
+ - name: default aa value
+ arg_formats:
+ aa:
+ func: as_opt_eq_val
+ args: [--answer]
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: ['aa', 'bb']
+ arg_values:
+ aa: 43
+ bb: true
+ assertions:
+ - test_result.rc == 0
+ - test_result.out == "-- --answer=43 --bb-here\n"
+ - test_result.err == ""
+
+ - name: implicit aa format
+ arg_formats:
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: ['aa', 'bb']
+ arg_values:
+ bb: true
+ aa: 1984
+ assertions:
+ - test_result.rc == 0
+ - test_result.out == "-- --aa 1984 --bb-here\n"
+ - test_result.err == ""
+
+ - name: missing bb format
+ arg_order: ['aa', 'bb']
+ arg_values:
+ bb: true
+ aa: 1984
+ expect_error: true
+ assertions:
+ - test_result is failed
+ - test_result.rc == 1
+ - '"out" not in test_result'
+ - '"err" not in test_result'
+ - >-
+ "MissingArgumentFormat: Cannot find format for parameter bb"
+ in test_result.module_stderr
+
+ - name: missing bb value
+ arg_formats:
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: 'aa bb'
+ aa: 1984
+ expect_error: true
+ assertions:
+ - test_result is failed
+ - test_result.rc == 1
+ - '"out" not in test_result'
+ - '"err" not in test_result'
+ - >-
+ "MissingArgumentValue: Cannot find value for parameter bb"
+ in test_result.module_stderr
+
+ - name: set aa and bb value with check_mode on
+ arg_formats:
+ aa:
+ func: as_opt_eq_val
+ args: [--answer]
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: 'aa bb'
+ arg_values:
+ bb: true
+ aa: 11
+ check_mode: true
+ assertions:
+ - test_result.rc == 0
+ - test_result.out == "-- --answer=11 --bb-here\n"
+ - test_result.err == ""
+
+ - name: set aa and bb value with check_mode and check_mode_skip on
+ arg_formats:
+ aa:
+ func: as_opt_eq_val
+ args: [--answer]
+ bb:
+ func: as_bool
+ args: [--bb-here]
+ arg_order: 'aa bb'
+ arg_values:
+ bb: true
+ check_mode_skip: true
+ aa: 11
+ check_mode: true
+    expect_error: true  # Ansible treats a non-zero rc in the result as a failure
+ assertions:
+ - test_result.rc == None
+ - test_result.out == None
+ - test_result.err == None
diff --git a/ansible_collections/community/general/tests/integration/targets/connection/aliases b/ansible_collections/community/general/tests/integration/targets/connection/aliases
new file mode 100644
index 000000000..a02a2d61a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+hidden
diff --git a/ansible_collections/community/general/tests/integration/targets/connection/test.sh b/ansible_collections/community/general/tests/integration/targets/connection/test.sh
new file mode 100755
index 000000000..793a85dd3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection/test.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default locale and the C locale.
+
+ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+
+# ansible-core 2.14 and later refuse to run without a UTF-8 locale, so the
+# C-locale pass only makes sense on older versions.
+if ansible --version | grep ansible | grep -E ' 2\.(9|10|11|12|13)\.'; then
+ LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+fi
diff --git a/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml b/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml
new file mode 100644
index 000000000..bb0a99399
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection/test_connection.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: "{{ target_hosts }}"
+ gather_facts: false
+ serial: 1
+ tasks:
+
+ ### raw with unicode arg and output
+
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+        - command is changed  # as of Ansible 2.2, raw defaults to changed: true for consistency with the shell/command/script modules
+
+ ### copy local file with unicode filename and content
+
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+ ### fetch remote file with unicode filename and content
+
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+ ### remove local and remote temp files
+
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+ ### test wait_for_connection plugin
+ - ansible.builtin.wait_for_connection:
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases b/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases
new file mode 100644
index 000000000..38138ee4d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_chroot/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+needs/root
+skip/macos # Skipped due to limitation of macOS 10.15 SIP, please read https://github.com/ansible-collections/community.general/issues/1017#issuecomment-755088895
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh b/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh
new file mode 100755
index 000000000..9f31da64d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_chroot/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory b/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory
new file mode 100644
index 000000000..126b29c8a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_chroot/test_connection.inventory
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[chroot]
+chroot-pipelining ansible_ssh_pipelining=true
+chroot-no-pipelining ansible_ssh_pipelining=false
+[chroot:vars]
+ansible_host=/
+ansible_connection=community.general.chroot
+ansible_python_interpreter="{{ ansible_playbook_python }}"
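The inventory above points ansible_host at '/', i.e. the tests "chroot" into the real filesystem root so no prepared image is needed. Against an actual chroot, ansible_host is the chroot directory; a sketch with a hypothetical path, in YAML inventory form:

    all:
      hosts:
        bullseye-build:
          ansible_host: /srv/chroot/bullseye
          ansible_connection: community.general.chroot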
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases b/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_jail/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh b/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh
new file mode 100755
index 000000000..9f31da64d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_jail/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory b/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory
new file mode 100644
index 000000000..995c32444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_jail/test_connection.inventory
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[jail]
+jail-pipelining ansible_ssh_pipelining=true
+jail-no-pipelining ansible_ssh_pipelining=false
+[jail:vars]
+ansible_host=freebsd_10_2
+ansible_connection=community.general.jail
+ansible_python_interpreter=/usr/local/bin/python
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases b/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxc/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh b/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh
new file mode 100755
index 000000000..9f31da64d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxc/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory b/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory
new file mode 100644
index 000000000..cfcd7a32f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxc/test_connection.inventory
@@ -0,0 +1,21 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[lxc]
+lxc-pipelining ansible_ssh_pipelining=true
+lxc-no-pipelining ansible_ssh_pipelining=false
+[lxc:vars]
+# 1. install lxc
+# 2. install python2-lxc
+# $ pip install git+https://github.com/lxc/python2-lxc.git
+# 3. create container:
+# $ sudo lxc-create -t download -n centos-7-amd64 -- -d centos -r 7 -a amd64
+# 4. start container:
+# $ sudo lxc-start -n centos-7-amd64 -d
+# 5. run test:
+# $ sudo -E make test_connection_lxc
+# 6. stop container
+# $ sudo lxc-stop -n centos-7-amd64
+ansible_host=centos-7-amd64
+ansible_connection=community.general.lxc
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases b/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases
new file mode 100644
index 000000000..5a0c47032
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxd/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+non_local
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh b/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh
new file mode 100755
index 000000000..9f31da64d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxd/runme.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory b/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory
new file mode 100644
index 000000000..d2d2c10e3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_lxd/test_connection.inventory
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[lxd]
+lxd-pipelining ansible_ssh_pipelining=true
+lxd-no-pipelining ansible_ssh_pipelining=false
+[lxd:vars]
+ansible_host=centos-7-amd64
+ansible_connection=community.general.lxd
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases b/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases
new file mode 100644
index 000000000..44561e2ff
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_posix/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/connection
+hidden
diff --git a/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh b/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh
new file mode 100755
index 000000000..9f31da64d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/connection_posix/test.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+group=$(python -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/consul/aliases b/ansible_collections/community/general/tests/integration/targets/consul/aliases
new file mode 100644
index 000000000..d9cf1eb9c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/consul/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/aix
+skip/macos # cannot simply create binaries in system locations on newer macOS versions
diff --git a/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml
new file mode 100644
index 000000000..0909be206
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/consul/meta/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_openssl
+ - setup_remote_tmp_dir
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml b/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml
new file mode 100644
index 000000000..543668964
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/consul/tasks/consul_session.yml
@@ -0,0 +1,177 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: list sessions
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - "'sessions' in result"
+
+- name: create a session
+ consul_session:
+ state: present
+ name: testsession
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result['name'] == 'testsession'
+ - "'session_id' in result"
+
+- set_fact:
+ session_id: "{{ result['session_id'] }}"
+
+- name: list sessions after creation
+ consul_session:
+ state: list
+ register: result
+
+- set_fact:
+ session_count: "{{ result['sessions'] | length }}"
+
+- assert:
+ that:
+ - result is changed
+ # selectattr not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (set_fact/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'match', '^' ~ session_id ~ '$') | first)['Name'] == 'testsession'
+
+- name: search created session
+ set_fact:
+ test_session_found: true
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+
+- name: ensure session was created
+ assert:
+ that:
+ - test_session_found|default(False)
+
+- name: fetch info about a session
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: ensure 'id' parameter is required when state=info
+ consul_session:
+ state: info
+ name: test
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+
+- name: ensure unknown scheme fails
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ scheme: non_existent
+ register: result
+ ignore_errors: true
+
+- assert:
+ that:
+ - result is failed
+
+- name: ensure SSL certificate is checked
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ register: result
+ ignore_errors: true
+
+- name: previous task should fail since certificate is not known
+ assert:
+ that:
+ - result is failed
+ - "'certificate verify failed' in result.msg"
+
+- name: ensure SSL certificate isn't checked when validate_certs is disabled
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ validate_certs: false
+ register: result
+
+- name: previous task should succeed since certificate isn't checked
+ assert:
+ that:
+ - result is changed
+
+- name: ensure a secure connection is possible
+ consul_session:
+ state: info
+ id: '{{ session_id }}'
+ port: 8501
+ scheme: https
+ environment:
+ REQUESTS_CA_BUNDLE: '{{ remote_dir }}/cert.pem'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: delete a session
+ consul_session:
+ state: absent
+ id: '{{ session_id }}'
+ register: result
+
+- assert:
+ that:
+ - result is changed
+
+- name: list sessions after deletion
+ consul_session:
+ state: list
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ # selectattr and equalto not available on Jinja 2.2 provided by CentOS 6
+ # hence the two following tasks (command/assert) are used
+ # - (result['sessions'] | selectattr('ID', 'equalto', session_id) | list | length) == 0
+
+- name: search deleted session
+ command: echo 'session found'
+ loop: "{{ result['sessions'] }}"
+ when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
+ register: search_deleted
+
+- name: ensure session was deleted
+ assert:
+ that:
+ - search_deleted is skipped # each iteration is skipped
+ - search_deleted is not changed # and then unchanged
+
+- name: ensure session can be created with a ttl
+ consul_session:
+ state: present
+ name: session-with-ttl
+ ttl: 180 # sec
+ register: result
+
+- assert:
+ that:
+ - result is changed
+ - result['ttl'] == 180
diff --git a/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml
new file mode 100644
index 000000000..a2b63ac95
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/consul/tasks/main.yml
@@ -0,0 +1,89 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install Consul and test
+ vars:
+ consul_version: 1.5.0
+ consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip
+ consul_cmd: '{{ remote_tmp_dir }}/consul'
+ block:
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+ - name: Install python-consul
+ pip:
+ name: python-consul
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/privatekey.pem'
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+ - set_fact:
+ consul_arch: '386'
+ when: ansible_architecture == 'i386'
+ - set_fact:
+ consul_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+ - name: Download consul binary
+ unarchive:
+ src: '{{ consul_uri }}'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+    - vars:
+        # evaluated lazily, so it can reference the echo task registered just below
+        remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
+ block:
+ - command: echo {{ remote_tmp_dir }}
+ register: echo_remote_tmp_dir
+ - name: Create configuration file
+ template:
+ src: consul_config.hcl.j2
+ dest: '{{ remote_tmp_dir }}/consul_config.hcl'
+ - name: Start Consul (dev mode enabled)
+ shell: nohup {{ consul_cmd }} agent -dev -config-file {{ remote_tmp_dir }}/consul_config.hcl </dev/null >/dev/null 2>&1 &
+ - name: Create some data
+ command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}'
+ loop:
+ - 1
+ - 2
+ - 3
+ - import_tasks: consul_session.yml
+ always:
+ - name: Kill consul process
+ shell: kill $(cat {{ remote_tmp_dir }}/consul.pid)
+ ignore_errors: true
diff --git a/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2 b/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2
new file mode 100644
index 000000000..96da5d664
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/consul/templates/consul_config.hcl.j2
@@ -0,0 +1,14 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+# {{ ansible_managed }}
+server = true
+pid_file = "{{ remote_dir }}/consul.pid"
+ports {
+ http = 8500
+ https = 8501
+}
+key_file = "{{ remote_dir }}/privatekey.pem"
+cert_file = "{{ remote_dir }}/cert.pem"
diff --git a/ansible_collections/community/general/tests/integration/targets/copr/aliases b/ansible_collections/community/general/tests/integration/targets/copr/aliases
new file mode 100644
index 000000000..ed3c1af00
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/copr/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/root
+skip/macos
+skip/osx
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/copr/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/copr/tasks/main.yml
new file mode 100644
index 000000000..0e4651724
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/copr/tasks/main.yml
@@ -0,0 +1,160 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when:
+ # Fedora or RHEL >= 8
+ # This module requires the dnf module which is not available on RHEL 7.
+ - >
+ ansible_distribution == 'Fedora'
+ or (ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+ and ansible_distribution_major_version | int >= 8)
+ # The copr module imports dnf which is only available for the system Python
+ # interpreter.
+ - >
+ not (ansible_distribution == 'CentOS' and
+ ansible_distribution_major_version | int == 8 and not
+ ansible_python_version.startswith('3.6'))
+ block:
+ - debug: var=copr_chroot
+ - name: enable copr project
+ copr:
+ host: copr.fedorainfracloud.org
+ state: enabled
+ name: '{{ copr_fullname }}'
+ chroot: "{{ copr_chroot }}"
+ register: result
+
+ - name: assert that the copr project was enabled
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'enabled'
+ - result.info == 'Please note that this repository is not part of the main distribution'
+
+  - name: enable copr project (check mode)
+ check_mode: true
+ copr:
+ state: enabled
+ name: '{{ copr_fullname }}'
+ chroot: '{{ copr_chroot }}'
+ register: result
+
+  - name: assert that the copr project was enabled (check mode)
+ assert:
+ that:
+ - result is not changed
+ - result.msg == 'enabled'
+
+ - name: Ensure the repo is installed and enabled | slurp
+ register: result
+ ansible.builtin.slurp:
+ src: "{{ copr_repofile }}"
+
+ - name: Ensure the repo is installed and enabled
+ vars:
+ content: "{{ result.content | b64decode }}"
+ _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
+ baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
+ block:
+ - ansible.builtin.debug:
+ var: content
+ - ansible.builtin.debug:
+ var: baseurl
+ - name: Ensure the repo is installed and enabled
+ ansible.builtin.assert:
+ that:
+ - "'enabled=1' in content"
+ - baseurl | length > 0
+
+ - name: Install test package from Copr
+ when:
+ # Copr does not build new packages for EOL Fedoras.
+ - >
+ not (ansible_distribution == 'Fedora' and
+ ansible_distribution_major_version | int < 35)
+ block:
+ - name: install test package from the copr
+ ansible.builtin.package:
+ update_cache: true
+ name: copr-module-integration-dummy-package
+
+ - name: uninstall test package
+ register: result
+ ansible.builtin.package:
+ name: copr-module-integration-dummy-package
+ state: absent
+
+ - name: check uninstall test package
+ ansible.builtin.assert:
+ that: result.changed | bool
+
+ - name: remove copr project
+ copr:
+ state: absent
+ name: '{{ copr_fullname }}'
+ register: result
+
+ - name: assert that the copr project was removed
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'absent'
+
+ - name: Ensure the repo file was removed | stat
+ register: result
+ ansible.builtin.stat:
+ dest: "{{ copr_repofile }}"
+
+ - name: Ensure the repo file was removed
+ ansible.builtin.assert:
+ that: not result.stat.exists | bool
+
+ - name: disable copr project
+ copr:
+ state: disabled
+ name: '{{ copr_fullname }}'
+ chroot: '{{ copr_chroot }}'
+ register: result
+
+ - name: assert that the copr project was disabled
+ assert:
+ that:
+ - 'result is changed'
+ - result.msg == 'disabled'
+
+ - name: Ensure the repo is installed but disabled | slurp
+ register: result
+ ansible.builtin.slurp:
+ src: "{{ copr_repofile }}"
+
+ - name: Ensure the repo is installed but disabled
+ vars:
+ content: "{{ result.content | b64decode }}"
+ _baseurl: "{{ 'https://download.copr.fedorainfracloud.org/results/gotmax23/community.general.copr_integration_tests' | regex_escape }}"
+ baseurl: "{{ content | regex_search('baseurl=' ~ _baseurl) }}"
+ block:
+ - ansible.builtin.debug:
+ var: content
+ - ansible.builtin.debug:
+ var: baseurl
+ - name: Ensure the repo is installed but disabled
+ ansible.builtin.assert:
+ that:
+ - "'enabled=0' in content"
+ - baseurl | length > 0
+
+ always:
+ - name: clean up
+ ignore_errors: true
+ copr:
+ host: copr.fedorainfracloud.org
+ state: absent
+ name: '{{ copr_fullname }}'
+ chroot: '{{ copr_chroot }}'
+
+ - name: cleanup test package
+ ansible.builtin.package:
+ name: copr-module-integration-dummy-package
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/copr/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/copr/vars/main.yml
new file mode 100644
index 000000000..a37a44d47
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/copr/vars/main.yml
@@ -0,0 +1,15 @@
+# Copyright (c) 2022 Maxwell G <gotmax@e.email>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+---
+copr_host: copr.fedorainfracloud.org
+copr_namespace: gotmax23
+copr_name: community.general.copr_integration_tests
+copr_fullname: '{{ copr_namespace }}/{{ copr_name }}'
+copr_repofile: '/etc/yum.repos.d/_copr:{{ copr_host }}:{{ copr_namespace }}:{{ copr_name }}.repo'
+
+# TODO: Fix chroot autodetection so this isn't necessary
+_copr_chroot_fedora: "fedora-rawhide-x86_64"
+_copr_chroot_rhelish: "epel-{{ ansible_distribution_major_version }}-x86_64"
+copr_chroot: "{{ _copr_chroot_fedora if ansible_distribution == 'Fedora' else _copr_chroot_rhelish }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/cpanm/aliases b/ansible_collections/community/general/tests/integration/targets/cpanm/aliases
new file mode 100644
index 000000000..d30ba06b6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cpanm/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/macos
+skip/osx
+skip/freebsd
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/cpanm/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/cpanm/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cpanm/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/cpanm/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/cpanm/tasks/main.yml
new file mode 100644
index 000000000..c9adc1ca6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cpanm/tasks/main.yml
@@ -0,0 +1,65 @@
+# Copyright (c) 2020, Berkhan Berkdemir
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: bail out on unsupported platforms
+ meta: end_play
+ when:
+ - (ansible_os_family != "RedHat" or ansible_distribution_major_version|int < 7)
+ - ansible_os_family != "Debian"
+
+- name: install perl development package for Red Hat family
+ package:
+ name:
+ - perl-devel
+ - perl-App-cpanminus
+ state: present
+ become: true
+ when: ansible_os_family == "RedHat"
+
+- name: install perl development package for Debian family
+ package:
+ name:
+ - cpanminus
+ state: present
+ become: true
+ when: ansible_os_family == "Debian"
+
+- name: install a Perl package
+ cpanm:
+ name: JSON
+ notest: true
+ register: install_perl_package_result
+
+- name: assert package is installed
+ assert:
+ that:
+ - install_perl_package_result is changed
+ - install_perl_package_result is not failed
+
+- name: install same Perl package
+ cpanm:
+ name: JSON
+ notest: true
+ register: install_same_perl_package_result
+
+- name: assert same package is installed
+ assert:
+ that:
+ - install_same_perl_package_result is not changed
+ - install_same_perl_package_result is not failed
+
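+# "@4.01" pins JSON to exactly version 4.01 (version-operator syntax, enabled by mode=new).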
+- name: install a Perl package with version operator
+ cpanm:
+ name: JSON
+ version: "@4.01"
+ notest: true
+ mode: new
+ register: install_perl_package_with_version_op_result
+
+- name: assert package with version operator is installed
+ assert:
+ that:
+ - install_perl_package_with_version_op_result is changed
+ - install_perl_package_with_version_op_result is not failed
diff --git a/ansible_collections/community/general/tests/integration/targets/cronvar/aliases b/ansible_collections/community/general/tests/integration/targets/cronvar/aliases
new file mode 100644
index 000000000..e9ef7265d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cronvar/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml
new file mode 100644
index 000000000..11ef47d9d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cronvar/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_config_path: /etc/cron.d
diff --git a/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml
new file mode 100644
index 000000000..92d116f2a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cronvar/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_cron
diff --git a/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml
new file mode 100644
index 000000000..73ec41abc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/cronvar/tasks/main.yml
@@ -0,0 +1,124 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure /etc/cron.d directory exists
+ file:
+ path: /etc/cron.d
+ state: directory
+
+- name: Create EMAIL cron var
+ cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+ register: create_cronvar1
+
+- name: Create EMAIL cron var again
+ cronvar:
+ name: EMAIL
+ value: doug@ansibmod.con.com
+ register: create_cronvar2
+
+- name: Check cron var value
+ shell: crontab -l -u root | grep -c EMAIL=doug@ansibmod.con.com
+ register: varcheck1
+
+- name: Modify EMAIL cron var
+ cronvar:
+ name: EMAIL
+ value: jane@ansibmod.con.com
+ register: create_cronvar3
+
+- name: Check cron var value again
+ shell: crontab -l -u root | grep -c EMAIL=jane@ansibmod.con.com
+ register: varcheck2
+
+- name: Remove EMAIL cron var
+ cronvar:
+ name: EMAIL
+ state: absent
+ register: remove_cronvar1
+
+- name: Remove EMAIL cron var again
+ cronvar:
+ name: EMAIL
+ state: absent
+ register: remove_cronvar2
+
+- name: Check cron var value again
+ shell: crontab -l -u root | grep -c EMAIL
+ register: varcheck3
+ failed_when: varcheck3.rc == 0
+
+- name: Add cron var to custom file
+ cronvar:
+ name: TESTVAR
+ value: somevalue
+ cron_file: cronvar_test
+ register: custom_cronfile1
+
+- name: Add cron var to custom file again
+ cronvar:
+ name: TESTVAR
+ value: somevalue
+ cron_file: cronvar_test
+ register: custom_cronfile2
+
+- name: Check cron var value in custom file
+ command: grep -c TESTVAR=somevalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck1
+
+- name: Change cron var in custom file
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ register: custom_cronfile3
+
+- name: Check cron var value in custom file
+ command: grep -c TESTVAR=newvalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck2
+
+- name: Remove cron var from custom file
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ state: absent
+ register: custom_remove_cronvar1
+
+- name: Remove cron var from custom file again
+ cronvar:
+ name: TESTVAR
+ value: newvalue
+ cron_file: cronvar_test
+ state: absent
+ register: custom_remove_cronvar2
+
+- name: Check cron var value
+ command: grep -c TESTVAR=newvalue {{ cron_config_path }}/cronvar_test
+ register: custom_varcheck3
+ failed_when: custom_varcheck3.rc == 0
+
+- name: Ensure cronvar tasks did the right thing
+ assert:
+ that:
+ - create_cronvar1 is changed
+ - create_cronvar2 is not changed
+ - create_cronvar3 is changed
+ - remove_cronvar1 is changed
+ - remove_cronvar2 is not changed
+ - varcheck1.stdout == '1'
+ - varcheck2.stdout == '1'
+ - varcheck3.stdout == '0'
+ - custom_remove_cronvar1 is changed
+ - custom_remove_cronvar2 is not changed
+ - custom_varcheck1.stdout == '1'
+ - custom_varcheck2.stdout == '1'
+ - custom_varcheck3.stdout == '0'
diff --git a/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases b/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases
new file mode 100644
index 000000000..afda346c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/deploy_helper/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
diff --git a/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/deploy_helper/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml
new file mode 100644
index 000000000..fdd8bd87b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/deploy_helper/tasks/main.yml
@@ -0,0 +1,158 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: record the output directory
+  set_fact: deploy_helper_test_root={{ remote_tmp_dir }}/deploy_helper_test_root
+
+- name: State=query with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=query
+- name: Assert State=query with default parameters
+ assert:
+ that:
+ - "'project_path' in deploy_helper"
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/current'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/releases'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/shared'"
+ - "deploy_helper.unfinished_filename == 'DEPLOY_UNFINISHED'"
+ - "'previous_release' in deploy_helper"
+ - "'previous_release_path' in deploy_helper"
+ - "'new_release' in deploy_helper"
+ - "'new_release_path' in deploy_helper"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release }}'"
+
+- name: State=query with relative overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=CURRENT_PATH releases_path=RELEASES_PATH shared_path=SHARED_PATH state=query
+- name: Assert State=query with relative overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '{{ deploy_helper.project_path }}/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '{{ deploy_helper.project_path }}/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '{{ deploy_helper.project_path }}/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with absolute overridden paths
+ deploy_helper: path={{ deploy_helper_test_root }} current_path=/CURRENT_PATH releases_path=/RELEASES_PATH shared_path=/SHARED_PATH state=query
+- name: Assert State=query with absolute overridden paths
+ assert:
+ that:
+ - "deploy_helper.current_path == '/CURRENT_PATH'"
+ - "deploy_helper.releases_path == '/RELEASES_PATH'"
+ - "deploy_helper.shared_path == '/SHARED_PATH'"
+ - "deploy_helper.new_release_path == '{{ deploy_helper.releases_path }}/{{ deploy_helper.new_release}}'"
+
+- name: State=query with overridden unfinished_filename
+ deploy_helper: path={{ deploy_helper_test_root }} unfinished_filename=UNFINISHED_DEPLOY state=query
+- name: Assert State=query with overridden unfinished_filename
+ assert:
+ that:
+ - "'UNFINISHED_DEPLOY' == deploy_helper.unfinished_filename"
+
+# Remove the root folder just in case it exists
+- file: path={{ deploy_helper_test_root }} state=absent
+
+- name: State=present with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=present
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with default parameters
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
+- name: State=finalize with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=finalize
+- stat: path={{ deploy_helper.current_path }}
+ register: current_path
+- stat: path={{ deploy_helper.current_path }}/DEPLOY_UNFINISHED
+ register: current_path_unfinished_filename
+- name: Assert State=finalize with default parameters
+ assert:
+ that:
+ - "current_path.stat.islnk"
+ - "deploy_helper.new_release_path in current_path.stat.lnk_source"
+ - "not current_path_unfinished_filename.stat.exists"
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=finalize with default parameters (clean=true checks)
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "releases_count.stdout|trim == '6'"
+- deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=query
+- name: Assert State=finalize with default parameters (previous_release checks)
+ assert:
+ that:
+ - "deploy_helper.new_release == deploy_helper.previous_release"
+
+- name: State=absent with default parameters
+ deploy_helper: path={{ deploy_helper_test_root }} state=absent
+- stat: path={{ deploy_helper_test_root }}
+ register: project_path
+- name: Assert State=absent with default parameters
+ assert:
+ that:
+ - "not project_path.stat.exists"
+
+- debug: msg="Clearing all release data and facts ---------"
+
+- name: State=present with shared_path disabled (empty string)
+ deploy_helper: path={{ deploy_helper_test_root }} state=present shared_path=''
+- stat: path={{ deploy_helper.releases_path }}
+ register: releases_path
+- stat: path={{ deploy_helper.shared_path }}
+ register: shared_path
+- name: Assert State=present with shared_path disabled (empty string)
+ assert:
+ that:
+ - "releases_path.stat.exists"
+ - "not shared_path.stat.exists"
+
+# Setup older releases for tests
+- file: path={{ deploy_helper.releases_path }}/{{ item }} state=directory
+ with_items: ['first', 'second', 'third', 'fourth', 'fifth']
+# Setup the new release
+- file: path={{ deploy_helper.new_release_path }} state=directory
+# Add a buildfile, just like in a real deploy
+- copy: content='' dest={{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}
+# Add a buildfile, to an older deploy
+- copy: content='' dest={{ deploy_helper.releases_path }}/third/{{ deploy_helper.unfinished_filename }}
+
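+# Five releases from the setup above plus the new one give 6 entries before
+# cleaning; keep_releases=3 leaves 3.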
+- shell: "ls {{ deploy_helper_test_root }}/releases | wc -l"
+ register: before_releases_count
+- name: State=clean with keep_releases=3
+ deploy_helper: path={{ deploy_helper_test_root }} release={{ deploy_helper.new_release }} state=clean keep_releases=3
+- stat: path={{ deploy_helper.releases_path }}/third
+ register: third_release_path
+- shell: "ls {{ deploy_helper.releases_path }} | wc -l"
+ register: releases_count
+- name: Assert State=clean with keep_releases=3
+ assert:
+ that:
+ - "not third_release_path.stat.exists"
+ - "before_releases_count.stdout|trim == '6'"
+ - "releases_count.stdout|trim == '3'"
+
+# Remove the root folder
+- file: path={{ deploy_helper_test_root }} state=absent
diff --git a/ansible_collections/community/general/tests/integration/targets/discord/README.md b/ansible_collections/community/general/tests/integration/targets/discord/README.md
new file mode 100644
index 000000000..528ea0643
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/discord/README.md
@@ -0,0 +1,20 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+The integration tests can be executed locally:
+
+1. Create a new Discord server or use an existing one
+2. Open `Server Settings` and navigate to the `Integrations` tab
+3. Click `Create Webhook` to create a new webhook
+4. Click `Copy Webhook URL` and extract the `webhook_id` and `webhook_token` from the URL
+
+   Example: https://discord.com/api/webhooks/`webhook_id`/`webhook_token`
+
+5. Replace the variables `discord_id` and `discord_token` in the var file
+6. Run the integration test
+````
+ansible-test integration -v --color yes discord --allow-unsupported
+````
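+
+For step 5, the var file (here, `defaults/main.yml`) might end up looking like this; the values below are placeholders, not real credentials:
+
+````yaml
+discord_id: "123456789012345678"
+discord_token: "your-webhook-token"
+````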
diff --git a/ansible_collections/community/general/tests/integration/targets/discord/aliases b/ansible_collections/community/general/tests/integration/targets/discord/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/discord/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/discord/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/discord/defaults/main.yml
new file mode 100644
index 000000000..ef01141ca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/discord/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+discord_id: 000
+discord_token: xxx
diff --git a/ansible_collections/community/general/tests/integration/targets/discord/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/discord/tasks/main.yml
new file mode 100644
index 000000000..29314ba23
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/discord/tasks/main.yml
@@ -0,0 +1,69 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Send basic message
+ community.general.discord:
+ webhook_id: "{{ discord_id }}"
+ webhook_token: "{{ discord_token }}"
+ content: "Messages from ansible-test"
+ register: result
+
+- name: Check result
+ assert:
+ that:
+ - result is changed
+ - result.http_code == 204
+
+- name: Send embeds
+ community.general.discord:
+ webhook_id: "{{ discord_id }}"
+ webhook_token: "{{ discord_token }}"
+ embeds:
+ - title: "Title of embed message 1"
+ description: "Description embed message 1"
+ footer:
+ text: "author ansible-test"
+ image:
+ url: "https://avatars.githubusercontent.com/u/44586252?s=200&v=4"
+ - title: "Title of embed message 2"
+ description: "Description embed message 2"
+ footer:
+ text: "author ansible-test"
+ icon_url: "https://avatars.githubusercontent.com/u/44586252?s=200&v=4"
+ fields:
+ - name: "Field 1"
+ value: 1
+ - name: "Field 2"
+ value: "Text"
+ timestamp: "{{ ansible_date_time.iso8601 }}"
+ username: Ansible Test
+ avatar_url: "https://avatars.githubusercontent.com/u/44586252?s=200&v=4"
+ register: result
+
+- name: Check result
+ assert:
+ that:
+ - result is changed
+ - result.http_code == 204
+
+- name: Use a wrong token
+ community.general.discord:
+ webhook_id: "{{ discord_id }}"
+ webhook_token: "wrong_token"
+ content: "Messages from ansible-test"
+ register: result
+ ignore_errors: true
+
+- name: Check result
+ assert:
+ that:
+ - result is not changed
+ - result.http_code == 401
+ - result.response.message == "Invalid Webhook Token"
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/aliases b/ansible_collections/community/general/tests/integration/targets/django_manage/aliases
new file mode 100644
index 000000000..98aed9e9d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/aliases
@@ -0,0 +1,15 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2
+skip/freebsd
+skip/macos
+skip/osx
+skip/rhel8.2
+skip/rhel8.3
+skip/rhel8.4
+skip/rhel8.5
+skip/rhel9.0
+skip/rhel9.1
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py
new file mode 100644
index 000000000..881221c06
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/core/settings.py
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# single_app_project/core/settings.py
+SECRET_KEY = 'testtesttesttesttest'
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py
new file mode 100755
index 000000000..4b4eddcb6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/1045-single-app-project/single_app_project/manage.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+# single_app_project/manage.py
+import os
+import sys
+
+
+def main():
+ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'single_app_project.core.settings')
+ from django.core.management import execute_from_command_line
+ execute_from_command_line(sys.argv)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py
new file mode 100755
index 000000000..be3140f44
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+"""Django's command-line utility for administrative tasks."""
+import os
+import sys
+
+
+def main():
+ """Run administrative tasks."""
+ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'p1.settings')
+ try:
+ from django.core.management import execute_from_command_line
+ except ImportError as exc:
+ raise ImportError(
+ "Couldn't import Django. Are you sure it's installed and "
+ "available on your PYTHONPATH environment variable? Did you "
+ "forget to activate a virtual environment?"
+ ) from exc
+ execute_from_command_line(sys.argv)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py
new file mode 100644
index 000000000..86b3ae64c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/settings.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+"""
+Django settings for p1 project.
+
+Generated by 'django-admin startproject' using Django 3.1.5.
+
+For more information on this file, see
+https://docs.djangoproject.com/en/3.1/topics/settings/
+
+For the full list of settings and their values, see
+https://docs.djangoproject.com/en/3.1/ref/settings/
+"""
+
+import os
+from pathlib import Path
+
+# Build paths inside the project like this: BASE_DIR / 'subdir'.
+BASE_DIR = Path(__file__).resolve().parent.parent
+
+
+# Quick-start development settings - unsuitable for production
+# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
+
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = '%g@gyhl*q@@g(_ab@t^76dao^#b9-v8mw^50)x_bv6wpl+mukj'
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = True
+
+ALLOWED_HOSTS = []
+
+
+# Application definition
+
+INSTALLED_APPS = [
+ 'django.contrib.admin',
+ 'django.contrib.auth',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sessions',
+ 'django.contrib.messages',
+ 'django.contrib.staticfiles',
+]
+
+MIDDLEWARE = [
+ 'django.middleware.security.SecurityMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.middleware.csrf.CsrfViewMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+ 'django.contrib.messages.middleware.MessageMiddleware',
+ 'django.middleware.clickjacking.XFrameOptionsMiddleware',
+]
+
+ROOT_URLCONF = 'p1.urls'
+
+TEMPLATES = [
+ {
+ 'BACKEND': 'django.template.backends.django.DjangoTemplates',
+ 'DIRS': [],
+ 'APP_DIRS': True,
+ 'OPTIONS': {
+ 'context_processors': [
+ 'django.template.context_processors.debug',
+ 'django.template.context_processors.request',
+ 'django.contrib.auth.context_processors.auth',
+ 'django.contrib.messages.context_processors.messages',
+ ],
+ },
+ },
+]
+
+WSGI_APPLICATION = 'p1.wsgi.application'
+
+
+# Database
+# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': BASE_DIR / 'db.sqlite3',
+ }
+}
+
+
+# Password validation
+# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
+
+AUTH_PASSWORD_VALIDATORS = [
+ {
+ 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
+ },
+ {
+ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
+ },
+]
+
+
+# Internationalization
+# https://docs.djangoproject.com/en/3.1/topics/i18n/
+
+LANGUAGE_CODE = 'en-us'
+
+TIME_ZONE = 'UTC'
+
+USE_I18N = True
+
+USE_L10N = True
+
+USE_TZ = True
+
+
+# Static files (CSS, JavaScript, Images)
+# https://docs.djangoproject.com/en/3.1/howto/static-files/
+
+STATIC_URL = '/static/'
+STATIC_ROOT = '/tmp/django-static'
+
+if "DJANGO_ANSIBLE_RAISE" in os.environ:
+ raise ValueError("DJANGO_ANSIBLE_RAISE={0}".format(os.environ["DJANGO_ANSIBLE_RAISE"]))
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py
new file mode 100644
index 000000000..36cb59275
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/simple_project/p1/p1/urls.py
@@ -0,0 +1,28 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+"""p1 URL Configuration
+
+The `urlpatterns` list routes URLs to views. For more information please see:
+ https://docs.djangoproject.com/en/2.2/topics/http/urls/
+Examples:
+Function views
+ 1. Add an import: from my_app import views
+ 2. Add a URL to urlpatterns: path('', views.home, name='home')
+Class-based views
+ 1. Add an import: from other_app.views import Home
+ 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
+Including another URLconf
+ 1. Import the include() function: from django.urls import include, path
+ 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
+"""
+from django.contrib import admin
+from django.urls import path
+
+urlpatterns = [
+ path('admin/', admin.site.urls),
+]
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/startproj/.keep b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/startproj/.keep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/files/base_test/startproj/.keep
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/django_manage/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/django_manage/tasks/main.yaml b/ansible_collections/community/general/tests/integration/targets/django_manage/tasks/main.yaml
new file mode 100644
index 000000000..c07b53893
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/django_manage/tasks/main.yaml
@@ -0,0 +1,84 @@
+# Test code for django_manage module
+#
+# Copyright (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Create temporary test directory
+ tempfile:
+ state: directory
+ suffix: .django_manage
+ register: tmp_django_root
+
+- name: Install virtualenv on CentOS 8
+ package:
+ name: virtualenv
+ state: present
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+
+- name: Install virtualenv on Arch Linux
+ pip:
+ name: virtualenv
+ state: present
+ when: ansible_os_family == 'Archlinux'
+
+- name: Install required library
+ pip:
+ name: django
+ state: present
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+
+- name: Copy files
+ copy:
+ src: base_test/
+ dest: "{{ tmp_django_root.path }}"
+ mode: preserve
+
+- name: Create project
+ command:
+ chdir: "{{ tmp_django_root.path }}/startproj"
+ cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startproject test_django_manage_1"
+
+- name: Create app
+ command:
+ chdir: "{{ tmp_django_root.path }}/startproj"
+ cmd: "{{ tmp_django_root.path }}/venv/bin/django-admin startapp app1"
+
+- name: Check generated project
+ community.general.django_manage:
+ project_path: "{{ tmp_django_root.path }}/startproj/test_django_manage_1"
+ command: check
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+
+- name: Check simple_project
+ community.general.django_manage:
+ project_path: "{{ tmp_django_root.path }}/simple_project/p1"
+ command: check
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+
+- name: Check custom project
+ community.general.django_manage:
+ project_path: "{{ tmp_django_root.path }}/1045-single-app-project/single_app_project"
+ pythonpath: "{{ tmp_django_root.path }}/1045-single-app-project/"
+ command: check
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+
+- name: Run collectstatic --noinput on simple project
+ community.general.django_manage:
+ project_path: "{{ tmp_django_root.path }}/simple_project/p1"
+ command: collectstatic --noinput
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+
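+# settings.py in simple_project raises ValueError when DJANGO_ANSIBLE_RAISE is set,
+# so this task must fail.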
+- name: Trigger exception with environment variable
+ community.general.django_manage:
+ project_path: "{{ tmp_django_root.path }}/simple_project/p1"
+ command: collectstatic --noinput
+ virtualenv: "{{ tmp_django_root.path }}/venv"
+ environment:
+ DJANGO_ANSIBLE_RAISE: blah
+ ignore_errors: true
+ register: env_raise
+
+- name: Check env variable reached manage.py
+ ansible.builtin.assert:
+ that:
+ - "'ValueError: DJANGO_ANSIBLE_RAISE=blah' in env_raise.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/aliases b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/aliases
new file mode 100644
index 000000000..b85ae6419
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/install.yml b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/install.yml
new file mode 100644
index 000000000..9773d87dc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/install.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install dnf versionlock plugin
+ dnf:
+ name: dnf-plugin-versionlock
+ state: present
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml
new file mode 100644
index 000000000..56357e01c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_bash.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Clear locklist
+ community.general.dnf_versionlock:
+ state: clean
+ register: clear_locklist
+
+- assert:
+ that:
+ - clear_locklist.locklist_post | length == 0
+
+- name: Lock installed package bash
+ community.general.dnf_versionlock:
+ name: bash
+ state: present
+ register: lock_bash
+
+- assert:
+ that:
+ - lock_bash is changed
+ - lock_bash.locklist_post | length == 1
+
+- name: Unlock installed package bash
+ community.general.dnf_versionlock:
+ name: bash
+ state: absent
+ register: unlock_bash
+
+- assert:
+ that:
+ - unlock_bash is changed
+ - unlock_bash.locklist_post | length == 0
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml
new file mode 100644
index 000000000..b3fceb26f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/lock_updates.yml
@@ -0,0 +1,74 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Check packages with updates
+ dnf:
+ list: updates
+ register: updates
+
+- name: Set local facts
+ set_fact:
+ _packages: "{{ (updates.results | map(attribute='name') | list)[:5] }}"
+
+- debug:
+ msg:
+ - "The packages to be locked and unlocked are: {{ _packages}}"
+
+- block:
+ - name: Clear locklist
+ community.general.dnf_versionlock:
+ state: clean
+ register: clear_locklist
+
+ - assert:
+ that:
+ - clear_locklist.locklist_post | length == 0
+
+ - name: Lock packages with updates
+ dnf_versionlock:
+ name: "{{ _packages }}"
+ state: present
+ register: lock_packages
+
+ - assert:
+ that:
+ - lock_packages is changed
+ - (lock_packages.locklist_post | length) <= (_packages | length)
+
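+    # obsoletes=0 presumably keeps obsoleting packages from replacing the locked
+    # ones; the update should then report "Nothing to do".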
+ - name: Update packages with updates while locked
+ command: >-
+ dnf update -y
+ --setopt=obsoletes=0 {{ _packages | join(' ') }}
+ register: update_locked_packages
+ changed_when: '"Nothing to do" not in update_locked_packages.stdout'
+
+ - assert:
+ that:
+ - update_locked_packages is not changed
+
+ - name: Unlock packages with updates
+ dnf_versionlock:
+ name: "{{ _packages }}"
+ state: absent
+ register: unlock_packages
+
+ - assert:
+ that:
+ - unlock_packages is changed
+ - unlock_packages.locklist_post | length == 0
+
+ - name: Update packages
+ dnf:
+ name: "{{ _packages }}"
+ state: latest
+ check_mode: true
+ register: update_packages
+
+ - assert:
+ that:
+ - update_packages is changed
+
+ when: updates.results | length > 0
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/main.yml
new file mode 100644
index 000000000..51e823ffd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dnf_versionlock/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - include_tasks: install.yml
+ - include_tasks: lock_bash.yml
+ - include_tasks: lock_updates.yml
+ when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('23', '>=')) or
+ (ansible_distribution in ['RedHat', 'CentOS'] and ansible_distribution_major_version is version('8', '>='))
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases
new file mode 100644
index 000000000..050bf89b4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/osx
+skip/macos
+skip/rhel
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml
new file mode 100644
index 000000000..910f174e1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "include tasks for Debian family"
+ include_tasks: prepare.yml
+ when: ansible_pkg_mgr == "apt"
diff --git a/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml
new file mode 100644
index 000000000..94566b41e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/prepare.yml
@@ -0,0 +1,43 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "set variables for the entire playbook"
+ set_fact:
+ foobarrc: "{{ foobarrc }}"
+ foobarrc_ansible: "{{ foobarrc }}.ansible"
+ foobarrc_distrib: "{{ foobarrc }}.distrib"
+ foobarrc_oldtext: "# foobar configuration file\n# Please refer to the documentation for details\n"
+ foobarrc_oldsha1: "e1c54c36d2fd1b8d67d1826e49b95ac8c0f24c0a"
+ foobarrc_newtext: "# Custom foobar configuration file\nFOO=bar\nBAR=foo"
+ foobarrc_newsha1: "3fe6c890519fb48e27c1b0e3e37afb11357d5cac"
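+    # The sha1 values are the checksums of foobarrc_oldtext and foobarrc_newtext;
+    # the test files below assert file checksums against them.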
+ vars:
+ foobarrc: "/etc/foobarrc"
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ become: true
+
+- name: "remove test files"
+ file:
+ path: "{{ dpkg_divert_item }}"
+ state: absent
+ loop:
+ - "{{ foobarrc_ansible }}"
+ - "{{ foobarrc_distrib }}"
+ loop_control:
+ loop_var: dpkg_divert_item
+ become: true
+
+
+- block:
+ - name: "include tasks to perform basic tests (create, remove, update)"
+ include_tasks: tests/01-basic.yml
+
+ - name: "include tasks to perform other tests (rename)"
+ include_tasks: tests/02-rename.yml
+ become: true
+ diff: true
diff --git a/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml
new file mode 100644
index 000000000..78863d1db
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/01-basic.yml
@@ -0,0 +1,291 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################
+# TEST 01: state=present
+
+- name: "create foobarrc for tests"
+ copy:
+ dest: "{{ foobarrc }}"
+ content: "{{ foobarrc_oldtext }}"
+
+
+- name: "divert foobarrc (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_0
+ check_mode: true
+
+- name: "divert foobarrc (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_1
+
+
+- name: "divert foobarrc (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_2
+
+- name: "divert foobarrc (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ register: diversion_3
+ check_mode: true
+
+
+# Ensure that 'rename' has no effect when state is not changed
+
+- name: "divert foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ rename: true
+ register: diversion_4
+
+- name: "divert foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: present
+ rename: true
+ register: diversion_5
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "assert that results of test 01 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - diversion_6.stat.exists
+ - diversion_6.stat.checksum == foobarrc_oldsha1
+ - not diversion_7.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: true
+
+
+################################################################################
+# TEST 02: state=absent
+
+- name: "remove diversion for foobarrc (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_0
+ check_mode: true
+
+- name: "remove diversion for foobarrc (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_1
+
+
+- name: "remove diversion for foobarrc (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_2
+
+- name: "remove diversion for foobarrc (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 02 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+
+################################################################################
+# TEST 03: holder=ansible
+
+- name: "create foobarrc diversion with defaults"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+
+
+- name: "update foobarrc diversion holder (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_0
+ check_mode: true
+
+- name: "update foobarrc diversion holder (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_1
+
+
+- name: "update foobarrc diversion holder (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_2
+
+- name: "update foobarrc diversion holder (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ holder: "ansible"
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 03 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+
+
+################################################################################
+# TEST 04: divert=/etc/foobarrc.ansible
+
+- name: "create foobarrc diversion with defaults"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+
+
+- name: "update foobarrc divert path (check mode, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_0
+ check_mode: true
+
+- name: "update foobarrc divert path (must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_1
+
+
+- name: "update foobarrc divert path (must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_2
+
+- name: "update foobarrc divert path (check mode, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must still be there)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.ansible (must not exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_5
+
+- name: "assert that results of test 04 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+- name: "remove foobarrc diversion"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml
new file mode 100644
index 000000000..6c95a7291
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/dpkg_divert/tasks/tests/02-rename.yml
@@ -0,0 +1,384 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+################################################################################
+# TEST 05: rename=yes, state=present
+
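+# With rename=true, creating the diversion also moves the existing file to
+# its divert path (here the default foobarrc.distrib), as asserted below.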
+- name: "create diversion for foobarrc (check mode, rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_0
+ check_mode: true
+
+- name: "create diversion for foobarrc (rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_1
+
+
+- name: "create diversion for foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_2
+
+- name: "create diversion for foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_3
+ check_mode: true
+
+
+# Get results
+
+- name: "stat foobarrc (must not exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 05 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - not diversion_4.stat.exists
+ - diversion_5.stat.exists
+ - diversion_5.stat.checksum == foobarrc_oldsha1
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+
+################################################################################
+# TEST 06: rename=yes, state=absent
+
+- name: "remove diversion for foobarrc (check mode, rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ state: absent
+ register: diversion_0
+ check_mode: true
+
+- name: "remove diversion for foobarrc (rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ state: absent
+ register: diversion_1
+
+
+- name: "remove diversion for foobarrc (rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ state: absent
+ register: diversion_2
+
+- name: "remove diversion for foobarrc (check mode, rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ state: absent
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_5
+
+- name: "assert that results of test 06 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_oldsha1
+ - not diversion_5.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+
+################################################################################
+# TEST 07: rename=yes, force=yes, state=present
+
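+# Renaming would overwrite the foobarrc.distrib created below, so without
+# force=true the module must fail rather than clobber the existing file.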
+- name: "create foobarrc.distrib for tests"
+ copy:
+ dest: "{{ foobarrc_distrib }}"
+ content: "{{ foobarrc_oldtext }}"
+
+
+- name: "create diversion for foobarrc (check mode, rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_0
+ ignore_errors: true
+ check_mode: true
+
+- name: "create diversion for foobarrc (rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ register: diversion_1
+ ignore_errors: true
+
+
+- name: "create diversion for foobarrc (check mode, force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ force: true
+ register: diversion_2
+ check_mode: true
+
+- name: "create diversion for foobarrc (force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ force: true
+ register: diversion_3
+
+
+- name: "create diversion for foobarrc (force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ force: true
+ register: diversion_4
+
+- name: "create diversion for foobarrc (check mode, force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ rename: true
+ force: true
+ register: diversion_5
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must not exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "assert that results of test 07 are as expected"
+ assert:
+ that:
+ - diversion_0 is failed
+ - diversion_1 is failed
+ - diversion_2 is changed
+ - diversion_3 is changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - not diversion_6.stat.exists
+ - diversion_7.stat.exists
+ - diversion_7.stat.checksum == foobarrc_oldsha1
+ - diversion_0 == diversion_1
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: true
+
+
+################################################################################
+# TEST 08: state=present, update an existing divert path
+
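+# Updating the divert path of an existing diversion must also move the
+# already diverted file (foobarrc.distrib becomes foobarrc.ansible), while
+# the foobarrc recreated below keeps its new contents.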
+- name: "create foobarrc with new contents for tests"
+ copy:
+ dest: "{{ foobarrc }}"
+ content: "{{ foobarrc_newtext }}"
+
+
+- name: "create diversion for foobarrc (check mode, update divert path, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_0
+ check_mode: true
+
+- name: "create diversion for foobarrc (update divert path, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_1
+
+
+- name: "create diversion for foobarrc (update divert path, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_2
+
+- name: "create diversion for foobarrc (check mode, update divert path, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ divert: "{{ foobarrc_ansible }}"
+ register: diversion_3
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_4
+
+- name: "stat foobarrc.ansible (must exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_5
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_6
+
+- name: "assert that results of test 08 are as expected"
+ assert:
+ that:
+ - diversion_0 is changed
+ - diversion_1 is changed
+ - diversion_2 is not changed
+ - diversion_3 is not changed
+ - diversion_4.stat.exists
+ - diversion_4.stat.checksum == foobarrc_newsha1
+ - diversion_5.stat.exists
+ - diversion_5.stat.checksum == foobarrc_oldsha1
+ - not diversion_6.stat.exists
+ - diversion_0.diversion == diversion_1.diversion
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_0.commands == diversion_1.commands
+ - diversion_2.commands == diversion_3.commands
+ quiet: true
+
+
+################################################################################
+# TEST 09: rename=yes, force=yes, state=absent
+
+- name: "remove diversion for foobarrc (check mode, rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ register: diversion_0
+ ignore_errors: true
+ check_mode: true
+
+- name: "remove diversion for foobarrc (rename, must fail)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ register: diversion_1
+ ignore_errors: true
+
+
+- name: "remove diversion for foobarrc (check mode, force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ force: true
+ register: diversion_2
+ check_mode: true
+
+- name: "remove diversion for foobarrc (force rename, must report a change)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ force: true
+ register: diversion_3
+
+
+- name: "remove diversion for foobarrc (force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ force: true
+ register: diversion_4
+
+- name: "remove diversion for foobarrc (check mode, force rename, must NOT report a change, idempotency)"
+ dpkg_divert:
+ path: "{{ foobarrc }}"
+ state: absent
+ rename: true
+ force: true
+ register: diversion_5
+ check_mode: true
+
+
+# Check results
+
+- name: "stat foobarrc (must exist)"
+ stat:
+ path: "{{ foobarrc }}"
+ register: diversion_6
+
+- name: "stat foobarrc.distrib (must not exist)"
+ stat:
+ path: "{{ foobarrc_distrib }}"
+ register: diversion_7
+
+- name: "stat foobarrc.ansible (must not exist)"
+ stat:
+ path: "{{ foobarrc_ansible }}"
+ register: diversion_8
+
+- name: "assert that results of test 09 are as expected"
+ assert:
+ that:
+ - diversion_0 is failed
+ - diversion_1 is failed
+ - diversion_2 is changed
+ - diversion_3 is changed
+ - diversion_4 is not changed
+ - diversion_5 is not changed
+ - diversion_6.stat.exists
+ - diversion_6.stat.checksum == foobarrc_oldsha1
+ - not diversion_7.stat.exists
+ - not diversion_8.stat.exists
+ - diversion_0 == diversion_1
+ - diversion_2.diversion == diversion_3.diversion
+ - diversion_4.diversion == diversion_5.diversion
+ - diversion_2.commands == diversion_3.commands
+ - diversion_4.commands == diversion_5.commands
+ quiet: true
diff --git a/ansible_collections/community/general/tests/integration/targets/etcd3/aliases b/ansible_collections/community/general/tests/integration/targets/etcd3/aliases
new file mode 100644
index 000000000..264446580
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/etcd3/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/python2.6 # installing etcd3 python module will fail on python < 2.7
+disabled # see https://github.com/ansible-collections/community.general/issues/322
diff --git a/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml
new file mode 100644
index 000000000..f922f5506
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/etcd3/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_etcd3
diff --git a/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml
new file mode 100644
index 000000000..2fe7435dc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the etcd3 module
+# Copyright (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ============================================================
+
+
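+# The condition below concatenates distribution and major version (e.g.
+# "centos-6") to skip CentOS 6, whose Python 2.6 cannot install the etcd3
+# python module (see this target's aliases file).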
+- name: Include run_tests.yml for supported distros
+ include_tasks: run_tests.yml
+ when:
+ - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
diff --git a/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml b/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml
new file mode 100644
index 000000000..4bd8fa4ec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/etcd3/tasks/run_tests.yml
@@ -0,0 +1,81 @@
+---
+# test code for the etcd3 module
+# Copyright (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# Copyright 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ============================================================
+
+# Integration tests
+- name: Check mode, show need change
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_chktst
+ check_mode: true
+
+- name: Change to new value
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_chgtst
+
+- name: Idempotency test, show unchanged
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_idmptnttst
+
+- name: Idempotency test in check mode, show unchanged
+ etcd3:
+ key: "foo"
+ value: "bar"
+ state: "present"
+ register: _etcd3_prst_idmptntchktst
+ check_mode: true
+
+- name: Check mode, show need removal of key
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_chktst
+ check_mode: true
+
+- name: Remove foo key
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+ register: _etcd3_absnt_chgtst
+
+- name: Idempotency test in check mode, show unchanged
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+  register: _etcd3_absnt_idmptntchktst
+ check_mode: true
+
+- name: Idempotency test, show unchanged
+ etcd3:
+ key: "foo"
+ value: "baz"
+ state: "absent"
+  register: _etcd3_absnt_idmptnttst
+
+- name: Check that the results are as expected
+ assert:
+ that:
+ - _etcd3_prst_chktst is changed
+ - _etcd3_prst_chgtst is changed
+ - _etcd3_prst_idmptnttst is not changed
+ - _etcd3_prst_idmptntchktst is not changed
+ - _etcd3_absnt_chktst is changed
+ - _etcd3_absnt_chgtst is changed
+ - _etcd3_absnt_idmptnttst is not changed
+ - _etcd3_absnt_idmptntchktst is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/aliases b/ansible_collections/community/general/tests/integration/targets/filesize/aliases
new file mode 100644
index 000000000..7642e70da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/filesize/defaults/main.yml
new file mode 100644
index 000000000..d51108276
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+filesize_testdir: "/tmp/testdir"
+filesize_testfile: "{{ filesize_testdir }}/testfile"
+filesize_testlink: "{{ filesize_testdir }}/testlink"
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/basics.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/basics.yml
new file mode 100644
index 000000000..3c0673189
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/basics.yml
@@ -0,0 +1,411 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Test the module with basic parameters.
+# Create a file, grow it, reduce it back to its initial size, and check that
+# the initial and final checksums match. Also check the consistency of size
+# formats (for example 57001B == 57001 B == 57.001 kB, and zero is zero
+# whatever the block or unit).
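+#
+# For illustration only (a sketch, not part of the tests below), these three
+# notations describe the same 57001-byte size, so applying them in sequence
+# would report a change only once (path /tmp/f is hypothetical):
+#
+#   community.general.filesize: { path: /tmp/f, size: 57001B }
+#   community.general.filesize: { path: /tmp/f, size: "57001 B" }
+#   community.general.filesize: { path: /tmp/f, size: 57.001 kB }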
+
+- name: Create an empty file (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0
+ register: filesize_test_basic_01
+ check_mode: true
+
+- name: Stat the file (should not exist)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_01
+
+
+- name: Create an empty file
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0
+ register: filesize_test_basic_02
+
+- name: Stat the file (should exist now)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_02
+
+
+- name: Create an empty file (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0G
+ register: filesize_test_basic_03
+ check_mode: true
+
+- name: Create an empty file (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0G
+ register: filesize_test_basic_04
+
+- name: Stat the file (should still exist, unchanged)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_04
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ # check_mode & idempotency are in good shape.
+ - filesize_test_basic_01 is changed
+ - filesize_test_basic_02 is changed
+ - filesize_test_basic_03 is not changed
+ - filesize_test_basic_04 is not changed
+
+      # check_mode returns the same command as actual mode.
+ - filesize_test_basic_02.cmd == filesize_test_basic_01.cmd
+ - filesize_test_basic_03.cmd is undefined
+ - filesize_test_basic_04.cmd is undefined
+
+      # The module's own return values are consistent with user input, that
+      # is, with *expected* results.
+ - filesize_test_basic_01.filesize.bytes == 0
+ - filesize_test_basic_02.filesize.bytes == 0
+ - filesize_test_basic_03.filesize.bytes == 0
+ - filesize_test_basic_04.filesize.bytes == 0
+
+ - filesize_test_basic_01.size_diff == 0
+ - filesize_test_basic_02.size_diff == 0
+ - filesize_test_basic_03.size_diff == 0
+ - filesize_test_basic_04.size_diff == 0
+
+      # Results populated by module.set_fs_attributes_if_different() are still
+      # consistent with the current state of the file.
+ - filesize_test_basic_01.state is undefined
+ - filesize_test_basic_02.state in ["file"]
+ - filesize_test_basic_01.size is undefined
+ - filesize_test_basic_02.size == 0
+ - filesize_test_basic_03.size == 0
+ - filesize_test_basic_04.size == 0
+
+      # Cross-check results with those retrieved by another module.
+ - not filesize_stat_basic_01.stat.exists
+ - filesize_stat_basic_02.stat.exists
+ - filesize_stat_basic_02.stat.isreg
+ - filesize_stat_basic_02.stat.size == 0
+ - filesize_stat_basic_04.stat.size == 0
+
+
+- name: Fill the file up to 57kB (57000B) with random data (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57kB
+ source: /dev/urandom
+ register: filesize_test_basic_11
+ check_mode: true
+
+- name: Stat the file (should still be unchanged)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_11
+
+
+- name: Fill the file up to 57kB (57000B) with random data
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57kB
+ source: /dev/urandom
+ register: filesize_test_basic_12
+
+- name: Stat the resulting file (and get its checksum)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_12
+
+- name: Store checksum as fact
+ ansible.builtin.set_fact:
+ filesize_test_checksum: "{{ filesize_stat_basic_12.stat.checksum }}"
+
+
+- name: Fill the file up to 57000B (57kB) with random data (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57000B
+ source: /dev/urandom
+ register: filesize_test_basic_13
+ check_mode: true
+
+- name: Fill the file up to 57000B (57kB) with random data (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57000B
+ source: /dev/urandom
+ register: filesize_test_basic_14
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_14
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_basic_11 is changed
+ - filesize_test_basic_12 is changed
+ - filesize_test_basic_13 is not changed
+ - filesize_test_basic_14 is not changed
+
+ - filesize_test_basic_12.cmd == filesize_test_basic_11.cmd
+ - filesize_test_basic_13.cmd is undefined
+ - filesize_test_basic_14.cmd is undefined
+
+ - filesize_test_basic_11.filesize.bytes == 57000
+ - filesize_test_basic_12.filesize.bytes == 57000
+ - filesize_test_basic_13.filesize.bytes == 57000
+ - filesize_test_basic_14.filesize.bytes == 57000
+
+ - filesize_test_basic_11.size_diff == 57000
+ - filesize_test_basic_12.size_diff == 57000
+ - filesize_test_basic_13.size_diff == 0
+ - filesize_test_basic_14.size_diff == 0
+
+ - filesize_stat_basic_11.stat.size == 0
+ - filesize_stat_basic_12.stat.size == 57000
+ - filesize_stat_basic_14.stat.size == 57000
+
+ - filesize_stat_basic_14.stat.checksum == filesize_test_checksum
+
+
+
+- name: Expand the file with 1 byte (57001B) (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57001B
+ register: filesize_test_basic_21
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_21
+
+
+- name: Expand the file with 1 byte (57001B)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57001B
+ register: filesize_test_basic_22
+
+- name: Stat the file (should have grown by 1 byte)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_22
+
+
+- name: Expand the file with 1 byte (57.001 kB) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57.001 kB
+ register: filesize_test_basic_23
+ check_mode: true
+
+- name: Expand the file with 1 byte (57.001 kB) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57.001 kB
+ register: filesize_test_basic_24
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_24
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_basic_21 is changed
+ - filesize_test_basic_22 is changed
+ - filesize_test_basic_23 is not changed
+ - filesize_test_basic_24 is not changed
+
+ - filesize_test_basic_22.cmd == filesize_test_basic_21.cmd
+ - filesize_test_basic_23.cmd is undefined
+ - filesize_test_basic_24.cmd is undefined
+
+ - filesize_test_basic_21.filesize.bytes == 57001
+ - filesize_test_basic_22.filesize.bytes == 57001
+ - filesize_test_basic_23.filesize.bytes == 57001
+ - filesize_test_basic_24.filesize.bytes == 57001
+
+ - filesize_test_basic_21.size_diff == 1
+ - filesize_test_basic_22.size_diff == 1
+ - filesize_test_basic_23.size_diff == 0
+ - filesize_test_basic_24.size_diff == 0
+
+ - filesize_stat_basic_21.stat.size == 57000
+ - filesize_stat_basic_22.stat.size == 57001
+ - filesize_stat_basic_24.stat.size == 57001
+
+ - filesize_stat_basic_21.stat.checksum == filesize_test_checksum
+ - filesize_stat_basic_22.stat.checksum != filesize_test_checksum
+ - filesize_stat_basic_24.stat.checksum != filesize_test_checksum
+
+
+
+- name: Expand the file up to 2 MiB (2*1024*1024 bytes) (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 2 MiB
+ register: filesize_test_basic_31
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_31
+
+
+- name: Expand the file up to 2 MiB (2*1024*1024 bytes)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 2 MiB
+ register: filesize_test_basic_32
+
+- name: Stat the file again (should have grown to 2MiB)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_32
+
+
+- name: Expand the file up to 2×1M (2*1024*1024 bytes) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 2
+ blocksize: 1M
+ register: filesize_test_basic_33
+ check_mode: true
+
+- name: Expand the file up to 2×1M (2*1024*1024 bytes) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 2
+ blocksize: 1M
+ register: filesize_test_basic_34
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_34
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_basic_31 is changed
+ - filesize_test_basic_32 is changed
+ - filesize_test_basic_33 is not changed
+ - filesize_test_basic_34 is not changed
+
+ - filesize_test_basic_32.cmd == filesize_test_basic_31.cmd
+ - filesize_test_basic_33.cmd is undefined
+ - filesize_test_basic_34.cmd is undefined
+
+ - filesize_test_basic_31.filesize.bytes == 2*1024**2
+ - filesize_test_basic_32.filesize.bytes == 2*1024**2
+ - filesize_test_basic_33.filesize.bytes == 2*1024**2
+ - filesize_test_basic_34.filesize.bytes == 2*1024**2
+
+ - filesize_test_basic_31.size_diff == 2*1024**2 - 57001
+ - filesize_test_basic_32.size_diff == 2*1024**2 - 57001
+ - filesize_test_basic_33.size_diff == 0
+ - filesize_test_basic_34.size_diff == 0
+
+ - filesize_stat_basic_31.stat.size == 57001
+ - filesize_stat_basic_32.stat.size == 2*1024**2
+ - filesize_stat_basic_34.stat.size == 2*1024**2
+
+
+
+- name: Truncate the file to 57kB (57000B) (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57kB
+ register: filesize_test_basic_41
+ check_mode: true
+
+- name: Stat the resulting file (should be unchanged)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_41
+
+
+- name: Truncate the file to 57kB (57000B)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57kB
+ register: filesize_test_basic_42
+
+- name: Stat the resulting file (and get its checksum)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_42
+
+
+- name: Truncate the file to 57000 B (57kB) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57000 B
+ register: filesize_test_basic_43
+ check_mode: true
+
+- name: Truncate the file to 57000 B (57kB) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 57000 B
+ register: filesize_test_basic_44
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_basic_44
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_basic_41 is changed
+ - filesize_test_basic_42 is changed
+ - filesize_test_basic_43 is not changed
+ - filesize_test_basic_44 is not changed
+
+ - filesize_test_basic_42.cmd == filesize_test_basic_41.cmd
+ - filesize_test_basic_43.cmd is undefined
+ - filesize_test_basic_44.cmd is undefined
+
+ - filesize_test_basic_41.filesize.bytes == 57000
+ - filesize_test_basic_42.filesize.bytes == 57000
+ - filesize_test_basic_43.filesize.bytes == 57000
+ - filesize_test_basic_44.filesize.bytes == 57000
+
+ - filesize_test_basic_41.size_diff == 57000 - 2*1024**2
+ - filesize_test_basic_42.size_diff == 57000 - 2*1024**2
+ - filesize_test_basic_43.size_diff == 0
+ - filesize_test_basic_44.size_diff == 0
+
+ - filesize_stat_basic_41.stat.size == 2*1024**2
+ - filesize_stat_basic_42.stat.size == 57000
+ - filesize_stat_basic_44.stat.size == 57000
+
+ # The original random file is back.
+ - filesize_stat_basic_41.stat.checksum != filesize_test_checksum
+ - filesize_stat_basic_42.stat.checksum == filesize_test_checksum
+ - filesize_stat_basic_44.stat.checksum == filesize_test_checksum
+
+
+
+- name: Remove test file
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/errors.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/errors.yml
new file mode 100644
index 000000000..351a90ac6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/errors.yml
@@ -0,0 +1,133 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Check the module's error handling:
+# 1. Missing or unknown parameters
+# 2. Wrong values (missing source device, invalid size...)
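+#
+# Each case follows the same pattern: run the module with ignore_errors: true
+# so the play continues, register the result, then assert at the end both
+# that the task failed and that the error message matches the expected one.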
+
+- name: Trigger an error due to missing parameter (path)
+ community.general.filesize:
+ size: 1kB
+ register: filesize_test_error_01
+ ignore_errors: true
+
+
+- name: Trigger an error due to missing parameter (size)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ register: filesize_test_error_02
+ ignore_errors: true
+
+
+- name: Trigger an error due to conflicting parameters (force|sparse)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1MB
+ force: true
+ sparse: true
+ register: filesize_test_error_03
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid file path (not a file)
+ community.general.filesize:
+ path: "{{ filesize_testdir }}"
+ size: 4096B
+ register: filesize_test_error_04
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid file path (nonexistent parent dir)
+ community.general.filesize:
+ path: "/unexistent/{{ filesize_testfile }}"
+ size: 4096B
+ register: filesize_test_error_05
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid size unit (b)"
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4096b
+ register: filesize_test_error_06
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid size value (bytes require integer)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1000.5B
+ register: filesize_test_error_07
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid blocksize value (not an integer)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1M
+ blocksize: "12.5"
+ register: filesize_test_error_08
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid blocksize value type (dict)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1M
+ blocksize:
+ bytes: 512
+ register: filesize_test_error_09
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid source device (/dev/unexistent)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1M
+ source: /dev/unexistent
+ register: filesize_test_error_10
+ ignore_errors: true
+
+
+- name: Trigger an error due to invalid source device (/dev/null)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 1M
+ source: /dev/null
+ register: filesize_test_error_11
+ ignore_errors: true
+
+
+- name: Assert that expected errors have been triggered
+ ansible.builtin.assert:
+ that:
+ - "filesize_test_error_01 is failed"
+ - "filesize_test_error_01.msg == 'missing required arguments: path'"
+ - "filesize_test_error_02 is failed"
+ - "filesize_test_error_02.msg == 'missing required arguments: size'"
+ - "filesize_test_error_03 is failed"
+ - "filesize_test_error_03.msg == 'parameters values are mutually exclusive: force=true|sparse=true'"
+ - "filesize_test_error_04 is failed"
+ - "filesize_test_error_04.msg == '%s exists but is not a regular file' % filesize_testdir"
+ - "filesize_test_error_05 is failed"
+ - "filesize_test_error_05.msg == 'parent directory of the file must exist prior to run this module'"
+ - "filesize_test_error_06 is failed"
+ - "filesize_test_error_06.msg is match('invalid size unit')"
+ - "filesize_test_error_07 is failed"
+ - "filesize_test_error_07.msg == 'byte is the smallest unit and requires an integer value'"
+ - "filesize_test_error_08 is failed"
+ - "filesize_test_error_08.msg == 'invalid blocksize value: bytes require an integer value'"
+ - "filesize_test_error_09 is failed"
+ - "filesize_test_error_09.msg is match('invalid value type')"
+ - "filesize_test_error_10 is failed"
+ - "filesize_test_error_10.msg == 'dd error while creating file %s with size 1M from source /dev/unexistent: see stderr for details' % filesize_testfile"
+ - "filesize_test_error_11 is failed"
+ - "filesize_test_error_11.msg == 'module error while creating file %s with size 1M from source /dev/null: file is 0 bytes long' % filesize_testfile"
+
+
+- name: Remove test file
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/floats.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/floats.yml
new file mode 100644
index 000000000..6d1bde22c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/floats.yml
@@ -0,0 +1,249 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Test the module with floating point numbers (ensure they are not rounded
+# the wrong way), since floats are tricky in Python:
+# 256.256 * 1000 == 256255.9999999997
+# 512.512 * 1000 == 512511.9999999994
+# 512.513 * 1000 == 512513.0000000006 != .512513 * 1000000
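+#
+# For instance, a naive conversion such as {{ (512.512 * 1000) | int }} in
+# Jinja truncates to 512511; the tests below check that the module resolves
+# 512.512kB to exactly 512512 bytes instead.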
+
+- name: Create a file with a size of 512.512kB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 512.512kB
+ register: filesize_test_float_01
+ check_mode: true
+
+- name: Stat the file (should not exist)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_01
+
+
+- name: Create a file with a size of 512.512kB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 512.512kB
+ register: filesize_test_float_02
+
+- name: Stat the file (should exist now)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_02
+
+
+- name: Create a file with a size of 0.512512MB (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0.512512MB
+ register: filesize_test_float_03
+ check_mode: true
+
+- name: Create a file with a size of 0.512512MB (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0.512512MB
+ register: filesize_test_float_04
+
+- name: Stat the file (should still exist, unchanged)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_04
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_float_01 is changed
+ - filesize_test_float_02 is changed
+ - filesize_test_float_03 is not changed
+ - filesize_test_float_04 is not changed
+
+ - filesize_test_float_02.cmd == filesize_test_float_01.cmd
+ - filesize_test_float_03.cmd is undefined
+ - filesize_test_float_04.cmd is undefined
+
+ - filesize_test_float_01.filesize.bytes == 512512
+ - filesize_test_float_02.filesize.bytes == 512512
+ - filesize_test_float_03.filesize.bytes == 512512
+ - filesize_test_float_04.filesize.bytes == 512512
+
+ - filesize_test_float_01.size_diff == 512512
+ - filesize_test_float_02.size_diff == 512512
+ - filesize_test_float_03.size_diff == 0
+ - filesize_test_float_04.size_diff == 0
+
+ - filesize_test_float_01.state is undefined
+ - filesize_test_float_02.state in ["file"]
+ - filesize_test_float_01.size is undefined
+ - filesize_test_float_02.size == 512512
+ - filesize_test_float_03.size == 512512
+ - filesize_test_float_04.size == 512512
+
+ - not filesize_stat_float_01.stat.exists
+ - filesize_stat_float_02.stat.exists
+ - filesize_stat_float_02.stat.isreg
+ - filesize_stat_float_02.stat.size == 512512
+ - filesize_stat_float_04.stat.size == 512512
+
+
+
+- name: Create a file with a size of 512.513kB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 512.513kB
+ register: filesize_test_float_11
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_11
+
+
+- name: Create a file with a size of 512.513kB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 512.513kB
+ register: filesize_test_float_12
+
+- name: Stat the file (should have grown by 1 byte)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_12
+
+
+- name: Create a file with a size of 0.512513MB (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0.512513MB
+ register: filesize_test_float_13
+ check_mode: true
+
+- name: Create a file with a size of 0.512513MB (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 0.512513MB
+ register: filesize_test_float_14
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_14
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_float_11 is changed
+ - filesize_test_float_12 is changed
+ - filesize_test_float_13 is not changed
+ - filesize_test_float_14 is not changed
+
+ - filesize_test_float_12.cmd == filesize_test_float_11.cmd
+ - filesize_test_float_13.cmd is undefined
+ - filesize_test_float_14.cmd is undefined
+
+ - filesize_test_float_11.filesize.bytes == 512513
+ - filesize_test_float_12.filesize.bytes == 512513
+ - filesize_test_float_13.filesize.bytes == 512513
+ - filesize_test_float_14.filesize.bytes == 512513
+
+ - filesize_test_float_11.size_diff == 1
+ - filesize_test_float_12.size_diff == 1
+ - filesize_test_float_13.size_diff == 0
+ - filesize_test_float_14.size_diff == 0
+
+ - filesize_test_float_11.size == 512512
+ - filesize_test_float_12.size == 512513
+ - filesize_test_float_13.size == 512513
+ - filesize_test_float_14.size == 512513
+
+ - filesize_stat_float_11.stat.size == 512512
+ - filesize_stat_float_12.stat.size == 512513
+ - filesize_stat_float_14.stat.size == 512513
+
+
+
+- name: Create a file with a size of 4.004MB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.004MB
+ register: filesize_test_float_21
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_21
+
+
+- name: Create a file with a size of 4.004MB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.004MB
+ register: filesize_test_float_22
+
+- name: Stat the file (should have grown to 4.004MB)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_22
+
+
+- name: Create a file with a size of 4.004MB (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.004MB
+ register: filesize_test_float_23
+ check_mode: true
+
+- name: Create a file with a size of 4.004MB (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.004MB
+ register: filesize_test_float_24
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ register: filesize_stat_float_24
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_float_21 is changed
+ - filesize_test_float_22 is changed
+ - filesize_test_float_23 is not changed
+ - filesize_test_float_24 is not changed
+
+ - filesize_test_float_22.cmd == filesize_test_float_21.cmd
+ - filesize_test_float_23.cmd is undefined
+ - filesize_test_float_24.cmd is undefined
+
+ - filesize_test_float_21.filesize.bytes == 4004000
+ - filesize_test_float_22.filesize.bytes == 4004000
+ - filesize_test_float_23.filesize.bytes == 4004000
+ - filesize_test_float_24.filesize.bytes == 4004000
+
+ - filesize_test_float_21.size_diff == 4004000 - 512513
+ - filesize_test_float_22.size_diff == 4004000 - 512513
+ - filesize_test_float_23.size_diff == 0
+ - filesize_test_float_24.size_diff == 0
+
+ - filesize_test_float_21.size == 512513
+ - filesize_test_float_22.size == 4004000
+ - filesize_test_float_23.size == 4004000
+ - filesize_test_float_24.size == 4004000
+
+ - filesize_stat_float_21.stat.size == 512513
+ - filesize_stat_float_22.stat.size == 4004000
+ - filesize_stat_float_24.stat.size == 4004000
+
+
+- name: Remove test file
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/main.yml
new file mode 100644
index 000000000..68cd8934c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure the test dir is present
+ ansible.builtin.file:
+ path: "{{ filesize_testdir }}"
+ state: directory
+
+- name: Ensure the test file is absent
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
+
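+# The block/always construct guarantees that the test directory is removed
+# even if one of the included task files fails.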
+- name: Run all tests and remove the workspace anyway
+ block:
+ - name: Include tasks to test error handling
+ include_tasks: errors.yml
+
+ - name: Include tasks to test basic behaviours
+ include_tasks: basics.yml
+
+ - name: Include tasks to test playing with floating point numbers
+ include_tasks: floats.yml
+
+ - name: Include tasks to test playing with sparse files
+ include_tasks: sparse.yml
+ when:
+ - not (ansible_os_family == 'Darwin' and ansible_distribution_version is version('11', '<'))
+
+ - name: Include tasks to test playing with symlinks
+ include_tasks: symlinks.yml
+
+ always:
+ - name: Remove test dir
+ ansible.builtin.file:
+ path: "{{ filesize_testdir }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/sparse.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/sparse.yml
new file mode 100644
index 000000000..79145b6e2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/sparse.yml
@@ -0,0 +1,286 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Test the module with sparse files
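+#
+# A sparse file advertises its nominal size (up to 4TB here) while allocating
+# almost no blocks on disk, which is what makes such sizes practical in a
+# test environment. The stat tasks below set get_checksum: false, since
+# hashing terabytes of virtual zeroes would be prohibitively slow.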
+
+- name: Create a huge sparse file of 4TB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4TB
+ sparse: true
+ register: filesize_test_sparse_01
+ check_mode: true
+
+- name: Stat the file (should not exist)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_01
+
+
+- name: Create a huge sparse file of 4TB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4TB
+ sparse: true
+ register: filesize_test_sparse_02
+
+- name: Stat the resulting file (should exist now)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_02
+
+
+- name: Create a huge sparse file of 4TB (4000GB) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4000GB
+ sparse: true
+ register: filesize_test_sparse_03
+ check_mode: true
+
+- name: Create a huge sparse file of 4TB (4000GB) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4000GB
+ sparse: true
+ register: filesize_test_sparse_04
+
+- name: Create a huge sparse file of 4TB (4000000 × 1MB) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4000000
+ blocksize: 1MB
+ sparse: true
+ register: filesize_test_sparse_05
+ check_mode: true
+
+- name: Create a huge sparse file of 4TB (4000000 × 1MB) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4000000
+ blocksize: 1MB
+ sparse: true
+ register: filesize_test_sparse_06
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_06
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_sparse_01 is changed
+ - filesize_test_sparse_02 is changed
+ - filesize_test_sparse_03 is not changed
+ - filesize_test_sparse_04 is not changed
+ - filesize_test_sparse_05 is not changed
+ - filesize_test_sparse_06 is not changed
+
+ - filesize_test_sparse_02.cmd == filesize_test_sparse_01.cmd
+ - filesize_test_sparse_03.cmd is undefined
+ - filesize_test_sparse_04.cmd is undefined
+ - filesize_test_sparse_05.cmd is undefined
+ - filesize_test_sparse_06.cmd is undefined
+
+ - filesize_test_sparse_01.filesize.bytes == 4*1000**4
+ - filesize_test_sparse_02.filesize.bytes == 4*1000**4
+ - filesize_test_sparse_03.filesize.bytes == 4*1000**4
+ - filesize_test_sparse_04.filesize.bytes == 4*1000**4
+ - filesize_test_sparse_05.filesize.bytes == 4*1000**4
+ - filesize_test_sparse_06.filesize.bytes == 4*1000**4
+
+ - filesize_test_sparse_01.size_diff == 4*1000**4
+ - filesize_test_sparse_02.size_diff == 4*1000**4
+ - filesize_test_sparse_03.size_diff == 0
+ - filesize_test_sparse_04.size_diff == 0
+ - filesize_test_sparse_05.size_diff == 0
+ - filesize_test_sparse_06.size_diff == 0
+
+ - filesize_test_sparse_01.state is undefined
+ - filesize_test_sparse_02.state in ["file"]
+ - filesize_test_sparse_01.size is undefined
+ - filesize_test_sparse_02.size == 4*1000**4
+ - filesize_test_sparse_03.size == 4*1000**4
+ - filesize_test_sparse_04.size == 4*1000**4
+ - filesize_test_sparse_05.size == 4*1000**4
+ - filesize_test_sparse_06.size == 4*1000**4
+
+ - not filesize_stat_sparse_01.stat.exists
+ - filesize_stat_sparse_02.stat.exists
+ - filesize_stat_sparse_02.stat.isreg
+ - filesize_stat_sparse_02.stat.size == 4*1000**4
+ - filesize_stat_sparse_06.stat.size == 4*1000**4
+
+
+
+- name: Change sparse file size to 4TiB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4TiB
+ sparse: true
+ register: filesize_test_sparse_11
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_11
+
+
+- name: Change sparse file size to 4TiB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4TiB
+ sparse: true
+ register: filesize_test_sparse_12
+
+- name: Stat the file again (should have grown)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_12
+
+
+- name: Change sparse file size to 4TiB (4096GiB) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4096GiB
+ sparse: true
+ register: filesize_test_sparse_13
+ check_mode: true
+
+- name: Change sparse file size to 4TiB (4096GiB) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4096GiB
+ sparse: true
+ register: filesize_test_sparse_14
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_14
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_sparse_11 is changed
+ - filesize_test_sparse_12 is changed
+ - filesize_test_sparse_13 is not changed
+ - filesize_test_sparse_14 is not changed
+
+ - filesize_test_sparse_12.cmd == filesize_test_sparse_11.cmd
+ - filesize_test_sparse_13.cmd is undefined
+ - filesize_test_sparse_14.cmd is undefined
+
+ - filesize_test_sparse_11.size_diff == 398046511104
+ - filesize_test_sparse_12.size_diff == 398046511104
+ - filesize_test_sparse_13.size_diff == 0
+ - filesize_test_sparse_14.size_diff == 0
+
+ - filesize_test_sparse_11.size == 4000000000000
+ - filesize_test_sparse_12.size == 4398046511104
+ - filesize_test_sparse_13.size == 4398046511104
+ - filesize_test_sparse_14.size == 4398046511104
+
+ - filesize_stat_sparse_11.stat.size == 4000000000000
+ - filesize_stat_sparse_12.stat.size == 4398046511104
+ - filesize_stat_sparse_14.stat.size == 4398046511104
+
+
+
+- name: Change sparse file size to 4.321TB (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.321TB
+ sparse: true
+ register: filesize_test_sparse_21
+ check_mode: true
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_21
+
+
+- name: Change sparse file size to 4.321TB
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4.321TB
+ sparse: true
+ register: filesize_test_sparse_22
+
+- name: Stat the file again (should have been reduced)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_22
+
+
+- name: Change sparse file size to 4321×1GB (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4321
+ blocksize: 1GB
+ sparse: true
+ register: filesize_test_sparse_23
+ check_mode: true
+
+- name: Change sparse file size to 4321×1GB (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testfile }}"
+ size: 4321
+ blocksize: 1GB
+ sparse: true
+ register: filesize_test_sparse_24
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_testfile }}"
+ get_checksum: false
+ register: filesize_stat_sparse_24
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_sparse_21 is changed
+ - filesize_test_sparse_22 is changed
+ - filesize_test_sparse_23 is not changed
+ - filesize_test_sparse_24 is not changed
+
+ - filesize_test_sparse_22.cmd == filesize_test_sparse_21.cmd
+ - filesize_test_sparse_23.cmd is undefined
+ - filesize_test_sparse_24.cmd is undefined
+
+ - filesize_test_sparse_21.size_diff == 4321*1000**3 - 4*1024**4
+ - filesize_test_sparse_22.size_diff == 4321*1000**3 - 4*1024**4
+ - filesize_test_sparse_23.size_diff == 0
+ - filesize_test_sparse_24.size_diff == 0
+
+ - filesize_test_sparse_21.size == 4398046511104
+ - filesize_test_sparse_22.size == 4321000000000
+ - filesize_test_sparse_23.size == 4321000000000
+ - filesize_test_sparse_24.size == 4321000000000
+
+ - filesize_stat_sparse_21.stat.size == 4398046511104
+ - filesize_stat_sparse_22.stat.size == 4321000000000
+ - filesize_stat_sparse_24.stat.size == 4321000000000
+
+
+
+- name: Remove test file
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesize/tasks/symlinks.yml b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/symlinks.yml
new file mode 100644
index 000000000..011889656
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesize/tasks/symlinks.yml
@@ -0,0 +1,97 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Check that the module handles symlinks as expected, i.e. as dd does: by
+# following them.
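+#
+# Concretely: pointing the module at {{ filesize_testlink }} must create and
+# resize the link's target, and the returned `path` must be the resolved
+# target rather than the link itself (both points are asserted at the end).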
+
+- name: Ensure the test file is absent
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
+
+- name: Create a broken symlink in the same directory
+ ansible.builtin.file:
+ src: "{{ filesize_testfile | basename }}"
+ dest: "{{ filesize_testlink }}"
+ state: link
+ force: true
+ follow: false
+
+
+
+- name: Create a file with a size of 512 kB (512000 bytes) (check mode)
+ community.general.filesize:
+ path: "{{ filesize_testlink }}"
+ size: "512 kB"
+ register: filesize_test_symlink_01
+ check_mode: true
+
+- name: Create a file with a size of 512 kB (512000 bytes)
+ community.general.filesize:
+ path: "{{ filesize_testlink }}"
+ size: "512 kB"
+ register: filesize_test_symlink_02
+
+- name: Stat the resulting file (not the symlink)
+ ansible.builtin.stat:
+ path: "{{ filesize_test_symlink_02.path }}"
+ register: filesize_stat_symlink_02
+
+
+- name: Create a file with a size of 500 KiB (512000 bytes) (check mode, idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testlink }}"
+ size: "500 KiB"
+ register: filesize_test_symlink_03
+ check_mode: true
+
+- name: Create a file with a size of 500 KiB (512000 bytes) (idempotency)
+ community.general.filesize:
+ path: "{{ filesize_testlink }}"
+ size: "500 KiB"
+ register: filesize_test_symlink_04
+
+- name: Stat the file again (should remain the same)
+ ansible.builtin.stat:
+ path: "{{ filesize_test_symlink_04.path }}"
+ register: filesize_stat_symlink_04
+
+
+- name: Assert that results are as expected
+ ansible.builtin.assert:
+ that:
+ - filesize_test_symlink_01 is changed
+ - filesize_test_symlink_02 is changed
+ - filesize_test_symlink_03 is not changed
+ - filesize_test_symlink_04 is not changed
+
+ - filesize_test_symlink_02.cmd == filesize_test_symlink_01.cmd
+ - filesize_test_symlink_03.cmd is undefined
+ - filesize_test_symlink_04.cmd is undefined
+
+ - filesize_test_symlink_01.state is undefined
+ - filesize_test_symlink_02.state in ["file"]
+ - filesize_test_symlink_01.size is undefined
+ - filesize_test_symlink_02.size == 512000
+ - filesize_test_symlink_03.size == 512000
+ - filesize_test_symlink_04.size == 512000
+
+ - filesize_stat_symlink_02.stat.size == 512000
+ - filesize_stat_symlink_04.stat.size == 512000
+
+ - filesize_test_symlink_04.path == filesize_test_symlink_02.path
+ - filesize_test_symlink_04.path != filesize_testlink
+
+
+
+- name: Remove test file
+ ansible.builtin.file:
+ path: "{{ filesize_testfile }}"
+ state: absent
+
+- name: Remove test link
+ ansible.builtin.file:
+ path: "{{ filesize_testlink }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/aliases b/ansible_collections/community/general/tests/integration/targets/filesystem/aliases
new file mode 100644
index 000000000..a666f7a14
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+destructive
+skip/aix
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml
new file mode 100644
index 000000000..0448d8602
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/defaults/main.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+tested_filesystems:
+ # key: fstype
+  # fssize: size (MB)
+  # grow: true if resizefs is supported
+  # Other minimal sizes:
+  # - XFS: 20MB
+  # - Btrfs: 150MB (50MB when "--metadata single" is used, and 100MB on newer Fedora versions)
+  # - f2fs:
+  #   - 1.2.0 requires at least 116MB
+  #   - 1.7.0 requires at least 30MB
+  #   - 1.10.0 requires at least 38MB
+  #   - resizefs asserts when the initial fs is smaller than 60MB and seems to require 1.10.0
+ ext4: {fssize: 10, grow: true}
+ ext4dev: {fssize: 10, grow: true}
+ ext3: {fssize: 10, grow: true}
+ ext2: {fssize: 10, grow: true}
+ xfs: {fssize: 300, grow: false} # grow requires a mounted filesystem
+ btrfs: {fssize: 150, grow: false} # grow requires a mounted filesystem
+ reiserfs: {fssize: 33, grow: false} # grow not implemented
+ vfat: {fssize: 20, grow: true}
+ ocfs2: {fssize: '{{ ocfs2_fssize }}', grow: false} # grow not implemented
+ f2fs: {fssize: '{{ f2fs_fssize|default(60) }}', grow: 'f2fs_version is version("1.10.0", ">=")'}
+ lvm: {fssize: 20, grow: true}
+ swap: {fssize: 10, grow: false} # grow not implemented
+ ufs: {fssize: 10, grow: true}
+
+
+get_uuid_any: "blkid -c /dev/null -o value -s UUID {{ dev }}"
+get_uuid_ufs: "dumpfs {{ dev }} | awk -v sb=superblock -v id=id '$1 == sb && $4 == id {print $6$7}'"
+get_uuid_cmd: "{{ get_uuid_ufs if fstype == 'ufs' else get_uuid_any }}"
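
A minimal sketch of how these defaults are consumed, for orientation (the task below and its fstype/dev values are hypothetical; get_uuid_cmd is the variable defined just above): each tested_filesystems entry supplies one fstype/fssize pair per run, and get_uuid_cmd lazily expands to the dumpfs/awk pipeline only when fstype is 'ufs', falling back to blkid otherwise.

- name: Show which UUID probe would run for the current fstype (hypothetical sketch)
  ansible.builtin.debug:
    msg: "{{ get_uuid_cmd }}"
  vars:
    fstype: ufs    # assumed value for this sketch
    dev: /dev/md0  # assumed device path for this sketch
  # prints the dumpfs/awk pipeline, since fstype == 'ufs'
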
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml
new file mode 100644
index 000000000..d3facee4f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir_outside_tmp
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml
new file mode 100644
index 000000000..8966ec2e6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_device.yml
@@ -0,0 +1,64 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Create a "disk" file'
+ community.general.filesize:
+ path: '{{ image_file }}'
+ size: '{{ fssize }}M'
+ force: true
+
+- vars:
+ dev: '{{ image_file }}'
+ block:
+ - when: fstype == 'lvm'
+ block:
+ - name: 'Show next free loop device'
+ ansible.builtin.command:
+ cmd: 'losetup -f'
+ register: loop_device_cmd
+
+ - name: 'Create a loop device for LVM'
+ ansible.builtin.command:
+ cmd: 'losetup -f {{ dev }}'
+
+ - name: 'Switch to loop device target for further tasks'
+ ansible.builtin.set_fact:
+ dev: "{{ loop_device_cmd.stdout }}"
+
+ - when: fstype == 'ufs'
+ block:
+ - name: 'Create a memory disk for UFS'
+ ansible.builtin.command:
+ cmd: 'mdconfig -a -f {{ dev }}'
+ register: memory_disk_cmd
+
+ - name: 'Switch to memory disk target for further tasks'
+ ansible.builtin.set_fact:
+ dev: "/dev/{{ memory_disk_cmd.stdout }}"
+
+ - include_tasks: '{{ action }}.yml'
+
+ always:
+ - name: 'Detach loop device used for LVM'
+ ansible.builtin.command:
+ cmd: 'losetup -d {{ dev }}'
+ removes: '{{ dev }}'
+ when: fstype == 'lvm'
+
+ - name: 'Detach memory disk used for UFS'
+ ansible.builtin.command:
+ cmd: 'mdconfig -d -u {{ dev }}'
+ removes: '{{ dev }}'
+ when: fstype == 'ufs'
+
+ - name: 'Clean correct device for LVM and UFS'
+ ansible.builtin.set_fact:
+ dev: '{{ image_file }}'
+ when: fstype in ['lvm', 'ufs']
+
+ - name: 'Remove disk image file'
+ ansible.builtin.file:
+ name: '{{ image_file }}'
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml
new file mode 100644
index 000000000..d5470fa56
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/create_fs.yml
@@ -0,0 +1,119 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Create filesystem ({{ fstype }})"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs_result
+
+- name: "Assert that results are as expected"
+ ansible.builtin.assert:
+ that:
+ - 'fs_result is changed'
+ - 'fs_result is success'
+
+- name: "Get UUID of created filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid
+
+- name: "Check that filesystem isn't created if force isn't used"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs2_result
+
+- name: "Get UUID of the filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid2
+
+- name: "Assert that filesystem UUID is not changed"
+ ansible.builtin.assert:
+ that:
+ - 'fs2_result is not changed'
+ - 'fs2_result is success'
+ - 'uuid.stdout == uuid2.stdout'
+
+- name: "Check that filesystem is recreated if force is used"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ force: true
+ register: fs3_result
+
+- name: "Get UUID of the new filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid3
+
+- name: "Assert that filesystem UUID is changed"
+ # libblkid gets no UUID at all for this fstype on FreeBSD
+ when: not (ansible_system == 'FreeBSD' and fstype == 'reiserfs')
+ ansible.builtin.assert:
+ that:
+ - 'fs3_result is changed'
+ - 'fs3_result is success'
+ - 'uuid.stdout != uuid3.stdout'
+
+
+- when: 'grow|bool and (fstype != "vfat" or resize_vfat)'
+ block:
+ - name: "Increase fake device"
+ community.general.filesize:
+ path: '{{ image_file }}'
+ size: '{{ fssize | int + 1 }}M'
+
+ - name: "Resize loop device for LVM"
+ ansible.builtin.command:
+ cmd: 'losetup -c {{ dev }}'
+ when: fstype == 'lvm'
+
+ - name: "Resize memory disk for UFS"
+ ansible.builtin.command:
+ cmd: 'mdconfig -r -u {{ dev }} -s {{ fssize | int + 1 }}M'
+ when: fstype == 'ufs'
+
+ - name: "Expand filesystem"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ resizefs: true
+ register: fs4_result
+
+ - name: "Get UUID of the filesystem"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid4
+
+ - name: "Assert that filesystem UUID is not changed"
+ ansible.builtin.assert:
+ that:
+ - 'fs4_result is changed'
+ - 'fs4_result is success'
+ - 'uuid3.stdout == uuid4.stdout' # unchanged
+
+- when:
+ - (grow | bool and (fstype != "vfat" or resize_vfat)) or
+ (fstype == "xfs" and ansible_system == "Linux" and
+ ansible_distribution not in ["CentOS", "Ubuntu"])
+ block:
+ - name: "Check that resizefs does nothing if device size is not changed"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ resizefs: true
+ register: fs5_result
+
+ - name: "Assert that the state did not change"
+ ansible.builtin.assert:
+ that:
+ - 'fs5_result is not changed'
+ - 'fs5_result is succeeded'
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/freebsd_setup.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/freebsd_setup.yml
new file mode 100644
index 000000000..03fef66e6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/freebsd_setup.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Uninstall e2fsprogs"
+ ansible.builtin.package:
+ name: e2fsprogs
+ state: absent
+
+- name: "Install util-linux"
+ ansible.builtin.package:
+ name: util-linux
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml
new file mode 100644
index 000000000..0ff0f2309
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/main.yml
@@ -0,0 +1,107 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- ansible.builtin.debug:
+ msg: '{{ role_name }}'
+- ansible.builtin.debug:
+ msg: '{{ role_path|basename }}'
+- import_tasks: setup.yml
+
+- include_vars: "{{ lookup('first_found', search) }}"
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - 'default.yml'
+ paths:
+ - '../vars/'
+
+- include_tasks: create_device.yml
+ vars:
+ image_file: '{{ remote_tmp_dir }}/img'
+ fstype: '{{ item.0.key }}'
+ fssize: '{{ item.0.value.fssize }}'
+ grow: '{{ item.0.value.grow }}'
+ action: '{{ item.1 }}'
+ when:
+ # FreeBSD limited support
+ # Not available: btrfs, lvm, f2fs, ocfs2
+ # All BSD systems use swap fs, but only Linux needs mkswap
+ # Supported: ext2/3/4 (e2fsprogs), xfs (xfsprogs), reiserfs (progsreiserfs), vfat
+ - 'not (ansible_system == "FreeBSD" and item.0.key in ["btrfs", "f2fs", "swap", "lvm", "ocfs2"])'
+ # Available on FreeBSD but not on testbed (util-linux conflicts with e2fsprogs): wipefs, mkfs.minix
+ - 'not (ansible_system == "FreeBSD" and item.1 in ["overwrite_another_fs", "remove_fs"])'
+
+ # Linux limited support
+ # Not available: ufs (this is FreeBSD's native fs)
+ - 'not (ansible_system == "Linux" and item.0.key == "ufs")'
+
+ # Other limitations and corner cases
+
+ # f2fs-tools and reiserfs-utils packages not available with RHEL/CentOS on CI
+ - 'not (ansible_distribution in ["CentOS", "RedHat"] and item.0.key in ["f2fs", "reiserfs"])'
+ - 'not (ansible_os_family == "RedHat" and ansible_distribution_major_version is version("8", ">=") and
+ item.0.key == "btrfs")'
+    # reiserfs-utils package not available with Fedora 35 and newer on CI
+ - 'not (ansible_distribution == "Fedora" and (ansible_facts.distribution_major_version | int >= 35) and
+ item.0.key == "reiserfs")'
+ # reiserfs packages apparently not available with Alpine
+ - 'not (ansible_distribution == "Alpine" and item.0.key == "reiserfs")'
+    # ocfs2 only available on Debian-based distributions
+ - 'not (item.0.key == "ocfs2" and ansible_os_family != "Debian")'
+    # Tests use losetup, which cannot be used inside an unprivileged container
+ - 'not (item.0.key == "lvm" and ansible_virtualization_type in ["docker", "container", "containerd"])'
+ # vfat resizing fails on Debian (but not Ubuntu)
+ - 'not (item.0.key == "vfat" and ansible_distribution == "Debian")' # TODO: figure out why it fails, fix it!
+ # vfat resizing fails on ArchLinux
+ - 'not (item.0.key == "vfat" and ansible_distribution == "Archlinux")' # TODO: figure out why it fails, fix it!
+ # vfat resizing fails on Ubuntu 22.04
+ - 'not (item.0.key == "vfat" and ansible_distribution == "Ubuntu" and (ansible_facts.distribution_major_version | int == 22))'
+ # TODO: figure out why it fails, fix it!
+ # btrfs-progs cannot be installed on ArchLinux
+ - 'not (item.0.key == "btrfs" and ansible_distribution == "Archlinux")' # TODO: figure out why it fails, fix it!
+
+ # On CentOS 6 shippable containers, wipefs seems unable to remove vfat signatures
+ - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and
+ item.1 == "remove_fs" and item.0.key == "vfat")'
+    # On the same systems, mkfs.minix (not handled by the module) can't find the device/file
+ - 'not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<") and
+ item.1 == "overwrite_another_fs")'
+
+    # The xfsprogs package on newer versions of OpenSUSE (15+) requires Python 3, so we skip this on our Python 2 container.
+    # OpenSUSE 42.3 (Python 2) and the Python 3 containers are not affected, so we continue to run those.
+ - 'not (ansible_os_family == "Suse" and ansible_distribution_major_version|int != 42 and
+ item.0.key == "xfs" and ansible_python.version.major == 2)'
+
+ # TODO: something seems to be broken on Alpine
+ - 'not (ansible_distribution == "Alpine")'
+
+ loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}"
+
+
+# With FreeBSD extended support (util-linux is not available before 12.2)
+
+- include_tasks: freebsd_setup.yml
+ when:
+ - 'ansible_system == "FreeBSD"'
+ - 'ansible_distribution_version is version("12.2", ">=")'
+
+- include_tasks: create_device.yml
+ vars:
+ image_file: '{{ remote_tmp_dir }}/img'
+ fstype: '{{ item.0.key }}'
+ fssize: '{{ item.0.value.fssize }}'
+ grow: '{{ item.0.value.grow }}'
+ action: '{{ item.1 }}'
+ when:
+ - 'ansible_system == "FreeBSD"'
+ - 'ansible_distribution_version is version("12.2", ">=")'
+ - 'item.0.key in ["xfs", "vfat"]'
+ loop: "{{ query('dict', tested_filesystems)|product(['create_fs', 'overwrite_another_fs', 'remove_fs'])|list }}"
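
The loop above crosses every filesystem entry with every action: query('dict', tested_filesystems) turns the mapping into a list of {key, value} items, and product(...) pairs each item with an action name, so item.0.key is the fstype, item.0.value carries fssize/grow, and item.1 selects the task file to include. A self-contained sketch with hypothetical inline data:

- name: Illustrate the shape of one loop item (hypothetical data)
  ansible.builtin.debug:
    msg: "fstype={{ item.0.key }} fssize={{ item.0.value.fssize }} action={{ item.1 }}"
  loop: "{{ query('dict', {'ext4': {'fssize': 10, 'grow': true}}) | product(['create_fs']) | list }}"
  # prints: fstype=ext4 fssize=10 action=create_fs
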
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml
new file mode 100644
index 000000000..69418b22f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/overwrite_another_fs.yml
@@ -0,0 +1,59 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Recreate "disk" file'
+ community.general.filesize:
+ path: '{{ image_file }}'
+ size: '{{ fssize }}M'
+ force: true
+
+- name: 'Create a minix filesystem'
+ ansible.builtin.command:
+ cmd: 'mkfs.minix {{ dev }}'
+
+- name: 'Get UUID of the new filesystem'
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid
+
+- name: "Check that an existing filesystem (not handled by this module) isn't overwritten when force isn't used"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ register: fs_result
+ ignore_errors: true
+
+- name: 'Get UUID of the filesystem'
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid2
+
+- name: 'Assert that module failed and filesystem UUID is not changed'
+ ansible.builtin.assert:
+ that:
+ - 'fs_result is failed'
+ - 'uuid.stdout == uuid2.stdout'
+
+- name: "Check that an existing filesystem (not handled by this module) is overwritten when force is used"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+ force: true
+ register: fs_result2
+
+- name: 'Get UUID of the new filesystem'
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: uuid3
+
+- name: 'Assert that module succeeded and filesystem UUID is changed'
+ ansible.builtin.assert:
+ that:
+ - 'fs_result2 is success'
+ - 'fs_result2 is changed'
+ - 'uuid2.stdout != uuid3.stdout'
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml
new file mode 100644
index 000000000..c5428b309
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/remove_fs.yml
@@ -0,0 +1,102 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# We assume 'create_fs' tests have passed.
+
+- name: "Create filesystem"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ fstype: '{{ fstype }}'
+
+- name: "Get filesystem UUID with 'blkid'"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: blkid_ref
+
+- name: "Assert that a filesystem exists on top of the device"
+ ansible.builtin.assert:
+ that:
+ - blkid_ref.stdout | length > 0
+
+
+# Test check_mode first
+- name: "Remove filesystem (check mode)"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+ check_mode: true
+
+- name: "Get filesystem UUID with 'blkid' (should remain the same)"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ register: blkid
+
+- name: "Assert that the state changed but the filesystem still exists"
+ ansible.builtin.assert:
+ that:
+ - wipefs is changed
+ - blkid.stdout == blkid_ref.stdout
+
+# Do it
+- name: "Remove filesystem"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+
+- name: "Get filesystem UUID with 'blkid' (should be empty)"
+ ansible.builtin.shell:
+ cmd: "{{ get_uuid_cmd }}"
+ changed_when: false
+ failed_when: false
+ register: blkid
+
+- name: "Assert that the state changed and the device has no filesystem"
+ ansible.builtin.assert:
+ that:
+ - wipefs is changed
+ - blkid.stdout | length == 0
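+      # blkid exits with status 2 when it finds no filesystem signature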
+ - blkid.rc == 2
+
+# Do it again
+- name: "Remove filesystem (idempotency)"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+
+- name: "Assert that the state did not change"
+ ansible.builtin.assert:
+ that:
+ - wipefs is not changed
+
+# and again
+- name: "Remove filesystem (idempotency, check mode)"
+ community.general.filesystem:
+ dev: '{{ dev }}'
+ state: absent
+ register: wipefs
+ check_mode: true
+
+- name: "Assert that the state did not change"
+ ansible.builtin.assert:
+ that:
+ - wipefs is not changed
+
+
+# Also test removal of a filesystem on a nonexistent device
+- name: "Remove filesystem (nonexistent device)"
+ community.general.filesystem:
+ dev: '/dev/unexistent_device'
+ state: absent
+ register: wipefs
+
+- name: "Assert that the state did not change"
+ ansible.builtin.assert:
+ that:
+ - wipefs is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml
new file mode 100644
index 000000000..97dafaeee
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/tasks/setup.yml
@@ -0,0 +1,154 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Installing e2fsprogs on FreeBSD provides a usable blkid command, but the
+# package conflicts with util-linux, which also provides blkid as well as
+# wipefs (required for filesystem state=absent).
+- name: "Install filesystem tools"
+ ansible.builtin.package:
+ name: '{{ item }}'
+ state: present
+  # xfsprogs on newer OpenSUSE releases requires Python 3, so skip it on our Python 2 OpenSUSE builds (42.x is unaffected)
+ when: not (item == 'xfsprogs' and ansible_os_family == 'Suse' and ansible_python.version.major == 2 and ansible_distribution_major_version|int != 42)
+ loop:
+ - e2fsprogs
+ - xfsprogs
+
+- name: "Install btrfs progs"
+ ansible.builtin.package:
+ name: btrfs-progs
+ state: present
+ when:
+ - ansible_os_family != 'Suse'
+ - not (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('16.04', '<='))
+ - ansible_system != "FreeBSD"
+ - not (ansible_facts.os_family == "RedHat" and ansible_facts.distribution_major_version is version('8', '>='))
+ - ansible_os_family != 'Archlinux' # TODO
+
+- name: "Install btrfs tools (Ubuntu <= 16.04)"
+ ansible.builtin.package:
+ name: btrfs-tools
+ state: present
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_version is version('16.04', '<=')
+
+- name: "Install btrfs progs (OpenSuse)"
+ ansible.builtin.package:
+ name:
+ - python{{ ansible_python.version.major }}-xml
+ - btrfsprogs
+ state: present
+ when: ansible_os_family == 'Suse'
+
+- name: "Install reiserfs utils (Fedora)"
+ ansible.builtin.package:
+ name: reiserfs-utils
+ state: present
+ when:
+ - ansible_distribution == 'Fedora' and (ansible_facts.distribution_major_version | int < 35)
+
+- name: "Install reiserfs and util-linux-systemd (for findmnt) (OpenSuse)"
+ ansible.builtin.package:
+ name:
+ - reiserfs
+ - util-linux-systemd
+ state: present
+ when:
+ - ansible_os_family == 'Suse'
+
+- name: "Install reiserfs progs (Debian and more)"
+ ansible.builtin.package:
+ name: reiserfsprogs
+ state: present
+ when:
+ - ansible_system == 'Linux'
+ - ansible_os_family not in ['Suse', 'RedHat', 'Alpine']
+
+- name: "Install reiserfs progs (FreeBSD)"
+ ansible.builtin.package:
+ name: progsreiserfs
+ state: present
+ when:
+ - ansible_system == 'FreeBSD'
+
+- name: "Install ocfs2 (Debian)"
+ ansible.builtin.package:
+ name: ocfs2-tools
+ state: present
+ when: ansible_os_family == 'Debian'
+
+- name: "Install f2fs tools and get version"
+ when:
+ - ansible_os_family != 'RedHat' or ansible_distribution == 'Fedora'
+ - ansible_distribution != 'Ubuntu' or ansible_distribution_version is version('16.04', '>=')
+ - ansible_system != "FreeBSD"
+ block:
+ - name: "Install f2fs tools"
+ ansible.builtin.package:
+ name: f2fs-tools
+ state: present
+
+ - name: "Fetch f2fs version"
+ ansible.builtin.command:
+ cmd: mkfs.f2fs /dev/null
+ changed_when: false
+ ignore_errors: true
+ register: mkfs_f2fs
+
+ - name: "Record f2fs_version"
+ ansible.builtin.set_fact:
+ f2fs_version: '{{ mkfs_f2fs.stdout
+ | regex_search("F2FS-tools: mkfs.f2fs Ver:.*")
+ | regex_replace("F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) .*", "\1") }}'
+
+- name: "Install dosfstools and lvm2 (Linux)"
+ ansible.builtin.package:
+ name:
+ - dosfstools
+ - lvm2
+ when: ansible_system == 'Linux'
+
+- name: "Install fatresize and get version"
+ when:
+ - ansible_system == 'Linux'
+ - ansible_os_family != 'Suse'
+ - ansible_os_family != 'RedHat' or (ansible_distribution == 'CentOS' and ansible_distribution_version is version('7.0', '=='))
+ - ansible_os_family != 'Alpine'
+ block:
+ - name: "Install fatresize"
+ ansible.builtin.package:
+ name: fatresize
+ state: present
+
+ - name: "Fetch fatresize version"
+ ansible.builtin.command:
+ cmd: fatresize --help
+ changed_when: false
+ register: fatresize
+
+ - name: "Record fatresize_version"
+ ansible.builtin.set_fact:
+ fatresize_version: '{{ fatresize.stdout_lines[0] | regex_search("[0-9]+\.[0-9]+\.[0-9]+") }}'
+
+- name: "Fetch e2fsprogs version"
+ ansible.builtin.command:
+ cmd: mke2fs -V
+ changed_when: false
+ register: mke2fs
+
+- name: "Record e2fsprogs_version"
+ ansible.builtin.set_fact:
+ # mke2fs 1.43.6 (29-Aug-2017)
+ e2fsprogs_version: '{{ mke2fs.stderr_lines[0] | regex_search("[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?") }}'
+
+- name: "Set version-related facts to skip further tasks"
+ ansible.builtin.set_fact:
+ # http://e2fsprogs.sourceforge.net/e2fsprogs-release.html#1.43
+ # Mke2fs no longer complains if the user tries to create a file system
+ # using the entire block device.
+ force_creation: "{{ e2fsprogs_version is version('1.43', '<') }}"
+ # Earlier versions have a segfault bug
+ resize_vfat: "{{ fatresize_version|default('0.0') is version('1.0.4', '>=') }}"
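
These facts rely on the version() test to compare dotted version strings; a brief sketch with assumed values showing how the gating evaluates:

- name: Illustrate the version() comparisons behind these facts (assumed values)
  ansible.builtin.assert:
    that:
      - "'1.43.6' is version('1.43', '>=')"      # e2fsprogs recent enough, so force_creation would be false
      - "not ('0.0' is version('1.0.4', '>='))"  # fatresize absent or too old, so resize_vfat would be false
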
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml
new file mode 100644
index 000000000..d0cc5f229
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/vars/Ubuntu-14.04.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ocfs2_fssize: 108
+f2fs_fssize: 116
diff --git a/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml
new file mode 100644
index 000000000..80151e40e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filesystem/vars/default.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ocfs2_fssize: 20
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_counter/aliases b/ansible_collections/community/general/tests/integration/targets/filter_counter/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_counter/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_counter/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_counter/tasks/main.yml
new file mode 100644
index 000000000..77d6b1b02
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_counter/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test counter filter
+ assert:
+ that:
+ - "('abca' | community.general.counter) == {'a': 2, 'b': 1, 'c': 1}"
+ - "(['apple', 'pear', 'pear'] | community.general.counter) == {'apple': 1, 'pear': 2}"
+ - "([1, 2, 2, 3] | community.general.counter) == {1: 1, 2: 2, 3: 1}"
+ - "([1.11, 1.11, 1.12] | community.general.counter) == {1.11: 2, 1.12: 1}"
+
+- name: test fail argument not a sequence
+ debug:
+ msg: "{{ {'a': 'b'} | community.general.counter }}"
+ ignore_errors: true
+ register: res
+
+- name: verify test fail argument not a sequence
+ assert:
+ that:
+ - res is failed
+ - res.msg is match('Argument for community.general.counter must be a sequence')
+
+- name: test fail element not hashable
+ debug:
+ msg: "{{ [{'a': 'b'}] | community.general.counter }}"
+ ignore_errors: true
+ register: res
+
+- name: verify test fail element not hashable
+ assert:
+ that:
+ - res is failed
+ - res.msg is match('community.general.counter needs a sequence with hashable elements')
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_dict/aliases b/ansible_collections/community/general/tests/integration/targets/filter_dict/aliases
new file mode 100644
index 000000000..e8051e042
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_dict/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_dict/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_dict/tasks/main.yml
new file mode 100644
index 000000000..7b4cefde9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_dict/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Test dict filter"
+ assert:
+ that:
+ - "[['a', 'b']] | community.general.dict == dict([['a', 'b']])"
+ - "[['a', 'b'], [1, 2]] | community.general.dict == dict([['a', 'b'], [1, 2]])"
+ - "[] | community.general.dict == dict([])"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases b/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml
new file mode 100644
index 000000000..47dc8e25d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_dict_kv/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test dict_kv filter
+ assert:
+ that:
+ - "('value' | community.general.dict_kv('key')) == {'key': 'value'}"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_from_csv/aliases b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_from_csv/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/tasks/main.yml
new file mode 100644
index 000000000..5c58f85d4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/tasks/main.yml
@@ -0,0 +1,54 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Parse valid csv input
+ assert:
+ that:
+ - "valid_comma_separated | community.general.from_csv == expected_result"
+
+- name: Parse valid csv input containing spaces with/without skipinitialspace=True
+ assert:
+ that:
+ - "valid_comma_separated_spaces | community.general.from_csv(skipinitialspace=True) == expected_result"
+ - "valid_comma_separated_spaces | community.general.from_csv != expected_result"
+
+- name: Parse valid csv input with no headers with/without specifying fieldnames
+ assert:
+ that:
+ - "valid_comma_separated_no_headers | community.general.from_csv(fieldnames=['id','name','role']) == expected_result"
+ - "valid_comma_separated_no_headers | community.general.from_csv != expected_result"
+
+- name: Parse valid pipe-delimited csv input with/without delimiter=|
+ assert:
+ that:
+ - "valid_pipe_separated | community.general.from_csv(delimiter='|') == expected_result"
+ - "valid_pipe_separated | community.general.from_csv != expected_result"
+
+- name: Register result of invalid csv input when strict=False
+ debug:
+ var: "invalid_comma_separated | community.general.from_csv"
+ register: _invalid_csv_strict_false
+
+- name: Test invalid csv input when strict=False is successful
+ assert:
+ that:
+ - _invalid_csv_strict_false is success
+
+- name: Register result of invalid csv input when strict=True
+ debug:
+ var: "invalid_comma_separated | community.general.from_csv(strict=True)"
+ register: _invalid_csv_strict_true
+ ignore_errors: true
+
+- name: Test invalid csv input when strict=True is failed
+ assert:
+ that:
+ - _invalid_csv_strict_true is failed
+ - _invalid_csv_strict_true.msg is match('Unable to process file:.*')
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_from_csv/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/vars/main.yml
new file mode 100644
index 000000000..7212c5aee
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_from_csv/vars/main.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+valid_comma_separated: |
+ id,name,role
+ 1,foo,bar
+ 2,bar,baz
+valid_comma_separated_spaces: |
+ id,name,role
+ 1, foo, bar
+ 2, bar, baz
+valid_comma_separated_no_headers: |
+ 1,foo,bar
+ 2,bar,baz
+valid_pipe_separated: |
+ id|name|role
+ 1|foo|bar
+ 2|bar|baz
+invalid_comma_separated: |
+ id,name,role
+ 1,foo,bar
+ 2,"b"ar",baz
+expected_result:
+ - id: '1'
+ name: foo
+ role: bar
+ - id: '2'
+ name: bar
+ role: baz
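
A hedged sketch of why the keyword-less variants in the tasks above do not equal expected_result (the assertions use the vars defined here, and the expected values follow from Python's csv semantics): without skipinitialspace the leading spaces survive in the parsed values, and without fieldnames the first data row is consumed as the header.

- name: Show the keyword-less from_csv behaviour (sketch)
  assert:
    that:
      # leading spaces are kept in the values when skipinitialspace is omitted
      - "(valid_comma_separated_spaces | community.general.from_csv | first)['name'] == ' foo'"
      # the first data row becomes the header when fieldnames is omitted
      - "(valid_comma_separated_no_headers | community.general.from_csv | length) == 1"
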
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/aliases b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/aliases
new file mode 100644
index 000000000..e8051e042
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml
new file mode 100644
index 000000000..f4047f4ac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test functionality
+ assert:
+ that:
+ - list1 | community.general.groupby_as_dict('name') == dict1
+
+- name: 'Test error: not a list'
+ set_fact:
+ test: "{{ list_no_list | community.general.groupby_as_dict('name') }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.msg == 'Input is not a sequence'
+
+- name: 'Test error: list element not a mapping'
+ set_fact:
+ test: "{{ list_no_dict | community.general.groupby_as_dict('name') }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "result.msg == 'Sequence element #0 is not a mapping'"
+
+- name: 'Test error: list element does not have attribute'
+ set_fact:
+ test: "{{ list_no_attribute | community.general.groupby_as_dict('name') }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - "result.msg == 'Attribute not contained in element #1 of sequence'"
+
+- name: 'Test error: attribute collision'
+ set_fact:
+ test: "{{ list_collision | community.general.groupby_as_dict('name') }}"
+ ignore_errors: true
+ register: result
+
+- assert:
+ that:
+ - result.msg == "Multiple sequence entries have attribute value 'a'" or result.msg == "Multiple sequence entries have attribute value u'a'"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/vars/main.yml
new file mode 100644
index 000000000..74e24dd3c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_groupby_as_dict/vars/main.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: a
+ x: y
+ - name: b
+ z: 1
+
+dict1:
+ a:
+ name: a
+ x: y
+ b:
+ name: b
+ z: 1
+
+list_no_list:
+ a:
+ name: a
+
+list_no_dict:
+ - []
+ - 1
+
+list_no_attribute:
+ - name: a
+ foo: baz
+ - foo: bar
+
+list_collision:
+ - name: a
+ - name: a
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_hashids/aliases b/ansible_collections/community/general/tests/integration/targets/filter_hashids/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_hashids/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.sh b/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.sh
new file mode 100755
index 000000000..c7a215a06
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+
+pip install hashids
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.yml b/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.yml
new file mode 100644
index 000000000..3ac0e388f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_hashids/runme.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ roles:
+ - { role: filter_hashids }
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_hashids/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_hashids/tasks/main.yml
new file mode 100644
index 000000000..4a76540f6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_hashids/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test valid hashable inputs
+ assert:
+ that:
+ - "single_int | community.general.hashids_encode | community.general.hashids_decode == [single_int]"
+ - "int_list | community.general.hashids_encode | community.general.hashids_decode | list == int_list"
+ - "(1,2,3) | community.general.hashids_encode | community.general.hashids_decode == [1,2,3]"
+
+- name: Test valid parameters
+ assert:
+ that:
+ - "single_int | community.general.hashids_encode(salt='test') | community.general.hashids_decode(salt='test') == [single_int]"
+ - "single_int | community.general.hashids_encode(alphabet='1234567890abcdef') | community.general.hashids_decode(alphabet='1234567890abcdef') == [single_int]"
+ - "single_int | community.general.hashids_encode(min_length=20) | community.general.hashids_decode(min_length=20) == [single_int]"
+ - "single_int | community.general.hashids_encode(min_length=20) | length == 20"
+
+- name: Test valid unhashable inputs
+ assert:
+ that:
+ - "single_float | community.general.hashids_encode | community.general.hashids_decode == []"
+ - "arbitrary_string | community.general.hashids_encode | community.general.hashids_decode == []"
+
+- name: Register result of invalid salt
+ debug:
+ var: "invalid_input | community.general.hashids_encode(salt=10)"
+ register: invalid_salt_message
+ ignore_errors: true
+
+- name: Test invalid salt fails
+ assert:
+ that:
+ - invalid_salt_message is failed
+
+- name: Register result of invalid alphabet
+ debug:
+ var: "invalid_input | community.general.hashids_encode(alphabet='abc')"
+ register: invalid_alphabet_message
+ ignore_errors: true
+
+- name: Test invalid alphabet fails
+ assert:
+ that:
+ - invalid_alphabet_message is failed
+
+- name: Register result of invalid min_length
+ debug:
+ var: "invalid_input | community.general.hashids_encode(min_length='foo')"
+ register: invalid_min_length_message
+ ignore_errors: true
+
+- name: Test invalid min_length fails
+ assert:
+ that:
+ - invalid_min_length_message is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_hashids/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_hashids/vars/main.yml
new file mode 100644
index 000000000..db65ef562
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_hashids/vars/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+single_int: 1
+int_list: [1, 2, 3]
+single_float: [2.718]
+arbitrary_string: "will not hash"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases b/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases
new file mode 100644
index 000000000..0e799090e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_jc/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/python2.7 # jc only supports python3.x
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh b/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh
new file mode 100755
index 000000000..c427b8b35
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+
+pip install jc
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml b/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml
new file mode 100644
index 000000000..6d6a89c00
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_jc/runme.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ roles:
+ - { role: filter_jc }
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml
new file mode 100644
index 000000000..a06a0bfa4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_jc/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test jc key/value parser
+ assert:
+ that:
+ - "('key1=value1\nkey2=value2' | community.general.jc('kv')) == {'key1': 'value1', 'key2': 'value2'}"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases b/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases
new file mode 100644
index 000000000..cee9abd2c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_json_query/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh b/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh
new file mode 100755
index 000000000..b1fa994b3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+source virtualenv.sh
+
+# Requirements have to be installed prior to running ansible-playbook
+# because plugins and requirements are loaded before the task runs
+
+pip install jmespath
+
+ANSIBLE_ROLES_PATH=../ ansible-playbook runme.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml b/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml
new file mode 100644
index 000000000..28281cffb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_json_query/runme.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ roles:
+ - { role: filter_json_query }
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml
new file mode 100644
index 000000000..92db6d876
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_json_query/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test json_query filter
+ assert:
+ that:
+ - "users | community.general.json_query('[*].hosts[].host') == ['host_a', 'host_b', 'host_c', 'host_d']"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml
new file mode 100644
index 000000000..1edd723be
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_json_query/vars/main.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+users:
+ - name: steve
+ hosts:
+ - host: host_a
+ password: abc
+ - host: host_b
+ - name: bill
+ hosts:
+ - host: host_c
+ password: default
+ - host: host_d
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/aliases b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml
new file mode 100644
index 000000000..62896e1b0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_2-10.yml
@@ -0,0 +1,143 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 101.Merge 2 lists by attribute name. list_merge='keep'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='keep'. set
+ set_fact:
+ my_list: "{{ [list100, list101]|
+ community.general.lists_mergeby('name', list_merge='keep') }}"
+ - name: Merge 2 lists by attribute name. list_merge='keep'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result101):
+ {{ my_list|difference(result101)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='keep'. assert
+ assert:
+ that: my_list | difference(result101) | length == 0
+ tags: t101
+
+- name: 102.Merge 2 lists by attribute name. list_merge='append'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='append'. set
+ set_fact:
+ my_list: "{{ [list100, list101]|
+ community.general.lists_mergeby('name', list_merge='append') }}"
+ - name: Merge 2 lists by attribute name. list_merge='append'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result102):
+ {{ my_list|difference(result102)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='append'. assert
+ assert:
+ that: my_list | difference(result102) | length == 0
+ tags: t102
+
+- name: 103.Merge 2 lists by attribute name. list_merge='prepend'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='prepend'. set
+ set_fact:
+ my_list: "{{ [list100, list101]|
+ community.general.lists_mergeby('name', list_merge='prepend') }}"
+ - name: Merge 2 lists by attribute name. list_merge='prepend'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result103):
+ {{ my_list|difference(result103)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='prepend'. assert
+ assert:
+ that: my_list | difference(result103) | length == 0
+ tags: t103
+
+- name: 104.Merge 2 lists by attribute name. list_merge='append_rp'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='append_rp'. set
+ set_fact:
+ my_list: "{{ [list102, list103]|
+ community.general.lists_mergeby('name', list_merge='append_rp') }}"
+ - name: Merge 2 lists by attribute name. list_merge='append_rp'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result104):
+ {{ my_list|difference(result104)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='append_rp'. assert
+ assert:
+ that: my_list | difference(result104) | length == 0
+ tags: t104
+
+- name: 105.Merge 2 lists by attribute name. list_merge='prepend_rp'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='prepend_rp'. set
+ set_fact:
+ my_list: "{{ [list102, list103]|
+ community.general.lists_mergeby('name', list_merge='prepend_rp') }}"
+ - name: Merge 2 lists by attribute name. list_merge='prepend_rp'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result105):
+ {{ my_list|difference(result105)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='prepend_rp'. assert
+ assert:
+ that: my_list | difference(result105) | length == 0
+ tags: t105
+
+# Test recursive
+
+- name: 200.Merge by name. recursive=True list_merge='append_rp'
+ block:
+ - name: Merge by name. recursive=True list_merge='append_rp'. set
+ set_fact:
+ my_list: "{{ [list200, list201]|
+ community.general.lists_mergeby('name',
+ recursive=True,
+ list_merge='append_rp') }}"
+ - name: Merge by name. recursive=True list_merge='append_rp'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result200):
+ {{ my_list|difference(result200)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge by name. recursive=True list_merge='append_rp'. assert
+ assert:
+ that: my_list | difference(result200) | length == 0
+ tags: t200
+
+- name: 201.Merge by name. recursive=False list_merge='append_rp'
+ block:
+ - name: Merge by name. recursive=False list_merge='append_rp'. set
+ set_fact:
+ my_list: "{{ [list200, list201]|
+ community.general.lists_mergeby('name',
+ recursive=False,
+ list_merge='append_rp') }}"
+ - name: Merge by name. recursive=False list_merge='append_rp'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result201):
+ {{ my_list|difference(result201)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge by name. recursive=False list_merge='append_rp'. assert
+ assert:
+ that: my_list | difference(result201) | length == 0
+ tags: t201
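For orientation, the list_merge modes exercised above resolve a shared key whose value is a list as follows; this is a sketch derived from the list100/list101 and result100-result105 fixtures in vars/main.yml further below:

    # Entries with name=myname01 in the two input lists:
    #   first:  {name: myname01, param01: [default1]}
    #   second: {name: myname01, param01: [patch1]}
    # list_merge='replace' (default) -> param01: [patch1]            (second wins)
    # list_merge='keep'              -> param01: [default1]          (first wins)
    # list_merge='append'            -> param01: [default1, patch1]
    # list_merge='prepend'           -> param01: [patch1, default1]
    # The *_rp variants behave like append/prepend but first remove elements of
    # the first list that also occur in the second (see result104/result105).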
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml
new file mode 100644
index 000000000..93917c97c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/lists_mergeby_default.yml
@@ -0,0 +1,169 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Debug ansible_version
+ debug:
+ var: ansible_version
+ when: debug_test|d(false)|bool
+ tags: t0
+
+- name: 1. Test lists merged by attribute name
+ block:
+ - name: Test lists merged by attribute name debug
+ debug:
+ msg: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}"
+ when: debug_test|d(false)|bool
+ - name: Test lists merged by attribute name assert
+ assert:
+ that:
+ - "(list1 | community.general.lists_mergeby(list2, 'name') | list |
+ difference(list3) | length) == 0"
+ tags: t1
+
+- name: 2.Test list1 empty
+ block:
+ - name: Test list1 empty debug
+ debug:
+ msg: "{{ [] | community.general.lists_mergeby(list2, 'name') }}"
+ when: debug_test|d(false)|bool
+ - name: Test list1 empty assert
+ assert:
+ that:
+ - "([] | community.general.lists_mergeby(list2, 'name') | list |
+ difference(list2) | length) == 0"
+ tags: t2
+
+- name: 3.Test all lists empty
+ block:
+ - name: Test all lists empty debug
+ debug:
+ msg: "{{ [] | community.general.lists_mergeby([], 'name') }}"
+ when: debug_test|d(false)|bool
+ - name: Test all lists empty assert
+ assert:
+ that:
+ - "([] | community.general.lists_mergeby([], 'name') | list |
+ length) == 0"
+ tags: t3
+
+- name: 4.First argument must be list
+ block:
+ - name: First argument must be list set
+ set_fact:
+ my_list: "{{ {'x': 'y'} | community.general.lists_mergeby(list2, 'name') }}"
+ register: result
+ ignore_errors: true
+ - name: First argument must be list debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+ - name: First argument must be list assert
+ assert:
+ that:
+ - result is failed
+ - '"All arguments before the argument index for community.general.lists_mergeby must be lists." in result.msg'
+ tags: t4
+
+- name: 5.Second argument must be list
+ block:
+ - name: Second argument must be list set
+ set_fact:
+ my_list: "{{ list1 | community.general.lists_mergeby({'x': 'y'}, 'name') }}"
+ register: result
+ ignore_errors: true
+ - name: Second argument must be list set debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+ - name: Second argument must be list set assert
+ assert:
+ that:
+ - result is failed
+ - '"All arguments before the argument index for community.general.lists_mergeby must be lists." in result.msg'
+ tags: t5
+
+- name: 6.First arguments after the lists must be string
+ block:
+ - name: First arguments after the lists must be string set
+ set_fact:
+ my_list: "{{ list1 | community.general.lists_mergeby(list2, {'x': 'y'}) }}"
+ register: result
+ ignore_errors: true
+ - name: First arguments after the lists must be string debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+ - name: First arguments after the lists must be string assert
+ assert:
+ that:
+ - result is failed
+ - '"First argument after the lists for community.general.lists_mergeby must be string." in result.msg'
+ tags: t6
+
+- name: 7.Elements of list must be dictionaries
+ block:
+ - name: Elements of list must be dictionaries set
+ set_fact:
+ my_list: "{{ list4 | community.general.lists_mergeby(list2, 'name') }}"
+ register: result
+ ignore_errors: true
+ - name: Elements of list must be dictionaries debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+ - name: Elements of list must be dictionaries assert
+ assert:
+ that:
+ - result is failed
+ - '"Elements of list arguments for lists_mergeby must be dictionaries." in result.msg'
+ tags: t7
+
+- name: 8.Merge 3 lists by attribute name. 1 list in params.
+ block:
+ - name: Merge 3 lists by attribute name. 1 list in params. set
+ set_fact:
+ my_list: "{{ [list1, list2] | community.general.lists_mergeby(list5, 'name') }}"
+ - name: Merge 3 lists by attribute name. 1 list in params. debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+ - name: Merge 3 lists by attribute name. 1 list in params. assert
+ assert:
+ that: my_list | difference(result1) | length == 0
+ tags: t8
+
+- name: 9.Merge 3 lists by attribute name. No list in the params.
+ block:
+ - name: Merge 3 lists by attribute name. No list in the params. set
+ set_fact:
+ my_list: "{{ [list1, list2, list5] | community.general.lists_mergeby('name') }}"
+ - name: Merge 3 lists by attribute name. No list in the params. debug
+ debug:
+ var: my_list
+ when: debug_test|d(false)|bool
+  - name: Merge 3 lists by attribute name. No list in the params. assert
+ assert:
+ that: my_list | difference(result1) | length == 0
+ tags: t9
+
+# Test list_merge default options
+
+- name: 100.Merge 2 lists by attribute name. list_merge='replace'
+ block:
+ - name: Merge 2 lists by attribute name. list_merge='replace'. set
+ set_fact:
+ my_list: "{{ [list100, list101] | community.general.lists_mergeby('name') }}"
+ - name: Merge 2 lists by attribute name. list_merge='replace'. debug
+ debug:
+ msg: |-
+ my_list:
+ {{ my_list|to_nice_yaml|indent(2) }}
+ my_list|difference(result100):
+ {{ my_list|difference(result100)|to_nice_yaml|indent(2) }}
+ when: debug_test|d(false)|bool
+ - name: Merge 2 lists by attribute name. list_merge='replace'. assert
+ assert:
+ that: my_list | difference(result100) | length == 0
+ tags: t100
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/main.yml
new file mode 100644
index 000000000..d0bda368c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test list_merge default options
+ import_tasks: lists_mergeby_default.yml
+
+- name: Test list_merge non-default options in Ansible 2.10 and higher
+ import_tasks: lists_mergeby_2-10.yml
+ when: ansible_version.full is version('2.10', '>=')
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/vars/main.yml
new file mode 100644
index 000000000..f3b492878
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_lists_mergeby/vars/main.yml
@@ -0,0 +1,209 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+list1:
+ - name: myname01
+ param01: myparam01
+ - name: myname02
+ param01: myparam02
+
+list2:
+ - name: myname01
+ param01: myparam03
+ - name: myname02
+ param02: myparam04
+ - name: myname03
+ param03: myparam03
+
+list3:
+ - name: myname01
+ param01: myparam03
+ - name: myname02
+ param01: myparam02
+ param02: myparam04
+ - name: myname03
+ param03: myparam03
+
+list4:
+ - name: myname01
+ param01: myparam01
+ - myname02
+
+list5:
+ - name: myname01
+ param01: myparam05
+ - name: myname02
+ param01: myparam06
+
+result1:
+ - name: myname01
+ param01: myparam05
+ - name: myname02
+ param01: myparam06
+ param02: myparam04
+ - name: myname03
+ param03: myparam03
+
+# Test list_merge
+
+list100:
+ - name: myname01
+ param01:
+ - default1
+ - name: myname02
+ param01:
+ - default2
+
+list101:
+ - name: myname01
+ param01:
+ - patch1
+ - name: myname02
+ param01:
+ - patch2
+
+list102:
+ - name: myname01
+ param01:
+ - patch1a
+ - patch1b
+ - patch1c
+ - name: myname02
+ param01:
+ - patch2a
+ - patch2b
+ - patch2d
+
+list103:
+ - name: myname01
+ param01:
+ - patch1c
+ - patch1d
+ - name: myname02
+ param01:
+ - patch2c
+ - patch2d
+
+result100:
+ - name: myname01
+ param01:
+ - patch1
+ - name: myname02
+ param01:
+ - patch2
+
+result101:
+ - name: myname01
+ param01:
+ - default1
+ - name: myname02
+ param01:
+ - default2
+
+result102:
+ - name: myname01
+ param01:
+ - default1
+ - patch1
+ - name: myname02
+ param01:
+ - default2
+ - patch2
+
+result103:
+ - name: myname01
+ param01:
+ - patch1
+ - default1
+ - name: myname02
+ param01:
+ - patch2
+ - default2
+
+result104:
+ - name: myname01
+ param01:
+ - patch1a
+ - patch1b
+ - patch1c
+ - patch1d
+ - name: myname02
+ param01:
+ - patch2a
+ - patch2b
+ - patch2c
+ - patch2d
+
+result105:
+ - name: myname01
+ param01:
+ - patch1c
+ - patch1d
+ - patch1a
+ - patch1b
+ - name: myname02
+ param01:
+ - patch2c
+ - patch2d
+ - patch2a
+ - patch2b
+
+# Test recursive
+
+list200:
+ - name: myname01
+ param01:
+ x: default_value
+ y: default_value
+ list:
+ - default_value
+ - name: myname02
+ param01: [1, 1, 2, 3]
+
+list201:
+ - name: myname01
+ param01:
+ y: patch_value
+ z: patch_value
+ list:
+ - patch_value
+ - name: myname02
+ param01: [3, 4, 4, {key: value}]
+
+result200:
+ - name: myname01
+ param01:
+ list:
+ - default_value
+ - patch_value
+ x: default_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 4
+ - key: value
+
+result201:
+ - name: myname01
+ param01:
+ list:
+ - patch_value
+ y: patch_value
+ z: patch_value
+ - name: myname02
+ param01:
+ - 1
+ - 1
+ - 2
+ - 3
+ - 4
+ - 4
+ - key: value
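Read together, the recursive fixtures pin down the recursive flag: with recursive=True (result200) the nested param01 dictionaries are merged key by key, so x survives from list200, y is overridden and z is added by list201, and the inner list is combined according to list_merge='append_rp'; with recursive=False (result201) the second dictionary replaces the first wholesale, which is why x disappears.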
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/aliases b/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/aliases
new file mode 100644
index 000000000..51baa3d7a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/tasks/main.yml
new file mode 100644
index 000000000..1462656fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_path_join_shim/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Test path_join filter"
+ assert:
+ that:
+ - "['a', 'b'] | community.general.path_join == 'a/b'"
+ - "['a', '/b'] | community.general.path_join == '/b'"
+ - "[''] | community.general.path_join == ''"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases
new file mode 100644
index 000000000..cee9abd2c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_random_mac/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml
new file mode 100644
index 000000000..230f9776d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_random_mac/tasks/main.yml
@@ -0,0 +1,62 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for filters
+# Copyright (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test random_mac filter bad argument type
+ debug:
+ var: "0 | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: true
+
+- name: Verify random_mac filter showed a bad argument type error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value type (.*int.*) for random_mac .*')"
+
+- name: Test random_mac filter bad argument value
+ debug:
+ var: "'dummy' | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: true
+
+- name: Verify random_mac filter showed a bad argument value error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: .* not hexa byte')"
+
+- name: Test random_mac filter prefix too big
+ debug:
+ var: "'00:00:00:00:00:00' | community.general.random_mac"
+ register: _bad_random_mac_filter
+ ignore_errors: true
+
+- name: Verify random_mac filter showed a prefix too big error message
+ assert:
+ that:
+ - _bad_random_mac_filter is failed
+ - "_bad_random_mac_filter.msg is match('Invalid value (.*) for random_mac: 5 colon.* separated items max')"
+
+- name: Verify random_mac filter
+ assert:
+ that:
+ - "'00' | community.general.random_mac is match('^00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00' | community.general.random_mac is match('^00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00' | community.general.random_mac is match('^00:00:00:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00:00:00' | community.general.random_mac is match('^00:00:00:00:00:[a-f0-9][a-f0-9]$')"
+ - "'00:00:00' | community.general.random_mac != '00:00:00' | community.general.random_mac"
+
+- name: Verify random_mac filter with seed
+ assert:
+ that:
+ - "'00:00:00' | community.general.random_mac(seed='test') == '00:00:00' | community.general.random_mac(seed='test')"
+ - "'00:00:00' | community.general.random_mac(seed='test') != '00:00:00' | community.general.random_mac(seed='another_test')"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_time/aliases b/ansible_collections/community/general/tests/integration/targets/filter_time/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_time/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml
new file mode 100644
index 000000000..3b6539499
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_time/tasks/main.yml
@@ -0,0 +1,115 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test zero is 0
+ assert:
+ that:
+ - "('0' | community.general.to_milliseconds) == 0"
+ - "('0' | community.general.to_seconds) == 0"
+ - "('0' | community.general.to_minutes) == 0"
+
+- name: test to_milliseconds filter
+ assert:
+ that:
+ - "('1000ms' | community.general.to_milliseconds) == 1000"
+ - "('1s' | community.general.to_milliseconds) == 1000"
+ - "('1m' | community.general.to_milliseconds) == 60000"
+
+- name: test to_seconds filter
+ assert:
+ that:
+ - "('1000msecs' | community.general.to_seconds) == 1"
+ - "('1ms' | community.general.to_seconds) == 0.001"
+ - "('12m' | community.general.to_seconds) == 720"
+ - "('300minutes' | community.general.to_seconds) == 18000"
+ - "('3h 12m' | community.general.to_seconds) == 11520"
+ - "('2days 3hours 12mins 15secs' | community.general.to_seconds) == 184335"
+ - "('2d -2d -12s' | community.general.to_seconds) == -12"
+
+- name: test to_minutes filter
+ assert:
+ that:
+ - "('30s' | community.general.to_minutes) == 0.5"
+ - "('12m' | community.general.to_minutes) == 12"
+ - "('3h 72m' | community.general.to_minutes) == 252"
+ - "('300s' | community.general.to_minutes) == 5"
+
+- name: test to_hours filter
+ assert:
+ that:
+ - "('30m' | community.general.to_hours) == 0.5"
+ - "('3h 119m 61s' | community.general.to_hours) > 5"
+
+- name: test to_days filter
+ assert:
+ that:
+ - "('1year' | community.general.to_days) == 365"
+ - "('1week' | community.general.to_days) == 7"
+ - "('2weeks' | community.general.to_days) == 14"
+ - "('1mo' | community.general.to_days) == 30"
+ - "('1mo' | community.general.to_days(month=28)) == 28"
+
+- name: test to_weeks filter
+ assert:
+ that:
+ - "('1y' | community.general.to_weeks | int) == 52"
+ - "('7d' | community.general.to_weeks) == 1"
+ - "('1mo' | community.general.to_weeks(month=28)) == 4"
+
+- name: test to_months filter
+ assert:
+ that:
+ - "('30d' | community.general.to_months) == 1"
+ - "('1year' | community.general.to_months | int) == 12"
+ - "('5years' | community.general.to_months(month=30, year=360)) == 60"
+ - "('1years' | community.general.to_months(month=2, year=34)) == 17"
+
+- name: test to_years filter
+ assert:
+ that:
+ - "('365d' | community.general.to_years | int) == 1"
+ - "('12mo' | community.general.to_years | round(0, 'ceil')) == 1"
+ - "('24mo' | community.general.to_years(month=30, year=360)) == 2"
+
+- name: test fail unknown unit
+ debug:
+ msg: "{{ '1s' | community.general.to_time_unit('lightyears') }}"
+ ignore_errors: true
+ register: res
+
+- name: verify test fail unknown unit
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() can not convert to the following unit: lightyears' in res.msg"
+
+- name: test fail unknown string
+ debug:
+ msg: "{{ '1 s' | community.general.to_time_unit('s') }}"
+ ignore_errors: true
+ register: res
+
+- name: verify test fail unknown string
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() can not interpret following string' in res.msg"
+
+- name: test fail unknown kwarg
+ debug:
+ msg: "{{ '1s' | community.general.to_time_unit('s', second=23) }}"
+ ignore_errors: true
+ register: res
+
+- name: verify test fail unknown kwarg
+ assert:
+ that:
+ - res is failed
+ - "'to_time_unit() got unknown keyword arguments' in res.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/aliases b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/tasks/main.yml
new file mode 100644
index 000000000..13902706e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/tasks/main.yml
@@ -0,0 +1,44 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test 'NFC' normalization
+ assert:
+ that:
+ - u_umlaut != u_umlaut_combining
+ - u_umlaut_combining != (u_umlaut_combining | community.general.unicode_normalize)
+ - u_umlaut == (u_umlaut_combining | community.general.unicode_normalize)
+
+- name: Test 'NFKC' normalization
+ assert:
+ that:
+ - latin_capital_i != roman_numeral_one
+ - latin_capital_i == (roman_numeral_one | community.general.unicode_normalize(form='NFKC'))
+
+- name: Register invalid input type
+ debug:
+ msg: "{{ 1 | community.general.unicode_normalize }}"
+ ignore_errors: true
+ register: invalid_input_type
+
+- name: Assert an invalid input type causes failure
+ assert:
+ that:
+ - invalid_input_type is failed
+
+- name: Register invalid form selection
+ debug:
+ msg: "{{ 'arbitrary text' | community.general.unicode_normalize(form='invalid') }}"
+ ignore_errors: true
+ register: invalid_form_selection
+
+- name: Assert invalid form selection causes failure
+ assert:
+ that:
+ - invalid_form_selection is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/vars/main.yml
new file mode 100644
index 000000000..ed4e2968b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_unicode_normalize/vars/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+u_umlaut: "{{ '\u00fc' }}"
+u_umlaut_combining: "{{ 'u' + '\u0308' }}"
+roman_numeral_one: "{{ '\u2160' }}"
+latin_capital_i: "{{ '\u0049' }}"
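These fixtures encode the two behaviours under test: NFC composes 'u' followed by U+0308 (combining diaeresis) into the single code point U+00FC, while NFKC additionally folds compatibility characters, mapping U+2160 (Roman numeral one) onto the plain letter U+0049. A minimal usage sketch (raw_text is a hypothetical variable):

    - name: Normalize text before comparison (illustrative)
      ansible.builtin.set_fact:
        normalized: "{{ raw_text | community.general.unicode_normalize(form='NFKC') }}"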
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_version_sort/aliases b/ansible_collections/community/general/tests/integration/targets/filter_version_sort/aliases
new file mode 100644
index 000000000..bc9b4bc99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_version_sort/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # filters are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/filter_version_sort/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/filter_version_sort/tasks/main.yml
new file mode 100644
index 000000000..08985d1ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/filter_version_sort/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: validate that versions are properly sorted in a stable way
+ assert:
+ that:
+ - "['a-1.9.rpm', 'a-1.10-1.rpm', 'a-1.09.rpm', 'b-1.01.rpm', 'a-2.1-0.rpm', 'a-1.10-0.rpm'] | community.general.version_sort == ['a-1.9.rpm', 'a-1.09.rpm', 'a-1.10-0.rpm', 'a-1.10-1.rpm', 'a-2.1-0.rpm', 'b-1.01.rpm']"
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/aliases b/ansible_collections/community/general/tests/integration/targets/flatpak/aliases
new file mode 100644
index 000000000..e462ed8cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/files/serve.py b/ansible_collections/community/general/tests/integration/targets/flatpak/files/serve.py
new file mode 100644
index 000000000..93df1036e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/files/serve.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import posixpath
+import sys
+
+try:
+ from http.server import SimpleHTTPRequestHandler, HTTPServer
+ from urllib.parse import unquote
+except ImportError:
+ from SimpleHTTPServer import SimpleHTTPRequestHandler
+ from BaseHTTPServer import HTTPServer
+ from urllib import unquote
+
+
+# Argument parsing
+if len(sys.argv) != 4:
+ print('Syntax: {0} <bind> <port> <path>'.format(sys.argv[0]))
+ sys.exit(-1)
+
+HOST, PORT, PATH = sys.argv[1:4]
+PORT = int(PORT)
+
+
+# The HTTP request handler
+class Handler(SimpleHTTPRequestHandler):
+ def translate_path(self, path):
+ # Modified from Python 3.6's version of SimpleHTTPRequestHandler
+ # to support using another base directory than CWD.
+
+ # abandon query parameters
+ path = path.split('?', 1)[0]
+ path = path.split('#', 1)[0]
+ # Don't forget explicit trailing slash when normalizing. Issue17324
+ trailing_slash = path.rstrip().endswith('/')
+ try:
+ path = unquote(path, errors='surrogatepass')
+        except (UnicodeDecodeError, TypeError):
+ path = unquote(path)
+ path = posixpath.normpath(path)
+ words = path.split('/')
+ words = filter(None, words)
+ path = PATH
+ for word in words:
+ if os.path.dirname(word) or word in (os.curdir, os.pardir):
+ # Ignore components that are not a simple file/directory name
+ continue
+ path = os.path.join(path, word)
+ if trailing_slash:
+ path += '/'
+ return path
+
+
+# Run simple HTTP server
+httpd = HTTPServer((HOST, PORT), Handler)
+
+try:
+ httpd.serve_forever()
+except KeyboardInterrupt:
+ pass
+
+httpd.server_close()
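The harness invokes this helper as serve.py <bind> <port> <path>; setup.yml below starts it with 127.0.0.1 8000 /tmp/flatpak/, so the http://127.0.0.1:8000/repo/*.flatpakref names used in the flatpak tasks resolve against the dummy repository prepared by setup_flatpak_remote.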
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml
new file mode 100644
index 000000000..0ac87654d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_flatpak_remote
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml
new file mode 100644
index 000000000..9f52dc122
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/check_mode.yml
@@ -0,0 +1,197 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# - Tests with absent flatpak --------------------------------------------------
+
+# state=present on absent flatpak
+
+- name: Test addition of absent flatpak (check mode)
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak test result (check mode)
+ assert:
+ that:
+ - addition_result is changed
+ msg: "Adding an absent flatpak shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak (check mode)
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: Verify non-existent idempotency of addition of absent flatpak test result (check mode)
+ assert:
+ that:
+ - double_addition_result is changed
+ msg: |
+ Adding an absent flatpak a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent on absent flatpak
+
+- name: Test removal of absent flatpak check mode
+ flatpak:
+ name: com.dummy.App1
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak test result (check mode)
+ assert:
+ that:
+ - removal_result is not changed
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+# state=present with url on absent flatpak
+
+- name: Test addition of absent flatpak with url (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ remote: dummy-remote
+ state: present
+ register: url_addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak with url test result (check mode)
+ assert:
+ that:
+ - url_addition_result is changed
+ msg: "Adding an absent flatpak from URL shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak with url (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ remote: dummy-remote
+ state: present
+ register: double_url_addition_result
+ check_mode: true
+
+- name: >
+    Verify non-existent idempotency of addition of absent flatpak with url test
+ result (check mode)
+ assert:
+ that:
+ - double_url_addition_result is changed
+ msg: |
+ Adding an absent flatpak from URL a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent with url on absent flatpak
+
+- name: Test removal of absent flatpak with url not doing anything (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ state: absent
+ register: url_removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak with url test result (check mode)
+ assert:
+ that:
+ - url_removal_result is not changed
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+# - Tests with present flatpak -------------------------------------------------
+
+# state=present on present flatpak
+
+- name: Test addition of present flatpak (check mode)
+ flatpak:
+ name: com.dummy.App2
+ remote: dummy-remote
+ state: present
+ register: addition_present_result
+ check_mode: true
+
+- name: Verify addition test result of present flatpak (check mode)
+ assert:
+ that:
+ - addition_present_result is not changed
+ msg: "Adding an present flatpak shall mark module execution as not changed"
+
+# state=absent on present flatpak
+
+- name: Test removal of present flatpak (check mode)
+ flatpak:
+ name: com.dummy.App2
+ state: absent
+ register: removal_present_result
+ check_mode: true
+
+- name: Verify removal of present flatpak test result (check mode)
+ assert:
+ that:
+ - removal_present_result is changed
+ msg: "Removing a present flatpak shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal (check mode)
+ flatpak:
+ name: com.dummy.App2
+ state: absent
+ register: double_removal_present_result
+ check_mode: true
+
+- name: Verify non-existent idempotency of removal (check mode)
+ assert:
+ that:
+ - double_removal_present_result is changed
+ msg: |
+ Removing a present flatpak a second time shall still mark module execution
+ as changed in check mode
+
+# state=present with url on present flatpak
+
+- name: Test addition with url of present flatpak (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ remote: dummy-remote
+ state: present
+ register: url_addition_present_result
+ check_mode: true
+
+- name: Verify addition with url of present flatpak test result (check mode)
+ assert:
+ that:
+ - url_addition_present_result is not changed
+ msg: "Adding a present flatpak from URL shall mark module execution as not changed"
+
+# state=absent with url on present flatpak
+
+- name: Test removal with url of present flatpak (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ state: absent
+ register: url_removal_present_result
+ check_mode: true
+
+- name: Verify removal with url of present flatpak test result (check mode)
+ assert:
+ that:
+ - url_removal_present_result is changed
+ msg: "Removing an absent flatpak shall mark module execution as not changed"
+
+- name: Test non-existent idempotency of removal with url of present flatpak (check mode)
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ remote: dummy-remote
+ state: absent
+ register: double_url_removal_present_result
+ check_mode: true
+
+- name: >
+    Verify non-existent idempotency of removal with url of present
+ flatpak test result (check mode)
+ assert:
+ that:
+ - double_url_removal_present_result is changed
+      msg: Removing a present flatpak a second time shall still mark module execution as changed
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml
new file mode 100644
index 000000000..deaf354e8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/main.yml
@@ -0,0 +1,64 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - executable_override_result is failed
+ - executable_override_result is not changed
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
+ always:
+
+ - name: Check HTTP server status
+ async_status:
+ jid: "{{ webserver_status.ansible_job_id }}"
+ ignore_errors: true
+
+ - name: List processes
+ command: ps aux
+
+ - name: Stop HTTP server
+ command: >-
+ pkill -f -- '{{ remote_tmp_dir }}/serve.py'
+
+ when: |
+ ansible_distribution == 'Fedora' or
+ ansible_distribution == 'Ubuntu' and not ansible_distribution_major_version | int < 16
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml
new file mode 100644
index 000000000..4dfdd68cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/setup.yml
@@ -0,0 +1,68 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+ become: true
+ when: ansible_distribution == 'Fedora'
+
+- block:
+ - name: Activate flatpak ppa on Ubuntu
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ when: ansible_lsb.major_release | int < 18
+
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+
+ when: ansible_distribution == 'Ubuntu'
+
+- name: Install dummy remote for user
+ flatpak_remote:
+ name: dummy-remote
+ state: present
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ method: user
+
+- name: Install dummy remote for system
+ flatpak_remote:
+ name: dummy-remote
+ state: present
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ method: system
+
+- name: Remove (if necessary) flatpak for testing check mode on absent flatpak
+ flatpak:
+ name:
+ - com.dummy.App1
+ - com.dummy.App3
+ remote: dummy-remote
+ state: absent
+ no_dependencies: true
+
+- name: Add flatpak for testing check mode on present flatpak
+ flatpak:
+ name: com.dummy.App2
+ remote: dummy-remote
+ state: present
+ no_dependencies: true
+
+- name: Copy HTTP server
+ copy:
+ src: serve.py
+ dest: '{{ remote_tmp_dir }}/serve.py'
+ mode: '0755'
+
+- name: Start HTTP server
+ command: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/serve.py 127.0.0.1 8000 /tmp/flatpak/'
+ async: 120
+ poll: 0
+ register: webserver_status
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml
new file mode 100644
index 000000000..29c4efbe9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak/tasks/test.yml
@@ -0,0 +1,289 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - addition_result is changed
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak:
+ name: com.dummy.App1
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - double_addition_result is not changed
+ msg: "state=present shall not do anything when flatpak is already present"
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak:
+ name: com.dummy.App1
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - removal_result is changed
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak:
+ name: com.dummy.App1
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - double_removal_result is not changed
+ msg: "state=absent shall not do anything when flatpak is not present"
+
+# state=present with url as name
+
+- name: Test addition with url - {{ method }}
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: url_addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - url_addition_result is changed
+ msg: "state=present with url as name shall add flatpak when absent"
+
+- name: Test idempotency of addition with url - {{ method }}
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_url_addition_result
+
+- name: Verify idempotency of addition with url test result - {{ method }}
+ assert:
+ that:
+ - double_url_addition_result is not changed
+ msg: "state=present with url as name shall not do anything when flatpak is already present"
+
+# state=absent with url as name
+
+- name: Test removal with url - {{ method }}
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+ register: url_removal_result
+ ignore_errors: true
+
+- name: Verify removal test result failed - {{ method }}
+  # It looks like flatpak has a bug when the URL's host part contains a port. If that is the case,
+  # it emits one of the following messages, which we check for. On any other error, we fail.
+ # Upstream issue: https://github.com/flatpak/flatpak/issues/4307
+ # (The second message happens with Ubuntu 18.04.)
+ assert:
+ that:
+ - >-
+ url_removal_result.msg in [
+ "error: Invalid branch 127.0.0.1:8000: Branch can't contain :",
+ "error: Invalid id http:: Name can't contain :",
+ ]
+ when: url_removal_result is failed
+
+- when: url_removal_result is not failed
+ block:
+
+ - name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - url_removal_result is changed
+ msg: "state=absent with url as name shall remove flatpak when present"
+
+ - name: Test idempotency of removal with url - {{ method }}
+ flatpak:
+ name: http://127.0.0.1:8000/repo/com.dummy.App1.flatpakref
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_url_removal_result
+
+ - name: Verify idempotency of removal with url test result - {{ method }}
+ assert:
+ that:
+ - double_url_removal_result is not changed
+ msg: "state=absent with url as name shall not do anything when flatpak is not present"
+
+- name: Make sure flatpak is really gone - {{ method }}
+ flatpak:
+ name: com.dummy.App1
+ state: absent
+ method: "{{ method }}"
+ no_dependencies: true
+
+# state=present with list of packages
+
+- name: Test addition with list - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: addition_result
+
+- name: Verify addition with list test result - {{ method }}
+ assert:
+ that:
+ - addition_result is changed
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition with list - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_addition_result
+
+- name: Verify idempotency of addition with list test result - {{ method }}
+ assert:
+ that:
+ - double_addition_result is not changed
+ msg: "state=present shall not do anything when flatpak is already present"
+
+- name: Test addition with list partially installed - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ - com.dummy.App3
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: addition_result
+
+- name: Verify addition with list partially installed test result - {{ method }}
+ assert:
+ that:
+ - addition_result is changed
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition with list partially installed - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - http://127.0.0.1:8000/repo/com.dummy.App2.flatpakref
+ - com.dummy.App3
+ remote: dummy-remote
+ state: present
+ method: "{{ method }}"
+ no_dependencies: true
+ register: double_addition_result
+
+- name: Verify idempotency of addition with list partially installed test result - {{ method }}
+ assert:
+ that:
+ - double_addition_result is not changed
+ msg: "state=present shall not do anything when flatpak is already present"
+
+# state=absent with list of packages
+
+- name: Test removal with list - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - com.dummy.App2
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal with list test result - {{ method }}
+ assert:
+ that:
+ - removal_result is changed
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal with list - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - com.dummy.App2
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal with list test result - {{ method }}
+ assert:
+ that:
+ - double_removal_result is not changed
+ msg: "state=absent shall not do anything when flatpak is not present"
+
+- name: Test removal with list partially removed - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - com.dummy.App2
+ - com.dummy.App3
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal with list partially removed test result - {{ method }}
+ assert:
+ that:
+ - removal_result is changed
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal with list partially removed - {{ method }}
+ flatpak:
+ name:
+ - com.dummy.App1
+ - com.dummy.App2
+ - com.dummy.App3
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal with list partially removed test result - {{ method }}
+ assert:
+ that:
+ - double_removal_result is not changed
+ msg: "state=absent shall not do anything when flatpak is not present"
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases
new file mode 100644
index 000000000..e462ed8cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml
new file mode 100644
index 000000000..0ac87654d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_flatpak_remote
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml
new file mode 100644
index 000000000..86db5bf56
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/check_mode.yml
@@ -0,0 +1,206 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# - Tests with absent flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - addition_result is changed
+ msg: "Adding an absent flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of addition of absent flatpak remote (check mode)
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: double_addition_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of addition of absent flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - double_addition_result is changed
+ msg: |
+ Adding an absent flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+# state=absent
+
+- name: Test removal of absent flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of absent flatpak remote test result (check mode)
+ assert:
+ that:
+ - removal_result is not changed
+ msg: "Removing an absent flatpak remote shall mark module execution as not changed"
+
+
+# - Tests with present flatpak remote -------------------------------------------
+
+# state=present
+
+- name: Test addition of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ register: addition_result
+ check_mode: true
+
+- name: Verify addition of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - addition_result is not changed
+ msg: "Adding a present flatpak remote shall mark module execution as not changed"
+
+# state=absent
+
+- name: Test removal of present flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: removal_result
+ check_mode: true
+
+- name: Verify removal of present flatpak remote test result (check mode)
+ assert:
+ that:
+ - removal_result is changed
+ msg: "Removing a present flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of removal of present flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-test-remote
+ state: absent
+ register: double_removal_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of removal of present flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - double_removal_result is changed
+ msg: |
+ Removing a present flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+
+# - Tests with disabled flatpak remote ------------------------------------------
+
+# enabled=true
+
+- name: Test activation of disabled flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-disabled-test-remote
+ enabled: true
+ register: activation_result
+ check_mode: true
+
+- name: Verify activation of disabled flatpak remote test result (check mode)
+ assert:
+ that:
+ - activation_result is changed
+ msg: "Enabling an disabled flatpak remote shall mark module execution as changed"
+
+- name: Test non-existent idempotency of activation of disabled flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-disabled-test-remote
+ enabled: true
+ register: double_activation_result
+ check_mode: true
+
+- name: >
+ Verify non-existent idempotency of activation of disabled flatpak remote
+ test result (check mode)
+ assert:
+ that:
+ - double_activation_result is changed
+ msg: |
+    Enabling a disabled flatpak remote a second time shall still mark module execution
+ as changed in check mode
+
+# enabled=false
+
+- name: Test deactivation of disabled flatpak remote not doing anything in check mode
+ flatpak_remote:
+ name: check-mode-disabled-test-remote
+ enabled: false
+ register: deactivation_result
+ check_mode: true
+
+- name: Verify deactivation of disabled flatpak remote test result (check mode)
+ assert:
+ that:
+ - deactivation_result is not changed
+ msg: "Disabling an disabled flatpak remote shall mark module execution as not changed"
+
+
+# - Tests with enabled flatpak remote ------------------------------------------
+
+# enabled=true
+
+- name: Test activation of enabled flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-enabled-test-remote
+ enabled: true
+ register: activation_result
+ check_mode: true
+
+- name: Verify activation of enabled flatpak remote test result (check mode)
+ assert:
+ that:
+ - activation_result is not changed
+ msg: "Enabling a enabled flatpak remote shall mark module execution as not changed"
+
+# enabled=false
+
+- name: Test deactivation of enabled flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-enabled-test-remote
+ enabled: false
+ register: deactivation_result
+ check_mode: true
+
+- name: Verify deactivation of enabled flatpak remote test result (check mode)
+ assert:
+ that:
+ - deactivation_result is changed
+ msg: "Disabling a enabled flatpak remote shall mark module execution as changed"
+
+- name: Test repeated deactivation of enabled flatpak remote (check mode)
+ flatpak_remote:
+ name: check-mode-enabled-test-remote
+ enabled: false
+ register: double_deactivation_result
+ check_mode: true
+
+- name: Verify repeated deactivation of enabled flatpak remote test result (check mode)
+ assert:
+ that:
+ - double_deactivation_result is changed
+ msg: |
+ "Disabling a enabled flatpak remote a second time shall still mark module execution
+ as changed in check mode
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml
new file mode 100644
index 000000000..1c5091232
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/main.yml
@@ -0,0 +1,50 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2018, Alexander Bethke <oolongbrothers@gmx.net>
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+
+ - import_tasks: setup.yml
+ become: true
+
+ # executable override
+
+ - name: Test executable override
+ flatpak_remote:
+ name: irrelevant
+ flatpakrepo_url: irrelevant
+ state: present
+ executable: nothing-that-exists
+ ignore_errors: true
+ register: executable_override_result
+
+ - name: Verify executable override test result
+ assert:
+ that:
+ - executable_override_result is failed
+ - executable_override_result is not changed
+ msg: "Specifying non-existing executable shall fail module execution"
+
+ - import_tasks: check_mode.yml
+ become: false
+
+ - import_tasks: test.yml
+ become: false
+ vars:
+ method: user
+
+ - import_tasks: test.yml
+ become: true
+ vars:
+ method: system
+
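+ # Run these tests only on Fedora, or on Ubuntu 16.04 and newer.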
+ when: |
+ ansible_distribution == 'Fedora' or
+ (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version | int >= 16)
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml
new file mode 100644
index 000000000..55a14c972
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/setup.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install flatpak on Fedora
+ dnf:
+ name: flatpak
+ state: present
+ when: ansible_distribution == 'Fedora'
+- block:
+ - name: Activate flatpak ppa on Ubuntu versions older than 18.04/bionic
+ apt_repository:
+ repo: ppa:alexlarsson/flatpak
+ state: present
+ mode: '0644'
+ when: ansible_lsb.major_release | int < 18
+ - name: Install flatpak package on Ubuntu
+ apt:
+ name: flatpak
+ state: present
+ when: ansible_distribution == 'Ubuntu'
+- name: Install flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ enabled: true
+- name: Install disabled flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-disabled-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ enabled: false
+- name: Install enabled flatpak remote for testing check mode
+ flatpak_remote:
+ name: check-mode-enabled-test-remote
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ enabled: true
diff --git a/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml
new file mode 100644
index 000000000..e847205ff
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/flatpak_remote/tasks/test.yml
@@ -0,0 +1,135 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
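+# These tasks are imported twice from main.yml: once with method=user and once
+# with method=system.
+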
+# state=present
+
+- name: Test addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: addition_result
+
+- name: Verify addition test result - {{ method }}
+ assert:
+ that:
+ - addition_result is changed
+ msg: "state=present shall add flatpak when absent"
+
+- name: Test idempotency of addition - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: /tmp/flatpak/repo/dummy-repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: double_addition_result
+
+- name: Verify idempotency of addition test result - {{ method }}
+ assert:
+ that:
+ - double_addition_result is not changed
+ msg: "state=present shall not do anything when flatpak is already present"
+
+- name: Test updating remote URL does not do anything - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ flatpakrepo_url: https://a.different/repo.flatpakrepo
+ state: present
+ method: "{{ method }}"
+ register: url_update_result
+
+- name: Verify updating remote URL does not do anything - {{ method }}
+ assert:
+ that:
+ - url_update_result is not changed
+ msg: "Trying to update the URL of an existing flatpak remote shall not do anything"
+
+
+# enabled=false
+
+- name: Test deactivation - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ enabled: false
+ method: "{{ method }}"
+ register: deactivation_result
+
+- name: Verify deactivation test result - {{ method }}
+ assert:
+ that:
+ - deactivation_result is changed
+ msg: "enable=false shall disable flatpak remote when enabled"
+
+- name: Test idempotency of deactivation - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ enabled: false
+ method: "{{ method }}"
+ register: double_deactivation_result
+
+- name: Verify idempotency of deactivation test result - {{ method }}
+ assert:
+ that:
+ - double_deactivation_result is not changed
+ msg: "enabled=false shall not do anything when flatpak remote is already disabled"
+
+
+# enabled=true
+
+- name: Test activation - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ enabled: true
+ method: "{{ method }}"
+ register: activation_result
+
+- name: Verify activation test result - {{ method }}
+ assert:
+ that:
+ - activation_result is changed
+ msg: "enable=true shall enable flatpak remote when disabled"
+
+- name: Test idempotency of activation - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ enabled: true
+ method: "{{ method }}"
+ register: double_activation_result
+
+- name: Verify idempotency of activation test result - {{ method }}
+ assert:
+ that:
+ - double_activation_result is not changed
+ msg: "enabled=true shall not do anything when flatpak remote is already enabled"
+
+
+# state=absent
+
+- name: Test removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: removal_result
+
+- name: Verify removal test result - {{ method }}
+ assert:
+ that:
+ - removal_result is changed
+ msg: "state=absent shall remove flatpak when present"
+
+- name: Test idempotency of removal - {{ method }}
+ flatpak_remote:
+ name: flatpak-test
+ state: absent
+ method: "{{ method }}"
+ register: double_removal_result
+
+- name: Verify idempotency of removal test result - {{ method }}
+ assert:
+ that:
+ - double_removal_result is not changed
+ msg: "state=absent shall not do anything when flatpak is not present"
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/aliases b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/aliases
new file mode 100644
index 000000000..f69a127f4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/gandi
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/defaults/main.yml
new file mode 100644
index 000000000..ec1808d8b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/defaults/main.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gandi_livedns_domain_name: "ansible-tests.org"
+gandi_livedns_record_items:
+
+# Single A record
+- record: test-www
+ type: A
+ values:
+ - 10.10.10.10
+ ttl: 400
+ update_values:
+ - 10.10.10.11
+ update_ttl: 800
+
+# Multiple A records
+- record: test-www-multiple
+ type: A
+ ttl: 3600
+ values:
+ - 10.10.11.10
+ - 10.10.11.12
+ update_values:
+ - 10.10.11.11
+ - 10.10.11.13
+
+# CNAME
+- record: test-cname
+ type: CNAME
+ ttl: 10800
+ values:
+ - test-www2
+ update_values:
+ - test-www
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/create_record.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/create_record.yml
new file mode 100644
index 000000000..c3f1c1798
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/create_record.yml
@@ -0,0 +1,69 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
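+# Note: record values are accessed as item['values'] because dot notation in
+# Jinja2 would resolve to the built-in dict .values() method instead.
+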
+- name: test absent dns record
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ type: "{{ item.type }}"
+ ttl: "{{ item.ttl }}"
+ state: absent
+ register: result
+- name: verify test absent dns record
+ assert:
+ that:
+ - result is successful
+
+- name: test create a dns record in check mode
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item['values'] }}"
+ ttl: "{{ item.ttl }}"
+ type: "{{ item.type }}"
+ check_mode: true
+ register: result
+- name: verify test create a dns record in check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test create a dns record
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item['values'] }}"
+ ttl: "{{ item.ttl }}"
+ type: "{{ item.type }}"
+ register: result
+- name: verify test create a dns record
+ assert:
+ that:
+ - result is changed
+ - result.record['values'] == item['values']
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == item.ttl
+
+- name: test create a dns record idempotence
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item['values'] }}"
+ ttl: "{{ item.ttl }}"
+ type: "{{ item.type }}"
+ register: result
+- name: verify test create a dns record idempotence
+ assert:
+ that:
+ - result is not changed
+ - result.record['values'] == item['values']
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == item.ttl
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/main.yml
new file mode 100644
index 000000000..19ba4d8fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: record.yml
+ with_items: "{{ gandi_livedns_record_items }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/record.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/record.yml
new file mode 100644
index 000000000..d36e2e857
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/record.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
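+# Each record item is exercised through its full lifecycle: create, update, remove.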
+- include_tasks: create_record.yml
+- include_tasks: update_record.yml
+- include_tasks: remove_record.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/remove_record.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/remove_record.yml
new file mode 100644
index 000000000..c4b937fd5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/remove_record.yml
@@ -0,0 +1,61 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test remove a dns record in check mode
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ type: "{{ item.type }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: verify test remove a dns record in check mode
+ assert:
+ that:
+ - result is changed
+
+- name: test remove a dns record
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ type: "{{ item.type }}"
+ state: absent
+ register: result
+- name: verify test remove a dns record
+ assert:
+ that:
+ - result is changed
+
+- name: test remove a dns record idempotence
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ type: "{{ item.type }}"
+ state: absent
+ register: result
+- name: verify test remove a dns record idempotence
+ assert:
+ that:
+ - result is not changed
+
+- name: test remove second dns record idempotence
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item['values'] }}"
+ type: "{{ item.type }}"
+ state: absent
+ register: result
+- name: verify test remove second dns record idempotence
+ assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/update_record.yml b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/update_record.yml
new file mode 100644
index 000000000..a080560a7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gandi_livedns/tasks/update_record.yml
@@ -0,0 +1,59 @@
+---
+# Copyright (c) 2020 Gregory Thiemonge <gregory.thiemonge@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
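+# 'update_values' and 'update_ttl' fall back to the item's original 'values'
+# and 'ttl' when a record item does not define them.
+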
+- name: test update or add another dns record in check mode
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ ttl: "{{ item.update_ttl | default(item.ttl) }}"
+ type: "{{ item.type }}"
+ check_mode: true
+ register: result
+- name: verify test update or add another dns record in check mode
+ assert:
+ that:
+ - result is changed
+ - result.record['values'] == item.update_values | default(item['values'])
+ - result.record.record == item.record
+ - result.record.type == item.type
+ - result.record.ttl == item.update_ttl | default(item.ttl)
+
+- name: test update or add another dns record
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ ttl: "{{ item.update_ttl | default(item.ttl) }}"
+ type: "{{ item.type }}"
+ register: result
+- name: verify test update a dns record
+ assert:
+ that:
+ - result is changed
+ - result.record['values'] == item.update_values | default(item['values'])
+ - result.record.record == item.record
+ - result.record.ttl == item.update_ttl | default(item.ttl)
+ - result.record.type == item.type
+
+- name: test update or add another dns record idempotence
+ community.general.gandi_livedns:
+ api_key: "{{ gandi_api_key }}"
+ record: "{{ item.record }}"
+ domain: "{{ gandi_livedns_domain_name }}"
+ values: "{{ item.update_values | default(item['values']) }}"
+ ttl: "{{ item.update_ttl | default(item.ttl) }}"
+ type: "{{ item.type }}"
+ register: result
+- name: verify test update a dns record idempotence
+ assert:
+ that:
+ - result is not changed
+ - result.record['values'] == item.update_values | default(item['values'])
+ - result.record.record == item.record
+ - result.record.ttl == item.update_ttl | default(item.ttl)
+ - result.record.type == item.type
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/aliases b/ansible_collections/community/general/tests/integration/targets/gem/aliases
new file mode 100644
index 000000000..007bed538
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml
new file mode 100644
index 000000000..362c126bf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/tasks/main.yml
@@ -0,0 +1,213 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the gem module
+# Copyright (c) 2014, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when:
+ - not (ansible_os_family == 'Alpine') # TODO
+ block:
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'default.yml'
+ paths: '../vars'
+
+ - name: Install dependencies for test
+ package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ test_packages }}"
+ when: ansible_distribution != "MacOSX"
+
+ - name: Install a gem
+ gem:
+ name: gist
+ state: present
+ register: install_gem_result
+ ignore_errors: true
+
+ # When running as root on Fedora, '--install-dir' is set in the OS defaults, which is
+ # incompatible with '--user-install'; we ignore this error in that case only
+ - name: fail if failed to install gem
+ fail:
+ msg: "failed to install gem: {{ install_gem_result.msg }}"
+ when:
+ - install_gem_result is failed
+ - not (ansible_user_uid == 0 and "User --install-dir or --user-install but not both" in install_gem_result.msg)
+
+ - block:
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: not install_gem_result is failed
+
+ # install gem in --no-user-install
+ - block:
+ - name: Install a gem with --no-user-install
+ gem:
+ name: gist
+ state: present
+ user_install: false
+ register: install_gem_result
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Ensure gem was installed
+ assert:
+ that:
+ - install_gem_result is changed
+ - current_gems.stdout is search('gist\s+\([0-9.]+\)')
+
+ - name: Remove a gem
+ gem:
+ name: gist
+ state: absent
+ register: remove_gem_results
+
+ - name: List gems
+ command: gem list
+ register: current_gems
+
+ - name: Verify gem is not installed
+ assert:
+ that:
+ - remove_gem_results is changed
+ - current_gems.stdout is not search('gist\s+\([0-9.]+\)')
+ when: ansible_user_uid == 0
+
+ # Check custom gem directory
+ - name: Install gem in a custom directory with incorrect options
+ gem:
+ name: gist
+ state: present
+ install_dir: "{{ remote_tmp_dir }}/gems"
+ ignore_errors: true
+ register: install_gem_fail_result
+
+ - debug:
+ var: install_gem_fail_result
+ tags: debug
+
+ - name: Ensure previous task failed
+ assert:
+ that:
+ - install_gem_fail_result is failed
+ - install_gem_fail_result.msg == 'install_dir requires user_install=false'
+
+ - name: Install a gem in a custom directory
+ gem:
+ name: gist
+ state: present
+ user_install: false
+ install_dir: "{{ remote_tmp_dir }}/gems"
+ register: install_gem_result
+
+ - name: Find gems in custom directory
+ find:
+ paths: "{{ remote_tmp_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
+
+ - name: Ensure gem was installed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_search.files[0].path is search('gist-[0-9.]+')
+ ignore_errors: true
+
+ - name: Remove a gem in a custom directory
+ gem:
+ name: gist
+ state: absent
+ user_install: false
+ install_dir: "{{ remote_tmp_dir }}/gems"
+ register: install_gem_result
+
+ - name: Find gems in custom directory
+ find:
+ paths: "{{ remote_tmp_dir }}/gems/gems"
+ file_type: directory
+ contains: gist
+ register: gem_search
+
+ - name: Ensure gem was removed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_search.files | length == 0
+
+ # Custom directory for executables (--bindir)
+ - name: Install gem with custom bindir
+ gem:
+ name: gist
+ state: present
+ bindir: "{{ remote_tmp_dir }}/custom_bindir"
+ norc: true
+ user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
+ register: install_gem_result
+
+ - name: Get stats of gem executable
+ stat:
+ path: "{{ remote_tmp_dir }}/custom_bindir/gist"
+ register: gem_bindir_stat
+
+ - name: Ensure gem executable was installed in custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - gem_bindir_stat.stat.exists and gem_bindir_stat.stat.isreg
+
+ - name: Remove gem with custom bindir
+ gem:
+ name: gist
+ state: absent
+ bindir: "{{ remote_tmp_dir }}/custom_bindir"
+ norc: true
+ user_install: false # Avoid conflicts between --install-dir and --user-install when running as root on CentOS / Fedora / RHEL
+ register: install_gem_result
+
+ - name: Get stats of gem executable
+ stat:
+ path: "{{ remote_tmp_dir }}/custom_bindir/gist"
+ register: gem_bindir_stat
+
+ - name: Ensure gem executable was removed from custom directory
+ assert:
+ that:
+ - install_gem_result is changed
+ - not gem_bindir_stat.stat.exists
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml
new file mode 100644
index 000000000..b9d9cc2c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/vars/FreeBSD.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_packages:
+ - "devel/ruby-gems"
+ - "ruby"
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml
new file mode 100644
index 000000000..2bb724bfe
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/vars/RedHat.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_packages:
+ - "rubygems"
diff --git a/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml
new file mode 100644
index 000000000..b7496e12c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gem/vars/default.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_packages: []
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/aliases b/ansible_collections/community/general/tests/integration/targets/git_config/aliases
new file mode 100644
index 000000000..7b8c653de
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+skip/aix
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig b/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig
new file mode 100644
index 000000000..92eeb7eb9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/files/gitconfig
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[http]
+ proxy = foo
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/git_config/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
new file mode 100644
index 000000000..e294a83fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/exclusion_state_list-all.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_no_value.yml
+
+- name: testing exclusion between state and list_all parameters
+ git_config:
+ list_all: true
+ state: absent
+ register: result
+ ignore_errors: true
+
+- name: assert git_config failed
+ assert:
+ that:
+ - result is failed
+ - "result.msg == 'parameters are mutually exclusive: list_all|state'"
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml
new file mode 100644
index 000000000..4e41bf4e9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_no_state.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_no_value.yml
+
+- name: setting value without state
+ git_config:
+ name: "{{ option_name }}"
+ value: "{{ option_value }}"
+ scope: "{{ option_scope }}"
+ register: set_result
+
+- name: getting value without state
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert set changed and value is correct
+ assert:
+ that:
+ - set_result is changed
+ - set_result.diff.before == "\n"
+ - set_result.diff.after == option_value + "\n"
+ - get_result is not changed
+ - get_result.config_value == option_value
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml
new file mode 100644
index 000000000..cfc3bbe78
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_no_value.yml
+
+- name: setting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ value: "{{ option_value }}"
+ scope: "{{ option_scope }}"
+ state: present
+ register: set_result
+
+- name: getting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: present
+ register: get_result
+
+- name: assert set changed and value is correct with state=present
+ assert:
+ that:
+ - set_result is changed
+ - set_result.diff.before == "\n"
+ - set_result.diff.after == option_value + "\n"
+ - get_result is not changed
+ - get_result.config_value == option_value
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml
new file mode 100644
index 000000000..a61ffcc68
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/get_set_state_present_file.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_no_value.yml
+
+- name: setting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ value: "{{ option_value }}"
+ scope: "file"
+ file: "{{ remote_tmp_dir }}/gitconfig_file"
+ state: present
+ register: set_result
+
+- name: getting value with state=present
+ git_config:
+ name: "{{ option_name }}"
+ scope: "file"
+ file: "{{ remote_tmp_dir }}/gitconfig_file"
+ state: present
+ register: get_result
+
+- name: assert set changed and value is correct with state=present
+ assert:
+ that:
+ - set_result is changed
+ - set_result.diff.before == "\n"
+ - set_result.diff.after == option_value + "\n"
+ - get_result is not changed
+ - get_result.config_value == option_value
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml
new file mode 100644
index 000000000..4dc72824c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the git_config module
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: setup
+ import_tasks: setup.yml
+
+- block:
+ # testing parameters exclusion: state and list_all
+ - import_tasks: exclusion_state_list-all.yml
+ # testing get/set option without state
+ - import_tasks: get_set_no_state.yml
+ # testing get/set option with state=present
+ - import_tasks: get_set_state_present.yml
+ # testing get/set option with state=present and scope=file
+ - import_tasks: get_set_state_present_file.yml
+ # testing state=absent without value to delete
+ - import_tasks: unset_no_value.yml
+ # testing state=absent with value to delete
+ - import_tasks: unset_value.yml
+ # testing state=absent with value to delete and a defined value parameter
+ - import_tasks: precedence_between_unset_and_value.yml
+ # testing state=absent with check mode
+ - import_tasks: unset_check_mode.yml
+ # testing for case in issue #1776
+ - import_tasks: set_value_with_tilde.yml
+ when: git_installed is succeeded and git_version.stdout is version(git_version_supporting_includes, ">=")
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
new file mode 100644
index 000000000..a76fbab9c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/precedence_between_unset_and_value.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ value: bar
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed and deleted value
+ assert:
+ that:
+ - unset_result is changed
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == ''
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml
new file mode 100644
index 000000000..f78e709bd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/set_value_with_tilde.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#- import_tasks: setup_no_value.yml
+
+- name: setting value
+ git_config:
+ name: core.hooksPath
+ value: '~/foo/bar'
+ state: present
+ scope: global
+ register: set_result
+
+- name: setting value again
+ git_config:
+ name: core.hooksPath
+ value: '~/foo/bar'
+ state: present
+ scope: global
+ register: set_result2
+
+- name: getting value
+ git_config:
+ name: core.hooksPath
+ scope: global
+ register: get_result
+
+- name: assert set changed and value is correct
+ assert:
+ that:
+ - set_result is changed
+ - set_result2 is not changed
+ - get_result is not changed
+ - get_result.config_value == '~/foo/bar'
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml
new file mode 100644
index 000000000..6e5516da5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: verify that git is installed so this test can continue
+ command: which git
+ register: git_installed
+ ignore_errors: true
+
+- name: get git version; {{ git_version_supporting_includes }} and newer support the includes option
+ shell: "git --version | grep 'git version' | sed 's/git version //'"
+ register: git_version
+ ignore_errors: true
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml
new file mode 100644
index 000000000..8e12c350c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_no_value.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ------
+# set up : deleting gitconfig file
+- name: set up without value
+ file:
+ path: ~/.gitconfig
+ state: absent
+
+- name: set up without value (file)
+ file:
+ path: "{{ remote_tmp_dir }}/gitconfig_file"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml
new file mode 100644
index 000000000..126b1ae4b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/setup_value.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ------
+# set up : set gitconfig with value
+- name: set up with value
+ copy:
+ src: gitconfig
+ dest: ~/.gitconfig
+
+- name: set up with value (file)
+ copy:
+ src: gitconfig
+ dest: "{{ remote_tmp_dir }}/gitconfig_file"
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml
new file mode 100644
index 000000000..39bce3379
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_check_mode.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_value.yml
+
+- name: unsetting value with check mode
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ check_mode: true
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed but did not delete value
+ assert:
+ that:
+ - unset_result is changed
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == option_value
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml
new file mode 100644
index 000000000..394276cad
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_no_value.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_no_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unsetting didn't change
+ assert:
+ that:
+ - unset_result is not changed
+ - unset_result.msg == 'no setting to unset'
+ - get_result.config_value == ''
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml
new file mode 100644
index 000000000..dfa535a2d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/tasks/unset_value.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_value.yml
+
+- name: unsetting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ state: absent
+ register: unset_result
+
+- name: getting value
+ git_config:
+ name: "{{ option_name }}"
+ scope: "{{ option_scope }}"
+ register: get_result
+
+- name: assert unset changed and deleted value
+ assert:
+ that:
+ - unset_result is changed
+ - unset_result.diff.before == option_value + "\n"
+ - unset_result.diff.after == "\n"
+ - get_result.config_value == ''
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml
new file mode 100644
index 000000000..3cca3ef6e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/git_config/vars/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+git_version_supporting_includes: 1.7.10
+option_name: http.proxy
+option_value: 'foo'
+option_scope: global
+...
diff --git a/ansible_collections/community/general/tests/integration/targets/github_issue/aliases b/ansible_collections/community/general/tests/integration/targets/github_issue/aliases
new file mode 100644
index 000000000..428e8289d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/github_issue/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml
new file mode 100644
index 000000000..a7e43c171
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/github_issue/tasks/main.yml
@@ -0,0 +1,38 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the github_issue module.
+#
+# Copyright (c) 2017-2018, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
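+# These checks run against the public ansible/ansible repository; the issue
+# numbers and organization are defined in vars/main.yml.
+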
+- name: Check if GitHub issue is closed or not
+ github_issue:
+ organization: "{{ organization }}"
+ repo: "{{ repo }}"
+ issue: "{{ issue }}"
+ action: get_status
+ register: get_status_0002
+
+- assert:
+ that:
+ - get_status_0002 is changed
+ - get_status_0002.issue_status == 'closed'
+
+- name: Check status of a non-existent GitHub issue
+ github_issue:
+ organization: "{{ organization }}"
+ repo: "{{ repo }}"
+ issue: "{{ non_existent_issue }}"
+ action: get_status
+ register: get_status_0003
+ ignore_errors: true
+
+- assert:
+ that:
+ - get_status_0003 is not changed
+ - get_status_0003 is failed
+ - "'Failed' in get_status_0003.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml
new file mode 100644
index 000000000..8b2a2de6e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/github_issue/vars/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+issue: 23642
+non_existent_issue: 1111111
+organization: ansible
+repo: ansible
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_branch/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/aliases
new file mode 100644
index 000000000..d163e8d9c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_branch/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/defaults/main.yml
new file mode 100644
index 000000000..a5f0a0751
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_branch: ansible_test_branch
+gitlab_project_name: ansible_test_project
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_branch/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/tasks/main.yml
new file mode 100644
index 000000000..19d90e15c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_branch/tasks/main.yml
@@ -0,0 +1,69 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
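+# gitlab_host and gitlab_login_token are expected to be supplied by the
+# integration test configuration.
+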
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ initialize_with_readme: true
+ state: present
+
+- name: Create branch {{ gitlab_branch }}
+ community.general.gitlab_branch:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ branch: "{{ gitlab_branch }}"
+ ref_branch: main
+ state: present
+
+- name: Create branch {{ gitlab_branch }} (idempotency test)
+ community.general.gitlab_branch:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ branch: "{{ gitlab_branch }}"
+ ref_branch: main
+ state: present
+ register: create_branch
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - create_branch is not changed
+
+- name: Cleanup branch {{ gitlab_branch }}
+ community.general.gitlab_branch:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ branch: "{{ gitlab_branch }}"
+ state: absent
+ register: delete_branch
+
+- name: Test branch was deleted
+ assert:
+ that:
+ - delete_branch is changed
+
+- name: Clean up {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml
new file mode 100644
index 000000000..8225571b6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_project_name: ansible_test_project
+gitlab_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnTYY7CYk1F/wBklpdRxudxN6KeXgfhutkiCigSfPhe ansible_test"
+gitlab_deploy_key_new: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDL1TDkIY2uu6NYRD0G5qGeHTd/AoqQpCw1XENXDnTLDN5DNZVCO1+7xfA5DR5V2tcR691Q005BKxoCo+uUBd1aAM7JWyuXl050rZCXBj4oaUF7urjDANQ7FzYuvqp9h8NGkvzfBYz5YBfu4vh43ajnF0daSyZy4RlxeG9G44vnHElXTQ0igaOCSta/23FdERIYzKxuX4Ul42AwtSmCRwbkN4fC86o0UwW2q0zkgFOUoojtS/Avh0aX8UQyeagaPJFXCc/ldG1mMK020GQAEa8aQcUpysnEzZdq6no5Zyn/WQSobpnJ9CraHhdb1QQytg/+c+CgjSN0cERhTvLn0WsQ043jo5g1kSHNu+OiYXmVwTxe95nXCsoYmCNF/DmezjYVxe9BGlKRAEuHsNi87Il84nBnzKVHGlkq8eJNTR8ASjNkjI7pGS0zxCDB55c3LHh4Aa1xU+nwINRurn/TEDpDZc43/XOnt+aqbxkeWbMtOD/r2gfMj8lNZJ/IyamWy7HcFgGpTZJln4WxVLF+Cz56qa8Hf9WzJL+8Lq7eE3sJKOagn/zPgqeybXbTIPSr3fshq3yE8FYHpFKS4aLvQC/XSLCywrhr25DKBn9UHIZmgC9hxMnVJCKux+ltwGJOKIaoj+5n3+DvM+E3fK3fkADo5+Frzay6/rLTwKWUrzfjQQ== ansible_test_new"
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml
new file mode 100644
index 000000000..c345c2467
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_deploy_key/tasks/main.yml
@@ -0,0 +1,78 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Cleanup deploy key from {{ gitlab_project_name }}
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key }}"
+ state: absent
+
+
+- name: Add deploy key to {{ gitlab_project_name }}
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "root/{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key }}"
+ state: present
+ register: deploy_key_status
+
+- assert:
+ that:
+ - deploy_key_status is changed
+ - deploy_key_status.deploy_key.key == gitlab_deploy_key
+
+
+- name: Update public key {{ gitlab_project_name }} (change expected)
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "root/{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key_new }}"
+ state: present
+ register: deploy_key_status
+
+- assert:
+ that:
+ - deploy_key_status is changed
+ - deploy_key_status.deploy_key.key == gitlab_deploy_key_new
+
+- name: Update public key {{ gitlab_project_name }} (no change expected)
+ gitlab_deploy_key:
+ login_token: "{{ gitlab_login_token }}"
+ project: "root/{{ gitlab_project_name }}"
+ server_url: "{{ gitlab_host }}"
+ title: "{{ gitlab_project_name }}"
+ key: "{{ gitlab_deploy_key_new }}"
+ state: present
+ register: deploy_key_status
+
+- assert:
+ that:
+ - deploy_key_status is not changed
+ - deploy_key_status.deploy_key.key == gitlab_deploy_key_new
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml
new file mode 100644
index 000000000..01863abe3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_group: ansible_test_project
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml
new file mode 100644
index 000000000..a0355094f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group/tasks/main.yml
@@ -0,0 +1,129 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Cleanup GitLab Group
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: present
+ register: gitlab_group_state
+
+- name: Test group created
+ assert:
+ that:
+ - gitlab_group_state is changed
+
+
+- name: Create GitLab Group ( Idempotency test )
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: present
+ register: gitlab_group_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - gitlab_group_state_again is not changed
+
+- name: Cleanup GitLab Group for Description Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group for Description Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ description: My Test Group
+ state: present
+ register: gitlab_group_state_desc
+
+- name: Test group created with Description
+ assert:
+ that:
+ - gitlab_group_state_desc.group.description == "My Test Group"
+
+- name: Cleanup GitLab Group for project_creation_level Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group for project_creation_level Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ project_creation_level: noone
+ state: present
+ register: gitlab_group_state_pcl
+
+- name: Test group created with project_creation_level
+ assert:
+ that:
+ - gitlab_group_state_pcl.group.project_creation_level == "noone"
+
+- name: Cleanup GitLab Group for require_two_factor_authentication Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ state: absent
+
+- name: Create GitLab Group for require_two_factor_authentication Test
+ gitlab_group:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ name: ansible_test_group
+ path: ansible_test_group
+ require_two_factor_authentication: true
+ state: present
+ register: gitlab_group_state_rtfa
+
+- name: Test group created with require_two_factor_authentication
+ assert:
+ that:
+ - gitlab_group_state_rtfa.group.require_two_factor_authentication == true
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml
new file mode 100644
index 000000000..aa75096da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/tasks/main.yml
@@ -0,0 +1,75 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for gitlab_group_members module
+#
+# Copyright (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Install required library
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Add a User to A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ username }}'
+ access_level: '{{ gitlab_access_level }}'
+ state: present
+
+- name: Remove a User from A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ username }}'
+ state: absent
+
+- name: Add a list of Users to A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ userlist }}'
+ access_level: '{{ gitlab_access_level }}'
+ state: present
+
+- name: Remove a list of Users from A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ userlist }}'
+ state: absent
+
+- name: Add a list of Users with Dedicated Access Levels to A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_users_access: '{{ dedicated_access_users }}'
+ state: present
+
+- name: Remove a list of Users with Dedicated Access Levels from A GitLab Group
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_users_access: '{{ dedicated_access_users }}'
+ state: absent
+
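+# purge_users removes existing members at the given access level that are not in gitlab_user.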
+- name: Add a user, remove all others which might be on this access level
+ gitlab_group_members:
+ api_url: '{{ gitlab_server_url }}'
+ api_token: '{{ gitlab_api_access_token }}'
+ gitlab_group: '{{ gitlab_group_name }}'
+ gitlab_user: '{{ username }}'
+ access_level: '{{ gitlab_access_level }}'
+ purge_users: '{{ gitlab_access_level }}'
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml
new file mode 100644
index 000000000..908260418
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group_members/vars/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_server_url: https://gitlabserver.example.com
+gitlab_api_access_token: 126hngbscx890cv09b
+gitlab_group_name: groupname1
+username: username1
+gitlab_access_level: developer
+userlist:
+ - username1
+ - username2
+dedicated_access_users:
+ - name: username1
+ access_level: "developer"
+ - name: username2
+ access_level: "maintainer"
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml
new file mode 100644
index 000000000..39a3a5df8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_group_variable/tasks/main.yml
@@ -0,0 +1,710 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: purge all variables for check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: true
+
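+# check_mode only predicts the change; the variable is actually created by the "apply" task further down.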
+- name: add a variable value in check_mode
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: true
+ register: gitlab_group_variable_state
+
+- name: check_mode state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: apply add value from check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ variables:
+ - name: ACCESS_KEY_ID
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
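+# The module accepts both the dict-style "vars" and the list-style "variables" parameter; both are exercised here.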
+- name: test new format
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: change protected attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: true
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert protected attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: false
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: change masked attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert masked attribute by not mentioning it
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert masked attribute again by not mentioning it (idempotent)
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: set both (masked and protected) attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ protected: true
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: set again both (masked and protected) attribute (idempotent)
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ protected: true
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: revert both (masked and protected) attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: false
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: change a variable value in check_mode again
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: true
+ register: gitlab_group_variable_state
+
+- name: check_mode state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
+- name: apply again the value change from check_mode test
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_group_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+
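+# Environment-scoped group variables require GitLab Premium, hence the gitlab_premium_tests guard.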
+- name: change environment scope
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ environment_scope: testing
+ value: checkmode
+ register: gitlab_group_variable_state
+ when: gitlab_premium_tests is defined
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ when: gitlab_premium_tests is defined
+
+- name: apply again the environment scope change
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ environment_scope: testing
+ value: checkmode
+ register: gitlab_group_variable_state
+ when: gitlab_premium_tests is defined
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+ when: gitlab_premium_tests is defined
+
+- name: purge all variables at the beginning
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: true
+
+- name: set two test variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_group_variable_state
+
+- name: set two test variables state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 2
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: re-set two test variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_group_variable_state
+
+- name: re-set two test variables state must not be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is not changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 2
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: edit one variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: edit one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+ - gitlab_group_variable_state.group_variable.updated[0] == "ACCESS_KEY_ID"
+
+- name: append one variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 1
+ - gitlab_group_variable_state.group_variable.untouched|length == 2
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.added[0] == "some"
+
+- name: re-set all variables
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ SECRET_ACCESS_KEY: 321cba
+ some: value
+ register: gitlab_group_variable_state
+
+- name: re-set all variables state must not be changed
+ assert:
+ that:
+ - not gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 3
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: set one variable and purge all others
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: true
+ register: gitlab_group_variable_state
+
+- name: set one variable and purge all others state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 2
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: only one variable is left
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: value
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: only one variable is left state must not be changed
+ assert:
+ that:
+ - not gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 1
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.untouched[0] == "some"
+
+- name: test integer values
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: 42
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: test integer values state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+
+- name: test float values
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ some: 42.23
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: test float values state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 1
+
+- name: delete the last left variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ state: absent
+ vars:
+ some: value
+ register: gitlab_group_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 1
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.removed[0] == "some"
+
+- name: add one variable with variable_type file
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ variables:
+ - name: my_test_var
+ value: my_test_value
+ variable_type: file
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 1
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.added[0] == "my_test_var"
+
+- name: change variable_type attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: env_var
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: revert variable_type attribute
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ register: gitlab_group_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+
+- name: delete the variable_type file variable
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ state: absent
+ vars:
+ my_test_var: my_test_value
+ register: gitlab_group_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 1
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ - gitlab_group_variable_state.group_variable.removed[0] == "my_test_var"
+
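+# 20 variables fill exactly one API page (GitLab's default page size), exercising the module's pagination handling.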
+- name: set complete page and purge existing ones
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ page1_var01: value
+ page1_var02: value
+ page1_var03: value
+ page1_var04: value
+ page1_var05: value
+ page1_var06: value
+ page1_var07: value
+ page1_var08: value
+ page1_var09: value
+ page1_var10: value
+ page1_var11: value
+ page1_var12: value
+ page1_var13: value
+ page1_var14: value
+ page1_var15: value
+ page1_var16: value
+ page1_var17: value
+ page1_var18: value
+ page1_var19: value
+ page1_var20: value
+ purge: true
+ register: gitlab_group_variable_state
+
+- name: complete page added state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 20
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+
+- name: set complete page and keep existing ones
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ vars:
+ page2_var01: value
+ page2_var02: value
+ page2_var03: value
+ page2_var04: value
+ page2_var05: value
+ page2_var06: value
+ page2_var07: value
+ page2_var08: value
+ page2_var09: value
+ page2_var10: value
+ page2_var11: value
+ page2_var12: value
+ page2_var13: value
+ page2_var14: value
+ page2_var15: value
+ page2_var16: value
+ page2_var17: value
+ page2_var18: value
+ page2_var19: value
+ page2_var20: value
+ purge: false
+ register: gitlab_group_variable_state
+
+- name: existing page untouched state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state is changed
+ - gitlab_group_variable_state.group_variable.added|length == 20
+ - gitlab_group_variable_state.group_variable.untouched|length == 20
+
+- name: check that no variables are left
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: true
+ register: gitlab_group_variable_state
+
+- name: check that no variables are left state must be changed
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 0
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 40
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+
+- name: same vars, diff scope
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: true
+ variables:
+ - name: SECRET_ACCESS_KEY
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: production
+ - name: SECRET_ACCESS_KEY
+ value: hello_world
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: development
+ register: gitlab_group_variable_state
+ when: gitlab_premium_tests is defined
+
+- name: verify two vars
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.added|length == 2
+ - gitlab_group_variable_state.group_variable.untouched|length == 0
+ - gitlab_group_variable_state.group_variable.removed|length == 0
+ - gitlab_group_variable_state.group_variable.updated|length == 0
+ when: gitlab_premium_tests is defined
+
+- name: throw error when state is present but no value is given
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ variables:
+ - name: delete_me
+ register: gitlab_group_variable_state
+ ignore_errors: true
+
+- name: verify fail
+ assert:
+ that:
+ - gitlab_group_variable_state.failed
+ - gitlab_group_variable_state is not changed
+
+- name: set a new variable to delete it later
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ purge: true
+ variables:
+ - name: delete_me
+ value: ansible
+ register: gitlab_group_variable_state
+
+- name: verify the change
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+
+- name: delete variable without referencing its value
+ gitlab_group_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ group: "{{ gitlab_group_name }}"
+ state: absent
+ variables:
+ - name: delete_me
+ register: gitlab_group_variable_state
+
+- name: verify deletion
+ assert:
+ that:
+ - gitlab_group_variable_state.changed
+ - gitlab_group_variable_state.group_variable.removed|length == 1
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml
new file mode 100644
index 000000000..69cec7d33
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_project_name: ansible_test_project
+gitlab_hook_url: http://gitlab.example.com/hook
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml
new file mode 100644
index 000000000..aa06f6c81
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_hook/tasks/main.yml
@@ -0,0 +1,77 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Cleanup GitLab hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+
+- name: Create GitLab Hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_hook_state
+
+- name: Test hook created
+ assert:
+ that:
+ - gitlab_hook_state is changed
+
+
+- name: Create GitLab Hook ( Idempotency test )
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_hook_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - gitlab_hook_state_again is not changed
+
+- name: Remove GitLab hook
+ gitlab_hook:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ hook_url: "{{ gitlab_hook_url }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ register: gitlab_hook_state_absent
+
+- name: Assert hook has been removed
+ assert:
+ that:
+ - gitlab_hook_state_absent is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml
new file mode 100644
index 000000000..457129c22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_project_name: ansible_test_project
+gitlab_deploy_key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnTYY7CYk1F/wBklpdRxudxN6KeXgfhutkiCigSfPhe ansible_test"
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml
new file mode 100644
index 000000000..0a9e47188
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: absent
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ initialize_with_readme: true
+ state: present
+ register: gitlab_project_state
+
+- assert:
+ that:
+ - gitlab_project_state is changed
+
+- name: Create {{ gitlab_project_name }} (Test idempotency)
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+ register: gitlab_project_state_again
+
+- assert:
+ that:
+ - gitlab_project_state_again is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/aliases
new file mode 100644
index 000000000..9f72f3711
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/defaults/main.yml
new file mode 100644
index 000000000..bf84a4751
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_api_token: glpat-XXXXXXXXXXXXXXXXXXXX
+gitlab_api_url: https://gitlab.com
+gitlab_project_name: ansible_test_project
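+# %{project_path} and %{default_branch} are badge placeholders expanded by GitLab itself.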
+gitlab_badge_link_url: 'https://example.gitlab.com/%{project_path}'
+updated_gitlab_badge_link_url: 'https://test.gitlab.com/%{project_path}'
+gitlab_badge_image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg'
\ No newline at end of file
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/tasks/main.yml
new file mode 100644
index 000000000..efc090ef7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_badge/tasks/main.yml
@@ -0,0 +1,214 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ name: "{{ gitlab_project_name }}"
+ initialize_with_readme: true
+ state: present
+
+- name: Create Badge (check)
+ check_mode: true
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_create_check_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_create_check_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_create_check_task.changed
+ - not gitlab_badge_create_check_task.failed
+
+- name: Create Badge
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_create_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_create_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_create_task.changed
+ - not gitlab_badge_create_task.failed
+
+- name: Create Badge (confirmation)
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_create_confirmation_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_create_confirmation_task
+
+- name: Check module call result
+ assert:
+ that:
+ - not gitlab_badge_create_confirmation_task.changed
+ - not gitlab_badge_create_confirmation_task.failed
+
+- name: Update Badge (check)
+ check_mode: true
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_update_check_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_update_check_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_update_check_task.changed
+ - not gitlab_badge_update_check_task.failed
+
+- name: Update Badge
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_update_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_update_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_update_task.changed
+ - not gitlab_badge_update_task.failed
+
+- name: Update Badge (confirmation)
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: present
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_update_confirmation_task
+
+- name: Check module call result
+ assert:
+ that:
+ - not gitlab_badge_update_confirmation_task.changed
+ - not gitlab_badge_update_confirmation_task.failed
+
+- name: Delete Badge (check)
+ check_mode: true
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_delete_check_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_delete_check_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_delete_check_task.changed
+ - not gitlab_badge_delete_check_task.failed
+
+- name: Delete Badge
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_delete_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_delete_task
+
+- name: Check module call result
+ assert:
+ that:
+ - gitlab_badge_delete_task.changed
+ - not gitlab_badge_delete_task.failed
+
+- name: Delete Badge (confirmation)
+ gitlab_project_badge:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ link_url: "{{ updated_gitlab_badge_link_url }}"
+ image_url: "{{ gitlab_badge_image_url }}"
+ register: gitlab_badge_delete_confirmation_task
+
+- ansible.builtin.debug:
+ var: gitlab_badge_delete_confirmation_task
+
+- name: Check module call result
+ assert:
+ that:
+ - not gitlab_badge_delete_confirmation_task.changed
+ - not gitlab_badge_delete_confirmation_task.failed
+
+- name: Clean up {{ gitlab_project_name }}
+ gitlab_project:
+ api_url: "{{ gitlab_api_url }}"
+ validate_certs: false
+ api_token: "{{ gitlab_api_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/defaults/main.yml
new file mode 100644
index 000000000..72d5a68f1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_server_url: https://gitlab.com
+gitlab_api_access_token: "token"
+gitlab_project: some_project
+username: some_user
+gitlab_access_level: developer
+userlist:
+ - username1
+ - username2
+dedicated_access_users:
+ - name: username1
+ access_level: "developer"
+ - name: username2
+ access_level: "maintainer"
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/tasks/main.yml
new file mode 100644
index 000000000..215abad44
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_members/tasks/main.yml
@@ -0,0 +1,124 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for gitlab_project_members module
+#
+# Copyright (c) 2021, Sergey Mikhaltsov <metanovii@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required library
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up before tests
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ state: absent
+
+- name: Add a User to A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ access_level: "{{ gitlab_access_level }}"
+ state: present
+ register: gitlab_project_members_state
+
+- name: Test member added to project
+ assert:
+ that:
+ - gitlab_project_members_state is changed
+
+- name: Add a User to A GitLab Project ( Idempotency test )
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ access_level: "{{ gitlab_access_level }}"
+ state: present
+ register: gitlab_project_members_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - gitlab_project_members_state_again is not changed
+
+- name: Remove a User from A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ state: absent
+ register: remove_gitlab_project_members_state
+
+- name: Test member removed from project
+ assert:
+ that:
+ - remove_gitlab_project_members_state is changed
+
+- name: Remove a User from A GitLab Project ( Idempotency test )
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ state: absent
+ register: remove_gitlab_project_members_state_again
+
+- name: Test module is idempotent
+ assert:
+ that:
+ - remove_gitlab_project_members_state_again is not changed
+
+- name: Add a list of Users to A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ userlist }}"
+ access_level: "{{ gitlab_access_level }}"
+ state: present
+
+- name: Remove a list of Users from A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ userlist }}"
+ state: absent
+
+- name: Add a list of Users with Dedicated Access Levels to A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_users_access: "{{ dedicated_access_users }}"
+ state: present
+
+- name: Remove a list of Users with Dedicated Access Levels from A GitLab Project
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_users_access: "{{ dedicated_access_users }}"
+ state: absent
+
+- name: Add a user, remove all others which might be on this access level
+ community.general.gitlab_project_members:
+ api_url: "{{ gitlab_server_url }}"
+ api_token: "{{ gitlab_api_access_token }}"
+ project: "{{ gitlab_project }}"
+ gitlab_user: "{{ username }}"
+ access_level: "{{ gitlab_access_level }}"
+ purge_users: "{{ gitlab_access_level }}"
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml
new file mode 100644
index 000000000..0645da0fd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_project_variable/tasks/main.yml
@@ -0,0 +1,701 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: purge all variables for check_mode test
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: true
+
+- name: add a variable value in check_mode
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: true
+ register: gitlab_project_variable_state
+
+- name: check_mode state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: apply add value from check_mode test
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ variables:
+ - name: ACCESS_KEY_ID
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: test new format
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: change protected attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: true
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert protected attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: false
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: change masked attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert masked attribute by not mentioning it
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert masked attribute again by not mentioning it (idempotent)
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: set both (masked and protected) attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ protected: true
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: set again both (masked and protected) attribute (idempotent)
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ masked: true
+ protected: true
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: revert both (masked and protected) attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ value: checkmode
+ protected: false
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: set the same variable value in check_mode again
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ check_mode: true
+ register: gitlab_project_variable_state
+
+- name: check_mode state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: apply the same value outside check_mode
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
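+# environment_scope defaults to "*" (all environments), so narrowing it to
+# "testing" below is expected to be reported as a change.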
+- name: change environment scope
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ environment_scope: testing
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: apply again the environment scope change
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID:
+ environment_scope: testing
+ value: checkmode
+ register: gitlab_project_variable_state
+
+- name: state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+
+- name: purge all variables to start from a clean state
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: true
+
+- name: set two test variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_project_variable_state
+
+- name: set two test variables state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 2
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: re-set two test variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: abc123
+ SECRET_ACCESS_KEY: 321cba
+ register: gitlab_project_variable_state
+
+- name: re-set two test variables state must not be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is not changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 2
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: edit one variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: edit one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+ - gitlab_project_variable_state.project_variable.updated[0] == "ACCESS_KEY_ID"
+
+- name: append one variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 1
+ - gitlab_project_variable_state.project_variable.untouched|length == 2
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.added[0] == "some"
+
+- name: re-set all variables
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ ACCESS_KEY_ID: changed
+ SECRET_ACCESS_KEY: 321cba
+ some: value
+ register: gitlab_project_variable_state
+
+- name: re-set all variables state must not be changed
+ assert:
+ that:
+ - not gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 3
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: set one variable and purge all others
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: true
+ register: gitlab_project_variable_state
+
+- name: set one variable and purge all others state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 2
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: only one variable is left
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: value
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: only one variable is left state must not be changed
+ assert:
+ that:
+ - not gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 1
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.untouched[0] == "some"
+
+- name: test integer values
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: 42
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: test integer values state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+
+- name: test float values
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ some: 42.23
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: test float values state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 1
+
+- name: delete the last remaining variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ vars:
+ some: value
+ register: gitlab_project_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 1
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.removed[0] == "some"
+
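+# variable_type can be env_var (the default) or file; with file, GitLab writes
+# the value to a temporary file during a CI job and exposes its path in the
+# environment variable.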
+- name: add one variable with variable_type file
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ variables:
+ - name: my_test_var
+ value: my_test_value
+ variable_type: file
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: append one variable state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 1
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ # the module marks these values with no_log, so the returned name is redacted
+ # to "VALUE_SPECIFIED_IN_NO_LOG_PARAMETER" and cannot be asserted on:
+ #- gitlab_project_variable_state.project_variable.added[0] == "my_test_var"
+
+- name: change variable_type attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: env_var
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: revert variable_type attribute
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ my_test_var:
+ value: my_test_value
+ variable_type: file
+ register: gitlab_project_variable_state
+
+- name: state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+
+- name: delete the variable_type file variable
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ vars:
+ my_test_var: my_test_value
+ register: gitlab_project_variable_state
+
+- name: no variable is left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 1
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+ - gitlab_project_variable_state.project_variable.removed[0] == "my_test_var"
+
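+# The GitLab API paginates variables 20 to a page, so each of the two tasks
+# below fills exactly one page, presumably to exercise results that span more
+# than a single page.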
+- name: set complete page and purge existing ones
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ page1_var01: value
+ page1_var02: value
+ page1_var03: value
+ page1_var04: value
+ page1_var05: value
+ page1_var06: value
+ page1_var07: value
+ page1_var08: value
+ page1_var09: value
+ page1_var10: value
+ page1_var11: value
+ page1_var12: value
+ page1_var13: value
+ page1_var14: value
+ page1_var15: value
+ page1_var16: value
+ page1_var17: value
+ page1_var18: value
+ page1_var19: value
+ page1_var20: value
+ purge: true
+ register: gitlab_project_variable_state
+
+- name: complete page added state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 20
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+
+- name: set complete page and keep existing ones
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ vars:
+ page2_var01: value
+ page2_var02: value
+ page2_var03: value
+ page2_var04: value
+ page2_var05: value
+ page2_var06: value
+ page2_var07: value
+ page2_var08: value
+ page2_var09: value
+ page2_var10: value
+ page2_var11: value
+ page2_var12: value
+ page2_var13: value
+ page2_var14: value
+ page2_var15: value
+ page2_var16: value
+ page2_var17: value
+ page2_var18: value
+ page2_var19: value
+ page2_var20: value
+ purge: false
+ register: gitlab_project_variable_state
+
+- name: existing page untouched state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state is changed
+ - gitlab_project_variable_state.project_variable.added|length == 20
+ - gitlab_project_variable_state.project_variable.untouched|length == 20
+
+- name: check that no variables are left
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: true
+ register: gitlab_project_variable_state
+
+- name: check that no variables are left state must be changed
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 0
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 40
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: same variable name, different environment scopes
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: true
+ variables:
+ - name: SECRET_ACCESS_KEY
+ value: 3214cbad
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: production
+ - name: SECRET_ACCESS_KEY
+ value: hello_world
+ masked: true
+ protected: true
+ variable_type: env_var
+ environment_scope: development
+ register: gitlab_project_variable_state
+
+- name: verify two vars
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.added|length == 2
+ - gitlab_project_variable_state.project_variable.untouched|length == 0
+ - gitlab_project_variable_state.project_variable.removed|length == 0
+ - gitlab_project_variable_state.project_variable.updated|length == 0
+
+- name: throw error when state is present but no value is given
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ variables:
+ - name: delete_me
+ register: gitlab_project_variable_state
+ ignore_errors: true
+
+- name: verify fail
+ assert:
+ that:
+ - gitlab_project_variable_state.failed
+ - gitlab_project_variable_state is not changed
+
+- name: set a new variable to delete it later
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ purge: true
+ variables:
+ - name: delete_me
+ value: ansible
+ register: gitlab_project_variable_state
+
+- name: verify the change
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+
+- name: delete variable without referencing its value
+ gitlab_project_variable:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ project: "{{ gitlab_project_name }}"
+ state: absent
+ variables:
+ - name: delete_me
+ register: gitlab_project_variable_state
+
+- name: verify deletion
+ assert:
+ that:
+ - gitlab_project_variable_state.changed
+ - gitlab_project_variable_state.project_variable.removed|length == 1
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml
new file mode 100644
index 000000000..ec7c0cfe1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_project_name: ansible_test_project
+gitlab_hook_url: http://gitlab.example.com/hook
+gitlab_runner_name: ansible_test_runner
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml
new file mode 100644
index 000000000..467e918c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_runner/tasks/main.yml
@@ -0,0 +1,78 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Create {{ gitlab_project_name }}
+ gitlab_project:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ name: "{{ gitlab_project_name }}"
+ state: present
+
+- name: Cleanup GitLab runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: absent
+
+- name: Create GitLab Runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: present
+ register: gitlab_runner_state
+
+- name: Assert runner has been created
+ assert:
+ that:
+ - gitlab_runner_state is changed
+
+
+#### COMMENTED OUT: THE MODULE UPDATES THE RUNNER IF IT ALREADY EXISTS. TO BE DISCUSSED ####
+# - name: Create GitLab Runner ( Idempotency test )
+# gitlab_runner:
+# server_url: "{{ gitlab_host }}"
+# validate_certs: false
+# login_token: "{{ gitlab_login_token }}"
+# description: "{{ gitlab_runner_name }}"
+# registration_token: "{{ gitlab_runner_registration_token }}"
+# state: present
+# register: gitlab_runner_state_again
+
+# - name: Test module is idempotent
+# assert:
+# that:
+# - gitlab_runner_state_again is not changed
+
+- name: Remove GitLab Runner
+ gitlab_runner:
+ server_url: "{{ gitlab_host }}"
+ validate_certs: false
+ login_token: "{{ gitlab_login_token }}"
+ description: "{{ gitlab_runner_name }}"
+ registration_token: "{{ gitlab_runner_registration_token }}"
+ state: absent
+ register: gitlab_runner_state_absent
+
+- name: Assert runner has been removed
+ assert:
+ that:
+ - gitlab_runner_state_absent is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases b/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases
new file mode 100644
index 000000000..fc0e157c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_user/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+gitlab/ci
+disabled
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml
new file mode 100644
index 000000000..c7a4a5dd3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_user/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+gitlab_user: ansible_test_user
+gitlab_user_pass: Secr3tPassw00rd
+gitlab_user_email: root@localhost
+gitlab_sshkey_name: ansibletest
+gitlab_sshkey_file: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDI8GIMlrirf+zsvBpxnF0daykP6YEJ5wytZXhDGD2dZXg9Tln0KUSDgreT3FDgoabjlOmG1L/nhu6ML76WCsmc/wnVMlXlDlQpVJSQ2PCxGNs9WRW7Y/Pk6t9KtV/VSYr0LaPgLEU8VkffSUBJezbKa1cssjb4CmRRqcePRNYpgCXdK05TEgFvmXl9qIM8Domf1ak1PlbyMmi/MytzHmnVFzxgUKv5c0Mr+vguCi131gPdh3QSf5AHPLEoO9LcMfu2IO1zvl61wYfsJ0Wn2Fncw+tJQfUin0ffTFgUIsGqki04/YjXyWynjSwQf5Jym4BYM0i2zlDUyRxs4/Tfp4yvJFik42ambzjLK6poq+iCpQReeYih9WZUaZwUQe7zYWhTOuoV7ydsk8+kDRMPidF9K5zWkQnglGrOzdbTqnhxNpwHCg2eSRJ49kPYLOH76g8P7IQvl+zluG0o8Nndir1WcYil4D4CCBskM8WbmrElZH1CRyP/NQMNIf4hFMItTjk= ansible@ansible
+gitlab_sshkey_expires_at: 2030-01-01T00:00:00.000Z
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml
new file mode 100644
index 000000000..e8c1ec360
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/main.yml
@@ -0,0 +1,257 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name: python-gitlab
+ state: present
+
+- name: Clean up gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ email: "{{ gitlab_user_email }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: absent
+
+
+- name: Create gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check user has been created correctly
+ assert:
+ that:
+ - gitlab_user_state is changed
+
+- name: Create gitlab user again
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state_again
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state_again is not changed
+ - gitlab_user_state_again.user.is_admin == False
+
+
+- name: Update User Test => Make User Admin
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: true
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check if user is admin now
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.is_admin == True
+
+- name: Update User Test => Make User Admin (Again)
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: true
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state is not changed
+ - gitlab_user_state.user.is_admin == True
+
+- name: Update User Test => Remove Admin Rights
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ isadmin: false
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check if user is not admin anymore
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.is_admin == False
+
+
+- name: Update User Test => Try Changing Mail without Confirmation Skipping
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: true
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that email is unchanged (the change only works with confirmation skipping)
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == gitlab_user_email
+
+- name: Update User Test => Change Mail with Confirmation Skip
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that mail has changed now
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == 'foo@bar.baz'
+
+- name: Update User Test => Change Mail with Confirmation Skip (Again)
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: foo@bar.baz
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_state is not changed
+ - gitlab_user_state.user.email == 'foo@bar.baz'
+
+- name: Update User Test => Revert to original Mail Address
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ confirm: false
+ validate_certs: false
+ api_token: "{{ gitlab_login_token }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check that reverting mail back to original has worked
+ assert:
+ that:
+ - gitlab_user_state is changed
+ - gitlab_user_state.user.email == gitlab_user_email
+
+
+- name: Update User Test => Change User Password
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+
+ # note: the only way to verify that a password is what we expect is to use
+ # it for a login, so we authenticate with it here instead of using the
+ # default token, assuming that users can always change their own password
+ api_username: "{{ gitlab_user }}"
+ api_password: "{{ gitlab_user_pass }}"
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: new-super-password
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state
+ assert:
+ that:
+ # note: there is no way to determine whether a password has actually
+ # changed, so the task must either always report changed or never
+ # report changed; for now we settled on never (always green)
+ - gitlab_user_state is not changed
+
+- name: Update User Test => Reset User Password
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+
+ api_username: "{{ gitlab_user }}"
+ api_password: new-super-password
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state (Again)
+ assert:
+ that:
+ - gitlab_user_state is not changed
+
+- name: Update User Test => Check that password was reset
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ validate_certs: false
+
+ api_username: "{{ gitlab_user }}"
+ api_password: "{{ gitlab_user_pass }}"
+
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ state: present
+ register: gitlab_user_state
+
+- name: Check PW setting return state (Reset)
+ assert:
+ that:
+ - gitlab_user_state is not changed
+
+- include_tasks: sshkey.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/sshkey.yml b/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/sshkey.yml
new file mode 100644
index 000000000..bba724d5e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/gitlab_user/tasks/sshkey.yml
@@ -0,0 +1,139 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create gitlab user with sshkey credentials
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ sshkey_name: "{{ gitlab_sshkey_name }}"
+ sshkey_file: "{{ gitlab_sshkey_file }}"
+ state: present
+ register: gitlab_user_sshkey
+
+- name: Check user has been created correctly
+ assert:
+ that:
+ - gitlab_user_sshkey is changed
+
+- name: Create gitlab user again
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ sshkey_name: "{{ gitlab_sshkey_name }}"
+ sshkey_file: "{{ gitlab_sshkey_file }}"
+ state: present
+ register: gitlab_user_sshkey_again
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_sshkey_again is not changed
+
+- name: Add expires_at to an already created gitlab user with ssh key
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ sshkey_name: "{{ gitlab_sshkey_name }}"
+ sshkey_file: "{{ gitlab_sshkey_file }}"
+ sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}"
+ state: present
+ register: gitlab_user_created_user_sshkey_expires_at
+
+- name: Check that expires_at is not added to an already existing ssh key
+ assert:
+ that:
+ - gitlab_user_created_user_sshkey_expires_at is not changed
+
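+# The GitLab API cannot modify an existing SSH key, so expires_at on a key
+# that is already present is ignored; the user is removed and recreated below
+# to get a key with an expiry set.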
+- name: Remove created gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ validate_certs: false
+ state: absent
+ register: gitlab_user_sshkey_remove
+
+- name: Check user has been removed correctly
+ assert:
+ that:
+ - gitlab_user_sshkey_remove is changed
+
+- name: Create gitlab user with sshkey and expires_at
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ sshkey_name: "{{ gitlab_sshkey_name }}"
+ sshkey_file: "{{ gitlab_sshkey_file }}"
+ sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}"
+ state: present
+ register: gitlab_user_sshkey_expires_at
+
+- name: Check user has been created correctly
+ assert:
+ that:
+ - gitlab_user_sshkey_expires_at is changed
+
+- name: Create gitlab user with sshkey and expires_at again
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ password: "{{ gitlab_user_pass }}"
+ validate_certs: false
+ sshkey_name: "{{ gitlab_sshkey_name }}"
+ sshkey_file: "{{ gitlab_sshkey_file }}"
+ sshkey_expires_at: "{{ gitlab_sshkey_expires_at }}"
+ state: present
+ register: gitlab_user_sshkey_expires_at_again
+
+- name: Check state is not changed
+ assert:
+ that:
+ - gitlab_user_sshkey_expires_at_again is not changed
+
+- name: Remove created gitlab user
+ gitlab_user:
+ api_url: "{{ gitlab_host }}"
+ api_token: "{{ gitlab_login_token }}"
+ email: "{{ gitlab_user_email }}"
+ name: "{{ gitlab_user }}"
+ username: "{{ gitlab_user }}"
+ validate_certs: false
+ state: absent
+ register: gitlab_user_sshkey_expires_at_remove
+
+- name: Check user has been removed correctly
+ assert:
+ that:
+ - gitlab_user_sshkey_expires_at_remove is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/aliases b/ansible_collections/community/general/tests/integration/targets/hg/aliases
new file mode 100644
index 000000000..e1d7ab2a2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python3
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml b/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml
new file mode 100644
index 000000000..1b8916880
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/tasks/install.yml
@@ -0,0 +1,88 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
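+# Strategy: snapshot the default python and pip binaries, install mercurial
+# (whose packaging may replace them), pin hg to its own interpreter, then
+# restore the originals so the test environment is left unchanged.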
+- name: get the default python version
+ command: "{{ ansible_python_interpreter }} -V"
+ register: default_python_version
+
+- name: find the default python
+ command: which python
+ register: which_python
+
+- name: find the default pip
+ command: which pip
+ register: which_pip
+
+- name: preserve the default python
+ command: cp -av "{{ which_python.stdout }}" "{{ which_python.stdout }}.default"
+
+- name: preserve the default pip
+ command: cp -av "{{ which_pip.stdout }}" "{{ which_pip.stdout }}.default"
+
+# using the apt module prevents autoremove from working, so call apt-get via shell instead
+- name: install mercurial (apt)
+ shell: apt-get -y update && apt-get -y install mercurial
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: install mercurial (dnf)
+ dnf:
+ name: mercurial
+ when: ansible_facts.pkg_mgr == 'dnf'
+
+- name: install mercurial (yum)
+ yum:
+ name: mercurial
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: install mercurial (pkgng)
+ package:
+ name: mercurial
+ when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng']
+
+- name: install mercurial (zypper)
+ package:
+ name: mercurial
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+- name: preserve the updated python
+ command: cp -av "{{ which_python.stdout }}" "{{ which_python.stdout }}.updated"
+
+- name: preserve the updated pip
+ command: cp -av "{{ which_pip.stdout }}" "{{ which_pip.stdout }}.updated"
+
+- name: locate mercurial
+ command: which hg
+ register: which_hg
+
+- name: get the mercurial interpreter
+ command: head -n 1 "{{ which_hg.stdout }}"
+ register: hg_interpreter
+
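+# hg_interpreter.stdout holds the shebang line (e.g. "#!/usr/bin/python3");
+# [2:] strips the leading "#!" to leave the interpreter path.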
+- name: stat the mercurial interpreter
+ stat:
+ path: "{{ hg_interpreter.stdout[2:] }}"
+ register: stat_hg_interpreter
+
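+# If the interpreter is a symlink (for example to the default python restored
+# below), rewrite the shebang to the link target so hg keeps working after
+# the original python is put back.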
+- name: bypass the mercurial python interpreter symlink (if needed)
+ lineinfile:
+ path: "{{ which_hg.stdout }}"
+ regexp: "^#!.*$"
+ line: "#!{{ stat_hg_interpreter.stat.lnk_source }}"
+ when: stat_hg_interpreter.stat.islnk
+
+- name: restore the default python
+ command: cp -av "{{ which_python.stdout }}.default" "{{ which_python.stdout }}"
+
+- name: restore the default pip
+ command: cp -av "{{ which_pip.stdout }}.default" "{{ which_pip.stdout }}"
+
+- name: get the current python version
+ command: "{{ ansible_python_interpreter }} -V"
+ register: current_python_version
+
+- name: verify the python version has not changed
+ assert:
+ that:
+ - default_python_version.stdout == current_python_version.stdout
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml
new file mode 100644
index 000000000..1ca30459c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/tasks/main.yml
@@ -0,0 +1,45 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the hg module
+# Copyright (c) 2014, James Tanner <tanner.jc@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: determine if mercurial is already installed
+ command: which hg
+ register: has_hg
+ ignore_errors: true
+
+- name: warn if the underlying system is not capable of running these tests
+ debug:
+ msg: >-
+ The mercurial client is not able to check out Bitbucket repositories as per the changes mentioned here:
+ https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01 . Therefore these tests are skipped.
+ when: (ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04") or ansible_python_version is version("2.7.9", "<")
+
+- block:
+ - name: install mercurial
+ include_tasks: install.yml
+ when: has_hg is failed
+
+ - name: test mercurial
+ include_tasks: run-tests.yml
+
+ - name: uninstall mercurial
+ include_tasks: uninstall.yml
+ when: has_hg is failed
+
+ # As per the bitbucket changes in https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01 , this
+ # test will fail under certain circumstances, to avoid false positives, we skip these tests under the following
+ # circumstances:
+ #
+ # - The Ubuntu 14.04 image used on Shippable runs Python 2.7.6, so we skip explicitly for this image.
+ # - When ansible_python_version is not 2.7.9 or higher, mercurial is likely to also run using this same (old)
+ # python version, which causes issues as per the link above.
+ when:
+ - not (ansible_distribution == "Ubuntu" and ansible_distribution_version == "14.04")
+ - ansible_python_version is version("2.7.9", ">=")
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml b/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml
new file mode 100644
index 000000000..928b7cb68
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/tasks/run-tests.yml
@@ -0,0 +1,85 @@
+# test code for the hg module
+# Copyright (c) 2018, Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+- name: set where to extract the repo
+ set_fact:
+ checkout_dir: "{{ remote_tmp_dir }}/hg_project_test"
+
+- name: set what repo to use
+ set_fact:
+ repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test"
+
+- name: clean out the remote_tmp_dir
+ shell: rm -rf {{ remote_tmp_dir }}/*
+
+- name: verify that mercurial is installed so this test can continue
+ shell: which hg
+
+- name: initial checkout
+ hg:
+ repo: "{{ repo }}"
+ dest: "{{ checkout_dir }}"
+ register: hg_result
+
+- debug: var=hg_result
+
+- shell: ls {{ checkout_dir }}
+
+- name: verify information about the initial clone
+ assert:
+ that:
+ - "'before' in hg_result"
+ - "'after' in hg_result"
+ - "not hg_result.before"
+ - "hg_result.changed"
+
+- name: repeated checkout
+ hg:
+ repo: "{{ repo }}"
+ dest: "{{ checkout_dir }}"
+ register: hg_result2
+
+- debug: var=hg_result2
+
+- name: check for tags
+ stat:
+ path: "{{ checkout_dir }}/.hgtags"
+ register: tags
+
+- name: check for remotes
+ stat:
+ path: "{{ checkout_dir }}/.hg/branch"
+ register: branches
+
+- debug: var=tags
+- debug: var=branches
+
+- name: assert presence of tags and branches
+ assert:
+ that:
+ - "tags.stat.isreg"
+ - "branches.stat.isreg"
+
+- name: verify on a re-clone things are marked unchanged
+ assert:
+ that:
+ - "not hg_result2.changed"
+
+- name: Try to check out a non-existent repo
+ hg:
+ repo: "http://hg.pf.osdn.net/view/a/ak/akasurde/hg_project_test_1"
+ clone: false
+ update: false
+ register: hg_result3
+ ignore_errors: true
+
+- name: Verify result of non-existent repo clone
+ assert:
+ that:
+ - hg_result3.msg
+ - "'abort: HTTP Error 404: Not Found' in hg_result3.msg"
+ - "not hg_result3.changed"
diff --git a/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml b/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml
new file mode 100644
index 000000000..4a26995ef
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hg/tasks/uninstall.yml
@@ -0,0 +1,53 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: restore the updated python
+ command: mv "{{ which_python.stdout }}.updated" "{{ which_python.stdout }}"
+
+- name: restore the updated pip
+ command: mv "{{ which_pip.stdout }}.updated" "{{ which_pip.stdout }}"
+
+- name: restore the mercurial python interpreter symlink (if needed)
+ lineinfile:
+ path: "{{ which_hg.stdout }}"
+ regexp: "^#!.*$"
+ line: "#!{{ stat_hg_interpreter.stat.path }}"
+ when: stat_hg_interpreter.stat.islnk
+
+# using the apt module prevents autoremove from working, so call apt-get via shell instead
+- name: uninstall packages which were not originally installed (apt)
+ shell: apt-get -y remove mercurial && apt-get -y autoremove
+ when: ansible_facts.pkg_mgr == 'apt'
+
+- name: uninstall packages which were not originally installed (dnf)
+ dnf:
+ name: mercurial
+ state: absent
+ autoremove: true
+ when: ansible_facts.pkg_mgr == 'dnf'
+
+# the yum module does not have an autoremove parameter
+- name: uninstall packages which were not originally installed (yum)
+ shell: yum -y autoremove mercurial
+ when: ansible_facts.pkg_mgr == 'yum'
+
+- name: uninstall packages which were not originally installed (pkgng)
+ package:
+ name: mercurial
+ state: absent
+ autoremove: true
+ when: ansible_facts.pkg_mgr in ['pkgng', 'community.general.pkgng']
+
+- name: uninstall packages which were not originally installed (zypper)
+ package:
+ name: mercurial
+ state: absent
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+- name: restore the default python
+ raw: mv "{{ which_python.stdout }}.default" "{{ which_python.stdout }}"
+
+- name: restore the default pip
+ raw: mv "{{ which_pip.stdout }}.default" "{{ which_pip.stdout }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/homebrew/aliases b/ansible_collections/community/general/tests/integration/targets/homebrew/aliases
new file mode 100644
index 000000000..11bb9a086
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homebrew/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/rhel
+skip/docker
+skip/python2.6
diff --git a/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml
new file mode 100644
index 000000000..1db3ef1a6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homebrew/tasks/main.yml
@@ -0,0 +1,99 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the homebrew module.
+# Copyright (c) 2020, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Find brew binary
+ command: which brew
+ register: brew_which
+ when: ansible_distribution in ['MacOSX']
+
+- name: Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+ when: ansible_distribution in ['MacOSX']
+
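+# Homebrew refuses to run as root, so the tasks below drop privileges to the
+# owner of the brew binary via become_user.
+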
+#- name: Use ignore-pinned option while upgrading all
+# homebrew:
+# upgrade_all: true
+# upgrade_options: ignore-pinned
+# become: true
+# become_user: "{{ brew_stat.stat.pw_name }}"
+# register: upgrade_option_result
+# environment:
+# HOMEBREW_NO_AUTO_UPDATE: True
+
+#- assert:
+# that:
+# - upgrade_option_result.changed
+
+- vars:
+ package_name: gnu-tar
+
+ block:
+ - name: Make sure {{ package_name }} package is not installed
+ homebrew:
+ name: "{{ package_name }}"
+ state: absent
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+
+ - name: Install {{ package_name }} package using homebrew
+ homebrew:
+ name: "{{ package_name }}"
+ state: present
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: package_result
+
+ - assert:
+ that:
+ - package_result.changed
+
+ - name: Again install {{ package_name }} package using homebrew
+ homebrew:
+ name: "{{ package_name }}"
+ state: present
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: package_result
+
+ - assert:
+ that:
+ - not package_result.changed
+
+ - name: Uninstall {{ package_name }} package using homebrew
+ homebrew:
+ name: "{{ package_name }}"
+ state: absent
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: package_result
+
+ - assert:
+ that:
+ - package_result.changed
+
+ - name: Again uninstall {{ package_name }} package using homebrew
+ homebrew:
+ name: "{{ package_name }}"
+ state: absent
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: package_result
+
+ - assert:
+ that:
+ - not package_result.changed
diff --git a/ansible_collections/community/general/tests/integration/targets/homebrew_cask/aliases b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/aliases
new file mode 100644
index 000000000..11bb9a086
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/rhel
+skip/docker
+skip/python2.6
diff --git a/ansible_collections/community/general/tests/integration/targets/homebrew_cask/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/defaults/main.yml
new file mode 100644
index 000000000..18ebd7b10
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cask: brooklyn
diff --git a/ansible_collections/community/general/tests/integration/targets/homebrew_cask/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/tasks/main.yml
new file mode 100644
index 000000000..85f257266
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homebrew_cask/tasks/main.yml
@@ -0,0 +1,73 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the homebrew_cask module.
+# Copyright (c) 2022, Joseph Torcasso <jtorcass@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Find brew binary
+ command: which brew
+ register: brew_which
+ when: ansible_distribution in ['MacOSX']
+
+ - name: Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+ when: ansible_distribution in ['MacOSX']
+
+ - block:
+ - name: Install cask
+ homebrew_cask:
+ name: "{{ cask }}"
+ state: present
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: cask_install_result
+
+ - assert:
+ that:
+ - cask_install_result is changed
+ - "'Cask installed' in cask_install_result.msg"
+
+ - name: Install cask (idempotence)
+ homebrew_cask:
+ name: "{{ cask }}"
+ state: present
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: cask_install_result
+
+ - assert:
+ that:
+ - cask_install_result is not changed
+ - "'Cask installed' not in cask_install_result.msg"
+ - "'Cask already installed' in cask_install_result.msg"
+
+ - name: Install cask with force install option
+ homebrew_cask:
+ name: "{{ cask }}"
+ state: present
+ install_options: force
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ register: cask_install_result
+
+ - assert:
+ that:
+ - cask_install_result is changed
+ - "'Cask installed' in cask_install_result.msg"
+
+ always:
+ - name: Delete cask
+ homebrew_cask:
+ name: "{{ cask }}"
+ state: absent
+ install_options: force
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ ignore_errors: true
diff --git a/ansible_collections/community/general/tests/integration/targets/homectl/aliases b/ansible_collections/community/general/tests/integration/targets/homectl/aliases
new file mode 100644
index 000000000..b87db2e43
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homectl/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel9.0 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/
+skip/rhel9.1 # See https://www.reddit.com/r/Fedora/comments/si7nzk/homectl/
diff --git a/ansible_collections/community/general/tests/integration/targets/homectl/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/homectl/tasks/main.yml
new file mode 100644
index 000000000..93c1089b4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/homectl/tasks/main.yml
@@ -0,0 +1,182 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Get systemd version and if it doesn't exist don't run these tests.
+- name: check systemd version
+ command: "systemctl --version"
+ register: systemd_version
+ ignore_errors: true
+
+- name: check homectl version
+ command: homectl --version
+ register: homectl_version
+ ignore_errors: true
+
+- block:
+ - name: Check and start systemd-homed service
+ service:
+ name: systemd-homed.service
+ state: started
+ enabled: true
+
+ - name: Add a user 'james'
+ community.general.homectl:
+ name: james
+ password: myreallysecurepassword1!
+ state: present
+
+ - name: verify user added
+ command: homectl inspect james
+ register: james_info
+
+ - name: Add the user 'tom' with a zsh shell, uid of 1000, and gid of 1000
+ community.general.homectl:
+ name: tom
+ password: myreallysecurepassword1!
+ state: present
+ shell: /bin/zsh
+ uid: 1000
+ gid: 1000
+ disksize: 10G
+ register: tom_userinfo
+
+ - name: Try to add user 'james' that already exists
+ community.general.homectl:
+ name: james
+ password: myreallysecurepassword1!
+ state: present
+ shell: /bin/ksh
+ register: user_exists
+
+ - name: Try to use 'resize=true' option without 'disksize' option (not allowed)
+ community.general.homectl:
+ name: foo
+ password: uq4895738!@#$%dfd
+ state: present
+ resize: true
+ register: resize_out
+ ignore_errors: true
+
+ - name: Use option 'disksize=1G' without option resize (allowed)
+ community.general.homectl:
+ name: foobar
+ password: "uq4895738!@#$%dfd"
+ state: present
+ disksize: 1G
+ register: disk_out
+ ignore_errors: true
+
+ - name: Try to create user without giving a password
+ community.general.homectl:
+ name: danielle
+ register: danielle_out
+ ignore_errors: true
+
+ - name: remove user 'foobar' without requiring password
+ community.general.homectl:
+ name: foobar
+ state: absent
+ register: delete_foobar_out
+
+ - name: modify user 'james' to have zsh shell and timezone 'America/New_York'
+ community.general.homectl:
+ name: james
+ password: myreallysecurepassword1!
+ state: present
+ shell: /bin/zsh
+ timezone: America/New_York
+ register: lukuser_modify_out
+
+ - name: create user 'jake' with all mount options
+ community.general.homectl:
+ name: jake
+ password: myreallysecurepassword12!
+ mountopts: noexec,nosuid,nodev
+ sshkeys: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDUSW/q2qFZPr2vS0qrmAs+1iQI1jLIBdJ4CVIhE3KnIwxkkiInS8mJ+t0FBTeK3ks3GZLPVYC1v9o2P+oqyUk1CiBnCsMXGJud+L/t8b5r8MiJMyP7Jzd6yhmcvenjvz+vY06jQ9chWAtThEknuaOMongIpQQzSLGbdMy0yMsz4GEjicwdcj1PDwItPvUt4TL4K7V9NE672idADlRt6qng4UwpziqlYgsyIG46ettDras8hGAPricrhFWUS2rLDsCD0thkPFdR8HL1ZWTZ6LtolhO4MYtgntzXn708TTmFC2oIDluzyxVoUYmsfVotVdXFZcOWffnwbCgU+tn75JXTLozgTbV3VWmkxpJFErCWPerxcZv3+7b0f36/Y0gRNjM9HERLDSE1c8yz29NOLY0qH5306aByjOaerxNq9+ZOU/Fmf5/VfGIUp/FdLxDw+V0AzejFG580VAcstEMsOHSdwTbi3gf6LoGSiRyWKKDod0TZCMC6RzfdsfdsfI9CClGl0s= test@router.home"
+ register: jake_out
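+ # Note: the final assert in this file checks that the 'mountopts' value
+ # noexec,nosuid,nodev surfaces in the module's return data as the booleans
+ # mountNoExecute, mountNoSuid and mountNoDevices.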
+
+ - name: Try to remove user 'janet' that doesn't exist
+ community.general.homectl:
+ name: janet
+ state: absent
+ register: user_not_exist
+ ignore_errors: true
+
+ - name: Use check_mode to try to create user 'diana'
+ community.general.homectl:
+ name: diana
+ password: helloworld123!@
+ state: present
+ check_mode: true
+ register: diana_create_checkmode_out
+
+ - name: Verify user 'diana' was not created with check_mode
+ command: homectl inspect diana
+ register: user_diana_exists
+ ignore_errors: true
+
+ - name: Try to modify user 'jake' with only noexec mount option in check_mode
+ community.general.homectl:
+ name: jake
+ password: myreallysecurepassword12!
+ state: present
+ mountopts: noexec
+ check_mode: true
+ register: jake_checkmode_out
+
+ - name: Verify user 'jake' was not modified and still has all mount options
+ command: homectl inspect jake
+ register: user_jake_details_out
+
+ - name: Modify user 'jake' with only noexec mount option
+ community.general.homectl:
+ name: jake
+ password: myreallysecurepassword12!
+ state: present
+ mountopts: noexec
+ register: jake_modify_out
+
+ - name: modify user 'jake' again with only noexec mount option to verify nothing changes (idempotent)
+ community.general.homectl:
+ name: jake
+ password: myreallysecurepassword12!
+ state: present
+ mountopts: noexec
+ register: jake_modify_again_out
+
+ - name: Try to modify user 'jake' with an incorrect password
+ community.general.homectl:
+ name: jake
+ password: incorrectPassword!
+ state: present
+ mountopts: noexec
+ locked: true
+ ignore_errors: true
+ register: jake_incorrect_pass_out
+
+ - assert:
+ that:
+ - james_info.rc == 0
+ - tom_userinfo.data['gid'] == 1000 and tom_userinfo.data['uid'] == 1000
+ - user_exists is changed and user_exists.data['shell'] == '/bin/ksh'
+ - resize_out is not changed
+ - disk_out is changed
+ - delete_foobar_out is changed
+ - danielle_out is not changed
+ - lukuser_modify_out.data['timeZone'] == "America/New_York" and lukuser_modify_out.data['shell'] == "/bin/zsh"
+ - user_not_exist is not changed and user_not_exist.msg == "User does not exist!"
+ - jake_out is changed and jake_out.data['mountNoDevices'] == True and jake_out.data['mountNoSuid'] == True and jake_out.data['mountNoExecute'] == True
+ - diana_create_checkmode_out is changed and 'No home for user diana known' in user_diana_exists.stderr
+ - "jake_checkmode_out is changed and 'Mount Flags: nosuid nodev noexec' in user_jake_details_out.stdout"
+ - jake_modify_out is changed and jake_modify_out.data['privileged']['sshAuthorizedKeys'] is not none
+ - jake_modify_out.data['mountNoDevices'] == False and jake_modify_out.data['mountNoExecute'] == True and jake_modify_out.data['mountNoSuid'] == False
+ - jake_modify_again_out is not changed
+ - jake_incorrect_pass_out is not changed and jake_incorrect_pass_out is failed and jake_incorrect_pass_out.msg == 'User exists but password is incorrect!'
+
+ # homectl was first introduced in systemd 245, so require version >= 245 and make sure both systemd and the homectl command are present
+ when:
+ - systemd_version.rc == 0 and (systemd_version.stdout | regex_search('[0-9][0-9][0-9]') | int >= 245) and homectl_version.rc == 0
+ - ansible_distribution != 'Archlinux' # TODO!
+ - ansible_distribution != 'Fedora' or ansible_distribution_major_version|int < 36 # TODO!
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml
new file mode 100644
index 000000000..dd7086152
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_ecs_instance/tasks/main.yml
@@ -0,0 +1,319 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: create an EIP
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ state: present
+ register: eip
+- name: create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: disk
+- name: delete an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+#----------------------------------------------------------
+- name: create an instance (check mode)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
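+
+# The hwc_* targets in this change generally follow one sequence: a check-mode
+# run that must report a change without creating anything, the real apply, a
+# check-mode rerun that must report no change (idempotency), a plain rerun
+# that must also be a no-op, and the mirror image of all four for state: absent.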
+#----------------------------------------------------------
+- name: create an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create an instance (idempotent)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create an instance that already exists
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete an instance (check mode)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an instance
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an instance (idempotent)
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete an instance that does not exist
+ hwc_ecs_instance:
+ data_volumes:
+ - volume_id: "{{ disk.id }}"
+ enable_auto_recovery: false
+ eip_id: "{{ eip.id }}"
+ name: "ansible_ecs_instance_test"
+ availability_zone: "cn-north-1a"
+ nics:
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ - subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.34"
+ server_tags:
+ my_server: "my_server"
+ image_id: "8da46d6d-6079-4e31-ad6d-a7167efff892"
+ flavor_name: "s3.small.1"
+ vpc_id: "{{ vpc.id }}"
+ root_volume:
+ volume_type: "SAS"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: disk
+- name: delete an EIP
+ hwc_vpc_eip:
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ type: "5_bgp"
+ state: absent
+ register: eip
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml
new file mode 100644
index 000000000..63b7d03f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_evs_disk/tasks/main.yml
@@ -0,0 +1,113 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+#----------------------------------------------------------
+- name: create a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ------------------------------------------------------------
+- name: test create a disk in check mode
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+ check_mode: true
+- name: verify results of test create a disk in check mode
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: create a disk that already exists
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a disk (check mode)
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a disk
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete a disk that does not exist (check mode)
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not changed
+# ----------------------------------------------------------------------------
+- name: delete a disk that does not exist
+ hwc_evs_disk:
+ availability_zone: "cn-north-1a"
+ name: "ansible_evs_disk_test"
+ volume_type: "SATA"
+ size: 10
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml
new file mode 100644
index 000000000..3695fd210
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_network_vpc/tasks/main.yml
@@ -0,0 +1,105 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ----------------------------------------------------------------------------
+#
+# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
+#
+# ----------------------------------------------------------------------------
+#
+# This file is automatically generated by Magic Modules and manual
+# changes will be clobbered when the file is regenerated.
+#
+# Please read more about how to change this file at
+# https://www.github.com/huaweicloud/magic-modules
+#
+# ----------------------------------------------------------------------------
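+# These tasks pass the authentication options (identity_endpoint, user,
+# password, domain, project, region) explicitly rather than relying on
+# module defaults; the templated values are presumably supplied by the
+# integration configuration for this target.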
+# Pre-test setup
+- name: delete a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+#----------------------------------------------------------
+- name: create a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: create a vpc that already exists
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a vpc
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete a vpc that does not exist
+ hwc_network_vpc:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "vpc_1"
+ cidr: "192.168.100.0/24"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml
new file mode 100644
index 000000000..323904773
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_smn_topic/tasks/main.yml
@@ -0,0 +1,86 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+#----------------------------------------------------------
+- name: create an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: create an SMN topic that already exists
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete an SMN topic
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete an SMN topic that does not exist
+ hwc_smn_topic:
+ identity_endpoint: "{{ identity_endpoint }}"
+ user: "{{ user }}"
+ password: "{{ password }}"
+ domain: "{{ domain }}"
+ project: "{{ project }}"
+ region: "{{ region }}"
+ name: "ansible_smn_topic_test"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml
new file mode 100644
index 000000000..f830f951f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_eip/tasks/main.yml
@@ -0,0 +1,190 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: port
+- name: delete an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
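+
+# The EIP under test is bound to the port created above via port_id; the
+# teardown at the end of this file releases resources in reverse order of
+# creation: port, then subnet, then vpc.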
+#----------------------------------------------------------
+- name: create an EIP (check mode)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create an EIP (idempotent)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create an EIP that already exists
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete an EIP (check mode)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an EIP
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete an EIP (idempotent)
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete an EIP that does not exist
+ hwc_vpc_eip:
+ type: "5_bgp"
+ dedicated_bandwidth:
+ charge_mode: "traffic"
+ name: "ansible_test_dedicated_bandwidth"
+ size: 1
+ port_id: "{{ port.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: port
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml
new file mode 100644
index 000000000..b8e02a539
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_peering_connect/tasks/main.yml
@@ -0,0 +1,155 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: present
+ register: vpc1
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: present
+ register: vpc2
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+#----------------------------------------------------------
+- name: create a peering connect (check mode)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
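+
+# In check mode nothing is created, so no resource id should come back;
+# asserting 'not result.id' together with 'result.changed' verifies that the
+# module only predicted the change.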
+#----------------------------------------------------------
+- name: create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a peering connect (idempotent)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a peering connect that already exists
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a peering connect (check mode)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a peering connect (idempotent)
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a peering connect that does not exist
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: absent
+ register: vpc2
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: absent
+ register: vpc1
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml
new file mode 100644
index 000000000..93b17398f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_port/tasks/main.yml
@@ -0,0 +1,141 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+#----------------------------------------------------------
+- name: create a port (check mode)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a port (idempotent)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a port that already exists
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a port (check mode)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a port
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a port (idempotent)
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a port that does not exist
+ hwc_vpc_port:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml
new file mode 100644
index 000000000..6accdb855
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_private_ip/tasks/main.yml
@@ -0,0 +1,142 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: create a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: present
+ register: subnet
+- name: delete a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
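+
+# hwc_vpc_private_ip reserves the address 192.168.100.33 inside the subnet
+# created above; presumably it only reserves the IP rather than creating a
+# full port object the way hwc_vpc_port does.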
+#----------------------------------------------------------
+- name: create a private ip (check mode)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a private ip (idempotent)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a private ip that already exists
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a private ip (check mode)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a private ip
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a private ip (idempotent)
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a private ip that does not exist
+ hwc_vpc_private_ip:
+ subnet_id: "{{ subnet.id }}"
+ ip_address: "192.168.100.33"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a subnet
+ hwc_vpc_subnet:
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ state: absent
+ register: subnet
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml
new file mode 100644
index 000000000..8e2e2ca82
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_route/tasks/main.yml
@@ -0,0 +1,159 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: present
+ register: vpc1
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: present
+ register: vpc2
+- name: create a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: present
+ register: connect
+- name: delete a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
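+
+# The route's next_hop is the id of the peering connection created above, so
+# the route under test sends 192.168.0.0/16 traffic from vpc1 across the
+# peering link.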
+#----------------------------------------------------------
+- name: create a route (check mode)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------
+- name: create a route (idempotent)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# -----------------------------------------------------------
+- name: create a route that already exists
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a route (check mode)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a route
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a route (idempotent)
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ check_mode: true
+ register: result
+- name: not changed
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a route that does not exist
+ hwc_vpc_route:
+ vpc_id: "{{ vpc1.id }}"
+ destination: "192.168.0.0/16"
+ next_hop: "{{ connect.id }}"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a peering connect
+ hwc_vpc_peering_connect:
+ local_vpc_id: "{{ vpc1.id }}"
+ name: "ansible_network_peering_test"
+ filters:
+ - "name"
+ peering_vpc:
+ vpc_id: "{{ vpc2.id }}"
+ state: absent
+ register: connect
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_peering"
+ state: absent
+ register: vpc2
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.0.0/16"
+ name: "ansible_network_vpc_test_local"
+ state: absent
+ register: vpc1
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml
new file mode 100644
index 000000000..b6ee25e25
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group/tasks/main.yml
@@ -0,0 +1,91 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+#----------------------------------------------------------
+- name: create a security group (check mode)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a security group (idempotent)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ check_mode: true
+ register: result
+- name: idempotent
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a security group that already exists
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a security group (check mode)
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+# ----------------------------------------------------------------------------
+- name: delete a security group that does not exist
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml
new file mode 100644
index 000000000..4ce4bafdc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_security_group_rule/tasks/main.yml
@@ -0,0 +1,166 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: present
+ register: sg
+- name: delete a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
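+
+# The rule under test allows ingress TCP from any source (0.0.0.0/0) on ports
+# 22-55; port_range_min/port_range_max bound the range and ethertype selects
+# IPv4.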
+#----------------------------------------------------------
+- name: create a security group rule (check mode)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: create a security group rule (idempotent)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is false (idempotent)
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a security group rule that already exists
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a security group rule (check mode)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a security group rule
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a security group rule (idempotent)
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is false (idempotent)
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a security group rule that does not exist
+ hwc_vpc_security_group_rule:
+ direction: "ingress"
+ protocol: "tcp"
+ ethertype: "IPv4"
+ port_range_max: 55
+ security_group_id: "{{ sg.id }}"
+ port_range_min: 22
+ remote_ip_prefix: "0.0.0.0/0"
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a security group
+ hwc_vpc_security_group:
+ name: "ansible_network_security_group_test"
+ state: absent
+ register: sg
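
Note that this teardown only runs if every task before it succeeds, so a failing assert would leak the test security group. A hypothetical restructuring with the standard block/always idiom (the rule_tests.yml split is illustrative, not part of this diff):

- name: run the rule tests with guaranteed cleanup (sketch)
  block:
    - name: run the security group rule tests
      include_tasks: rule_tests.yml    # hypothetical file holding the tasks above
  always:
    - name: delete a security group
      hwc_vpc_security_group:
        name: "ansible_network_security_group_test"
        state: absent
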
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml
new file mode 100644
index 000000000..522ffb601
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/hwc_vpc_subnet/tasks/main.yml
@@ -0,0 +1,152 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Pre-test setup
+- name: create a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: present
+ register: vpc
+- name: delete a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: absent
+#----------------------------------------------------------
+- name: create a subnet (check mode)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - not result.id
+ - result.changed
+#----------------------------------------------------------
+- name: create a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: present
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+      - result is changed
+#----------------------------------------------------------
+- name: create a subnet (idempotent)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: present
+ check_mode: true
+ register: result
+- name: assert changed is false (idempotent)
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: create a subnet that already exists
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: present
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#----------------------------------------------------------
+- name: delete a subnet (check mode)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a subnet
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: absent
+ register: result
+- name: assert changed is true
+ assert:
+ that:
+ - result is changed
+#----------------------------------------------------------
+- name: delete a subnet (idempotent)
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: absent
+ check_mode: true
+ register: result
+- name: assert changed is false (idempotent)
+ assert:
+ that:
+ - not result.changed
+# ----------------------------------------------------------------------------
+- name: delete a subnet that does not exist
+ hwc_vpc_subnet:
+ vpc_id: "{{ vpc.id }}"
+ cidr: "192.168.100.0/26"
+ gateway_ip: "192.168.100.32"
+ name: "ansible_network_subnet_test"
+ dhcp_enable: true
+ state: absent
+ register: result
+- name: assert changed is false
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+#---------------------------------------------------------
+# Post-test teardown
+- name: delete a vpc
+ hwc_network_vpc:
+ cidr: "192.168.100.0/24"
+ name: "ansible_network_vpc_test"
+ state: absent
+ register: vpc
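
The addressing here is deliberately nested: the subnet CIDR 192.168.100.0/26 sits inside the VPC's 192.168.100.0/24, and gateway_ip 192.168.100.32 falls inside the /26 host range (.1 to .62). The tasks also assume hwc_network_vpc returned an id; a purely illustrative guard would make that failure mode explicit:

- name: sanity-check the registered vpc before using its id (illustrative guard)
  assert:
    that:
      - vpc.id is defined
    fail_msg: hwc_network_vpc did not return an id
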
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/aliases b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/tasks/main.yml
new file mode 100644
index 000000000..cc1fce748
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_command/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Wait for iLO Reboot Completion
+ community.general.ilo_redfish_command:
+ category: Systems
+ command: WaitforiLORebootCompletion
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
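
The task above does not register its result, so nothing downstream can inspect what the module reported. A sketch that captures and prints the outcome (the reboot_result variable name is hypothetical):

- name: Wait for iLO Reboot Completion
  community.general.ilo_redfish_command:
    category: Systems
    command: WaitforiLORebootCompletion
    baseuri: "{{ baseuri }}"
    username: "{{ username }}"
    password: "{{ password }}"
  register: reboot_result    # hypothetical variable name

- name: show the command outcome (illustrative only)
  debug:
    var: reboot_result
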
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/aliases b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/tasks/main.yml
new file mode 100644
index 000000000..30bfb4edd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_config/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set NTP Servers
+ ilo_redfish_config:
+ category: Manager
+ command: SetNTPServers
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ attribute_name: StaticNTPServers
+ attribute_value: 1.2.3.4
+
+- name: Set DNS Server
+ ilo_redfish_config:
+ category: Manager
+ command: SetDNSserver
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ attribute_name: DNSServers
+ attribute_value: 192.168.1.1
+
+- name: Set Domain name
+ ilo_redfish_config:
+ category: Manager
+ command: SetDomainName
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ attribute_name: DomainName
+ attribute_value: tst.sgp.hp.mfg
+
+- name: Disable WINS Reg
+ ilo_redfish_config:
+ category: Manager
+ command: SetWINSReg
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ attribute_name: WINSRegistration
+
+- name: Set TimeZone
+ ilo_redfish_config:
+ category: Manager
+ command: SetTimeZone
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ attribute_name: TimeZone
+ attribute_value: Chennai
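
All five tasks assume baseuri, username and password are supplied externally, typically from the integration test configuration. A hypothetical wrapper play with placeholder values (a TEST-NET-1 address and dummy credentials), purely to show how those variables would be fed in:

- hosts: localhost
  gather_facts: false
  vars:
    baseuri: 192.0.2.10    # placeholder iLO address
    username: admin        # placeholder credential
    password: changeme     # placeholder credential
  tasks:
    - name: run the ilo_redfish_config tasks
      include_tasks: tasks/main.yml
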
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/aliases b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/tasks/main.yml
new file mode 100644
index 000000000..0f80207ac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ilo_redfish_info/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get sessions
+ ilo_redfish_info:
+ category: Sessions
+ command: GetiLOSessions
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ register: result_sessions
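
The registered result_sessions is never inspected in this file. A follow-up task such as this illustrative sketch would at least surface what GetiLOSessions returned:

- name: show the collected session data (illustrative only)
  debug:
    var: result_sessions
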
diff --git a/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases b/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases
new file mode 100644
index 000000000..8c1a7b329
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/influxdb_user/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+disabled
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml
new file mode 100644
index 000000000..34b3a3a6c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/influxdb_user/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_influxdb
diff --git a/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml
new file mode 100644
index 000000000..7da2f85e5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: tests.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'
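
The compound 'when' gates the real tests to Ubuntu 14.04 (trusty), presumably the only platform the setup_influxdb dependency handles. The equivalent list form, shown as a sketch, treats each line as an implicit 'and' and is easier to extend:

- include_tasks: tests.yml
  when:
    - ansible_distribution == 'Ubuntu'
    - ansible_distribution_release == 'trusty'
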
diff --git a/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml
new file mode 100644
index 000000000..be36ee691
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/influxdb_user/tasks/tests.yml
@@ -0,0 +1,143 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install influxdb python module
+ pip: name=influxdb
+
+- name: Test add admin user in check mode
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ check_mode: true
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds with a change
+ assert:
+ that:
+ - add_admin_user is changed
+
+- name: Test add admin user
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds with a change
+ assert:
+ that:
+ - add_admin_user is changed
+
+- name: Test add admin user idempotence
+ block:
+ - name: Add admin user
+ influxdb_user: user_name=admin user_password=admin admin=yes
+ register: add_admin_user
+
+ - name: Check that admin user adding succeeds without a change
+ assert:
+ that:
+ - add_admin_user is not changed
+
+- name: Enable authentication and restart service
+ block:
+ - name: Enable authentication
+ lineinfile:
+ path: /etc/influxdb/influxdb.conf
+ regexp: 'auth-enabled ='
+ line: ' auth-enabled = true'
+
+ - name: Restart InfluxDB service
+ service: name=influxdb state=restarted
+
+- name: Test add user in check mode when authentication enabled
+ block:
+ - name: Add user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ check_mode: true
+ register: add_user_with_auth_enabled
+
+ - name: Check that adding user with enabled authentication succeeds with a change
+ assert:
+ that:
+ - add_user_with_auth_enabled is changed
+
+- name: Test add user when authentication enabled
+ block:
+ - name: Add user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ register: add_user_with_auth_enabled
+
+ - name: Check that adding user with enabled authentication succeeds with a change
+ assert:
+ that:
+ - add_user_with_auth_enabled is changed
+
+- name: Test add user when authentication enabled idempotence
+ block:
+ - name: Add the same user
+ influxdb_user: user_name=user user_password=user login_username=admin login_password=admin
+ register: same_user
+
+ - name: Check that adding same user succeeds without a change
+ assert:
+ that:
+ - same_user is not changed
+
+- name: Test change user password in check mode
+ block:
+ - name: Change user password
+ influxdb_user: user_name=user user_password=user2 login_username=admin login_password=admin
+ check_mode: true
+ register: change_password
+
+ - name: Check that password changing succeeds with a change
+ assert:
+ that:
+ - change_password is changed
+
+- name: Test change user password
+ block:
+ - name: Change user password
+ influxdb_user: user_name=user user_password=user2 login_username=admin login_password=admin
+ register: change_password
+
+ - name: Check that password changing succeeds with a change
+ assert:
+ that:
+ - change_password is changed
+
+- name: Test remove user in check mode
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ check_mode: true
+ register: remove_user
+
+ - name: Check that removing user succeeds with a change
+ assert:
+ that:
+ - remove_user is changed
+
+- name: Test remove user
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ register: remove_user
+
+ - name: Check that removing user succeeds with a change
+ assert:
+ that:
+ - remove_user is changed
+
+- name: Test remove user idempotence
+ block:
+ - name: Remove user
+ influxdb_user: user_name=user state=absent login_username=admin login_password=admin
+ register: remove_user
+
+ - name: Check that removing user succeeds without a change
+ assert:
+ that:
+ - remove_user is not changed
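
These tasks use the inline key=value shorthand throughout. The dict form is equivalent and diffs more cleanly; one task rewritten as a sketch:

- name: Add admin user (dict form of the inline syntax used above)
  influxdb_user:
    user_name: admin
    user_password: admin
    admin: true    # the inline 'admin=yes' parses to this boolean
  register: add_admin_user
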
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/aliases b/ansible_collections/community/general/tests/integration/targets/ini_file/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml
new file mode 100644
index 000000000..11c5bf3b2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for ini_file plugins
+# Copyright (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: record the output directory
+ set_fact:
+ output_file: "{{ remote_tmp_dir }}/foo.ini"
+ non_existing_file: "{{ remote_tmp_dir }}/bar.ini"
+
+- name: include tasks
+ block:
+
+ - name: include tasks to perform basic tests
+ include_tasks: tests/00-basic.yml
+
+ - name: reset output file
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+ - name: include tasks to perform tests with parameter "value"
+ include_tasks: tests/01-value.yml
+
+ - name: reset output file
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+ - name: include tasks to perform tests with parameter "values"
+ include_tasks: tests/02-values.yml
+
+ - name: include tasks to test regressions
+ include_tasks: tests/03-encoding.yml
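
remote_tmp_dir is provided by the setup_remote_tmp_dir dependency declared in meta/main.yml. A hypothetical standalone equivalent, for running these tasks outside the integration harness, would create its own scratch directory first:

- name: create a scratch directory (standalone sketch)
  tempfile:
    state: directory
  register: scratch

- name: record the output directory
  set_fact:
    output_file: "{{ scratch.path }}/foo.ini"
    non_existing_file: "{{ scratch.path }}/bar.ini"
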
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/00-basic.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/00-basic.yml
new file mode 100644
index 000000000..c619e937a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/00-basic.yml
@@ -0,0 +1,42 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+## basics
+
+- name: test-basic 1 - specify both "value" and "values" and fail
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ value: lemonade
+ values:
+ - coke
+ - sprite
+ register: result_basic_1
+ ignore_errors: true
+
+- name: test-basic 1 - verify error message
+ assert:
+ that:
+ - result_basic_1 is not changed
+ - result_basic_1 is failed
+ - "result_basic_1.msg == 'parameters are mutually exclusive: value|values'"
+
+
+- name: test-basic 2 - set "create=no" on non-existing file and fail
+ ini_file:
+ path: "{{ non_existing_file }}"
+ section: food
+ create: false
+ value: banana
+ register: result_basic_2
+ ignore_errors: true
+
+- name: test-basic 2 - verify error message
+ assert:
+ that:
+ - result_basic_2 is not changed
+ - result_basic_2 is failed
+ - result_basic_2.msg == "Destination {{ non_existing_file }} does not exist!"
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/01-value.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/01-value.yml
new file mode 100644
index 000000000..f95f166fe
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/01-value.yml
@@ -0,0 +1,592 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+## testing value
+
+- name: test-value 1 - set "state=present" and "value=null" and "allow_no_value=false" and fail
+ ini_file:
+ path: "{{ output_file }}"
+ section: cars
+ option: audi
+ value: null
+ allow_no_value: false
+ register: result_value_1
+ ignore_errors: true
+
+- name: test-value 1 - verify error message
+ assert:
+ that:
+ - result_value_1 is not changed
+ - result_value_1 is failed
+ - result_value_1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False."
+
+
+- name: test-value 2 - set "state=present" and omit "value" and "allow_no_value=false" and fail
+ ini_file:
+ path: "{{ output_file }}"
+ section: cars
+ option: audi
+ allow_no_value: false
+ register: result_value_2
+ ignore_errors: true
+
+- name: test-value 2 - verify error message
+ assert:
+ that:
+ - result_value_2 is not changed
+ - result_value_2 is failed
+ - result_value_2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False."
+
+
+- name: test-value 3 - add "fav=lemonade" in section "[drinks]" in specified file
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ value: lemonade
+ register: result3
+
+- name: test-value 3 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 3 - set expected content and get current ini file content
+ set_fact:
+ expected3: |
+
+ [drinks]
+ fav = lemonade
+ content3: "{{ output_content.content | b64decode }}"
+
+- name: test-value 3 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result3 is changed
+ - result3.msg == 'section and option added'
+ - content3 == expected3
+
+
+- name: test-value 4 - add "fav=lemonade" is in section "[drinks]" again
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ value: lemonade
+ register: result4
+
+- name: test-value 4 - Ensure unchanged
+ assert:
+ that:
+ - result4 is not changed
+ - result4.msg == 'OK'
+
+
+- name: test-value 5 - Ensure "beverage=coke" is in section "[drinks]"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ value: coke
+ register: result5
+
+- name: test-value 5 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 5 - set expected content and get current ini file content
+ set_fact:
+ expected5: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ content5: "{{ output_content.content | b64decode }}"
+
+- name: test-value 5 - assert 'changed' is true and content is OK
+ assert:
+ that:
+ - result5 is changed
+ - result5.msg == 'option added'
+ - content5 == expected5
+
+
+- name: test-value 6 - Remove option "beverage=coke"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ state: absent
+ register: result6
+
+- name: test-value 6 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 6 - set expected content and get current ini file content
+ set_fact:
+ expected6: |
+
+ [drinks]
+ fav = lemonade
+ content6: "{{ output_content.content | b64decode }}"
+
+- name: test-value 6 - assert 'changed' is true and content is as expected
+ assert:
+ that:
+ - result6 is changed
+ - result6.msg == 'option changed'
+ - content6 == expected6
+
+
+- name: test-value 7 - remove section 'drinks'
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ state: absent
+ register: result7
+
+- name: test-value 7 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 7 - get current ini file content
+ set_fact:
+ content7: "{{ output_content.content | b64decode }}"
+
+- name: test-value 7 - assert 'changed' is true and content is empty
+ assert:
+ that:
+ - result7 is changed
+ - result7.msg == 'section removed'
+ - content7 == "\n"
+
+
+# allow_no_value
+
+- name: test-value 8 - test allow_no_value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: true
+ register: result8
+
+- name: test-value 8 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 8 - set expected content and get current ini file content
+ set_fact:
+ content8: "{{ output_content.content | b64decode }}"
+ expected8: |
+
+ [mysqld]
+ skip-name
+
+- name: test-value 8 - assert 'changed' is true and section and option added
+ assert:
+ that:
+ - result8 is changed
+ - result8.msg == 'section and option added'
+ - content8 == expected8
+
+
+- name: test-value 9 - test allow_no_value idempotency
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: true
+ register: result9
+
+- name: test-value 9 - assert 'changed' is false
+ assert:
+ that:
+ - result9 is not changed
+ - result9.msg == 'OK'
+
+
+- name: test-value 10 - test create empty section
+ ini_file:
+ path: "{{ output_file }}"
+ section: new_empty_section
+ allow_no_value: true
+ register: result10
+
+- name: test-value 10 - assert 'changed' is true and section added
+ assert:
+ that:
+ - result10 is changed
+ - result10.msg == 'only section added'
+
+
+- name: test-value 11 - test create empty section idempotency
+ ini_file:
+ path: "{{ output_file }}"
+ section: new_empty_section
+ allow_no_value: true
+ register: result11
+
+- name: test-value 11 - assert 'changed' is false
+ assert:
+ that:
+ - result11 is not changed
+ - result11.msg == 'OK'
+
+
+- name: test-value 12 - test remove empty section
+ ini_file:
+ state: absent
+ path: "{{ output_file }}"
+ section: new_empty_section
+ allow_no_value: true
+
+- name: test-value 12 - test allow_no_value with loop
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: "{{ item.o }}"
+ value: "{{ item.v | d(omit) }}"
+ allow_no_value: true
+ loop:
+ - { o: "skip-name-resolve" }
+ - { o: "max_connections", v: "500" }
+
+- name: test-value 12 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 12 - set expected content and get current ini file content
+ set_fact:
+ content12: "{{ output_content.content | b64decode }}"
+ expected12: |
+
+ [mysqld]
+ skip-name
+ skip-name-resolve
+ max_connections = 500
+
+- name: test-value 12 - Verify content of ini file is as expected
+ assert:
+ that:
+ - content12 == expected12
+
+
+- name: test-value 13 - change option with no value to option with value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ value: myvalue
+ register: result13
+
+- name: test-value 13 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 13 - set expected content and get current ini file content
+ set_fact:
+ content13: "{{ output_content.content | b64decode }}"
+ expected13: |
+
+ [mysqld]
+ skip-name = myvalue
+ skip-name-resolve
+ max_connections = 500
+
+- name: test-value 13 - assert 'changed' and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result13 is changed
+ - result13.msg == 'option changed'
+ - content13 == expected13
+
+
+- name: test-value 14 - change option with value to option with no value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: true
+ register: result14
+
+- name: test-value 14 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 14 - set expected content and get current ini file content
+ set_fact:
+ content14: "{{ output_content.content | b64decode }}"
+ expected14: |
+
+ [mysqld]
+ skip-name
+ skip-name-resolve
+ max_connections = 500
+
+- name: test-value 14 - assert 'changed' is true and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result14 is changed
+ - result14.msg == 'option changed'
+ - content14 == expected14
+
+
+- name: test-value 15 - Remove option with no value
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name-resolve
+ state: absent
+ register: result15
+
+- name: test-value 15 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 15 - set expected content and get current ini file content
+ set_fact:
+ content15: "{{ output_content.content | b64decode }}"
+ expected15: |
+
+ [mysqld]
+ skip-name
+ max_connections = 500
+
+- name: test-value 15 - assert 'changed' is true and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result15 is changed
+ - result15.msg == 'option changed'
+ - content15 == expected15
+
+
+- name: test-value 16 - Clean test file
+ copy:
+ content: ""
+ dest: "{{ output_file }}"
+ force: true
+
+- name: test-value 16 - Ensure "beverage=coke" is created within no section
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ value: coke
+ register: result16
+
+- name: test-value 16 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 16 - set expected content and get current ini file content
+ set_fact:
+ expected16: |+
+ beverage = coke
+
+ content16: "{{ output_content.content | b64decode }}"
+
+- name: test-value 16 - assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result16 is changed
+ - result16.msg == 'option added'
+ - content16 == expected16
+
+
+- name: test-value 17 - Ensure "beverage=coke" is modified as "beverage=water" within no section
+ ini_file:
+ path: "{{ output_file }}"
+ option: beverage
+ value: water
+ section:
+ register: result17
+
+- name: test-value 17 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 17 - set expected content and get current ini file content
+ set_fact:
+ expected17: |+
+ beverage = water
+
+ content17: "{{ output_content.content | b64decode }}"
+
+- name: test-value 17 - assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result17 is changed
+ - result17.msg == 'option changed'
+ - content17 == expected17
+
+
+- name: test-value 18 - remove option 'beverage' within no section
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ state: absent
+ register: result18
+
+- name: test-value 18 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 18 - get current ini file content
+ set_fact:
+ content18: "{{ output_content.content | b64decode }}"
+
+- name: test-value 18 - assert 'changed' is true and option is removed (no section)
+ assert:
+ that:
+ - result18 is changed
+ - result18.msg == 'option changed'
+ - content18 == "\n"
+
+
+- name: test-value 19 - Check add option without section before existing section
+ block:
+ - name: test-value 19 - Add option with section
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ value: water
+ - name: test-value 19 - Add option without section
+ ini_file:
+ path: "{{ output_file }}"
+ section:
+ option: like
+ value: tea
+
+- name: test-value 19 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 19 - set expected content and get current ini file content
+ set_fact:
+ expected19: |
+ like = tea
+
+ [drinks]
+ beverage = water
+ content19: "{{ output_content.content | b64decode }}"
+
+- name: test-value 19 - Verify content of ini file is as expected
+ assert:
+ that:
+ - content19 == expected19
+
+
+- name: test-value 20 - Check add option with empty string value
+ block:
+ - name: test-value 20 - Remove drinks
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ state: absent
+ - name: test-value 20 - Remove tea
+ ini_file:
+ path: "{{ output_file }}"
+ section:
+ option: like
+ value: tea
+ state: absent
+ # See https://github.com/ansible-collections/community.general/issues/3031
+ - name: test-value 20 - Tests with empty strings
+ ini_file:
+ path: "{{ output_file }}"
+ section: "{{ item.section | d('extensions') }}"
+ option: "{{ item.option }}"
+ value: ""
+ allow_no_value: "{{ item.no_value | d(omit) }}"
+ loop:
+ - option: evolve
+ - option: regress
+ - section: foobar
+ option: foo
+ no_value: true
+ - option: improve
+ no_value: true
+
+- name: test-value 20 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 20 - set expected content and get current ini file content
+ set_fact:
+ expected20: |+
+
+ [extensions]
+ evolve =
+ regress =
+ improve =
+ [foobar]
+ foo =
+ content20: "{{ output_content.content | b64decode }}"
+
+- name: test-value 20 - Verify content of ini file is as expected
+ assert:
+ that:
+ - content20 == expected20
+
+
+- name: test-value 21 - Create starting ini file
+ copy:
+ # The content below is the following text file with BOM:
+ # [section1]
+ # var1=aaa
+ # var2=bbb
+ # [section2]
+ # var3=ccc
+ content: !!binary |
+ 77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg==
+ dest: "{{ output_file }}"
+
+- name: test-value 21 - Test ini breakage
+ ini_file:
+ path: "{{ output_file }}"
+ section: section1
+ option: var4
+ value: 0
+ register: result21
+
+- name: test-value 21 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 21 - set expected content and get current ini file content
+ set_fact:
+ expected21: |
+ [section1]
+ var1=aaa
+ var2=bbb
+ var4 = 0
+ [section2]
+ var3=ccc
+ content21: "{{ output_content.content | b64decode }}"
+
+- name: test-value 21 - Verify content of ini file is as expected
+ assert:
+ that:
+ - result21 is changed
+ - result21.msg == 'option added'
+ - content21 == expected21
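
The !!binary fixture in test-value 21 is ordinary base64: its first three bytes are EF BB BF (the UTF-8 BOM), followed by the ini text quoted in the comment. A throwaway task to confirm the decoding, illustrative only:

- name: show the decoded BOM fixture (illustrative only)
  debug:
    msg: "{{ '77u/W3NlY3Rpb24xXQp2YXIxPWFhYQp2YXIyPWJiYgpbc2VjdGlvbjJdCnZhcjM9Y2NjCg==' | b64decode }}"
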
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/02-values.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/02-values.yml
new file mode 100644
index 000000000..edfc93e42
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/02-values.yml
@@ -0,0 +1,1023 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+## testing values
+
+- name: "test-values 1 - set 'state=present' and 'values=[]' and 'allow_no_value=false' and fail"
+ ini_file:
+ path: "{{ output_file }}"
+ section: cars
+ option: audi
+ values: []
+ allow_no_value: false
+ register: result1
+ ignore_errors: true
+
+- name: test-values 1 - verify error message
+ assert:
+ that:
+ - result1 is not changed
+ - result1 is failed
+ - result1.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False."
+
+
+- name: "test-values 2 - set 'state=present' and omit 'values' and 'allow_no_value=false' and fail"
+ ini_file:
+ path: "{{ output_file }}"
+ section: cars
+ option: audi
+ allow_no_value: false
+ register: result2
+ ignore_errors: true
+
+- name: test-values 2 - verify error message
+ assert:
+ that:
+ - result2 is not changed
+ - result2 is failed
+ - result2.msg == "Parameter 'value(s)' must be defined if state=present and allow_no_value=False."
+
+
+- name: "test-values 3 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ - cocktail
+ state: present
+ register: result3
+
+- name: test-values 3 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 3 - set expected content and get current ini file content
+ set_fact:
+ expected3: |
+
+ [drinks]
+ fav = lemonade
+ fav = cocktail
+ content3: "{{ output_content.content | b64decode }}"
+
+- name: test-values 3 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result3 is changed
+ - result3.msg == 'section and option added'
+ - content3 == expected3
+
+
+- name: "test-values 4 - remove option 'fav=lemonade' from section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: absent
+ exclusive: false
+ register: result4
+
+- name: test-values 4 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 4 - set expected content and get current ini file content
+ set_fact:
+ expected4: |
+
+ [drinks]
+ fav = cocktail
+ content4: "{{ output_content.content | b64decode }}"
+
+- name: test-values 4 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result4 is changed
+ - result4.msg == 'option changed'
+ - content4 == expected4
+
+
+- name: "test-values 5 - add option 'fav=lemonade' in section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: present
+ exclusive: false
+ register: result5
+
+- name: test-values 5 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 5 - set expected content and get current ini file content
+ set_fact:
+ expected5: |
+
+ [drinks]
+ fav = cocktail
+ fav = lemonade
+ content5: "{{ output_content.content | b64decode }}"
+
+- name: test-values 5 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result5 is changed
+ - result5.msg == 'option added'
+ - content5 == expected5
+
+
+- name: "test-values 6 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ - cocktail
+ state: present
+ register: result6
+
+- name: test-values 6 - Ensure unchanged
+ assert:
+ that:
+ - result6 is not changed
+ - result6.msg == 'OK'
+
+
+- name: "test-values 7 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ - lemonade
+ state: present
+ register: result7
+
+- name: test-values 7 - Ensure unchanged
+ assert:
+ that:
+ - result7 is not changed
+ - result7.msg == 'OK'
+
+
+- name: "test-values 8 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: present
+ exclusive: false
+ register: result8
+
+- name: test-values 8 - Ensure unchanged
+ assert:
+ that:
+ - result8 is not changed
+ - result8.msg == 'OK'
+
+
+- name: "test-values 9 - ensure only 'fav=lemonade' is 'present' in section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: present
+ register: result9
+
+- name: test-values 9 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 9 - set expected content and get current ini file content
+ set_fact:
+ expected9: |
+
+ [drinks]
+ fav = lemonade
+ content9: "{{ output_content.content | b64decode }}"
+
+- name: test-values 9 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result9 is changed
+ - result9.msg == 'option changed'
+ - content9 == expected9
+
+
+- name: "test-values 10 - remove non-existent 'fav=cocktail' from section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ state: absent
+ register: result10
+
+- name: test-values 10 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 10 - set expected content and get current ini file content
+ set_fact:
+ expected10: |
+
+ [drinks]
+ content10: "{{ output_content.content | b64decode }}"
+
+
+- name: test-values 10 - assert 'changed' is true and content is as expected
+ assert:
+ that:
+ - result10 is changed
+ - result10.msg == 'option changed'
+ - content10 == expected10
+
+
+- name: "test-values 11 - Ensure 'fav=lemonade' and 'beverage=coke' is 'present' in section '[drinks]'"
+ block:
+  - name: "test-values 11 - resetting ini_file: Ensure 'fav=lemonade' is 'present' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: present
+ - name: "test-values 11 - Ensure 'beverage=coke' is 'present' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ values:
+ - coke
+ state: present
+ register: result11
+
+- name: test-values 11 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 11 - set expected content and get current ini file content
+ set_fact:
+ expected11: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ content11: "{{ output_content.content | b64decode }}"
+
+- name: test-values 11 - assert 'changed' is true and content is OK
+ assert:
+ that:
+ - result11 is changed
+ - result11.msg == 'option added'
+ - content11 == expected11
+
+
+- name: "test-values 12 - add option 'fav=lemonade' in section '[drinks]' again and ensure idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ state: present
+ exclusive: false
+ register: result12
+
+- name: test-values 12 - Ensure unchanged
+ assert:
+ that:
+ - result12 is not changed
+ - result12.msg == 'OK'
+
+
+- name: "test-values 13 - add option 'fav=cocktail' in section '[drinks]' in specified file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ state: present
+ exclusive: false
+ register: result13
+
+- name: test-values 13 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 13 - set expected content and get current ini file content
+ set_fact:
+ expected13: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ fav = cocktail
+ content13: "{{ output_content.content | b64decode }}"
+
+- name: test-values 13 - Verify content of ini file is as expected and ini_file 'changed' is true
+ assert:
+ that:
+ - result13 is changed
+ - result13.msg == 'option added'
+ - content13 == expected13
+
+
+- name: "test-values 14 - Ensure 'refreshment=[water, juice, soft drink]' is 'present' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: refreshment
+ values:
+ - water
+ - juice
+ - soft drink
+ state: present
+ register: result14
+
+- name: test-values 14 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 14 - set expected content and get current ini file content
+ set_fact:
+ expected14: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ fav = cocktail
+ refreshment = water
+ refreshment = juice
+ refreshment = soft drink
+ content14: "{{ output_content.content | b64decode }}"
+
+- name: test-values 14 - assert 'changed' is true and content is OK
+ assert:
+ that:
+ - result14 is changed
+ - result14.msg == 'option added'
+ - content14 == expected14
+
+
+- name: "test-values 15 - ensure 'fav=lemonade' and 'fav=cocktail' is 'present' in section '[drinks]' and check for idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - lemonade
+ - cocktail
+ state: present
+ register: result15
+
+- name: test-values 15 - Ensure unchanged
+ assert:
+ that:
+ - result15 is not changed
+ - result15.msg == 'OK'
+
+
+- name: "test-values 16 - ensure 'fav=cocktail' and 'fav=lemonade' (list reverse order) is 'present' in section '[drinks]' and check for idempotency"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ - lemonade
+ state: present
+ register: result16
+
+- name: test-values 16 - Ensure unchanged
+ assert:
+ that:
+ - result16 is not changed
+ - result16.msg == 'OK'
+
+
+- name: "test-values 17 - Ensure option 'refreshment' is 'absent' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: refreshment
+ state: absent
+ register: result17
+
+- name: test-values 17 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 17 - set expected content and get current ini file content
+ set_fact:
+ expected17: |
+
+ [drinks]
+ fav = lemonade
+ beverage = coke
+ fav = cocktail
+ content17: "{{ output_content.content | b64decode }}"
+
+- name: test-values 17 - assert 'changed' is true and content is as expected
+ assert:
+ that:
+ - result17 is changed
+ - result17.msg == 'option changed'
+ - content17 == expected17
+
+
+- name: "test-values 18 - Ensure 'beverage=coke' is 'absent' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ state: absent
+ register: result18
+
+- name: test-values 18 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 18 - set expected content and get current ini file content
+ set_fact:
+ expected18: |
+
+ [drinks]
+ fav = lemonade
+ fav = cocktail
+ content18: "{{ output_content.content | b64decode }}"
+
+- name: test-values 18 - assert 'changed' is true and content is as expected
+ assert:
+ that:
+ - result18 is changed
+ - result18.msg == 'option changed'
+ - content18 == expected18
+
+
+- name: "test-values 19 - Ensure non-existent 'beverage=coke' is 'absent' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ values:
+ - coke
+ state: absent
+ register: result19
+
+- name: test-values 19 - Ensure unchanged
+ assert:
+ that:
+ - result19 is not changed
+ - result19.msg == 'OK'
+
+
+- name: test-values 20 - remove section 'drinks'
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ state: absent
+ register: result20
+
+- name: test-values 20 - remove section 'drinks' again to ensure idempotency
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ state: absent
+ register: result20_remove_again
+
+- name: test-values 20 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 20 - get current ini file content
+ set_fact:
+ content20: "{{ output_content.content | b64decode }}"
+
+- name: test-values 20 - assert 'changed' is true and content is empty
+ assert:
+ that:
+ - result20 is changed
+ - result20_remove_again is not changed
+ - result20.msg == 'section removed'
+ - content20 == "\n"
+
+
+- name: "test-values 21 - Ensure 'refreshment=[water, juice, soft drink, juice]' (duplicates removed) is 'present' in section '[drinks]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: refreshment
+ values:
+ - water
+ - juice
+ - soft drink
+ - juice
+ state: present
+ register: result21
+
+- name: test-values 21 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 21 - set expected content and get current ini file content
+ set_fact:
+ expected21: |
+
+ [drinks]
+ refreshment = water
+ refreshment = juice
+ refreshment = soft drink
+ content21: "{{ output_content.content | b64decode }}"
+
+- name: test-values 21 - assert 'changed' is true and content is OK
+ assert:
+ that:
+ - result21 is changed
+ - result21.msg == 'section and option added'
+ - content21 == expected21
+
+
+- name: test-values 22 - Create starting ini file
+ copy:
+ content: |
+
+ # Some comment to test
+ [mysqld]
+ connect_timeout = 300
+ max_connections = 1000
+ [section1]
+ var1 = aaa
+ # comment in section
+ # var2 = some value
+ # comment after section
+
+ [section2]
+ var3 = ccc
+ # comment after section
+ dest: "{{ output_file }}"
+
+- name: "test-values 22 - Ensure 'skip-name' with 'allow_no_value' is 'present' in section '[mysqld]' test allow_no_value"
+ ini_file:
+ path: "{{ output_file }}"
+ section: mysqld
+ option: skip-name
+ allow_no_value: true
+ state: present
+ register: result22
+
+- name: test-values 22 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 22 - set expected content and get current ini file content
+ set_fact:
+ expected22: |
+
+ # Some comment to test
+ [mysqld]
+ connect_timeout = 300
+ max_connections = 1000
+ skip-name
+ [section1]
+ var1 = aaa
+ # comment in section
+ # var2 = some value
+ # comment after section
+
+ [section2]
+ var3 = ccc
+ # comment after section
+ content22: "{{ output_content.content | b64decode }}"
+
+- name: test-values 22 - assert 'changed' is true and content is OK and option added
+ assert:
+ that:
+ - result22 is changed
+ - result22.msg == 'option added'
+ - content22 == expected22
+
+
+- name: "test-values 23 - Ensure 'var2=foo' is 'present' in section '[section1]', replacing commented option 'var2=some value'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: section1
+ option: var2
+ values:
+ - foo
+ state: present
+ register: result23
+
+- name: test-values 23 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 23 - set expected content and get current ini file content
+ set_fact:
+ expected23: |
+
+ # Some comment to test
+ [mysqld]
+ connect_timeout = 300
+ max_connections = 1000
+ skip-name
+ [section1]
+ var1 = aaa
+ # comment in section
+ var2 = foo
+ # comment after section
+
+ [section2]
+ var3 = ccc
+ # comment after section
+ content23: "{{ output_content.content | b64decode }}"
+
+- name: test-values 23 - assert 'changed' and msg 'option changed' and content is as expected
+ assert:
+ that:
+ - result23 is changed
+ - result23.msg == 'option changed'
+ - content23 == expected23
+
+
+- name: "test-values 24 - Ensure 'var2=[foo, foobar]' is 'present' in section '[section1]'"
+ ini_file:
+ path: "{{ output_file }}"
+ section: section1
+ option: var2
+ values:
+ - foo
+ - foobar
+ state: present
+ register: result24
+
+- name: test-values 24 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 24 - set expected content and get current ini file content
+ set_fact:
+ expected24: |
+
+ # Some comment to test
+ [mysqld]
+ connect_timeout = 300
+ max_connections = 1000
+ skip-name
+ [section1]
+ var1 = aaa
+ # comment in section
+ var2 = foo
+ var2 = foobar
+ # comment after section
+
+ [section2]
+ var3 = ccc
+ # comment after section
+ content24: "{{ output_content.content | b64decode }}"
+
+- name: test-values 24 - assert 'changed' and msg 'option added' and content is as expected
+ assert:
+ that:
+ - result24 is changed
+ - result24.msg == 'option added'
+ - content24 == expected24
+
+
+- name: test-values 25 - Clean test file
+ copy:
+ content: ""
+ dest: "{{ output_file }}"
+ force: true
+
+- name: "test-values 25 - Ensure 'beverage=[coke, pepsi]' is created within no section"
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ values:
+ - coke
+ - pepsi
+ state: present
+ register: result25
+
+- name: test-values 25 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 25 - set expected content and get current ini file content
+ set_fact:
+ expected25: |+
+ beverage = coke
+ beverage = pepsi
+
+ content25: "{{ output_content.content | b64decode }}"
+
+- name: test-values 25 - assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result25 is changed
+ - result25.msg == 'option added'
+ - content25 == expected25
+
+
+- name: "test-values 26 - Ensure 'beverage=coke' and 'beverage=pepsi' are modified within no section"
+ ini_file:
+ path: "{{ output_file }}"
+ option: beverage
+ values:
+ - water
+ - orange juice
+ section:
+ state: present
+ exclusive: true
+ register: result26
+
+- name: test-values 26 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 26 - set expected content and get current ini file content
+ set_fact:
+ expected26: |+
+ beverage = water
+ beverage = orange juice
+
+ content26: "{{ output_content.content | b64decode }}"
+
+- name: test-values 26 - assert 'changed' is true and content is OK (no section)
+ assert:
+ that:
+ - result26 is changed
+ - result26.msg == 'option changed'
+ - content26 == expected26
+
+
+- name: "test-values 27 - ensure option 'beverage' is 'absent' within no section"
+ ini_file:
+ section:
+ path: "{{ output_file }}"
+ option: beverage
+ state: absent
+ register: result27
+
+- name: test-values 27 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 27 - get current ini file content
+ set_fact:
+ content27: "{{ output_content.content | b64decode }}"
+
+- name: test-values 27 - assert changed (no section)
+ assert:
+ that:
+ - result27 is changed
+ - result27.msg == 'option changed'
+ - content27 == "\n"
+
+
+- name: "test-values 28 - Ensure option 'present' without section before existing section"
+ block:
+ - name: test-values 28 - ensure option present within section
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: beverage
+ values:
+ - water
+ - orange juice
+ state: present
+
+ - name: test-values 28 - ensure option present without section
+ ini_file:
+ path: "{{ output_file }}"
+ section:
+ option: like
+ values:
+ - tea
+ - coffee
+ state: present
+
+- name: test-values 28 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-values 28 - set expected content and get current ini file content
+ set_fact:
+ expected28: |
+ like = tea
+ like = coffee
+
+ [drinks]
+ beverage = water
+ beverage = orange juice
+ content28: "{{ output_content.content | b64decode }}"
+
+- name: test-values 28 - Verify content of ini file is as expected
+ assert:
+ that:
+ - content28 == expected28
+
+
+- name: test-value 29 - Create starting ini file
+ copy:
+ content: |
+ [drinks]
+ fav = cocktail
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ dest: "{{ output_file }}"
+
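+# With the default 'exclusive=true', 'state=absent' removes every line of the
+# option, not only the listed value.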
+- name: "test-value 29 - Test 'state=absent' with 'exclusive=true' with multiple options in ini_file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ state: absent
+ register: result29
+
+- name: test-value 29 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 29 - set expected content and get current ini file content
+ set_fact:
+ expected29: |
+ [drinks]
+ beverage = water
+ beverage = orange juice
+ content29: "{{ output_content.content | b64decode }}"
+
+- name: test-value 29 - Verify content of ini file is as expected
+ assert:
+ that:
+ - result29 is changed
+ - result29.msg == 'option changed'
+ - content29 == expected29
+
+
+- name: test-value 30 - Create starting ini file
+ copy:
+ content: |
+ [drinks]
+ fav = cocktail
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ dest: "{{ output_file }}"
+
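+# With 'exclusive=false', 'state=absent' removes only the listed values and
+# keeps the other lines of the option.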
+- name: "test-value 30 - Test 'state=absent' with 'exclusive=false' with multiple options in ini_file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ values:
+ - cocktail
+ state: absent
+ exclusive: false
+ register: result30
+
+- name: test-value 30 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 30 - set expected content and get current ini file content
+ set_fact:
+ expected30: |
+ [drinks]
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ content30: "{{ output_content.content | b64decode }}"
+
+- name: test-value 30 - Verify content of ini file is as expected
+ assert:
+ that:
+ - result30 is changed
+ - result30.msg == 'option changed'
+ - content30 == expected30
+
+
+- name: test-value 31 - Create starting ini file
+ copy:
+ content: |
+ [drinks]
+ fav = cocktail
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ dest: "{{ output_file }}"
+
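+# With no values given, 'state=absent' (default 'exclusive=true') removes
+# every occurrence of the option.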
+- name: "test-value 31 - Test 'state=absent' with 'exclusive=true' and no value given with multiple options in ini_file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ state: absent
+ register: result31
+
+- name: test-value 31 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 31 - set expected content and get current ini file content
+ set_fact:
+ expected31: |
+ [drinks]
+ beverage = water
+ beverage = orange juice
+ content31: "{{ output_content.content | b64decode }}"
+
+- name: test-value 31 - Verify content of ini file is as expected
+ assert:
+ that:
+ - result31 is changed
+ - result31.msg == 'option changed'
+ - content31 == expected31
+
+
+- name: test-value 32 - Create starting ini file
+ copy:
+ content: |
+ [drinks]
+ fav = cocktail
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ dest: "{{ output_file }}"
+
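+# With 'exclusive=false' and no values given there is nothing to match, so
+# the module must leave the file untouched and report 'OK'.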
+- name: "test-value 32 - Test 'state=absent' with 'exclusive=false' and no value given with multiple options in ini_file"
+ ini_file:
+ path: "{{ output_file }}"
+ section: drinks
+ option: fav
+ state: absent
+ exclusive: false
+ register: result32
+ diff: true
+
+- name: test-value 32 - read content from output file
+ slurp:
+ src: "{{ output_file }}"
+ register: output_content
+
+- name: test-value 32 - set expected content and get current ini file content
+ set_fact:
+ expected32: |
+ [drinks]
+ fav = cocktail
+ beverage = water
+ fav = lemonade
+ beverage = orange juice
+ content32: "{{ output_content.content | b64decode }}"
+
+- name: test-value 32 - Verify content of ini file is as expected
+ assert:
+ that:
+ - result32 is not changed
+ - result32.msg == 'OK'
+ - content32 == expected32
diff --git a/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml
new file mode 100644
index 000000000..555dd576c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ini_file/tasks/tests/03-encoding.yml
@@ -0,0 +1,44 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Regression test for https://github.com/ansible-collections/community.general/pull/2578#issuecomment-868092282
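+# The fixture is embedded as !!binary (base64) so its UTF-8 bytes reach the
+# target byte-for-byte, regardless of how this playbook itself is encoded.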
+- name: Create UTF-8 test file
+ copy:
+ content: !!binary |
+ W2FwcDptYWluXQphdmFpbGFibGVfbGFuZ3VhZ2VzID0gZW4gZnIgZXMgZGUgcHQgamEgbHQgemhf
+ VFcgaWQgZGEgcHRfQlIgcnUgc2wgaXQgbmxfTkwgdWsgdGEgc2kgY3MgbmIgaHUKIyBGdWxsIGxh
+ bmd1YWdlIG5hbWVzIGluIG5hdGl2ZSBsYW5ndWFnZSAoY29tbWEgc2VwYXJhdGVkKQphdmFpbGFi
+ bGVfbGFuZ3VhZ2VzX2Z1bGwgPSBFbmdsaXNoLCBGcmFuw6dhaXMsIEVzcGHDsW9sLCBEZXV0c2No
+ LCBQb3J0dWd1w6pzLCDml6XmnKzoqp4sIExpZXR1dm9zLCDkuK3mlocsIEluZG9uZXNpYSwgRGFu
+ c2ssIFBvcnR1Z3XDqnMgKEJyYXNpbCksINCg0YPRgdGB0LrQuNC5LCBTbG92ZW7FocSNaW5hLCBJ
+ dGFsaWFubywgTmVkZXJsYW5kcywg0KPQutGA0LDRl9C90YHRjNC60LAsIOCupOCuruCuv+CutOCv
+ jSwg4LeD4LeS4LaC4LeE4La9LCDEjGVza3ksIEJva23DpWwsIE1hZ3lhcgo=
+ dest: '{{ output_file }}'
+- name: Add entries
+ ini_file:
+ section: "{{ item.section }}"
+ option: "{{ item.option }}"
+ value: "{{ item.value }}"
+ path: '{{ output_file }}'
+ create: true
+ loop:
+ - section: app:main
+ option: sqlalchemy.url
+ value: postgresql://app:secret@database/app
+ - section: handler_filelog
+ option: args
+ value: (sys.stderr,)
+ - section: handler_filelog
+ option: class
+ value: StreamHandler
+ - section: handler_exc_handler
+ option: args
+ value: (sys.stderr,)
+ - section: båz
+ option: fföø
+ value: ḃâŗ
+ - section: båz
+ option: fföø
+ value: bar
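+
+# Hypothetical follow-up check (a minimal sketch, not part of the original
+# regression test): the assertions below would fail if ini_file had mangled
+# the non-ASCII section, option or value names above.
+- name: Read the customized file back
+  slurp:
+    src: '{{ output_file }}'
+  register: encoding_check
+
+- name: Assert the added entries survived with their UTF-8 characters intact
+  assert:
+    that:
+      - "'sqlalchemy.url' in (encoding_check.content | b64decode)"
+      - "'fföø' in (encoding_check.content | b64decode)"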
diff --git a/ansible_collections/community/general/tests/integration/targets/interfaces_file/aliases b/ansible_collections/community/general/tests/integration/targets/interfaces_file/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/interfaces_file/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff b/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff
new file mode 100644
index 000000000..c7f0452df
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iface eno1 inet static
+ address 1.2.3.4
+ netmask 255.255.255.0
+ gateway 1.2.3.1
+ up route add -net 1.2.3.4 netmask 255.255.255.0 gw 1.2.3.1 eno1
+ up ip addr add 4.3.2.1/32 dev eno1
+ down ip addr add 4.3.2.1/32 dev eno1
diff --git a/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff_3841 b/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff_3841
new file mode 100644
index 000000000..9f47879c5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/interfaces_file/files/interfaces_ff_3841
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iface eth0 inet static
+ address 1.2.3.4
+ netmask 255.255.255.0
+ gateway 1.2.3.1
+ up route add -net 1.2.3.4 netmask 255.255.255.0 gw 1.2.3.1 eth0
+ up ip addr add 4.3.2.1/32 dev eth0
+ down ip addr add 4.3.2.1/32 dev eth0
diff --git a/ansible_collections/community/general/tests/integration/targets/interfaces_file/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/interfaces_file/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/interfaces_file/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/interfaces_file/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/interfaces_file/tasks/main.yml
new file mode 100644
index 000000000..918a32331
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/interfaces_file/tasks/main.yml
@@ -0,0 +1,67 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set interfaces test file paths
+ set_fact:
+ interfaces_testfile: '{{ remote_tmp_dir }}/interfaces'
+ interfaces_testfile_3841: '{{ remote_tmp_dir }}/interfaces_3841'
+
+- name: Copy interfaces file
+ copy:
+ src: 'files/interfaces_ff'
+ dest: '{{ interfaces_testfile }}'
+
+- name: Change IP address to 1.2.3.5
+ community.general.interfaces_file:
+ dest: "{{ interfaces_testfile }}"
+ iface: eno1
+ option: address
+ value: 1.2.3.5
+ register: ifile_1
+
+- assert:
+ that:
+ - ifile_1 is changed
+
+- name: Change IP address to 1.2.3.5 again
+ community.general.interfaces_file:
+ dest: "{{ interfaces_testfile }}"
+ iface: eno1
+ option: address
+ value: 1.2.3.5
+ register: ifile_2
+
+- assert:
+ that:
+ - ifile_2 is not changed
+
+- name: 3841 - copy interfaces file
+ copy:
+ src: 'files/interfaces_ff_3841'
+ dest: '{{ interfaces_testfile_3841 }}'
+
+- name: 3841 - floating_ip_interface_up_ip 2a01:a:b:c::1/64 dev eth0
+ interfaces_file:
+ option: up
+ iface: eth0
+ dest: "{{ interfaces_testfile_3841 }}"
+ value: 'ip addr add 2a01:a:b:c::1/64 dev eth0'
+ state: present
+ register: ifile_3841_a
+
+- name: 3841 - floating_ip_interface_up_ip 2a01:a:b:c::1/64 dev eth0 (again)
+ interfaces_file:
+ option: up
+ iface: eth0
+ dest: "{{ interfaces_testfile_3841 }}"
+ value: 'ip addr add 2a01:a:b:c::1/64 dev eth0'
+ state: present
+ register: ifile_3841_b
+
+- name: 3841 - check assertions
+ assert:
+ that:
+ - ifile_3841_a is changed
+ - ifile_3841_b is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases b/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ipify_facts/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml
new file mode 100644
index 000000000..78e44e946
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ipify_facts/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the ipify_facts
+# Copyright (c) 2017, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug: var=ansible_distribution
+- debug: var=ansible_distribution_version
+
+- set_fact:
+ validate_certs: false
+ when: (ansible_distribution == "MacOSX" and ansible_distribution_version == "10.11.1")
+
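+# api.ipify.org is an external service, so the lookup is retried a few times
+# to tolerate transient network failures.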
+- name: get information about current IP using ipify facts
+ ipify_facts:
+ timeout: 30
+ validate_certs: "{{ validate_certs }}"
+ register: external_ip
+ until: external_ip is successful
+ retries: 5
+ delay: 10
+
+- name: check if task was successful
+ assert:
+ that:
+ - external_ip is not changed
+ - external_ip.ansible_facts is defined
+ - external_ip.ansible_facts.ipify_public_ip is defined
diff --git a/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml
new file mode 100644
index 000000000..3a47dfea8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ipify_facts/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+validate_certs: true
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases b/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases
new file mode 100644
index 000000000..5a02a630b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+destructive
+skip/docker # kernel modules not loadable
+skip/freebsd # no iptables/netfilter (Linux specific)
+skip/osx # no iptables/netfilter (Linux specific)
+skip/macos # no iptables/netfilter (Linux specific)
+skip/aix # no iptables/netfilter (Linux specific)
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml
new file mode 100644
index 000000000..a74e74df4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/main.yml
@@ -0,0 +1,38 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: ensure iptables package is installed
+ package:
+ name:
+ - iptables
+ become: true
+
+
+- name: include tasks
+ vars:
+ iptables_saved: "/tmp/test_iptables_state.saved"
+ iptables_tests: "/tmp/test_iptables_state.tests"
+
+ block:
+ - name: include tasks to perform basic tests (check_mode, async, idempotency)
+ include_tasks: tests/00-basic.yml
+
+ - name: include tasks to test tables handling
+ include_tasks: tests/01-tables.yml
+ when:
+ - xtables_lock is undefined
+
+ - name: include tasks to test rollbacks
+ include_tasks: tests/10-rollback.yml
+ when:
+ - xtables_lock is undefined
+ - ansible_connection in ['ssh', 'paramiko', 'smart']
+
+ become: true
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml
new file mode 100644
index 000000000..7b366edce
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/00-basic.yml
@@ -0,0 +1,320 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "ensure our next backup is not there (file)"
+ file:
+ path: "{{ iptables_saved }}"
+ state: absent
+
+- name: "ensure our next rule is not there (iptables)"
+ iptables:
+ chain: OUTPUT
+ jump: ACCEPT
+ state: absent
+
+
+#
+# Basic checks about invalid param/value handling.
+#
+- name: "trigger error about invalid param"
+ iptables_state:
+ name: foobar
+ register: iptables_state
+ ignore_errors: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("Invalid options")
+ quiet: true
+
+
+
+- name: "trigger error about missing param 'state'"
+ iptables_state:
+ path: foobar
+ register: iptables_state
+ ignore_errors: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("missing required arguments")
+ quiet: true
+
+
+
+- name: "trigger error about missing param 'path'"
+ iptables_state:
+ state: saved
+ register: iptables_state
+ ignore_errors: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("missing required arguments")
+ quiet: true
+
+
+
+- name: "trigger error about invalid value for param 'state'"
+ iptables_state:
+ path: foobar
+ state: present
+ register: iptables_state
+ ignore_errors: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.msg is match("value of state must be one of")
+ quiet: true
+
+
+#
+# Play with the current state first. We will create a file to store it in, but
+# no more. These tests are for:
+# - idempotency
+# - check_mode
+#
+- name: "save state (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+ check_mode: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: true
+
+
+
+- name: "save state (must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: true
+
+
+
+- name: "save state (idempotency, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: true
+
+
+
+- name: "save state (check_mode, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+ check_mode: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.saved
+ quiet: true
+
+
+
+# We begin with 'state=restored' by restoring the current state on itself.
+# This at least ensures the file produced with state=saved is suitable for
+# state=restored.
+
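+# Each restore below runs in a block whose rescue tolerates exactly one
+# failure mode: the xtables lock being held by another process. The rescue
+# registers 'xtables_lock', which main.yml uses to skip the table and
+# rollback test suites.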
+- name: "state=restored check_mode=true changed=false"
+ block:
+ - name: "restore state (check_mode, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: true
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (must NOT report a change, warning about rollback & async)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
+
+
+
+- name: "change iptables state (iptables)"
+ iptables:
+ chain: OUTPUT
+ jump: ACCEPT
+
+
+
+- name: "state=restored changed=true"
+ block:
+ - name: "restore state (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: true
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state != iptables_state.restored
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=true"
+ block:
+ - name: "restore state (must report a change, async, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - iptables_state.initial_state != iptables_state.restored
+ - iptables_state.applied
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (must NOT report a change, async, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
+
+
+
+- name: "state=restored changed=false"
+ block:
+ - name: "restore state (check_mode=yes, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ check_mode: true
+
+ - name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+ - iptables_state.initial_state == iptables_state.restored
+ quiet: true
+
+ rescue:
+ - name: "assert that results are not as expected for only one reason (xtables lock)"
+ assert:
+ that:
+ - iptables_state is failed
+ - iptables_state.stderr is search('xtables lock')
+ quiet: true
+ register: xtables_lock
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml
new file mode 100644
index 000000000..8a9869c43
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/01-tables.yml
@@ -0,0 +1,294 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "ensure our next rule is not there (iptables)"
+ iptables:
+ table: nat
+ chain: INPUT
+ jump: ACCEPT
+ state: absent
+
+- name: "get state (table filter)"
+ iptables_state:
+ table: filter
+ state: saved
+ path: "{{ iptables_saved }}"
+ register: iptables_state
+ changed_when: false
+ check_mode: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is undefined
+ quiet: true
+
+
+
+- name: "get state (table nat)"
+ iptables_state:
+ table: nat
+ state: saved
+ path: "{{ iptables_saved }}"
+ register: iptables_state
+ changed_when: false
+ check_mode: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.initial_state"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ quiet: true
+
+
+
+- name: "save state (table filter)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: filter
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.saved"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' not in iptables_state.saved"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is undefined
+ quiet: true
+
+
+
+- name: "save state (table nat)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: nat
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.saved"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.saved"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ quiet: true
+
+
+
+- name: "save state (any table)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' in iptables_state.saved"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.saved"
+ - iptables_state.tables.filter is defined
+ - iptables_state.tables.nat is defined
+ quiet: true
+
+
+
+- name: "restore state (table nat, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ table: nat
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.restored"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.restored"
+ - iptables_state.tables.nat is defined
+ - iptables_state.tables.filter is undefined
+ - iptables_state is not changed
+ quiet: true
+
+
+
+- name: "change NAT table (iptables)"
+ iptables:
+ table: nat
+ chain: INPUT
+ jump: ACCEPT
+ state: present
+
+
+
+- name: "restore state (table nat, must report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ table: nat
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*nat' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.restored"
+ - "'*filter' in iptables_state.initial_state"
+ - "'*filter' not in iptables_state.restored"
+ - iptables_state.tables.nat is defined
+ - "'-A INPUT -j ACCEPT' in iptables_state.tables.nat"
+ - "'-A INPUT -j ACCEPT' not in iptables_state.restored"
+ - iptables_state.tables.filter is undefined
+ - iptables_state is changed
+ quiet: true
+
+
+
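+# Saving the raw and mangle tables (check_mode, changed_when: false) is
+# presumably done to get their kernel modules loaded, so that the following
+# "save state (any table)" sees all four tables.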
+- name: "get raw and mangle tables states"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ table: "{{ item }}"
+ loop:
+ - raw
+ - mangle
+ changed_when: false
+ check_mode: true
+
+
+
+- name: "save state (any table)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: saved
+ register: iptables_state
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'filter' in iptables_state.tables"
+ - "'*filter' in iptables_state.saved"
+ - "'mangle' in iptables_state.tables"
+ - "'*mangle' in iptables_state.saved"
+ - "'nat' in iptables_state.tables"
+ - "'*nat' in iptables_state.saved"
+ - "'raw' in iptables_state.tables"
+ - "'*raw' in iptables_state.saved"
+ quiet: true
+
+
+
+- name: "save filter table into a test file"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: filter
+ state: saved
+
+- name: "add a table header in comments (# *mangle)"
+ lineinfile:
+ path: "{{ iptables_tests }}"
+ line: "# *mangle"
+
+
+
+- name: "restore state (table filter, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: filter
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*mangle' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*raw' in iptables_state.initial_state"
+ - "'filter' in iptables_state.tables"
+ - "'mangle' not in iptables_state.tables"
+ - "'nat' not in iptables_state.tables"
+ - "'raw' not in iptables_state.tables"
+ - "'*filter' in iptables_state.restored"
+ - "'*mangle' not in iptables_state.restored"
+ - "'*nat' not in iptables_state.restored"
+ - "'*raw' not in iptables_state.restored"
+ - iptables_state is not changed
+ quiet: true
+
+
+
+- name: "restore state (any table, must NOT report a change, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - "'*filter' in iptables_state.initial_state"
+ - "'*mangle' in iptables_state.initial_state"
+ - "'*nat' in iptables_state.initial_state"
+ - "'*raw' in iptables_state.initial_state"
+ - "'filter' in iptables_state.tables"
+ - "'mangle' in iptables_state.tables"
+ - "'nat' in iptables_state.tables"
+ - "'raw' in iptables_state.tables"
+ - "'*filter' in iptables_state.restored"
+ - "'*mangle' in iptables_state.restored"
+ - "'*nat' in iptables_state.restored"
+ - "'*raw' in iptables_state.restored"
+ - iptables_state is not changed
+ quiet: true
+
+
+
+- name: "restore state (table mangle, must fail, no warning)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ table: mangle
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ ignore_errors: true
+
+- name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is failed
+ - "iptables_state.msg == 'Table mangle to restore not defined in {{ iptables_tests }}'"
+ success_msg: >-
+ The previous error has been triggered by trying to restore a table
+ that is missing in the file provided to iptables-restore.
+ fail_msg: >-
+ The previous task should have failed due to a missing table (mangle)
+ in the file to restore iptables state from.
diff --git a/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml
new file mode 100644
index 000000000..53fdd3ca0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iptables_state/tasks/tests/10-rollback.yml
@@ -0,0 +1,203 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "create a blocking ruleset with a DROP policy"
+ copy:
+ dest: "{{ iptables_tests }}"
+ content: |
+ *filter
+ :INPUT DROP
+ COMMIT
+
+
+
+- name: "restore state from the test file (check_mode, must report a change)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ check_mode: true
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is changed
+
+
+
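+# Restoring with async and poll=0 is what enables the rollback: the module
+# keeps the previous ruleset and reverts to it if the new rules cannot be
+# confirmed before the async timeout expires.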
+- name: "fail to restore state from the test file"
+ block:
+ - name: "restore state from the test file (bad policies, expected error -> rollback)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+          The previous error has been triggered to test the rollback. If you
+          are here, it means that 1) the connection was lost right after the
+          bad rules were restored; 2) a rollback happened, so the bad rules
+          are no longer applied; 3) the module failed because it didn't
+          reach the wanted state, but at least the host is not lost!
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "try again, with a higher timeout (bad policies, same expected error)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ vars:
+ ansible_timeout: "{{ max_delay | d(300) }}"
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+          The previous error has been triggered to test the rollback. If you
+          are here, it means that 1) the connection was lost right after the
+          bad rules were restored; 2) a rollback happened, so the bad rules
+          are no longer applied; 3) the module failed because it didn't
+          reach the wanted state, but at least the host is not lost!
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "restore state from backup (must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+
+
+
+- name: "restore state from backup (mangle, must NOT report a change)"
+ iptables_state:
+ path: "{{ iptables_saved }}"
+ table: mangle
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+- name: "assert that results are as expected"
+ assert:
+ that:
+ - iptables_state is not changed
+
+
+
+- name: "create a blocking ruleset with a REJECT rule"
+ copy:
+ dest: "{{ iptables_tests }}"
+ content: |
+ *filter
+ -A INPUT -j REJECT
+ COMMIT
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "restore state from the test file (bad rules, expected error -> rollback)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+          The previous error has been triggered to test the rollback. If you
+          are here, it means that 1) the connection was lost right after the
+          bad rules were restored; 2) a rollback happened, so the bad rules
+          are no longer applied; 3) the module failed because it didn't
+          reach the wanted state, but at least the host is not lost!
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
+
+
+
+- name: "fail to restore state from the test file (again)"
+ block:
+ - name: "try again, with a higher timeout (bad rules, same expected error)"
+ iptables_state:
+ path: "{{ iptables_tests }}"
+ state: restored
+ register: iptables_state
+ async: "{{ ansible_timeout }}"
+ poll: 0
+ vars:
+ ansible_timeout: "{{ max_delay | d(300) }}"
+
+ rescue:
+ - name: "explain expected failure"
+ assert:
+ that:
+ - iptables_state is not changed
+ - not iptables_state.applied
+ success_msg: >-
+          The previous error has been triggered to test the rollback. If you
+          are here, it means that 1) the connection was lost right after the
+          bad rules were restored; 2) a rollback happened, so the bad rules
+          are no longer applied; 3) the module failed because it didn't
+          reach the wanted state, but at least the host is not lost!
+ fail_msg: >-
+ The previous error has been triggered but its results are not as
+ expected.
+
+- name: "check that the expected failure happened"
+ assert:
+ that:
+ - iptables_state is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases b/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases
new file mode 100644
index 000000000..b469f71b0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# There is no Ericsson IPWorks
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml
new file mode 100644
index 000000000..9cbb4edc2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ipwcli_dns/tasks/main.yml
@@ -0,0 +1,115 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for ipwcli_dns
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: variables username, password, container, tld must be set
+ fail:
+ msg: 'Please set the variables: username, password, container and tld.'
+ when: username is not defined or password is not defined or container is not defined or tld is not defined
+
+- name: add a new A record
+ ipwcli_dns:
+ dnsname: example.{{ tld }}
+ type: A
+ container: '{{ container }}'
+ address: 127.0.0.1
+ ttl: 100
+ username: '{{ username }}'
+ password: '{{ password }}'
+ register: result
+
+- name: assert the new A record is added
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+ - result.record == 'arecord example.{{ tld }} 127.0.0.1 -set ttl=100;container={{ container }}'
+
+- name: delete the A record
+ ipwcli_dns:
+ dnsname: example.{{ tld }}
+ type: A
+ container: '{{ container }}'
+ address: 127.0.0.1
+ ttl: 100
+ username: '{{ username }}'
+ password: '{{ password }}'
+ state: absent
+ register: result
+
+- name: assert the new A record is deleted
+ assert:
+ that:
+ - result is not failed
+ - result is changed
+ - result.record == 'arecord example.{{ tld }} 127.0.0.1 -set ttl=100;container={{ container }}'
+
+- name: delete a non-existing SRV record
+ ipwcli_dns:
+ dnsname: _sip._tcp.test.example.{{ tld }}
+ type: SRV
+ container: '{{ container }}'
+ target: example.{{ tld }}
+ port: 5060
+ username: '{{ username }}'
+ password: '{{ password }}'
+ state: absent
+ register: result
+
+- name: assert the non-existing SRV record reports no change
+ assert:
+ that:
+ - result is not failed
+ - result is not changed
+ - result.record ==
+ 'srvrecord _sip._tcp.test.example.{{ tld }} -set ttl=3600;container={{ container }};priority=10;weight=10;port=5060;target=example.{{ tld }}'
+
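+# RFC 2782 defines the SRV weight as a 16-bit unsigned integer, so a value
+# of 65536 must be rejected with an 'Out of UINT16 range' error.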
+- name: add a SRV record with weight > 65535 against RFC 2782
+ ipwcli_dns:
+ dnsname: _sip._tcp.test.example.{{ tld }}
+ type: SRV
+ container: '{{ container }}'
+ ttl: 100
+ target: example.{{ tld }}
+ port: 5060
+ weight: 65536
+ username: '{{ username }}'
+ password: '{{ password }}'
+ register: result
+ ignore_errors: true
+
+- name: assert the failure of the new SRV record
+ assert:
+ that:
+ - result is failed
+ - result is not changed
+ - "'Out of UINT16 range' in result.stderr"
+
+- name: add NAPTR record (check_mode)
+ ipwcli_dns:
+ dnsname: test.example.{{ tld }}
+ type: NAPTR
+ preference: 10
+ container: '{{ container }}'
+ ttl: 100
+ order: 10
+ service: 'SIP+D2T'
+ replacement: '_sip._tcp.test.example.{{ tld }}.'
+ flags: S
+ username: '{{ username }}'
+ password: '{{ password }}'
+ check_mode: true
+ register: result
+
+- name: assert the NAPTR check_mode
+ assert:
+ that:
+ - result is not failed
+ - result is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/aliases b/ansible_collections/community/general/tests/integration/targets/iso_create/aliases
new file mode 100644
index 000000000..4fb0bec81
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/python2.6
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg b/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg
new file mode 100644
index 000000000..8cd712916
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/files/test1.cfg
@@ -0,0 +1,61 @@
+#version=DEVEL
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# System authorization information
+auth --enableshadow --passalgo=sha512
+# Use CDROM installation media
+cdrom
+# Use graphical install
+graphical
+# Run the Setup Agent on first boot
+firstboot --enable
+ignoredisk --only-use=sda
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts='us'
+# System language
+lang en_US.UTF-8
+# Network information
+network --bootproto=dhcp --device=ens192 --ipv6=auto --no-activate
+network --hostname=localhost.localdomain
+# System services
+services --enabled="chronyd"
+# System timezone
+timezone America/New_York --isUtc
+# X Window System configuration information
+xconfig --startxonboot
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=sda
+autopart --type=lvm
+# Partition clearing information
+clearpart --none --initlabel
+#firewall --disable
+services --disabled=firewalld
+eula --agreed
+# Reboot when the install is finished.
+reboot
+
+%packages
+@^graphical-server-environment
+@base
+@core
+@desktop-debugging
+@dial-up
+@fonts
+@gnome-desktop
+@guest-agents
+@guest-desktop-agents
+@hardware-monitoring
+@input-methods
+@internet-browser
+@multimedia
+@print-client
+@x11
+chrony
+kexec-tools
+open-vm-tools-desktop
+%end
+%addon com_redhat_kdump --enable --reserve-mb='auto'
+%end
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg b/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg
new file mode 100644
index 000000000..8cd712916
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/files/test_dir/test2.cfg
@@ -0,0 +1,61 @@
+#version=DEVEL
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# System authorization information
+auth --enableshadow --passalgo=sha512
+# Use CDROM installation media
+cdrom
+# Use graphical install
+graphical
+# Run the Setup Agent on first boot
+firstboot --enable
+ignoredisk --only-use=sda
+# Keyboard layouts
+keyboard --vckeymap=us --xlayouts='us'
+# System language
+lang en_US.UTF-8
+# Network information
+network --bootproto=dhcp --device=ens192 --ipv6=auto --no-activate
+network --hostname=localhost.localdomain
+# System services
+services --enabled="chronyd"
+# System timezone
+timezone America/New_York --isUtc
+# X Window System configuration information
+xconfig --startxonboot
+# System bootloader configuration
+bootloader --append=" crashkernel=auto" --location=mbr --boot-drive=sda
+autopart --type=lvm
+# Partition clearing information
+clearpart --none --initlabel
+#firewall --disable
+services --disabled=firewalld
+eula --agreed
+# Reboot when the install is finished.
+reboot
+
+%packages
+@^graphical-server-environment
+@base
+@core
+@desktop-debugging
+@dial-up
+@fonts
+@gnome-desktop
+@guest-agents
+@guest-desktop-agents
+@hardware-monitoring
+@input-methods
+@internet-browser
+@multimedia
+@print-client
+@x11
+chrony
+kexec-tools
+open-vm-tools-desktop
+%end
+%addon com_redhat_kdump --enable --reserve-mb='auto'
+%end
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml
new file mode 100644
index 000000000..e7127a2d6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml
new file mode 100644
index 000000000..d53217bd3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/main.yml
@@ -0,0 +1,163 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for iso_create module
+# Copyright (c) 2020, Diane Wang (Tomorrow9) <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: install pycdlib
+ pip:
+ name: pycdlib
+ extra_args: "-c {{ remote_constraints }}"
+ register: install_pycdlib
+- debug: var=install_pycdlib
+
+- set_fact:
+ output_test_dir: '{{ remote_tmp_dir }}/test_iso_create'
+
+# - include_tasks: prepare_dest_dir.yml
+
+- name: Copy files and directories
+ copy:
+ src: '{{ item }}'
+ dest: '{{ remote_tmp_dir }}/{{ item }}'
+ loop:
+ - test1.cfg
+ - test_dir
+
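+# In check mode the module must report a change without writing anything;
+# the stat task below therefore expects the ISO file to be absent.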
+- name: Test check mode
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test.iso"
+ interchange_level: 3
+ register: iso_result
+ check_mode: true
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test.iso"
+ register: iso_file
+- debug: var=iso_file
+- assert:
+ that:
+ - iso_result is changed
+      - not iso_file.stat.exists
+
+- name: Create iso file with a specified file
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test.iso"
+ interchange_level: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
+
+- name: Create iso file with a specified file and folder
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ - "{{ remote_tmp_dir }}/test_dir"
+ dest_iso: "{{ output_test_dir }}/test1.iso"
+ interchange_level: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test1.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
+
+- name: Create iso file with volume identification string
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test2.iso"
+ vol_ident: "OEMDRV"
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test2.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
+
+- name: Create iso file with Rock Ridge extension
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test3.iso"
+ rock_ridge: "1.09"
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test3.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
+
+- name: Create iso file with Joliet extension
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test4.iso"
+ joliet: 3
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test4.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
+
+- name: Create iso file with UDF enabled
+ iso_create:
+ src_files:
+ - "{{ remote_tmp_dir }}/test1.cfg"
+ dest_iso: "{{ output_test_dir }}/test5.iso"
+ udf: true
+ register: iso_result
+- debug: var=iso_result
+
+- name: Check if iso file created
+ stat:
+ path: "{{ output_test_dir }}/test5.iso"
+ register: iso_file
+
+- assert:
+ that:
+ - iso_result is changed
+      - iso_file.stat.exists
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml b/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml
new file mode 100644
index 000000000..d1f405b5f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_create/tasks/prepare_dest_dir.yml
@@ -0,0 +1,13 @@
+# Test code for iso_create module
+# Copyright (c) 2020, Diane Wang (Tomorrow9) <dianew@vmware.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Make sure our testing sub-directory does not exist
+ file:
+ path: '{{ output_test_dir }}'
+ state: absent
+
+- name: Create our testing sub-directory
+ file:
+ path: '{{ output_test_dir }}'
+ state: directory
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/aliases b/ansible_collections/community/general/tests/integration/targets/iso_customize/aliases
new file mode 100644
index 000000000..54a0f1a04
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/freebsd
+skip/alpine
+skip/python2.6
+skip/docker
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/meta/main.yml
new file mode 100644
index 000000000..5b9177b12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize.yml
new file mode 100644
index 000000000..f7d7bffd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize.yml
@@ -0,0 +1,75 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Add a line to the file test02.cfg and make sure it succeeds
+ ansible.builtin.lineinfile:
+ path: "{{ test_dir }}/test02.cfg"
+ regexp: "^test"
+ line: "test"
+
+- name: "Customize ISO file: add file, delete file and change file"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ delete_files:
+ - "/test01.cfg"
+ add_files:
+ - src_file: "{{ test_dir }}/test01.cfg"
+ dest_file: "/preseed/ubuntu.seed"
+ - src_file: "{{ test_dir }}/test02.cfg"
+ dest_file: "/test02.cfg"
+
+- include_tasks: iso_mount.yml
+ vars:
+ iso_name: "{{ dest_iso_name }}"
+
+- debug: var=mount_root_dir
+
+- name: Check the file test01.cfg is deleted
+ stat:
+ path: "{{ mount_root_dir }}/test01.cfg"
+ register: check_file
+
+- assert:
+ that:
+      - not check_file.stat.exists
+
+- name: Check the file /preseed/ubuntu.seed is added
+ stat:
+ path: "{{ mount_root_dir }}/preseed/ubuntu.seed"
+ register: check_file
+
+- assert:
+ that:
+      - check_file.stat.exists
+
+- block:
+ - name: Get the content of file test02.cfg
+ command: "cat {{ mount_root_dir }}/test02.cfg"
+ register: get_file_content
+
+ - set_fact:
+ file_contents: "{{ get_file_content.stdout }}"
+ when: ansible_distribution == 'RedHat' and ansible_distribution_version is version('7.9', '==')
+
+- name: Get the content of file test02.cfg
+ set_fact:
+ file_contents: "{{ lookup('file', mount_root_dir + '/test02.cfg') }}"
+ when: not (ansible_distribution == 'RedHat' and ansible_distribution_version is version('7.9', '=='))
+
+- fail: msg="Failed to replace the file test02.cfg"
+ when: file_contents != "test"
+
+- name: Umount ISO
+ mount:
+ path: "{{ mount_root_dir }}"
+ fstab: "{{ test_dir }}/temp.fstab"
+ state: unmounted
+
+- name: Delete line of file test02.cfg
+ ansible.builtin.lineinfile:
+ path: "{{ test_dir }}/test02.cfg"
+ regexp: "test"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_add_files.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_add_files.yml
new file mode 100644
index 000000000..210767707
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_add_files.yml
@@ -0,0 +1,34 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Customize ISO file: add file"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test1.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ add_files:
+ - src_file: "{{ test_dir }}/test01.cfg"
+ dest_file: "preseed/ubuntu.seed"
+
+
+- include_tasks: iso_mount.yml
+ vars:
+ iso_name: "{{ dest_iso_name }}"
+
+- debug: var=mount_root_dir
+
+- name: Check the file /preseed/ubuntu.seed is added
+ stat:
+ path: "{{ mount_root_dir }}/preseed/ubuntu.seed"
+ register: check_file
+
+- assert:
+ that:
+      - check_file.stat.exists
+
+- name: Umount ISO
+ mount:
+ path: "{{ mount_root_dir }}"
+ fstab: "{{ test_dir }}/temp.fstab"
+ state: unmounted
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_delete_files.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_delete_files.yml
new file mode 100644
index 000000000..bceeeb53a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_delete_files.yml
@@ -0,0 +1,34 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Customize ISO file: delete file"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test1.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ delete_files:
+ - "test01.cfg"
+
+- debug: var=ansible_distribution
+
+- include_tasks: iso_mount.yml
+ vars:
+ iso_name: "{{ dest_iso_name }}"
+
+- debug: var=mount_root_dir
+
+- name: Check the file test01.cfg is deleted
+ stat:
+ path: "{{ mount_root_dir }}/test01.cfg"
+ register: check_file
+
+- assert:
+ that:
+      - not check_file.stat.exists
+
+- name: Umount ISO
+ mount:
+ path: "{{ mount_root_dir }}"
+ fstab: "{{ test_dir }}/temp.fstab"
+ state: unmounted
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_exception.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_exception.yml
new file mode 100644
index 000000000..b2130bb6b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_customize_exception.yml
@@ -0,0 +1,71 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Testcase: local resource ISO does not exists"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test11.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ register: customized_result
+ failed_when: customized_result.msg.find('does not exist') == -1
+
+- name: "Testcase:: dest dir does not exists"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test1.iso"
+ dest_iso: "/aaa/{{ dest_iso_name }}"
+ register: customized_result
+ failed_when: customized_result.msg.find('does not exist') == -1
+
+# Test: nothing is changed when neither add_files nor delete_files is given
+- block:
+ - name: "Testcase: no options 'add files' and 'delete files'"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test1.iso"
+ dest_iso: "{{ test_dir }}/iso_customize_nochanged.iso"
+
+  - name: Get stats of the file test1.iso
+    ansible.builtin.stat:
+      path: "{{ test_dir }}/test1.iso"
+    register: iso_original
+
+  - name: Get stats of the file iso_customize_nochanged.iso
+    ansible.builtin.stat:
+      path: "{{ test_dir }}/iso_customize_nochanged.iso"
+    register: iso_customized
+
+  - name: Compare sizes
+    fail: msg="The customized ISO should be unchanged in size"
+    when: iso_original.stat.size != iso_customized.stat.size
+
+- name: "Testcase: delete the non-existing file in ISO"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test1.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ delete_files:
+ - "/test03.cfg"
+ register: customized_result
+ failed_when: customized_result.msg.find("does not exist") == -1
+
+# Test: fails when the local source file does not exist
+- name: "Testcase: local source file does not exist"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ add_files:
+ - src_file: "{{ test_dir }}/test03.cfg"
+ dest_file: "/preseed/ubuntu.seed"
+ register: customized_result
+ failed_when: customized_result.msg.find("does not exist") == -1
+
+# Test: filenames with surrounding whitespace
+# We report an error: the user is responsible for supplying correct filenames
+- name: "Testcase: filenames with surrounding whitespace"
+ community.general.iso_customize:
+ src_iso: "{{ test_dir }}/test.iso"
+ dest_iso: "{{ test_dir }}/{{ dest_iso_name }}"
+ add_files:
+ - src_file: " {{ test_dir }}/test01.cfg "
+ dest_file: "/preseed/ubuntu.seed"
+ register: customized_result
+ failed_when: customized_result.msg.find("does not exist") == -1
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_mount.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_mount.yml
new file mode 100644
index 000000000..cf4ab8199
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/iso_mount.yml
@@ -0,0 +1,39 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug: var=ansible_distribution
+
+- block:
+ - name: "Mount customized ISO on MAC"
+ command: "hdiutil attach {{ test_dir }}/{{ iso_name }} -mountroot {{ test_dir }}/iso_mount"
+
+  # On macOS, the mount root directory differs depending on the ISO type
+ - set_fact:
+ mount_root_dir: "{{ test_dir }}/iso_mount/disk_image"
+
+ - set_fact:
+ mount_root_dir: "{{ test_dir }}/iso_mount/AUTOINSTALL"
+ when: iso_name.find('joliet') != -1
+
+ - set_fact:
+ mount_root_dir: "{{ test_dir }}/iso_mount/CDROM"
+ when: iso_name.find('udf') != -1
+ when: ansible_distribution == "MacOSX"
+
+- block:
+ - name: "Mount {{ iso_name }} to {{ test_dir }}/iso_mount on localhost"
+ become: true
+ ansible.posix.mount:
+ path: "{{ test_dir }}/iso_mount"
+ src: "{{ test_dir }}/{{ iso_name }}"
+ opts: "ro,noauto"
+ fstab: "{{ test_dir }}/temp.fstab"
+ fstype: "iso9660"
+ state: mounted
+
+ - set_fact:
+ mount_root_dir: "{{ test_dir }}/iso_mount"
+ when:
+ - ansible_distribution != "MacOSX"
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/main.yml
new file mode 100644
index 000000000..dafd84dd5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/main.yml
@@ -0,0 +1,94 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Skip platforms that do not support ansible.posix.mount
+ meta: end_play
+ when: ansible_distribution in ['Alpine']
+
+- set_fact:
+ test_dir: '{{ remote_tmp_dir }}/test_iso_customize'
+
+- include_tasks: prepare.yml
+
+- name: Create an ISO file with the specified files
+ community.general.iso_create:
+ src_files:
+ - "{{ test_dir }}/test01.cfg"
+ - "{{ test_dir }}/test02.cfg"
+ dest_iso: "{{ test_dir }}/test.iso"
+ interchange_level: 3
+
+- include_tasks: iso_customize.yml
+ vars:
+ dest_iso_name: "iso_customize.iso"
+
+- name: Create an ISO file with Rock Ridge extension
+ community.general.iso_create:
+ src_files:
+ - "{{ test_dir }}/test01.cfg"
+ - "{{ test_dir }}/test02.cfg"
+ dest_iso: "{{ test_dir }}/test.iso"
+ rock_ridge: "1.09"
+
+- include_tasks: iso_customize.yml
+ vars:
+ dest_iso_name: "iso_customize_rr.iso"
+
+- name: Create an ISO file with Joliet support
+ community.general.iso_create:
+ src_files:
+ - "{{ test_dir }}/test01.cfg"
+ - "{{ test_dir }}/test02.cfg"
+ dest_iso: "{{ test_dir }}/test.iso"
+ interchange_level: 3
+ joliet: 3
+ vol_ident: AUTOINSTALL
+
+- include_tasks: iso_customize.yml
+ vars:
+ dest_iso_name: "iso_customize_joliet.iso"
+
+- name: Create an ISO file with UDF enabled
+ community.general.iso_create:
+ src_files:
+ - "{{ test_dir }}/test01.cfg"
+ - "{{ test_dir }}/test02.cfg"
+ dest_iso: "{{ test_dir }}/test.iso"
+ udf: true
+
+- include_tasks: iso_customize.yml
+ vars:
+ dest_iso_name: "iso_customize_udf.iso"
+
+# Create the initial ISO for customizing with only the add_files/delete_files options
+- name: Create an ISO file with the specified file
+ community.general.iso_create:
+ src_files:
+ - "{{ test_dir }}/test01.cfg"
+ dest_iso: "{{ test_dir }}/test1.iso"
+ interchange_level: 3
+
+- include_tasks: iso_customize_add_files.yml
+ vars:
+ dest_iso_name: "iso_customize_add.iso"
+
+- include_tasks: iso_customize_delete_files.yml
+ vars:
+ dest_iso_name: "iso_customize_delete.iso"
+
+# Test: misc exception
+- include_tasks: iso_customize_exception.yml
+ vars:
+ dest_iso_name: "iso_customize_exception.iso"
+
+- name: Delete testing sub-directory
+ ansible.builtin.file:
+ path: '{{ test_dir }}'
+ state: absent
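The four rounds above each pair an iso_create variant with the same include of iso_customize.yml. If the creation options were moved into the included file, the includes could be driven from data; a sketch (the loop values are illustrative only):

- include_tasks: iso_customize.yml
  vars:
    dest_iso_name: "iso_customize_{{ item }}.iso"
  loop:
    - plain
    - rr
    - joliet
    - udf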
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/prepare.yml b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/prepare.yml
new file mode 100644
index 000000000..e3c860b7c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_customize/tasks/prepare.yml
@@ -0,0 +1,40 @@
+# Copyright (c) 2022, Ansible Project
+# Copyright (c) 2022, VMware, Inc. All Rights Reserved.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install pycdlib
+ ansible.builtin.pip:
+ name: pycdlib
+ extra_args: "-c {{ remote_constraints }}"
+
+- name: Make sure the previous testing sub-directory is deleted
+ ansible.builtin.file:
+ path: '{{ test_dir }}'
+ state: absent
+
+- name: Create our testing sub-directory
+ ansible.builtin.file:
+ path: '{{ test_dir }}'
+ state: directory
+
+- name: Create sub directory to mount customized ISO
+ ansible.builtin.file:
+ path: '{{ test_dir }}/iso_mount'
+ state: directory
+
+- name: Create temporary file test01.cfg for testing
+ ansible.builtin.file:
+ path: "{{ test_dir }}/test01.cfg"
+ state: touch
+
+- name: Add a line to the file test01.cfg and make sure it succeeds
+ ansible.builtin.lineinfile:
+ path: "{{ test_dir }}/test01.cfg"
+ regexp: "^aaa"
+ line: "aaa"
+
+- name: Create temporary file test02.cfg for testing
+ ansible.builtin.file:
+ path: "{{ test_dir }}/test02.cfg"
+ state: touch
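The touch-then-lineinfile sequence for test01.cfg creates a file and then seeds a single line; ansible.builtin.copy with inline content does both in one task. An equivalent sketch:

- name: Create test01.cfg with its content in one step
  ansible.builtin.copy:
    dest: "{{ test_dir }}/test01.cfg"
    content: "aaa\n"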
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases b/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases
new file mode 100644
index 000000000..33041456a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/target/setup_epel
+destructive
+skip/aix
+skip/osx # FIXME
+skip/rhel9.0 # FIXME
+skip/rhel9.1 # FIXME
+skip/freebsd12.4 # FIXME
+skip/freebsd13.2 # FIXME
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso b/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso
new file mode 100644
index 000000000..d06ff73ca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso
Binary files differ
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso.license b/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/files/test.iso.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml
new file mode 100644
index 000000000..e0f1586ce
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/7zip.yml
@@ -0,0 +1,54 @@
+---
+# Test code for the iso_extract module.
+# Copyright (c) 2017, James Tanner <tanner.jc@gmail.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Gather facts
+ setup:
+ become: true
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+- name: "{{ ansible_facts.os_family | upper }} | Install 7zip package"
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ iso_extract_7zip_package }}"
+ state: present
+ when: ansible_facts.distribution != 'MacOSX'
+
+- name: macOS
+ when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+ when: ansible_distribution in ['MacOSX']
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+ when: ansible_distribution in ['MacOSX']
+
+ - name: MACOS | Install 7zip package
+ homebrew:
+ name: p7zip
+ state: present
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile a package which takes a long time. Do not upgrade homebrew until a
+ # proper solution can be found
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "True"
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml
new file mode 100644
index 000000000..67ebfa7ab
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the iso_extract module.
+# Copyright (c) 2017, James Tanner <tanner.jc@gmail.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- set_fact:
+ output_test_dir: '{{ remote_tmp_dir }}/test_iso_extract'
+
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
+
+- name: Install 7zip
+ import_tasks: 7zip.yml
+
+- name: Prepare environment
+ import_tasks: prepare.yml
+
+- name: Test in normal mode
+ import_tasks: tests.yml
+ vars:
+ in_check_mode: false
+
+- name: Prepare environment
+ import_tasks: prepare.yml
+
+- name: Test in check-mode
+ import_tasks: tests.yml
+ vars:
+ in_check_mode: true
+ check_mode: true
+
+# FIXME - fill this in after figuring out how to allow mounts
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml
new file mode 100644
index 000000000..57e10c0db
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/prepare.yml
@@ -0,0 +1,21 @@
+---
+# Test code for the iso_extract module.
+# Copyright (c) 2017, James Tanner <tanner.jc@gmail.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Make sure our testing sub-directory does not exist
+ file:
+ path: '{{ output_test_dir }}'
+ state: absent
+
+- name: Create our testing sub-directory
+ file:
+ path: '{{ output_test_dir }}'
+ state: directory
+
+- name: copy the iso to the test dir
+ copy:
+ src: test.iso
+ dest: '{{ output_test_dir }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml
new file mode 100644
index 000000000..6919a7c2e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/tasks/tests.yml
@@ -0,0 +1,40 @@
+---
+# Test code for the iso_extract module.
+# Copyright (c) 2017, James Tanner <tanner.jc@gmail.com>
+# Copyright (c) 2017, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Extract the iso
+ iso_extract:
+ image: '{{ output_test_dir }}/test.iso'
+ dest: '{{ output_test_dir }}'
+ files:
+ - 1.txt
+ - 2.txt
+ register: iso_extract_test0
+
+- assert:
+ that:
+ - iso_extract_test0 is changed
+
+- name: Extract the iso again
+ iso_extract:
+ image: '{{ output_test_dir }}/test.iso'
+ dest: '{{ output_test_dir }}'
+ files:
+ - 1.txt
+ - 2.txt
+ register: iso_extract_test0_again
+
+- name: Test iso_extract_test0_again (normal mode)
+ assert:
+ that:
+ - iso_extract_test0_again is not changed
+ when: not in_check_mode
+
+- name: Test iso_extract_test0_again (check-mode)
+ assert:
+ that:
+ - iso_extract_test0_again is changed
+ when: in_check_mode
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Alpine.yml
new file mode 100644
index 000000000..bcb92f79b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Alpine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Archlinux.yml
new file mode 100644
index 000000000..bcb92f79b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Archlinux.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Debian.yml
new file mode 100644
index 000000000..09436ceb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip-full
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml
new file mode 100644
index 000000000..bcb92f79b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/FreeBSD.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml
new file mode 100644
index 000000000..2bcdf4bff
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/RedHat.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip-plugins
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml
new file mode 100644
index 000000000..1b695ce72
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Suse.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The 7z executable moved from p7zip to p7zip-full;
+# see https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/p7zip/p7zip.changes?expand=1
+iso_extract_7zip_package: p7zip-full
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml
new file mode 100644
index 000000000..09436ceb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/Ubuntu.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+iso_extract_7zip_package: p7zip-full
diff --git a/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/iso_extract/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/aliases b/ansible_collections/community/general/tests/integration/targets/java_cert/aliases
new file mode 100644
index 000000000..573cb189b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml
new file mode 100644
index 000000000..ebac2789b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/defaults/main.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_pkcs12_path: testpkcs.p12
+test_keystore_path: keystore.jks
+test_keystore2_path: "{{ remote_tmp_dir }}/keystore2.jks"
+test_keystore2_password: changeit
+test_cert_path: "{{ remote_tmp_dir }}/cert.pem"
+test_key_path: "{{ remote_tmp_dir }}/key.pem"
+test_csr_path: "{{ remote_tmp_dir }}/req.csr"
+test_cert2_path: "{{ remote_tmp_dir }}/cert2.pem"
+test_key2_path: "{{ remote_tmp_dir }}/key2.pem"
+test_csr2_path: "{{ remote_tmp_dir }}/req2.csr"
+test_pkcs_path: "{{ remote_tmp_dir }}/cert.p12"
+test_pkcs2_path: "{{ remote_tmp_dir }}/cert2.p12"
+test_ssl: setupSSLServer.py
+test_ssl_port: 21500
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/files/setupSSLServer.py b/ansible_collections/community/general/tests/integration/targets/java_cert/files/setupSSLServer.py
new file mode 100644
index 000000000..4b0a42185
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/files/setupSSLServer.py
@@ -0,0 +1,24 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import ssl
+import os
+import sys
+
+root_dir = sys.argv[1]
+port = int(sys.argv[2])
+
+try:
+ from BaseHTTPServer import HTTPServer
+ from SimpleHTTPServer import SimpleHTTPRequestHandler
+except ImportError:  # also covers Python < 3.6, where ModuleNotFoundError does not exist
+ from http.server import HTTPServer, SimpleHTTPRequestHandler
+
+httpd = HTTPServer(('localhost', port), SimpleHTTPRequestHandler)
+httpd.socket = ssl.wrap_socket(httpd.socket, server_side=True,
+ certfile=os.path.join(root_dir, 'cert.pem'),
+ keyfile=os.path.join(root_dir, 'key.pem'))
+httpd.handle_request()
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12 b/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12
new file mode 100644
index 000000000..e0fee618c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12
Binary files differ
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12.license b/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/files/testpkcs.p12.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml
new file mode 100644
index 000000000..0371df88e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_java_keytool
+ - setup_openssl
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml
new file mode 100644
index 000000000..25ec87e8f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/main.yml
@@ -0,0 +1,116 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when: has_java_keytool
+ block:
+
+ - name: prep pkcs12 file
+ ansible.builtin.copy:
+ src: "{{ test_pkcs12_path }}"
+ dest: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}"
+
+ - name: import pkcs12
+ community.general.java_cert:
+ pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}"
+ pkcs12_password: changeit
+ pkcs12_alias: default
+ cert_alias: default
+ keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: true
+ state: present
+ register: result_success
+
+ - name: verify success
+ ansible.builtin.assert:
+ that:
+ - result_success is successful
+
+ - name: import pkcs12 with wrong password
+ community.general.java_cert:
+ pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}"
+ pkcs12_password: wrong_pass
+ pkcs12_alias: default
+ cert_alias: default_new
+ keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: true
+ state: present
+ ignore_errors: true
+ register: result_wrong_pass
+
+ - name: verify fail with wrong import password
+ ansible.builtin.assert:
+ that:
+ - result_wrong_pass is failed
+
+ - name: test fail on mutually exclusive params
+ community.general.java_cert:
+ cert_path: ca.crt
+ pkcs12_path: "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}"
+ cert_alias: default
+ keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ keystore_create: true
+ state: present
+ ignore_errors: true
+ register: result_excl_params
+
+ - name: verify failed exclusive params
+ ansible.builtin.assert:
+ that:
+ - result_excl_params is failed
+
+ - name: test fail on missing required params
+ community.general.java_cert:
+ keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ state: absent
+ ignore_errors: true
+ register: result_missing_required_param
+
+ - name: verify failed missing required params
+ ansible.builtin.assert:
+ that:
+ - result_missing_required_param is failed
+
+ - name: delete object based on cert_alias parameter
+ community.general.java_cert:
+ keystore_path: "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ keystore_pass: changeme_keystore
+ cert_alias: default
+ state: absent
+ ignore_errors: true
+ register: result_alias_deleted
+
+ - name: verify object successfully deleted
+ ansible.builtin.assert:
+ that:
+ - result_alias_deleted is successful
+
+ - name: include extended test suite
+ import_tasks: state_change.yml
+
+ - name: cleanup environment
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ remote_tmp_dir }}/{{ test_pkcs12_path }}"
+ - "{{ remote_tmp_dir }}/{{ test_keystore_path }}"
+ - "{{ test_keystore2_path }}"
+ - "{{ test_cert_path }}"
+ - "{{ test_key_path }}"
+ - "{{ test_csr_path }}"
+ - "{{ test_cert2_path }}"
+ - "{{ test_key2_path }}"
+ - "{{ test_csr2_path }}"
+ - "{{ test_pkcs_path }}"
+ - "{{ test_pkcs2_path }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/state_change.yml b/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/state_change.yml
new file mode 100644
index 000000000..e135a60a3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_cert/tasks/state_change.yml
@@ -0,0 +1,298 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#
+# Prepare X509 and PKCS#12 materials
+#
+
+- name: Create private keys
+ community.crypto.openssl_privatekey:
+ path: "{{ item }}"
+ mode: "u=rw,go="
+ loop:
+ - "{{ test_key_path }}"
+ - "{{ test_key2_path }}"
+
+- name: Generate CSR for self-signed certificate used as a placeholder to create the java keystore
+ community.crypto.openssl_csr:
+ path: "{{ test_csr_path }}"
+ privatekey_path: "{{ test_key_path }}"
+ commonName: "localhost"
+
+- name: Generate CSR for self-signed certificate used for testing
+ community.crypto.openssl_csr:
+ path: "{{ test_csr2_path }}"
+ privatekey_path: "{{ test_key2_path }}"
+ commonName: "localhost"
+
+- name: Generate the self-signed cert used as a placeholder to create the java keystore
+ community.crypto.x509_certificate:
+ path: "{{ test_cert_path }}"
+ csr_path: "{{ test_csr_path }}"
+ privatekey_path: "{{ test_key_path }}"
+ provider: selfsigned
+
+- name: Generate the self-signed cert we will use for testing
+ community.crypto.x509_certificate:
+ path: "{{ test_cert2_path }}"
+ csr_path: "{{ test_csr2_path }}"
+ privatekey_path: "{{ test_key2_path }}"
+ provider: selfsigned
+
+- name: Create the pkcs12 archive from the test x509 cert
+ community.crypto.openssl_pkcs12:
+ name: "test_pkcs12_cert"
+ path: "{{ test_pkcs_path }}"
+ passphrase: "{{ test_keystore2_password }}"
+ certificate_path: "{{ test_cert_path }}"
+ privatekey_path: "{{ test_key_path }}"
+ when:
+ - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))"
+
+- name: Create the pkcs12 archive from the test x509 cert (command)
+ ansible.builtin.command:
+ cmd: >
+ openssl pkcs12 -export
+ -in {{ test_cert_path }}
+ -inkey {{ test_key_path }}
+ -name test_pkcs12_cert
+ -out {{ test_pkcs_path }}
+ -passout stdin
+ stdin: "{{ test_keystore2_password }}"
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - "ansible_distribution_version is version('8.0', '<')"
+
+- name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore
+ community.crypto.openssl_pkcs12:
+ name: "test_pkcs12_cert"
+ path: "{{ test_pkcs2_path }}"
+ passphrase: "{{ test_keystore2_password }}"
+ certificate_path: "{{ test_cert2_path }}"
+ privatekey_path: "{{ test_key2_path }}"
+ when:
+ - "not (ansible_os_family == 'RedHat' and ansible_distribution_version is version('8.0', '<'))"
+
+- name: Create the pkcs12 archive from the certificate we will be trying to add to the keystore (command)
+ ansible.builtin.command:
+ cmd: >
+ openssl pkcs12 -export
+ -in {{ test_cert2_path }}
+ -inkey {{ test_key2_path }}
+ -name test_pkcs12_cert
+ -out {{ test_pkcs2_path }}
+ -passout stdin
+ stdin: "{{ test_keystore2_password }}"
+ when:
+ - "ansible_os_family == 'RedHat'"
+ - "ansible_distribution_version is version('8.0', '<')"
+
+#
+# Run tests
+#
+
+- name: Try to create the test keystore from the just-created pkcs12 without the keystore_create flag enabled
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: test_pkcs12_cert
+ pkcs12_path: "{{ test_pkcs_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ ignore_errors: true
+ register: result_x509_changed
+
+- name: Verify the x509 status is failed
+ ansible.builtin.assert:
+ that:
+ - result_x509_changed is failed
+
+- name: Create the test keystore based on the just created pkcs12
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: test_pkcs12_cert
+ pkcs12_path: "{{ test_pkcs_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ keystore_create: true
+
+- name: List newly created keystore content
+ ansible.builtin.command:
+ cmd: "keytool -list -keystore {{ test_keystore2_path }}"
+ stdin: "{{ test_keystore2_password }}"
+ register: keytool_list_keystore
+
+- name: Assert that the keystore has a private key entry
+ ansible.builtin.assert:
+ that:
+ - "keytool_list_keystore.stdout_lines[5] is match('test_pkcs12_cert,.*, PrivateKeyEntry, $')"
+
+- name: Try to import a non-existing alias from the pkcs12
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: non_existing_alias
+ pkcs12_path: "{{ test_pkcs_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ keystore_create: true
+ ignore_errors: true
+ register: result_x509_changed
+
+- name: Verify the x509 status is failed
+ ansible.builtin.assert:
+ that:
+ - result_x509_changed is failed
+
+- name: import initial test certificate from file path
+ community.general.java_cert:
+ cert_alias: test_cert
+ cert_path: "{{ test_cert_path }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ keystore_create: true
+ state: present
+ register: result_x509_changed
+
+- name: Verify the x509 status is changed
+ ansible.builtin.assert:
+ that:
+ - result_x509_changed is changed
+
+- name: |
+ Import the newly created certificate. This is our main test.
+ If the java_cert has been updated properly, then this task will report changed each time
+ since the module will be comparing the hash of the certificate instead of validating that the alias
+ simply exists
+ community.general.java_cert:
+ cert_alias: test_cert
+ cert_path: "{{ test_cert2_path }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: present
+ register: result_x509_changed
+
+- name: Verify the x509 status is changed
+ ansible.builtin.assert:
+ that:
+ - result_x509_changed is changed
+
+- name: |
+    We also want to make sure that the status doesn't change if we import the same cert
+ community.general.java_cert:
+ cert_alias: test_cert
+ cert_path: "{{ test_cert2_path }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: present
+ register: result_x509_succeeded
+
+- name: Verify the x509 status is ok
+ ansible.builtin.assert:
+ that:
+ - result_x509_succeeded is succeeded
+
+- name: >
+ Ensure the original pkcs12 cert is in the keystore
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: test_pkcs12_cert
+ pkcs12_path: "{{ test_pkcs_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: present
+
+- name: |
+ Perform the same test, but we will now be testing the pkcs12 functionality
+ If we add a different pkcs12 cert with the same alias, we should have a changed result, NOT the same
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: test_pkcs12_cert
+ pkcs12_path: "{{ test_pkcs2_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: present
+ register: result_pkcs12_changed
+
+- name: Verify the pkcs12 status is changed
+ ansible.builtin.assert:
+ that:
+ - result_pkcs12_changed is changed
+
+- name: |
+ We are requesting the same cert now, so the status should show OK
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ pkcs12_alias: test_pkcs12_cert
+ pkcs12_path: "{{ test_pkcs2_path }}"
+ pkcs12_password: "{{ test_keystore2_password }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ register: result_pkcs12_succeeded
+
+- name: Verify the pkcs12 status is ok
+ ansible.builtin.assert:
+ that:
+ - result_pkcs12_succeeded is succeeded
+
+- name: Copy the ssl server script
+ copy:
+ src: "setupSSLServer.py"
+ dest: "{{ remote_tmp_dir }}"
+
+- name: Create an SSL server that we will use for testing URL imports
+ command: "{{ ansible_python.executable }} {{ remote_tmp_dir }}/setupSSLServer.py {{ remote_tmp_dir }} {{ test_ssl_port }}"
+ async: 10
+ poll: 0
+
+- name: "Wait for one second to make sure that the serve script has actually been started"
+ pause:
+ seconds: 1
+
+- name: |
+ Download the original cert.pem from our temporary server. The current cert should contain
+ cert2.pem. Importing this cert should return a status of changed
+ community.general.java_cert:
+ cert_alias: test_cert_localhost
+ cert_url: localhost
+ cert_port: "{{ test_ssl_port }}"
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: present
+ register: result_url_changed
+
+- name: Verify that the url status is changed
+ ansible.builtin.assert:
+ that:
+ - result_url_changed is changed
+
+- name: Ensure we can remove the x509 cert
+ community.general.java_cert:
+ cert_alias: test_cert
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: absent
+ register: result_x509_absent
+
+- name: Verify the x509 cert is absent
+ ansible.builtin.assert:
+ that:
+ - result_x509_absent is changed
+
+- name: Ensure we can remove the certificate imported from pkcs12 archive
+ community.general.java_cert:
+ cert_alias: test_pkcs12_cert
+ keystore_path: "{{ test_keystore2_path }}"
+ keystore_pass: "{{ test_keystore2_password }}"
+ state: absent
+ register: result_pkcs12_absent
+
+- name: Verify the pkcs12 archive is absent
+ ansible.builtin.assert:
+ that:
+ - result_pkcs12_absent is changed
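The fixed one-second pause after launching setupSSLServer.py leaves a small race window on slow hosts. ansible.builtin.wait_for can block until the port is actually accepting connections; a sketch that could replace the pause:

- name: Wait until the test SSL server is listening
  ansible.builtin.wait_for:
    host: localhost
    port: "{{ test_ssl_port }}"
    timeout: 10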
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases b/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases
new file mode 100644
index 000000000..573cb189b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/java_keystore/defaults/main.yml
new file mode 100644
index 000000000..b51461462
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/defaults/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+java_keystore_certs:
+ - name: cert
+ commonName: example.com
+ - name: cert-pw
+ passphrase: hunter2
+ commonName: example.com
+
+java_keystore_new_certs:
+ - name: cert2
+ keyname: cert
+ commonName: example.org
+ - name: cert2-pw
+ keyname: cert-pw
+ passphrase: hunter2
+ commonName: example.org
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml
new file mode 100644
index 000000000..0371df88e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_java_keytool
+ - setup_openssl
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml
new file mode 100644
index 000000000..2a95cfe50
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when: has_java_keytool
+ connection: local
+ block:
+ - name: Include tasks to create ssl materials on the controller
+ include_tasks: prepare.yml
+
+- set_fact:
+ ssl_backends: ['openssl']
+
+- set_fact:
+ ssl_backends: "{{ ssl_backends + ['cryptography'] }}"
+ when: cryptography_version.stdout is version('3.0', '>=')
+
+- when: has_java_keytool
+ block:
+ - name: Include tasks to play with 'certificate' and 'private_key' contents
+ include_tasks: tests.yml
+ vars:
+ remote_cert: false
+ loop: "{{ ssl_backends }}"
+ loop_control:
+ loop_var: ssl_backend
+
+ - name: Include tasks to create ssl materials on the remote host
+ include_tasks: prepare.yml
+
+ - name: Include tasks to play with 'certificate_path' and 'private_key_path' locations
+ include_tasks: tests.yml
+ vars:
+ remote_cert: true
+ loop: "{{ ssl_backends }}"
+ loop_control:
+ loop_var: ssl_backend
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/prepare.yml b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/prepare.yml
new file mode 100644
index 000000000..7c4c5c98d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/prepare.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create test directory
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir }}"
+ state: directory
+
+- name: Create private keys
+ community.crypto.openssl_privatekey:
+ path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ size: 2048 # this should work everywhere
+ # The following is more efficient, but might not work everywhere:
+ # type: ECC
+ # curve: secp384r1
+ cipher: "{{ 'auto' if item.passphrase is defined else omit }}"
+ passphrase: "{{ item.passphrase | default(omit) }}"
+ loop: "{{ java_keystore_certs }}"
+
+- name: Create CSRs
+ community.crypto.openssl_csr:
+ path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}"
+ privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ privatekey_passphrase: "{{ item.passphrase | default(omit) }}"
+ commonName: "{{ item.commonName }}"
+ loop: "{{ java_keystore_certs + java_keystore_new_certs }}"
+
+- name: Create certificates
+ community.crypto.x509_certificate:
+ path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ csr_path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.csr' }}"
+ privatekey_path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | default(item.name)) ~ '.key' }}"
+ privatekey_passphrase: "{{ item.passphrase | default(omit) }}"
+ provider: selfsigned
+ loop: "{{ java_keystore_certs + java_keystore_new_certs }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/tests.yml
new file mode 100644
index 000000000..899fe27e8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/java_keystore/tasks/tests.yml
@@ -0,0 +1,313 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create test directory
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir }}"
+ state: directory
+
+- name: Ensure the Java keystore does not exist (cleanup between tests)
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.jks' }}"
+ state: absent
+ loop: "{{ java_keystore_certs }}"
+ loop_control:
+ label: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.jks' }}"
+
+
+- name: Read certificates
+ slurp:
+ src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ loop: "{{ java_keystore_certs }}"
+ when: not remote_cert
+ register: certificates
+
+- name: Read certificate keys
+ slurp:
+ src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}"
+ loop: "{{ java_keystore_certs }}"
+ when: not remote_cert
+ register: certificate_keys
+
+- name: Create a Java keystore for the given ({{ 'remote' if remote_cert else 'local' }}) certificates (check mode)
+ community.general.java_keystore: &java_keystore_params
+ name: example
+ dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}"
+ certificate: "{{ omit if remote_cert else (certificates.results[loop_index].content | b64decode) }}"
+ private_key: "{{ omit if remote_cert else (certificate_keys.results[loop_index].content | b64decode) }}"
+ certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}"
+ private_key_passphrase: "{{ item.passphrase | d(omit) }}"
+ password: changeit
+ ssl_backend: "{{ ssl_backend }}"
+ keystore_type: "{{ item.keystore_type | d(omit) }}"
+ loop: "{{ java_keystore_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_check
+
+- name: Create a Java keystore for the given certificates
+ community.general.java_keystore: *java_keystore_params
+ loop: "{{ java_keystore_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result
+
+
+- name: Create a Java keystore for the given certificates (idempotency, check mode)
+ community.general.java_keystore: *java_keystore_params
+ loop: "{{ java_keystore_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_idem_check
+
+- name: Create a Java keystore for the given certificates (idempotency)
+ community.general.java_keystore: *java_keystore_params
+ loop: "{{ java_keystore_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_idem
+
+
+- name: Read certificates (new)
+ slurp:
+ src: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ loop: "{{ java_keystore_new_certs }}"
+ when: not remote_cert
+ register: certificates_new
+
+- name: Read certificate keys (new)
+ slurp:
+ src: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}"
+ loop: "{{ java_keystore_new_certs }}"
+ when: not remote_cert
+ register: certificate_keys_new
+
+- name: Create a Java keystore for the given certificates (certificate changed, check mode)
+ community.general.java_keystore: &java_keystore_params_new_certs
+ name: example
+ dest: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}"
+ certificate: "{{ omit if remote_cert else (certificates_new.results[loop_index].content | b64decode) }}"
+ private_key: "{{ omit if remote_cert else (certificate_keys_new.results[loop_index].content | b64decode) }}"
+ certificate_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ private_key_path: "{{ omit if not remote_cert else remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.key' }}"
+ private_key_passphrase: "{{ item.passphrase | d(omit) }}"
+ password: changeit
+ ssl_backend: "{{ ssl_backend }}"
+ keystore_type: "{{ item.keystore_type | d(omit) }}"
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_change_check
+
+- name: Create a Java keystore for the given certificates (certificate changed)
+ community.general.java_keystore: *java_keystore_params_new_certs
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_change
+
+
+- name: Create a Java keystore for the given certificates (alias changed, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_alias_change_check
+
+- name: Create a Java keystore for the given certificates (alias changed)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_alias_change
+
+
+- name: Create a Java keystore for the given certificates (password changed, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_pw_change_check
+
+- name: Create a Java keystore for the given certificates (password changed)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_pw_change
+
+
+- name: Create a Java keystore for the given certificates (force keystore type pkcs12, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ keystore_type: pkcs12
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_type_pkcs12_check
+
+- name: Create a Java keystore for the given certificates (force keystore type jks, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ keystore_type: jks
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_type_jks_check
+
+- name: Create a Java keystore for the given certificates (force keystore type jks)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ keystore_type: jks
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_type_jks
+
+
+- name: Stat keystore (before failure)
+ ansible.builtin.stat:
+ path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}"
+ loop: "{{ java_keystore_new_certs }}"
+ register: result_stat_before
+
+- name: Fail to create a Java keystore for the given certificates (password too short)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: short
+ keystore_type: jks
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_fail_jks
+ ignore_errors: true
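+ # The module is expected to fail without touching the existing keystores,
+ # so the stat results taken before and after this failure are compared
+ # field by field in the final assert.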
+
+- name: Stat keystore (after failure)
+ ansible.builtin.stat:
+ path: "{{ remote_tmp_dir ~ '/' ~ (item.keyname | d(item.name)) ~ '.jks' }}"
+ loop: "{{ java_keystore_new_certs }}"
+ register: result_stat_after
+
+
+- name: Create a Java keystore for the given certificates (keystore type changed, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ keystore_type: pkcs12
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_type_change_check
+
+- name: Create a Java keystore for the given certificates (keystore type changed)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ keystore_type: pkcs12
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_type_change
+
+
+- name: Create a Java keystore for the given certificates (omit keystore type, check mode)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ check_mode: true
+ register: result_type_omit_check
+
+- name: Create a Java keystore for the given certificates (omit keystore type)
+ community.general.java_keystore:
+ <<: *java_keystore_params_new_certs
+ name: foobar
+ password: hunter2
+ loop: "{{ java_keystore_new_certs }}"
+ loop_control:
+ index_var: loop_index
+ register: result_type_omit
+
+
+- name: Check that the remote certificates have not been removed
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.pem' }}"
+ state: file
+ loop: "{{ java_keystore_certs + java_keystore_new_certs }}"
+ when: remote_cert
+
+- name: Check that the remote private keys have not been removed
+ ansible.builtin.file:
+ path: "{{ remote_tmp_dir ~ '/' ~ item.name ~ '.key' }}"
+ state: file
+ loop: "{{ java_keystore_certs }}"
+ when: remote_cert
+
+- name: Validate results
+ assert:
+ that:
+ - result is changed
+ - result_check is changed
+ - result_idem is not changed
+ - result_idem_check is not changed
+ - result_change is changed
+ - result_change_check is changed
+ - result_alias_change is changed
+ - result_alias_change_check is changed
+ - result_pw_change is changed
+ - result_pw_change_check is changed
+
+ # We don't know whether we start from jks or pkcs12 format; either way,
+ # check mode and actual mode must return the same 'changed' state, and
+ # 'jks' and 'pkcs12' must give opposite results on the same host.
+ - result_type_jks_check.changed != result_type_pkcs12_check.changed
+ - result_type_jks_check.changed == result_type_jks.changed
+
+ - result_type_change is changed
+ - result_type_change_check is changed
+ - result_type_omit is not changed
+ - result_type_omit_check is not changed
+
+ # keystore properties must remain the same after failure
+ - result_fail_jks is failed
+ - result_stat_before.results[0].stat.uid == result_stat_after.results[0].stat.uid
+ - result_stat_before.results[1].stat.uid == result_stat_after.results[1].stat.uid
+ - result_stat_before.results[0].stat.gid == result_stat_after.results[0].stat.gid
+ - result_stat_before.results[1].stat.gid == result_stat_after.results[1].stat.gid
+ - result_stat_before.results[0].stat.mode == result_stat_after.results[0].stat.mode
+ - result_stat_before.results[1].stat.mode == result_stat_after.results[1].stat.mode
+ - result_stat_before.results[0].stat.checksum == result_stat_after.results[0].stat.checksum
+ - result_stat_before.results[1].stat.checksum == result_stat_after.results[1].stat.checksum
diff --git a/ansible_collections/community/general/tests/integration/targets/jboss/aliases b/ansible_collections/community/general/tests/integration/targets/jboss/aliases
new file mode 100644
index 000000000..38b6706fe
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jboss/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml
new file mode 100644
index 000000000..c2bf37df7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jboss/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+- setup_wildfly_server
diff --git a/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml b/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml
new file mode 100644
index 000000000..2403a02c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jboss/tasks/jboss.yml
@@ -0,0 +1,238 @@
+---
+# Copyright (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# Integration tests for jboss module.
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# helloworld.war (got from https://github.com/aeimer/java-example-helloworld-war/) is licensed
+# under the MIT license:
+#
+# Copyright (c) 2017 Alex Eimer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+#
+# ===============================
+# Module's note section contains:
+# "- The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
+#
+# Also from https://docs.jboss.org/author/display/WFLY10/Application+deployment?_sscc=t
+# "Deployment content (for example, war, ear, jar, and sar files) can be placed
+# in the standalone/deployments directory of the WildFly distribution,
+# in order to be automatically deployed into the server runtime.
+# For this to work the deployment-scanner subsystem must be present.
+# The scanner periodically checks the contents of the deployments directory
+# and reacts to changes by updating the server."
+# Given the information above, a JBoss server must be installed and running for the full test suite.
+# We use the WildFly server, a free alternative, instead. See the setup_wildfly_server role for more information.
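+#
+# For reference, the scanner's marker-file contract (a hypothetical sketch,
+# not used by this test directly): dropping helloworld.war into the
+# deployments directory makes the scanner create helloworld.war.deployed on
+# success (or helloworld.war.failed on error), and undeploying yields
+# helloworld.war.undeployed. Waiting for a successful deployment could look
+# like:
+#
+#   - name: Wait for the deployment marker created by the scanner
+#     ansible.builtin.wait_for:
+#       path: '{{ deploy_dir }}/helloworld.war.deployed'
+#       timeout: 60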
+
+- vars:
+ war_file_1: 'helloworld-1.war'
+ war_file_1_path: '{{ wf_homedir }}/{{ war_file_1 }}'
+ fake_src_path: /fake/src
+ test_deployment: helloworld-1.war
+ task_parameters: &task_parameters
+ become_user: '{{ wf_user }}'
+ become: true
+ register: result
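+ # The task_parameters anchor is merged (<<) into every task below, so each
+ # task runs as the WildFly user (via become) and registers its outcome as
+ # 'result'.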
+
+ block:
+ - name: Create test files
+ <<: *task_parameters
+ get_url:
+ url: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/jboss/{{ war_file_1 }}'
+ dest: '{{ wf_homedir }}'
+
+ ##################
+ # Start the tests:
+
+ # Test if state=present and not deployed, check_mode:
+ - name: jboss - deploy war in check_mode, the default deploy_path
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ # Check
+ - name: check that nothing changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+ ignore_errors: true
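+ # With no 'state' given, the file module defaults to state=file and fails
+ # when the path does not exist; the assert below relies on that message.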
+
+ - assert:
+ that:
+ - "'is absent' in result.msg"
+
+ # Test if state=present and not deployed, actual mode:
+ - name: jboss - deploy war
+ <<: *task_parameters
+ jboss:
+ deployment: helloworld-1.war
+ deploy_path: '{{ deploy_dir }}'
+ src: '{{ war_file_1_path }}'
+
+ - assert:
+ that:
+ - result is changed
+
+ # Check
+ - name: check that the file is deployed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=present and deployed in check mode, try again:
+ - name: jboss - try again to deploy war in check_mode, war is deployed now
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ deploy_path: '{{ deploy_dir }}'
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test if state=present and deployed, try again:
+ - name: jboss - try again to deploy war in actual mode, war is deployed now
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ src: '{{ war_file_1_path }}'
+ deploy_path: '{{ deploy_dir }}'
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Check
+ - name: check that nothing changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and deployed:
+ - name: jboss - undeploy war in check_mode, war is deployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+ check_mode: true
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: check that nothing actually changed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.deployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and deployed:
+ - name: jboss - undeploy war in actual mode, war is deployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is changed
+
+ - name: check that file is undeployed after the previous step
+ <<: *task_parameters
+ file:
+ path: '{{ deploy_dir }}/{{ war_file_1 }}.undeployed'
+
+ - assert:
+ that:
+ - result.state == 'file'
+
+ # Test if state=absent and undeployed:
+ - name: jboss - undeploy war in check_mode, war is undeployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+ check_mode: true
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test if state=absent and undeployed:
+ - name: jboss - undeploy war in actual mode, war is undeployed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ state: absent
+
+ - assert:
+ that:
+ - result is not changed
+
+ # Test fake src:
+ - name: jboss - test fake src
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ deploy_path: '{{ deploy_dir }}'
+ src: '{{ fake_src_path }}'
+ state: present
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - "'Source file {{ fake_src_path }} does not exist.' in result.msg"
+
+ # Test errors where state=present and src is not passed:
+ - name: jboss - must fail when state=present and src is not passed
+ <<: *task_parameters
+ jboss:
+ deployment: '{{ war_file_1 }}'
+ state: present
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - "'state is present but all of the following are missing: src' in result.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml
new file mode 100644
index 000000000..891c802d7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jboss/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: jboss.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/jira/aliases b/ansible_collections/community/general/tests/integration/targets/jira/aliases
new file mode 100644
index 000000000..9c3dc5670
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jira/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/jira/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/jira/tasks/main.yml
new file mode 100644
index 000000000..129070871
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jira/tasks/main.yml
@@ -0,0 +1,111 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create test ticket
+ community.general.jira:
+ uri: "{{ uri }}"
+ username: "{{ user }}"
+ password: "{{ pasw }}"
+ project: "{{ proj }}"
+ operation: create
+ summary: test ticket
+ description: bla bla bla
+ issuetype: Task
+ register: issue
+- debug:
+ msg: Issue={{ issue }}
+- name: assert test ticket
+ assert:
+ that:
+ - issue is changed
+ - issue.meta.key.startswith(proj)
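+# The key of the ticket created above (issue.meta.key) is reused by all of
+# the following comment, transition, edit and worklog operations.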
+
+- name: add comment bleep bleep
+ community.general.jira:
+ uri: "{{ uri }}"
+ username: "{{ user }}"
+ password: "{{ pasw }}"
+ issue: "{{ issue.meta.key }}"
+ operation: comment
+ comment: bleep bleep!
+ register: comment_bleep_bleep
+- name: assert comment bleep bleep
+ assert:
+ that:
+ - comment_bleep_bleep is changed
+ - comment_bleep_bleep.meta.body == "bleep bleep!"
+ - comment_bleep_bleep.meta.body != None
+
+- name: transition -> In Progress with comment
+ community.general.jira:
+ uri: "{{ uri }}"
+ username: "{{ user }}"
+ password: "{{ pasw }}"
+ issue: "{{ issue.meta.key }}"
+ operation: transition
+ status: Start Progress
+ comment: -> in progress
+ register: transition_inprog
+- name: assert transition -> In Progress with comment
+ assert:
+ that:
+ - transition_inprog is changed
+
+- name: change assignee
+ community.general.jira:
+ uri: "{{ uri }}"
+ username: "{{ user }}"
+ password: "{{ pasw }}"
+ issue: "{{ issue.meta.key }}"
+ operation: edit
+ account_id: "{{ user2 }}"
+ register: assign
+- name: assert change assignee
+ assert:
+ that:
+ - assign is changed
+
+- name: Worklog on issue
+ community.general.jira:
+ uri: '{{ uri }}'
+ username: '{{ user }}'
+ password: '{{ pasw }}'
+ issue: '{{ issue.meta.key }}'
+ operation: worklog
+ comment: Worklog
+ fields:
+ timeSpentSeconds: 1200
+ register: worklog
+- name: assert worklog -> with comment
+ assert:
+ that:
+ - worklog is changed
+ - worklog.meta.comment == 'Worklog'
+ - worklog.meta.timeSpentSeconds == 1200
+
+- name: transition -> Resolved with comment
+ community.general.jira:
+ uri: "{{ uri }}"
+ username: "{{ user }}"
+ password: "{{ pasw }}"
+ issue: "{{ issue.meta.key }}"
+ operation: transition
+ status: Resolve Issue
+ comment: -> resolved
+ account_id: "{{ user1 }}"
+ fields:
+ resolution:
+ name: Done
+ description: wakawakawakawaka
+ register: transition_resolved
+- name: assert transition -> Resolved with comment
+ assert:
+ that:
+ - transition_resolved is changed
+
+- debug:
+ msg:
+ - Issue = {{ issue.meta.key }}
+ - URL = {{ issue.meta.self }}
diff --git a/ansible_collections/community/general/tests/integration/targets/jira/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/jira/vars/main.yml
new file mode 100644
index 000000000..781fde8ca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/jira/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+uri: https://xxxx.atlassian.net/
+user: xxx@xxxx.xxx
+pasw: supersecret
+proj: ABC
+user1: 6574474636373822y7338
+user2: 6574474636373822y73959696
diff --git a/ansible_collections/community/general/tests/integration/targets/kdeconfig/aliases b/ansible_collections/community/general/tests/integration/targets/kdeconfig/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kdeconfig/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/kdeconfig/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/kdeconfig/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kdeconfig/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/files/kwriteconf_fake b/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/files/kwriteconf_fake
new file mode 100755
index 000000000..c29627257
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/files/kwriteconf_fake
@@ -0,0 +1,38 @@
+#!/bin/sh
+
+# Copyright (c) 2023, Salvatore Mesoraca <s.mesoraca16@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This script is not supposed to emulate the kwriteconf
+# output format correctly.
+# It only emulates the tool's behaviour as seen from the
+# point of view of the Ansible module, which is: write
+# something that depends on the arguments to the output
+# file, unless the same thing was already written before.
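+#
+# Example (hypothetical invocation):
+#   kwriteconf_fake --file /tmp/out --group test --key k v
+# appends the line " --file --group test --key k v" to /tmp/out the first
+# time, and exits 0 without rewriting it on identical repeated calls.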
+
+set -e
+
+args=""
+prev_was_file=0
+for var in "$@"; do
+ if [ $prev_was_file -eq 1 ]; then
+ fname="$var"
+ prev_was_file=0
+ else
+ args="$args $var"
+ fi
+ if [ "$var" = "--file" ]; then
+ prev_was_file=1
+ fi
+done
+
+if [ "x$fname" = "x" ]; then
+ exit 1
+fi
+
+if [ -e "$fname" ]; then
+ grep -qF "$args" "$fname" && exit 0
+fi
+
+echo "$args" >> "$fname"
diff --git a/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/main.yml
new file mode 100644
index 000000000..790bb378d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kdeconfig/tasks/main.yml
@@ -0,0 +1,369 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2023, Salvatore Mesoraca <s.mesoraca16@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set paths
+ set_fact:
+ output_file: "{{ remote_tmp_dir }}/kdeconf"
+ kwriteconf_fake: "{{ remote_tmp_dir }}/kwriteconf"
+
+- name: Install fake kwriteconf
+ copy:
+ dest: "{{ kwriteconf_fake }}"
+ src: kwriteconf_fake
+ mode: 0755
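+# Every task below points kwriteconfig_path at this fake, so the tests run
+# without a real KDE installation on the host.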
+
+- name: Simple test
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_simple
+ ignore_errors: true
+
+- name: Simple test - checks
+ assert:
+ that:
+ - result_simple is changed
+ - result_simple is not failed
+
+- name: Simple test - idempotence
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_simple_idem
+ ignore_errors: true
+
+- name: Simple test - idempotence - checks
+ assert:
+ that:
+ - result_simple_idem is not changed
+ - result_simple_idem is not failed
+
+- name: Reset
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+- name: Group and groups are mutually exclusive
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ groups: [test2]
+ key: test1
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_group_mutex
+ ignore_errors: true
+
+- name: Group and groups are mutually exclusive - checks
+ assert:
+ that:
+ - result_group_mutex is not changed
+ - result_group_mutex is failed
+ - "result_group_mutex.msg == 'parameters are mutually exclusive: group|groups found in values'"
+
+- name: value and bool_value are mutually exclusive
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ bool_value: true
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_val_mutex
+ ignore_errors: true
+
+- name: value and bool_value are mutually exclusive - checks
+ assert:
+ that:
+ - result_val_mutex is not changed
+ - result_val_mutex is failed
+ - "result_val_mutex.msg == 'parameters are mutually exclusive: value|bool_value found in values'"
+
+- name: bool_value must be bool
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ bool_value: thisisastring
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_val_bool
+ ignore_errors: true
+
+- name: bool_value must be bool - checks
+ assert:
+ that:
+ - result_val_bool is not changed
+ - result_val_bool is failed
+ - "'is not a valid boolean' in result_val_bool.msg"
+
+- name: Multiple groups test
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - groups:
+ - test
+ - test1
+ - test2
+ key: test3
+ value: test4
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_groups
+ ignore_errors: true
+
+- name: Multiple groups test - checks
+ assert:
+ that:
+ - result_groups is changed
+ - result_groups is not failed
+
+- name: Multiple groups test - idempotence
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - groups:
+ - test
+ - test1
+ - test2
+ key: test3
+ value: test4
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_groups_idem
+ ignore_errors: true
+
+- name: Multiple groups test - idempotence - checks
+ assert:
+ that:
+ - result_groups_idem is not changed
+ - result_groups_idem is not failed
+
+- name: Reset
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+- name: Bool test
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ bool_value: true
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_bool
+ ignore_errors: true
+
+- name: Bool test - checks
+ assert:
+ that:
+ - result_bool is changed
+ - result_bool is not failed
+
+- name: Bool test - idempotence
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ bool_value: on
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_bool_idem
+ ignore_errors: true
+
+- name: Bool test - idempotence - checks
+ assert:
+ that:
+ - result_bool_idem is not changed
+ - result_bool_idem is not failed
+
+- name: Reset
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+- name: check_mode test
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ - groups: [testx, testy]
+ key: testz
+ bool_value: on
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_checkmode
+ ignore_errors: true
+ check_mode: true
+ diff: true
+
+- name: check_mode test file contents
+ slurp:
+ src: "{{ output_file }}"
+ register: check_mode_contents
+ ignore_errors: true
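+# If check mode had written the file, the slurp above would succeed; the
+# assert below expects it to fail because the file must not exist yet.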
+
+- name: check_mode test - checks
+ assert:
+ that:
+ - result_checkmode is changed
+ - result_checkmode is not failed
+ - check_mode_contents is failed
+
+- name: check_mode test - apply
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ - groups: [testx, testy]
+ key: testz
+ bool_value: on
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_checkmode_apply
+ ignore_errors: true
+ check_mode: false
+ diff: true
+
+- name: check_mode test - apply - checks
+ assert:
+ that:
+ - result_checkmode_apply is changed
+ - result_checkmode_apply is not failed
+ - "result_checkmode_apply['diff']['after'] == result_checkmode['diff']['after']"
+ - "result_checkmode_apply['diff']['before'] == result_checkmode['diff']['before']"
+
+- name: check_mode test - idempotence
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test
+ key: test1
+ value: test2
+ - groups: [testx, testy]
+ key: testz
+ bool_value: on
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_checkmode2
+ ignore_errors: true
+ check_mode: true
+
+- name: check_mode test - idempotence - checks
+ assert:
+ that:
+ - result_checkmode2 is not changed
+ - result_checkmode2 is not failed
+
+- name: Reset
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+- name: Unicode test
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: tesòt
+ key: testè1
+ value: testù2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_unicode
+ ignore_errors: true
+
+- name: Unicode test - checks
+ assert:
+ that:
+ - result_unicode is changed
+ - result_unicode is not failed
+
+- name: Reset
+ file:
+ path: "{{ output_file }}"
+ state: absent
+
+- name: Missing groups
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - key: test1
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_mgroup
+ ignore_errors: true
+
+- name: Missing groups - checks
+ assert:
+ that:
+ - result_mgroup is not changed
+ - result_mgroup is failed
+ - "result_mgroup.msg == 'one of the following is required: group, groups found in values'"
+
+- name: Missing key
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test1
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_mkey
+ ignore_errors: true
+
+- name: Missing key - checks
+ assert:
+ that:
+ - result_mkey is not changed
+ - result_mkey is failed
+ - "result_mkey.msg == 'missing required arguments: key found in values'"
+
+- name: Missing value
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test1
+ key: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_mvalue
+ ignore_errors: true
+
+- name: Missing value - checks
+ assert:
+ that:
+ - result_mvalue is not changed
+ - result_mvalue is failed
+ - "result_mvalue.msg == 'one of the following is required: value, bool_value found in values'"
+
+- name: Empty key
+ kdeconfig:
+ path: "{{ output_file }}"
+ values:
+ - group: test1
+ key: ''
+ value: test2
+ kwriteconfig_path: "{{ kwriteconf_fake }}"
+ register: result_ekey
+ ignore_errors: true
+
+- name: Empty key - checks
+ assert:
+ that:
+ - result_ekey is not changed
+ - result_ekey is failed
+ - "result_ekey.msg == \"'key' cannot be empty\""
diff --git a/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/aliases b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/aliases
new file mode 100644
index 000000000..afda346c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
diff --git a/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/files/blacklist b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/files/blacklist
new file mode 100644
index 000000000..eeaf32246
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/files/blacklist
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+blacklist aaaa
+blacklist bbbb
+blacklist cccc
diff --git a/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/tasks/main.yml
new file mode 100644
index 000000000..e169d5479
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/kernel_blacklist/tasks/main.yml
@@ -0,0 +1,111 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: set destination filename
+ set_fact:
+ bl_file: '{{ remote_tmp_dir }}/blacklist-ansible.conf'
+
+- name: copy blacklist file
+ copy:
+ src: 'files/blacklist'
+ dest: '{{ bl_file }}'
+
+- name: Original stat
+ stat:
+ path: '{{ bl_file }}'
+ register: orig_stat
+
+- name: remove non-existing item from list
+ community.general.kernel_blacklist:
+ blacklist_file: '{{ bl_file }}'
+ state: absent
+ name: zzzz
+ register: bl_test_1
+
+- name: add existing item from list
+ community.general.kernel_blacklist:
+ blacklist_file: '{{ bl_file }}'
+ state: present
+ name: bbbb
+ register: bl_test_1a
+
+- name: stat_test_1
+ stat:
+ path: '{{ bl_file }}'
+ register: stat_test_1
+
+- name: assert file is unchanged
+ assert:
+ that:
+ - bl_test_1 is not changed
+ - bl_test_1a is not changed
+ - orig_stat.stat.size == stat_test_1.stat.size
+ - orig_stat.stat.checksum == stat_test_1.stat.checksum
+ - orig_stat.stat.mtime == stat_test_1.stat.mtime
+ - stat_test_1.stat.checksum == expected_content | checksum
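+ # The '{{ '' }}' interpolation below splits the SPDX tag so that license
+ # scanners do not treat this expected-content literal as a real license
+ # declaration (assumed intent).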
+ vars:
+ expected_content: |
+ # Copyright (c) Ansible Project
+ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ # SPDX-{{ '' }}License-Identifier: GPL-3.0-or-later
+
+ blacklist aaaa
+ blacklist bbbb
+ blacklist cccc
+
+- name: add new item to list
+ community.general.kernel_blacklist:
+ blacklist_file: '{{ bl_file }}'
+ state: present
+ name: dddd
+ register: bl_test_2
+
+- name: slurp_test_2
+ slurp:
+ src: '{{ bl_file }}'
+ register: slurp_test_2
+
+- name: assert element is added
+ assert:
+ that:
+ - bl_test_2 is changed
+ - slurp_test_2.content|b64decode == content
+ vars:
+ content: |
+ # Copyright (c) Ansible Project
+ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ # SPDX-{{ '' }}License-Identifier: GPL-3.0-or-later
+
+ blacklist aaaa
+ blacklist bbbb
+ blacklist cccc
+ blacklist dddd
+
+- name: remove item from list
+ community.general.kernel_blacklist:
+ blacklist_file: '{{ bl_file }}'
+ state: absent
+ name: bbbb
+ register: bl_test_3
+
+- name: slurp_test_3
+ slurp:
+ src: '{{ bl_file }}'
+ register: slurp_test_3
+
+- name: assert element is removed
+ assert:
+ that:
+ - bl_test_3 is changed
+ - slurp_test_3.content|b64decode == content
+ vars:
+ content: |
+ # Copyright (c) Ansible Project
+ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ # SPDX-{{ '' }}License-Identifier: GPL-3.0-or-later
+
+ blacklist aaaa
+ blacklist cccc
+ blacklist dddd
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/readme.adoc b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/readme.adoc
new file mode 100644
index 000000000..1941e54ef
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/readme.adoc
@@ -0,0 +1,27 @@
+// Copyright (c) Ansible Project
+// GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+To be able to run these integration tests, a keycloak server must be
+reachable under a specific URL with a specific admin user and password.
+The exact values expected for these parameters can be found in the
+'vars/main.yml' file. A simple way to set this up is to use the official
+keycloak docker images like this:
+
+----
+docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=<url-path> -e KEYCLOAK_ADMIN=<admin_user> -e KEYCLOAK_ADMIN_PASSWORD=<admin_password> quay.io/keycloak/keycloak:20.0.2 start-dev
+----
+
+Example with concrete values inserted:
+
+----
+docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=/auth -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:20.0.2 start-dev
+----
+
+This test suite can run against a fresh unconfigured server instance
+(no preconfiguration required) and cleans up after itself (undoes all
+its config changes) as long as it runs through completely. While it is
+active, it changes the server configuration in the following ways:
+
+ * creating, modifying and deleting a keycloak client and its authorization scopes
+
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/tasks/main.yml
new file mode 100644
index 000000000..504a24a01
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/tasks/main.yml
@@ -0,0 +1,234 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Remove keycloak client to avoid failures from previous failed runs
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: absent
+
+- name: Create keycloak client with authorization services enabled
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ enabled: true
+ public_client: false
+ service_accounts_enabled: true
+ authorization_services_enabled: true
+
+- name: Create an authorization scope (check mode)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ display_name: "File delete"
+ icon_uri: "http://localhost/icon.png"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ check_mode: true
+ diff: true
+ register: result
+
+- name: Assert that authorization scope was not created in check mode
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+ - result.msg == 'Authorization scope would be created'
+ - result.diff.before == {}
+ - result.diff.after.name == 'file:delete'
+ - result.diff.after.displayName == 'File delete'
+ - result.diff.after.iconUri == 'http://localhost/icon.png'
+
+- name: Create authorization scope
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ display_name: "File delete"
+ icon_uri: "http://localhost/icon.png"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that authorization scope was created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "file:delete"
+ - result.end_state.iconUri == "http://localhost/icon.png"
+ - result.end_state.displayName == "File delete"
+
+- name: Create authorization scope (test for idempotency)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ display_name: "File delete"
+ icon_uri: "http://localhost/icon.png"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state != {}
+ - result.end_state.name == "file:delete"
+ - result.end_state.iconUri == "http://localhost/icon.png"
+ - result.end_state.displayName == "File delete"
+
+- name: Authorization scope update (check mode)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ diff: true
+ check_mode: true
+ register: result
+
+- name: Assert that authorization scope was not updated in check mode
+ assert:
+ that:
+ - result is changed
+ - result.msg == 'Authorization scope would be updated'
+ - result.diff.before.displayName == 'File delete'
+ - result.diff.before.iconUri == 'http://localhost/icon.png'
+ - result.diff.after.displayName == ''
+ - result.diff.after.iconUri == ''
+
+- name: Authorization scope update (remove optional parameters)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that optional parameters have been removed
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "file:delete"
+ - result.end_state.iconUri == ""
+ - result.end_state.displayName == ""
+
+- name: Authorization scope update (test for idempotency)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: present
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state != {}
+ - result.end_state.name == "file:delete"
+ - result.end_state.iconUri == ""
+ - result.end_state.displayName == ""
+
+- name: Authorization scope remove (check mode)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: absent
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ diff: true
+ check_mode: true
+ register: result
+
+- name: Assert that authorization scope has not been removed in check mode
+ assert:
+ that:
+ - result is changed
+ - result.msg == 'Authorization scope would be removed'
+ - result.diff.before.name == 'file:delete'
+ - result.diff.after == {}
+
+- name: Authorization scope remove
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: absent
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that authorization scope has been removed
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Authorization scope remove (test for idempotency)
+ community.general.keycloak_authz_authorization_scope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ state: absent
+ name: "file:delete"
+ client_id: "{{ client_id }}"
+ realm: "{{ realm }}"
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
+
+- name: Remove keycloak client
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/vars/main.yml
new file mode 100644
index 000000000..c1d5fc983
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_authz_authorization_scope/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: master
+client_id: authz
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_client/README.md b/ansible_collections/community/general/tests/integration/targets/keycloak_client/README.md
new file mode 100644
index 000000000..d8bcc08ec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_client/README.md
@@ -0,0 +1,17 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+The integration test can be performed as follows:
+
+```
+# 1. Start docker-compose:
+docker-compose -f tests/integration/targets/keycloak_client/docker-compose.yml stop
+docker-compose -f tests/integration/targets/keycloak_client/docker-compose.yml rm -f -v
+docker-compose -f tests/integration/targets/keycloak_client/docker-compose.yml up -d
+
+# 2. Run the integration tests:
+ansible-test integration keycloak_client --allow-unsupported -v
+```
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_client/docker-compose.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_client/docker-compose.yml
new file mode 100644
index 000000000..5e14e9aac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_client/docker-compose.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3.4'
+
+services:
+ postgres:
+ image: postgres:9.6
+ restart: always
+ environment:
+ POSTGRES_USER: postgres
+ POSTGRES_DB: postgres
+ POSTGRES_PASSWORD: postgres
+
+ keycloak:
+ image: jboss/keycloak:12.0.4
+ ports:
+ - 8080:8080
+
+ environment:
+ DB_VENDOR: postgres
+ DB_ADDR: postgres
+ DB_DATABASE: postgres
+ DB_USER: postgres
+ DB_SCHEMA: public
+ DB_PASSWORD: postgres
+
+ KEYCLOAK_USER: admin
+ KEYCLOAK_PASSWORD: password
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_client/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_client/tasks/main.yml
new file mode 100644
index 000000000..513d5836b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_client/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
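+# Module arguments are passed as a single dict: the shared auth_args from
+# vars/main.yml merged with each task's call_args via the combine filter.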
+- name: Delete realm
+ community.general.keycloak_realm: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: absent
+
+- name: Create realm
+ community.general.keycloak_realm: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Desire client
+ community.general.keycloak_client: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ redirect_uris: '{{redirect_uris1}}'
+ attributes: '{{client_attributes1}}'
+ protocol_mappers: '{{protocol_mappers1}}'
+ register: desire_client_not_present
+
+- name: Desire client again with same props
+ community.general.keycloak_client: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ redirect_uris: '{{redirect_uris1}}'
+ attributes: '{{client_attributes1}}'
+ protocol_mappers: '{{protocol_mappers1}}'
+ register: desire_client_when_present_and_same
+
+- name: Check client again with same props
+ community.general.keycloak_client: "{{ auth_args | combine(call_args) }}"
+ check_mode: true
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ redirect_uris: '{{redirect_uris1}}'
+ attributes: '{{client_attributes1}}'
+ protocol_mappers: '{{protocol_mappers1}}'
+ register: check_client_when_present_and_same
+
+- name: Assert changes not detected in last two tasks (desire when same, and check)
+ assert:
+ that:
+ - desire_client_when_present_and_same is not changed
+ - check_client_when_present_and_same is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_client/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_client/vars/main.yml
new file mode 100644
index 000000000..53ba35fca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_client/vars/main.yml
@@ -0,0 +1,61 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+client_id: myclient
+role: myrole
+description_1: desc 1
+description_2: desc 2
+
+auth_args:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+
+redirect_uris1:
+ - "http://example.c.com/"
+ - "http://example.b.com/"
+ - "http://example.a.com/"
+
+client_attributes1: {"backchannel.logout.session.required": true, "backchannel.logout.revoke.offline.tokens": false}
+
+protocol_mappers1:
+ - name: 'email'
+ protocol: 'openid-connect'
+ protocolMapper: 'oidc-usermodel-property-mapper'
+ config:
+ "claim.name": "email"
+ "user.attribute": "email"
+ "jsonType.label": "String"
+ "id.token.claim": "true"
+ "access.token.claim": "true"
+ "userinfo.token.claim": "true"
+
+ - name: 'email_verified'
+ protocol: 'openid-connect'
+ protocolMapper: 'oidc-usermodel-property-mapper'
+ config:
+ "claim.name": "email_verified"
+ "user.attribute": "emailVerified"
+ "jsonType.label": "boolean"
+ "id.token.claim": "true"
+ "access.token.claim": "true"
+ "userinfo.token.claim": "true"
+
+ - name: 'family_name'
+ protocol: 'openid-connect'
+ protocolMapper: 'oidc-usermodel-property-mapper'
+ config:
+ "claim.name": "family_name"
+ "user.attribute": "lastName"
+ "jsonType.label": "String"
+ "id.token.claim": "true"
+ "access.token.claim": "true"
+ "userinfo.token.claim": "true"
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/README.md b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/README.md
new file mode 100644
index 000000000..3f3685f9b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/README.md
@@ -0,0 +1,16 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+The integration test can be performed as follows:
+
+```
+# 1. Start docker-compose:
+docker-compose -f tests/integration/targets/keycloak_clientscope_type/docker-compose.yml down
+docker-compose -f tests/integration/targets/keycloak_clientscope_type/docker-compose.yml up -d
+
+# 2. Run the integration tests:
+ansible-test integration keycloak_clientscope_type --allow-unsupported -v
+```
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/docker-compose.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/docker-compose.yml
new file mode 100644
index 000000000..b73ddff16
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/docker-compose.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3.4'
+
+services:
+ keycloak:
+ image: quay.io/keycloak/keycloak:21.0.2
+ ports:
+ - 8080:8080
+ environment:
+ KEYCLOAK_ADMIN: admin
+ KEYCLOAK_ADMIN_PASSWORD: password
+ command: start-dev
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/tasks/main.yml
new file mode 100644
index 000000000..76daace73
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/tasks/main.yml
@@ -0,0 +1,164 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+# Fixtures
+- name: Create keycloak realm
+ community.general.keycloak_realm:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: ""
+ state: present
+ enabled: true
+
+- name: Create keycloak client
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ enabled: true
+
+- name: Create a scope1 client scope
+ community.general.keycloak_clientscope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: scope1
+ description: "test 1"
+ protocol: openid-connect
+
+- name: Create a scope2 client scope
+ community.general.keycloak_clientscope:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: scope2
+ description: "test 2"
+ protocol: openid-connect
+
+### Tests
+### Realm
+- name: adjust client-scope types in realm
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ default_clientscopes: ['scope1', 'scope2']
+ optional_clientscopes: []
+ register: result
+
+- name: Assert that client scope types are set
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - '"scope1" in result.end_state.default_clientscopes'
+ - '"scope2" in result.end_state.default_clientscopes'
+ - result.end_state.default_clientscopes|length == 2
+ - result.end_state.optional_clientscopes|length == 0
+
+- name: adjust client-scope types in realm again
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ default_clientscopes: ['scope1', 'scope2']
+ optional_clientscopes: []
+ register: result
+ failed_when: result is changed
+
+- name: Adjust client-scope types in realm, moving scope2 to optional
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ default_clientscopes: ['scope1']
+ optional_clientscopes: ['scope2']
+ register: result
+
+- name: Assert that client scope types are set
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - '"scope1" in result.end_state.default_clientscopes'
+ - '"scope2" in result.end_state.optional_clientscopes'
+ - result.end_state.default_clientscopes|length == 1
+ - result.end_state.optional_clientscopes|length == 1
+
+### Client
+- name: Adjust client-scope types in client
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ default_clientscopes: ['scope1', 'scope2']
+ optional_clientscopes: []
+ register: result
+
+- name: Assert that client scope types are set
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - '"scope1" in result.end_state.default_clientscopes'
+ - '"scope2" in result.end_state.default_clientscopes'
+ - result.end_state.default_clientscopes|length == 2
+ - result.end_state.optional_clientscopes|length == 0
+
+- name: Adjust client-scope types in client again (test idempotency)
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ default_clientscopes: ['scope1', 'scope2']
+ optional_clientscopes: []
+ register: result
+ failed_when: result is changed
+
+- name: Adjust client-scope types in client, moving scope2 to optional
+ community.general.keycloak_clientscope_type:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ default_clientscopes: ['scope1']
+ optional_clientscopes: ['scope2']
+ register: result
+
+- name: Assert that client scope types are set
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - '"scope1" in result.end_state.default_clientscopes'
+ - '"scope2" in result.end_state.optional_clientscopes'
+ - result.end_state.default_clientscopes|length == 1
+ - result.end_state.optional_clientscopes|length == 1
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/vars/main.yml
new file mode 100644
index 000000000..7efd2b04e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientscope_type/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: clientscope-type-realm
+client_id: clientscope-type-client
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/README.md b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/README.md
new file mode 100644
index 000000000..fb721801d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/README.md
@@ -0,0 +1,17 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+The integration test can be performed as follows:
+
+```
+# 1. (Re)start the docker-compose environment from a clean state:
+docker-compose -f tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml stop
+docker-compose -f tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml rm -f -v
+docker-compose -f tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml up -d
+
+# 2. Run the integration tests:
+ansible-test integration keycloak_clientsecret_info --allow-unsupported -v
+```
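Note: `docker-compose up -d` returns before Keycloak is actually serving requests, so the tests can fail if started immediately. A minimal readiness wait, as a sketch (the keycloak_group target later in this diff uses the same `wait_for` approach; the timeout value here is an assumption):

```
---
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Wait until Keycloak accepts connections on its mapped port
      ansible.builtin.wait_for:
        host: localhost
        port: 8080
        delay: 30       # give the container time to boot before the first check
        timeout: 300    # assumed generous upper bound
```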
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml
new file mode 100644
index 000000000..5e14e9aac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/docker-compose.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3.4'
+
+services:
+ postgres:
+ image: postgres:9.6
+ restart: always
+ environment:
+ POSTGRES_USER: postgres
+ POSTGRES_DB: postgres
+ POSTGRES_PASSWORD: postgres
+
+ keycloak:
+ image: jboss/keycloak:12.0.4
+ ports:
+ - 8080:8080
+
+ environment:
+ DB_VENDOR: postgres
+ DB_ADDR: postgres
+ DB_DATABASE: postgres
+ DB_USER: postgres
+ DB_SCHEMA: public
+ DB_PASSWORD: postgres
+
+ KEYCLOAK_USER: admin
+ KEYCLOAK_PASSWORD: password
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml
new file mode 100644
index 000000000..a0cacf188
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/tasks/main.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Keycloak Client
+ community.general.keycloak_client: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ register: client
+
+- name: Keycloak Client fetch clientsecret by client_id
+ community.general.keycloak_clientsecret_info: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ register: fetch_by_client_id_result
+
+- name: Assert that the client secret was retrieved
+ assert:
+ that:
+ - fetch_by_client_id_result.clientsecret_info.type == "secret"
+ - "{{ fetch_by_client_id_result.clientsecret_info.value | length }} >= 32"
+
+- name: Keycloak Client fetch clientsecret by id
+ community.general.keycloak_clientsecret_info: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ id: "{{ client.end_state.id }}"
+ register: fetch_by_id_result
+
+- name: Assert that the same client secret was retrieved both times
+ assert:
+ that:
+ - fetch_by_id_result.clientsecret_info.value == fetch_by_client_id_result.clientsecret_info.value
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/vars/main.yml
new file mode 100644
index 000000000..8c913705f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_info/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+client_id: myclient
+# the following values are not used by this target's tasks; they mirror the keycloak_role vars
+role: myrole
+description_1: desc 1
+description_2: desc 2
+
+auth_args:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
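Note: the tasks in this target pass their module options as a single free-form dictionary, merging the shared `auth_args` above with per-task `call_args` via the `combine` filter. The expanded equivalent of such a call, as a sketch:

```
- name: Fetch the client secret (expanded form of the combine idiom)
  community.general.keycloak_clientsecret_info:
    auth_keycloak_url: "{{ url }}"
    auth_realm: "{{ admin_realm }}"
    auth_username: "{{ admin_user }}"
    auth_password: "{{ admin_password }}"
    realm: "{{ realm }}"
    client_id: "{{ client_id }}"
```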
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/README.md b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/README.md
new file mode 100644
index 000000000..08251b4c5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/README.md
@@ -0,0 +1,17 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+The integration test can be performed as follows:
+
+```
+# 1. (Re)start the docker-compose environment from a clean state:
+docker-compose -f tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml stop
+docker-compose -f tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml rm -f -v
+docker-compose -f tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml up -d
+
+# 2. Run the integration tests:
+ansible-test integration keycloak_clientsecret_regenerate --allow-unsupported -v
+```
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml
new file mode 100644
index 000000000..5e14e9aac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/docker-compose.yml
@@ -0,0 +1,31 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3.4'
+
+services:
+ postgres:
+ image: postgres:9.6
+ restart: always
+ environment:
+ POSTGRES_USER: postgres
+ POSTGRES_DB: postgres
+ POSTGRES_PASSWORD: postgres
+
+ keycloak:
+ image: jboss/keycloak:12.0.4
+ ports:
+ - 8080:8080
+
+ environment:
+ DB_VENDOR: postgres
+ DB_ADDR: postgres
+ DB_DATABASE: postgres
+ DB_USER: postgres
+ DB_SCHEMA: public
+ DB_PASSWORD: postgres
+
+ KEYCLOAK_USER: admin
+ KEYCLOAK_PASSWORD: password
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml
new file mode 100644
index 000000000..9bd52698a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Keycloak Client
+ community.general.keycloak_client: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ register: client
+
+- name: Keycloak Client regenerate clientsecret by client_id
+ community.general.keycloak_clientsecret_regenerate: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ register: regenerate_by_client_id
+
+- name: Assert that the client secret was retrieved
+ assert:
+ that:
+ - regenerate_by_client_id.end_state.type == "secret"
+ - "{{ regenerate_by_client_id.end_state.value | length }} >= 32"
+
+- name: Keycloak Client regenerate clientsecret by id
+ community.general.keycloak_clientsecret_regenerate: "{{ auth_args | combine(call_args) }}"
+ vars:
+ call_args:
+ realm: "{{ realm }}"
+ id: "{{ client.end_state.id }}"
+ register: regenerate_by_id
+
+- name: Assert that client secret was regenerated
+ assert:
+ that:
+ - "{{ regenerate_by_id.end_state.value | length }} >= 32"
+ - regenerate_by_id.end_state.value != regenerate_by_client_id.end_state.value
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/vars/main.yml
new file mode 100644
index 000000000..8c913705f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_clientsecret_regenerate/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+client_id: myclient
+role: myrole
+description_1: desc 1
+description_2: desc 2
+
+auth_args:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_group/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_group/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_group/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_group/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_group/meta/main.yml
new file mode 100644
index 000000000..5769ff1cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_group/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_group/readme.adoc b/ansible_collections/community/general/tests/integration/targets/keycloak_group/readme.adoc
new file mode 100644
index 000000000..1941e54ef
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_group/readme.adoc
@@ -0,0 +1,27 @@
+// Copyright (c) Ansible Project
+// GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+To be able to run these integration tests, a Keycloak server must be
+reachable under a specific URL with a specific admin user and password.
+The exact values expected for these parameters can be found in the
+'vars/main.yml' file. A simple way to provide this is to use the official
+Keycloak docker images like this:
+
+----
+docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=<url-path> -e KEYCLOAK_ADMIN=<admin_user> -e KEYCLOAK_ADMIN_PASSWORD=<admin_password> quay.io/keycloak/keycloak:20.0.2 start-dev
+----
+
+Example with concrete values inserted:
+
+----
+docker run --name mykeycloak -p 8080:8080 -e KC_HTTP_RELATIVE_PATH=/auth -e KEYCLOAK_ADMIN=admin -e KEYCLOAK_ADMIN_PASSWORD=password quay.io/keycloak/keycloak:20.0.2 start-dev
+----
+
+This test suite can run against a fresh, unconfigured server instance
+(no preconfiguration required) and cleans up after itself (undoes all
+its config changes) as long as it runs through completely. While it is
+active, it changes the server configuration in the following ways:
+
+ * creating, modifying and deleting some Keycloak groups
+
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_group/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_group/tasks/main.yml
new file mode 100644
index 000000000..8b115e3a2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_group/tasks/main.yml
@@ -0,0 +1,527 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Start container
+ community.docker.docker_container:
+ name: mykeycloak
+ image: "quay.io/keycloak/keycloak:20.0.2"
+ command: start-dev
+ env:
+ KC_HTTP_RELATIVE_PATH: /auth
+ KEYCLOAK_ADMIN: admin
+ KEYCLOAK_ADMIN_PASSWORD: password
+ ports:
+ - "8080:8080"
+ detach: true
+ auto_remove: true
+ memory: 2200M
+
+- name: Check default ports
+ ansible.builtin.wait_for:
+ host: "localhost"
+ port: "8080"
+ state: started # Port should be open
+ delay: 30 # Wait before first check
+ timeout: 50 # Stop checking after timeout (sec)
+
+- name: Create a keycloak group
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: test-group
+ state: present
+ register: result
+ retries: 3
+ delay: 20
+ until: result is not failed
+
+- name: Assert group was created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "test-group"
+ - result.end_state.path == "/test-group"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Remember the id of the created group
+  set_fact:
+    test_group_id: "{{ result.end_state.id }}"
+
+- name: Group creation rerun (test for idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: test-group
+ state: present
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state != {}
+ - result.end_state.name == "test-group"
+ - result.end_state.path == "/test-group"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Update the name of a keycloak group
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ test_group_id }}"
+ name: new-test-group
+ state: present
+ register: result
+
+- name: Assert that group name was updated
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "new-test-group"
+ - result.end_state.path == "/new-test-group"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Delete a keycloak group by id
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ test_group_id }}"
+ state: absent
+ register: result
+
+- name: Assert that group was deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Redo group deletion (check for idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ test_group_id }}"
+ state: absent
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
+
+- name: Create a keycloak group with some custom attributes
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: my-new_group
+ attributes:
+ attrib1: value1
+ attrib2: value2
+ attrib3:
+ - item1
+ - item2
+ register: result
+
+- name: Assert that group was correctly created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "my-new_group"
+ - result.end_state.path == "/my-new_group"
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+ - result.end_state.attributes != {}
+ - result.end_state.attributes.attrib1 == ["value1"]
+ - result.end_state.attributes.attrib2 == ["value2"]
+ - result.end_state.attributes.attrib3 == ["item1", "item2"]
+
+- name: Delete a keycloak group based on name
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: my-new_group
+ state: absent
+ register: result
+
+- name: Assert that group was deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+## subgroup tests
+## base-group creation is already tested above, so no asserts for this
+- name: Create a new base group for subgroup testing (test setup)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: rootgrp
+ register: subgrp_basegrp_result
+
+- name: Create a subgroup using parent id
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subgrp1
+ parents:
+ - id: "{{ subgrp_basegrp_result.end_state.id }}"
+ register: result
+
+- name: Assert that subgroup was correctly created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "subgrp1"
+ - result.end_state.path == "/rootgrp/subgrp1"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Recreate a subgroup using parent id (test idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subgrp1
+ parents:
+ - id: "{{ subgrp_basegrp_result.end_state.id }}"
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state != {}
+ - result.end_state.name == "subgrp1"
+ - result.end_state.path == "/rootgrp/subgrp1"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Changing name of existing group
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ result.end_state.id }}"
+ name: new-subgrp1
+ parents:
+ - id: "{{ subgrp_basegrp_result.end_state.id }}"
+ register: result
+
+- name: Assert that subgroup name has changed correctly
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "new-subgrp1"
+ - result.end_state.path == "/rootgrp/new-subgrp1"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Create a subgroup using parent name
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subgrp2
+ parents:
+ - name: rootgrp
+ register: result
+
+- name: Assert that subgroup was correctly created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "subgrp2"
+ - result.end_state.path == "/rootgrp/subgrp2"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: Recreate a subgroup using parent name (test idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subgrp2
+ parents:
+ - name: rootgrp
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state != {}
+ - result.end_state.name == "subgrp2"
+ - result.end_state.path == "/rootgrp/subgrp2"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+## subgroup of subgroup tests
+- name: Create a subgroup of a subgroup using parent names (complete parent chain)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subsubgrp
+ parents:
+ - name: rootgrp
+ - name: subgrp2
+ register: result
+
+- name: Assert subgroup of subgroup was created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "subsubgrp"
+ - result.end_state.path == "/rootgrp/subgrp2/subsubgrp"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: ReCreate a subgroup of a subgroup using parent names (test idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subsubgrp
+ parents:
+ - name: rootgrp
+ - name: subgrp2
+ register: result_subsubgrp
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result_subsubgrp is not changed
+ - result_subsubgrp.end_state != {}
+ - result_subsubgrp.end_state.name == "subsubgrp"
+ - result_subsubgrp.end_state.path == "/rootgrp/subgrp2/subsubgrp"
+ - result_subsubgrp.end_state.attributes == {}
+ - result_subsubgrp.end_state.clientRoles == {}
+ - result_subsubgrp.end_state.realmRoles == []
+ - result_subsubgrp.end_state.subGroups == []
+
+- name: Create a subgroup of a subgroup using direct parent id (incomplete parent chain)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subsubsubgrp
+ parents:
+ - id: "{{ result_subsubgrp.end_state.id }}"
+ register: result
+
+- name: Assert subgroup of subgroup was created
+ assert:
+ that:
+ - result is changed
+ - result.end_state != {}
+ - result.end_state.name == "subsubsubgrp"
+ - result.end_state.path == "/rootgrp/subgrp2/subsubgrp/subsubsubgrp"
+ - result.end_state.attributes == {}
+ - result.end_state.clientRoles == {}
+ - result.end_state.realmRoles == []
+ - result.end_state.subGroups == []
+
+- name: ReCreate a subgroup of a subgroup using direct parent id (test idempotency)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: subsubsubgrp
+ parents:
+ - id: "{{ result_subsubgrp.end_state.id }}"
+ register: result_subsubsubgrp
+
+- name: Assert that nothing changed
+ assert:
+ that:
+ - result_subsubsubgrp is not changed
+ - result_subsubsubgrp.end_state != {}
+ - result_subsubsubgrp.end_state.name == "subsubsubgrp"
+ - result_subsubsubgrp.end_state.path == "/rootgrp/subgrp2/subsubgrp/subsubsubgrp"
+ - result_subsubsubgrp.end_state.attributes == {}
+ - result_subsubsubgrp.end_state.clientRoles == {}
+ - result_subsubsubgrp.end_state.realmRoles == []
+ - result_subsubsubgrp.end_state.subGroups == []
+
+## subgroup deletion tests
+## note: group deletion in general is already sufficiently covered above;
+## what makes it interesting here again is verifying that it also works
+## properly for subgroups and for groups that still contain subgroups
+- name: Deleting a subgroup by id (no parents needed)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ result_subsubsubgrp.end_state.id }}"
+ state: absent
+ register: result
+
+- name: Assert that subgroup was deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Redo subgroup deletion (idempotency test)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ id: "{{ result_subsubsubgrp.end_state.id }}"
+ state: absent
+ register: result
+
+- name: Assert that nothing changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
+
+- name: Deleting a subgroup by name
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: new-subgrp1
+ parents:
+ - name: rootgrp
+ state: absent
+ register: result
+
+- name: Assert that subgroup was deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Redo deleting a subgroup by name (idempotency test)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: new-subgrp1
+ parents:
+ - name: rootgrp
+ state: absent
+ register: result
+
+- name: Assert that nothing has changed
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
+
+- name: Delete keycloak group which has subgroups
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: rootgrp
+ state: absent
+ register: result
+
+- name: Assert that group was deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Redo delete keycloak group which has subgroups (idempotency test)
+ community.general.keycloak_group:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: rootgrp
+ state: absent
+ register: result
+
+- name: Assert that group was deleted
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_group/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_group/vars/main.yml
new file mode 100644
index 000000000..e8aeb4f3f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_group/vars/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: master
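Note: unlike the other targets in this diff, the group tests run directly against the `master` realm rather than a throwaway realm; the readme.adoc above relies on the suite cleaning up after itself. A dedicated test realm could be created first, as the other targets do; a sketch reusing this file's variables (the realm name `grouptestrealm` is illustrative only):

```
- name: Create a dedicated test realm (sketch)
  community.general.keycloak_realm:
    auth_keycloak_url: "{{ url }}"
    auth_realm: "{{ admin_realm }}"
    auth_username: "{{ admin_user }}"
    auth_password: "{{ admin_password }}"
    id: grouptestrealm       # illustrative name, not part of the original target
    realm: grouptestrealm
    state: present
```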
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/tasks/main.yml
new file mode 100644
index 000000000..79ba33049
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/tasks/main.yml
@@ -0,0 +1,175 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Create new identity provider
+ community.general.keycloak_identity_provider:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ alias: "{{ idp }}"
+ display_name: OpenID Connect IdP
+ enabled: true
+ provider_id: oidc
+ config:
+ issuer: https://idp.example.com
+ authorizationUrl: https://idp.example.com/auth
+ tokenUrl: https://idp.example.com/token
+ userInfoUrl: https://idp.example.com/userinfo
+ clientAuthMethod: client_secret_post
+ clientId: clientid
+ clientSecret: clientsecret
+ syncMode: FORCE
+ mappers:
+ - name: "first_name"
+ identityProviderAlias: "oidc-idp"
+ identityProviderMapper: "oidc-user-attribute-idp-mapper"
+ config:
+ claim: "first_name"
+ user.attribute: "first_name"
+ syncMode: "INHERIT"
+ - name: "last_name"
+ identityProviderAlias: "oidc-idp"
+ identityProviderMapper: "oidc-user-attribute-idp-mapper"
+ config:
+ claim: "last_name"
+ user.attribute: "last_name"
+ syncMode: "INHERIT"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert identity provider created
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.alias == "{{ idp }}"
+ - result.end_state.mappers != []
+
+- name: Update existing identity provider (no change)
+ community.general.keycloak_identity_provider:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ alias: "{{ idp }}"
+ enabled: true
+ provider_id: oidc
+ config:
+ issuer: https://idp.example.com
+ authorizationUrl: https://idp.example.com/auth
+ tokenUrl: https://idp.example.com/token
+ userInfoUrl: https://idp.example.com/userinfo
+ clientAuthMethod: client_secret_post
+ clientId: clientid
+ clientSecret: "**********"
+ syncMode: FORCE
+ mappers:
+ - name: "first_name"
+ identityProviderAlias: "oidc-idp"
+ identityProviderMapper: "oidc-user-attribute-idp-mapper"
+ config:
+ claim: "first_name"
+ user.attribute: "first_name"
+ syncMode: "INHERIT"
+ - name: "last_name"
+ identityProviderAlias: "oidc-idp"
+ identityProviderMapper: "oidc-user-attribute-idp-mapper"
+ config:
+ claim: "last_name"
+ user.attribute: "last_name"
+ syncMode: "INHERIT"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert identity provider unchanged
+ assert:
+ that:
+ - result is not changed
+
+- name: Update existing identity provider (with change)
+ community.general.keycloak_identity_provider:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ alias: "{{ idp }}"
+ enabled: false
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert identity provider updated
+ assert:
+ that:
+ - result is changed
+ - result.existing.enabled == true
+ - result.end_state.enabled == false
+
+- name: Delete existing identity provider
+ community.general.keycloak_identity_provider:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ alias: "{{ idp }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert identity provider deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Delete absent identity provider
+ community.general.keycloak_identity_provider:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ alias: "{{ idp }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert identity provider unchanged
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/vars/main.yml
new file mode 100644
index 000000000..6d2078ca0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_identity_provider/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+idp: myidp
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_role/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_role/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_role/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_role/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_role/tasks/main.yml
new file mode 100644
index 000000000..61b62629a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_role/tasks/main.yml
@@ -0,0 +1,250 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Create client
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ state: present
+ register: client
+
+- name: Create new realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert realm role created
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.name == "{{ role }}"
+ - result.end_state.containerId == "{{ realm }}"
+
+- name: Create existing realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert realm role unchanged
+ assert:
+ that:
+ - result is not changed
+
+- name: Update realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ description: "{{ description_2 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert realm role updated
+ assert:
+ that:
+ - result is changed
+ - result.existing.description == "{{ description_1 }}"
+ - result.end_state.description == "{{ description_2 }}"
+
+- name: Delete existing realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert realm role deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Delete absent realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert realm role unchanged
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
+
+- name: Create new client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert client role created
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.name == "{{ role }}"
+ - result.end_state.containerId == "{{ client.end_state.id }}"
+
+- name: Create existing client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert client role unchanged
+ assert:
+ that:
+ - result is not changed
+
+- name: Update client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ description: "{{ description_2 }}"
+ state: present
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert client role updated
+ assert:
+ that:
+ - result is changed
+ - result.existing.description == "{{ description_1 }}"
+ - result.end_state.description == "{{ description_2 }}"
+
+- name: Delete existing client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert client role deleted
+ assert:
+ that:
+ - result is changed
+ - result.end_state == {}
+
+- name: Delete absent client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert client role unchanged
+ assert:
+ that:
+ - result is not changed
+ - result.end_state == {}
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_role/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_role/vars/main.yml
new file mode 100644
index 000000000..b003311e0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_role/vars/main.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+client_id: myclient
+role: myrole
+description_1: desc 1
+description_2: desc 2
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/tasks/main.yml
new file mode 100644
index 000000000..ae0b4bf16
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/tasks/main.yml
@@ -0,0 +1,425 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Create new user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: secret
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation created
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.name == "{{ federation }}"
+
+- name: Create new user federation in admin realm
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ admin_realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: secret
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation created (admin realm)
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.name == "{{ federation }}"
+
+- name: Update existing user federation (no change)
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: "**********"
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation unchanged
+ assert:
+ that:
+ - result is not changed
+ - result.existing != {}
+ - result.existing.name == "{{ federation }}"
+ - result.end_state != {}
+ - result.end_state.name == "{{ federation }}"
+
+- name: Update existing user federation (no change, admin realm)
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ admin_realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: "**********"
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation unchanged (admin realm)
+ assert:
+ that:
+ - result is not changed
+ - result.existing != {}
+ - result.existing.name == "{{ federation }}"
+ - result.end_state != {}
+ - result.end_state.name == "{{ federation }}"
+
+- name: Update existing user federation (with change)
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: "**********"
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ mappers:
+      # overwrite / update the pre-existing default mapper
+ - name: "username"
+ providerId: "user-attribute-ldap-mapper"
+ config:
+ ldap.attribute: ldap_user
+ user.model.attribute: usr
+ read.only: true
+ # create new mapper
+ - name: "full name"
+ providerId: "full-name-ldap-mapper"
+ providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ config:
+ ldap.full.name.attribute: cn
+ read.only: true
+ write.only: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation updated
+ assert:
+ that:
+ - result is changed
+ - result.existing != {}
+ - result.existing.name == "{{ federation }}"
+ - result.end_state != {}
+ - result.end_state.name == "{{ federation }}"
+
+- name: Delete existing user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation deleted
+ assert:
+ that:
+ - result is changed
+ - result.existing != {}
+ - result.end_state == {}
+
+- name: Delete absent user federation
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: absent
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation unchanged
+ assert:
+ that:
+ - result is not changed
+ - result.existing == {}
+ - result.end_state == {}
+
+- name: Create new user federation together with mappers
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: present
+ provider_id: ldap
+ provider_type: org.keycloak.storage.UserStorageProvider
+ config:
+ enabled: true
+ priority: 0
+ fullSyncPeriod: -1
+ changedSyncPeriod: -1
+ cachePolicy: DEFAULT
+ batchSizeForSync: 1000
+ editMode: READ_ONLY
+ importEnabled: true
+ syncRegistrations: false
+ vendor: other
+ usernameLDAPAttribute: uid
+ rdnLDAPAttribute: uid
+ uuidLDAPAttribute: entryUUID
+ userObjectClasses: "inetOrgPerson, organizationalPerson"
+ connectionUrl: "ldaps://ldap.example.com:636"
+ usersDn: "ou=Users,dc=example,dc=com"
+ authType: simple
+ bindDn: cn=directory reader
+ bindCredential: secret
+ searchScope: 1
+ validatePasswordPolicy: false
+ trustEmail: false
+ useTruststoreSpi: "ldapsOnly"
+ connectionPooling: true
+ pagination: true
+ allowKerberosAuthentication: false
+ useKerberosForPasswordAuthentication: false
+ debug: false
+ mappers:
+ # overwrite / update the pre-existing default mapper
+ - name: "username"
+ providerId: "user-attribute-ldap-mapper"
+ config:
+ ldap.attribute: ldap_user
+ user.model.attribute: usr
+ read.only: true
+ # create new mapper
+ - name: "full name"
+ providerId: "full-name-ldap-mapper"
+ providerType: "org.keycloak.storage.ldap.mappers.LDAPStorageMapper"
+ config:
+ ldap.full.name.attribute: cn
+ read.only: true
+ write.only: false
+ register: result
+
+- name: Debug
+ debug:
+ var: result
+
+- name: Assert user federation created
+ assert:
+ that:
+ - result is changed
+ - result.existing == {}
+ - result.end_state.name == federation
+
+# No point in re-testing deletion here; this is only cleanup of the server
+# changes introduced above.
+- name: Delete the user federation created above
+ community.general.keycloak_user_federation:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ federation }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/vars/main.yml
new file mode 100644
index 000000000..acf73e2ca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_federation/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+federation: myfed
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/aliases b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/aliases
new file mode 100644
index 000000000..cdeae1417
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/aliases
@@ -0,0 +1,4 @@
+# Copyright (c) 2022, Dušan Marković (@bratwurzt)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
new file mode 100644
index 000000000..1a897ad9a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/tasks/main.yml
@@ -0,0 +1,147 @@
+# Copyright (c) 2022, Dušan Marković (@bratwurzt)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create realm
+ community.general.keycloak_realm:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ id: "{{ realm }}"
+ realm: "{{ realm }}"
+ state: present
+
+- name: Create client
+ community.general.keycloak_client:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ service_accounts_enabled: true
+ state: present
+ register: client
+
+- name: Create new realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+
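+# `service_account_user_client_id` addresses the service-account user that
+# Keycloak creates for a client once service accounts are enabled.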
+- name: Map a realm role to client service account
+ vars:
+ roles: [{'name': '{{ role }}'}]
+ community.general.keycloak_user_rolemapping:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ service_account_user_client_id: "{{ client_id }}"
+ roles: "{{ roles }}"
+ state: present
+ register: result
+
+- name: Assert realm role is assigned
+ assert:
+ that:
+ - result is changed
+ - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count > 0
+
+- name: Unmap a realm role from client service account
+ vars:
+ roles: [{'name': '{{ role }}'}]
+ community.general.keycloak_user_rolemapping:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ service_account_user_client_id: "{{ client_id }}"
+ roles: "{{ roles }}"
+ state: absent
+ register: result
+
+- name: Assert realm role is unassigned
+ assert:
+ that:
+ - result is changed
+ - (result.end_state | length) == (result.existing | length) - 1
+ - result.existing | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count > 0
+ - result.end_state | selectattr("clientRole", "eq", false) | selectattr("name", "eq", role) | list | count == 0
+
+- name: Delete existing realm role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ name: "{{ role }}"
+ state: absent
+
+- name: Create new client role
+ community.general.keycloak_role:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ name: "{{ role }}"
+ description: "{{ description_1 }}"
+ state: present
+
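+# For client roles, `client_id` additionally names the client that owns the
+# role, on top of the service-account user addressed above.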
+- name: Map a client role to client service account
+ vars:
+ roles: [{'name': '{{ role }}'}]
+ community.general.keycloak_user_rolemapping:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ service_account_user_client_id: "{{ client_id }}"
+ roles: "{{ roles }}"
+ state: present
+ register: result
+
+- name: Assert client role is assigned
+ assert:
+ that:
+ - result is changed
+ - result.end_state | selectattr("clientRole", "eq", true) | selectattr("name", "eq", role) | list | count > 0
+
+- name: Unmap a client role from client service account
+ vars:
+ roles: [{'name': '{{ role }}'}]
+ community.general.keycloak_user_rolemapping:
+ auth_keycloak_url: "{{ url }}"
+ auth_realm: "{{ admin_realm }}"
+ auth_username: "{{ admin_user }}"
+ auth_password: "{{ admin_password }}"
+ realm: "{{ realm }}"
+ client_id: "{{ client_id }}"
+ service_account_user_client_id: "{{ client_id }}"
+ roles: "{{ roles }}"
+ state: absent
+ register: result
+
+- name: Assert client role is unassigned
+ assert:
+ that:
+ - result is changed
+ - result.end_state == []
+ - result.existing | selectattr("clientRole", "eq", true) | selectattr("name", "eq", role) | list | count > 0
diff --git a/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/vars/main.yml
new file mode 100644
index 000000000..385dbea44
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keycloak_user_rolemapping/vars/main.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) 2022, Dušan Marković (@bratwurzt)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+url: http://localhost:8080/auth
+admin_realm: master
+admin_user: admin
+admin_password: password
+realm: myrealm
+client_id: myclient
+role: myrole
+description_1: desc 1
+description_2: desc 2
diff --git a/ansible_collections/community/general/tests/integration/targets/keyring/aliases b/ansible_collections/community/general/tests/integration/targets/keyring/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keyring/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/keyring/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/keyring/tasks/main.yml
new file mode 100644
index 000000000..3833018e8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keyring/tasks/main.yml
@@ -0,0 +1,99 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure required packages for headless keyring access are installed
+ ansible.builtin.package:
+ name: gnome-keyring
+ become: true
+ when: "'localhost' not in inventory_hostname"
+
+- name: Ensure keyring is installed (RPM)
+ ansible.builtin.dnf:
+ name: python3-keyring
+ state: present
+ become: true
+ when: ansible_facts['os_family'] == 'RedHat'
+
+- name: Ensure keyring is installed (pip)
+ ansible.builtin.pip:
+ name: keyring
+ state: present
+ become: true
+ when: ansible_facts['os_family'] != 'RedHat'
+
+# Set password for new account
+# Expected result: success
+- name: Set password for test/test1
+ community.general.keyring:
+ service: test
+ username: test1
+ user_password: "{{ user_password }}"
+ keyring_password: "{{ keyring_password }}"
+ register: set_password
+
+- name: Assert that the password has been set
+ ansible.builtin.assert:
+ that:
+ - set_password.msg == "Passphrase has been updated for test@test1"
+
+# Print out password to confirm it has been set
+# Expected result: success
+- name: Retrieve password for test/test1
+ community.general.keyring_info:
+ service: test
+ username: test1
+ keyring_password: "{{ keyring_password }}"
+ register: test_set_password
+
+- name: Assert that the password exists
+ ansible.builtin.assert:
+ that:
+ - test_set_password.passphrase == user_password
+
+# Attempt to set password again
+# Expected result: success - nothing should happen
+- name: Attempt to re-set password for test/test1
+ community.general.keyring:
+ service: test
+ username: test1
+ user_password: "{{ user_password }}"
+ keyring_password: "{{ keyring_password }}"
+ register: second_set_password
+
+- name: Assert that the password has not been changed
+ ansible.builtin.assert:
+ that:
+ - second_set_password.msg == "Passphrase already set for test@test1"
+
+# Delete account
+# Expected result: success
+- name: Delete password for test/test1
+ community.general.keyring:
+ service: test
+ username: test1
+ user_password: "{{ user_password }}"
+ keyring_password: "{{ keyring_password }}"
+ state: absent
+ register: del_password
+
+- name: Assert that the password has been deleted
+ ansible.builtin.assert:
+ that:
+ - del_password.msg == "Passphrase has been removed for test@test1"
+
+# Attempt to retrieve the deleted entry (to confirm it has been deleted).
+# `no_log` is deliberately omitted so a failure here remains debuggable.
+# Expected result: success, with no passphrase returned
+- name: Retrieve password for test/test1
+ community.general.keyring_info:
+ service: test
+ username: test1
+ keyring_password: "{{ keyring_password }}"
+ register: test_del_password
+
+- name: Assert that the password no longer exists
+ ansible.builtin.assert:
+ that:
+ - test_del_password.passphrase is not defined
diff --git a/ansible_collections/community/general/tests/integration/targets/keyring/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/keyring/vars/main.yml
new file mode 100644
index 000000000..b4997b6d3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/keyring/vars/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keyring_password: Password123
+user_password: Test123
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/aliases b/ansible_collections/community/general/tests/integration/targets/launchd/aliases
new file mode 100644
index 000000000..a35088696
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py b/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py
new file mode 100644
index 000000000..31de6c586
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/files/ansible_test_service.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
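+# Minimal HTTP server used as the launchd test daemon: it serves the current
+# directory on the TCP port given as the first command-line argument.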
+import sys
+
+if __name__ == '__main__':
+ if sys.version_info[0] >= 3:
+ import http.server
+ import socketserver
+ PORT = int(sys.argv[1])
+ Handler = http.server.SimpleHTTPRequestHandler
+ httpd = socketserver.TCPServer(("", PORT), Handler)
+ httpd.serve_forever()
+ else:
+ import mimetypes
+ mimetypes.init()
+ mimetypes.add_type('application/json', '.json')
+ import SimpleHTTPServer
+ SimpleHTTPServer.test()
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml
new file mode 100644
index 000000000..8f5b14a59
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test launchd module
+ block:
+ - name: Expect that launchctl exists
+ stat:
+ path: /bin/launchctl
+ register: launchctl_check
+ failed_when:
+ - not launchctl_check.stat.exists
+
+ - name: Run tests
+ include_tasks: test.yml
+ with_items:
+ - test_unknown
+ - test_start_stop
+ - test_restart
+ - test_unload
+ - test_reload
+ - test_runatload
+
+ when: ansible_os_family == 'Darwin'
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml
new file mode 100644
index 000000000..bd7134cc0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/setup.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "[{{ item }}] Deploy test service configuration"
+ template:
+ src: "{{ launchd_service_name }}.plist.j2"
+ dest: "{{ launchd_plist_location }}"
+ become: true
+
+- name: Install the test daemon script
+ copy:
+ src: ansible_test_service.py
+ dest: /usr/local/sbin/ansible_test_service
+ mode: '0755'
+
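+# launchd executes the script directly, so its shebang must point at the same
+# Python interpreter that Ansible itself is using on this host.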
+- name: Rewrite the shebang in the test daemon script
+ lineinfile:
+ path: /usr/local/sbin/ansible_test_service
+ line: "#!{{ ansible_python_interpreter | realpath }}"
+ insertbefore: BOF
+ firstmatch: true
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml
new file mode 100644
index 000000000..e364056e6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/teardown.yml
@@ -0,0 +1,30 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "[{{ item }}] Unload service"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: true
+ register: launchd_unloaded_result
+
+- name: "[{{ item }}] Validation"
+ assert:
+ that:
+ - launchd_unloaded_result is success
+ - launchd_unloaded_result.status.current_state == 'unloaded'
+ - launchd_unloaded_result.status.current_pid == '-'
+
+- name: "[{{ item }}] Remove test service configuration"
+ file:
+ path: "{{ launchd_plist_location }}"
+ state: absent
+ become: true
+
+- name: "[{{ item }}] Remove test service server"
+ file:
+ path: "/usr/local/sbin/ansible_test_service"
+ state: absent
+ become: true
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml
new file mode 100644
index 000000000..25a7bba00
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/test.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Running {{ item }}"
+ block:
+ - include_tasks: setup.yml
+ - include_tasks: "tests/{{ item }}.yml"
+ always:
+ - include_tasks: teardown.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml
new file mode 100644
index 000000000..04dc8ae72
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_reload.yml
@@ -0,0 +1,73 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
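+# Per the assertions below, "reloaded" leaves the service loaded but stopped
+# (current_pid == '-'), so it is explicitly started again afterwards.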
+- name: "[{{ item }}] Given a started service in check_mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result_check_mode"
+ check_mode: true
+
+- name: "[{{ item }}] Assert that everything work in check mode"
+ assert:
+ that:
+ - test_1_launchd_start_result_check_mode is success
+ - test_1_launchd_start_result_check_mode is changed
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] The started service should run on port 21212"
+ wait_for:
+ port: 21212
+ delay: 5
+ timeout: 10
+
+- name: "[{{ item }}] Deploy a new test service configuration with a new port 21213"
+ template:
+ src: "modified.{{ launchd_service_name }}.plist.j2"
+ dest: "{{ launchd_plist_location }}"
+ become: true
+
+- name: "[{{ item }}] When reloading the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: reloaded
+ become: true
+ register: "test_1_launchd_reload_result"
+
+- name: "[{{ item }}] Validate that service was reloaded"
+ assert:
+ that:
+ - test_1_launchd_reload_result is success
+ - test_1_launchd_reload_result is changed
+ - test_1_launchd_reload_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_reload_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_reload_result.status.current_state == 'stopped'
+ - test_1_launchd_reload_result.status.current_pid == '-'
+
+- name: "[{{ item }}] Start the service with the new configuration..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] The started service should run on port 21213"
+ wait_for:
+ port: 21213
+ delay: 5
+ timeout: 10
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml
new file mode 100644
index 000000000..44064cef1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_restart.yml
@@ -0,0 +1,46 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] When restarting the service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: restarted
+ become: true
+ register: "test_1_launchd_restart_result_check_mode"
+ check_mode: true
+
+- name: "[{{ item }}] Validate that service was restarted in check mode"
+ assert:
+ that:
+ - test_1_launchd_restart_result_check_mode is success
+ - test_1_launchd_restart_result_check_mode is changed
+
+- name: "[{{ item }}] When restarting the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: restarted
+ become: true
+ register: "test_1_launchd_restart_result"
+
+- name: "[{{ item }}] Validate that service was restarted"
+ assert:
+ that:
+ - test_1_launchd_restart_result is success
+ - test_1_launchd_restart_result is changed
+ - test_1_launchd_restart_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_restart_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_restart_result.status.current_state == 'started'
+ - test_1_launchd_restart_result.status.current_pid != '-'
+ - test_1_launchd_restart_result.status.status_code == '0'
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml
new file mode 100644
index 000000000..87c72d532
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_runatload.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
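+# `enabled: true` writes RunAtLoad into the plist; the check-mode replace
+# trick at the end verifies the key is present without modifying the file.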
+- name: "[{{ item }}] Given a started service with RunAtLoad set to true..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ enabled: true
+ become: true
+ register: test_1_launchd_start_result
+
+- name: "[{{ item }}] Validate that service was started"
+ assert:
+ that:
+ - test_1_launchd_start_result is success
+ - test_1_launchd_start_result is changed
+ - test_1_launchd_start_result.status.previous_pid == '-'
+ - test_1_launchd_start_result.status.previous_state == 'unloaded'
+ - test_1_launchd_start_result.status.current_state == 'started'
+ - test_1_launchd_start_result.status.current_pid != '-'
+ - test_1_launchd_start_result.status.status_code == '0'
+
+- name: "[{{ item }}] Validate that RunAtLoad is set to true"
+ replace:
+ path: "{{ launchd_plist_location }}"
+ regexp: |
+ \s+<key>RunAtLoad</key>
+ \s+<true/>
+ replace: found_run_at_load
+ check_mode: true
+ register: contents_would_have
+ failed_when: not contents_would_have is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml
new file mode 100644
index 000000000..bf59979aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_start_stop.yml
@@ -0,0 +1,115 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result_check_mode"
+ check_mode: true
+
+
+- name: "[{{ item }}] Validate that service was started in check mode"
+ assert:
+ that:
+ - test_1_launchd_start_result_check_mode is success
+ - test_1_launchd_start_result_check_mode is changed
+
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] Validate that service was started"
+ assert:
+ that:
+ - test_1_launchd_start_result is success
+ - test_1_launchd_start_result is changed
+ - test_1_launchd_start_result.status.previous_pid == '-'
+ - test_1_launchd_start_result.status.previous_state == 'unloaded'
+ - test_1_launchd_start_result.status.current_state == 'started'
+ - test_1_launchd_start_result.status.current_pid != '-'
+ - test_1_launchd_start_result.status.status_code == '0'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a stopped service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: stopped
+ become: true
+ register: "test_2_launchd_stop_result"
+
+- name: "[{{ item }}] Validate that service was stopped after it was started"
+ assert:
+ that:
+ - test_2_launchd_stop_result is success
+ - test_2_launchd_stop_result is changed
+ - test_2_launchd_stop_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_2_launchd_stop_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_2_launchd_stop_result.status.current_state == 'stopped'
+ - test_2_launchd_stop_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a stopped service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: stopped
+ become: true
+ register: "test_3_launchd_stop_result"
+
+- name: "[{{ item }}] Validate that service can be stopped after being already stopped"
+ assert:
+ that:
+ - test_3_launchd_stop_result is success
+ - not test_3_launchd_stop_result is changed
+ - test_3_launchd_stop_result.status.previous_pid == '-'
+ - test_3_launchd_stop_result.status.previous_state == 'stopped'
+ - test_3_launchd_stop_result.status.current_state == 'stopped'
+ - test_3_launchd_stop_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_4_launchd_start_result"
+
+- name: "[{{ item }}] Validate that service was started..."
+ assert:
+ that:
+ - test_4_launchd_start_result is success
+ - test_4_launchd_start_result is changed
+ - test_4_launchd_start_result.status.previous_pid == '-'
+ - test_4_launchd_start_result.status.previous_state == 'stopped'
+ - test_4_launchd_start_result.status.current_state == 'started'
+ - test_4_launchd_start_result.status.current_pid != '-'
+ - test_4_launchd_start_result.status.status_code == '0'
+
+- name: "[{{ item }}] And when service is started again..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_5_launchd_start_result"
+
+- name: "[{{ item }}] Validate that service is still in the same state as before"
+ assert:
+ that:
+ - test_5_launchd_start_result is success
+ - not test_5_launchd_start_result is changed
+ - test_5_launchd_start_result.status.previous_pid == test_4_launchd_start_result.status.current_pid
+ - test_5_launchd_start_result.status.previous_state == test_4_launchd_start_result.status.current_state
+ - test_5_launchd_start_result.status.status_code == '0'
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml
new file mode 100644
index 000000000..d18ea5453
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unknown.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Expect that an error occurs when an unknown service is used."
+ launchd:
+ name: com.acme.unknownservice
+ state: started
+ register: result
+ failed_when:
+ - '"Unable to infer the path of com.acme.unknownservice service plist file and it was not found among active services" not in result.msg'
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml
new file mode 100644
index 000000000..8915aac8b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/tasks/tests/test_unload.yml
@@ -0,0 +1,65 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given a started service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: started
+ become: true
+ register: "test_1_launchd_start_result"
+
+
+- name: "[{{ item }}] When unloading the service in check mode"
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: true
+ register: "test_1_launchd_unloaded_result_check_mode"
+ check_mode: true
+
+- name: "[{{ item }}] Validate that service was unloaded in check mode"
+ assert:
+ that:
+ - test_1_launchd_unloaded_result_check_mode is success
+ - test_1_launchd_unloaded_result_check_mode is changed
+
+
+- name: "[{{ item }}] When unloading the service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: true
+ register: "test_1_launchd_unloaded_result"
+
+- name: "[{{ item }}] Validate that service was unloaded"
+ assert:
+ that:
+ - test_1_launchd_unloaded_result is success
+ - test_1_launchd_unloaded_result is changed
+ - test_1_launchd_unloaded_result.status.previous_pid == test_1_launchd_start_result.status.current_pid
+ - test_1_launchd_unloaded_result.status.previous_state == test_1_launchd_start_result.status.current_state
+ - test_1_launchd_unloaded_result.status.current_state == 'unloaded'
+ - test_1_launchd_unloaded_result.status.current_pid == '-'
+
+# -----------------------------------------------------------
+
+- name: "[{{ item }}] Given an unloaded service on an unloaded service..."
+ launchd:
+ name: "{{ launchd_service_name }}"
+ state: unloaded
+ become: true
+ register: "test_2_launchd_unloaded_result"
+
+- name: "[{{ item }}] Validate that service did not change and is still unloaded"
+ assert:
+ that:
+ - test_2_launchd_unloaded_result is success
+ - not test_2_launchd_unloaded_result is changed
+ - test_2_launchd_unloaded_result.status.previous_pid == '-'
+ - test_2_launchd_unloaded_result.status.previous_state == 'unloaded'
+ - test_2_launchd_unloaded_result.status.current_state == 'unloaded'
+ - test_2_launchd_unloaded_result.status.current_pid == '-'
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2 b/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2
new file mode 100644
index 000000000..43f43c24f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/templates/launchd.test.service.plist.j2
@@ -0,0 +1,18 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>{{ launchd_service_name }}</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/local/sbin/ansible_test_service</string>
+ <string>21212</string>
+ </array>
+ </dict>
+</plist>
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2 b/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2
new file mode 100644
index 000000000..a41b65562
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/templates/modified.launchd.test.service.plist.j2
@@ -0,0 +1,18 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+ <dict>
+ <key>Label</key>
+ <string>{{ launchd_service_name }}</string>
+ <key>ProgramArguments</key>
+ <array>
+ <string>/usr/local/sbin/ansible_test_service</string>
+ <string>21213</string>
+ </array>
+ </dict>
+</plist>
diff --git a/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml
new file mode 100644
index 000000000..ce880ed9d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/launchd/vars/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+launchd_service_name: launchd.test.service
+launchd_plist_location: /Library/LaunchDaemons/{{ launchd_service_name }}.plist
diff --git a/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases b/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases
new file mode 100644
index 000000000..795844548
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ldap_search/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml
new file mode 100644
index 000000000..d282aa0dc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ldap_search/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_openldap
diff --git a/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml
new file mode 100644
index 000000000..521075b5e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Run LDAP search module tests
+ block:
+ - include_tasks: "{{ item }}"
+ with_fileglob:
+ - 'tests/*.yml'
+ when: ansible_distribution in ['Ubuntu', 'Debian']
diff --git a/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml b/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml
new file mode 100644
index 000000000..36d245d39
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ldap_search/tasks/tests/basic.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: Running tests/basic.yml
+
+####################################################################
+## Search ##########################################################
+####################################################################
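+# The setup_openldap dependency seeds the directory; presumably it creates the
+# "ldaptest" user queried here.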
+- name: Test simple search for a user
+ ldap_search:
+ dn: "ou=users,dc=example,dc=com"
+ scope: "onelevel"
+ filter: "(uid=ldaptest)"
+ ignore_errors: true
+ register: output
+
+- name: assert that test LDAP user can be found
+ assert:
+ that:
+ - output is not failed
+ - output.results | length == 1
+ - output.results.0.displayName == "LDAP Test"
diff --git a/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases
new file mode 100644
index 000000000..620afe071
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml
new file mode 100644
index 000000000..70649f505
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/listen_ports_facts/tasks/main.yml
@@ -0,0 +1,114 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test playbook for the listen_ports_facts module
+# Copyright (c) 2019, Nathan Davison <ndavison85@gmail.com>
+
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install netstat and netcat on deb
+ ansible.builtin.package:
+ name:
+ - net-tools
+ - netcat
+ state: latest
+ when: ansible_os_family == "Debian"
+
+- name: install netstat and netcat on rh < 7
+ ansible.builtin.package:
+ name:
+ - net-tools
+ - nc.x86_64
+ state: latest
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: install netcat on rh >= 7
+ ansible.builtin.package:
+ name: 'nmap-ncat'
+ state: latest
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7
+
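+# Start throw-away nc listeners in the background so the facts module has
+# well-known TCP/UDP ports to discover; they are killed at the end of the play.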
+- name: start UDP server on port 5555
+ command: nc -u -l -p 5555
+ async: 1000
+ poll: 0
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: start UDP server on port 5555
+ command: nc -u -l 5555
+ async: 1000
+ poll: 0
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: start TCP server on port 5556
+ command: "nc -l -p 5556"
+ async: 1000
+ poll: 0
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: start TCP server on port 5556
+ command: "nc -l 5556"
+ async: 1000
+ poll: 0
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7
+
+- name: Gather listening ports facts
+ listen_ports_facts:
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check that the include_non_listening parameters ('state' and 'foreign_address') are not active by default
+ assert:
+ that:
+ - ansible_facts.tcp_listen | selectattr('state', 'defined') | list | length == 0
+ - ansible_facts.tcp_listen | selectattr('foreign_address', 'defined') | list | length == 0
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: Gather listening ports facts explicitly via netstat and include_non_listening
+ listen_ports_facts:
+ command: 'netstat'
+ include_non_listening: 'yes'
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int < 7) or ansible_os_family == "Debian"
+
+- name: Gather listening ports facts explicitly via ss and include_non_listening
+ listen_ports_facts:
+ command: 'ss'
+ include_non_listening: 'yes'
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7
+
+- name: check for ansible_facts.udp_listen exists
+ assert:
+ that: ansible_facts.udp_listen is defined
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check for ansible_facts.tcp_listen exists
+ assert:
+ that: ansible_facts.tcp_listen is defined
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check that the include_non_listening parameters 'state' and 'foreign_address' exist
+ assert:
+ that:
+ - ansible_facts.tcp_listen | selectattr('state', 'defined') | list | length > 0
+ - ansible_facts.tcp_listen | selectattr('foreign_address', 'defined') | list | length > 0
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian"
+
+- name: check TCP 5556 is in listening ports
+ assert:
+ that: 5556 in ansible_facts.tcp_listen | map(attribute='port') | sort | list
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: check UDP 5555 is in listening ports
+ assert:
+ that: 5555 in ansible_facts.udp_listen | map(attribute='port') | sort | list
+ when: (ansible_os_family == "RedHat" and ansible_distribution_major_version|int >= 7) or ansible_os_family == "Debian"
+
+- name: kill all async commands
+ command: "kill -9 {{ item.pid }}"
+ loop: "{{ [tcp_listen, udp_listen]|flatten }}"
+ when: item.name == 'nc'
+ ignore_errors: true
diff --git a/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases b/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases
new file mode 100644
index 000000000..f7f4063f6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/locale_gen/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+needs/root
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml b/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml
new file mode 100644
index 000000000..c6bdcc046
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/locale_gen.yml
@@ -0,0 +1,99 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
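+# Record whether pt_BR was already installed so the original state can be
+# restored at the end of the test.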
+- name: Is the locale we're going to test against installed?
+ shell: locale -a | grep pt_BR
+ register: initial_state
+ ignore_errors: true
+
+- name: Make sure the locale is not installed
+ locale_gen:
+ name: pt_BR
+ state: absent
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: cleaned
+ ignore_errors: true
+
+- name: Make sure the locale is not present
+ assert:
+ that:
+ - "cleaned.rc == 1"
+
+- name: Install the locale
+ locale_gen:
+ name: pt_BR
+ state: present
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: true
+
+- name: Make sure the locale is present and we say we installed it
+ assert:
+ that:
+ - "post_check_output.rc == 0"
+ - "output.changed"
+
+- name: Install the locale a second time
+ locale_gen:
+ name: pt_BR
+ state: present
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: true
+
+- name: Make sure the locale is present and we reported no change
+ assert:
+ that:
+ - "post_check_output.rc == 0"
+ - "not output.changed"
+
+- name: Remove the locale
+ locale_gen:
+ name: pt_BR
+ state: absent
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: true
+
+- name: Make sure the locale is absent and we reported a change
+ assert:
+ that:
+ - "post_check_output.rc == 1"
+ - "output.changed"
+
+- name: Remove the locale a second time
+ locale_gen:
+ name: pt_BR
+ state: absent
+ register: output
+
+- name: Is the locale present?
+ shell: locale -a | grep pt_BR
+ register: post_check_output
+ ignore_errors: true
+
+- name: Make sure the locale is absent and we reported no change
+ assert:
+ that:
+ - "post_check_output.rc == 1"
+ - "not output.changed"
+
+# Cleanup
+- name: Reinstall the locale we tested against if it was initially installed
+ locale_gen:
+ name: pt_BR
+ state: present
+ when: initial_state.rc == 0
diff --git a/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml
new file mode 100644
index 000000000..de3e673be
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/locale_gen/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2014, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: 'locale_gen.yml'
+ when: ansible_distribution in ('Ubuntu', 'Debian')
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases
new file mode 100644
index 000000000..2bdcc0113
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml
new file mode 100644
index 000000000..5575f22ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_cartesian/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
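+# The cartesian lookup yields the Cartesian product of the given lists:
+# 3 letters x 3 digits = 9 pairs, verified pairwise below.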
+- name: Test cartesian lookup
+ debug: var=item
+ register: product
+ with_community.general.cartesian:
+ - - A
+ - B
+ - C
+ - - '1'
+ - '2'
+ - '3'
+- name: Verify cartesian lookup
+ assert:
+ that:
+ - product.results[0]['item'] == ["A", "1"]
+ - product.results[1]['item'] == ["A", "2"]
+ - product.results[2]['item'] == ["A", "3"]
+ - product.results[3]['item'] == ["B", "1"]
+ - product.results[4]['item'] == ["B", "2"]
+ - product.results[5]['item'] == ["B", "3"]
+ - product.results[6]['item'] == ["C", "1"]
+ - product.results[7]['item'] == ["C", "2"]
+ - product.results[8]['item'] == ["C", "3"]
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/aliases
new file mode 100644
index 000000000..343f119da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml
new file mode 100644
index 000000000..2243e0dba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/galaxy.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+namespace: testns
+name: testcoll
+version: 0.0.1
+authors:
+ - Ansible (https://github.com/ansible)
+description: null
+tags: [community]
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py
new file mode 100644
index 000000000..e7f1a987a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: collection_module
+short_description: Test collection module
+description:
+ - This is a test module in a local collection.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json
new file mode 100644
index 000000000..57bc66cc2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json
@@ -0,0 +1,40 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/collection_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3f0114d080c409c58c8846be8da7b91137b38eaf2d24f72a4a61a303f925f4d",
+ "format": 1
+ }
+ ],
+ "format": 1
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json.license b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/FILES.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json
new file mode 100644
index 000000000..e4a9e7d8f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json
@@ -0,0 +1,30 @@
+{
+ "collection_info": {
+ "namespace": "testns",
+ "name": "testcoll_mf",
+ "version": "0.0.1",
+ "authors": [
+ "Ansible (https://github.com/ansible)"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "community"
+ ],
+ "description": null,
+ "license": [],
+ "license_file": null,
+ "dependencies": {},
+ "repository": null,
+ "documentation": null,
+ "homepage": null,
+ "issues": null
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "025818f18fcae5c9f78d778ae6e246ecffed6d56a886ffbc145cb66d54e9951e",
+ "format": 1
+ },
+ "format": 1
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json.license b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/MANIFEST.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/README.md
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py
new file mode 100644
index 000000000..e7f1a987a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_mf/plugins/modules/collection_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: collection_module
+short_description: Test collection module
+description:
+ - This is a test module in a local collection.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py
new file mode 100644
index 000000000..e7f1a987a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nothing/plugins/modules/collection_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: collection_module
+short_description: Test collection module
+description:
+ - This is a test module in a local collection.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml
new file mode 100644
index 000000000..96aae3d64
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/galaxy.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
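+# The version field is intentionally omitted: the collection_version lookup reports '*' for this collection.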
+namespace: testns
+name: testcoll_nv
+authors:
+ - Ansible (https://github.com/ansible)
+description: null
+tags: [community]
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py
new file mode 100644
index 000000000..e7f1a987a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/collections/ansible_collections/testns/testcoll_nv/plugins/modules/collection_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: collection_module
+short_description: Test collection module
+description:
+ - This is a test module in a local collection.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/library/local_module.py b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/library/local_module.py
new file mode 100644
index 000000000..9e9e649cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/library/local_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: local_module
+short_description: Test local module
+description:
+ - This is a test module locally next to a playbook.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.sh
new file mode 100755
index 000000000..118abbc29
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+source virtualenv.sh
+
+# The collection loader ignores paths that contain more than one ansible_collections directory.
+# That is why we copy this directory to a temporary place and run the test there.
+
+# Create temporary folder
+TEMPDIR=$(mktemp -d)
+trap '{ rm -rf "${TEMPDIR}"; }' EXIT
+
+cp -r . "${TEMPDIR}"
+cd "${TEMPDIR}"
+
+ansible-playbook runme.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.yml b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.yml
new file mode 100644
index 000000000..54c58614f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_collection_version/runme.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Test collection_version
+ assert:
+ that:
+ # Collection that does not exist
+ - query('community.general.collection_version', 'foo.bar') == [none]
+ - lookup('community.general.collection_version', 'foo.bar', result_not_found='foo') == 'foo'
+ # Collection that exists
+ - lookup('community.general.collection_version', 'community.general') is string
+ # Local collection
+ - lookup('community.general.collection_version', 'testns.testcoll') == '0.0.1'
+ # Local collection with no version
+ - lookup('community.general.collection_version', 'testns.testcoll_nv') == '*'
+ - lookup('community.general.collection_version', 'testns.testcoll_nv', result_no_version='') == ''
+ # Local collection with MANIFEST.json
+ - lookup('community.general.collection_version', 'testns.testcoll_mf') == '0.0.1'
+ # Local collection with no galaxy.yml and no MANIFEST.json
+ - lookup('community.general.collection_version', 'testns.testcoll_nothing') == '*'
+ - lookup('community.general.collection_version', 'testns.testcoll_nothing', result_no_version='0.0.0') == '0.0.0'
+ # Multiple collection names at once
+ - lookup('community.general.collection_version', 'testns.testcoll', 'testns.testcoll_nv', 'testns.testcoll_nv', 'testns.testcoll_mf', 'foo.bar')
+ == ['0.0.1', '*', '*', '0.0.1', none]
+
+ - name: Invalid FQCN
+ set_fact:
+ test: "{{ query('community.general.collection_version', 'foo.bar.baz') }}"
+ ignore_errors: true
+ register: invalid_fqcn
+
+ - name: Validate error message
+ assert:
+ that:
+ - >
+ '"foo.bar.baz" is not a FQCN' in invalid_fqcn.msg
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_dependent/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_dependent/aliases
new file mode 100644
index 000000000..26ad5c244
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_dependent/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_dependent/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_dependent/tasks/main.yml
new file mode 100644
index 000000000..b2f209729
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_dependent/tasks/main.yml
@@ -0,0 +1,189 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
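+# How the dependent lookup composes its results: every argument is a one-element
+# dictionary whose value is either something to iterate over (a list or dict) or
+# a Jinja2 expression producing one, evaluated per combination of the values
+# chosen so far, which are exposed as item.<key>. Test 1 yields the nested product.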
+- name: Test 1
+ set_fact:
+ loop_result: >-
+ {{
+ query('community.general.dependent',
+ dict(key1=[1, 2]),
+ dict(key2='[item.key1 + 3, item.key1 + 6]'),
+ dict(key3='[item.key1 + item.key2 * 10]'))
+ }}
+
+- name: Check result of Test 1
+ assert:
+ that:
+ - loop_result == expected_result
+ vars:
+ expected_result:
+ - key1: 1
+ key2: 4
+ key3: 41
+ - key1: 1
+ key2: 7
+ key3: 71
+ - key1: 2
+ key2: 5
+ key3: 52
+ - key1: 2
+ key2: 8
+ key3: 82
+
+- name: Test 2
+ set_fact:
+ loop_result: >-
+ {{ query('community.general.dependent',
+ dict([['a', [1, 2, 3]]]),
+ dict([['b', '[1, 2, 3, 4] if item.a == 1 else [2, 3, 4] if item.a == 2 else [3, 4]']])) }}
+ # The last expression could have been `range(item.a, 5)`, but that's not supported by all Jinja2 versions used in CI
+
+- name: Check result of Test 2
+ assert:
+ that:
+ - loop_result == expected_result
+ vars:
+ expected_result:
+ - a: 1
+ b: 1
+ - a: 1
+ b: 2
+ - a: 1
+ b: 3
+ - a: 1
+ b: 4
+ - a: 2
+ b: 2
+ - a: 2
+ b: 3
+ - a: 2
+ b: 4
+ - a: 3
+ b: 3
+ - a: 3
+ b: 4
+
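+# When a value is itself a dictionary (var1 below), the lookup iterates over its
+# entries and exposes each pair as item.var1.key and item.var1.value.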
+- name: Test 3
+ debug:
+ var: item
+ with_community.general.dependent:
+ - var1:
+ a:
+ - 1
+ - 2
+ b:
+ - 3
+ - 4
+ - var2: 'item.var1.value'
+ - var3: 'dependent_lookup_test[item.var1.key ~ "_" ~ item.var2]'
+ loop_control:
+ label: "{{ [item.var1.key, item.var2, item.var3] }}"
+ register: dependent
+ vars:
+ dependent_lookup_test:
+ a_1:
+ - A
+ - B
+ a_2:
+ - C
+ b_3:
+ - D
+ b_4:
+ - E
+ - F
+ - G
+
+- name: Check result of Test 3
+ assert:
+ that:
+ - (dependent.results | length) == 7
+ - dependent.results[0].item.var1.key == "a"
+ - dependent.results[0].item.var2 == 1
+ - dependent.results[0].item.var3 == "A"
+ - dependent.results[1].item.var1.key == "a"
+ - dependent.results[1].item.var2 == 1
+ - dependent.results[1].item.var3 == "B"
+ - dependent.results[2].item.var1.key == "a"
+ - dependent.results[2].item.var2 == 2
+ - dependent.results[2].item.var3 == "C"
+ - dependent.results[3].item.var1.key == "b"
+ - dependent.results[3].item.var2 == 3
+ - dependent.results[3].item.var3 == "D"
+ - dependent.results[4].item.var1.key == "b"
+ - dependent.results[4].item.var2 == 4
+ - dependent.results[4].item.var3 == "E"
+ - dependent.results[5].item.var1.key == "b"
+ - dependent.results[5].item.var2 == 4
+ - dependent.results[5].item.var3 == "F"
+ - dependent.results[6].item.var1.key == "b"
+ - dependent.results[6].item.var2 == 4
+ - dependent.results[6].item.var3 == "G"
+
+- name: "Test 4: template failure"
+ debug:
+ msg: "{{ item }}"
+ with_community.general.dependent:
+ - a:
+ - 1
+ - 2
+ - b: "[item.a + foo]"
+ ignore_errors: true
+ register: eval_error
+
+- name: Check result of Test 4
+ assert:
+ that:
+ - eval_error is failed
+ - eval_error.msg.startswith("Caught \"'foo' is undefined")
+
+- name: "Test 5: same variable name reused"
+ debug:
+ msg: "{{ item }}"
+ with_community.general.dependent:
+ - a: x
+ - b: x
+ ignore_errors: true
+ register: eval_error
+
+- name: Check result of Test 5
+ assert:
+ that:
+ - eval_error is failed
+ - eval_error.msg.startswith("Caught \"'x' is undefined")
+
+- name: "Test 6: multi-value dict"
+ debug:
+ msg: "{{ item }}"
+ with_community.general.dependent:
+ - a: x
+ b: x
+ ignore_errors: true
+ register: eval_error
+
+- name: Check result of Test 6
+ assert:
+ that:
+ - eval_error is failed
+ - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 2 elements'
+
+- name: "Test 7: empty dict"
+ debug:
+ msg: "{{ item }}"
+ with_community.general.dependent:
+ - {}
+ ignore_errors: true
+ register: eval_error
+
+- name: Check result of Test 7
+ assert:
+ that:
+ - eval_error is failed
+ - eval_error.msg == 'Parameter 0 must be a one-element dictionary, got 0 elements'
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_dig/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_dig/aliases
new file mode 100644
index 000000000..eb449a9cf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_dig/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_dig/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_dig/meta/main.yml
new file mode 100644
index 000000000..fe9e33681
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_dig/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_dig/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_dig/tasks/main.yml
new file mode 100644
index 000000000..2f48333cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_dig/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install dnspython library
+ pip:
+ name: dnspython
+ state: present
+ extra_args: "-c {{ remote_constraints }}"
+
+- name: Test dig lookup with existing domain
+ set_fact:
+ dig_existing: "{{ lookup('community.general.dig', 'github.com.') }}"
+
+- name: Test dig lookup with non-existing domain and fail_on_error=no
+ set_fact:
+ dig_nonexisting_fail_no: "{{ lookup('community.general.dig', 'non-existing.domain.', 'fail_on_error=no') }}"
+
+- name: Verify that NXDOMAIN was returned
+ assert:
+ that: dig_nonexisting_fail_no == 'NXDOMAIN'
+
+- name: Test dig lookup with non-existing domain and fail_on_error=yes
+ set_fact:
+ dig_nonexisting_fail_yes: "{{ lookup('community.general.dig', 'non-existing.domain.', 'fail_on_error=yes') }}"
+ ignore_errors: true
+ register: dig_nonexisting_fail_yes_result
+
+- name: Verify that the task failed
+ assert:
+ that: dig_nonexisting_fail_yes_result is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases
new file mode 100644
index 000000000..b9f3395f7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/aliases
@@ -0,0 +1,14 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+needs/file/tests/utils/constraints.txt
+needs/target/setup_etcd3
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
+disabled # see https://github.com/ansible-collections/community.general/issues/322
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml
new file mode 100644
index 000000000..de726382b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+etcd3_prefix: '/keyprefix/'
+etcd3_singlekey: '/singlekeypath'
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml
new file mode 100644
index 000000000..ea012594d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/dependencies.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Setup etcd3
+ import_role:
+ name: setup_etcd3
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh
new file mode 100755
index 000000000..1b37ae4f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/runme.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test_lookup_etcd3.yml -v "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml
new file mode 100644
index 000000000..47f1916c0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# lookup_etcd3 integration tests
+# Copyright 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Put key/value pairs under an etcd prefix
+ etcd3:
+ key: "{{ etcd3_prefix }}foo{{ item }}"
+ value: "bar{{ item }}"
+ state: present
+ loop:
+ - 1
+ - 2
+ - 3
+
+- name: Put a single key/value pair in etcd
+ etcd3:
+ key: "{{ etcd3_singlekey }}"
+ value: "foobar"
+ state: present
+
+- import_tasks: tests.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml
new file mode 100644
index 000000000..929c6f142
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/tasks/tests.yml
@@ -0,0 +1,29 @@
+---
+# lookup_etcd3 integration tests
+# Copyright 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
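+# A prefix lookup returns a list of {key, value} dicts; a single-key lookup
+# returns one such dict, which is why its length is 2 (its two fields).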
+- block:
+ - name: 'Fetch secrets using "etcd3" lookup'
+ set_fact:
+ etcdoutkey1: "{{ lookup('community.general.etcd3', etcd3_prefix, prefix=True) }}"
+ etcdoutkey2: "{{ lookup('community.general.etcd3', etcd3_singlekey) }}"
+ key_inexistent: "{{ lookup('community.general.etcd3', 'inexistent_key') }}"
+
+ - name: 'Check etcd values'
+ assert:
+ msg: 'unexpected etcd3 values'
+ that:
+ - etcdoutkey1 is sequence
+ - etcdoutkey1 | length() == 3
+ - etcdoutkey1[0].value == 'bar1'
+ - etcdoutkey1[1].value == 'bar2'
+ - etcdoutkey1[2].value == 'bar3'
+ - etcdoutkey2 is sequence
+ - etcdoutkey2 | length() == 2
+ - etcdoutkey2.value == 'foobar'
+ - key_inexistent is sequence
+ - key_inexistent | length() == 0
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml
new file mode 100644
index 000000000..c18138888
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_etcd3/test_lookup_etcd3.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Test lookup etcd3
+ import_role:
+ name: lookup_etcd3
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases
new file mode 100644
index 000000000..0ac9bad98
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_flattened/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml
new file mode 100644
index 000000000..37af1327b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_flattened/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
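+# Nested lists are flattened to their leaf values, so each of a__ to d__ becomes one item.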
+- name: test with_flattened
+ set_fact: '{{ item }}=flattened'
+ with_community.general.flattened:
+ - - a__
+ - - b__
+ - - c__
+ - d__
+- name: verify with_flattened results
+ assert:
+ that:
+ - a__ == 'flattened'
+ - b__ == 'flattened'
+ - c__ == 'flattened'
+ - d__ == 'flattened'
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases
new file mode 100644
index 000000000..66632fb4a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml
new file mode 100644
index 000000000..9fc63b19f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/dependencies.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Install LMDB Python package
+ pip:
+ name: lmdb
+ environment:
+ LMDB_PURE: "1"
+ - name: Setup test data
+ script: test_db.py
+ args:
+ executable: "{{ ansible_python.executable }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh
new file mode 100755
index 000000000..71faa439d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/runme.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test.yml -v "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml
new file mode 100644
index 000000000..217c020ca
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - debug:
+ msg: '{{ query("community.general.lmdb_kv", "nl", "be", "lu", db="jp.mdb") }}'
+ - debug:
+ var: item.1
+ loop: '{{ query("community.general.lmdb_kv", db="jp.mdb") }}'
+ - assert:
+ that:
+ - query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') == ['Netherlands', 'Belgium', 'Luxembourg']
+ - query('community.general.lmdb_kv', db='jp.mdb')|length == 5
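+ # A trailing * performs a prefix match; matches are returned as (key, value) pairs.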
+ - assert:
+ that:
+ - item.0 == 'nl'
+ - item.1 == 'Netherlands'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - n*
+ - assert:
+ that:
+ - item == 'Belgium'
+ vars:
+ - lmdb_kv_db: jp.mdb
+ with_community.general.lmdb_kv:
+ - be
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py
new file mode 100644
index 000000000..b906c4c39
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_lmdb_kv/test_db.py
@@ -0,0 +1,16 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+import lmdb
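+# map_size is the maximum size the LMDB memory map (and thus the database) may grow to, in bytes.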
+map_size = 1024 * 100
+env = lmdb.open('./jp.mdb', map_size=map_size)
+with env.begin(write=True) as txn:
+ txn.put('fr'.encode(), 'France'.encode())
+ txn.put('nl'.encode(), 'Netherlands'.encode())
+ txn.put('es'.encode(), 'Spain'.encode())
+ txn.put('be'.encode(), 'Belgium'.encode())
+ txn.put('lu'.encode(), 'Luxembourg'.encode())
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/aliases
new file mode 100644
index 000000000..eb449a9cf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/runme.sh
new file mode 100755
index 000000000..52a38f4a5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/runme.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
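+# test.yml reads this log file back (see logging_output_file in vars.yml) to
+# verify that the override warning is actually emitted.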
+ANSIBLE_LOG_PATH=/tmp/ansible-test-merge-variables \
+ ansible-playbook test.yml "$@"
+
+ANSIBLE_LOG_PATH=/tmp/ansible-test-merge-variables \
+ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE=suffix \
+ ansible-playbook test_with_env.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test.yml b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test.yml
new file mode 100644
index 000000000..fbd884393
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test.yml
@@ -0,0 +1,172 @@
+---
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test merge_variables lookup plugin
+ hosts: localhost
+ tasks:
+ - name: Include test data
+ include_vars: vars.yml
+
+ # Test the default behavior
+ - name: Test merge list
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 2"
+ - "'item1' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', '^.+__merge_list$') }}"
+
+ - name: Test merge dict
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_dict }}"
+
+ - name: Validate that dict is complete
+ assert:
+ that:
+ - "'item1' in merged_dict"
+ - "'item2' in merged_dict"
+ - "'list_item' in merged_dict"
+ - "(merged_dict.list_item | length) == 2"
+ - "'test1' in (merged_dict.list_item)"
+ - "'test2' in (merged_dict.list_item)"
+ vars:
+ merged_dict: "{{ lookup('community.general.merge_variables', '^.+__merge_dict$') }}"
+
+ # Test the behavior when no results are found
+ - name: Test merge without results
+ block:
+ - debug:
+ msg: "{{ not_found }}"
+ - name: Validate that the variable defaults to an empty list
+ assert:
+ that:
+ - "(not_found | default('default-used', True)) == 'default-used'"
+ vars:
+ not_found: "{{ lookup('community.general.merge_variables', '^.+__merge_not_found$') }}"
+
+ # Test the 'pattern_type' options
+ - name: Test merge list (pattern_type = prefix)
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 4"
+ - "'item1' in merged_list"
+ - "'item2' in merged_list"
+ - "'item2' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', 'testlist', pattern_type='prefix') }}"
+
+ - name: Test merge list (pattern_type = suffix)
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 2"
+ - "'item1' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', '__merge_list', pattern_type='suffix') }}"
+
+ - name: Test merge list (pattern_type = regex)
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 3"
+ - "'item1' in merged_list"
+ - "'item2' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', '^testlist[0-9].*', pattern_type='regex') }}"
+
+ # Test the 'initial_value' option
+ - name: Test merge without results but with initial value
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ not_found_initial_value }}"
+
+ - name: Validate that the variable only contains the initial value
+ assert:
+ that:
+ - "(not_found_initial_value | count) == 1"
+ - "(not_found_initial_value | first) == 'item2'"
+ vars:
+ not_found_initial_value: "{{ lookup('community.general.merge_variables', '^.+__merge_not_found$', initial_value=testlist_initial_value) }}"
+
+ - name: Test merging a list with an initial value
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list_with_initial_value }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list_with_initial_value | length) == 3"
+ - "'item1' in merged_list_with_initial_value"
+ - "'item2' in merged_list_with_initial_value"
+ - "'item3' in merged_list_with_initial_value"
+ vars:
+ merged_list_with_initial_value: "{{ lookup('community.general.merge_variables', '^.+__merge_list$', initial_value=testlist_initial_value) }}"
+
+ # Test the 'override' options
+ - name: Test the 'override=warn' option
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_with_override_warn }}"
+
+ - name: Validate that the dict is complete and the warning is printed
+ assert:
+ that:
+ - "'key_to_override' in merged_with_override_warn"
+ - "merged_with_override_warn.key_to_override == 'Override value'"
+ - "'key_to_override' in lookup('file', logging_output_file)" # Check if a message is given
+ - "'[WARNING]' in lookup('file', logging_output_file)" # and verify that the message is a WARNING
+ vars:
+ merged_with_override_warn: "{{ lookup('community.general.merge_variables', '^.+__override_warn$', initial_value=override_warn_init, override='warn') }}"
+
+ - name: Test the 'override=error' option
+ block:
+ - name: Validate that an override result in an error
+ debug:
+ msg: "{{ lookup('community.general.merge_variables', '^.+__override_error$', initial_value=override_error_init, override='error') }}"
+ ignore_errors: true # Do not stop the playbook
+ register: _override_error_result
+
+ - name: Print the output
+ debug:
+ msg: "{{ _override_error_result }}"
+
+ - name: Validate that the error is reported
+ assert:
+ that:
+ - "_override_error_result.failed"
+ - "'key_to_override' in _override_error_result.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test_with_env.yml b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test_with_env.yml
new file mode 100644
index 000000000..7fbb664fd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/test_with_env.yml
@@ -0,0 +1,44 @@
+---
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test merge_variables lookup plugin
+ hosts: localhost
+ tasks:
+ - name: Include test data
+ include_vars: vars.yml
+
+ # Test the pattern option using the environment variable
+ - name: Test merge list (pattern_type = regex)
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 2"
+ - "'item1' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', '__merge_list') }}"
+
+ # Test whether the pattern option can be overridden
+ - name: Test merge list (pattern_type = suffix)
+ block:
+ - name: Print the merged list
+ debug:
+ msg: "{{ merged_list }}"
+
+ - name: Validate that the list is complete
+ assert:
+ that:
+ - "(merged_list | length) == 3"
+ - "'item1' in merged_list"
+ - "'item2' in merged_list"
+ - "'item3' in merged_list"
+ vars:
+ merged_list: "{{ lookup('community.general.merge_variables', '^testlist[0-9].*', pattern_type='regex') }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/vars.yml b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/vars.yml
new file mode 100644
index 000000000..d1a4ace21
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_merge_variables/vars.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
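+# The variable names deliberately share the "testlist" prefix and the
+# __merge_list / __merge_dict suffixes so the pattern_type tests in test.yml can match them.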
+testlist_initial_value: "{{ testlist2 }}"
+testlist1__merge_list:
+ - item1
+testlist2:
+ - item2
+testlist3__merge_list:
+ - item3
+
+testdict1__merge_dict:
+ item1: test
+ list_item:
+ - test1
+testdict2__merge_dict:
+ item2: test
+ list_item:
+ - test2
+
+override_warn_init:
+ key_to_override: Initial value
+override__override_warn:
+ key_to_override: Override value
+
+override_error_init:
+ key_to_override: Initial value
+override__override_error:
+ key_to_override: Override value
+
+logging_output_file: /tmp/ansible-test-merge-variables # The Ansible log output is available in this file
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases
new file mode 100644
index 000000000..0d4c5af3b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/rhel
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
+skip/osx # FIXME https://github.com/ansible-collections/community.general/issues/2978
+skip/macos # FIXME https://github.com/ansible-collections/community.general/issues/2978
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml
new file mode 100644
index 000000000..c0b5eb5bd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - include_tasks: package.yml
+ - include_tasks: tests.yml
+ when:
+ # The pass package is no longer available in EPEL, so only test on FreeBSD, macOS, openSUSE Leap, and Ubuntu
+ # https://lists.zx2c4.com/pipermail/password-store/2019-July/003689.html
+ - ansible_facts.distribution in ['FreeBSD', 'MacOSX', 'openSUSE Leap', 'Ubuntu']
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml
new file mode 100644
index 000000000..e5ccd5677
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/package.yml
@@ -0,0 +1,84 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+- name: Install package
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ passwordstore_packages }}"
+ state: present
+ when: ansible_facts.pkg_mgr in ['apt', 'dnf', 'yum', 'pkgng', 'community.general.pkgng']
+
+- block:
+ # OpenSUSE Leap >= 15.0 does not include password-store in the main repo
+ - name: SUSE | Add security:privacy repo
+ template:
+ src: security-privacy.repo.j2
+ dest: /etc/zypp/repos.d/security:privacy.repo
+
+ - name: SUSE | Install package
+ package:
+ name: password-store
+ state: present
+ update_cache: true
+ disable_gpg_check: true
+ when: ansible_facts.pkg_mgr in ['zypper', 'community.general.zypper']
+
+# See https://github.com/gopasspw/gopass/issues/1849#issuecomment-802789285
+- name: Install gopass on Debian
+ when: ansible_facts.os_family == 'Debian'
+ become: true
+ block:
+ - name: Fetch gopass repo keyring
+ ansible.builtin.get_url:
+ url: https://packages.gopass.pw/repos/gopass/gopass-archive-keyring.gpg
+ dest: /usr/share/keyrings/gopass-archive-keyring.gpg
+ - name: Add gopass repo
+ ansible.builtin.apt_repository:
+ repo: "deb [arch=amd64,arm64,armhf \
+ signed-by=/usr/share/keyrings/gopass-archive-keyring.gpg] \
+ https://packages.gopass.pw/repos/gopass stable main"
+ state: present
+ - name: Update apt-cache and install gopass package
+ ansible.builtin.apt:
+ name: gopass
+ update_cache: true
+
+- name: Install on macOS
+ when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+
+ - name: MACOS | Install package
+ homebrew:
+ name:
+ - gnupg2
+ - pass
+ - gopass
+ state: present
+ update_homebrew: false
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ # Newer versions of brew want to compile packages, which takes a long time. Do not upgrade Homebrew until a
+ # proper solution is found.
+ environment:
+ HOMEBREW_NO_AUTO_UPDATE: "True"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
new file mode 100644
index 000000000..a94529e46
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/password_tests.yml
@@ -0,0 +1,131 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Create a password ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass', length=8, create=true, backend=backend) }}"
+
+ - name: Fetch password from an existing file ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass', backend=backend) }}"
+
+ - name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
+
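+ # The trailing '=' in the userpass value exercises the plugin's key=value parameter parsing.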
+ - name: Create a password with equal sign ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal userpass=SimpleSample= create=true', backend=backend) }}"
+
+ - name: Fetch a password with equal sign ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-pass-equal', backend=backend) }}"
+
+ - name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
+
+ - name: Create a password using missing=create ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='create', length=8, backend=backend) }}"
+
+ - name: Fetch password from an existing file ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', backend=backend) }}"
+
+ - name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
+
+ - name: Fetch password from existing file using missing=empty ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'test-missing-create', missing='empty', backend=backend) }}"
+
+ - name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
+
+ - name: Fetch password from non-existing file using missing=empty ({{ backend }})
+ set_fact:
+ readpass: "{{ query('community.general.passwordstore', 'test-missing-pass', missing='empty', backend=backend) }}"
+
+ - name: Verify password ({{ backend }})
+ assert:
+ that:
+ - readpass == [ none ]
+
+ - name: Create the YAML password ({{ backend }})
+ command: "{{ backend }} insert -m -f test-yaml-pass"
+ args:
+ stdin: |
+ testpassword
+ key: |
+ multi
+ line
+
+ - name: Fetch a password with YAML subkey ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-yaml-pass', subkey='key', backend=backend) }}"
+
+ - name: Read a yaml subkey ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'multi\nline\n'
+
+ - name: Create a non-YAML multiline file ({{ backend }})
+ command: "{{ backend }} insert -m -f test-multiline-pass"
+ args:
+ stdin: |
+ testpassword
+ random additional line
+
+ - name: Fetch password from multiline file ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', backend=backend) }}"
+
+ - name: Multiline pass only returns first line ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'testpassword'
+
+ - name: Fetch all from multiline file ({{ backend }})
+ set_fact:
+ readyamlpass: "{{ lookup('community.general.passwordstore', 'test-multiline-pass', returnall='yes', backend=backend) }}"
+
+ - name: Multiline pass returnall returns everything in the file ({{ backend }})
+ assert:
+ that:
+ - readyamlpass == 'testpassword\nrandom additional line\n'
+
+ - name: Create a password in a folder ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', length=8, create=true, backend=backend) }}"
+
+ - name: Fetch password from folder ({{ backend }})
+ set_fact:
+ readpass: "{{ lookup('community.general.passwordstore', 'folder/test-pass', backend=backend) }}"
+
+ - name: Verify password from folder ({{ backend }})
+ assert:
+ that:
+ - readpass == newpass
+
+ - name: Try to read folder as passname ({{ backend }})
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder', backend=backend) }}"
+ ignore_errors: true
+ register: eval_error
+
+ - name: Make sure reading folder as passname failed ({{ backend }})
+ assert:
+ that:
+ - eval_error is failed
+ - '"passname folder not found" in eval_error.msg'
+ when: backend != "gopass" # Remove this line once gopass backend can handle this
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
new file mode 100644
index 000000000..65a578c96
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/tasks/tests.yml
@@ -0,0 +1,244 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Check name of gpg2 binary
+ command: which gpg2
+ register: gpg2_check
+ ignore_errors: true
+
+- name: Set gpg2 binary name
+ set_fact:
+ gpg2_bin: '{{ "gpg2" if gpg2_check is successful else "gpg" }}'
+
+- name: Stop gpg-agent so we can remove any locks on the GnuPG dir
+ command: gpgconf --kill gpg-agent
+ ignore_errors: true
+
+- name: Remove previous password files and directory
+ file:
+ dest: "{{ item }}"
+ state: absent
+ loop:
+ - "~/.gnupg"
+ - "~/.password-store"
+
+- name: Get path of pass executable
+ command: which pass
+ register: result
+
+- name: Store path of pass executable
+ set_fact:
+ passpath: "{{ result.stdout }}"
+
+- name: Move original pass into place if there was a leftover
+ command:
+ argv:
+ - mv
+ - "{{ passpath }}.testorig"
+ - "{{ passpath }}"
+ args:
+ removes: "{{ passpath }}.testorig"
+
+# Having gopass installed is not required for this test, but we record
+# its path in case it is present, so we can restore it later
+- name: Try to find gopass in path
+ command: which gopass
+ register: result
+
+- name: Store path of gopass executable
+ set_fact:
+ gopasspath: "{{ result.stdout }}"
+
+- name: Move original gopass into place if there was a leftover
+ command:
+ argv:
+ - mv
+ - "{{ gopasspath }}.testorig"
+ - "{{ gopasspath }}"
+ args:
+ removes: "{{ gopasspath }}.testorig"
+
+- name: Get versions of tools
+ command: "{{ item }} --version"
+ register: versions
+ loop:
+ - "{{ gpg2_bin }}"
+ - pass
+ - gopass
+
+- name: Output versions of tools
+ debug:
+ msg: "{{ versions.results | map(attribute='stdout_lines') }}"
+
+# How to generate a new GPG key:
+# gpg2 --batch --gen-key input # See templates/input
+# gpg2 --list-secret-keys --keyid-format LONG
+# gpg2 --armor --export-secret-keys [key id]
+# # Get the fingerprint
+# gpg2 --fingerprint --keyid-format LONG | grep [key id] -A 1 | tail -1 | tr -d '[:space:]' | awk -F '=' '{print $2":6:"}'
+
+- name: Import GPG private key
+ shell: echo "{{ passwordstore_privkey }}" | {{ gpg2_bin }} --import --allow-secret-key-import -
+
+- name: Trust key
+ shell: echo "D3E1CC8934E97270CEB066023AF1BD3619AB496A:6:" | {{ gpg2_bin }} --import-ownertrust
+
+- name: Initialise pass passwordstore
+ command: pass init ansible-test
+
+- name: Initialise gopass passwordstore
+ command: gopass init --path $HOME/.gopass-store ansible-test
+ args:
+ creates: "{{ lookup('env','HOME') }}/.gopass-store"
+
+# these tests should apply to all backends
+- name: Password tests
+ include_tasks: password_tests.yml
+ loop:
+ - pass
+ - gopass
+ loop_control:
+ loop_var: backend
+
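+# The passwordstore lookup picks up its store directory from the "passwordstore"
+# variable, so the set_fact tasks below redirect where subsequent lookups read from.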
+- name: Change passwordstore location explicitly
+ set_fact:
+ passwordstore: "{{ lookup('env','HOME') }}/.password-store"
+
+- name: Make sure password store still works with explicit location set
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+
+- name: Change passwordstore location to a non-existent place
+ set_fact:
+ passwordstore: "somenonexistentplace"
+
+- name: Try reading from non-existent passwordstore location
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+ ignore_errors: true
+ register: eval_error
+
+- name: Make sure reading from non-existent passwordstore location failed
+ assert:
+ that:
+ - eval_error is failed
+ - >-
+ "Passwordstore directory '" in eval_error.msg
+ - >-
+ "/somenonexistentplace' does not exist" in eval_error.msg
+
+- name: Test pass compatibility shim detection
+ block:
+ - name: Move original pass out of the way
+ command:
+ argv:
+ - mv
+ - "{{ passpath }}"
+ - "{{ passpath }}.testorig"
+ args:
+ creates: "{{ passpath }}.testorig"
+
+ - name: Create dummy pass script
+ ansible.builtin.copy:
+ content: |
+ #!/bin/sh
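+      # Minimal stand-in for pass(1): always answer with a fixed marker so tests can tell the shim handled the call.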
+ echo "shim_ok"
+ dest: "{{ passpath }}"
+ mode: '0755'
+
+  - name: Try reading from non-existent passwordstore location with a different pass utility
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'test-pass') }}"
+ environment:
+ PATH: "/tmp"
+
+ - name: Verify password received from shim
+ assert:
+ that:
+ - newpass == "shim_ok"
+
+ - name: Try to read folder as passname with a different pass utility
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+
+  - name: Verify folder lookup was also answered by the shim
+ assert:
+ that:
+ - newpass == "shim_ok"
+
+ always:
+ - name: Move original pass back into place
+ command:
+ argv:
+ - mv
+ - "{{ passpath }}.testorig"
+ - "{{ passpath }}"
+ args:
+ removes: "{{ passpath }}.testorig"
+
+# These are in addition to the real gopass tests above
+# and verify the plugin logic
+- name: gopass plugin logic tests
+ vars:
+ passwordstore_backend: "gopass"
+ block:
+ - name: Check if gopass executable exists
+ stat:
+ path: "{{ gopasspath }}"
+ register: gopass_check
+
+ - name: Move original gopass out of the way
+ command:
+ argv:
+ - mv
+ - "{{ gopasspath }}"
+ - "{{ gopasspath }}.testorig"
+ args:
+ creates: "{{ gopasspath }}.testorig"
+ when: gopass_check.stat.exists == true
+
+ - name: Create mocked gopass script
+ ansible.builtin.copy:
+ content: |
+ #!/bin/sh
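+      # Mocked gopass: fail unless the plugin exported GOPASS_NO_REMINDER=YES,
+      # reject --version probes, and otherwise print a fixed marker value.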
+ if [ "$GOPASS_NO_REMINDER" != "YES" ]; then
+ exit 1
+ fi
+ if [ "$1" = "--version" ]; then
+ exit 2
+ fi
+ echo "gopass_ok"
+ dest: "{{ gopasspath }}"
+ mode: '0755'
+
+ - name: Try to read folder as passname using gopass mock
+ set_fact:
+ newpass: "{{ lookup('community.general.passwordstore', 'folder') }}"
+
+ - name: Verify password received from gopass mock
+ assert:
+ that:
+ - newpass == "gopass_ok"
+
+ always:
+ - name: Remove mocked gopass
+ ansible.builtin.file:
+ path: "{{ gopasspath }}"
+ state: absent
+
+ - name: Move original gopass back into place
+ command:
+ argv:
+ - mv
+ - "{{ gopasspath }}.testorig"
+ - "{{ gopasspath }}"
+ args:
+ removes: "{{ gopasspath }}.testorig"
+ when: gopass_check.stat.exists == true
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input
new file mode 100644
index 000000000..d639accdb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input
@@ -0,0 +1,9 @@
+%echo Generating an Ansible Test PGP key
+Key-Type: RSA
+Key-Length: 4096
+Subkey-Type: RSA
+Subkey-Length: 4096
+Name-Real: ansible-test
+Expire-Date: 0
+%commit
+%echo done
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input.license b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/input.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2 b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2
new file mode 100644
index 000000000..72eca99ec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/templates/security-privacy.repo.j2
@@ -0,0 +1,13 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+
+[security_privacy]
+name=Crypto applications and utilities (openSUSE_Leap_{{ ansible_distribution_version }})
+type=rpm-md
+baseurl=http://download.opensuse.org/repositories/security:/privacy/openSUSE_Leap_{{ ansible_distribution_version }}/
+gpgcheck=1
+gpgkey=http://download.opensuse.org/repositories/security:/privacy/openSUSE_Leap_{{ ansible_distribution_version }}/repodata/repomd.xml.key
+enabled=1
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Alpine.yml
new file mode 100644
index 000000000..f18329ed1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Alpine.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_packages:
+ - gopass
+ - pass
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml
new file mode 100644
index 000000000..f18329ed1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Archlinux.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_packages:
+ - gopass
+ - pass
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml
new file mode 100644
index 000000000..825a6a8bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Debian.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_packages:
+ - pass
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml
new file mode 100644
index 000000000..f18329ed1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/Fedora.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_packages:
+ - gopass
+ - pass
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml
new file mode 100644
index 000000000..9e9da2772
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/FreeBSD.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_packages:
+ - gopass
+ - gnupg
+ - password-store
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml
new file mode 100644
index 000000000..2b4fa1b22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_passwordstore/vars/main.yml
@@ -0,0 +1,122 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+passwordstore_privkey: |
+ -----BEGIN PGP PRIVATE KEY BLOCK-----
+ Version: GnuPG v2.0.22 (GNU/Linux)
+
+ lQcYBF0L9gUBEACrYsKIj/rXFQPURHz+YKg54BW6utIvwoF/CbQarc8iXoXfPZW4
+ wQnFaX+dLifkvX5f4xIUVD94qyMXT2oNg+HZXH2y7VwqBFeG9TrNqfgJsYTbTlgP
+ 0MOD3FtZkMy/6TrJyOzY7x7oHUwWY1S5YeBDBsRqWwhS5fIfHLGbZLwoHC/53mlP
+ ve0zL1u28y3Kh8JvYBSlcYrbMlbKQb1g0TSb8gVwq+Iv2CriEmHipUvLI02z02Ny
+ XnT0+rzOEEynaoF7zX1S0eoIwKz46/sen1L5YAYhLM16gq4WSquxUz8klff+jVD5
+ uLgUKzEkhBdBuTvWKjknk6Wu2aDPC6IQXUjm+5dIh+/IyD4SKPA+HY1QZjIF/6nH
+ KV43RRzB8YWkAEW/iQdKCXWyNz1o8zYiun/PxiJtgV2qpz4z5M+ANx1VCj31eMTE
+ A0exSnMLjbUknE3JX2bnBfTgbiKbeXQcL6FNrKIPJrwgNlP3g48JzY6aPT3Tej8Q
+ pd6DqamU0sQfoi2K8zs/Ltl+REocKqzQ9cz0dj7ykQqAMf+TunQfC3pY8yI7IKCr
+ YM2L3aJbiqLTp31dCfwxBSZJ+oalUzp2l91ugu1k1mjuik7ZJaflfBNEFWK9ig3v
+ qhv8FwdoPFNh8TC11btfU1wphcBl65pCaPff2s94/Gj2aQ0PrFYuhPtzqwARAQAB
+ AA//UMXH1yK8XrfJyTXKDv0w663fr9SykM3DyEKjfltiHtmbkF3u/VcFKuQQv9xr
+ 8tMYB0r2T1xxWYaWkDreSaZZP97mYviZEjhfo/xZjWpIuxDOA6nFuRZzvuaQqwKi
+ bOQXz9gBQDBaSZzdKkQAPyqQziYXVeS3ZJJ47Q7R6eGtB95ZAhM/YNSrQQ9V00CC
+ 2UvoaCNJN7vubGYqH0KiZUnT2JdU1wg7Hr9rXoa5WV77/K4Txeefm9xGlNrDNv7Z
+ kaGRiu6K3QiPmzZrjxlwjbsbGOqXmPULlmyWbW0dxAHu5WhEq9SgUEEtiFve2k3i
+ xBfvyny12SAt2t04e7wI0luUqnpQR8j3JPFXcCiFszIauMbXhIAd0QhRfS6/pRCf
+ iBRliFqvQpgmz9w8/enfQtBvyAwZLr3p2vk4OyRW/GLFnfkoKcCvgZtL5vNDPYJm
+ Y1koC+jAsiiFRmUvP9SLyNabVTANz5Hg/jZmcdh+OTWs1xdZl1JyjUOxV6n1n1pW
+ BPc0FaNvFS+wkx6Rp9DgryTP1oTD6wjacNljFh3A9LJ0DTnQgsghoi5ArBKnRP7R
+ 9i0DKUqywoukm+GQHoZlB6bglDBVc3wKZvtw17/SgD6GnKZ3zH+Y8yx3K3MI9wjT
+ Od1jMxQxzKWMxrv72mtzchm/utkubL5BpM5hn6fg32NEkxEIAMhc2f01fuv/UZ1i
+ zlkqXkcMzrd/+9+Mv53meLMJsW2biOwRF52ZXi3k9ulUwHB21FaAXeyOFhueKrh/
+ iKu5Hpydxruj0XCgMRArgvghPL4KLfhh54xvXGKxWw7B0IWkOnvELPikOl3h17cY
+ lQ5rN5mQtlxaqqrJKOxkseEFTvVJudZXZH9oArlVXO88HklDeEHtV4xjdiyvtFKg
+ qWUvo6oNT0LmpFdgstoKJ8H5gKiV3wfl2QJQxqWT40wUFVnNEAoBYC7PbHWajxmD
+ 7ZGoKE9o3ythg11D4rH23uTUFLd5Hc5xeQ2/+OhEKv4Qe0X+okv8/RpM94ObfsW9
+ HdQBsgMIANr6B/SzwhPn8eK0c6wrOYU/B/V370qgTBRpWgrNRCvtN8NuSJKOJRU/
+ qYm74dCsVxBfvUlEPRp9DEsE9djvwZLdIqOfn4amDoLZmYdMQ5LQCOaHlrnEx+ng
+ uHUklFUXIHDNcVRWzhrHm4KQWeB7RrCRL1nEimW/nhh8y++4TmxZQ1Zr2fdUWdMs
+ dSWryw3RE5nwDd7nW8+Wgm3TfS4jhhn3DcKFZxLzG1eo4ZaXoPa4j7zps3xFyBtF
+ KMPgrvAAxzqFwklQKjwXcthYUQ5OzXTt94m8VqOz0nZGoizaGBFRz1l1q9QQxTv4
+ BUI+2OeyfrzWIaKEu+9gsNbx/OfxmzkH/2quLKJj0FQ+aEYkeGXVtf2DsceQXB1l
+ QtBo/qvBWP2XYk6GzAfwjvI8ifEG4MzXCZxm5SKtQ8UljrCo2T6OArG2FK1FSJDX
+ UlQBXPLYWIJCkC9X8SB6UztPSXPoNS6Ktc0K5XFxvGzIDpxAE+zE4UAe3gWGaROe
+ bwxbuRb9/aHAEk6CU3mrgEUUqet+4qUNRpAzJdwYIN6nOAVghHWiVY4OoCSCgMYY
+ 4B9Aa9bVeDQH9S88X5ux3bDW1DPjiewNIYo+0z53gfY3HZeDfHFWD4iM6vaJjSVb
+ 65trGHyGhElkWub56Q3nHXPOxiAKeRBn3iL54wDNiGMlfw/tkoAkTuaNI7QMYW5z
+ aWJsZS10ZXN0iQI5BBMBAgAjBQJdC/YFAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwEC
+ HgECF4AACgkQOvG9NhmrSWo7YhAAhsp+j13R3bJkv/esJwaDa0dbdm1iJzKB9bkf
+ Bi10qMFmJUEDyHHKL9OBiAgSCKna5F4EuEFnaV9YPs1n6PVE+FX3m5UAfCULR6Qk
+ G064+nd25LWEjSJ3I85WHfJNz/fPr3fQvQNH67GEdTZIr7hrWeZHH1nnpGrZ6xx6
+ sVBxKMp3g8wNXey/DJSaDcry5cQ8cZW2RrUzmfXgcSlwAIVBkmHKA1UtgAGu1kq/
+ LzaCJus7ffMdUZd7IYAM5fIdnNEf0fi8/oKcWiv9TqynGJmu2AxjSUu9EG9idESu
+ bZtXZntM2ODbqepfQ0i44ew9Y3HQS8ciP8uhbQYFZuhbMQobXNIkvO6XA1cePSt2
+ Wh4qCh+ymk9u2qBqb4JkqORyOCJhLdOj6TGU0o9TQ8h0EqjB/Wx69ppA0GFQh5si
+ CG7GnwZhabgiPIxoCPQuhDPv+rXgFx5YiGofMddst9DFn0nR/fFx9hqaTuZ4iiuH
+ UzvqQAMGtIMxiOdJKSSI9emsNfQvXTMHjB+s6Cjiw7nF0+G2ciXlLTPbtTFePZVN
+ seDosuN6uMqwm8KNZVJpU0O0qXu5EdI7ptbOooSR7WZSJdQ+MWyr0vGYKKFGYkwp
+ jl/pDzXCA1d3AxK4Mzmb+KvFQvh+9X7VwI9Pgg4HHE5KeyX8wXhrvT2itPoznnC2
+ 33tCCZmdBxcEXQv2BQEQANtkIv93lunfgRUt7X4hJXT8rw/Y787b+xQ/FcApQnfd
+ 5Zg6pubrMPbOoIKFJG4vzNBSmXGSBmqGIdIYqT2eR9eBDoOv5Cl8tCQ+zNoC2V0Q
+ uCOLZV86hoakduHniCv8cKSbsG6mm5oFP61/82yJLlPUarT+EGSuWCR6W1pGC5WR
+ GElnE9VFpaQ5TZ8A3EBWky2YhdX7vOzbjP8x0Jd/3UFfpNd5gRnxfJLx8rrdKt20
+ xYxR4FPUbu9kQFZIyUr2kxNi30R1+oK4hcXbID6gqxt1oW5PWGkNOXYTY6r/Vv6D
+ zU4Bf4gngWc7hgwbtGRkv2jR8Zv3ZIUbo4ZwMAMMs3Un7RWjjEJkrtUzdaIdjtwM
+ jZIH7xMNz/NK748EB3uMKiIOkmWqHrWkU2aa86V8heuTg/AWluKFG6J+WHzzYnPE
+ pb+WbWbZi2PcIQlDY2EDQyluXN0YKdHeFRXdo5QllN+oZ54e0EVioYzpUlzyD4/m
+ sqfGS/ZF//r7EoTeIbrqBJDbEc9pjB3cphpWWHLxxbo42u27w+Gvda6B+1ad2bZX
+ lBp8tnQl2y5JtMPWW7kVZs5EBPS8OY5NRWqVAFPBg1OlnC6OYC2c1rW7tqZll0T0
+ UORR+zdhayYDtLZhJdD5QcSVLRe26jlyeT4920dOUvjI8ANiRSjSOx3wwcnnhtLt
+ ABEBAAEAD/jW435kO/7VlNYaqKG2qUDHFbljDFnXhCCp9CCZ19ADGJWKRei0xilv
+ lXQiY8cLJka2xjEFzMH8FOWLpBwg/qffrclJsz8IY90Oo3SDFcdSIM48Ao2IeQrL
+ Vswa+W2np9utX9GwkABZHEsC5hDIfpWiBq1+Glx0QVCUakSQZ4txNG1VeGE0xMM5
+ 1+bvlygk3KfMQVjV/31Ngr7YNzLZMaTGI6iSZbDOeHpMdDAMWBVkk2vrxUa01Z7T
+ XJ6n5SNFCb+FfZKyu9xjrdlZswgiT71JaC52ncE7JHjj7pnxI6lSIkc14MHJ2ugk
+ 9WiW84v9ybCyOvEsk2Uh+7BwPfLJCee7SIWcVses55mVUm0YNoU68isQfuuuF2+B
+ OwTaoFT5sDwGlE7exdgk7DyUYxIIB3aRTUfNYeAIVW2uR5GruOgTLUw54KPa1N7O
+ NAfiC4OAfc+s6KBTU/Reozdq6mewLEM0UBccEmBtWevet64p5TWleyaL1TtEPZlX
+ DnrkTXA/ZRTCteuSLwmMDpcFEYc3IcgZIQfvqtHO2cDJ67AjlsgvEDwTV65l1MnN
+ krYIgUh8yFMnFGZPO1jw3mRtuU0QottdPj14Xcn855GS2C4CZ31N3YJq7eR+j5Bh
+ SmXk6k5ys8eF/gw4rbEaBbXFTjw8eb1q7Nwus0W+0yrq4r9/J1fxCADkCFXD95jm
+ sddOHz0Dih6c3KEvpkiPLzITOSp5/nPyd3K7T3xx0L2sBQQt2QGT0zeJCwAB5uTE
+ uTb6AjKcvOn7KlPQQqP9O2mgHo7Zzwr/I69DkYLuLGNCsr0XGi5sn9aaDG0dpFqg
+ 2VzItHVTCefoVF5EOmOaDwB1QYunKwXy3vtgNBJ7LwJb4rm1TgNHsheT/rtUsVYP
+ Z7lAxcLGa2xNm215nf8fzmbjZnCfPEyRpMgqlN+YUzGiDJwfN/fwr8OgFnjDSqOL
+ htbqCiLv7vTbsw6TWIuKr21QVEc3Qcqu96v0dNXk6wr4PTnPmezswSK0+Dc2+5JZ
+ PBkYIkbE5eYxCAD2THuc0iiqX4ZFS8KhbjWvmng08xulj45BpHCkY8oRcaMCRD0c
+ AoEDZyPEfwfb8SMNb2NHzvYi8djM0uU5NzqURKUF22bAAbxkgPyBKp8CBECD0X50
+ O4HpJIYShbfUD/XWKLvxGxKYlrOvwHuUboPPnMqsaMLwezu0MoaUztbTlr/OZgA0
+ 8/hwiyXDcsrf7yg5BPfAZghC1OYEvhEiS43VefAPVv/bvwz3Aml0kQLH14eZxGxL
+ z7mb0qoZJ0Qkn36QQ2L9GcWYRfBDgUXcxfZm6JW+UZWuWMBAPMSaPoRd9yt+MMPC
+ QsmyUrBxT9vTLFysUIJYQThycucvO9Xud/19CADt0G5p1u8uRzKi5Qnbtd79V0v5
+ Obud6LRooXJNprYVLhPE6lr0aeeZK6QDIrmRrZqeByQ2kfY6XP6Tl4/9wZX2ajS/
+ 8GJGmv7HP6vqfJdrOQhnAjQkUYYm72C/eicAsm8e/fiOSrpf1ithyPKOArFqVyth
+ pVkAnteqNDABBNellUbqS//XUKnk/Cd2WWaGe4J/nIuj78MphBEzSO4CmWkdbH5G
+ CPyXdq6LEnDp1By3LNmZTwqbeMUtzVcaJVh6k4jW3OEL9h0PIWQ8Ol42+0Wn57Pn
+ 5vPzGwKaf7zFhJlLRCiCdJJl4QOjXo0jvQHBvelQ8NVpeCCAabWmR9OwldqOhJiJ
+ BD4EGAECAAkFAl0L9gUCGy4CKQkQOvG9NhmrSWrBXSAEGQECAAYFAl0L9gUACgkQ
+ kjr8nlUnTh1fBBAAqFbBzvU44nvoxYtBz0b3ZJ8TBCu8rCAwXEquvb+sXWYj52jh
+ ThW+ke24rX178JZDPu6f8EZwF98voZCDxU5XTexqMugTubVSMmTGbJ63YN99Nl5P
+ jjdwO4r1LaBJ7ef30YLT0ZIUI73dDTolQ0ENHxwtD1xnwx8JvXpmQdDJ9/HINQlU
+ HRSt2qWHRSgrutRLFlO7trWQZXIrUwjY3qgKJMPYc2tJqmfsoD6EeISyZOVOJ7m5
+ xgs1f7UgtbVrHYhQOxRiMIAbMDbukRKwlvp1af8R7e+EoFMIcewaObe6/PUTuOFL
+ 0VkCWoHBBKWAJQJ7vHmzW1tgyrDjchHSUAGMqZOEL84uOCWqMQ/6HCj/zaEqiOqg
+ fuoh54K05lKE5OOIBWITVGgqsT9tli29Lov9vJb2p4csN4kSrdKJpLCgP21V8Utk
+ ZWR1OgDhD7h40Eobpph4KauYoAZiAfu3cb4BzNhUAJ69fJ5lrOlKP1GLmYyQ5jfx
+ s73TDCNfj42OBeUCO6tncTSPXs/9P2FziynVLxkCT8cbVq1C4H87BO7TEW9FuxKJ
+ hLfpVGbt/yG1HrvGJ/kRPk0sXu2md9quWkh6qPHF5EThCOrlfbwLD5Pqvt0ZPZlR
+ xxMSRP9L9w09ZYO1Y7f6gegElTpEh/aFLq1jjUxm8h/cO6A9lJ3Bjxb/xmoKoBAA
+ ieGiHu3YgsN0jmxvlnf7GwB89BNajyH6D0tbdsH+OeSU8e6xItxpe6s5GfonWAt+
+ ngPutPSPhgS5AUx8GrxHGu3Sx+wmGvGKpsH+2Tu1ciUN34K/sfrzKjxCuvnpcBTd
+ rOSiEObnKnb6OI6sW329ZH4z/r5tVoetWr45xspc9IE8TVIuavOlJkuInX5/B3Sa
+ DPwAH/aQAYc71j7yDr7ezFzx07h+pH4ePeGsnfdKy6ZWoQ1mmM35j93ZhH8P8YCC
+ N8lklRhxvZhXkHWt4ns/QzT+QawW2sR8Kkha3ydzx9cEcalmNq7nG+QkwSlliazE
+ 6peVL6ga2H1+XM+1p/P/qCsvbLmRobWSyfMURKkL05iykNjnHOYno+A+NaM3IR12
+ uf5tWvNfiJpNXbUf8Yjh0ep73aZfF1kODIcLR1AHtVVt0Yc05XKEwFfv4kADi1Mh
+ Pp0s4MHRMjPCgPU1j2b5ulGZGZKlCECu3799bw8yb7n9Hpj42hL0ZOEsMdMHbCfd
+ 7eQUNNncVW/KDnwrJQAabr/771xSTauWDDdpEJEc2Mdx3m6e7doQvboYaKYvK4kg
+ x/Vi7pcHS1xQO0zC8BPPBq9tMyy5QGSybfVSvMPo+7nIsumZ9fvJwjjRHreZP4pa
+ nbE9Gt4CrEbWoo6jbcscycpQbduEhGtvwj8UFXe5z+M=
+ =o0Ig
+ -----END PGP PRIVATE KEY BLOCK-----
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/aliases
new file mode 100644
index 000000000..0ac9bad98
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/dependencies.yml b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/dependencies.yml
new file mode 100644
index 000000000..464b5c428
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/dependencies.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Install Petname Python package
+ pip:
+ name: petname
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/runme.sh
new file mode 100755
index 000000000..71faa439d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/runme.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test.yml -v "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/test.yml b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/test.yml
new file mode 100644
index 000000000..c61461867
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_pet/test.yml
@@ -0,0 +1,30 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Call plugin
+ set_fact:
+ result1: "{{ query('community.general.random_pet', words=3) }}"
+ result2: "{{ query('community.general.random_pet', length=3) }}"
+ result3: "{{ query('community.general.random_pet', prefix='kubernetes') }}"
+ result4: "{{ query('community.general.random_pet', separator='_') }}"
+ result5: "{{ query('community.general.random_pet', words=2, length=6, prefix='kubernetes', separator='_') }}"
+
+ - name: Check results
+ assert:
+ that:
+ - result1 | length == 1
+ - result1[0].split('-') | length == 3
+ - result2 | length == 1
+ - result2[0].split('-')[0] | length <= 3
+ - result3 | length == 1
+ - result3[0].split('-')[0] == 'kubernetes'
+ - result4 | length == 1
+ - result4[0].split('_') | length == 2
+ - result5 | length == 1
+ - result5[0].split('_') | length == 3
+ - result5[0].split('_')[0] == 'kubernetes'
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_string/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/aliases
new file mode 100644
index 000000000..0ac9bad98
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_string/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/runme.sh
new file mode 100755
index 000000000..35c79500c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/runme.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test.yml -v "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_string/test.yml b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/test.yml
new file mode 100644
index 000000000..b1f623410
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_string/test.yml
@@ -0,0 +1,53 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Call plugin
+ set_fact:
+ result1: "{{ query('community.general.random_string') }}"
+ result2: "{{ query('community.general.random_string', length=0) }}"
+ result3: "{{ query('community.general.random_string', length=10) }}"
+ result4: "{{ query('community.general.random_string', length=-1) }}"
+ result5: "{{ query('community.general.random_string', override_special='_', min_special=1) }}"
+ result6: "{{ query('community.general.random_string', upper=false, special=false) }}" # lower case only
+ result7: "{{ query('community.general.random_string', lower=false, special=false) }}" # upper case only
+ result8: "{{ query('community.general.random_string', lower=false, upper=false, special=false) }}" # number only
+ result9: "{{ query('community.general.random_string', lower=false, upper=false, special=false, min_numeric=1, length=1) }}" # single digit only
+ result10: "{{ query('community.general.random_string', numbers=false, upper=false, special=false, min_lower=1, length=1) }}" # single lowercase character only
+ result11: "{{ query('community.general.random_string', base64=true, length=8) }}"
+ result12: "{{ query('community.general.random_string', upper=false, numbers=false, special=false) }}" # all lower case
+ result13: "{{ query('community.general.random_string', override_all='0', length=2) }}"
+
+ - name: Raise error when impossible constraints are provided
+ set_fact:
+ impossible: "{{ query('community.general.random_string', upper=false, lower=false, special=false, numbers=false) }}"
+ ignore_errors: true
+ register: impossible_result
+
+ - name: Check results
+ assert:
+ that:
+ - result1[0] | length == 8
+ - result2[0] | length == 0
+ - result3[0] | length == 10
+ - result4[0] | length == 0
+ - result5[0] | length == 8
+ - "'_' in result5[0]"
+ - result6[0] is lower
+ - result7[0] is upper
+ - result8[0] | regex_replace('^(\d+)$', '') == ''
+ - result9[0] | regex_replace('^(\d+)$', '') == ''
+ - result9[0] | length == 1
+ - result10[0] | length == 1
+ - result10[0] is lower
+      # if the input length is not a multiple of 3, the base64-encoded string is padded with '='
+ - result11[0].endswith('=')
+ - result12[0] is lower
+ - result13[0] | length == 2
+ - result13[0] == '00'
+ - impossible_result is failed
+ - "'Available characters cannot' in impossible_result.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_words/aliases b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/aliases
new file mode 100644
index 000000000..0ac9bad98
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/aix
+skip/python2.6 # lookups are controller only, and we no longer support Python 2.6 on the controller
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_words/dependencies.yml b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/dependencies.yml
new file mode 100644
index 000000000..1cb0b0d3a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/dependencies.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Install xkcdpass Python package
+ pip:
+ name: xkcdpass
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_words/runme.sh b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/runme.sh
new file mode 100755
index 000000000..71faa439d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/runme.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -eux
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook dependencies.yml -v "$@"
+
+ANSIBLE_ROLES_PATH=../ \
+ ansible-playbook test.yml -v "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/lookup_random_words/test.yml b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/test.yml
new file mode 100644
index 000000000..90c672730
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lookup_random_words/test.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Call random_words plugin
+ set_fact:
+ result1: "{{ query('community.general.random_words') }}"
+ result2: "{{ query('community.general.random_words', min_length=5, max_length=5) }}"
+ result3: "{{ query('community.general.random_words', delimiter='!') }}"
+ result4: "{{ query('community.general.random_words', numwords=3, delimiter='-', case='capitalize') }}"
+ result5: "{{ query('community.general.random_words', min_length=5, max_length=5, numwords=3, delimiter='') }}"
+
+ - name: Check results
+ assert:
+ that:
+ - result1 | length == 1
+ - result1[0] | length >= 35
+ - result2 | length == 1
+ - result2[0] | length == 35
+ - result3 | length == 1
+ - result3[0].count("!") == 5
+ - result4 | length == 1
+ - result4[0] | length >= 17
+ - result4[0] | length <= 29
+ - result4[0] | regex_findall("[A-Z]") | length == 3
+ - result4[0].count("-") == 2
+ - result5 | length == 1
+ - result5[0] | length == 15
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/aliases b/ansible_collections/community/general/tests/integration/targets/lvg/aliases
new file mode 100644
index 000000000..3b92ba75c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+destructive
+needs/privileged
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml
new file mode 100644
index 000000000..e14c48c3f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required packages (Linux)
+ package:
+ name: lvm2
+ state: present
+ when: ansible_system == 'Linux'
+
+- name: Test lvg module
+ block:
+ - import_tasks: setup.yml
+
+ - import_tasks: test_indempotency.yml
+
+ - import_tasks: test_grow_reduce.yml
+
+ - import_tasks: test_pvresize.yml
+ always:
+ - import_tasks: teardown.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml
new file mode 100644
index 000000000..3984b9fc3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/setup.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Create files to use as a disk devices"
+ command: "dd if=/dev/zero of={{ remote_tmp_dir }}/img{{ item }} bs=1M count=10"
+ with_sequence: 'count=2'
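+# NOTE: "losetup -f" only reports the next free device; the create steps below
+# assume it is still free when they run, which holds on isolated test hosts.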
+
+- name: "Show next free loop device"
+ command: "losetup -f"
+ register: loop_device1
+
+- name: "Create loop device for file"
+ command: "losetup -f {{ remote_tmp_dir }}/img1"
+
+- name: "Show next free loop device"
+ command: "losetup -f"
+ register: loop_device2
+
+- name: "Create loop device for file"
+ command: "losetup -f {{ remote_tmp_dir }}/img2"
+
+- name: "Affect name on disk to work on"
+ set_fact:
+ loop_device1: "{{ loop_device1.stdout }}"
+ loop_device2: "{{ loop_device2.stdout }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml
new file mode 100644
index 000000000..de4957321
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/teardown.yml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove test volume group
+ lvg:
+ vg: testvg
+ state: absent
+
+- name: Detach loop devices
+ command: "losetup -d {{ item }}"
+ loop:
+ - "{{ loop_device1 | default('') }}"
+ - "{{ loop_device2 | default('') }}"
+ when:
+ - item != ''
+
+- name: Remove device files
+ file:
+ path: "{{ remote_tmp_dir }}/img{{ item }}"
+ state: absent
+ with_sequence: 'count=2'
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml
new file mode 100644
index 000000000..857df9246
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_grow_reduce.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - ansible_lvm.pvs[loop_device1].vg == "testvg"
+ - 'loop_device2 not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device2].vg == ""'
+
+- name: "Extend to second disk AND reduce from the first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device2 }}"
+
+- name: "get lvm facts"
+ setup:
+
+- debug: var=ansible_lvm
+
+- name: "Assert the testvg span only on first disk"
+ assert:
+ that:
+ - 'loop_device1 not in ansible_lvm.pvs or
+ ansible_lvm.pvs[loop_device1].vg == ""'
+ - ansible_lvm.pvs[loop_device2].vg == "testvg"
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml
new file mode 100644
index 000000000..758912484
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_indempotency.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create volume group on disk device
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+
+- name: Create the volume group again to verify idempotence
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ register: repeat_vg_create
+
+- name: Do all assertions to verify expected results
+ assert:
+ that:
+ - repeat_vg_create is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml
new file mode 100644
index 000000000..f15add91c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lvg/tasks/test_pvresize.yml
@@ -0,0 +1,81 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Create volume group on first disk"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+
+- name: Get current VG size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: Increase the size of the backing file
+ command: "dd if=/dev/zero bs=8MiB count=1 of={{ remote_tmp_dir }}/img1 conv=notrunc oflag=append"
+
+- name: "Reread size of file associated with loop_device1"
+ command: "losetup -c {{ loop_device1 }}"
+
+- name: "Reruns lvg with pvresize:no"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: false
+ register: cmd_result
+
+- name: Assert that no change was reported
+  assert:
+    that:
+      - cmd_result is not changed
+
+- name: Get current VG size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is still 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: "Reruns lvg with pvresize:yes and check_mode:yes"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: true
+ check_mode: true
+ register: cmd_result
+
+- name: Assert that the module reported a change
+ assert:
+ that:
+ - cmd_result is changed
+
+- name: Get current VG size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is still 8388608B
+ assert:
+ that:
+ - "'8388608B' == cmd_result.stdout"
+
+- name: "Reruns lvg with pvresize:yes"
+ lvg:
+ vg: testvg
+ pvs: "{{ loop_device1 }}"
+ pvresize: true
+
+- name: Get current VG size
+ shell: vgs -v testvg -o pv_size --noheading --units b | xargs
+ register: cmd_result
+
+- name: Assert the testvg size is now 16777216B
+ assert:
+ that:
+ - "'16777216B' == cmd_result.stdout"
diff --git a/ansible_collections/community/general/tests/integration/targets/lxd_project/aliases b/ansible_collections/community/general/tests/integration/targets/lxd_project/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lxd_project/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/lxd_project/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/lxd_project/tasks/main.yml
new file mode 100644
index 000000000..d1340eebd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/lxd_project/tasks/main.yml
@@ -0,0 +1,142 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Clean up test project
+ lxd_project:
+ name: ansible-test-project
+ state: absent
+
+- name: Clean up renamed test project
+ lxd_project:
+ name: ansible-test-project-renamed
+ state: absent
+
+- name: Create test project
+ lxd_project:
+ name: ansible-test-project
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "3"
+ state: present
+ register: results
+
+- name: Check project has been created correctly
+ assert:
+ that:
+ - results is changed
+ - results.actions is defined
+ - "'create' in results.actions"
+
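+# merge_project: true merges the supplied config into the project's existing
+# config (so an identical config is a no-op); false replaces it wholesale.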
+- name: Create test project again with merge_project set to true
+ lxd_project:
+ name: ansible-test-project
+ merge_project: true
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "3"
+ state: present
+ register: results
+
+- name: Check state is not changed
+ assert:
+ that:
+ - results is not changed
+ - "{{ results.actions | length }} == 0"
+
+- name: Create test project again with merge_project set to false
+ lxd_project:
+ name: ansible-test-project
+ merge_project: false
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "3"
+ state: present
+ register: results
+
+- name: Check state is changed
+ assert:
+ that:
+ - results is changed
+ - "'apply_projects_configs' in results.actions"
+
+- name: Update project test => update description
+ lxd_project:
+ name: ansible-test-project
+ merge_project: false
+ description: "ansible test project"
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "3"
+ state: present
+ register: results
+
+- name: Check state is changed
+ assert:
+ that:
+ - results is changed
+ - "'apply_projects_configs' in results.actions"
+
+- name: Update project test => update project config
+ lxd_project:
+ name: ansible-test-project
+ merge_project: false
+ description: "ansible test project"
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "4"
+ state: present
+ register: results
+
+- name: Check state is changed
+ assert:
+ that:
+ - results is changed
+ - "'apply_projects_configs' in results.actions"
+
+- name: Rename project test
+ lxd_project:
+ name: ansible-test-project
+ new_name: ansible-test-project-renamed
+ merge_project: true
+ description: "ansible test project"
+ config:
+ features.images: "false"
+ features.networks: "true"
+ features.profiles: "true"
+ limits.cpu: "4"
+ state: present
+ register: results
+
+- name: Check state is changed
+ assert:
+ that:
+ - results is changed
+ - "'rename' in results.actions"
+
+- name: Clean up renamed test project
+ lxd_project:
+ name: ansible-test-project-renamed
+ state: absent
+ register: results
+
+- name: Check project is deleted
+ assert:
+ that:
+ - results is changed
+ - "'delete' in results.actions"
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/aliases b/ansible_collections/community/general/tests/integration/targets/mail/aliases
new file mode 100644
index 000000000..afda346c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt
new file mode 100644
index 000000000..2fcbb3a1d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.crt
@@ -0,0 +1,26 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIJAJyHQUcqSOQpMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV
+BAYTAkJFMRMwEQYDVQQIDApWbGFhbmRlcmVuMQ0wCwYDVQQHDARHZW50MQ4wDAYD
+VQQKDAVEYWdpdDELMAkGA1UECwwCSVQxHjAcBgNVBAMMFWxvY2FsaG9zdC5sb2Nh
+bGRvbWFpbjAeFw0xODExMjgxMjQ3MzlaFw0yODExMjUxMjQ3MzlaMG4xCzAJBgNV
+BAYTAkJFMRMwEQYDVQQIDApWbGFhbmRlcmVuMQ0wCwYDVQQHDARHZW50MQ4wDAYD
+VQQKDAVEYWdpdDELMAkGA1UECwwCSVQxHjAcBgNVBAMMFWxvY2FsaG9zdC5sb2Nh
+bGRvbWFpbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANLqBGgIF44U
+zRhNupGwSKAeTIXT4nXPIJKlIi1kTSQwtywQmBw6leBlvj1qwU73+nhqwSclIrYx
+3ltvrpKHAWG1jqqsExuLRaKRdWgx1YC2WPgZwYC0C+LkE8vs/Kl1v0HgPuPMkzeK
+hDctQfWOaykFOy0mB/BfP2vSVoEckffMlDjG/bHwNt7cG8BnqKd8e9VR+ZcBazFK
+bnKhht0ldR84Wbp+5wpuCr1R1R0ltdO2O+LACrXzvH9Kf0CGhKXGccwGpi43eXyK
+CDbubkGcLjg9Fo7kZ6uW5nU2vHJ1iDGnvUl8X96qKoOFU0EvBveCisc1bY433uG1
+NjEZ1xLPGK8CAwEAAaNQME4wHQYDVR0OBBYEFO6nDFzJBZBLJt4yza+VrUEOy3Zl
+MB8GA1UdIwQYMBaAFO6nDFzJBZBLJt4yza+VrUEOy3ZlMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQELBQADggEBALTq0ycKhEr/3KOsfKBup4bs5Oqv0x7ePaUNxyef
+JSyKTjD0gPY8YNAeNA7gU5XGjMr4h9cNpRmJ0TyfwWJxH4uK4d2p5k1ZpQWKv8jG
+4U9sZTQzkh8nqRBaEl94qsiCIRCllb6VveWbIGE6eqt4rT0V9l9fvbw+hSXdiYXT
+KkkX5VZxctV2OMkbP1mbOYIA22jqZKQiIvAVcMA6vSnlDAJKTi9/kw99/zjUQ9Jb
+8bF2gcnzAijJAWsCqf8hZVq9+pogptBd/bkKUCuTA4MACX5ppgQltkgX2mLrj6Ep
+Po2euqzUZREzKl2cUaP85m+8tClYk0Wjfm0RjxPRa8fgUfM=
+-----END CERTIFICATE-----
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key
new file mode 100644
index 000000000..193ec9cda
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.key
@@ -0,0 +1,32 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDS6gRoCBeOFM0Y
+TbqRsEigHkyF0+J1zyCSpSItZE0kMLcsEJgcOpXgZb49asFO9/p4asEnJSK2Md5b
+b66ShwFhtY6qrBMbi0WikXVoMdWAtlj4GcGAtAvi5BPL7Pypdb9B4D7jzJM3ioQ3
+LUH1jmspBTstJgfwXz9r0laBHJH3zJQ4xv2x8Dbe3BvAZ6infHvVUfmXAWsxSm5y
+oYbdJXUfOFm6fucKbgq9UdUdJbXTtjviwAq187x/Sn9AhoSlxnHMBqYuN3l8igg2
+7m5BnC44PRaO5GerluZ1NrxydYgxp71JfF/eqiqDhVNBLwb3gorHNW2ON97htTYx
+GdcSzxivAgMBAAECggEALDCRucYwQTmEexoWA94+aSXP/J4XLX23ImJs1bvVGccH
+KblUVV3E9x36DN4oIEZ+eOpNC8mRC0FJiDjPB643kOQ8PvAMlNHKRjRZt/nw9KW/
+4ENtMm0GrIQCzdAaY9ritoeoRYwgMBvadcEKt8seEpsg+eWk9izOmeWY8DYvMw6N
+hNu5zQLkMGTTqfDxkl7KnyKPhjW/++eUdgsTIA31/wHsJSiNR5Pkoy2fOVtNO7JN
+EghcKE3lYKKzOW6vg0LBY8xVQ4KMbCVgnYNI3MU9qpG2bYxy1hlWIrsjrt9PyRp8
+jDSKnLD4Zvv4L6gj2fhelES/YQ/055YyzG801Q+gUQKBgQDohqr5fwQj8Awrc0K/
+DodaIkVwXHkQAhSWidrqxZXmtn4ZsgDA3V82ZTIjWe2v7ES5U4jeYKGoUweCUodr
+PPT0IKEFmS2Fq1AZZx7Ry+ihA7gw6PV5DbG5DwyNYlhn6F6Bghl8pKAcXPGuwtgd
+BKXj7utEp57Q9ue3P00cGNokKQKBgQDoNNFMPnfv5UQp+K0A89cKW8q6sf93/ul4
+kjh72q/KbK57ouhWPNib3tJLvkl7P8S45nrUGQZtd6zLhU/6SzAnGGnNZ7gNAs3l
+SWidcmZDqIiIXh6BF4/4WxXMXJdhfrux9/O8Xk89v+EDAbLbN8jSrvy87+6mOmRM
+r/MAXToxFwKBgHpGbtxalbPMRKoIp33OxxB32yoWBreLUIZFIfC5THWRW8hpWYoS
+H0J8fpwmax5K0WzfZ6cBC6F3YAiBG6Mh3/IMwoAuJ8kV6D4jgwpx/vfE+/QEXSl2
+MRIOvtwObkzd3eyenIZ2D5g6rADphznjOtUcy21D8/kRDZLIX+U5kGTZAoGBAIYg
+/ETuUJlh9V3JJyXFtBFntFLjPo4x0Oq0i6v/RkvHO4JvN4WY4AYpT5Aw+oEW9KtZ
+dtnNGslgt49YEjqh886ha3wazVW2qPgozyUjT68FSth6hWRMF/19n7nMQiUu73x9
+nWzRjTQ+Aduav5WhQ39vVM5OSav7TrR9bgBn2ZVBAoGBAN4Hle/PIFzApQYQRIT0
+wPpOvEVx56+c70ZMvLv5UgmY2jLKZKFUV6oGGUZlJXfh1ZMnXShWY1pjvi/FnIIi
+AKDB9N17DE5AmpzuXFjU3YwXde98MjuUY03P3yaFQ4cXYryqgZxuMPgyGFM9vtMd
+WXFdvCtm0c3WMpPJSr9kgy6Q
+-----END PRIVATE KEY-----
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py
new file mode 100644
index 000000000..18c6fbf9b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/files/smtpserver.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
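+# Usage: smtpserver.py [port1[:port2]] [certfile] [keyfile]
+# Defaults: ports 25:465, certificate/key named after this script (.crt/.key).
+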
+import asyncore
+import os.path
+import ssl
+import sys
+
+# Handle TLS and non-TLS support
+try:
+ import smtpd_tls
+ HAS_TLS = True
+except ImportError:
+ import smtpd
+ HAS_TLS = False
+ print('Library smtpd-tls is missing or not supported, hence starttls is NOT supported.')
+
+# Handle custom ports
+port = '25:465'
+if len(sys.argv) > 1:
+ port = sys.argv[1]
+ports = port.split(':')
+if len(ports) > 1:
+ port1, port2 = int(ports[0]), int(ports[1])
+else:
+ port1, port2 = int(port), None
+
+# Handle custom certificate
+basename = os.path.splitext(sys.argv[0])[0]
+certfile = basename + '.crt'
+if len(sys.argv) > 2:
+ certfile = sys.argv[2]
+
+# Handle custom key
+keyfile = basename + '.key'
+if len(sys.argv) > 3:
+ keyfile = sys.argv[3]
+
+try:
+ ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+except AttributeError:
+ ssl_ctx = None
+ if HAS_TLS:
+ print('Python ssl library does not support SSLContext, hence starttls and TLS are not supported.')
+ import smtpd
+
+if HAS_TLS and ssl_ctx is not None:
+ print('Using %s and %s' % (certfile, keyfile))
+ ssl_ctx.load_cert_chain(certfile=certfile, keyfile=keyfile)
+
+ print('Start SMTP server on port', port1)
+ smtp_server1 = smtpd_tls.DebuggingServer(('127.0.0.1', port1), None, ssl_ctx=ssl_ctx, starttls=True)
+ if port2:
+ print('Start TLS SMTP server on port', port2)
+ smtp_server2 = smtpd_tls.DebuggingServer(('127.0.0.1', port2), None, ssl_ctx=ssl_ctx, starttls=False)
+else:
+ print('Start SMTP server on port', port1)
+ smtp_server1 = smtpd.DebuggingServer(('127.0.0.1', port1), None) # pylint: disable=used-before-assignment
+ if port2:
+ print('WARNING: TLS is NOT supported on this system, not listening on port %s.' % port2)
+
+asyncore.loop()
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/mail/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml
new file mode 100644
index 000000000..4f3f90a51
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mail/tasks/main.yml
@@ -0,0 +1,105 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# TODO: Our current implementation does not handle SMTP authentication
+
+# NOTE: If the system does not support smtpd-tls (Python 2.6 and older), we only run the basic tests
+- name: Attempt to install smtpd-tls
+ pip:
+ name: smtpd-tls
+ state: present
+ ignore_errors: true
+ register: smtpd_tls
+
+- name: Install test smtpserver
+ copy:
+ src: '{{ item }}'
+ dest: '{{ remote_tmp_dir }}/{{ item }}'
+ loop:
+ - smtpserver.py
+ - smtpserver.crt
+ - smtpserver.key
+
+# FIXME: Verifying the mail after it was sent would be nice
+# This would require either dumping the content, or registering async task output
+- name: Start test smtpserver
+ shell: '{{ ansible_python.executable }} {{ remote_tmp_dir }}/smtpserver.py 10025:10465'
+ async: 45
+ poll: 0
+ register: smtpserver
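+# "async: 45" with "poll: 0" launches the server fire-and-forget: it keeps
+# serving (for up to 45 seconds) while the mail tasks below talk to it.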
+
+- name: Send a basic test-mail
+ mail:
+ port: 10025
+ subject: Test mail 1 (smtp)
+ secure: never
+
+- name: Send a test-mail with body and specific recipient
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 2 (smtp + body)
+ body: Test body 2
+ secure: never
+
+- name: Send a test-mail with attachment
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 3 (smtp + body + attachment)
+ body: Test body 3
+ attach: /etc/group
+ secure: never
+
+# NOTE: This might fail if smtpd-tls is missing or Python 2.7.8 or older is used
+- name: Send a test-mail using starttls
+ mail:
+ port: 10025
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 4 (smtp + starttls + body + attachment)
+ body: Test body 4
+ attach: /etc/group
+ secure: starttls
+ ignore_errors: true
+ register: starttls_support
+
+# NOTE: This might fail if smtpd-tls is missing or Python 2.7.8 or older is used
+- name: Send a test-mail using TLS
+ mail:
+ port: 10465
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 5 (smtp + tls + body + attachment)
+ body: Test body 5
+ attach: /etc/group
+ secure: always
+ ignore_errors: true
+ register: tls_support
+
+- fail:
+ msg: Sending mail using starttls failed.
+ when: smtpd_tls is succeeded and starttls_support is failed and tls_support is succeeded
+
+- fail:
+    msg: Sending mail using TLS failed.
+ when: smtpd_tls is succeeded and tls_support is failed and starttls_support is succeeded
+
+- name: Send a test-mail with body, specific recipient and specific ehlohost
+ mail:
+ port: 10025
+ ehlohost: some.domain.tld
+ from: ansible@localhost
+ to: root@localhost
+ subject: Test mail 6 (smtp + body + ehlohost)
+ body: Test body 6
+ secure: never
diff --git a/ansible_collections/community/general/tests/integration/targets/mas/aliases b/ansible_collections/community/general/tests/integration/targets/mas/aliases
new file mode 100644
index 000000000..ea236467f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mas/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/root
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml
new file mode 100644
index 000000000..f659160dc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mas/tasks/main.yml
@@ -0,0 +1,158 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the mas module.
+# Copyright (c) 2020, Lukas Bestle <project-ansible@lukasbestle.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Test preparation
+- name: Uninstall Rested to ensure a consistent starting point
+ mas:
+ id: 421879749
+ state: absent
+ become: true
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is uninstalled
+ assert:
+ that:
+ - install_status.stat.exists == false
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+# Installation
+- name: Check if Rested needs to be installed
+ mas:
+ id: 421879749
+ state: present
+ register: install_check
+ check_mode: true
+
+- name: Ensure that the status would have changed
+ assert:
+ that:
+ - install_check is changed
+ - install_check.msg == "Installed 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is not yet installed
+ assert:
+ that:
+ - install_status.stat.exists == false
+
+- name: Install Rested
+ mas:
+ id: 421879749
+ state: present
+ register: install
+
+- name: Ensure that the status changed
+ assert:
+ that:
+ - install is changed
+ - install.msg == "Installed 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is installed
+ assert:
+ that:
+ - install_status.stat.exists == true
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+- name: Install Rested again
+ mas:
+ id: 421879749
+ state: present
+ register: install_again
+
+- name: Ensure that the status is unchanged (already installed)
+ assert:
+ that:
+ - install_again is not changed
+ - "'msg' not in install_again"
+
+# Uninstallation
+- name: Check if Rested needs to be uninstalled
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall_check
+ become: true
+ check_mode: true
+
+- name: Ensure that the status would have changed
+ assert:
+ that:
+ - uninstall_check is changed
+ - uninstall_check.msg == "Uninstalled 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: install_status
+
+- name: Ensure the app is not yet uninstalled
+ assert:
+ that:
+ - install_status.stat.exists == true
+
+- name: Uninstall Rested
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall
+ become: true
+
+- name: Ensure that the status changed
+ assert:
+ that:
+ - uninstall is changed
+ - uninstall.msg == "Uninstalled 1 app(s)"
+
+- name: Determine whether the app is installed
+ stat:
+ path: /Applications/Rested.app
+ register: uninstall_status
+
+- name: Ensure the app is uninstalled
+ assert:
+ that:
+ - uninstall_status.stat.exists == false
+
+- name: Wait until the OS-internal cache has been updated
+ pause:
+ seconds: 5
+
+- name: Uninstall Rested again
+ mas:
+ id: 421879749
+ state: absent
+ register: uninstall_again
+ become: true
+
+- name: Ensure that the status is unchanged (already uninstalled)
+ assert:
+ that:
+ - uninstall_again is not changed
+ - "'msg' not in uninstall_again"
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml
new file mode 100644
index 000000000..153df95ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_dns_reload/tasks/main.yml
@@ -0,0 +1,33 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
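+# A well-formed but unauthorized API key exercises the module's 403 handling.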
+- name: request reload with invalid API key
+ memset_dns_reload:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request reload and poll
+ memset_dns_reload:
+ api_key: "{{ api_key }}"
+ poll: true
+ register: result
+
+- name: check reload succeeded
+ assert:
+ that:
+ - result is changed
+ - result is successful
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml
new file mode 100644
index 000000000..7dc7f7c69
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_memstore_info/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: query API with invalid API key
+ memset_memstore_info:
+ api_key: 'wa9aerahhie0eekee9iaphoorovooyia'
+ name: 'mstestyaa1'
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request memstore info
+ memset_memstore_info:
+ api_key: "{{ api_key }}"
+ name: 'mstestyaa1'
+ register: result
+
+- name: check the request succeeded
+ assert:
+ that:
+ - result is not changed
+ - result is successful
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases b/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_server_info/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_server_info/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml
new file mode 100644
index 000000000..79066fac7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_server_info/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: query API with invalid API key
+ memset_server_info:
+ api_key: 'wa9aerahhie0eekee9iaphoorovooyia'
+ name: 'testyaa1'
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: request server info
+ memset_server_info:
+ api_key: "{{ api_key }}"
+ name: 'testyaa1'
+ register: result
+
+- name: check the request succeeded
+ assert:
+ that:
+ - result is not changed
+ - result is successful
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases b/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml
new file mode 100644
index 000000000..091bab823
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone/tasks/main.yml
@@ -0,0 +1,125 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
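+# A randomized zone name keeps concurrent test runs from colliding.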
+- name: create random string
+ set_fact:
+ zone_name: "{{ 65535 | random | string }}.ansible.example.com"
+
+- name: create zone with incorrect API key
+ memset_zone:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: test creating zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ check_mode: true
+ register: result
+
+- name: check if the zone would be created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ register: result
+
+- name: ensure the zone was created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create duplicate zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ ttl: 300
+ register: result
+
+- name: ensure we can't create duplicate zones
+ assert:
+ that:
+ - result is not changed
+
+- name: test deleting zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ check_mode: true
+ register: result
+
+- name: check if the zone would be deleted
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete empty zone
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ force: false
+ register: result
+
+- name: ensure the zone was deleted
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create zone for deletion test
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: present
+ name: "{{ zone_name }}"
+ register: result
+
+- name: ensure the zone was created for the deletion test
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
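+# force deletes the zone without first checking that it is empty.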
+- name: delete zone with force
+ memset_zone:
+ api_key: "{{ api_key }}"
+ state: absent
+ name: "{{ zone_name }}"
+ force: true
+ register: result
+
+- name: ensure force is respected
+ assert:
+ that:
+ - result is changed
+ - result is successful
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml
new file mode 100644
index 000000000..8828011b0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+random_string: "baiqui8ci6miedoo9eivohJ0aixei7oo"
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml
new file mode 100644
index 000000000..e92ffcdcf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/tasks/main.yml
@@ -0,0 +1,152 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create domain with invalid API key
+ memset_zone_domain:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+ assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: create domain over 250 chars
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com'
+ zone: "{{ target_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: test domain length is validated
+ assert:
+ that:
+ - result is failed
+ - "'Zone domain must be less than 250 characters in length' in result.stderr"
+
+- name: create domain in non-existent zone
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "non-existent-zone"
+ ignore_errors: true
+ register: result
+
+- name: fail if zone does not exist
+ assert:
+ that:
+ - result is failed
+ - "'does not exist, cannot create domain.' in result.stderr"
+
+- name: create domain in non-unique zone
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ duplicate_zone }}"
+ ignore_errors: true
+ register: result
+
+- name: fail if the zone is not unique
+ assert:
+ that:
+ - result is failed
+ - "'matches multiple zones, cannot create domain' in result.stderr"
+
+- name: test creating domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ check_mode: true
+ register: result
+
+- name: create domain with check mode
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: ensure the domain was created
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create existing domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: present
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: ensure creating an existing domain is not changed
+ assert:
+ that:
+ - result is not changed
+
+- name: test deleting domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ check_mode: true
+ register: result
+
+- name: delete domain with check mode
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: ensure the domain was deleted
+ assert:
+ that:
+ - result is changed
+
+- name: delete non-existent domain
+ memset_zone_domain:
+ api_key: "{{ api_key }}"
+ state: absent
+ domain: "{{ test_domain }}"
+ zone: "{{ target_zone }}"
+ register: result
+
+- name: delete absent domain
+ assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml
new file mode 100644
index 000000000..e891ff49f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_domain/vars/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_domain: ansible.example.com
+target_zone: ansible-dns-zone
+duplicate_zone: ansible-dns-zone-dupe
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/meta/main.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml
new file mode 100644
index 000000000..c1bdd6873
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/tasks/main.yml
@@ -0,0 +1,235 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create record with incorrect API key
+ memset_zone_record:
+ api_key: "wa9aerahhie0eekee9iaphoorovooyia"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- name: check API response with invalid API key
+  assert:
+ that:
+ - "'Memset API returned a 403 response (ApiErrorForbidden, Bad api_key)' in result.msg"
+ - result is not successful
+
+- name: create record in non-existent zone
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "a-non-existent-zone"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- name: assert that record is not created
+ assert:
+ that:
+ - "'DNS zone a-non-existent-zone does not exist.' in result.msg"
+ - result is not successful
+
+- name: create record in non-unique zone
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ duplicate_zone }}"
+ type: A
+ address: 127.0.0.1
+ ignore_errors: true
+ register: result
+
+- name: assert that record is not created
+ assert:
+ that:
+ - "'ansible-dns-zone-dupe matches multiple zones.' in result.msg"
+ - result is not successful
+
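+# Valid priorities are 0-999 inclusive, so 1001 is deliberately out of range.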
+- name: create record with invalid priority
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: SRV
+ address: "0 5269 hostname.example.com"
+ record: "_jabber._tcp"
+ priority: 1001
+ ignore_errors: true
+ register: result
+
+- name: assert that priority was out of range
+ assert:
+ that:
+ - "'Priority must be in the range 0 > 999 (inclusive).' in result.msg"
+ - result is not successful
+
+- name: create record with address longer than 250 chars
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: CNAME
+ address: "aaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.com"
+ record: "aaa.ansible.com"
+ ignore_errors: true
+ register: result
+
+- name: assert that address was longer than allowed
+ assert:
+ that:
+ - "'Address must be less than 250 characters in length.' in result.msg"
+ - result is not successful
+
+- name: create record longer than 63 chars
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ ignore_errors: true
+ register: result
+
+- name: assert that record was longer than allowed
+ assert:
+ that:
+ - "'Record must be less than 63 characters in length.' in result.msg"
+ - result is not successful
+
+- name: create record which cannot have relative enabled
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ relative: true
+ ignore_errors: true
+ register: result
+
+- name: assert that setting relative failed
+ assert:
+ that:
+ - "'Relative is only valid for CNAME, MX, NS and SRV record types' in result.msg"
+ - result is not successful
+
+- name: test creating valid A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ check_mode: true
+ register: result
+
+- name: assert that result would have changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: actually create valid A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: create valid SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: present
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: test deleting A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ check_mode: true
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: actually delete A record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: A
+ address: 127.0.0.1
+ record: "www"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is changed
+ - result is successful
+
+- name: delete non-existent SPF record
+ memset_zone_record:
+ api_key: "{{ api_key }}"
+ state: absent
+ zone: "{{ test_zone }}"
+ type: TXT
+ address: "v=spf1 +a +mx +ip4:127.0.0.1 ?all"
+ register: result
+
+- name: assert that result changed
+ assert:
+ that:
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml
new file mode 100644
index 000000000..857f1c722
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/memset_zone_record/vars/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_zone: ansible-dns-record-tests
+duplicate_zone: ansible-dns-zone-dupe
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/aliases b/ansible_collections/community/general/tests/integration/targets/module_helper/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/library/mdepfail.py b/ansible_collections/community/general/tests/integration/targets/module_helper/library/mdepfail.py
new file mode 100644
index 000000000..92ebbde6e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/library/mdepfail.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: mdepfail
+author: "Alexei Znamensky (@russoz)"
+short_description: Simple module for testing
+description:
+ - Simple module test description.
+options:
+ a:
+ description: aaaa
+ type: int
+ b:
+ description: bbbb
+ type: str
+ c:
+ description: cccc
+ type: str
+'''
+
+EXAMPLES = ""
+
+RETURN = ""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible.module_utils.basic import missing_required_lib
+
+with ModuleHelper.dependency("nopackagewiththisname", missing_required_lib("nopackagewiththisname")):
+ import nopackagewiththisname # noqa: F401, pylint: disable=unused-import
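+# The dependency() context manager swallows the ImportError so the module can
+# fail at run time with the missing_required_lib() message instead.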
+
+
+class MSimple(ModuleHelper):
+ output_params = ('a', 'b', 'c')
+ module = dict(
+ argument_spec=dict(
+ a=dict(type='int'),
+ b=dict(type='str'),
+ c=dict(type='str'),
+ ),
+ )
+
+ def __init_module__(self):
+ self.vars.set('value', None)
+ self.vars.set('abc', "abc", diff=True)
+
+ def __run__(self):
+ if (0 if self.vars.a is None else self.vars.a) >= 100:
+ raise Exception("a >= 100")
+ if self.vars.c == "abc change":
+ self.vars['abc'] = "changed abc"
+ if self.vars.get('a', 0) == 2:
+ self.vars['b'] = str(self.vars.b) * 2
+ self.vars['c'] = str(self.vars.c) * 2
+
+
+def main():
+ msimple = MSimple()
+ msimple.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimple.py b/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimple.py
new file mode 100644
index 000000000..096e51524
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimple.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: msimple
+author: "Alexei Znamensky (@russoz)"
+short_description: Simple module for testing
+description:
+ - Simple module test description.
+options:
+ a:
+ description: aaaa
+ type: int
+ b:
+ description: bbbb
+ type: str
+ c:
+ description: cccc
+ type: str
+'''
+
+EXAMPLES = ""
+
+RETURN = ""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.mh.deco import check_mode_skip
+
+
+class MSimple(ModuleHelper):
+ output_params = ('a', 'b', 'c', 'm')
+ module = dict(
+ argument_spec=dict(
+ a=dict(type='int', default=0),
+ b=dict(type='str'),
+ c=dict(type='str'),
+ m=dict(type='str'),
+ ),
+ supports_check_mode=True,
+ )
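+ # output_params are echoed into the result; vars created with diff=True are
+ # tracked so that changing them marks the result as changed.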
+
+ def __init_module__(self):
+ self.vars.set('value', None)
+ self.vars.set('abc', "abc", diff=True)
+
+ @check_mode_skip
+ def process_a3_bc(self):
+ if self.vars.a == 3:
+ self.vars['b'] = str(self.vars.b) * 3
+ self.vars['c'] = str(self.vars.c) * 3
+
+ def __run__(self):
+ if self.vars.m:
+ self.vars.msg = self.vars.m
+ if self.vars.a >= 100:
+ raise Exception("a >= 100")
+ if self.vars.c == "abc change":
+ self.vars['abc'] = "changed abc"
+ if self.vars.get('a', 0) == 2:
+ self.vars['b'] = str(self.vars.b) * 2
+ self.vars['c'] = str(self.vars.c) * 2
+ self.process_a3_bc()
+
+
+def main():
+ msimple = MSimple()
+ msimple.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimpleda.py b/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimpleda.py
new file mode 100644
index 000000000..c21c3d2ea
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/library/msimpleda.py
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: msimpleda
+author: "Alexei Znamensky (@russoz)"
+short_description: Simple module for testing DeprecationAttrsMixin
+description:
+ - Simple module test description.
+options:
+ a:
+ description: aaaa
+ type: int
+'''
+
+EXAMPLES = ""
+
+RETURN = ""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import ( # noqa: F401, pylint: disable=unused-import
+ DeprecateAttrsMixin
+)
+
+
+class MSimpleDA(ModuleHelper):
+ output_params = ('a',)
+ module = dict(
+ argument_spec=dict(
+ a=dict(type='int'),
+ ),
+ )
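+ # DeprecateAttrsMixin wraps access to attr2 (below) so that reading it adds
+ # an entry to the module's deprecation warnings.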
+
+ attr1 = "abc"
+ attr2 = "def"
+
+ def __init_module__(self):
+ self._deprecate_attr(
+ "attr2",
+ msg="Attribute attr2 is deprecated",
+ version="9.9.9",
+ collection_name="community.general",
+ target=self.__class__,
+ module=self.module,
+ )
+
+ def __run__(self):
+ if self.vars.a == 1:
+ self.vars.attr1 = self.attr1
+ if self.vars.a == 2:
+ self.vars.attr2 = self.attr2
+
+
+def main():
+ MSimpleDA.execute()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/library/mstate.py b/ansible_collections/community/general/tests/integration/targets/module_helper/library/mstate.py
new file mode 100644
index 000000000..bfaab0375
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/library/mstate.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Alexei Znamensky <russoz@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+module: mstate
+author: "Alexei Znamensky (@russoz)"
+short_description: State-based module for testing
+description:
+ - State-based module test description.
+options:
+ a:
+ description: aaaa
+ type: int
+ required: true
+ b:
+ description: bbbb
+ type: str
+ c:
+ description: cccc
+ type: str
+ state:
+ description: test states
+ type: str
+ choices: [join, b_x_a, c_x_a, both_x_a]
+ default: join
+'''
+
+EXAMPLES = ""
+
+RETURN = ""
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+
+class MState(StateModuleHelper):
+ output_params = ('a', 'b', 'c', 'state')
+ module = dict(
+ argument_spec=dict(
+ a=dict(type='int', required=True),
+ b=dict(type='str'),
+ c=dict(type='str'),
+ state=dict(type='str', choices=['join', 'b_x_a', 'c_x_a', 'both_x_a', 'nop'], default='join'),
+ ),
+ )
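+ # StateModuleHelper dispatches to the state_<value>() method matching the
+ # 'state' parameter.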
+
+ def __init_module__(self):
+ self.vars.set('result', "abc", diff=True)
+
+ def state_join(self):
+ self.vars['result'] = "".join([str(self.vars.a), str(self.vars.b), str(self.vars.c)])
+
+ def state_b_x_a(self):
+ self.vars['result'] = str(self.vars.b) * self.vars.a
+
+ def state_c_x_a(self):
+ self.vars['result'] = str(self.vars.c) * self.vars.a
+
+ def state_both_x_a(self):
+ self.vars['result'] = (str(self.vars.b) + str(self.vars.c)) * self.vars.a
+
+ def state_nop(self):
+ pass
+
+
+def main():
+ mstate = MState()
+ mstate.run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/main.yml
new file mode 100644
index 000000000..2368cfcbb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/main.yml
@@ -0,0 +1,9 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: msimple.yml
+- include_tasks: msimple_output_conflict.yml
+- include_tasks: mdepfail.yml
+- include_tasks: mstate.yml
+- include_tasks: msimpleda.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mdepfail.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mdepfail.yml
new file mode 100644
index 000000000..1655be54e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mdepfail.yml
@@ -0,0 +1,18 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test failing dependency
+ mdepfail:
+ a: 123
+ ignore_errors: true
+ register: result
+
+- name: assert failing dependency
+ assert:
+ that:
+ - result is failed
+ - '"Failed to import" in result.msg'
+ - '"nopackagewiththisname" in result.msg'
+ - '"ModuleNotFoundError:" in result.exception or "ImportError:" in result.exception'
+ - '"nopackagewiththisname" in result.exception'
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple.yml
new file mode 100644
index 000000000..03818639c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple.yml
@@ -0,0 +1,85 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test msimple (set a=80)
+ msimple:
+ a: 80
+ register: simple1
+
+- name: assert simple1
+ assert:
+ that:
+ - simple1.a == 80
+ - simple1.abc == "abc"
+ - simple1 is not changed
+ - simple1.value is none
+
+- name: test msimple 2
+ msimple:
+ a: 101
+ ignore_errors: true
+ register: simple2
+
+- name: assert simple2
+ assert:
+ that:
+ - simple2.a == 101
+ - 'simple2.msg == "Module failed with exception: a >= 100"'
+ - simple2.abc == "abc"
+ - simple2 is failed
+ - simple2 is not changed
+ - simple2.value is none
+
+- name: test msimple 3
+ msimple:
+ a: 2
+ b: potatoes
+ register: simple3
+
+- name: assert simple3
+ assert:
+ that:
+ - simple3.a == 2
+ - simple3.b == "potatoespotatoes"
+ - simple3.c == "NoneNone"
+ - simple3 is not changed
+
+- name: test msimple 4
+ msimple:
+ c: abc change
+ register: simple4
+
+- name: assert simple4
+ assert:
+ that:
+ - simple4.c == "abc change"
+ - simple4.abc == "changed abc"
+ - simple4 is changed
+
+- name: test msimple 5a
+ msimple:
+ a: 3 # should triple b and c
+ b: oh
+ c: my
+ register: simple5a
+
+- name: test msimple 5b
+ check_mode: true
+ msimple:
+ a: 3 # should triple b and c
+ b: oh
+ c: my
+ register: simple5b
+
+- name: assert simple5
+ assert:
+ that:
+ - simple5a.a == 3
+ - simple5a.b == "ohohoh"
+ - simple5a.c == "mymymy"
+ - simple5a is not changed
+ - simple5b.a == 3
+ - simple5b.b == "oh"
+ - simple5b.c == "my"
+ - simple5b is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple_output_conflict.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple_output_conflict.yml
new file mode 100644
index 000000000..62fe1e327
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimple_output_conflict.yml
@@ -0,0 +1,54 @@
+# Copyright (c) 2023, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
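+# When the module fails, its failure text takes over the 'msg' key and the
+# user-supplied message is preserved under '_msg' instead.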
+- name: test msimple conflict output (set a=80)
+ msimple:
+ a: 80
+ register: simple1
+
+- name: assert simple1
+ assert:
+ that:
+ - simple1.a == 80
+ - simple1.abc == "abc"
+ - simple1 is not changed
+ - simple1.value is none
+
+- name: test msimple conflict output 2
+ msimple:
+ a: 80
+ m: a message in a bottle
+ register: simple2
+
+- name: assert simple2
+ assert:
+ that:
+ - simple2.a == 80
+ - simple2.abc == "abc"
+ - simple2 is not changed
+ - simple2.value is none
+ - >
+ "_msg" not in simple2
+ - >
+ simple2.msg == "a message in a bottle"
+
+- name: test msimple 3
+ msimple:
+ a: 101
+ m: a message in a bottle
+ ignore_errors: true
+ register: simple3
+
+- name: assert simple3
+ assert:
+ that:
+ - simple3.a == 101
+ - >
+ simple3.msg == "Module failed with exception: a >= 100"
+ - >
+ simple3._msg == "a message in a bottle"
+ - simple3.abc == "abc"
+ - simple3 is failed
+ - simple3 is not changed
+ - simple3.value is none
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimpleda.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimpleda.yml
new file mode 100644
index 000000000..e01b65e12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/msimpleda.yml
@@ -0,0 +1,39 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
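+# ansible-core releases before 2.10 omit collection_name from deprecation
+# entries, hence the second fact without that key.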
+- set_fact:
+ attr2_d:
+ msg: Attribute attr2 is deprecated
+ version: 9.9.9
+ collection_name: community.general
+ attr2_d_29:
+ msg: Attribute attr2 is deprecated
+ version: 9.9.9
+- set_fact:
+ attr2_depr_dict: "{{ ((ansible_version.major, ansible_version.minor) < (2, 10))|ternary(attr2_d_29, attr2_d) }}"
+
+- name: test msimpleda 1
+ msimpleda:
+ a: 1
+ register: simple1
+
+- name: assert simple1
+ assert:
+ that:
+ - simple1.a == 1
+ - simple1.attr1 == "abc"
+ - ("deprecations" not in simple1) or attr2_depr_dict not in simple1.deprecations
+
+- name: test msimpleda 2
+ msimpleda:
+ a: 2
+ register: simple2
+
+- name: assert simple2
+ assert:
+ that:
+ - simple2.a == 2
+ - simple2.attr2 == "def"
+ - '"deprecations" in simple2'
+ - attr2_depr_dict in simple2.deprecations
diff --git a/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mstate.yml b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mstate.yml
new file mode 100644
index 000000000..40c695f6d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/module_helper/tasks/mstate.yml
@@ -0,0 +1,83 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: test mstate 1
+ mstate:
+ a: 80
+ b: banana
+ c: cashew
+ state: nop
+ register: state1
+
+- name: assert state1
+ assert:
+ that:
+ - state1.a == 80
+ - state1.b == "banana"
+ - state1.c == "cashew"
+ - state1.result == "abc"
+ - state1 is not changed
+
+- name: test mstate 2
+ mstate:
+ a: 80
+ b: banana
+ c: cashew
+ register: state2
+
+- name: assert state2
+ assert:
+ that:
+ - state2.a == 80
+ - state2.b == "banana"
+ - state2.c == "cashew"
+ - state2.result == "80bananacashew"
+ - state2 is changed
+
+- name: test mstate 3
+ mstate:
+ a: 3
+ b: banana
+ state: b_x_a
+ register: state3
+
+- name: assert state3
+ assert:
+ that:
+ - state3.a == 3
+ - state3.b == "banana"
+ - state3.result == "bananabananabanana"
+ - state3 is changed
+
+- name: test mstate 4
+ mstate:
+ a: 4
+ c: cashew
+ state: c_x_a
+ register: state4
+
+- name: assert state4
+ assert:
+ that:
+ - state4.a == 4
+ - state4.c == "cashew"
+ - state4.result == "cashewcashewcashewcashew"
+ - state4 is changed
+
+- name: test mstate 5
+ mstate:
+ a: 5
+ b: foo
+ c: bar
+ state: both_x_a
+ register: state5
+
+- name: assert state5
+ assert:
+ that:
+ - state5.a == 5
+ - state5.b == "foo"
+ - state5.c == "bar"
+ - state5.result == "foobarfoobarfoobarfoobarfoobar"
+ - state5 is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/aliases b/ansible_collections/community/general/tests/integration/targets/monit/aliases
new file mode 100644
index 000000000..ca39d1353
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/aliases
@@ -0,0 +1,14 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+needs/target/setup_epel
+skip/osx
+skip/macos
+skip/freebsd
+skip/aix
+skip/python2.6 # python-daemon package used in integration tests requires >=2.7
+skip/rhel # FIXME
+unstable # TODO: the tests fail a lot; 'unstable' only requires them to pass when the module itself has been modified
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml
new file mode 100644
index 000000000..ec064643c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+process_root: /opt/httpd_echo
+process_file: "{{ process_root }}/httpd_echo.py"
+process_venv: "{{ process_root }}/venv"
+process_run_cmd: "{{ process_venv }}/bin/python {{ process_file }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py b/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py
new file mode 100644
index 000000000..cd77da26b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/files/httpd_echo.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2020, Simon Kelly <simongdkelly@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import daemon
+
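+# BaseHTTPServer only exists on Python 2; the Python 3 http.server equivalent
+# expects bytes rather than str on the output stream.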
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+ def write_to_output(stream, content):
+ stream.write(content)
+except ImportError:
+ from http.server import BaseHTTPRequestHandler, HTTPServer
+
+ def write_to_output(stream, content):
+ stream.write(bytes(content, "utf-8"))
+
+
+hostname = "localhost"
+server_port = 8082
+
+
+class EchoServer(BaseHTTPRequestHandler):
+ def do_GET(self):
+ self.send_response(200)
+ self.send_header("Content-type", "text/plain")
+ self.end_headers()
+ write_to_output(self.wfile, self.path)
+
+
+def run_webserver():
+ webServer = HTTPServer((hostname, server_port), EchoServer)
+ print("Server started http://%s:%s" % (hostname, server_port))
+
+ try:
+ webServer.serve_forever()
+ except KeyboardInterrupt:
+ pass
+
+ webServer.server_close()
+ print("Server stopped.")
+
+
+if __name__ == "__main__":
+ context = daemon.DaemonContext()
+
+ with context:
+ run_webserver()
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml
new file mode 100644
index 000000000..2d6cafb56
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml
new file mode 100644
index 000000000..bc8eb7c81
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/check_state.yml
@@ -0,0 +1,21 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
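+# Poll the echo service until it reports the expected state, retrying up to
+# five times at one-second intervals.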
+- name: "{{ reason }} ('up')"
+ command: "curl -sf http://localhost:8082/hello"
+ when: service_state == 'up'
+ register: curl_result
+ until: not curl_result.failed
+ retries: 5
+ delay: 1
+
+- name: "{{ reason }} ('down')"
+ command: "curl -sf http://localhost:8082/hello"
+ register: curl_result
+ failed_when: curl_result.rc == 0
+ when: service_state == 'down'
+ until: not curl_result.failed
+ retries: 5
+ delay: 1
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml
new file mode 100644
index 000000000..ea8595412
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/main.yml
@@ -0,0 +1,99 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
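+# The always section guarantees monit is stopped and removed even if a test
+# step fails.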
+- block:
+ - name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
+
+ - name: create required directories
+ become: true
+ file:
+ path: "{{ item }}"
+ state: directory
+ loop:
+ - /var/lib/monit
+ - /var/run/monit
+ - "{{ process_root }}"
+
+ - name: install monit
+ become: true
+ package:
+ name: monit
+ state: present
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
+
+ - name: monit config
+ become: true
+ template:
+ src: "monitrc.j2"
+ dest: "{{ monitrc }}"
+
+ - name: copy process file
+ become: true
+ copy:
+ src: httpd_echo.py
+ dest: "{{ process_file }}"
+
+ - name: Install virtualenv on CentOS 8
+ package:
+ name: virtualenv
+ state: present
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+
+ - name: Install virtualenv on Arch Linux
+ pip:
+ name: virtualenv
+ state: present
+ when: ansible_os_family == 'Archlinux'
+
+ - name: install dependencies
+ pip:
+ name: "{{ item }}"
+ virtualenv: "{{ process_venv }}"
+ extra_args: "-c {{ remote_constraints }}"
+ loop:
+ - setuptools==44
+ - python-daemon
+
+ - name: restart monit
+ become: true
+ service:
+ name: monit
+ state: restarted
+
+ - include_tasks: test.yml
+
+ always:
+ - name: stop monit
+ become: true
+ service:
+ name: monit
+ state: stopped
+
+ - name: uninstall monit
+ become: true
+ package:
+ name: monit
+ state: absent
+
+ - name: remove process files
+ file:
+ path: "{{ process_root }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml
new file mode 100644
index 000000000..42fd033c7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test.yml
@@ -0,0 +1,33 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# order is important
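+# (each test_state run leaves the service in the state the next run expects)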
+- import_tasks: test_reload_present.yml
+
+- import_tasks: test_state.yml
+ vars:
+ state: stopped
+ initial_state: up
+ expected_state: down
+
+- import_tasks: test_state.yml
+ vars:
+ state: started
+ initial_state: down
+ expected_state: up
+
+- import_tasks: test_state.yml
+ vars:
+ state: unmonitored
+ initial_state: up
+ expected_state: down
+
+- import_tasks: test_state.yml
+ vars:
+ state: monitored
+ initial_state: down
+ expected_state: up
+
+- import_tasks: test_errors.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml
new file mode 100644
index 000000000..362672387
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_errors.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Check an error occurs when wrong process name is used
+ monit:
+ name: missing
+ state: started
+ register: result
+ failed_when: result is not skip and (result is success or result is not failed)
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml
new file mode 100644
index 000000000..0bd6cd073
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_reload_present.yml
@@ -0,0 +1,65 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: reload monit when process is missing
+ monit:
+ name: httpd_echo
+ state: reloaded
+ register: result
+
+- name: check that state is changed
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- name: test process not present
+ monit:
+ name: httpd_echo
+ state: present
+ timeout: 5
+ register: result
+ failed_when: result is not skip and result is success
+
+- name: test monitor missing process
+ monit:
+ name: httpd_echo
+ state: monitored
+ register: result
+ failed_when: result is not skip and result is success
+
+- name: start process
+ shell: "{{ process_run_cmd }}"
+
+- import_tasks: check_state.yml
+ vars:
+ reason: verify service running
+ service_state: "up"
+
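+# The block below registers the test process with monit. "with matching"
+# makes monit track the process by command-line pattern rather than a
+# pidfile, and the port check gives monit a concrete way to decide whether
+# the service is up.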
+- name: add process config
+ blockinfile:
+ path: "{{ monitrc }}"
+ block: |
+ check process httpd_echo with matching "httpd_echo"
+ start program = "{{ process_run_cmd }}"
+ stop program = "/bin/sh -c 'kill `pgrep -f httpd_echo`'"
+ if failed host localhost port 8082 then restart
+
+- name: restart monit
+ service:
+ name: monit
+ state: restarted
+
+- name: test process present again
+ monit:
+ name: httpd_echo
+ state: present
+ register: result
+
+- name: check that state is unchanged
+ assert:
+ that:
+ - result is success
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml
new file mode 100644
index 000000000..33a70c196
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/tasks/test_state.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: check_state.yml
+ vars:
+ reason: verify initial service state
+ service_state: "{{ initial_state }}"
+
+- name: change httpd_echo process state to {{ state }}
+ monit:
+ name: httpd_echo
+ state: "{{ state }}"
+ register: result
+
+- name: check that state changed
+ assert:
+ that:
+ - result is success
+ - result is changed
+
+- import_tasks: check_state.yml
+ vars:
+ reason: check service state after action
+ service_state: "{{ expected_state }}"
+
+- name: try to change state again to {{ state }}
+ monit:
+ name: httpd_echo
+ state: "{{ state }}"
+ register: result
+
+- name: check that state is not changed
+ assert:
+ that:
+ - result is success
+ - result is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2 b/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2
new file mode 100644
index 000000000..4f1a6e247
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/templates/monitrc.j2
@@ -0,0 +1,19 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+
+set daemon 2
+set logfile /var/log/monit.log
+set idfile /var/lib/monit/id
+set statefile /var/lib/monit/state
+set pidfile /var/run/monit.pid
+
+set eventqueue
+ basedir /var/lib/monit/events
+ slots 100
+
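+# The embedded httpd on 2812 is what the monit CLI (and therefore the
+# Ansible monit module) talks to for status and start/stop commands;
+# without it, "monit status" fails.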
+set httpd port 2812 and
+ use address localhost
+ allow localhost
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/Alpine.yml
new file mode 100644
index 000000000..773c9d985
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/Alpine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monitrc"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/Archlinux.yml
new file mode 100644
index 000000000..773c9d985
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/Archlinux.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monitrc"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml
new file mode 100644
index 000000000..9ff9c2641
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/CentOS-6.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monit.conf"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml
new file mode 100644
index 000000000..773c9d985
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/RedHat.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monitrc"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml
new file mode 100644
index 000000000..773c9d985
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/Suse.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monitrc"
diff --git a/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml b/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml
new file mode 100644
index 000000000..74c76c7c9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/monit/vars/defaults.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+monitrc: "/etc/monit/monitrc"
diff --git a/ansible_collections/community/general/tests/integration/targets/mqtt/aliases b/ansible_collections/community/general/tests/integration/targets/mqtt/aliases
new file mode 100644
index 000000000..c25e0b6d5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mqtt/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml
new file mode 100644
index 000000000..a9c2068ed
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mqtt/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_mosquitto
diff --git a/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml
new file mode 100644
index 000000000..0beb1b3b2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: ubuntu.yml
+ when:
+ - ansible_distribution == 'Ubuntu'
+ - ansible_distribution_release not in ['focal', 'jammy']
diff --git a/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml
new file mode 100644
index 000000000..0c0a12d04
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mqtt/tasks/ubuntu.yml
@@ -0,0 +1,147 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
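+# paho-mqtt is the client library behind the mqtt module; 1.4.0 is pinned
+# here, presumably because the explicit tls_version handling exercised
+# below needs a reasonably recent client.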
+- name: Install pip packages
+ pip:
+ name: paho-mqtt>=1.4.0
+ state: present
+
+- name: Send a test message to the non-TLS MQTT endpoint
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo
+ qos: 1
+ client_id: me001
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+- name: Send a test message to TLS1.1 endpoint, no client version specified
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ port: 8883
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+- name: Send a test message to TLS1.2 endpoint, no client version specified
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ port: 8884
+ register: result
+
+- assert:
+ that:
+ - result is success
+
+# TODO: uncomment when TLS 1.3 is supported by the mosquitto and Ubuntu versions in use
+#
+# - name: Send a test message to TLS1.3 endpoint
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# port: 8885
+# register: result
+
+#- assert:
+# that:
+# - result is success
+
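+# The broker set up by setup_mosquitto is assumed to require TLS 1.1 on
+# port 8883 and TLS 1.2 on port 8884, so forcing a mismatched client
+# tls_version below should make the handshake, and therefore the task, fail.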
+- name: Send a message, client TLS1.1, server (required) TLS1.2 - Expected failure
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ tls_version: tlsv1.1
+ port: 8884
+ register: result
+ failed_when: result is success
+
+- assert:
+ that:
+ - result is success
+
+# TODO: uncomment when TLS 1.3 is supported by the mosquitto and Ubuntu versions in use
+#
+# - name: Send a message, client TLS1.1, server (required) TLS1.3 - Expected failure
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# tls_version: tlsv1.1
+# port: 8885
+# register: result
+# failed_when: result is success
+
+# - assert:
+# that:
+# - result is success
+
+- name: Send a message, client TLS1.2, server (required) TLS1.1 - Expected failure
+ mqtt:
+ topic: /node/s/bar/blurb
+ payload: foo-tls
+ qos: 1
+ client_id: me001
+ ca_certs: /tls/ca_certificate.pem
+ certfile: /tls/client_certificate.pem
+ keyfile: /tls/client_key.pem
+ tls_version: tlsv1.2
+ port: 8883
+ register: result
+ failed_when: result is success
+
+- assert:
+ that:
+ - result is success
+
+# TODO: uncomment when TLS 1.3 is supported by the mosquitto and Ubuntu versions in use
+#
+# - name: Send a message, client TLS1.2, server (required) TLS1.3 - Expected failure
+# mqtt:
+# topic: /node/s/bar/blurb
+# payload: foo-tls
+# qos: 1
+# client_id: me001
+# ca_certs: /tls/ca_certificate.pem
+# certfile: /tls/client_certificate.pem
+# keyfile: /tls/client_key.pem
+# tls_version: tlsv1.2
+# port: 8885
+# register: result
+# failed_when: result is success
+
+# - assert:
+# that:
+# - result is success
diff --git a/ansible_collections/community/general/tests/integration/targets/mssql_script/aliases b/ansible_collections/community/general/tests/integration/targets/mssql_script/aliases
new file mode 100644
index 000000000..023e2edce
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mssql_script/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel
+disabled
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/mssql_script/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/mssql_script/defaults/main.yml
new file mode 100644
index 000000000..d1ca77f55
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mssql_script/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+mssql_host: localhost
+mssql_port: 1433
+mssql_login_user: sa
+mssql_login_password: "Abcd!234"
diff --git a/ansible_collections/community/general/tests/integration/targets/mssql_script/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/mssql_script/meta/main.yml
new file mode 100644
index 000000000..5769ff1cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mssql_script/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/general/tests/integration/targets/mssql_script/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/mssql_script/tasks/main.yml
new file mode 100644
index 000000000..6fa4d3501
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/mssql_script/tasks/main.yml
@@ -0,0 +1,246 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# TODO: Find out how to setup mssql server for tests
+# For the moment you have to run the tests locally
+# docker run --name mssql-test -e "ACCEPT_EULA=Y" -e 'SA_PASSWORD={{ mssql_login_password }}' -p "{{ mssql_port }}:1433" -d mcr.microsoft.com/mssql/server:2019-latest
+# ansible-test integration mssql_script -v --allow-disabled
+
+- name: Install pymssql
+ ansible.builtin.pip:
+ name:
+ - pymssql
+ state: present
+
+- name: Start container
+ community.docker.docker_container:
+ name: mssql-test
+ image: "mcr.microsoft.com/mssql/server:2019-latest"
+ env:
+ ACCEPT_EULA: "Y"
+ SA_PASSWORD: "{{ mssql_login_password }}"
+ MSSQL_PID: Developer
+ ports:
+ - "{{ mssql_port }}:1433"
+ detach: true
+ auto_remove: true
+ memory: 2200M
+
+- name: Check default ports
+ ansible.builtin.wait_for:
+ host: "{{ mssql_host }}"
+ port: "{{ mssql_port }}"
+ state: started # Port should be open
+ delay: 10 # Wait 10 secs before first check
+ timeout: 30 # Stop checking after timeout (sec)
+
+- name: Check DB connection
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: "SELECT 1"
+
+- name: two batches with default output
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT 'Batch 0 - Select 0'
+ SELECT 'Batch 0 - Select 1'
+ GO
+ SELECT 'Batch 1 - Select 0'
+ register: result_batches
+# "result_batches.query_results":
+# [ # batches
+# [ # selects
+# [ # Rows
+# [ # Columns
+# "Batch 1 - Select 1"
+# ]
+# ],
+# [
+# [
+# "Batch 1 - Select 2"
+# ]
+# ]
+# ],
+# [
+# [
+# [
+# "Batch 2 - Select 1"
+# ]
+# ]
+# ]
+# ]
+
+- assert:
+ that:
+ - result_batches.query_results | length == 2 # two batch results
+ - result_batches.query_results[0] | length == 2 # two selects in first batch
+ - result_batches.query_results[0][0] | length == 1 # one row in first select
+ - result_batches.query_results[0][0][0] | length == 1 # one column in first row
+ - result_batches.query_results[0][0][0][0] == 'Batch 0 - Select 0' # first column of first row
+
+
+- name: two batches with dict output
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ output: dict
+ script: |
+ SELECT 'Batch 0 - Select 0' as b0s0
+ SELECT 'Batch 0 - Select 1' as b0s1
+ GO
+ SELECT 'Batch 1 - Select 0' as b1s0
+ register: result_batches_dict
+# "result_batches_dict.query_results":
+# [ # batches
+# [ # selects
+# [ # Rows
+# { # dict columns
+# "b0s0": "Batch 0 - Select 0"
+# }
+# ],
+# [
+# {
+# "b0s1": "Batch 0 - Select 1"
+# }
+# ]
+# ],
+# [
+# [
+# {
+# "b1s0": "Batch 1 - Select 0"
+# }
+# ]
+# ]
+# ]
+- assert:
+ that:
+ - result_batches_dict.query_results_dict | length == 2 # two batch results
+ - result_batches_dict.query_results_dict[0] | length == 2 # two selects in first batch
+ - result_batches_dict.query_results_dict[0][0] | length == 1 # one row in first select
+ - result_batches_dict.query_results_dict[0][0][0]['b0s0'] == 'Batch 0 - Select 0' # column 'b0s0' of first row
+
+- name: Stored procedure may return multiple result sets
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: sp_spaceused
+ output: dict
+ register: result_spaceused
+- assert:
+ that:
+ - result_spaceused.query_results_dict | length == 1 # one batch
+ - result_spaceused.query_results_dict[0] | length == 2 # stored procedure returns two result sets
+ - result_spaceused.query_results_dict[0][0][0]['database_name'] == 'master' # output dict
+
+- name: Ensure that passed 'db' is used
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: exec sp_spaceused
+ output: dict
+ db: msdb
+ register: result_db
+- assert:
+ that:
+ - result_db.query_results_dict[0][0][0]['database_name'] == 'msdb'
+
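+# pymssql follows the DB-API "pyformat" paramstyle: named placeholders are
+# written as %(dbname)s in the script and bound from the params dict.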
+- name: pass params to query
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s
+ params:
+ dbname: msdb
+ register: result_params
+- assert:
+ that:
+ - result_params.query_results[0][0][0][0] == 'msdb'
+ - result_params.query_results[0][0][0][1] == 'ONLINE'
+
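+# In check mode the module is expected to open the connection but never
+# send the script, so even deliberately invalid SQL must not produce
+# query_results.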
+- name: check_mode connects but does not run the query
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: SELECT Invalid_Column FROM Does_Not_Exist WITH Invalid Syntax
+ check_mode: true
+ register: check_mode
+- assert:
+ that: check_mode.query_results is undefined
+
+- name: "Test: Value of unknown type: <class 'uuid.UUID'>"
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT service_broker_guid, * FROM sys.databases WHERE name = 'master'
+ register: result_databases
+- debug:
+ var: result_databases
+- name: check types
+ assert:
+ that:
+ - result_databases.query_results[0][0][0][0] == '00000000-0000-0000-0000-000000000000' # guid
+ - result_databases.query_results[0][0][0][1] == 'master' # string
+ - result_databases.query_results[0][0][0][3] == None # NULL column
+ - result_databases.query_results[0][0][0][4] == "b'\\x01'" # byte string representation
+ - result_databases.query_results[0][0][0][6] == 150 # int
+ - result_databases.query_results[0][0][0][10] == false # bool
+
+- name: "Test: Value of unknown type: <class 'uuid.UUID'>-dict"
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ output: dict
+ script: |
+ SELECT service_broker_guid, * FROM sys.databases
+
+# Known issue: empty result set breaks return values
+- name: empty result set
+ community.general.mssql_script:
+ login_user: "{{ mssql_login_user }}"
+ login_password: "{{ mssql_login_password }}"
+ login_host: "{{ mssql_host }}"
+ login_port: "{{ mssql_port }}"
+ script: |
+ SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s
+ SELECT name, state_desc FROM sys.databases WHERE name = 'DoesNotexist'
+ SELECT name, state_desc FROM sys.databases WHERE name = %(dbname)s
+ params:
+ dbname: msdb
+ register: empty_result
+- assert:
+ that:
+ - empty_result.query_results[0] | length == 3 # expected 3 result sets; known issue: only the first is returned, so this can fail
+ - empty_result.query_results[0][0][0][0] == 'msdb'
+ - empty_result.query_results[0][1] | length == 0
+ - empty_result.query_results[0][2][0][0] == 'msdb'
+ failed_when: false # known issue
diff --git a/ansible_collections/community/general/tests/integration/targets/nomad/aliases b/ansible_collections/community/general/tests/integration/targets/nomad/aliases
new file mode 100644
index 000000000..ad2435c82
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/nomad/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+nomad_job_info
+destructive
+skip/aix
+skip/centos6
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl b/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl
new file mode 100644
index 000000000..8f01f0439
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/nomad/files/job.hcl
@@ -0,0 +1,400 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# There can only be a single job definition per file. This job is named
+# "example" so it will create a job with the ID and Name "example".
+
+# The "job" stanza is the top-most configuration option in the job
+# specification. A job is a declarative specification of tasks that Nomad
+# should run. Jobs have a globally unique name and one or many task groups,
+# which are themselves collections of one or many tasks.
+#
+# For more information and examples on the "job" stanza, please see
+# the online documentation at:
+#
+# https://www.nomadproject.io/docs/job-specification/job.html
+#
+job "example" {
+ # The "region" parameter specifies the region in which to execute the job.
+ # If omitted, this inherits the default region name of "global".
+ # region = "global"
+ #
+ # The "datacenters" parameter specifies the list of datacenters which should
+ # be considered when placing this task. This must be provided.
+ datacenters = ["dc1"]
+
+ # The "type" parameter controls the type of job, which impacts the scheduler's
+ # decision on placement. This configuration is optional and defaults to
+ # "service". For a full list of job types and their differences, please see
+ # the online documentation.
+ #
+ # For more information, please see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/jobspec/schedulers.html
+ #
+ type = "service"
+
+
+ # The "constraint" stanza defines additional constraints for placing this job,
+ # in addition to any resource or driver constraints. This stanza may be placed
+ # at the "job", "group", or "task" level, and supports variable interpolation.
+ #
+ # For more information and examples on the "constraint" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/constraint.html
+ #
+ # constraint {
+ # attribute = "${attr.kernel.name}"
+ # value = "linux"
+ # }
+
+ # The "update" stanza specifies the update strategy of task groups. The update
+ # strategy is used to control things like rolling upgrades, canaries, and
+ # blue/green deployments. If omitted, no update strategy is enforced. The
+ # "update" stanza may be placed at the job or task group. When placed at the
+ # job, it applies to all groups within the job. When placed at both the job and
+ # group level, the stanzas are merged with the group's taking precedence.
+ #
+ # For more information and examples on the "update" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/update.html
+ #
+ update {
+ # The "max_parallel" parameter specifies the maximum number of updates to
+ # perform in parallel. In this case, this specifies to update a single task
+ # at a time.
+ max_parallel = 1
+
+ # The "min_healthy_time" parameter specifies the minimum time the allocation
+ # must be in the healthy state before it is marked as healthy and unblocks
+ # further allocations from being updated.
+ min_healthy_time = "10s"
+
+ # The "healthy_deadline" parameter specifies the deadline in which the
+ # allocation must be marked as healthy after which the allocation is
+ # automatically transitioned to unhealthy. Transitioning to unhealthy will
+ # fail the deployment and potentially roll back the job if "auto_revert" is
+ # set to true.
+ healthy_deadline = "3m"
+
+ # The "progress_deadline" parameter specifies the deadline in which an
+ # allocation must be marked as healthy. The deadline begins when the first
+ # allocation for the deployment is created and is reset whenever an allocation
+ # as part of the deployment transitions to a healthy state. If no allocation
+ # transitions to the healthy state before the progress deadline, the
+ # deployment is marked as failed.
+ progress_deadline = "10m"
+
+ # The "auto_revert" parameter specifies if the job should auto-revert to the
+ # last stable job on deployment failure. A job is marked as stable if all the
+ # allocations as part of its deployment were marked healthy.
+ auto_revert = false
+
+ # The "canary" parameter specifies that changes to the job that would result
+ # in destructive updates should create the specified number of canaries
+ # without stopping any previous allocations. Once the operator determines the
+ # canaries are healthy, they can be promoted which unblocks a rolling update
+ # of the remaining allocations at a rate of "max_parallel".
+ #
+ # Further, setting "canary" equal to the count of the task group allows
+ # blue/green deployments. When the job is updated, a full set of the new
+ # version is deployed and upon promotion the old version is stopped.
+ canary = 0
+ }
+ # The migrate stanza specifies the group's strategy for migrating off of
+ # draining nodes. If omitted, a default migration strategy is applied.
+ #
+ # For more information on the "migrate" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/migrate.html
+ #
+ migrate {
+ # Specifies the number of task groups that can be migrated at the same
+ # time. This number must be less than the total count for the group as
+ # (count - max_parallel) will be left running during migrations.
+ max_parallel = 1
+
+ # Specifies the mechanism in which allocations health is determined. The
+ # potential values are "checks" or "task_states".
+ health_check = "checks"
+
+ # Specifies the minimum time the allocation must be in the healthy state
+ # before it is marked as healthy and unblocks further allocations from being
+ # migrated. This is specified using a label suffix like "30s" or "15m".
+ min_healthy_time = "10s"
+
+ # Specifies the deadline in which the allocation must be marked as healthy
+ # after which the allocation is automatically transitioned to unhealthy. This
+ # is specified using a label suffix like "2m" or "1h".
+ healthy_deadline = "5m"
+ }
+ # The "group" stanza defines a series of tasks that should be co-located on
+ # the same Nomad client. Any task within a group will be placed on the same
+ # client.
+ #
+ # For more information and examples on the "group" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/group.html
+ #
+ group "cache" {
+ # The "count" parameter specifies the number of the task groups that should
+ # be running under this group. This value must be non-negative and defaults
+ # to 1.
+ count = 1
+
+ # The "restart" stanza configures a group's behavior on task failure. If
+ # left unspecified, a default restart policy is used based on the job type.
+ #
+ # For more information and examples on the "restart" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/restart.html
+ #
+ restart {
+ # The number of attempts to run the job within the specified interval.
+ attempts = 2
+ interval = "30m"
+
+ # The "delay" parameter specifies the duration to wait before restarting
+ # a task after it has failed.
+ delay = "15s"
+
+ # The "mode" parameter controls what happens when a task has restarted
+ # "attempts" times within the interval. "delay" mode delays the next
+ # restart until the next interval. "fail" mode does not restart the task
+ # if "attempts" has been hit within the interval.
+ mode = "fail"
+ }
+
+ # The "ephemeral_disk" stanza instructs Nomad to utilize an ephemeral disk
+ # instead of a hard disk requirement. Clients using this stanza should
+ # not specify disk requirements in the resources stanza of the task. All
+ # tasks in this group will share the same ephemeral disk.
+ #
+ # For more information and examples on the "ephemeral_disk" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html
+ #
+ ephemeral_disk {
+ # When sticky is true and the task group is updated, the scheduler
+ # will prefer to place the updated allocation on the same node and
+ # will migrate the data. This is useful for tasks that store data
+ # that should persist across allocation updates.
+ # sticky = true
+ #
+ # Setting migrate to true causes the allocation directory of a
+ # sticky allocation to be migrated.
+ # migrate = true
+ #
+ # The "size" parameter specifies the size in MB of shared ephemeral disk
+ # between tasks in the group.
+ size = 300
+ }
+
+ # The "affinity" stanza enables operators to express placement preferences
+ # based on node attributes or metadata.
+ #
+ # For more information and examples on the "affinity" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/affinity.html
+ #
+ # affinity {
+ # attribute specifies the name of a node attribute or metadata
+ # attribute = "${node.datacenter}"
+
+
+ # value specifies the desired attribute value. In this example Nomad
+ # will prefer placement in the "us-west1" datacenter.
+ # value = "us-west1"
+
+
+ # weight can be used to indicate relative preference
+ # when the job has more than one affinity. It defaults to 50 if not set.
+ # weight = 100
+ # }
+
+
+ # The "spread" stanza allows operators to increase the failure tolerance of
+ # their applications by specifying a node attribute that allocations
+ # should be spread over.
+ #
+ # For more information and examples on the "spread" stanza, please
+ # see the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/spread.html
+ #
+ # spread {
+ # attribute specifies the name of a node attribute or metadata
+ # attribute = "${node.datacenter}"
+
+
+ # targets can be used to define desired percentages of allocations
+ # for each targeted attribute value.
+ #
+ # target "us-east1" {
+ # percent = 60
+ # }
+ # target "us-west1" {
+ # percent = 40
+ # }
+ # }
+
+ # The "task" stanza creates an individual unit of work, such as a Docker
+ # container, web application, or batch processing.
+ #
+ # For more information and examples on the "task" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/task.html
+ #
+ task "redis" {
+ # The "driver" parameter specifies the task driver that should be used to
+ # run the task.
+ driver = "docker"
+
+ # The "config" stanza specifies the driver configuration, which is passed
+ # directly to the driver to start the task. The details of configurations
+ # are specific to each driver, so please see specific driver
+ # documentation for more information.
+ config {
+ image = "redis:3.2"
+
+ port_map {
+ db = 6379
+ }
+ }
+
+ # The "artifact" stanza instructs Nomad to download an artifact from a
+ # remote source prior to starting the task. This provides a convenient
+ # mechanism for downloading configuration files or data needed to run the
+ # task. It is possible to specify the "artifact" stanza multiple times to
+ # download multiple artifacts.
+ #
+ # For more information and examples on the "artifact" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/artifact.html
+ #
+ # artifact {
+ # source = "http://foo.com/artifact.tar.gz"
+ # options {
+ # checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
+ # }
+ # }
+
+
+ # The "logs" stanza instructs the Nomad client on how many log files and
+ # the maximum size of those logs files to retain. Logging is enabled by
+ # default, but the "logs" stanza allows for finer-grained control over
+ # the log rotation and storage configuration.
+ #
+ # For more information and examples on the "logs" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/logs.html
+ #
+ # logs {
+ # max_files = 10
+ # max_file_size = 15
+ # }
+
+ # The "resources" stanza describes the requirements a task needs to
+ # execute. Resource requirements include memory, network, cpu, and more.
+ # This ensures the task will execute on a machine that contains enough
+ # resource capacity.
+ #
+ # For more information and examples on the "resources" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/resources.html
+ #
+ resources {
+ cpu = 500 # 500 MHz
+ memory = 256 # 256MB
+
+ network {
+ mbits = 10
+ port "db" {}
+ }
+ }
+ # The "service" stanza instructs Nomad to register this task as a service
+ # in the service discovery engine, which is currently Consul. This will
+ # make the service addressable after Nomad has placed it on a host and
+ # port.
+ #
+ # For more information and examples on the "service" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/service.html
+ #
+ service {
+ name = "redis-cache"
+ tags = ["global", "cache"]
+ port = "db"
+
+ check {
+ name = "alive"
+ type = "tcp"
+ interval = "10s"
+ timeout = "2s"
+ }
+ }
+
+ # The "template" stanza instructs Nomad to manage a template, such as
+ # a configuration file or script. This template can optionally pull data
+ # from Consul or Vault to populate runtime configuration data.
+ #
+ # For more information and examples on the "template" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/template.html
+ #
+ # template {
+ # data = "---\nkey: {{ key \"service/my-key\" }}"
+ # destination = "local/file.yml"
+ # change_mode = "signal"
+ # change_signal = "SIGHUP"
+ # }
+
+ # The "template" stanza can also be used to create environment variables
+ # for tasks that prefer those to config files. The task will be restarted
+ # when data pulled from Consul or Vault changes.
+ #
+ # template {
+ # data = "KEY={{ key \"service/my-key\" }}"
+ # destination = "local/file.env"
+ # env = true
+ # }
+
+ # The "vault" stanza instructs the Nomad client to acquire a token from
+ # a HashiCorp Vault server. The Nomad servers must be configured and
+ # authorized to communicate with Vault. By default, Nomad will inject
+ # the token into the job via an environment variable and make the token
+ # available to the "template" stanza. The Nomad client handles the renewal
+ # and revocation of the Vault token.
+ #
+ # For more information and examples on the "vault" stanza, please see
+ # the online documentation at:
+ #
+ # https://www.nomadproject.io/docs/job-specification/vault.html
+ #
+ # vault {
+ # policies = ["cdn", "frontend"]
+ # change_mode = "signal"
+ # change_signal = "SIGHUP"
+ # }
+
+ # Controls the timeout between signalling a task that it will be killed
+ # and actually killing it. If not set, a default is used.
+ # kill_timeout = "20s"
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml
new file mode 100644
index 000000000..0909be206
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/nomad/meta/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_openssl
+ - setup_remote_tmp_dir
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml
new file mode 100644
index 000000000..1a143be05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/nomad/tasks/main.yml
@@ -0,0 +1,111 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Skip unsupported platforms
+ meta: end_play
+ # TODO: figure out why Alpine does not work!
+ when: |
+ ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>=')
+ or ansible_distribution == 'Alpine'
+
+- name: Install Nomad and test
+ vars:
+ nomad_version: 0.12.4
+ nomad_uri: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_{{ ansible_system | lower }}_{{ nomad_arch }}.zip
+ nomad_cmd: '{{ remote_tmp_dir }}/nomad'
+ block:
+
+ - name: Install requests<2.20 (CentOS/RHEL 6)
+ pip:
+ name: requests<2.20
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+ when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
+
+ - name: Install python-nomad
+ pip:
+ name: python-nomad
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+
+ - name: Install jmespath
+ pip:
+ name: jmespath
+ extra_args: "-c {{ remote_constraints }}"
+ register: result
+ until: result is success
+
+ - name: Generate privatekey
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/privatekey.pem'
+
+ - name: Generate CSR
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ subject:
+ commonName: localhost
+
+ - name: Generate selfsigned certificate
+ register: selfsigned_certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/csr.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/privatekey.pem'
+ provider: selfsigned
+ selfsigned_digest: sha256
+
+ - name: Install unzip
+ package:
+ name: unzip
+ register: result
+ until: result is success
+ when: ansible_distribution != "MacOSX"
+
+ - assert:
+ that: ansible_architecture in ['i386', 'x86_64', 'amd64']
+
+ - set_fact:
+ nomad_arch: '386'
+ when: ansible_architecture == 'i386'
+
+ - set_fact:
+ nomad_arch: amd64
+ when: ansible_architecture in ['x86_64', 'amd64']
+
+ - name: Download nomad binary
+ unarchive:
+ src: '{{ nomad_uri }}'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ register: result
+ until: result is success
+
+ - vars:
+ remote_dir: '{{ echo_remote_tmp_dir.stdout }}'
+ block:
+
+ - command: echo {{ remote_tmp_dir }}
+ register: echo_remote_tmp_dir
+
+ - name: Run integration tests
+ block:
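+ # "agent -dev" starts a throwaway single-node cluster (server and client
+ # in one process, state kept in memory) listening on the default HTTP
+ # port 4646; nohup with all fds redirected lets the shell task return
+ # while the agent keeps running in the background.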
+ - name: Start nomad (dev mode enabled)
+ shell: nohup {{ nomad_cmd }} agent -dev </dev/null >/dev/null 2>&1 &
+
+ - name: Wait for nomad to come up
+ wait_for:
+ host: localhost
+ port: 4646
+ delay: 10
+ timeout: 60
+
+ - import_tasks: nomad_job.yml
+ always:
+
+ - name: kill nomad
+ shell: pkill nomad
diff --git a/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml b/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml
new file mode 100644
index 000000000..2a4f223aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/nomad/tasks/nomad_job.yml
@@ -0,0 +1,111 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
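+# Test flow: deploy the job in check mode, deploy it for real, verify it
+# via nomad_job_info, repeat both deploys to prove idempotence, then delete
+# the job and confirm it is marked as stopped.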
+- name: deploy nomad job (check mode)
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_check_deployed
+ check_mode: true
+
+- name: deploy nomad job
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ force_start: true
+ register: job_deployed
+
+- name: get deployed nomad job
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ name: example
+ register: get_nomad_job
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- name: assert job is deployed and tasks are changed
+ assert:
+ that:
+ - job_check_deployed is changed
+ - job_deployed is changed
+ - get_nomad_job.result[0].ID == "example"
+ - list_nomad_jobs.result | length == 1
+
+- name: deploy nomad job again in check mode (idempotence)
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_check_deployed_idempotence
+ check_mode: true
+
+- name: deploy nomad job again (idempotence)
+ nomad_job:
+ host: localhost
+ state: present
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deployed_idempotence
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- debug:
+ msg: "{{ list_nomad_jobs }}"
+
+- name: delete nomad job (check mode)
+ nomad_job:
+ host: localhost
+ state: absent
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deleted_check
+ check_mode: true
+
+- name: delete nomad job
+ nomad_job:
+ host: localhost
+ state: absent
+ use_ssl: false
+ content: "{{ lookup('file', 'job.hcl') }}"
+ register: job_deleted
+
+- name: get deleted nomad job
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ name: example
+ register: get_job_delete
+
+- name: get list of nomad jobs
+ nomad_job_info:
+ host: localhost
+ use_ssl: false
+ register: list_nomad_jobs
+
+- debug:
+ msg: "{{ list_nomad_jobs }}"
+
+- name: assert idempotence
+ assert:
+ that:
+ - job_check_deployed_idempotence is not changed
+ - job_deployed_idempotence is not changed
+ - job_deleted_check is changed
+ - job_deleted is changed
+ - get_job_delete.result[0].Stop
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/aliases b/ansible_collections/community/general/tests/integration/targets/npm/aliases
new file mode 100644
index 000000000..6e2c65f38
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/aix
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml
new file mode 100644
index 000000000..6147ad33e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_gnutar
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml
new file mode 100644
index 000000000..500e15fdb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the npm module
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -------------------------------------------------------------
+# Setup steps
+
+- when:
+ - ansible_os_family != 'Alpine' # TODO: make these tests work on Alpine
+ block:
+
+ # expand remote path
+ - command: 'echo {{ remote_tmp_dir }}'
+ register: echo
+ - set_fact:
+ remote_dir: '{{ echo.stdout }}'
+
+ - include_tasks: run.yml
+ vars:
+ nodejs_version: '{{ item }}'
+ nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+ with_items:
+ - 7.10.1 # provides npm 4.2.0 (last npm < 5 released)
+ - 8.0.0 # provides npm 5.0.0
+ - 8.2.0 # provides npm 5.3.0 (output change with this version)
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/tasks/no_bin_links.yml b/ansible_collections/community/general/tests/integration/targets/npm/tasks/no_bin_links.yml
new file mode 100644
index 000000000..3588f7642
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/tasks/no_bin_links.yml
@@ -0,0 +1,68 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Remove any node modules'
+ file:
+ path: '{{ remote_dir }}/node_modules'
+ state: absent
+
+- vars:
+ # sample: node-v8.2.0-linux-x64.tar.xz
+ node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin'
+ package: 'ncp'
+ block:
+ - shell: npm --version
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_version
+
+ - debug:
+ var: npm_version.stdout
+
+ - name: 'Install simple package with no_bin_links disabled'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ no_bin_links: false
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_install_no_bin_links_disabled
+
+ - name: 'Make sure .bin folder has been created'
+ stat:
+ path: "{{ remote_dir }}/node_modules/.bin"
+ register: npm_dotbin_folder_disabled
+
+ - name: 'Remove any node modules'
+ file:
+ path: '{{ remote_dir }}/node_modules'
+ state: absent
+
+ - name: 'Install simple package with no_bin_links enabled'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ no_bin_links: true
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_install_no_bin_links_enabled
+
+ - name: 'Make sure .bin folder has not been created'
+ stat:
+ path: "{{ remote_dir }}/node_modules/.bin"
+ register: npm_dotbin_folder_enabled
+
+ - assert:
+ that:
+ - npm_install_no_bin_links_disabled is success
+ - npm_install_no_bin_links_disabled is changed
+ - npm_install_no_bin_links_enabled is success
+ - npm_install_no_bin_links_enabled is changed
+ - npm_dotbin_folder_disabled.stat.exists
+ - not npm_dotbin_folder_enabled.stat.exists
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml b/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml
new file mode 100644
index 000000000..9ce380270
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/tasks/run.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: setup.yml
+- include_tasks: test.yml
+- include_tasks: no_bin_links.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml
new file mode 100644
index 000000000..bad927915
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/tasks/setup.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Download Node.js (bundles npm)'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/npm/{{ nodejs_path }}.tar.gz'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}' # skip if already extracted
diff --git a/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml
new file mode 100644
index 000000000..c8e83f602
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/npm/tasks/test.yml
@@ -0,0 +1,74 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Remove any node modules'
+ file:
+ path: '{{ remote_dir }}/node_modules'
+ state: absent
+
+- vars:
+ # sample: node-v8.2.0-linux-x64.tar.xz
+ node_path: '{{ remote_dir }}/{{ nodejs_path }}/bin'
+ package: 'iconv-lite'
+ block:
+ - shell: npm --version
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_version
+
+ - debug:
+ var: npm_version.stdout
+
+ - name: 'Install simple package without dependency'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_install
+
+ - assert:
+ that:
+ - npm_install is success
+ - npm_install is changed
+
+ - name: 'Reinstall simple package without dependency'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_reinstall
+
+ - name: Check there is no change
+ assert:
+ that:
+ - npm_reinstall is success
+ - not (npm_reinstall is changed)
+
+ - name: 'Manually delete package'
+ file:
+ path: '{{ remote_dir }}/node_modules/{{ package }}'
+ state: absent
+
+ - name: 'Reinstall simple package after manual deletion'
+ npm:
+ path: '{{ remote_dir }}'
+ executable: '{{ node_path }}/npm'
+ state: present
+ name: '{{ package }}'
+ environment:
+ PATH: '{{ node_path }}:{{ ansible_env.PATH }}'
+ register: npm_fix_install
+
+ - name: Check result is changed and successful
+ assert:
+ that:
+ - npm_fix_install is success
+ - npm_fix_install is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/aliases b/ansible_collections/community/general/tests/integration/targets/odbc/aliases
new file mode 100644
index 000000000..e8465c50e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/osx
+skip/macos
+skip/rhel8.0
+skip/rhel9.0
+skip/rhel9.1
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml
new file mode 100644
index 000000000..dd75f5471
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/defaults/main.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# defaults file for the odbc integration tests
+my_user: 'ansible_user'
+my_pass: 'md5d5e044ccd9b4b8adc89e8fed2eb0db8a'
+my_pass_decrypted: '6EjMk<hcX3<5(Yp?Xi5aQ8eS`a#Ni'
+dsn: "DRIVER={PostgreSQL};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+packages:
+ Alpine:
+ - psqlodbc
+ - unixodbc
+ - unixodbc-dev
+ - g++
+ Archlinux:
+ - unixodbc
+ RedHat:
+ - postgresql-odbc
+ - unixODBC
+ - unixODBC-devel
+ - gcc
+ - gcc-c++
+ Debian:
+ - odbc-postgresql
+ - unixodbc
+ - unixodbc-dev
+ - gcc
+ - g++
+ Suse:
+ - psqlODBC
+ - unixODBC
+ - unixODBC-devel
+ - gcc
+ - gcc-c++
+ FreeBSD:
+ - unixODBC
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml
new file mode 100644
index 000000000..0d06eaa39
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_postgresql_db
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml
new file mode 100644
index 000000000..e0cefe14d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/install_pyodbc.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Install {{ ansible_os_family }} Libraries"
+ package:
+ name: "{{ packages[ansible_os_family] }}"
+
+- name: "Install pyodbc"
+ pip:
+ name: pyodbc
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml
new file mode 100644
index 000000000..ce55ea8aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/main.yml
@@ -0,0 +1,158 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when:
+ - ansible_os_family != 'Archlinux' # TODO install driver from AUR: https://aur.archlinux.org/packages/psqlodbc
+ block:
+
+ #
+ # Test for proper failures without pyodbc
+ #
+  # Some of the Docker images already have pyodbc preinstalled
+ - include_tasks: no_pyodbc.yml
+    when: ansible_os_family not in ['FreeBSD', 'Suse', 'Debian']
+
+ #
+ # Get pyodbc installed
+ #
+ - include_tasks: install_pyodbc.yml
+
+ #
+ # Test missing parameters & invalid DSN
+ #
+ - include_tasks: negative_tests.yml
+
+ #
+ # Setup DSN per env
+ #
+  - name: Changing DSN for Suse and Alpine
+ set_fact:
+ dsn: "DRIVER={PSQL};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+ when: ansible_os_family == 'Suse' or ansible_os_family == 'Alpine'
+
+ - name: Changing DSN for Alpine
+ set_fact:
+ dsn: "DRIVER={/usr/lib/psqlodbcw.so};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+ when: ansible_os_family == 'Alpine'
+
+ - name: Changing DSN for Debian
+ set_fact:
+ dsn: "DRIVER={PostgreSQL Unicode};Server=localhost;Port=5432;Database=postgres;Uid={{ my_user }};Pwd={{ my_pass_decrypted }};UseUnicode=True"
+ when: ansible_os_family == 'Debian'
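+
+  # The DRIVER={...} token must name a driver registered with unixODBC (or,
+  # as in the Alpine case, give the absolute path of the driver .so); names
+  # vary with how each distribution packages psqlODBC, hence the overrides.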
+
+ #
+  # Set up the test database
+ #
+ - name: Create a user to run the tests with
+ shell: echo "CREATE USER {{ my_user }} SUPERUSER PASSWORD '{{ my_pass }}'" | psql postgres
+ become_user: "{{ pg_user }}"
+ become: true
+
+ - name: Create a table
+ odbc:
+ dsn: "{{ dsn }}"
+ query: |
+ CREATE TABLE films (
+ code char(5) CONSTRAINT firstkey PRIMARY KEY,
+ title varchar(40) NOT NULL,
+ did integer NOT NULL,
+ date_prod date,
+ kind varchar(10),
+ len interval hour to minute
+ );
+ become_user: "{{ pg_user }}"
+ become: true
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+
+ #
+ # Insert records
+ #
+ - name: Insert a record without params
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "INSERT INTO films (code, title, did, date_prod, kind, len) VALUES ('asdfg', 'My First Movie', 1, '2019-01-12', 'SyFi', '02:00')"
+ become_user: "{{ pg_user }}"
+ become: true
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+
+ - name: Insert a record with params
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "INSERT INTO films (code, title, did, date_prod, kind, len) VALUES (?, ?, ?, ?, ?, ?)"
+ params:
+ - 'qwert'
+ - 'My Second Movie'
+ - 2
+ - '2019-01-12'
+ - 'Comedy'
+ - '01:30'
+ become_user: "{{ pg_user }}"
+ become: true
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results['row_count'] == -1
+ - results['results'] == []
+ - results['description'] == []
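+
+  # INSERT produces no result set, so the module returns empty results and
+  # description; row_count is -1, the value a pyodbc cursor reports when no
+  # row count is available for the executed statement.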
+
+ #
+ # Select data
+ #
+  - name: Select a single row without params (do not coerce changed)
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "SELECT * FROM films WHERE code='asdfg'"
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results is successful
+ - results.row_count == 1
+
+  - name: Select multiple rows with params (coerce changed)
+ odbc:
+ dsn: "{{ dsn }}"
+ query: 'SELECT * FROM films WHERE code=? or code=?'
+ params:
+ - 'asdfg'
+ - 'qwert'
+ register: results
+ changed_when: false
+
+ - assert:
+ that:
+ - results is not changed
+ - results is successful
+ - results.row_count == 2
+
+ - name: Drop the table
+ odbc:
+ dsn: "{{ dsn }}"
+ query: "DROP TABLE films"
+ register: results
+
+ - assert:
+ that:
+ - results is successful
+ - results is changed
+ - results['row_count'] == -1
+ - results['results'] == []
+ - results['description'] == []
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml
new file mode 100644
index 000000000..f779e6a53
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/negative_tests.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#
+# Missing params for the module
+# There is nothing you need to do here because the params are required
+#
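+# A minimal sketch of such a call (illustrative only, not executed here --
+# Ansible's argument validation rejects it before the module runs):
+#
+# - name: Call odbc without the required query parameter
+#   odbc:
+#     dsn: "t1"
+#   register: results
+#   ignore_errors: true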
+
+#
+# Invalid DSN in the module
+#
+- name: "Test with an invalid DSN"
+ odbc:
+ dsn: "t1"
+ query: "SELECT * FROM nothing"
+ register: results
+ ignore_errors: true
+
+- assert:
+ that:
+ - results is failed
+ - "'Failed to connect to DSN' in results.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml
new file mode 100644
index 000000000..3a52d85a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/odbc/tasks/no_pyodbc.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Testing the module without pyodbc
+ odbc:
+ dsn: "Test"
+ query: "SELECT * FROM nothing"
+ ignore_errors: true
+ register: results
+
+- assert:
+ that:
+ - results is failed
+ - "'Failed to import the required Python library (pyodbc) on' in results.msg"
diff --git a/ansible_collections/community/general/tests/integration/targets/one_host/aliases b/ansible_collections/community/general/tests/integration/targets/one_host/aliases
new file mode 100644
index 000000000..100ba0f97
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_host/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/generic/1
+cloud/opennebula
+disabled # FIXME - when this is fixed, also re-enable the generic tests in CI!
diff --git a/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz b/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz
new file mode 100644
index 000000000..8b67b548a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz
Binary files differ
diff --git a/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz.license b/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml
new file mode 100644
index 000000000..1a4a42fb5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_host/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_opennebula
diff --git a/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml
new file mode 100644
index 000000000..ffd5ac04c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_host/tasks/main.yml
@@ -0,0 +1,243 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the one_host module
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ENVIRONMENT PREPARATION
+
+- set_fact: test_number=0
+
+- name: "test_{{test_number}}: copy fixtures to test host"
+ copy:
+ src: testhost/tmp/opennebula-fixtures.json.gz
+ dest: /tmp
+ when:
+ - opennebula_test_fixture
+ - opennebula_test_fixture_replay
+
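+# The PYONE_TEST_FIXTURE* variables used throughout this file drive pyone's
+# fixture record/replay support: in replay mode the modules read canned
+# XML-RPC responses from the fixtures archive instead of contacting a live
+# OpenNebula endpoint, keyed per test by PYONE_TEST_FIXTURE_UNIT.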
+
+# SETUP INITIAL TESTING CONDITION
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: ensure the tests hosts are absent"
+ one_host:
+ name: "{{ item }}"
+ state: absent
+ api_endpoint: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_token: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+# NONEXISTENT HOSTS
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: attempt to enable a host that does not exists"
+ one_host:
+ name: badhost
+ state: "{{item}}"
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{item}}"
+ ignore_errors: true
+ register: result
+ with_items:
+ - enabled
+ - disabled
+ - offline
+
+- name: "assert test_{{test_number}} failed"
+ assert:
+ that:
+ - result is failed
+ - result.results[0].msg == 'invalid host state ERROR'
+
+# ---
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: delete an unexisting host"
+ one_host:
+ name: badhost
+ state: absent
+ validate_certs: false
+ environment:
+ ONE_URL: "{{ opennebula_url }}"
+ ONE_USERNAME: "{{ opennebula_username }}"
+ ONE_PASSWORD: "{{ opennebula_password }}"
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# HOST ENABLEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+
+- name: "test_{{test_number}}: enable the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# TEMPLATE MANAGEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: setup template values on hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ template:
+ LABELS:
+ - test
+ - custom
+ TEST_VALUE: 2
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# ---
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: setup equivalent template values on hosts"
+ one_host:
+ name: "{{ item }}"
+ state: enabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ labels:
+ - test
+ - custom
+ attributes:
+ TEST_VALUE: "2"
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result is not changed
+
+# HOST DISABLEMENT
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: disable the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: disabled
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# HOST OFFLINE
+
+- set_fact: test_number={{ test_number | int + 1 }}
+
+- name: "test_{{test_number}}: offline the test hosts"
+ one_host:
+ name: "{{ item }}"
+ state: offline
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ validate_certs: false
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: "test_{{test_number}}_{{ item }}"
+ with_items: "{{opennebula_test.hosts}}"
+ register: result
+
+- name: "assert test_{{test_number}} worked"
+ assert:
+ that:
+ - result.changed
+
+# TEARDOWN
+
+- name: fetch fixtures
+ fetch:
+ src: /tmp/opennebula-fixtures.json.gz
+ dest: targets/one_host/files
+ when:
+ - opennebula_test_fixture
+ - not opennebula_test_fixture_replay
diff --git a/ansible_collections/community/general/tests/integration/targets/one_template/aliases b/ansible_collections/community/general/tests/integration/targets/one_template/aliases
new file mode 100644
index 000000000..100ba0f97
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_template/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/generic/1
+cloud/opennebula
+disabled # FIXME - when this is fixed, also re-enable the generic tests in CI!
diff --git a/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz b/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz
new file mode 100644
index 000000000..169451a22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz
Binary files differ
diff --git a/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz.license b/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/one_template/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/one_template/meta/main.yml
new file mode 100644
index 000000000..1a4a42fb5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_template/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_opennebula
diff --git a/ansible_collections/community/general/tests/integration/targets/one_template/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/one_template/tasks/main.yml
new file mode 100644
index 000000000..58bca9c6c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/one_template/tasks/main.yml
@@ -0,0 +1,246 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the one_template module
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ENVIRONMENT PREPARATION
+
+- name: "copy fixtures to test host"
+ copy:
+ src: testhost/tmp/opennebula-fixtures.json.gz
+ dest: /tmp
+ when:
+ - opennebula_test_fixture
+ - opennebula_test_fixture_replay
+
+
+# Create a new template
+
+- name: "Create a new TEMPLATE"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test
+ template: |
+ CONTEXT = [
+ HOSTNAME = "ansible-onetemplate",
+ NETWORK = "YES",
+ SSH_PUBLIC_KEY = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKAQwTkU84eEnhX3r60Mn5TPh99BDxyCNJu12OB5sfMu foxy@FoxPad",
+ USERNAME = "root" ]
+ CPU = "1"
+ CUSTOM_ATTRIBUTE = ""
+ DISK = [
+ CACHE = "writeback",
+ DEV_PREFIX = "sd",
+ DISCARD = "unmap",
+ IMAGE = "ansible-onetemplate",
+ IMAGE_UNAME = "oneadmin",
+ IO = "threads",
+ SIZE = "" ]
+ FEATURES = [
+ VIRTIO_SCSI_QUEUES = "2" ]
+ GRAPHICS = [
+ KEYMAP = "de",
+ LISTEN = "0.0.0.0",
+ TYPE = "VNC" ]
+ MEMORY = "2048"
+ NIC = [
+ MODEL = "virtio",
+ NETWORK = "tf-prd-centos",
+ NETWORK_UNAME = "oneadmin" ]
+ OS = [
+ ARCH = "x86_64",
+ BOOT = "disk0" ]
+ SCHED_REQUIREMENTS = "CLUSTER_ID=\"100\""
+ VCPU = "2"
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: test_create_template
+ register: result
+
+- name: "assert that creation worked"
+ assert:
+ that:
+ - result is changed
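+
+# The template body above is written in OpenNebula's own template syntax:
+# flat KEY = "value" attributes, with vector attributes grouped inside
+# NAME = [ ... ] blocks. The update below differs from it only in MEMORY
+# (2048 -> 4096).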
+
+
+# Updating a template
+
+- name: "Update an existing TEMPLATE"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test
+ template: |
+ CONTEXT = [
+ HOSTNAME = "ansible-onetemplate",
+ NETWORK = "YES",
+ SSH_PUBLIC_KEY = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKAQwTkU84eEnhX3r60Mn5TPh99BDxyCNJu12OB5sfMu foxy@FoxPad",
+ USERNAME = "root" ]
+ CPU = "1"
+ CUSTOM_ATTRIBUTE = ""
+ DISK = [
+ CACHE = "writeback",
+ DEV_PREFIX = "sd",
+ DISCARD = "unmap",
+ IMAGE = "ansible-onetemplate",
+ IMAGE_UNAME = "oneadmin",
+ IO = "threads",
+ SIZE = "" ]
+ FEATURES = [
+ VIRTIO_SCSI_QUEUES = "2" ]
+ GRAPHICS = [
+ KEYMAP = "de",
+ LISTEN = "0.0.0.0",
+ TYPE = "VNC" ]
+ MEMORY = "4096"
+ NIC = [
+ MODEL = "virtio",
+ NETWORK = "tf-prd-centos",
+ NETWORK_UNAME = "oneadmin" ]
+ OS = [
+ ARCH = "x86_64",
+ BOOT = "disk0" ]
+ SCHED_REQUIREMENTS = "CLUSTER_ID=\"100\""
+ VCPU = "2"
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: test_update_existing_template
+ register: result
+
+- name: "assert that it updated the template"
+ assert:
+ that:
+ - result is changed
+
+- name: "Update an existing TEMPLATE with the same changes again"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test
+ template: |
+ CONTEXT = [
+ HOSTNAME = "ansible-onetemplate",
+ NETWORK = "YES",
+ SSH_PUBLIC_KEY = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKAQwTkU84eEnhX3r60Mn5TPh99BDxyCNJu12OB5sfMu foxy@FoxPad",
+ USERNAME = "root" ]
+ CPU = "1"
+ CUSTOM_ATTRIBUTE = ""
+ DISK = [
+ CACHE = "writeback",
+ DEV_PREFIX = "sd",
+ DISCARD = "unmap",
+ IMAGE = "ansible-onetemplate",
+ IMAGE_UNAME = "oneadmin",
+ IO = "threads",
+ SIZE = "" ]
+ FEATURES = [
+ VIRTIO_SCSI_QUEUES = "2" ]
+ GRAPHICS = [
+ KEYMAP = "de",
+ LISTEN = "0.0.0.0",
+ TYPE = "VNC" ]
+ MEMORY = "4096"
+ NIC = [
+ MODEL = "virtio",
+ NETWORK = "tf-prd-centos",
+ NETWORK_UNAME = "oneadmin" ]
+ OS = [
+ ARCH = "x86_64",
+ BOOT = "disk0" ]
+ SCHED_REQUIREMENTS = "CLUSTER_ID=\"100\""
+ VCPU = "2"
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: test_update_existing_and_already_updated_template
+ register: result
+
+- name: "assert that there was no change"
+ assert:
+ that:
+ - result is not changed
+
+
+# Deletion of templates
+
+- name: "Delete a nonexisting TEMPLATE"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test-nonexisting
+ state: absent
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: test_delete_nonexisting_template
+ register: result
+
+- name: "assert that there was no change"
+ assert:
+ that:
+ - result is not changed
+
+- name: "Delete an existing TEMPLATE"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test
+ state: absent
+ environment:
+ PYONE_TEST_FIXTURE: "{{ opennebula_test_fixture }}"
+ PYONE_TEST_FIXTURE_FILE: /tmp/opennebula-fixtures.json.gz
+ PYONE_TEST_FIXTURE_REPLAY: "{{ opennebula_test_fixture_replay }}"
+ PYONE_TEST_FIXTURE_UNIT: test_delete_existing_template
+ register: result
+
+- name: "assert that there was a change"
+ assert:
+ that:
+ - result is changed
+
+
+# Usage without `template` parameter
+
+- name: "Try to create use one_template with state=present and without the template parameter"
+ one_template:
+ api_url: "{{ opennebula_url }}"
+ api_username: "{{ opennebula_username }}"
+ api_password: "{{ opennebula_password }}"
+ name: ansible-onetemplate-test
+ state: present
+ register: result
+ ignore_errors: true
+
+- name: "assert that it failed because template is missing"
+ assert:
+ that:
+ - result is failed
+
+
+# TEARDOWN
+
+- name: "fetch fixtures"
+ fetch:
+ src: /tmp/opennebula-fixtures.json.gz
+    dest: targets/one_template/files
+ when:
+ - opennebula_test_fixture
+ - not opennebula_test_fixture_replay
diff --git a/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases b/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases
new file mode 100644
index 000000000..bd478505d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/osx_defaults/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/rhel
+skip/docker
diff --git a/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml
new file mode 100644
index 000000000..f7bcb8944
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/osx_defaults/tasks/main.yml
@@ -0,0 +1,255 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the osx_defaults module.
+# Copyright (c) 2019, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Check if value is required for present
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ state: present
+ register: missing_value
+ ignore_errors: true
+
+- name: Test if state and value are required together
+ assert:
+ that:
+ - "'following are missing: value' in '{{ missing_value['msg'] }}'"
+
+- name: Change value of AppleMeasurementUnits to centimeter in check_mode
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: Centimeter
+ state: present
+ register: measure_task_check_mode
+ check_mode: true
+
+- name: Test if AppleMeasurementUnits value is changed to Centimeters in check_mode
+ assert:
+ that:
+ - measure_task_check_mode.changed
+
+- name: Find the current value of AppleMeasurementUnits
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ state: list
+ register: apple_measure_value
+
+- debug:
+ msg: "{{ apple_measure_value['value'] }}"
+
+- set_fact:
+ new_value: "Centimeters"
+ when: apple_measure_value['value'] == 'Inches' or apple_measure_value['value'] == None
+
+- set_fact:
+ new_value: "Inches"
+ when: apple_measure_value['value'] == 'Centimeters'
+
+- name: Change value of AppleMeasurementUnits to {{ new_value }}
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: "{{ new_value }}"
+ state: present
+ register: change_value
+
+- name: Test if AppleMeasurementUnits value is changed to {{ new_value }}
+ assert:
+ that:
+ - change_value.changed
+
+- name: Again change value of AppleMeasurementUnits to {{ new_value }}
+ osx_defaults:
+ domain: NSGlobalDomain
+ key: AppleMeasurementUnits
+ type: string
+ value: "{{ new_value }}"
+ state: present
+ register: change_value
+
+- name: Again test if AppleMeasurementUnits value is not changed to {{ new_value }}
+ assert:
+ that:
+ - not change_value.changed
+
+- name: Check a fake setting for delete operation
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: list
+ register: list_fake_value
+
+- debug:
+ msg: "{{ list_fake_value }}"
+
+- name: Check if fake value is listed
+ assert:
+ that:
+ - not list_fake_value.changed
+
+- name: Create a fake setting for delete operation
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: present
+ value: sample
+ register: present_fake_value
+
+- debug:
+ msg: "{{ present_fake_value }}"
+
+- name: Check if fake setting is created
+ assert:
+ that:
+ - present_fake_value.changed
+ when: present_fake_value.changed
+
+- name: List a fake setting
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: list
+ register: list_fake
+
+- debug:
+ msg: "{{ list_fake }}"
+
+- name: Delete a fake setting
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_task
+
+- debug:
+ msg: "{{ absent_task }}"
+
+- name: Check if fake setting is deleted
+ assert:
+ that:
+ - absent_task.changed
+ when: present_fake_value.changed
+
+- name: Try deleting a fake setting again
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_task
+
+- debug:
+ msg: "{{ absent_task }}"
+
+- name: Check if fake setting is not deleted
+ assert:
+ that:
+ - not absent_task.changed
+
+- name: Delete operation in check_mode
+ osx_defaults:
+ domain: com.ansible.fake_value
+ key: ExampleKeyToRemove
+ state: absent
+ register: absent_check_mode_task
+ check_mode: true
+
+- debug:
+ msg: "{{ absent_check_mode_task }}"
+
+- name: Check delete operation with check mode
+ assert:
+ that:
+ - not absent_check_mode_task.changed
+
+
+- name: Use different data types and check that the module handles them
+ osx_defaults:
+ domain: com.ansible.fake_values
+ key: "{{ item.key }}"
+ type: "{{ item.type }}"
+ value: "{{ item.value }}"
+ state: present
+ with_items: &data_type
+ - { type: 'int', value: 1, key: 'sample_int'}
+ - { type: 'integer', value: 1, key: 'sample_int_2'}
+ - { type: 'integer', value: -1, key: 'negative_int'}
+ - { type: 'bool', value: true, key: 'sample_bool'}
+ - { type: 'boolean', value: true, key: 'sample_bool_2'}
+ - { type: 'date', value: "2019-02-19 10:10:10", key: 'sample_date'}
+ - { type: 'float', value: 1.2, key: 'sample_float'}
+ - { type: 'string', value: 'sample', key: 'sample_string'}
+ - { type: 'array', value: ['1', '2'], key: 'sample_array'}
+ register: test_data_types
+
+- assert:
+ that: "{{ item.changed }}"
+ with_items: "{{ test_data_types.results }}"
+
+- name: Use different data types and delete them
+ osx_defaults:
+ domain: com.ansible.fake_values
+ key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ type: "{{ item.type }}"
+ state: absent
+ with_items: *data_type
+ register: test_data_types
+
+- assert:
+ that: "{{ item.changed }}"
+ with_items: "{{ test_data_types.results }}"
+
+
+- name: Ensure test key does not exist
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ state: absent
+
+- name: Add array value for the first time
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ value:
+ - 'Value with spaces'
+ type: array
+ array_add: true
+ register: test_array_add
+
+- assert:
+ that: test_array_add.changed
+
+- name: Add for the second time, should be skipped
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ value:
+ - 'Value with spaces'
+ type: array
+ array_add: true
+ register: test_array_add
+
+- assert:
+ that: not test_array_add.changed
+
+- name: Clean up test key
+ osx_defaults:
+ domain: com.ansible.fake_array_value
+ key: ExampleArrayKey
+ state: absent
+ register: test_array_add
+
+- assert:
+ that: test_array_add.changed
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/aliases b/ansible_collections/community/general/tests/integration/targets/pacman/aliases
new file mode 100644
index 000000000..1d25c0193
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/pacman/meta/main.yml
new file mode 100644
index 000000000..08ce20c21
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/basic.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/basic.yml
new file mode 100644
index 000000000..ae2f9c0b5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/basic.yml
@@ -0,0 +1,86 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- vars:
+ package_name: unarj
+ block:
+ - name: Make sure that {{ package_name }} is not installed
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+
+ - name: Install {{ package_name }} (check mode)
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+ check_mode: true
+ register: install_1
+
+ - name: Install {{ package_name }}
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+ register: install_2
+
+ - name: Install {{ package_name }} (check mode, idempotent)
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+ check_mode: true
+ register: install_3
+
+ - name: Install {{ package_name }} (idempotent)
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+ register: install_4
+
+ - assert:
+ that:
+ - install_1 is changed
+ - install_1.msg == 'Would have installed 1 packages'
+ - install_2 is changed
+ - install_2.msg == 'Installed 1 package(s)'
+ - install_3 is not changed
+ - install_3.msg == 'package(s) already installed'
+ - install_4 is not changed
+ - install_4.msg == 'package(s) already installed'
+
+ - name: Uninstall {{ package_name }} (check mode)
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ check_mode: true
+ register: uninstall_1
+
+ - name: Uninstall {{ package_name }}
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ register: uninstall_2
+
+ - name: Uninstall {{ package_name }} (check mode, idempotent)
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ check_mode: true
+ register: uninstall_3
+
+ - name: Uninstall {{ package_name }} (idempotent)
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ register: uninstall_4
+
+ - assert:
+ that:
+ - uninstall_1 is changed
+ - uninstall_1.msg == 'Would have removed 1 packages'
+ - uninstall_2 is changed
+ - uninstall_2.msg == 'Removed 1 package(s)'
+ - uninstall_3 is not changed
+ - uninstall_3.msg == 'package(s) already absent'
+ - uninstall_4 is not changed
+ - uninstall_4.msg == 'package(s) already absent'
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/locally_installed_package.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/locally_installed_package.yml
new file mode 100644
index 000000000..a5f183236
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/locally_installed_package.yml
@@ -0,0 +1,85 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- vars:
+ package_name: ansible-test-foo
+ username: ansible-regular-user
+ block:
+ - name: Install fakeroot
+ pacman:
+ state: present
+ name:
+ - fakeroot
+
+ - name: Create user
+ user:
+ name: '{{ username }}'
+ home: '/home/{{ username }}'
+ create_home: true
+
+ - name: Create directory
+ file:
+ path: '/home/{{ username }}/{{ package_name }}'
+ state: directory
+ owner: '{{ username }}'
+
+ - name: Create PKGBUILD
+ copy:
+ dest: '/home/{{ username }}/{{ package_name }}/PKGBUILD'
+ content: |
+ pkgname=('{{ package_name }}')
+ pkgver=1.0.0
+ pkgrel=1
+ pkgdesc="Test removing a local package not in the repositories"
+ arch=('any')
+ license=('GPL v3+')
+ owner: '{{ username }}'
+
+ - name: Build package
+ command:
+ cmd: su {{ username }} -c "makepkg -srf"
+ chdir: '/home/{{ username }}/{{ package_name }}'
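+
+    # makepkg refuses to run as root, so the build is delegated to the
+    # unprivileged user via su; fakeroot (installed above) lets it stage
+    # files with root ownership, producing the .pkg.tar.zst installed next.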
+
+ - name: Install package
+ pacman:
+ state: present
+ name:
+ - '/home/{{ username }}/{{ package_name }}/{{ package_name }}-1.0.0-1-any.pkg.tar.zst'
+
+ - name: Remove package (check mode)
+ pacman:
+ state: absent
+ name:
+ - '{{ package_name }}'
+ check_mode: true
+ register: remove_1
+
+ - name: Remove package
+ pacman:
+ state: absent
+ name:
+ - '{{ package_name }}'
+ register: remove_2
+
+ - name: Remove package (idempotent)
+ pacman:
+ state: absent
+ name:
+ - '{{ package_name }}'
+ register: remove_3
+
+ - name: Check conditions
+ assert:
+ that:
+ - remove_1 is changed
+ - remove_2 is changed
+ - remove_3 is not changed
+
+ always:
+ - name: Remove directory
+ file:
+        path: '/home/{{ username }}/{{ package_name }}'
+ state: absent
+ become: true
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/main.yml
new file mode 100644
index 000000000..12d28a2d3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/main.yml
@@ -0,0 +1,19 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when: ansible_os_family == 'Archlinux'
+ block:
+ # Add more tests here by including more task files:
+ - include_tasks: 'basic.yml'
+ - include_tasks: 'package_urls.yml'
+ - include_tasks: 'remove_nosave.yml'
+ - include_tasks: 'update_cache.yml'
+ - include_tasks: 'locally_installed_package.yml'
+ - include_tasks: 'reason.yml'
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/package_urls.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/package_urls.yml
new file mode 100644
index 000000000..4df531285
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/package_urls.yml
@@ -0,0 +1,219 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- vars:
+ http_port: 27617
+ reg_pkg: ed
+ url_pkg: lemon
+ url_pkg_filename: url.pkg.zst
+ url_pkg_path: '/tmp/'
+ url_pkg_url: 'http://localhost:{{http_port}}/{{url_pkg_filename}}'
+ file_pkg: hdparm
+ file_pkg_path: /tmp/file.pkg.zst
+ extra_pkg: core/sdparm
+ extra_pkg_outfmt: sdparm
+ block:
+ - name: Make sure that test packages are not installed
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg}}'
+ - '{{file_pkg}}'
+ - '{{extra_pkg}}'
+ state: absent
+ - name: Make sure that url package is not cached
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Get URL for {{url_pkg}}
+ command:
+ cmd: pacman --sync --print-format "%l" {{url_pkg}}
+ register: url_pkg_stdout
+ - name: Download {{url_pkg}} pkg
+ get_url:
+ url: '{{url_pkg_stdout.stdout}}'
+ dest: '{{url_pkg_path}}/{{url_pkg_filename}}'
+ - name: Download {{url_pkg}} pkg sig
+ get_url:
+ url: '{{url_pkg_stdout.stdout}}.sig'
+ dest: '{{url_pkg_path}}/{{url_pkg_filename}}.sig'
+ - name: Host {{url_pkg}}
+ shell:
+ cmd: 'python -m http.server --directory {{url_pkg_path}} {{http_port}} >/dev/null 2>&1'
+ async: 90
+ poll: 0
+ - name: Wait for http.server to come up online
+ wait_for:
+ host: 'localhost'
+ port: '{{http_port}}'
+ state: started
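+
+    # `async: 90` with `poll: 0` is the fire-and-forget pattern: http.server
+    # keeps running in the background (for at most 90 seconds) while the
+    # play continues, and wait_for guards against racing the listener.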
+
+ - name: Get URL for {{file_pkg}}
+ command:
+ cmd: pacman --sync --print-format "%l" {{file_pkg}}
+ register: file_pkg_url
+ - name: Download {{file_pkg}} pkg
+ get_url:
+ url: '{{file_pkg_url.stdout}}'
+ dest: '{{file_pkg_path}}'
+
+ - name: Install packages from mixed sources (check mode)
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ check_mode: true
+ register: install_1
+
+ - name: Install packages from url (check mode, cached)
+ pacman:
+ name:
+ - '{{url_pkg_url}}'
+ check_mode: true
+ register: install_1c
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Install packages from mixed sources
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ register: install_2
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Install packages from mixed sources - (idempotency)
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ register: install_3
+ - name: Install packages from url - (idempotency, cached)
+ pacman:
+ name:
+ - '{{url_pkg_url}}'
+ register: install_3c
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Install packages with their regular names (idempotency)
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg}}'
+ - '{{file_pkg}}'
+ register: install_4
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Install new package with already installed packages from mixed sources
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ - '{{extra_pkg}}'
+ register: install_5
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Uninstall packages - mixed sources (check mode)
+ pacman:
+ state: absent
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ check_mode: true
+ register: uninstall_1
+ - name: Uninstall packages - url (check mode, cached)
+ pacman:
+ state: absent
+ name:
+ - '{{url_pkg_url}}'
+ check_mode: true
+ register: uninstall_1c
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Uninstall packages - mixed sources
+ pacman:
+ state: absent
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ register: uninstall_2
+ - name: Delete cached {{url_pkg}}
+ file:
+ path: '/var/cache/pacman/pkg/{{url_pkg_filename}}'
+ state: absent
+
+ - name: Uninstall packages - mixed sources (idempotency)
+ pacman:
+ state: absent
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url}}'
+ - '{{file_pkg_path}}'
+ register: uninstall_3
+
+ - name: Uninstall package - url (idempotency, cached)
+ pacman:
+ state: absent
+ name:
+ - '{{url_pkg_url}}'
+ register: uninstall_3c
+
+ - assert:
+ that:
+ - install_1 is changed
+ - install_1.msg == 'Would have installed 3 packages'
+ - install_1.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
+ - install_1c is changed
+ - install_1c.msg == 'Would have installed 1 packages'
+ - install_1c.packages|sort() == [url_pkg]
+ - install_2 is changed
+ - install_2.msg == 'Installed 3 package(s)'
+ - install_2.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
+ - install_3 is not changed
+ - install_3.msg == 'package(s) already installed'
+ - install_3c is not changed
+ - install_3c.msg == 'package(s) already installed'
+ - install_4 is not changed
+ - install_4.msg == 'package(s) already installed'
+ - install_5 is changed
+ - install_5.msg == 'Installed 1 package(s)'
+ - install_5.packages == [extra_pkg_outfmt]
+ - uninstall_1 is changed
+ - uninstall_1.msg == 'Would have removed 3 packages'
+ - uninstall_1.packages | length() == 3 # pkgs have versions here
+ - uninstall_1c is changed
+ - uninstall_1c.msg == 'Would have removed 1 packages'
+ - uninstall_1c.packages | length() == 1 # pkgs have versions here
+ - uninstall_2 is changed
+ - uninstall_2.msg == 'Removed 3 package(s)'
+ - uninstall_2.packages | length() == 3
+ - uninstall_3 is not changed
+ - uninstall_3.msg == 'package(s) already absent'
+ - uninstall_3c is not changed
+ - uninstall_3c.msg == 'package(s) already absent'
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/reason.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/reason.yml
new file mode 100644
index 000000000..5a26e3e10
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/reason.yml
@@ -0,0 +1,101 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- vars:
+ reg_pkg: ed
+ url_pkg: lemon
+ file_pkg: hdparm
+ file_pkg_path: /tmp/pkg.zst
+ extra_pkg: core/sdparm
+ extra_pkg_outfmt: sdparm
+ block:
+ - name: Make sure that test packages are not installed
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg}}'
+ - '{{file_pkg}}'
+ - '{{extra_pkg}}'
+ state: absent
+
+ - name: Get URL for {{url_pkg}}
+ command:
+ cmd: pacman --sync --print-format "%l" {{url_pkg}}
+ register: url_pkg_url
+
+ - name: Get URL for {{file_pkg}}
+ command:
+ cmd: pacman --sync --print-format "%l" {{file_pkg}}
+ register: file_pkg_url
+ - name: Download {{file_pkg}} pkg
+ get_url:
+ url: '{{file_pkg_url.stdout}}'
+ dest: '{{file_pkg_path}}'
+
+ - name: Install packages from mixed sources as dependency (check mode)
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url.stdout}}'
+ - '{{file_pkg_path}}'
+ reason: dependency
+ check_mode: true
+ register: install_1
+
+ - name: Install packages from mixed sources as explicit
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url.stdout}}'
+ - '{{file_pkg_path}}'
+ reason: explicit
+ register: install_2
+
+ - name: Install packages from mixed sources with new packages being installed as dependency - (idempotency)
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url.stdout}}'
+ - '{{file_pkg_path}}'
+ reason: dependency
+ register: install_3
+
+ - name: Install new package with already installed packages from mixed sources as dependency
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url.stdout}}'
+ - '{{file_pkg_path}}'
+ - '{{extra_pkg}}'
+ reason: dependency
+ register: install_4
+
+ - name: Set install reason for all packages to dependency
+ pacman:
+ name:
+ - '{{reg_pkg}}'
+ - '{{url_pkg_url.stdout}}'
+ - '{{file_pkg_path}}'
+ - '{{extra_pkg}}'
+ reason: dependency
+ reason_for: all
+ register: install_5
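+
+    # `reason` sets pacman's install reason (--asdeps / --asexplicit). By
+    # default it only applies to packages the task newly installs;
+    # `reason_for: all` rewrites the reason on the already-installed
+    # packages too, which is why install_5 reports a change.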
+
+ - assert:
+ that:
+ - install_1 is changed
+ - install_1.msg == 'Would have installed 3 packages'
+ - install_1.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
+ - install_2 is changed
+ - install_2.msg == 'Installed 3 package(s)'
+ - install_2.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
+ - install_3 is not changed
+ - install_3.msg == 'package(s) already installed'
+ - install_4 is changed
+ - install_4.msg == 'Installed 1 package(s)'
+ - install_4.packages == [extra_pkg_outfmt]
+ - install_5 is changed
+ - install_5.msg == 'Installed 3 package(s)'
+ - install_5.packages|sort() == [reg_pkg, url_pkg, file_pkg]|sort()
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/remove_nosave.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/remove_nosave.yml
new file mode 100644
index 000000000..2271ebc03
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/remove_nosave.yml
@@ -0,0 +1,74 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- vars:
+ package_name: xinetd
+ config_file: /etc/xinetd.conf
+ block:
+ - name: Make sure that {{ package_name }} is not installed
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ - name: Make sure {{config_file}}.pacsave file doesn't exist
+ file:
+ path: '{{config_file}}.pacsave'
+ state: absent
+
+ - name: Install {{ package_name }}
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+
+ - name: Modify {{config_file}}
+ blockinfile:
+ path: '{{config_file}}'
+ block: |
+ # something something
+ # on 2 lines
+
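+    # pacman renames modified configuration files to *.pacsave when their
+    # package is removed; the default removal below generates one, while the
+    # later removal with remove_nosave (pacman's --nosave/-n) does not.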
+ - name: Remove {{ package_name }} - generate pacsave
+ pacman:
+ name: '{{ package_name }}'
+ state: absent
+ - name: Make sure {{config_file}}.pacsave exists
+ stat:
+ path: '{{config_file}}.pacsave'
+ register: pacsave_st_1
+
+ - assert:
+ that:
+ - pacsave_st_1.stat.exists
+
+ - name: Delete {{config_file}}.pacsave
+ file:
+ path: '{{config_file}}.pacsave'
+ state: absent
+
+ - name: Install {{ package_name }}
+ pacman:
+ name: '{{ package_name }}'
+ state: present
+
+ - name: Modify {{config_file}}
+ blockinfile:
+ path: '{{config_file}}'
+ block: |
+ # something something
+ # on 2 lines
+
+ - name: Remove {{ package_name }} - nosave
+ pacman:
+ name: '{{ package_name }}'
+ remove_nosave: true
+ state: absent
+
+ - name: Make sure {{config_file}}.pacsave does not exist
+ stat:
+ path: '{{config_file}}.pacsave'
+ register: pacsave_st_2
+
+ - assert:
+ that:
+ - not pacsave_st_2.stat.exists
diff --git a/ansible_collections/community/general/tests/integration/targets/pacman/tasks/update_cache.yml b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/update_cache.yml
new file mode 100644
index 000000000..ee2ac3b9f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pacman/tasks/update_cache.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Make sure package cache is updated
+ pacman:
+ update_cache: true
+
+- name: Update package cache again (should not be changed)
+ pacman:
+ update_cache: true
+ register: update_cache_idem
+
+- name: Update package cache again with force=true (should be changed)
+ pacman:
+ update_cache: true
+ force: true
+ register: update_cache_force
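+
+# update_cache roughly corresponds to `pacman -Sy`; combining it with
+# force=true refreshes the sync databases even when they appear up to date
+# (akin to `pacman -Syy`), which is why only the forced run is changed.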
+
+- name: Check conditions
+ assert:
+ that:
+ - update_cache_idem is not changed
+ - update_cache_idem.cache_updated == false
+ - update_cache_force is changed
+ - update_cache_force.cache_updated == true
diff --git a/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml
new file mode 100644
index 000000000..13a0a5e09
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/tasks/main.yml
@@ -0,0 +1,25 @@
+# Test code for pagerduty_user module
+#
+# Copyright (c) 2020, Zainab Alsaffar <Zainab.Alsaffar@mail.rit.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+- name: Install required library
+ pip:
+ name: pdpyras
+ state: present
+
+- name: Create a user account on PagerDuty
+ pagerduty_user:
+ access_token: '{{ pd_api_access_token }}'
+ pd_user: '{{ fullname }}'
+ pd_email: '{{ email }}'
+ pd_role: '{{ pd_role }}'
+ pd_teams: '{{ pd_team }}'
+ state: present
+
+- name: Remove a user account from PagerDuty
+ pagerduty_user:
+ access_token: "{{ pd_api_access_token }}"
+ pd_user: "{{ fullname }}"
+ pd_email: "{{ email }}"
+ state: "absent"
diff --git a/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml
new file mode 100644
index 000000000..723755727
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pagerduty_user/vars/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+pd_api_access_token: your_api_access_token
+fullname: User Name
+email: user@email.com
+pd_role: observer
+pd_teams: team1
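+
+# Placeholder values only: a real run needs a valid PagerDuty API access
+# token and matching account data, which is why this target is marked
+# unsupported in CI (see aliases).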
diff --git a/ansible_collections/community/general/tests/integration/targets/pam_limits/aliases b/ansible_collections/community/general/tests/integration/targets/pam_limits/aliases
new file mode 100644
index 000000000..b85ae6419
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pam_limits/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/pam_limits/files/test_pam_limits.conf b/ansible_collections/community/general/tests/integration/targets/pam_limits/files/test_pam_limits.conf
new file mode 100644
index 000000000..7d5d8bc85
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pam_limits/files/test_pam_limits.conf
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# /etc/security/limits.conf
diff --git a/ansible_collections/community/general/tests/integration/targets/pam_limits/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pam_limits/tasks/main.yml
new file mode 100644
index 000000000..5ad68f4a6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pam_limits/tasks/main.yml
@@ -0,0 +1,92 @@
+# Test code for the pam_limits module
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set value for temp limit configuration
+ set_fact:
+ test_limit_file: "/tmp/limits.conf"
+
+- name: Copy temporary limits.conf
+ copy:
+ src: test_pam_limits.conf
+ dest: "{{ test_limit_file }}"
+
+- name: Test check mode support in pam_limits
+ community.general.pam_limits:
+ domain: smith
+ limit_type: soft
+ limit_item: nofile
+ value: '64000'
+ dest: "{{ test_limit_file }}"
+ check_mode: true
+ register: check_mode_test
+
+- name: Test that check mode is working
+ assert:
+ that:
+ - check_mode_test is changed
+
+- name: Add soft limit for smith user
+ community.general.pam_limits:
+ domain: smith
+ limit_type: soft
+ limit_item: nofile
+ value: '64000'
+ dest: "{{ test_limit_file }}"
+ register: soft_limit_test
+
+- name: Check if changes are made
+ assert:
+ that:
+ - soft_limit_test is changed
+
+- name: Again change soft limit for smith user to verify idempotency
+ community.general.pam_limits:
+ domain: smith
+ limit_type: soft
+ limit_item: nofile
+ value: '64000'
+ dest: "{{ test_limit_file }}"
+ register: soft_limit_test
+
+- name: Check that no changes are made (idempotency)
+ assert:
+ that:
+ - not soft_limit_test.changed
+
+- name: Change hard limit for joe user to test diff output
+ community.general.pam_limits:
+ domain: joe
+ limit_type: hard
+ limit_item: nofile
+ value: '100000'
+ dest: "{{ test_limit_file }}"
+ register: hard_limit_test
+ diff: true
+
+- name: Debugging output for hard limit test
+ debug:
+ msg: "{{ hard_limit_test }}"
+
+- name: Check if changes were made
+ assert:
+ that:
+ - hard_limit_test is changed
+ - hard_limit_test.diff.after is defined
+ - hard_limit_test.diff.before is defined
+
+- name: Add comment with change
+ community.general.pam_limits:
+ domain: doom
+ limit_type: hard
+ limit_item: nofile
+ value: '100000'
+ dest: "{{ test_limit_file }}"
+ comment: "This is a nice comment"
+ register: comment_test
+
+- name: Check if changes were made
+ assert:
+ that:
+ - comment_test is changed
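+
+# A brief closing sketch (values illustrative): pam_limits honours check
+# mode, so a change can be previewed without touching the file, as in the
+# check-mode test at the top of this file.
+- name: Preview a hard core limit for the audio group (check mode)
+  community.general.pam_limits:
+    domain: '@audio'
+    limit_type: hard
+    limit_item: core
+    value: '0'
+    dest: "{{ test_limit_file }}"
+  check_mode: true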
diff --git a/ansible_collections/community/general/tests/integration/targets/pamd/aliases b/ansible_collections/community/general/tests/integration/targets/pamd/aliases
new file mode 100644
index 000000000..b85ae6419
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pamd/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/pamd/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pamd/tasks/main.yml
new file mode 100644
index 000000000..fdd16d166
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pamd/tasks/main.yml
@@ -0,0 +1,74 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set value for temp limit configuration
+ set_fact:
+ test_pamd_file: "/tmp/pamd_file"
+
+- name: Create temporary pam.d file
+ copy:
+ content: "session required pam_lastlog.so silent showfailed"
+ dest: "{{ test_pamd_file }}"
+- name: Test that working on a single-line file works (2925)
+ community.general.pamd:
+ path: /tmp
+ name: pamd_file
+ type: session
+ control: required
+ module_path: pam_lastlog.so
+ module_arguments: silent
+ state: args_absent
+ register: pamd_file_output
+- name: Check if changes were made
+ assert:
+ that:
+ - pamd_file_output is changed
+
+- name: Test removing all arguments from an entry (3260)
+ community.general.pamd:
+ path: /tmp
+ name: pamd_file
+ type: session
+ control: required
+ module_path: pam_lastlog.so
+ module_arguments: ""
+ state: updated
+ register: pamd_file_output_noargs
+- name: Read back the file (3260)
+ slurp:
+ src: "{{ test_pamd_file }}"
+ register: pamd_file_slurp_noargs
+- name: Check if changes were made (3260)
+ vars:
+ line_array: "{{ (pamd_file_slurp_noargs.content|b64decode).split('\n')[2].split() }}"
+ assert:
+ that:
+ - pamd_file_output_noargs is changed
+ - line_array == ['session', 'required', 'pam_lastlog.so']
+
+- name: Create temporary pam.d file
+ copy:
+ content: ""
+ dest: "{{ test_pamd_file }}"
+# This test merely demonstrates that, as-is, the module will not perform any changes on an empty file.
+# All the existing values for "state" first search for a rule matching type, control and module_path,
+# and perform no change whatsoever if no existing rule matches.
+- name: Test that working on an empty file works (2925)
+ community.general.pamd:
+ path: /tmp
+ name: pamd_file
+ type: session
+ control: required
+ module_path: pam_lastlog.so
+ module_arguments: silent
+ register: pamd_file_output_empty
+- name: Read back the file
+ slurp:
+ src: "{{ test_pamd_file }}"
+ register: pamd_file_slurp
+- name: Check that no changes were made
+ assert:
+ that:
+ - pamd_file_output_empty is not changed
+ - pamd_file_slurp.content|b64decode == ''
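+
+# A closing sketch (rule values illustrative): inserting a new rule uses
+# state=before (or state=after) together with the new_* parameters that
+# describe the rule to add; on this now-empty file it is a no-op for the
+# same reason as the test above.
+- name: Insert a rule before the matched one
+  community.general.pamd:
+    path: /tmp
+    name: pamd_file
+    type: session
+    control: required
+    module_path: pam_lastlog.so
+    new_type: session
+    new_control: optional
+    new_module_path: pam_keyinit.so
+    state: before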
diff --git a/ansible_collections/community/general/tests/integration/targets/parted/aliases b/ansible_collections/community/general/tests/integration/targets/parted/aliases
new file mode 100644
index 000000000..b2b1b25fd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/parted/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+azp/posix/vm
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/docker
+needs/root
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/parted/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/parted/handlers/main.yml
new file mode 100644
index 000000000..e292260d0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/parted/handlers/main.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove loopback device
+ command:
+ cmd: losetup -d {{ losetup_name.stdout }}
+ changed_when: true
+
+- name: Remove file
+ file:
+ path: /bigfile
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/parted/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/parted/tasks/main.yml
new file mode 100644
index 000000000..c91258d35
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/parted/tasks/main.yml
@@ -0,0 +1,86 @@
+# Copyright (c) 2021, Alexei Znamensky
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install parted
+ package:
+ name: parted
+ state: present
+ when: ansible_os_family == 'Alpine'
+
+- name: Create empty file
+ community.general.filesize:
+ path: /bigfile
+ size: 1GiB
+ notify: Remove file
+
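+# losetup -f only reports the next free loop device; the follow-up task
+# then attaches /bigfile to it, so losetup_name.stdout names the device
+# actually created.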
+- name: Obtain loop device name
+ command:
+ cmd: losetup -f
+ changed_when: false
+ register: losetup_name
+
+- name: Create loopback device
+ command:
+ cmd: losetup -f /bigfile
+ changed_when: true
+ register: losetup_cmd
+ notify: Remove loopback device
+
+- name: Create first partition
+ community.general.parted:
+ device: "{{ losetup_name.stdout }}"
+ number: 1
+ state: present
+ fs_type: ext4
+ part_end: "50%"
+ register: partition1
+
+- name: Make filesystem
+ community.general.filesystem:
+ device: "{{ losetup_name.stdout }}p1"
+ fstype: ext4
+ register: fs1_succ
+
+- name: Make filesystem (fail)
+ community.general.filesystem:
+ device: "{{ losetup_name.stdout }}p2"
+ fstype: ext4
+ ignore_errors: true
+ register: fs_fail
+
+- name: Create second partition
+ community.general.parted:
+ device: "{{ losetup_name.stdout }}"
+ number: 2
+ state: present
+ fs_type: ext4
+ part_start: "{{ partition1.partitions[0].end + 1 }}KiB"
+ part_end: "100%"
+ register: partition2
+
+- name: Make filesystem
+ community.general.filesystem:
+ device: "{{ losetup_name.stdout }}p2"
+ fstype: ext4
+ register: fs2_succ
+
+- name: Remove first partition
+ community.general.parted:
+ device: "{{ losetup_name.stdout }}"
+ number: 1
+ state: absent
+ register: partition_rem1
+
+- name: Assert results
+ assert:
+ that:
+ - partition1 is changed
+ - fs1_succ is changed
+ - fs_fail is failed
+ - fs_fail is not changed
+ - partition2 is changed
+ - partition2.partitions | length == 2
+ - fs2_succ is changed
+ - partition_rem1 is changed
+ - partition_rem1.partitions | length == 1
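+
+# A read-only closing sketch: with state=info (the module default) parted
+# only reports, and the registered result carries the same partitions list
+# used in the assertions above.
+- name: Read back the final partition table
+  community.general.parted:
+    device: "{{ losetup_name.stdout }}"
+    state: info
+  register: partition_info
+
+- name: Show the final partition table
+  debug:
+    var: partition_info.partitions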
diff --git a/ansible_collections/community/general/tests/integration/targets/pids/aliases b/ansible_collections/community/general/tests/integration/targets/pids/aliases
new file mode 100644
index 000000000..343f119da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pids/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
diff --git a/ansible_collections/community/general/tests/integration/targets/pids/files/sleeper.c b/ansible_collections/community/general/tests/integration/targets/pids/files/sleeper.c
new file mode 100644
index 000000000..16d4b0eaa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pids/files/sleeper.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+ * GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+/* Sleep for the number of seconds given as the first command-line argument;
+ * the test uses this as a uniquely named stand-in for sleep(1). */
+int main(int argc, char **argv) {
+    int delay = atoi(argv[1]);
+    sleep(delay);
+    return 0;
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/pids/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/pids/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pids/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml
new file mode 100644
index 000000000..2ba7f3754
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pids/tasks/main.yml
@@ -0,0 +1,120 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Test code for the pids module
+# Copyright (c) 2019, Saranya Sridharan
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Attempt installation of latest 'psutil' version
+ pip:
+ name: psutil
+ ignore_errors: true
+ register: psutil_latest_install
+
+- name: Install greatest 'psutil' version which will work with all pip versions
+ pip:
+ name: psutil < 5.7.0
+ when: psutil_latest_install is failed
+
+- name: "Checking the empty result"
+ pids:
+ name: "blahblah"
+ register: emptypids
+
+- name: "Verify that the list of Process IDs (PIDs) returned is empty"
+ assert:
+ that:
+ - emptypids is not changed
+ - emptypids.pids == []
+
+- name: "Picking a random process name"
+ set_fact:
+ random_name: some-random-long-name-{{ 10000000000 + (9999999999 | random) }}
+
+- name: Copy the fake 'sleep' source code
+ copy:
+ src: sleeper.c
+ dest: "{{ remote_tmp_dir }}/sleeper.c"
+ mode: 0644
+
+- name: Compile fake 'sleep' binary
+ command: cc {{ remote_tmp_dir }}/sleeper.c -o {{ remote_tmp_dir }}/{{ random_name }}
+
+- name: Copy templated helper script
+ template:
+ src: obtainpid.sh
+ dest: "{{ remote_tmp_dir }}/obtainpid.sh"
+ mode: 0755
+
+- name: "Run the fake 'sleep' binary"
+ command: sh {{ remote_tmp_dir }}/obtainpid.sh
+ async: 100
+ poll: 0
+
+- name: "Wait for one second to make sure that the fake 'sleep' binary has actually been started"
+ pause:
+ seconds: 1
+
+- name: "Checking the process IDs (PIDs) of fake 'sleep' binary"
+ pids:
+ name: "{{ random_name }}"
+ register: pids
+
+- name: "Checking that exact non-substring matches are required"
+ pids:
+ name: "{{ random_name[0:25] }}"
+ register: exactpidmatch
+
+- name: "Checking that patterns can be used with the pattern option"
+ pids:
+ pattern: "{{ random_name[0:25] }}"
+ register: pattern_pid_match
+
+- name: "Checking that case-insensitive patterns can be used with the pattern option"
+ pids:
+ pattern: "{{ random_name[0:25] | upper }}"
+ ignore_case: true
+ register: caseinsensitive_pattern_pid_match
+
+- name: "Checking that .* includes test pid"
+ pids:
+ pattern: .*
+ register: match_all
+
+- name: "Reading pid from the file"
+ slurp:
+ src: "{{ remote_tmp_dir }}/obtainpid.txt"
+ register: newpid
+
+- name: Gather all processes to make debugging easier
+ command: ps aux
+ register: result
+ no_log: true
+
+- name: List all processes to make debugging easier
+ debug:
+ var: result.stdout_lines
+
+- name: "Verify that the Process IDs (PIDs) returned is not empty and also equal to the PIDs obtained in console"
+ assert:
+ that:
+ - "pids.pids | join(' ') == newpid.content | b64decode | trim"
+ - "pids.pids | length > 0"
+ - "exactpidmatch.pids == []"
+ - "pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+ - "caseinsensitive_pattern_pid_match.pids | join(' ') == newpid.content | b64decode | trim"
+ - newpid.content | b64decode | trim | int in match_all.pids
+
+- name: "Register output of bad input pattern"
+ pids:
+ pattern: (unterminated
+ register: bad_pattern_result
+ ignore_errors: true
+
+- name: "Verify that bad input pattern result is failed"
+ assert:
+ that:
+ - bad_pattern_result is failed
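+
+# A closing sketch: the returned PID list is plain data, so it can feed a
+# loop directly, for example to clean up the fake 'sleep' process started
+# earlier.
+- name: Terminate the fake 'sleep' binary
+  command: "kill {{ item }}"
+  loop: "{{ pids.pids }}"
+  ignore_errors: true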
diff --git a/ansible_collections/community/general/tests/integration/targets/pids/templates/obtainpid.sh b/ansible_collections/community/general/tests/integration/targets/pids/templates/obtainpid.sh
new file mode 100644
index 000000000..ecbf56aab
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pids/templates/obtainpid.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
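+# Start the compiled sleeper in the background and record its PID ("$!")
+# so the test can compare it with the pids module output.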
+"{{ remote_tmp_dir }}/{{ random_name }}" 100 &
+echo "$!" > "{{ remote_tmp_dir }}/obtainpid.txt"
diff --git a/ansible_collections/community/general/tests/integration/targets/pipx/aliases b/ansible_collections/community/general/tests/integration/targets/pipx/aliases
new file mode 100644
index 000000000..9f87ec348
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pipx/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/python2
+skip/python3.5
diff --git a/ansible_collections/community/general/tests/integration/targets/pipx/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pipx/tasks/main.yml
new file mode 100644
index 000000000..567405ec4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pipx/tasks/main.yml
@@ -0,0 +1,316 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install pipx
+ pip:
+ name: pipx
+ extra_args: --user
+
+##############################################################################
+- name: ensure application tox is uninstalled
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox
+
+- name: install application tox
+ community.general.pipx:
+ name: tox
+ register: install_tox
+
+- name: set fact latest_tox_version
+ set_fact:
+ latest_tox_version: "{{ install_tox.application.tox.version }}"
+
+- name: install application tox again
+ community.general.pipx:
+ name: tox
+ register: install_tox_again
+
+- name: install application tox again force
+ community.general.pipx:
+ name: tox
+ force: true
+ register: install_tox_again_force
+
+- name: uninstall application tox
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox
+
+- name: check assertions tox
+ assert:
+ that:
+ - install_tox is changed
+ - "'tox' in install_tox.application"
+ - install_tox_again is not changed
+ - install_tox_again_force is changed
+ - uninstall_tox is changed
+ - "'tox' not in uninstall_tox.application"
+
+##############################################################################
+- name: install application tox with system-site-packages
+ community.general.pipx:
+ name: tox
+ system_site_packages: true
+ register: install_tox
+
+- name: get raw pipx_info
+ community.general.pipx_info:
+ include_raw: true
+ register: pipx_info_raw
+
+- name: uninstall application tox
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox
+
+- name: check assertions tox
+ assert:
+ that:
+ - install_tox is changed
+ - "'tox' in install_tox.application"
+ - pipx_info_raw is not changed
+ - "'--system-site-packages' in pipx_info_raw.raw_output.venvs.tox.metadata.venv_args"
+ - uninstall_tox is changed
+ - "'tox' not in uninstall_tox.application"
+
+##############################################################################
+- name: install application tox 3.24.0
+ community.general.pipx:
+ name: tox
+ source: tox==3.24.0
+ register: install_tox_324
+
+- name: reinstall tox 3.24.0
+ community.general.pipx:
+ name: tox
+ state: reinstall
+ register: reinstall_tox_324
+
+- name: reinstall without name
+ community.general.pipx:
+ state: reinstall
+ register: reinstall_noname
+ ignore_errors: true
+
+- name: upgrade tox from 3.24.0
+ community.general.pipx:
+ name: tox
+ state: upgrade
+ register: upgrade_tox_324
+
+- name: upgrade without name
+ community.general.pipx:
+ state: upgrade
+ register: upgrade_noname
+ ignore_errors: true
+
+- name: downgrade tox 3.24.0
+ community.general.pipx:
+ name: tox
+ source: tox==3.24.0
+ force: true
+ register: downgrade_tox_324
+
+- name: cleanup tox 3.24.0
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_324
+
+- name: check assertions tox 3.24.0
+ assert:
+ that:
+ - install_tox_324 is changed
+ - "'tox' in install_tox_324.application"
+ - install_tox_324.application.tox.version == '3.24.0'
+ - reinstall_tox_324 is changed
+ - reinstall_tox_324.application.tox.version == '3.24.0'
+ - upgrade_tox_324 is changed
+ - upgrade_tox_324.application.tox.version != '3.24.0'
+ - downgrade_tox_324 is changed
+ - downgrade_tox_324.application.tox.version == '3.24.0'
+ - uninstall_tox_324 is changed
+ - "'tox' not in uninstall_tox_324.application"
+ - upgrade_noname is failed
+ - reinstall_noname is failed
+
+##############################################################################
+- name: install application latest tox
+ community.general.pipx:
+ name: tox
+ state: latest
+ register: install_tox_latest
+
+- name: cleanup tox latest
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_latest
+
+- name: install application tox 3.24.0 for latest
+ community.general.pipx:
+ name: tox
+ source: tox==3.24.0
+ register: install_tox_324_for_latest
+
+- name: install application latest tox
+ community.general.pipx:
+ name: tox
+ state: latest
+ register: install_tox_latest_with_preinstall
+
+- name: install application latest tox again
+ community.general.pipx:
+ name: tox
+ state: latest
+ register: install_tox_latest_with_preinstall_again
+
+- name: install application latest tox
+ community.general.pipx:
+ name: tox
+ state: latest
+ force: true
+ register: install_tox_latest_with_preinstall_again_force
+
+- name: cleanup tox latest again
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_latest_again
+
+- name: install application tox with deps
+ community.general.pipx:
+ state: latest
+ name: tox
+ install_deps: true
+ register: install_tox_with_deps
+
+- name: cleanup tox latest yet again
+ community.general.pipx:
+ state: absent
+ name: tox
+ register: uninstall_tox_again
+
+- name: check assertions tox latest
+ assert:
+ that:
+ - install_tox_latest is changed
+ - uninstall_tox_latest is changed
+ - install_tox_324_for_latest is changed
+ - install_tox_324_for_latest.application.tox.version == '3.24.0'
+ - install_tox_latest_with_preinstall is changed
+ - install_tox_latest_with_preinstall.application.tox.version == latest_tox_version
+ - install_tox_latest_with_preinstall_again is not changed
+ - install_tox_latest_with_preinstall_again.application.tox.version == latest_tox_version
+ - install_tox_latest_with_preinstall_again_force is changed
+ - install_tox_latest_with_preinstall_again_force.application.tox.version == latest_tox_version
+ - uninstall_tox_latest_again is changed
+ - install_tox_with_deps is changed
+ - install_tox_with_deps.application.tox.version == latest_tox_version
+ - uninstall_tox_again is changed
+ - "'tox' not in uninstall_tox_again.application"
+
+##############################################################################
+- name: ensure application ansible-lint is uninstalled
+ community.general.pipx:
+ name: ansible-lint
+ state: absent
+
+- name: install application ansible-lint
+ community.general.pipx:
+ name: ansible-lint
+ register: install_ansible_lint
+
+- name: inject packages
+ community.general.pipx:
+ state: inject
+ name: ansible-lint
+ inject_packages:
+ - licenses
+ register: inject_pkgs_ansible_lint
+
+- name: inject packages with apps
+ community.general.pipx:
+ state: inject
+ name: ansible-lint
+ inject_packages:
+ - black
+ install_apps: true
+ register: inject_pkgs_apps_ansible_lint
+
+- name: cleanup ansible-lint
+ community.general.pipx:
+ state: absent
+ name: ansible-lint
+ register: uninstall_ansible_lint
+
+- name: check assertions inject_packages
+ assert:
+ that:
+ - install_ansible_lint is changed
+ - inject_pkgs_ansible_lint is changed
+ - '"ansible-lint" in inject_pkgs_ansible_lint.application'
+ - '"licenses" in inject_pkgs_ansible_lint.application["ansible-lint"]["injected"]'
+ - inject_pkgs_apps_ansible_lint is changed
+ - '"ansible-lint" in inject_pkgs_apps_ansible_lint.application'
+ - '"black" in inject_pkgs_apps_ansible_lint.application["ansible-lint"]["injected"]'
+ - uninstall_ansible_lint is changed
+
+##############################################################################
+- name: install jupyter - not working smoothly on FreeBSD
+ when: ansible_system != 'FreeBSD'
+ block:
+ - name: ensure application jupyter is uninstalled
+ community.general.pipx:
+ name: jupyter
+ state: absent
+
+ - name: install application jupyter
+ community.general.pipx:
+ name: jupyter
+ install_deps: true
+ register: install_jupyter
+
+ - name: cleanup jupyter
+ community.general.pipx:
+ state: absent
+ name: jupyter
+
+ - name: check assertions
+ assert:
+ that:
+ - install_jupyter is changed
+ - '"ipython" in install_jupyter.stdout'
+
+##############################################################################
+- name: ensure /opt/pipx
+ ansible.builtin.file:
+ path: /opt/pipx
+ state: directory
+ mode: 0755
+
+- name: install tox site-wide
+ community.general.pipx:
+ name: tox
+ state: latest
+ register: install_tox_sitewide
+ environment:
+ PIPX_HOME: /opt/pipx
+ PIPX_BIN_DIR: /usr/local/bin
+
+- name: stat /usr/local/bin/tox
+ ansible.builtin.stat:
+ path: /usr/local/bin/tox
+ register: usrlocaltox
+
+- name: check assertions
+ ansible.builtin.assert:
+ that:
+ - install_tox_sitewide is changed
+ - usrlocaltox.stat.exists
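+
+# Note that the PIPX_HOME/PIPX_BIN_DIR environment must accompany every task
+# that manages the site-wide install; a matching cleanup sketch:
+- name: uninstall site-wide tox
+  community.general.pipx:
+    name: tox
+    state: absent
+  environment:
+    PIPX_HOME: /opt/pipx
+    PIPX_BIN_DIR: /usr/local/bin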
diff --git a/ansible_collections/community/general/tests/integration/targets/pipx_info/aliases b/ansible_collections/community/general/tests/integration/targets/pipx_info/aliases
new file mode 100644
index 000000000..a28278bbc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pipx_info/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/python2
+skip/python3.5
diff --git a/ansible_collections/community/general/tests/integration/targets/pipx_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pipx_info/tasks/main.yml
new file mode 100644
index 000000000..0a01f0af9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pipx_info/tasks/main.yml
@@ -0,0 +1,140 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install pipx
+ pip:
+ name: pipx
+ extra_args: --user
+
+##############################################################################
+- name: ensure application tox is uninstalled
+ community.general.pipx:
+ state: absent
+ name: tox
+
+- name: retrieve applications (empty)
+ community.general.pipx_info: {}
+ register: info_empty
+
+- name: install application tox
+ community.general.pipx:
+ name: tox
+
+- name: retrieve applications
+ community.general.pipx_info: {}
+ register: info_all
+
+- name: retrieve applications (include_deps=true)
+ community.general.pipx_info:
+ include_deps: true
+ register: info_all_deps
+
+- name: retrieve application tox
+ community.general.pipx_info:
+ name: tox
+ include_deps: true
+ register: info_tox
+
+- name: uninstall application tox
+ community.general.pipx:
+ state: absent
+ name: tox
+
+- name: check assertions tox
+ assert:
+ that:
+ - info_empty.application|length == 0
+
+ - info_all.application|length == 1
+ - info_all.application[0].name == "tox"
+ - "'version' in info_all.application[0]"
+ - "'dependencies' not in info_all.application[0]"
+ - "'injected' not in info_all.application[0]"
+
+ - info_all_deps.application|length == 1
+ - info_all_deps.application[0].name == "tox"
+ - "'version' in info_all_deps.application[0]"
+ - info_all_deps.application[0].dependencies == ["chardet", "virtualenv"]
+ or info_all_deps.application[0].dependencies == ["virtualenv"]
+ - "'injected' not in info_all.application[0]"
+
+ - info_tox.application == info_all_deps.application
+
+##############################################################################
+- name: set test applications
+ set_fact:
+ apps:
+ - name: tox
+ source: tox==3.24.0
+ - name: ansible-lint
+ inject_packages:
+ - licenses
+
+- name: ensure applications are uninstalled
+ community.general.pipx:
+ name: "{{ item.name }}"
+ state: absent
+ loop: "{{ apps }}"
+
+- name: install applications
+ community.general.pipx:
+ name: "{{ item.name }}"
+ source: "{{ item.source|default(omit) }}"
+ loop: "{{ apps }}"
+
+- name: inject packages
+ community.general.pipx:
+ state: inject
+ name: "{{ item.name }}"
+ inject_packages: "{{ item.inject_packages }}"
+ when: "'inject_packages' in item"
+ loop: "{{ apps }}"
+
+- name: retrieve applications
+ community.general.pipx_info: {}
+ register: info2_all
+
+- name: retrieve applications (include_deps=true)
+ community.general.pipx_info:
+ include_deps: true
+ include_injected: true
+ register: info2_all_deps
+
+- name: retrieve application ansible-lint
+ community.general.pipx_info:
+ name: ansible-lint
+ include_deps: true
+ include_injected: true
+ register: info2_lint
+
+- name: ensure applications are uninstalled
+ community.general.pipx:
+ name: "{{ item.name }}"
+ state: absent
+ loop: "{{ apps }}"
+
+- name: check assertions multiple apps
+ assert:
+ that:
+ - all_apps|length == 2
+ - all_apps[1].name == "tox"
+ - all_apps[1].version == "3.24.0"
+ - "'dependencies' not in all_apps[1]"
+ - "'injected' not in all_apps[1]"
+
+ - all_apps_deps|length == 2
+ - all_apps_deps[1].name == "tox"
+ - all_apps_deps[1].version == "3.24.0"
+ - all_apps_deps[1].dependencies == ["virtualenv"]
+ - "'injected' in all_apps_deps[0]"
+ - "'licenses' in all_apps_deps[0].injected"
+
+ - lint|length == 1
+ - all_apps_deps|length == 2
+ - lint[0] == all_apps_deps[0]
+ vars:
+ all_apps: "{{ info2_all.application|sort(attribute='name') }}"
+ all_apps_deps: "{{ info2_all_deps.application|sort(attribute='name') }}"
+ lint: "{{ info2_lint.application|sort(attribute='name') }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/aliases b/ansible_collections/community/general/tests/integration/targets/pkgng/aliases
new file mode 100644
index 000000000..e13fde32c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/root
+skip/docker
+skip/osx
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml
new file mode 100644
index 000000000..4028c57d8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/create-outofdate-pkg.yml
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create temporary directory for package creation
+ tempfile:
+ state: directory
+ register: pkgng_test_outofdate_pkg_tempdir
+
+- name: Copy intentionally out-of-date package manifest to testhost
+ template:
+ src: MANIFEST.json.j2
+ # Plus-sign must be added at the destination
+ # CI doesn't like files with '+' in them in the repository
+ dest: '{{ pkgng_test_outofdate_pkg_tempdir.path }}/MANIFEST'
+
+- name: Create out-of-date test package file
+ command:
+ argv:
+ - pkg
+ - create
+ - '--verbose'
+ - '--out-dir'
+ - '{{ pkgng_test_outofdate_pkg_tempdir.path }}'
+ - '--manifest'
+ - '{{ pkgng_test_outofdate_pkg_tempdir.path }}/MANIFEST'
+
+# pkg switched from .txz to .pkg in version 1.17.0
+# Might as well look for all valid pkg extensions.
+- name: Find created package file
+ find:
+ path: '{{ pkgng_test_outofdate_pkg_tempdir.path }}'
+ use_regex: true
+ pattern: '.*\.(pkg|tzst|t[xbg]z|tar)'
+ register: pkgng_test_outofdate_pkg_tempfile
+
+- name: There should be only one package
+ assert:
+ that:
+ - pkgng_test_outofdate_pkg_tempfile.files | count == 1
+
+- name: Copy the created package file to the expected location
+ copy:
+ remote_src: true
+ src: '{{ pkgng_test_outofdate_pkg_tempfile.files[0].path }}'
+ dest: '{{ pkgng_test_outofdate_pkg_path }}'
+
+- name: Remove temporary directory
+ file:
+ state: absent
+ path: '{{ pkgng_test_outofdate_pkg_tempdir.path }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/freebsd.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/freebsd.yml
new file mode 100644
index 000000000..0c8001899
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/freebsd.yml
@@ -0,0 +1,551 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+##
+## pkgng - prepare test environment
+##
+- name: Remove test package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+
+##
+## pkgng - example - state=present for single package
+##
+- name: 'state=present for single package'
+ include_tasks: install_single_package.yml
+
+##
+## pkgng - example - state=latest for already up-to-date package
+##
+- name: Upgrade package (idempotent)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: latest
+ register: pkgng_example2
+
+- name: Ensure pkgng does not upgrade up-to-date package
+ assert:
+ that:
+ - not pkgng_example2.changed
+
+##
+## pkgng - example - state=absent for single package
+##
+- name: Verify package sentinel file is present
+ stat:
+ path: '{{ pkgng_test_pkg_sentinelfile_path }}'
+ get_attributes: false
+ get_checksum: false
+ get_mime: false
+ register: pkgng_example3_stat_before
+
+- name: Install package (checkmode)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ check_mode: true
+ register: pkgng_example3_checkmode
+
+- name: Remove package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+ register: pkgng_example3
+
+- name: Remove package (idempotent)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+ register: pkgng_example3_idempotent
+
+- name: Verify package sentinel file is not present
+ stat:
+ path: '{{ pkgng_test_pkg_sentinelfile_path }}'
+ get_attributes: false
+ get_checksum: false
+ get_mime: false
+ register: pkgng_example3_stat_after
+
+- name: Ensure pkgng removes package correctly
+ assert:
+ that:
+ - pkgng_example3_stat_before.stat.exists
+ - pkgng_example3_stat_before.stat.executable
+ - not pkgng_example3_checkmode.changed
+ - pkgng_example3.changed
+ - not pkgng_example3_idempotent.changed
+ - not pkgng_example3_stat_after.stat.exists
+
+##
+## pkgng - example - state=latest for out-of-date package
+##
+- name: Install intentionally out-of-date package and upgrade it
+ #
+ # NOTE: The out-of-date package provided is a minimal,
+ # no-contents test package that declares {{ pkgng_test_pkg_name }} with
+ # a version of 0, so it should always be upgraded.
+ #
+ # This test might fail at some point in the
+ # future if the FreeBSD package format receives
+ # breaking changes that prevent pkg from installing
+ # older package formats.
+ #
+ block:
+ - name: Create out-of-date test package
+ import_tasks: create-outofdate-pkg.yml
+
+ - name: Install out-of-date test package
+ command: 'pkg add {{ pkgng_test_outofdate_pkg_path }}'
+ register: pkgng_example4_prepare
+
+ - name: Check for any available package upgrades (checkmode)
+ pkgng:
+ name: '*'
+ state: latest
+ check_mode: true
+ register: pkgng_example4_wildcard_checkmode
+
+ - name: Check for available package upgrade (checkmode)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: latest
+ check_mode: true
+ register: pkgng_example4_checkmode
+
+ - name: Upgrade out-of-date package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: latest
+ register: pkgng_example4
+
+ - name: Upgrade out-of-date package (idempotent)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: latest
+ register: pkgng_example4_idempotent
+
+ - name: Remove test out-of-date package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+
+ - name: Ensure pkgng upgrades package correctly
+ assert:
+ that:
+ - not pkgng_example4_prepare.failed
+ - pkgng_example4_wildcard_checkmode.changed
+ - pkgng_example4_checkmode.changed
+ - pkgng_example4.changed
+ - not pkgng_example4_idempotent.changed
+
+##
+## pkgng - example - state=latest for out-of-date package without privileges
+##
+- name: Install intentionally out-of-date package and try to upgrade it with an unprivileged user
+ block:
+  - name: Create unprivileged user
+    ansible.builtin.user:
+      name: powerless
+      shell: /bin/bash
+
+ - name: Create out-of-date test package
+ import_tasks: create-outofdate-pkg.yml
+
+ - name: Install out-of-date test package
+ command: 'pkg add {{ pkgng_test_outofdate_pkg_path }}'
+ register: pkgng_example4_nopower_prepare
+
+  - name: Check for any available package upgrades with an unprivileged user
+ become: true
+ become_user: powerless
+ pkgng:
+ name: '*'
+ state: latest
+ register: pkgng_example4_nopower_wildcard
+ ignore_errors: true
+
+ - name: Remove test out-of-date package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+
+  - name: Ensure pkgng fails to upgrade without privileges
+ assert:
+ that:
+ - not pkgng_example4_nopower_prepare.failed
+ - pkgng_example4_nopower_wildcard.failed
+
+##
+## pkgng - example - Install multiple packages in one command
+##
+- name: Remove test package (checkmode)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+ check_mode: true
+ register: pkgng_example5_prepare
+
+- name: Install three packages
+ pkgng:
+ name:
+ - '{{ pkgng_test_pkg_name }}'
+ - fish
+ - busybox
+ register: pkgng_example5
+
+- name: Remove three packages
+ pkgng:
+ name:
+ - '{{ pkgng_test_pkg_name }}'
+ - fish
+ - busybox
+ state: absent
+ register: pkgng_example5_cleanup
+
+- name: Ensure pkgng installs multiple packages with one command
+ assert:
+ that:
+ - not pkgng_example5_prepare.changed
+ - pkgng_example5.changed
+ - '(pkgng_example5.stdout | regex_search("^Number of packages to be installed: (\d+)", "\\1", multiline=True) | first | int) >= 3'
+ - '(pkgng_example5.stdout | regex_findall("^Number of packages to be", multiline=True) | count) == 1'
+ - pkgng_example5_cleanup.changed
+
+##
+## pkgng - example - state=latest multiple packages, some already installed
+##
+- name: Remove test package (checkmode)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ state: absent
+ check_mode: true
+ register: pkgng_example6_check
+
+- name: Create out-of-date test package
+ import_tasks: create-outofdate-pkg.yml
+
+- name: Install out-of-date test package
+ command: 'pkg add {{ pkgng_test_outofdate_pkg_path }}'
+ register: pkgng_example6_prepare
+
+- name: Upgrade and/or install two packages
+ pkgng:
+ name:
+ - '{{ pkgng_test_pkg_name }}'
+ - fish
+ state: latest
+ register: pkgng_example6
+
+- name: Remove two packages
+ pkgng:
+ name:
+ - '{{ pkgng_test_pkg_name }}'
+ - fish
+ state: absent
+ register: pkgng_example6_cleanup
+
+- name: Ensure pkgng upgrades and installs multiple packages in one task
+ assert:
+ that:
+ - not pkgng_example6_check.changed
+ - not pkgng_example6_prepare.failed
+ - pkgng_example6.changed
+ - '(pkgng_example6.stdout | regex_search("^Number of packages to be installed: (\d+)", "\\1", multiline=True) | first | int) >= 1'
+ - '(pkgng_example6.stdout | regex_search("^Number of packages to be upgraded: (\d+)", "\\1", multiline=True) | first | int) >= 1'
+ # Checking that "will be affected" occurs twice in the output ensures
+ # that the module runs two separate commands for install and upgrade,
+ # as the pkg command only outputs the string once per invocation.
+ - '(pkgng_example6.stdout | regex_findall("will be affected", multiline=True) | count) == 2'
+ - pkgng_example6_cleanup.changed
+
+##
+## pkgng - example - autoremove=yes
+##
+- name: "Test autoremove=yes"
+ #
+ # NOTE: FreeBSD 12.0 test runner receives a "connection reset by peer" after ~20% downloaded so we are
+ # only running this on 12.1 or higher
+ #
+ when: ansible_distribution_version is version('12.01', '>=')
+ block:
+ - name: Install GNU autotools
+ pkgng:
+ name: autotools
+ state: latest
+ register: pkgng_example7_prepare_install
+
+ - name: Remove GNU autotools and run pkg autoremove
+ pkgng:
+ name: autotools
+ state: absent
+ autoremove: true
+ register: pkgng_example7
+
+ - name: Check if autoremove uninstalled known autotools dependencies
+ pkgng:
+ name:
+ - autoconf
+ - automake
+ - libtool
+ state: absent
+ check_mode: true
+ register: pkgng_example7_cleanup
+
+ - name: Ensure pkgng autoremove works correctly
+ assert:
+ that:
+ - pkgng_example7_prepare_install is changed
+ - "'autoremoved' is in(pkgng_example7.msg)"
+ - pkgng_example7_cleanup is not changed
+
+##
+## pkgng - example - single annotations
+##
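+# Annotation strings carry an operator prefix: '+' adds a tag, ':' sets
+# (modifies) an existing tag, and '-' removes it, as exercised below.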
+- name: Install and annotate single package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '+ansibletest_example8=added'
+ register: pkgng_example8_add_annotation
+
+- name: Should fail to add duplicate annotation
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '+ansibletest_example8=duplicate'
+ ignore_errors: true
+ register: pkgng_example8_add_annotation_failure
+
+- name: Verify annotation is actually there
+ command: 'pkg annotate -q -S {{ pkgng_test_pkg_name }} ansibletest_example8'
+ register: pkgng_example8_add_annotation_verify
+
+- name: Install and annotate single package (checkmode, not changed)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '+ansibletest_example8=added'
+ check_mode: true
+ register: pkgng_example8_add_annotation_checkmode_nochange
+
+- name: Install and annotate single package (checkmode, changed)
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '+ansibletest_example8_checkmode=added'
+ check_mode: true
+ register: pkgng_example8_add_annotation_checkmode_change
+
+- name: Verify check_mode did not add an annotation
+ command: 'pkg annotate -q -S {{ pkgng_test_pkg_name }} ansibletest_example8_checkmode'
+ register: pkgng_example8_add_annotation_checkmode_change_verify
+
+- name: Modify annotation on single package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: ':ansibletest_example8=modified'
+ register: pkgng_example8_modify_annotation
+
+- name: Should fail to modify missing annotation
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: ':ansiblemissing=modified'
+ ignore_errors: true
+ register: pkgng_example8_modify_annotation_failure
+
+- name: Verify annotation has been modified
+ command: 'pkg annotate -q -S {{ pkgng_test_pkg_name }} ansibletest_example8'
+ register: pkgng_example8_modify_annotation_verify
+
+- name: Remove annotation on single package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '-ansibletest_example8'
+ register: pkgng_example8_remove_annotation
+
+- name: Verify annotation has been removed
+ command: 'pkg annotate -q -S {{ pkgng_test_pkg_name }} ansibletest_example8'
+ register: pkgng_example8_remove_annotation_verify
+
+- name: Ensure pkgng annotations on single packages work correctly
+ assert:
+ that:
+ - pkgng_example8_add_annotation.changed
+ - pkgng_example8_add_annotation_failure.failed
+ - pkgng_example8_add_annotation_checkmode_nochange is not changed
+ - pkgng_example8_add_annotation_checkmode_change is changed
+ - 'pkgng_example8_add_annotation_checkmode_change_verify.stdout_lines | count == 0'
+ - 'pkgng_example8_add_annotation_verify.stdout_lines | first == "added"'
+ - pkgng_example8_modify_annotation.changed
+ - pkgng_example8_modify_annotation_failure.failed
+ - 'pkgng_example8_modify_annotation_verify.stdout_lines | first == "modified"'
+ - pkgng_example8_remove_annotation.changed
+ - 'pkgng_example8_remove_annotation_verify.stdout_lines | count == 0'
+
+##
+## pkgng - example - multiple annotations
+##
+- name: Annotate single package with multiple annotations
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation:
+ - '+ansibletest_example9_1=added'
+ - '+ansibletest_example9_2=added'
+ register: pkgng_example9_add_annotation
+
+- name: Verify annotation is actually there
+ command: 'pkg info -q -A {{ pkgng_test_pkg_name }}'
+ register: pkgng_example9_add_annotation_verify
+ # Assert, below, tests that stdout includes:
+ # ```
+ # ansibletest_example9_1 : added
+ # ansibletest_example9_2 : added
+ # ```
+
+- name: Multiple annotation operations on single package
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation:
+ - ':ansibletest_example9_1=modified'
+ - '+ansibletest_example9_3=added'
+ register: pkgng_example9_multiple_annotation
+
+- name: Verify multiple operations succeeded
+ command: 'pkg info -q -A {{ pkgng_test_pkg_name }}'
+ register: pkgng_example9_multiple_annotation_verify
+ # Assert, below, tests that stdout includes:
+ # ```
+ # ansibletest_example9_1 : modified
+ # ansibletest_example9_2 : added
+ # ansibletest_example9_3 : added
+ # ```
+
+- name: Add multiple annotations with old syntax
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '+ansibletest_example9_4=added,+ansibletest_example9_5=added'
+ register: pkgng_example9_add_annotation_old
+
+- name: Verify annotation is actually there
+ command: 'pkg info -q -A {{ pkgng_test_pkg_name }}'
+ register: pkgng_example9_add_annotation_old_verify
+ # Assert, below, tests that stdout includes:
+ # ```
+ # ansibletest_example9_4 : added
+ # ansibletest_example9_5 : added
+ # ```
+
+- name: Ensure multiple annotations work correctly
+ assert:
+ that:
+ - pkgng_example9_add_annotation.changed
+ - '(pkgng_example9_add_annotation_verify.stdout_lines | select("match", "ansibletest_example9_[12]\s*:\s*added") | list | count) == 2'
+ - pkgng_example9_multiple_annotation.changed
+ - '(pkgng_example9_multiple_annotation_verify.stdout_lines | select("match", "ansibletest_example9_1\s*:\s*modified") | list | count) == 1'
+ - '(pkgng_example9_multiple_annotation_verify.stdout_lines | select("match", "ansibletest_example9_[23]\s*:\s*added") | list | count) == 2'
+ - pkgng_example9_add_annotation_old.changed
+ - '(pkgng_example9_add_annotation_old_verify.stdout_lines | select("match", "ansibletest_example9_[45]\s*:\s*added") | list | count) == 2'
+
+##
+## pkgng - example - invalid annotation strings
+##
+- name: Should fail on invalid annotate strings
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ annotation: '{{ item }}'
+ ignore_errors: true
+ register: pkgng_example8_invalid_annotation_failure
+ loop:
+ - 'naked_string'
+ - '/invalid_operation'
+ - ',empty_first_tag=validsecond'
+ - '=notag'
+
+- name: Verify invalid annotate strings did not add annotations
+ command: 'pkg info -q -A {{ pkgng_test_pkg_name }}'
+ register: pkgng_example8_invalid_annotation_verify
+
+- name: Ensure invalid annotate strings fail safely
+ assert:
+ that:
+ # Invalid strings should not change anything
+ - '(pkgng_example8_invalid_annotation_failure.results | selectattr("changed") | list | count) == 0'
+ # Invalid strings should always fail
+ - '(pkgng_example8_invalid_annotation_failure.results | rejectattr("failed") | list | count) == 0'
+ # Invalid strings should not cause an exception
+ - '(pkgng_example8_invalid_annotation_failure.results | selectattr("exception", "defined") | list | count) == 0'
+ # Verify annotations are unaffected
+ - '(pkgng_example8_invalid_annotation_verify.stdout_lines | select("search", "(naked_string|invalid_operation|empty_first_tag|validsecond|notag)") | list | count) == 0'
+
+##
+## pkgng - example - pkgsite=...
+##
+# NOTE: testing for failure here so that we do not have to set up our own,
+# or depend on a third-party, alternate package repository
+- name: Should fail with invalid pkgsite
+ pkgng:
+ name: '{{ pkgng_test_pkg_name }}'
+ pkgsite: DoesNotExist
+ ignore_errors: true
+ register: pkgng_example10_invalid_pkgsite_failure
+
+- name: Ensure invalid pkgsite fails as expected
+ assert:
+ that:
+ - pkgng_example10_invalid_pkgsite_failure.failed
+ - 'pkgng_example10_invalid_pkgsite_failure.stdout is search("^No repositories are enabled.", multiline=True)'
+
+##
+## pkgng - example - Install single package in jail
+##
+- name: Test within jail
+ #
+ # NOTE: FreeBSD 12.0 test runner receives a "connection reset by peer" after ~20% downloaded so we are
+ # only running this on 12.1 or higher
+ #
+ # NOTE: FreeBSD 12.3 fails with some kernel mismatch for packages
+ # (someone with FreeBSD knowledge has to take a look)
+ #
+ # NOTE: FreeBSD 12.4 fails to update repositories because it cannot load certificates from /usr/share/keys/pkg/trusted
+ # (someone with FreeBSD knowledge has to take a look)
+ #
+ # NOTE: FreeBSD 13.0 fails to update the package catalogue for unknown reasons (someone with FreeBSD
+ # knowledge has to take a look)
+ #
+ # NOTE: FreeBSD 13.1 fails to update the package catalogue for unknown reasons (someone with FreeBSD
+ # knowledge has to take a look)
+ #
+ # NOTE: FreeBSD 13.2 fails to update the package catalogue for unknown reasons (someone with FreeBSD
+ # knowledge has to take a look)
+ #
+ # See also
+ # https://github.com/ansible-collections/community.general/issues/5795
+ when: >-
+ (ansible_distribution_version is version('12.01', '>=') and ansible_distribution_version is version('12.3', '<'))
+ or ansible_distribution_version is version('13.3', '>=')
+ block:
+ - name: Setup testjail
+ include_tasks: setup-testjail.yml
+
+ - name: Install package in jail as rootdir
+ include_tasks: install_single_package.yml
+ vars:
+ pkgng_test_rootdir: /usr/jails/testjail
+ pkgng_test_install_prefix: /usr/jails/testjail
+ pkgng_test_install_cleanup: true
+
+ - name: Install package in jail
+ include_tasks: install_single_package.yml
+ vars:
+ pkgng_test_jail: testjail
+ pkgng_test_install_prefix: /usr/jails/testjail
+ pkgng_test_install_cleanup: true
+
+ - name: Install package in jail as chroot
+ include_tasks: install_single_package.yml
+ vars:
+ pkgng_test_chroot: /usr/jails/testjail
+ pkgng_test_install_prefix: /usr/jails/testjail
+ pkgng_test_install_cleanup: true
+ always:
+ - name: Stop and remove testjail
+ failed_when: false
+ changed_when: false
+ command: "ezjail-admin delete -wf testjail"
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/install_single_package.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/install_single_package.yml
new file mode 100644
index 000000000..5ba529af3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/install_single_package.yml
@@ -0,0 +1,58 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
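+# The install task below stores its parameters in a YAML anchor
+# (&pkgng_install_params) so that the later remove/install tasks can reuse
+# them verbatim via the '<<: *pkgng_install_params' merge key.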
+- name: Verify package sentinel file is not present
+ stat:
+ path: '{{ pkgng_test_install_prefix | default("") }}{{ pkgng_test_pkg_sentinelfile_path }}'
+ get_attributes: false
+ get_checksum: false
+ get_mime: false
+ register: pkgng_install_stat_before
+
+- name: Install package
+ pkgng: &pkgng_install_params
+ name: '{{ pkgng_test_pkg_name }}'
+ jail: '{{ pkgng_test_jail | default(omit) }}'
+ chroot: '{{ pkgng_test_chroot | default(omit) }}'
+ rootdir: '{{ pkgng_test_rootdir | default(omit) }}'
+ register: pkgng_install
+
+- name: Remove package (checkmode)
+ pkgng:
+ <<: *pkgng_install_params
+ state: absent
+ check_mode: true
+ register: pkgng_install_checkmode
+
+- name: Install package (idempotent, cached)
+ pkgng:
+ <<: *pkgng_install_params
+ cached: true
+ register: pkgng_install_idempotent_cached
+
+- name: Verify package sentinel file is present
+ stat:
+ path: '{{ pkgng_test_install_prefix | default("") }}{{ pkgng_test_pkg_sentinelfile_path }}'
+ get_attributes: false
+ get_checksum: false
+ get_mime: false
+ register: pkgng_install_stat_after
+
+- name: Remove test package (if requested)
+ pkgng:
+ <<: *pkgng_install_params
+ state: absent
+ when: 'pkgng_test_install_cleanup | default(False)'
+
+- name: Ensure pkgng installs package correctly
+ assert:
+ that:
+ - not pkgng_install_stat_before.stat.exists
+ - pkgng_install.changed
+ - pkgng_install_checkmode.changed
+ - not pkgng_install_idempotent_cached.changed
+ - not pkgng_install_idempotent_cached.stdout is match("Updating \w+ repository catalogue\.\.\.")
+ - pkgng_install_stat_after.stat.exists
+ - pkgng_install_stat_after.stat.executable
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/main.yml
new file mode 100644
index 000000000..59ca83d9f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: freebsd.yml
+ when:
+ - ansible_facts.distribution == 'FreeBSD'
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/setup-testjail.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/setup-testjail.yml
new file mode 100644
index 000000000..3055d29e8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/tasks/setup-testjail.yml
@@ -0,0 +1,100 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#
+# Instructions for setting up a jail
+# https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-ezjail.html
+#
+- name: Setup cloned interfaces
+ lineinfile:
+ dest: /etc/rc.conf
+ regexp: ^cloned_interfaces=lo1
+ line: cloned_interfaces=lo1
+
+- name: Activate cloned interfaces
+ command: "service netif cloneup"
+ changed_when: false
+
+- name: Add nat rule for cloned interfaces
+ copy:
+ dest: /etc/pf.conf
+ content: |
+ nat on {{ ansible_default_ipv4.interface }} from 127.0.1.0/24 -> {{ ansible_default_ipv4.interface }}:0
+ validate: "pfctl -nf %s"
+
+- name: Start pf firewall
+ service:
+ name: pf
+ state: started
+ enabled: true
+
+- name: Install ezjail
+ pkgng:
+ name: ezjail
+
+- name: Configure ezjail to use http
+ when: ansible_distribution_version is version('11.01', '>')
+ lineinfile:
+ dest: /usr/local/etc/ezjail.conf
+ regexp: ^ezjail_ftphost
+ line: ezjail_ftphost=http://ftp.freebsd.org
+
+- name: Configure ezjail to use archive for old freebsd releases
+ when: ansible_distribution_version is version('11.01', '<=')
+ lineinfile:
+ dest: /usr/local/etc/ezjail.conf
+ regexp: ^ezjail_ftphost
+ line: ezjail_ftphost=http://ftp-archive.freebsd.org
+
+- name: Start ezjail
+ ignore_errors: true
+ service:
+ name: ezjail
+ state: started
+ enabled: true
+
+- name: Redirect logs depending on verbosity
+ set_fact:
+ pkgng_jail_log_redirect: "2>&1 | tee -a /tmp/ezjail.log {{ '> /dev/null' if ansible_verbosity < 2 else '' }}"
+
+- name: Has ezjail
+ register: ezjail_base_jail
+ stat:
+ path: /usr/jails/basejail
+
+- name: Setup ezjail base
+ when: not ezjail_base_jail.stat.exists
+ shell: "ezjail-admin install {{ pkgng_jail_log_redirect }}"
+ changed_when: false
+
+- name: Has testjail
+ register: ezjail_test_jail
+ stat:
+ path: /usr/jails/testjail
+
+- name: Create testjail
+ when: not ezjail_test_jail.stat.exists
+ shell: "ezjail-admin create testjail 'lo1|127.0.1.1' {{ pkgng_jail_log_redirect }}"
+ changed_when: false
+
+- name: Configure testjail to use Cloudflare DNS
+ lineinfile:
+ dest: /usr/jails/testjail/etc/resolv.conf
+ regexp: "^nameserver[[:blank:]]+{{ item }}$"
+ line: "nameserver {{ item }}"
+ create: true
+ loop:
+ - "1.1.1.1"
+ - "1.0.0.1"
+
+- name: Is testjail running
+ shell: "jls | grep testjail"
+ changed_when: false
+ failed_when: false
+ register: is_testjail_up
+
+- name: Start testjail
+ when: is_testjail_up.rc == 1
+ command: "ezjail-admin start testjail"
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2 b/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2
new file mode 100644
index 000000000..e8537e89b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2
@@ -0,0 +1,16 @@
+{
+ "name": "{{ pkgng_test_pkg_name }}",
+ "origin": "{{ pkgng_test_pkg_category }}/{{ pkgng_test_pkg_name }}",
+ "version": "{{ pkgng_test_pkg_version | default('0') }}",
+ "comment": "{{ pkgng_test_pkg_name }} (Ansible Integration Test Package)",
+ "maintainer": "ansible-devel@googlegroups.com",
+ "www": "https://github.com/ansible-collections/community.general",
+ "abi": "FreeBSD:*:*",
+ "arch": "freebsd:*:*",
+ "prefix": "/usr/local",
+ "flatsize":0,
+ "licenselogic": "single",
+ "licenses":["GPLv3"],
+ "desc": "This package is only installed temporarily for integration testing of the community.general.pkgng Ansible module.\nIts version number is 0 so that ANY version of the real package, with the same name, will be considered an upgrade.\nIts architecture and abi are FreeBSD:*:* so that it will install on any version or architecture of FreeBSD,\nthus future-proof as long as the package MANIFEST format does not change\nand a wildcard in the version portion of the abi or arch field is not prohibited.",
+ "categories":["{{ pkgng_test_pkg_category }}"]
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2.license b/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/templates/MANIFEST.json.j2.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgng/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/pkgng/vars/main.yml
new file mode 100644
index 000000000..e32cc4110
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgng/vars/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+pkgng_test_outofdate_pkg_path: "/tmp/ansible_pkgng_test_package.pkg"
+pkgng_test_pkg_name: zsh
+pkgng_test_pkg_category: shells
+pkgng_test_pkg_sentinelfile_path: /usr/local/bin/zsh
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases b/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases
new file mode 100644
index 000000000..4232e0baa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgutil/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+destructive
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml
new file mode 100644
index 000000000..8ceb4adcc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/pkgutil/tasks/main.yml
@@ -0,0 +1,117 @@
+# Test code for the pkgutil module
+
+# Copyright (c) 2019, Dag Wieers (@dagwieers) <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
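+# Each add/remove below is exercised in check mode and normal mode, then
+# repeated in both modes to verify idempotency (repeats must report no change).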
+# CLEAN ENVIRONMENT
+- name: Remove CSWtop
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: originally_installed
+
+
+# ADD PACKAGE
+- name: Add package (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ check_mode: true
+ register: cm_add_package
+
+- name: Verify cm_add_package
+ assert:
+ that:
+ - cm_add_package is changed
+
+- name: Add package (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ register: nm_add_package
+
+- name: Verify nm_add_package
+ assert:
+ that:
+ - nm_add_package is changed
+
+- name: Add package again (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ check_mode: true
+ register: cm_add_package_again
+
+- name: Verify cm_add_package_again
+ assert:
+ that:
+ - cm_add_package_again is not changed
+
+- name: Add package again (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: present
+ register: nm_add_package_again
+
+- name: Verify nm_add_package_again
+ assert:
+ that:
+ - nm_add_package_again is not changed
+
+
+# REMOVE PACKAGE
+- name: Remove package (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ check_mode: true
+ register: cm_remove_package
+
+- name: Verify cm_remove_package
+ assert:
+ that:
+ - cm_remove_package is changed
+
+- name: Remove package (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: nm_remove_package
+
+- name: Verify nm_remove_package
+ assert:
+ that:
+ - nm_remove_package is changed
+
+- name: Remove package again (check_mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ check_mode: true
+ register: cm_remove_package_again
+
+- name: Verify cm_remove_package_again
+ assert:
+ that:
+ - cm_remove_package_again is not changed
+
+- name: Remove package again (normal mode)
+ pkgutil:
+ name: CSWtop
+ state: absent
+ register: nm_remove_package_again
+
+- name: Verify nm_remove_package_again
+ assert:
+ that:
+ - nm_remove_package_again is not changed
+
+
+# RESTORE ENVIRONMENT
+- name: Reinstall CSWtop
+ pkgutil:
+ name: CSWtop
+ state: present
+ when: originally_installed is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/proxmox/aliases b/ansible_collections/community/general/tests/integration/targets/proxmox/aliases
new file mode 100644
index 000000000..5e5957a5c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/proxmox/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
+proxmox_domain_info
+proxmox_group_info
+proxmox_user_info
+proxmox_storage_info
diff --git a/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml
new file mode 100644
index 000000000..22d7fcd29
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/proxmox/tasks/main.yml
@@ -0,0 +1,579 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2020, Tristan Le Guern <tleguern at bouledef.eu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
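+# All tasks authenticate either with api_password or with an
+# api_token_id/api_token_secret pair; whichever variables are undefined are
+# dropped from the module call via default(omit).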
+- name: List domains
+ proxmox_domain_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_domains is defined
+
+- name: Retrieve info about pve
+ proxmox_domain_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ domain: pve
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_domains is defined
+ - results.proxmox_domains|length == 1
+ - results.proxmox_domains[0].type == 'pve'
+
+- name: List groups
+ proxmox_group_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_groups is defined
+
+- name: List users
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ register: results
+
+- assert:
+ that:
+ - results is not changed
+ - results.proxmox_users is defined
+
+- name: Retrieve info about api_user using name and domain
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ user: "{{ user }}"
+ domain: "{{ domain }}"
+ register: results_user_domain
+
+- assert:
+ that:
+ - results_user_domain is not changed
+ - results_user_domain.proxmox_users is defined
+ - results_user_domain.proxmox_users|length == 1
+ - results_user_domain.proxmox_users[0].domain == "{{ domain }}"
+ - results_user_domain.proxmox_users[0].user == "{{ user }}"
+ - results_user_domain.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
+
+- name: Retrieve info about api_user using userid
+ proxmox_user_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ userid: "{{ user }}@{{ domain }}"
+ register: results_userid
+
+- assert:
+ that:
+ - results_userid is not changed
+ - results_userid.proxmox_users is defined
+ - results_userid.proxmox_users|length == 1
+ - results_userid.proxmox_users[0].domain == "{{ domain }}"
+ - results_userid.proxmox_users[0].user == "{{ user }}"
+ - results_userid.proxmox_users[0].userid == "{{ user }}@{{ domain }}"
+
+- name: Retrieve info about storage
+ proxmox_storage_info:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ storage: "{{ storage }}"
+ register: results_storage
+
+- assert:
+ that:
+ - results_storage is not changed
+ - results_storage.proxmox_storages is defined
+ - results_storage.proxmox_storages|length == 1
+ - results_storage.proxmox_storages[0].storage == "{{ storage }}"
+
+- name: VM creation
+ tags: [ 'create' ]
+ block:
+ - name: Create test VM test-instance
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ storage: "{{ storage }}"
+ vmid: "{{ from_vmid }}"
+ name: test-instance
+ clone: 'yes'
+ state: present
+ timeout: 500
+ register: results_kvm
+
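+ # The clone message ends with "... with newid <vmid> cloned from vm with
+ # vmid <from_vmid>", so the new vmid is the 7th token from the end.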
+ - set_fact:
+ vmid: "{{ results_kvm.msg.split(' ')[-7] }}"
+
+ - assert:
+ that:
+ - results_kvm is changed
+ - results_kvm.vmid == from_vmid
+ - results_kvm.msg == "VM test-instance with newid {{ vmid }} cloned from vm with vmid {{ from_vmid }}"
+
+ - pause:
+ seconds: 30
+
+- name: VM start
+ tags: [ 'start' ]
+ block:
+ - name: Start test VM
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: started
+ register: results_action_start
+
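+ # Note: the returned status reflects the VM's state before the action, so a
+ # successful start still reports 'stopped' here (and a stop reports
+ # 'running' further down).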
+ - assert:
+ that:
+ - results_action_start is changed
+ - results_action_start.status == 'stopped'
+ - results_action_start.vmid == {{ vmid }}
+ - results_action_start.msg == "VM {{ vmid }} started"
+
+ - pause:
+ seconds: 90
+
+ - name: Try to start test VM again
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: started
+ register: results_action_start_again
+
+ - assert:
+ that:
+ - results_action_start_again is not changed
+ - results_action_start_again.status == 'running'
+ - results_action_start_again.vmid == {{ vmid }}
+ - results_action_start_again.msg == "VM {{ vmid }} is already running"
+
+ - name: Check current status
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: current
+ register: results_action_current
+
+ - assert:
+ that:
+ - results_action_current is not changed
+ - results_action_current.status == 'running'
+ - results_action_current.vmid == {{ vmid }}
+ - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is running"
+
+- name: VM add/change/delete NIC
+ tags: [ 'nic' ]
+ block:
+ - name: Add NIC to test VM
+ proxmox_nic:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ vmid: "{{ vmid }}"
+ state: present
+ interface: net5
+ bridge: vmbr0
+ tag: 42
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}"
+
+ - name: Update NIC no changes
+ proxmox_nic:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ vmid: "{{ vmid }}"
+ state: present
+ interface: net5
+ bridge: vmbr0
+ tag: 42
+ register: results
+
+ - assert:
+ that:
+ - results is not changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Nic net5 unchanged on VM with vmid {{ vmid }}"
+
+ - name: Update NIC with changes
+ proxmox_nic:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ vmid: "{{ vmid }}"
+ state: present
+ interface: net5
+ bridge: vmbr0
+ tag: 24
+ firewall: true
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Nic net5 updated on VM with vmid {{ vmid }}"
+
+ - name: Delete NIC
+ proxmox_nic:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ vmid: "{{ vmid }}"
+ state: absent
+ interface: net5
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Nic net5 deleted on VM with vmid {{ vmid }}"
+
+- name: Create new disk in VM
+ tags: ['create_disk']
+ block:
+ - name: Add new disk (without force) to VM
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ storage: "{{ storage }}"
+ size: 1
+ state: present
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
+
+ - name: Try add disk again with same options (expect no-op)
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ storage: "{{ storage }}"
+ size: 1
+ state: present
+ register: results
+
+ - assert:
+ that:
+ - results is not changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} is up to date in VM {{ vmid }}"
+
+ - name: Add new disk replacing existing disk (detach old and leave unused)
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ storage: "{{ storage }}"
+ size: 2
+ create: forced
+ state: present
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} created in VM {{ vmid }}"
+
+- name: Update existing disk in VM
+ tags: ['update_disk']
+ block:
+ - name: Update disk configuration
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ backup: false
+ ro: true
+ aio: native
+ state: present
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} updated in VM {{ vmid }}"
+
+- name: Grow existing disk in VM
+ tags: ['grow_disk']
+ block:
+ - name: Increase disk size
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ size: +1G
+ state: resized
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} resized in VM {{ vmid }}"
+
+- name: Detach disk and leave it unused
+ tags: ['detach_disk']
+ block:
+ - name: Detach disk
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ state: detached
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} detached from VM {{ vmid }}"
+
+- name: Move disk to another storage or another VM
+ tags: ['move_disk']
+ block:
+ - name: Move disk to another storage inside same VM
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ target_storage: "{{ target_storage }}"
+ format: "{{ target_format }}"
+ state: moved
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
+
+ - name: Move disk to another VM (same storage)
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ vmid }}"
+ disk: "{{ disk }}"
+ target_vmid: "{{ target_vm }}"
+ target_disk: "{{ target_disk }}"
+ state: moved
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ vmid }}
+ - results.msg == "Disk {{ disk }} moved from VM {{ vmid }} storage {{ results.storage }}"
+
+
+- name: Remove disk permanently
+ tags: ['remove_disk']
+ block:
+ - name: Remove disk
+ proxmox_disk:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ vmid: "{{ target_vm }}"
+ disk: "{{ target_disk }}"
+ state: absent
+ register: results
+
+ - assert:
+ that:
+ - results is changed
+ - results.vmid == {{ target_vm }}
+ - results.msg == "Disk {{ target_disk }} removed from VM {{ target_vm }}"
+
+- name: VM stop
+ tags: [ 'stop' ]
+ block:
+ - name: Stop test VM
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: stopped
+ register: results_action_stop
+
+ - assert:
+ that:
+ - results_action_stop is changed
+ - results_action_stop.status == 'running'
+ - results_action_stop.vmid == {{ vmid }}
+ - results_action_stop.msg == "VM {{ vmid }} is shutting down"
+
+ - pause:
+ seconds: 5
+
+ - name: Check current status again
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: current
+ register: results_action_current
+
+ - assert:
+ that:
+ - results_action_current is not changed
+ - results_action_current.status == 'stopped'
+ - results_action_current.vmid == {{ vmid }}
+ - results_action_current.msg == "VM test-instance with vmid = {{ vmid }} is stopped"
+
+- name: VM destroy
+ tags: [ 'destroy' ]
+ block:
+ - name: Destroy test VM
+ proxmox_kvm:
+ api_host: "{{ api_host }}"
+ api_user: "{{ user }}@{{ domain }}"
+ api_password: "{{ api_password | default(omit) }}"
+ api_token_id: "{{ api_token_id | default(omit) }}"
+ api_token_secret: "{{ api_token_secret | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ proxmox_default_behavior: "no_defaults"
+ node: "{{ node }}"
+ vmid: "{{ vmid }}"
+ state: absent
+ register: results_kvm_destroy
+
+ - assert:
+ that:
+ - results_kvm_destroy is changed
+ - results_kvm_destroy.vmid == {{ vmid }}
+ - results_kvm_destroy.msg == "VM {{ vmid }} removed"
diff --git a/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases b/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/python_requirements_info/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml
new file mode 100644
index 000000000..24a7d1366
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/python_requirements_info/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: run python_requirements_info module
+ python_requirements_info:
+ register: basic_info
+
+- name: ensure python_requirements_info returns desired info
+ assert:
+ that:
+ - "'python' in basic_info"
+ - "'python_version' in basic_info"
+ - basic_info.python_version_info == ansible_python.version
+
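+# Dependency specs use pip-style requirement strings: 'notreal<1' exercises
+# the not_found result, 'pip>1' the valid/installed result.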
+- name: run python_requirements_info module
+ python_requirements_info:
+ dependencies:
+ - notreal<1
+ - pip>1
+ register: dep_info
+
+- name: ensure python_requirements_info returns desired info
+ assert:
+ that:
+ - "'installed' in dep_info.valid.pip"
+ - "'notreal' in dep_info.not_found"
+
+- name: wrong specs
+ python_requirements_info:
+ dependencies:
+ - ansible<
+ register: wrong_spec1
+ ignore_errors: true
+
+- name: ensure wrong specs return error
+ assert:
+ that:
+ - wrong_spec1 is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/read_csv/aliases b/ansible_collections/community/general/tests/integration/targets/read_csv/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/read_csv/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/read_csv/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/read_csv/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/read_csv/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml
new file mode 100644
index 000000000..c09349dd5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/read_csv/tasks/main.yml
@@ -0,0 +1,176 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
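+# The users file content is defined once under the YAML anchor &users_content
+# so the BOM test near the end can reuse it verbatim via *users_content.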
+# Create basic CSV file
+- name: Create unique CSV file
+ copy:
+ content: &users_content |
+ name,uid,gid,gecos
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,Jeroen Hoekx
+ dest: "{{ remote_tmp_dir }}/users_unique.csv"
+
+# Read a CSV file and access user 'dag'
+- name: Read users from CSV file and return a dictionary
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_unique.csv"
+ key: name
+ register: users_unique
+
+- assert:
+ that:
+ - users_unique.dict.dag.name == 'dag'
+ - users_unique.dict.dag.gecos == 'Dag Wieërs'
+ - users_unique.dict.dag.uid == '500'
+ - users_unique.dict.dag.gid == '500'
+ - users_unique.dict.jeroen.name == 'jeroen'
+ - users_unique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_unique.dict.jeroen.uid == '501'
+ - users_unique.dict.jeroen.gid == '500'
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_unique.csv"
+ register: users_unique
+
+- assert:
+ that:
+ - users_unique.list.0.name == 'dag'
+ - users_unique.list.0.gecos == 'Dag Wieërs'
+ - users_unique.list.0.uid == '500'
+ - users_unique.list.0.gid == '500'
+ - users_unique.list.1.name == 'jeroen'
+ - users_unique.list.1.gecos == 'Jeroen Hoekx'
+ - users_unique.list.1.uid == '501'
+ - users_unique.list.1.gid == '500'
+
+
+# Create basic CSV file using semi-colon
+- name: Create non-unique CSV file using semi-colon
+ copy:
+ content: |
+ name;uid;gid;gecos
+ dag;500;500;Dag Wieërs
+ jeroen;501;500;Jeroen Hoekx
+ dag;502;500;Dag Wieers
+ dest: "{{ remote_tmp_dir }}/users_nonunique.csv"
+
+# Read a CSV file and access user 'dag'
+- name: Read users from semicolon-delimited CSV file and return a dictionary
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_nonunique.csv"
+ key: name
+ unique: false
+ delimiter: ';'
+ register: users_nonunique
+
+- assert:
+ that:
+ - users_nonunique.dict.dag.name == 'dag'
+ - users_nonunique.dict.dag.gecos == 'Dag Wieers'
+ - users_nonunique.dict.dag.uid == '502'
+ - users_nonunique.dict.dag.gid == '500'
+ - users_nonunique.dict.jeroen.name == 'jeroen'
+ - users_nonunique.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_nonunique.dict.jeroen.uid == '501'
+ - users_nonunique.dict.jeroen.gid == '500'
+
+
+# Read a CSV file using a non-existent dialect
+- name: Read users from CSV file using a non-existent dialect
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_nonunique.csv"
+ dialect: placebo
+ register: users_placebo
+ ignore_errors: true
+
+- assert:
+ that:
+ - users_placebo is failed
+ - users_placebo.msg == "Dialect 'placebo' is not supported by your version of python."
+
+
+# Create basic CSV file without header
+- name: Create unique CSV file without header
+ copy:
+ content: |
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,Jeroen Hoekx
+ dest: "{{ remote_tmp_dir }}/users_noheader.csv"
+
+# Read a CSV file and access user 'dag'
+- name: Read users from headerless CSV file and return a dictionary
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_noheader.csv"
+ key: name
+ fieldnames: name,uid,gid,gecos
+ register: users_noheader
+
+- assert:
+ that:
+ - users_noheader.dict.dag.name == 'dag'
+ - users_noheader.dict.dag.gecos == 'Dag Wieërs'
+ - users_noheader.dict.dag.uid == '500'
+ - users_noheader.dict.dag.gid == '500'
+ - users_noheader.dict.jeroen.name == 'jeroen'
+ - users_noheader.dict.jeroen.gecos == 'Jeroen Hoekx'
+ - users_noheader.dict.jeroen.uid == '501'
+ - users_noheader.dict.jeroen.gid == '500'
+
+
+# Create broken CSV file
+- name: Create broken CSV file
+ copy:
+ content: |
+ name,uid,gid,gecos
+ dag,500,500,Dag Wieërs
+ jeroen,501,500,"Jeroen"Hoekx"
+ dest: "{{ remote_tmp_dir }}/users_broken.csv"
+
+# Read a broken CSV file using strict
+- name: Read users from a broken CSV file
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_broken.csv"
+ key: name
+ strict: true
+ register: users_broken
+ ignore_errors: true
+
+- assert:
+ that:
+ - users_broken is failed
+ - "'Unable to process file' in users_broken.msg"
+
+# Create basic CSV file with BOM
+- name: Create unique CSV file with BOM
+ copy:
+ content: "{{ bom + content }}"
+ dest: "{{ remote_tmp_dir }}/users_bom.csv"
+ vars:
+ content: *users_content
+ bom: "{{ '\ufeff' }}"
+
+# Read a CSV file and access the first item
+- name: Read users from CSV file and return a list
+ read_csv:
+ path: "{{ remote_tmp_dir }}/users_bom.csv"
+ register: users_bom
+
+- assert:
+ that:
+ - users_bom.list.0.name == 'dag'
+ - users_bom.list.0.gecos == 'Dag Wieërs'
+ - users_bom.list.0.uid == '500'
+ - users_bom.list.0.gid == '500'
+ - users_bom.list.1.name == 'jeroen'
+ - users_bom.list.1.gecos == 'Jeroen Hoekx'
+ - users_bom.list.1.uid == '501'
+ - users_bom.list.1.gid == '500'
diff --git a/ansible_collections/community/general/tests/integration/targets/redis_info/aliases b/ansible_collections/community/general/tests/integration/targets/redis_info/aliases
new file mode 100644
index 000000000..1f1c4baf7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/redis_info/aliases
@@ -0,0 +1,10 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml
new file mode 100644
index 000000000..56e9c4386
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/redis_info/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+redis_password: PASS
+master_port: 6379
+replica_port: 6380
diff --git a/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml
new file mode 100644
index 000000000..cd516fd23
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/redis_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+- setup_redis_replication
diff --git a/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml
new file mode 100644
index 000000000..4a11de365
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/redis_info/tasks/main.yml
@@ -0,0 +1,48 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
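+# The setup_redis_replication dependency (see meta/main.yml) provides a
+# password-protected master and replica on the ports set in defaults/main.yml.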
+- name: redis_info - connect to master with default host/port
+ community.general.redis_info:
+ login_password: "{{ redis_password }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
+
+- name: redis_info - connect to master (check)
+ community.general.redis_info:
+ login_host: 127.0.0.1
+ login_port: "{{ master_port }}"
+ login_password: "{{ redis_password }}"
+ check_mode: true
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == master_port
+ - result.info.role == 'master'
+
+- name: redis_info - connect to replica
+ community.general.redis_info:
+ login_port: "{{ replica_port }}"
+ login_password: "{{ redis_password }}"
+ register: result
+
+- assert:
+ that:
+ - result is not changed
+ - result.info is defined
+ - result.info.tcp_port == replica_port
+ - result.info.role == 'slave'
diff --git a/ansible_collections/community/general/tests/integration/targets/rundeck/aliases b/ansible_collections/community/general/tests/integration/targets/rundeck/aliases
new file mode 100644
index 000000000..3cf494c4c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/rundeck/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
+skip/windows
+skip/freebsd
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/rundeck/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/rundeck/defaults/main.yml
new file mode 100644
index 000000000..4d7ea3146
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/rundeck/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+rundeck_url: http://localhost:4440
+rundeck_api_version: 39
+rundeck_job_id: 3b8a6e54-69fb-42b7-b98f-f82e59238478
diff --git a/ansible_collections/community/general/tests/integration/targets/rundeck/files/test_job.yaml b/ansible_collections/community/general/tests/integration/targets/rundeck/files/test_job.yaml
new file mode 100644
index 000000000..baa852ecc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/rundeck/files/test_job.yaml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- defaultTab: nodes
+ description: ''
+ executionEnabled: true
+ id: 3b8a6e54-69fb-42b7-b98f-f82e59238478
+ loglevel: INFO
+ name: test_job
+ nodeFilterEditable: false
+ options:
+ - label: Exit Code
+ name: exit_code
+ value: '0'
+ - label: Sleep
+ name: sleep
+ value: '1'
+ plugins:
+ ExecutionLifecycle: null
+ scheduleEnabled: true
+ sequence:
+ commands:
+ - exec: sleep $RD_OPTION_SLEEP && echo "Test done!" && exit $RD_OPTION_EXIT_CODE
+ keepgoing: false
+ strategy: node-first
+ uuid: 3b8a6e54-69fb-42b7-b98f-f82e59238478
diff --git a/ansible_collections/community/general/tests/integration/targets/rundeck/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/rundeck/meta/main.yml
new file mode 100644
index 000000000..c125e4046
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/rundeck/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+- setup_rundeck
diff --git a/ansible_collections/community/general/tests/integration/targets/rundeck/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/rundeck/tasks/main.yml
new file mode 100644
index 000000000..e42780b9b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/rundeck/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
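+# The CLI prints the new token at the end of its output; the tasks below take
+# the last line (stdout_lines[-1]) as the token value.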
+- name: Generate a Rundeck API Token
+ ansible.builtin.command: java -jar {{ rdeck_base }}/rundeck-cli.jar tokens create -u admin -d 24h -r admin
+ environment:
+ RD_URL: "{{ rundeck_url }}"
+ RD_USER: admin
+ RD_PASSWORD: admin
+ register: rundeck_api_token
+
+- name: Create a Rundeck project
+ community.general.rundeck_project:
+ name: "test_project"
+ api_version: "{{ rundeck_api_version }}"
+ url: "{{ rundeck_url }}"
+ token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ state: present
+
+- name: Copy test_job definition to /tmp
+ copy:
+ src: test_job.yaml
+ dest: /tmp/test_job.yaml
+
+- name: Create Rundeck job Test
+ ansible.builtin.command: java -jar {{ rdeck_base }}/rundeck-cli.jar jobs load -f /tmp/test_job.yaml -F yaml -p test_project
+ environment:
+ RD_URL: "{{ rundeck_url }}"
+ RD_USER: admin
+ RD_PASSWORD: admin
+
+- name: Wrong Rundeck API Token
+ community.general.rundeck_job_run:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: wrong_token
+ job_id: "{{ rundeck_job_id }}"
+ ignore_errors: true
+ register: rundeck_job_run_wrong_token
+
+- name: Assert that Rundeck authorization failed
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_run_wrong_token.msg == "Token authorization failed"
+
+- name: Successfully run Rundeck job test_job
+ community.general.rundeck_job_run:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ job_id: "{{ rundeck_job_id }}"
+ register: rundeck_job_run_success
+
+- name: Assert that Rundeck job test_job runs successfully
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_run_success.execution_info.status == "succeeded"
+
+- name: Run Rundeck job test_job with a failing exit code
+ community.general.rundeck_job_run:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ job_id: "{{ rundeck_job_id }}"
+ job_options:
+ exit_code: "1"
+ ignore_errors: true
+ register: rundeck_job_run_fail
+
+- name: Assert that Rundeck job test_job failed
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_run_fail.execution_info.status == "failed"
+
+- name: Abort Rundeck job test_job run due to timeout
+ community.general.rundeck_job_run:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ job_id: "{{ rundeck_job_id }}"
+ job_options:
+ sleep: "5"
+ wait_execution_timeout: 2
+ abort_on_timeout: true
+ ignore_errors: true
+ register: rundeck_job_run_aborted
+
+- name: Assert that Rundeck job test_job is aborted
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_run_aborted.execution_info.status == "aborted"
+
+- name: Fire-and-forget run Rundeck job test_job
+ community.general.rundeck_job_run:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ job_id: "{{ rundeck_job_id }}"
+ job_options:
+ sleep: "5"
+ wait_execution: false
+ register: rundeck_job_run_forget
+
+- name: Assert that Rundeck job test_job is running
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_run_forget.execution_info.status == "running"
+
+- name: Get Rundeck job test_job executions info
+ community.general.rundeck_job_executions_info:
+ url: "{{ rundeck_url }}"
+ api_version: "{{ rundeck_api_version }}"
+ api_token: "{{ rundeck_api_token.stdout_lines[-1] }}"
+ job_id: "{{ rundeck_job_id }}"
+ register: rundeck_job_executions_info
+
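+# Four executions have been triggered above (success, failure, aborted, and
+# fire-and-forget); the wrong-token attempt never started one.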
+- name: Assert that Rundeck job executions info has 4 entries
+ ansible.builtin.assert:
+ that:
+ - rundeck_job_executions_info.paging.total | int == 4
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml
new file mode 100644
index 000000000..dfa104bdd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The information below is taken from https://developer.scaleway.com/#servers
+scaleway_image_id: 6a601340-19c1-4ca7-9c1c-0704bcc9f5fe
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_commerial_type: START1-S
+scaleway_name: scaleway_compute_test
+first_server_name: scaleway_compute_test_first
+second_server_name: scaleway_compute_test_second
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml
new file mode 100644
index 000000000..094176ce8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/ip.yml
@@ -0,0 +1,206 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
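+# Each state change is exercised three times: once in check mode, once for
+# real, and once more to confirm idempotency (no change reported).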
+- name: Create a server with no IP (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: server_creation_absent_check_task
+
+- debug: var=server_creation_absent_check_task
+
+- assert:
+ that:
+ - server_creation_absent_check_task is success
+ - server_creation_absent_check_task is changed
+
+- name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_absent_task
+
+- debug: var=server_creation_absent_task
+
+- assert:
+ that:
+ - server_creation_absent_task is success
+ - server_creation_absent_task is changed
+
+- name: Create a server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_absent_confirmation_task
+
+- debug: var=server_creation_absent_confirmation_task
+
+- assert:
+ that:
+ - server_creation_absent_confirmation_task is success
+ - server_creation_absent_confirmation_task is not changed
+
+# Add a dynamic IP to the instance
+
+- name: Add a dynamic IP to the server (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: ip_patching_check_task
+
+- debug: var=ip_patching_check_task
+
+- assert:
+ that:
+ - ip_patching_check_task is success
+ - ip_patching_check_task is changed
+
+- name: Add a dynamic IP to the server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ register: ip_patching_task
+
+- debug: var=ip_patching_task
+
+- assert:
+ that:
+ - ip_patching_task is success
+ - ip_patching_task is changed
+
+- name: Add a dynamic IP to the server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: dynamic
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: ip_patching_confirmation_task
+
+- debug: var=ip_patching_confirmation_task
+
+- assert:
+ that:
+ - ip_patching_confirmation_task is success
+ - ip_patching_confirmation_task is not changed
+
+# Remove dynamic IP
+
+- name: Remove the dynamic IP (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: remove_ip_check_task
+
+- debug: var=remove_ip_check_task
+
+- assert:
+ that:
+ - remove_ip_check_task is success
+ - remove_ip_check_task is changed
+
+- name: Remove the dynamic IP
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: remove_ip_task
+
+- debug: var=remove_ip_task
+
+- assert:
+ that:
+ - remove_ip_task is success
+ - remove_ip_task is changed
+
+- name: Remove the dynamic IP (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ public_ip: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: remove_ip_confirmation_task
+
+- debug: var=remove_ip_confirmation_task
+
+- assert:
+ that:
+ - remove_ip_confirmation_task is success
+ - remove_ip_confirmation_task is not changed
+
+- name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml
new file mode 100644
index 000000000..eca689b40
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: state.yml
+- include_tasks: ip.yml
+- include_tasks: security_group.yml
+- include_tasks: pagination.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml
new file mode 100644
index 000000000..5a674b801
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/pagination.yml
@@ -0,0 +1,76 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
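+# Two servers are created so that, with per_page=1, page 1 and page 2 each
+# return a different server.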
+- name: Create a first server
+ scaleway_compute:
+ name: '{{ first_server_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Create a second server
+ scaleway_compute:
+ name: '{{ second_server_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Get server information for the first page
+ scaleway_server_info:
+ region: par1
+ query_parameters:
+ per_page: 1
+ page: 1
+ register: first_page
+
+- debug: var=first_page
+
+- assert:
+ that:
+ - first_page is success
+
+- name: Get server information for the second page
+ scaleway_server_info:
+ region: par1
+ query_parameters:
+ per_page: 1
+ page: 2
+ register: second_page
+
+- debug: var=second_page
+
+- assert:
+ that:
+ - second_page is success
+
+- assert:
+ that:
+ - first_page.scaleway_server_info[0].id != second_page.scaleway_server_info[0].id
+
+- name: Delete first server
+ scaleway_compute:
+ name: '{{ first_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+- name: Delete second server
+ scaleway_compute:
+ name: '{{ second_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml
new file mode 100644
index 000000000..59f81e6af
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/security_group.yml
@@ -0,0 +1,152 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a scaleway security_group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group
+
+- debug: var=security_group
+
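+# The block/always structure ensures the server and the security group are
+# cleaned up even if one of the assertions in the block fails.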
+- block:
+ - name: Create a server with security_group (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+
+ register: server_creation_check_task
+
+ - debug: var=server_creation_check_task
+
+ - assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+ - name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_task
+
+ - debug: var=server_creation_task
+
+ - assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+ - name: Create a server with security_group (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ - name: Keep current security_group (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ - name: Keep current security_group
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+ - debug: var=server_creation_confirmation_task
+
+ - assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+ always:
+ - name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_destroy_task
+
+ - debug: var=server_destroy_task
+
+ - assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
+
+ - name: Delete the scaleway security_group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml
new file mode 100644
index 000000000..b3f256762
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_compute/tasks/state.yml
@@ -0,0 +1,392 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
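+# Every scenario below follows the same idempotency pattern: a check-mode run
+# that must report a change, a real run that applies it, and a confirmation
+# run that must report no change.
+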
+- name: Create a server (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+
+ register: server_creation_check_task
+
+- debug: var=server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+- name: Create a server
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- assert:
+ that:
+ - server_creation_task is success
+ - server_creation_task is changed
+
+- name: Create a server (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+
+ register: server_creation_confirmation_task
+
+- debug: var=server_creation_confirmation_task
+
+- assert:
+ that:
+ - server_creation_confirmation_task is success
+ - server_creation_confirmation_task is not changed
+
+- name: Patch server tags (Check)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_patching_check_task
+
+- debug: var=server_patching_check_task
+
+- assert:
+ that:
+ - server_patching_check_task is success
+ - server_patching_check_task is changed
+
+- name: Patch server tags
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_patching_task
+
+- debug: var=server_patching_task
+
+- assert:
+ that:
+ - server_patching_task is success
+ - server_patching_task is changed
+
+- name: Patch server tags (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_patching_confirmation_task
+
+- debug: var=server_patching_confirmation_task
+
+- assert:
+ that:
+ - server_patching_confirmation_task is success
+ - server_patching_confirmation_task is not changed
+
+- name: Run it (Check mode)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_run_check_task
+
+- debug: var=server_run_check_task
+
+- assert:
+ that:
+ - server_run_check_task is success
+ - server_run_check_task is changed
+
+- name: Run it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_run_task
+
+- debug: var=server_run_task
+
+- assert:
+ that:
+ - server_run_task is success
+ - server_run_task is changed
+
+- name: Run it (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: running
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_run_confirmation_task
+
+- debug: var=server_run_confirmation_task
+
+- assert:
+ that:
+ - server_run_confirmation_task is success
+ - server_run_confirmation_task is not changed
+
+- name: Reboot it (Check mode)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: restarted
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_reboot_check_task
+
+- debug: var=server_reboot_check_task
+
+- assert:
+ that:
+ - server_reboot_check_task is success
+ - server_reboot_check_task is changed
+
+- name: Reboot it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: restarted
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_reboot_task
+
+- debug: var=server_reboot_task
+
+- assert:
+ that:
+ - server_reboot_task is success
+ - server_reboot_task is changed
+
+- name: Stop it (Check mode)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_stop_check_task
+
+- debug: var=server_stop_check_task
+
+- assert:
+ that:
+ - server_stop_check_task is success
+ - server_stop_check_task is changed
+
+- name: Stop it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_stop_task
+
+- debug: var=server_stop_task
+
+- assert:
+ that:
+ - server_stop_task is success
+ - server_stop_task is changed
+
+- name: Stop it (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: stopped
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_stop_confirmation_task
+
+- debug: var=server_stop_confirmation_task
+
+- assert:
+ that:
+ - server_stop_confirmation_task is success
+ - server_stop_confirmation_task is not changed
+
+- name: Destroy it (Check mode)
+ check_mode: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ tags:
+ - test
+ - www
+ register: server_destroy_check_task
+
+- debug: var=server_destroy_check_task
+
+- assert:
+ that:
+ - server_destroy_check_task is success
+ - server_destroy_check_task is changed
+
+- name: Destroy it
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- assert:
+ that:
+ - server_destroy_task is success
+ - server_destroy_task is changed
+
+- name: Destroy it (Confirmation)
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ wait: true
+ tags:
+ - test
+ - www
+ register: server_destroy_confirmation_task
+
+- debug: var=server_destroy_confirmation_task
+
+- assert:
+ that:
+ - server_destroy_confirmation_task is success
+ - server_destroy_confirmation_task is not changed
+
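+# Negative tests: both calls below are expected to fail, so errors are ignored
+# and the asserts check for failure without any reported change.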
+- name: Testing for unauthorized organization
+ ignore_errors: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: this-organization-does-not-exists
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ register: unauthorized_organization_task
+
+- debug: var=unauthorized_organization_task
+
+- assert:
+ that:
+ - unauthorized_organization_task is not success
+ - unauthorized_organization_task is not changed
+
+- name: Testing for nonexistent image
+ ignore_errors: true
+ scaleway_compute:
+ name: '{{ scaleway_name }}'
+ state: present
+ image: this-image-does-not-exists
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commerial_type }}'
+ register: unexisting_image_check
+
+- debug: var=unexisting_image_check
+
+- assert:
+ that:
+ - unexisting_image_check is not success
+ - unexisting_image_check is not changed
+ - unexisting_image_check.msg == "Error in getting image this-image-does-not-exists on https://cp-{{ scaleway_region }}.scaleway.com"
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container/defaults/main.yml
new file mode 100644
index 000000000..01b8719fc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+container_namespace_name: cn-ansible-test
+name: cn-ansible-test
+description: Container used for testing scaleway_container ansible module
+updated_description: Container used for testing scaleway_container ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+updated_secret_environment_variables:
+ MY_SECRET_VAR: my_other_secret_value
+image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest
+port: 80
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container/tasks/main.yml
new file mode 100644
index 000000000..d0bf4206f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container/tasks/main.yml
@@ -0,0 +1,290 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
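+# Note: scw_project is not defined in this target's defaults and is assumed to
+# be supplied by the integration test configuration.
+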
+- name: Create container_namespace
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ container_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: integration_container_namespace
+
+- name: Create a container (Check)
+ check_mode: true
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_creation_check_task
+
+- ansible.builtin.debug:
+ var: cn_creation_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_check_task is success
+ - cn_creation_check_task is changed
+
+- name: Create container
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_creation_task
+
+- ansible.builtin.debug:
+ var: cn_creation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_task is success
+ - cn_creation_task is changed
+ - cn_creation_task.container.status in ["created", "ready"]
+
+- name: Create container (Confirmation)
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_creation_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_creation_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_confirmation_task is success
+ - cn_creation_confirmation_task is not changed
+ - cn_creation_confirmation_task.container.status in ["created", "ready"]
+
+- name: Update container (Check)
+ check_mode: true
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_check_task
+
+- ansible.builtin.debug:
+ var: cn_update_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_check_task is success
+ - cn_update_check_task is changed
+
+- name: Update container
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_task
+
+- ansible.builtin.debug:
+ var: cn_update_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_task is success
+ - cn_update_task is changed
+ - cn_update_task.container.status in ["created", "ready"]
+
+- name: Update container (Confirmation)
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_update_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_confirmation_task is success
+ - cn_update_confirmation_task is not changed
+ - cn_update_confirmation_task.container.status in ["created", "ready"]
+
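+# The API returns secret environment variables hashed, so the asserts below
+# check for a hashed_value key rather than the plaintext value.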
+- name: Update container secret variables (Check)
+ check_mode: true
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_secret_check_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_check_task is success
+ - cn_update_secret_check_task is changed
+
+- name: Update container secret variables
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_secret_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_task is success
+ - cn_update_secret_task is changed
+ - cn_update_secret_task.container.status in ["created", "ready"]
+ - "'hashed_value' in cn_update_secret_task.container.secret_environment_variables[0]"
+
+- name: Update container secret variables (Confirmation)
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ port: '{{ port }}'
+ register: cn_update_secret_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_confirmation_task is success
+ - cn_update_secret_confirmation_task is not changed
+ - cn_update_secret_confirmation_task.container.status == "ready"
+ - "'hashed_value' in cn_update_secret_confirmation_task.container.secret_environment_variables[0]"
+
+- name: Delete container (Check)
+ check_mode: true
+ community.general.scaleway_container:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ register: cn_deletion_check_task
+
+- ansible.builtin.debug:
+ var: cn_deletion_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_check_task is success
+ - cn_deletion_check_task is changed
+
+- name: Delete container
+ community.general.scaleway_container:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ register: cn_deletion_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_task is success
+ - cn_deletion_task is changed
+
+- name: Delete container (Confirmation)
+ community.general.scaleway_container:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ register: cn_deletion_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_deletion_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_confirmation_task is success
+ - cn_deletion_confirmation_task is not changed
+
+- name: Delete container namespace
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ container_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/defaults/main.yml
new file mode 100644
index 000000000..f3dadf71a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/defaults/main.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+container_namespace_name: cn-ansible-test
+name: cn-ansible-test
+description: Container used for testing scaleway_container_info ansible module
+updated_description: Container used for testing scaleway_container_info ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+image: rg.fr-par.scw.cloud/namespace-ansible-ci/nginx:latest
+port: 80
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/tasks/main.yml
new file mode 100644
index 000000000..9f9fe401c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_info/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create container_namespace
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ container_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: integration_container_namespace
+
+- name: Create container
+ community.general.scaleway_container:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ port: '{{ port }}'
+
+- name: Get container info
+ community.general.scaleway_container_info:
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ register: cn_info_task
+
+- ansible.builtin.debug:
+ var: cn_info_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_info_task is success
+ - cn_info_task is not changed
+
+- name: Delete container
+ community.general.scaleway_container:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_container_namespace.container_namespace.id }}'
+ registry_image: '{{ image }}'
+
+- name: Delete container namespace
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ container_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/defaults/main.yml
new file mode 100644
index 000000000..876f8b7a6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: cn-ansible-test
+description: Container namespace used for testing scaleway_container_namespace ansible module
+updated_description: Container namespace used for testing scaleway_container_namespace ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+updated_secret_environment_variables:
+ MY_SECRET_VAR: my_other_secret_value
\ No newline at end of file
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/tasks/main.yml
new file mode 100644
index 000000000..73e43ff15
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace/tasks/main.yml
@@ -0,0 +1,255 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a container namespace (Check)
+ check_mode: true
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_creation_check_task
+
+- ansible.builtin.debug:
+ var: cn_creation_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_check_task is success
+ - cn_creation_check_task is changed
+
+- name: Create container_namespace
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_creation_task
+
+- ansible.builtin.debug:
+ var: cn_creation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_task is success
+ - cn_creation_task is changed
+ - cn_creation_task.container_namespace.status == "ready"
+
+- name: Create container namespace (Confirmation)
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_creation_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_creation_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_creation_confirmation_task is success
+ - cn_creation_confirmation_task is not changed
+ - cn_creation_confirmation_task.container_namespace.status == "ready"
+
+- name: Update container namespace (Check)
+ check_mode: true
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_update_check_task
+
+- ansible.builtin.debug:
+ var: cn_update_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_check_task is success
+ - cn_update_check_task is changed
+
+- name: Update container namespace
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_update_task
+
+- ansible.builtin.debug:
+ var: cn_update_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_task is success
+ - cn_update_task is changed
+ - cn_update_task.container_namespace.status == "ready"
+
+- name: Update container namespace (Confirmation)
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: cn_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_update_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_confirmation_task is success
+ - cn_update_confirmation_task is not changed
+ - cn_update_confirmation_task.container_namespace.status == "ready"
+
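+# As with containers, namespace secrets come back hashed; assert on hashed_value.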
+- name: Update container namespace secret variables (Check)
+ check_mode: true
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: cn_update_secret_check_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_check_task is success
+ - cn_update_secret_check_task is changed
+
+- name: Update container namespace secret variables
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: cn_update_secret_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_task is success
+ - cn_update_secret_task is changed
+ - cn_update_secret_task.container_namespace.status == "ready"
+ - "'hashed_value' in cn_update_secret_task.container_namespace.secret_environment_variables[0]"
+
+- name: Update container namespace secret variables (Confirmation)
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: cn_update_secret_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_update_secret_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_update_secret_confirmation_task is success
+ - cn_update_secret_confirmation_task is not changed
+ - cn_update_secret_confirmation_task.container_namespace.status == "ready"
+ - "'hashed_value' in cn_update_secret_confirmation_task.container_namespace.secret_environment_variables[0]"
+
+- name: Delete container namespace (Check)
+ check_mode: true
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cn_deletion_check_task
+
+- ansible.builtin.debug:
+ var: cn_deletion_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_check_task is success
+ - cn_deletion_check_task is changed
+
+- name: Delete container namespace
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cn_deletion_task
+
+- ansible.builtin.debug:
+ var: cn_deletion_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_task is success
+ - cn_deletion_task is changed
+
+- name: Delete container namespace (Confirmation)
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cn_deletion_confirmation_task
+
+- ansible.builtin.debug:
+ var: cn_deletion_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_deletion_confirmation_task is success
+ - cn_deletion_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/defaults/main.yml
new file mode 100644
index 000000000..238f79fe5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: cn-ansible-test
+description: Container namespace used for testing scaleway_container_namespace_info ansible module
+updated_description: Container namespace used for testing scaleway_container_namespace_info ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/tasks/main.yml
new file mode 100644
index 000000000..17ac07e81
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_namespace_info/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create container_namespace
+ community.general.scaleway_container_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+
+- name: Get container namespace info
+ community.general.scaleway_container_namespace_info:
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ register: cn_info_task
+
+- ansible.builtin.debug:
+ var: cn_info_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cn_info_task is success
+ - cn_info_task is not changed
+
+- name: Delete container namespace
+ community.general.scaleway_container_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/defaults/main.yml
new file mode 100644
index 000000000..73b31423f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: cr_ansible_test
+description: Container registry used for testing scaleway_container_registry ansible module
+updated_description: Container registry used for testing scaleway_container_registry ansible module (Updated description)
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/tasks/main.yml
new file mode 100644
index 000000000..91cea20f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry/tasks/main.yml
@@ -0,0 +1,178 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a container registry (Check)
+ check_mode: true
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: cr_creation_check_task
+
+- ansible.builtin.debug:
+ var: cr_creation_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_creation_check_task is success
+ - cr_creation_check_task is changed
+
+- name: Create container_registry
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: cr_creation_task
+
+- ansible.builtin.debug:
+ var: cr_creation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_creation_task is success
+ - cr_creation_task is changed
+ - cr_creation_task.container_registry.status == "ready"
+
+- name: Create container registry (Confirmation)
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: cr_creation_confirmation_task
+
+- ansible.builtin.debug:
+ var: cr_creation_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_creation_confirmation_task is success
+ - cr_creation_confirmation_task is not changed
+ - cr_creation_confirmation_task.container_registry.status == "ready"
+
+- name: Update container registry (Check)
+ check_mode: true
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ register: cr_update_check_task
+
+- ansible.builtin.debug:
+ var: cr_update_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_update_check_task is success
+ - cr_update_check_task is changed
+
+- name: Update container registry
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ register: cr_update_task
+
+- ansible.builtin.debug:
+ var: cr_update_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_update_task is success
+ - cr_update_task is changed
+ - cr_update_task.container_registry.status == "ready"
+
+- name: Update container registry (Confirmation)
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ register: cr_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: cr_update_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_update_confirmation_task is success
+ - cr_update_confirmation_task is not changed
+ - cr_update_confirmation_task.container_registry.status == "ready"
+
+- name: Delete container registry (Check)
+ check_mode: true
+ community.general.scaleway_container_registry:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cr_deletion_check_task
+
+- ansible.builtin.debug:
+ var: cr_deletion_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_deletion_check_task is success
+ - cr_deletion_check_task is changed
+
+- name: Delete container registry
+ community.general.scaleway_container_registry:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cr_deletion_task
+
+- ansible.builtin.debug:
+ var: cr_deletion_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_deletion_task is success
+ - cr_deletion_task is changed
+
+- name: Delete container registry (Confirmation)
+ community.general.scaleway_container_registry:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: cr_deletion_confirmation_task
+
+- ansible.builtin.debug:
+ var: cr_deletion_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_deletion_confirmation_task is success
+ - cr_deletion_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/defaults/main.yml
new file mode 100644
index 000000000..8c53a31e7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: cr_ansible_test
+description: Container registry used for testing scaleway_container_registry_info ansible module
+updated_description: Container registry used for testing scaleway_container_registry_info ansible module (Updated description)
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/tasks/main.yml
new file mode 100644
index 000000000..7de4d245a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_container_registry_info/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create container_registry
+ community.general.scaleway_container_registry:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+
+- name: Get container registry info
+ community.general.scaleway_container_registry_info:
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ register: cr_info_task
+
+- ansible.builtin.debug:
+ var: cr_info_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - cr_info_task is success
+ - cr_info_task is not changed
+
+- name: Delete container registry
+ community.general.scaleway_container_registry:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml
new file mode 100644
index 000000000..1a5306ecf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_name: scaleway_database_backup_test
+scaleway_region: fr-par
+scaleway_database_name: scaleway_database_test
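+# No usable default: supply the ID of an existing database instance at runtime.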
+scaleway_instance_id:
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml
new file mode 100644
index 000000000..fdef03fb3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_database_backup/tasks/main.yml
@@ -0,0 +1,238 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
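+# Backup lifecycle covered here: present (create) -> exported -> restored ->
+# absent, each with check-mode and confirmation runs where applicable.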
+- name: Create a backup (Check)
+ check_mode: true
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+
+ register: backup_creation_check_task
+
+- debug: var=backup_creation_check_task
+
+- assert:
+ that:
+ - backup_creation_check_task is success
+ - backup_creation_check_task is changed
+
+- name: Create a backup
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ wait: true
+
+ register: backup_creation_task
+
+- debug: var=backup_creation_task
+
+- assert:
+ that:
+ - backup_creation_task is success
+ - backup_creation_task is changed
+
+- name: Create a backup (Confirmation)
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+
+ register: backup_creation_confirmation_task
+
+- debug: var=backup_creation_confirmation_task
+
+- assert:
+ that:
+ - backup_creation_confirmation_task is success
+ - backup_creation_confirmation_task is not changed
+
+- name: Patch backup name (Check)
+ check_mode: true
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_check_task
+
+- debug: var=backup_patching_check_task
+
+- assert:
+ that:
+ - backup_patching_check_task is success
+ - backup_patching_check_task is changed
+
+- name: Patch backup name
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_task
+
+- debug: var=backup_patching_task
+
+- assert:
+ that:
+ - backup_patching_task is success
+ - backup_patching_task is changed
+
+- name: Patch backup name (Confirmation)
+ scaleway_database_backup:
+ name: '{{ scaleway_name }}-changed'
+ state: present
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ id: '{{ backup_creation_task.metadata.id }}'
+ register: backup_patching_confirmation_task
+
+- debug: var=backup_patching_confirmation_task
+
+- assert:
+ that:
+ - backup_patching_confirmation_task is success
+ - backup_patching_confirmation_task is not changed
+
+- name: Export backup (Check)
+ check_mode: true
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ register: backup_export_check_task
+
+- debug: var=backup_export_check_task
+
+- assert:
+ that:
+ - backup_export_check_task is success
+ - backup_export_check_task is changed
+
+- name: Export backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ wait: true
+ register: backup_export_task
+
+- debug: var=backup_export_task
+
+- assert:
+ that:
+ - backup_export_task is success
+ - backup_export_task is changed
+ - backup_export_task.metadata.download_url != ""
+
+- name: Export backup (Confirmation)
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: exported
+ region: '{{ scaleway_region }}'
+ register: backup_export_confirmation_task
+
+- debug: var=backup_export_confirmation_task
+
+- assert:
+ that:
+ - backup_export_confirmation_task is success
+ - backup_export_confirmation_task is not changed
+ - backup_export_confirmation_task.metadata.download_url != ""
+
+- name: Restore backup (Check)
+ check_mode: true
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: restored
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ register: backup_restore_check_task
+
+- debug: var=backup_restore_check_task
+
+- assert:
+ that:
+ - backup_restore_check_task is success
+ - backup_restore_check_task is changed
+
+- name: Restore backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: restored
+ region: '{{ scaleway_region }}'
+ database_name: '{{ scaleway_database_name }}'
+ instance_id: '{{ scaleway_instance_id }}'
+ wait: true
+ register: backup_restore_task
+
+- debug: var=backup_restore_task
+
+- assert:
+ that:
+ - backup_restore_task is success
+ - backup_restore_task is changed
+
+- name: Delete backup (Check)
+ check_mode: true
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_check_task
+
+- debug: var=backup_delete_check_task
+
+- assert:
+ that:
+ - backup_delete_check_task is success
+ - backup_delete_check_task is changed
+
+- name: Delete backup
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_task
+
+- debug: var=backup_delete_task
+
+- assert:
+ that:
+ - backup_delete_task is success
+ - backup_delete_task is changed
+
+- name: Delete backup (Confirmation)
+ scaleway_database_backup:
+ id: '{{ backup_creation_task.metadata.id }}'
+ state: absent
+ region: '{{ scaleway_region }}'
+ register: backup_delete_confirmation_task
+
+- debug: var=backup_delete_confirmation_task
+
+- assert:
+ that:
+ - backup_delete_confirmation_task is success
+ - backup_delete_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_function/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function/defaults/main.yml
new file mode 100644
index 000000000..df02a4665
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+function_namespace_name: fn-ansible-test
+name: fn-ansible-test
+description: Function used for testing scaleway_function ansible module
+updated_description: Function used for testing scaleway_function ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+updated_secret_environment_variables:
+ MY_SECRET_VAR: my_other_secret_value
+runtime: python310
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function/tasks/main.yml
new file mode 100644
index 000000000..d4552d0b3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function/tasks/main.yml
@@ -0,0 +1,284 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create function_namespace
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ function_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: integration_function_namespace
+
+- name: Create a function (Check)
+ check_mode: true
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_check_task
+
+- ansible.builtin.debug:
+ var: fn_creation_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_check_task is success
+ - fn_creation_check_task is changed
+
+- name: Create function
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_task
+
+- ansible.builtin.debug:
+ var: fn_creation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_task is success
+ - fn_creation_task is changed
+ - fn_creation_task.function.status in ["created", "ready"]
+
+- name: Create function (Confirmation)
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_creation_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_confirmation_task is success
+ - fn_creation_confirmation_task is not changed
+ - fn_creation_confirmation_task.function.status in ["created", "ready"]
+
+- name: Update function (Check)
+ check_mode: true
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_check_task
+
+- ansible.builtin.debug:
+ var: fn_update_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_check_task is success
+ - fn_update_check_task is changed
+
+- name: Update function
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_task
+
+- ansible.builtin.debug:
+ var: fn_update_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_task is success
+ - fn_update_task is changed
+ - fn_update_task.function.status in ["created", "ready"]
+
+- name: Update function (Confirmation)
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_update_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_confirmation_task is success
+ - fn_update_confirmation_task is not changed
+ - fn_update_confirmation_task.function.status in ["created", "ready"]
+
+- name: Update function secret variables (Check)
+ check_mode: true
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_check_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_check_task is success
+ - fn_update_secret_check_task is changed
+
+- name: Update function secret variables
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_task is success
+ - fn_update_secret_task is changed
+ - fn_update_secret_task.function.status in ["created", "ready"]
+ - "'hashed_value' in fn_update_secret_task.function.secret_environment_variables[0]"
+
+- name: Update function secret variables (Confirmation)
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_confirmation_task is success
+ - fn_update_secret_confirmation_task is not changed
+ - fn_update_secret_confirmation_task.function.status in ["created", "ready"]
+ - "'hashed_value' in fn_update_secret_confirmation_task.function.secret_environment_variables[0]"
+
+- name: Delete function (Check)
+ check_mode: true
+ community.general.scaleway_function:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ register: fn_deletion_check_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_check_task is success
+ - fn_deletion_check_task is changed
+
+- name: Delete function
+ community.general.scaleway_function:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ register: fn_deletion_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_task is success
+ - fn_deletion_task is changed
+
+- name: Delete function (Confirmation)
+ community.general.scaleway_function:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ register: fn_deletion_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_confirmation_task is success
+ - fn_deletion_confirmation_task is not changed
+
+- name: Delete function namespace
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ function_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/defaults/main.yml
new file mode 100644
index 000000000..71d2fe780
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+function_namespace_name: fn-ansible-test
+name: fn-ansible-test
+description: Function used for testing scaleway_function_info ansible module
+updated_description: Function used for testing scaleway_function_info ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+runtime: python310
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/tasks/main.yml
new file mode 100644
index 000000000..17b07f5f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_info/tasks/main.yml
@@ -0,0 +1,62 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create function_namespace
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ function_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ register: integration_function_namespace
+
+- name: Create function
+ community.general.scaleway_function:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+
+- name: Get function info
+ community.general.scaleway_function_info:
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ register: fn_info_task
+
+- ansible.builtin.debug:
+ var: fn_info_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_info_task is success
+ - fn_info_task is not changed
+
+- name: Delete function
+ community.general.scaleway_function:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ namespace_id: '{{ integration_function_namespace.function_namespace.id }}'
+ runtime: '{{ runtime }}'
+
+- name: Delete function namespace
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ function_namespace_name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/defaults/main.yml
new file mode 100644
index 000000000..399d0ea1a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: fn-ansible-test
+description: Function namespace used for testing scaleway_function_namespace ansible module
+updated_description: Function namespace used for testing scaleway_function_namespace ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
+updated_secret_environment_variables:
+ MY_SECRET_VAR: my_other_secret_value
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/tasks/main.yml
new file mode 100644
index 000000000..50af4449d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace/tasks/main.yml
@@ -0,0 +1,260 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a function namespace (Check)
+ check_mode: true
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_check_task
+
+- ansible.builtin.debug:
+ var: fn_creation_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_check_task is success
+ - fn_creation_check_task is changed
+
+- name: Create function_namespace
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_task
+
+- ansible.builtin.debug:
+ var: fn_creation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_task is success
+ - fn_creation_task is changed
+ - fn_creation_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_creation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Create function namespace (Confirmation)
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_creation_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_creation_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_creation_confirmation_task is success
+ - fn_creation_confirmation_task is not changed
+ - fn_creation_confirmation_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_creation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Update function namespace (Check)
+ check_mode: true
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_check_task
+
+- ansible.builtin.debug:
+ var: fn_update_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_check_task is success
+ - fn_update_check_task is changed
+
+- name: Update function namespace
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_task
+
+- ansible.builtin.debug:
+ var: fn_update_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_task is success
+ - fn_update_task is changed
+ - fn_update_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_creation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Update function namespace (Confirmation)
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+ register: fn_update_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_update_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_confirmation_task is success
+ - fn_update_confirmation_task is not changed
+ - fn_update_confirmation_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_creation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Update function namespace secret variables (Check)
+ check_mode: true
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_check_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_check_task is success
+ - fn_update_secret_check_task is changed
+
+- name: Update function namespace secret variables
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_task is success
+ - fn_update_secret_task is changed
+ - fn_update_secret_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_update_secret_task.function_namespace.secret_environment_variables[0]"
+
+- name: Update function namespace secret variables (Confirmation)
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ updated_description }}'
+ environment_variables: '{{ environment_variables }}'
+ secret_environment_variables: '{{ updated_secret_environment_variables }}'
+ register: fn_update_secret_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_update_secret_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_update_secret_confirmation_task is success
+ - fn_update_secret_confirmation_task is not changed
+ - fn_update_secret_confirmation_task.function_namespace.status == "ready"
+ - "'hashed_value' in fn_update_secret_confirmation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Delete function namespace (Check)
+ check_mode: true
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: fn_deletion_check_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_check_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_check_task is success
+ - fn_deletion_check_task is changed
+
+- name: Delete function namespace
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: fn_deletion_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_task is success
+ - fn_deletion_task is changed
+ - "'hashed_value' in fn_creation_task.function_namespace.secret_environment_variables[0]"
+
+- name: Delete function namespace (Confirmation)
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
+ register: fn_deletion_confirmation_task
+
+- ansible.builtin.debug:
+ var: fn_deletion_confirmation_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_deletion_confirmation_task is success
+ - fn_deletion_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/aliases
new file mode 100644
index 000000000..a5ac5181f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/defaults/main.yml
new file mode 100644
index 000000000..0b05eaac0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: fn-ansible-test
+description: Function namespace used for testing scaleway_function_namespace_info ansible module
+updated_description: Function namespace used for testing scaleway_function_namespace_info ansible module (Updated description)
+environment_variables:
+ MY_VAR: my_value
+secret_environment_variables:
+ MY_SECRET_VAR: my_secret_value
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/tasks/main.yml
new file mode 100644
index 000000000..793cd0923
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_function_namespace_info/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2022, Guillaume MARTINEZ <lunik@tiwabbit.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create function_namespace
+ community.general.scaleway_function_namespace:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ description: '{{ description }}'
+ secret_environment_variables: '{{ secret_environment_variables }}'
+
+- name: Get function namespace info
+ community.general.scaleway_function_namespace_info:
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ project_id: '{{ scw_project }}'
+ register: fn_info_task
+
+- ansible.builtin.debug:
+ var: fn_info_task
+
+- name: Check module call result
+ ansible.builtin.assert:
+ that:
+ - fn_info_task is success
+ - fn_info_task is not changed
+ - "'hashed_value' in fn_info_task.function_namespace.secret_environment_variables[0]"
+
+- name: Delete function namespace
+ community.general.scaleway_function_namespace:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ project_id: '{{ scw_project }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml
new file mode 100644
index 000000000..2cdf34fdd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_image_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get image information and register it in a variable
+ scaleway_image_info:
+ region: par1
+ register: images
+
+- name: Display images variable
+ debug:
+ var: images
+
+- name: Ensure retrieval of images info is success
+ assert:
+ that:
+ - images is success
+
+- name: Get image information from ams1 and register it in a variable
+ scaleway_image_info:
+ region: ams1
+ register: images_ams1
+
+- name: Display images variable from ams1
+ debug:
+ var: images_ams1
+
+- name: Ensure retrieval of images info is success
+ assert:
+ that:
+ - images_ams1 is success
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml
new file mode 100644
index 000000000..00ec26ff6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_image_id: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+scaleway_commercial_type: START1-S
+scaleway_server_name: scaleway_ip_test_server
+scaleway_reverse_name: scaleway.com
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml
new file mode 100644
index 000000000..5a396ba4f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_ip/tasks/main.yml
@@ -0,0 +1,449 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create IP (Check)
+ check_mode: true
+ scaleway_ip:
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_check_task
+
+- debug: var=ip_creation_check_task
+
+- name: ip_creation_check_task is success
+ assert:
+ that:
+ - ip_creation_check_task is success
+
+- name: ip_creation_check_task is changed
+ assert:
+ that:
+ - ip_creation_check_task is changed
+
+- name: Create IP
+ scaleway_ip:
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_task
+
+- debug: var=ip_creation_task
+
+- name: ip_creation_task is success
+ assert:
+ that:
+ - ip_creation_task is success
+
+- name: ip_creation_task is changed
+ assert:
+ that:
+ - ip_creation_task is changed
+
+- name: ip_creation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_creation_task.scaleway_ip.server is none
+
+- name: Create IP (Confirmation)
+ scaleway_ip:
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_creation_confirmation_task
+
+- debug: var=ip_creation_confirmation_task
+
+- name: ip_creation_confirmation_task is success
+ assert:
+ that:
+ - ip_creation_confirmation_task is success
+
+- name: ip_creation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_creation_confirmation_task is not changed
+
+- name: ip_creation_confirmation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_creation_confirmation_task.scaleway_ip.server is none
+
+- name: Assign reverse to an IP (Check)
+ check_mode: true
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_check_task
+
+- debug: var=ip_reverse_assignation_check_task
+
+- name: ip_reverse_assignation_check_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_check_task is success
+
+- name: ip_reverse_assignation_check_task is changed
+ assert:
+ that:
+ - ip_reverse_assignation_check_task is changed
+
+- name: Assign reverse to an IP
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_task
+
+- debug: var=ip_reverse_assignation_task
+
+- name: ip_reverse_assignation_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_task is success
+
+- name: ip_reverse_assignation_task is changed
+ assert:
+ that:
+ - ip_reverse_assignation_task is changed
+
+- name: Assign reverse to an IP (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_reverse_assignation_confirmation_task
+
+- debug: var=ip_reverse_assignation_confirmation_task
+
+- name: ip_reverse_assignation_confirmation_task is success
+ assert:
+ that:
+ - ip_reverse_assignation_confirmation_task is success
+
+- name: ip_reverse_assignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_reverse_assignation_confirmation_task is not changed
+
+- name: Create a server
+ scaleway_compute:
+ state: present
+ name: '{{ scaleway_server_name }}'
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ dynamic_ip_required: false
+ wait: true
+
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- name: server_creation_task is success
+ assert:
+ that:
+ - server_creation_task is success
+
+- name: Assign IP to server (Check)
+ check_mode: true
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_check_task
+
+- debug: var=ip_assignation_check_task
+
+- name: ip_assignation_check_task is success
+ assert:
+ that:
+ - ip_assignation_check_task is success
+
+- name: ip_assignation_check_task is changed
+ assert:
+ that:
+ - ip_assignation_check_task is changed
+
+- name: Assign IP to server
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_task
+
+- debug: var=ip_assignation_task
+
+- name: ip_assignation_task is success
+ assert:
+ that:
+ - ip_assignation_task is success
+
+- name: ip_assignation_task is changed
+ assert:
+ that:
+ - ip_assignation_task is changed
+
+- name: Assign IP to server (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ server: '{{ server_creation_task.msg.id }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_assignation_confirmation_task
+
+- debug: var=ip_assignation_confirmation_task
+
+- name: ip_assignation_confirmation_task is success
+ assert:
+ that:
+ - ip_assignation_confirmation_task is success
+
+- name: ip_assignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_assignation_confirmation_task is not changed
+
+- name: Unassign IP from server (Check)
+ check_mode: true
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_check_task
+
+- debug: var=ip_unassignation_check_task
+
+- name: ip_unassignation_check_task is success
+ assert:
+ that:
+ - ip_unassignation_check_task is success
+
+- name: ip_unassignation_check_task is changed
+ assert:
+ that:
+ - ip_unassignation_check_task is changed
+
+- name: Unassign IP from server
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_task
+
+- debug: var=ip_unassignation_task
+
+- name: ip_unassignation_task is success
+ assert:
+ that:
+ - ip_unassignation_task is success
+
+- name: ip_unassignation_task is changed
+ assert:
+ that:
+ - ip_unassignation_task is changed
+
+- name: ip_unassignation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_unassignation_task.scaleway_ip.server is none
+
+- name: Unassign IP from server (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ reverse: '{{ scaleway_reverse_name }}'
+ register: ip_unassignation_confirmation_task
+
+- debug: var=ip_unassignation_confirmation_task
+
+- name: ip_unassignation_confirmation_task is success
+ assert:
+ that:
+ - ip_unassignation_confirmation_task is success
+
+- name: ip_unassignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_unassignation_confirmation_task is not changed
+
+- name: ip_unassignation_confirmation_task.scaleway_ip.server is none
+ assert:
+ that:
+ - ip_unassignation_confirmation_task.scaleway_ip.server is none
+
+- name: Unassign reverse from an IP (Check)
+ check_mode: true
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_check_task
+
+- debug: var=ip_reverse_unassignation_check_task
+
+- name: ip_reverse_unassignation_check_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_check_task is success
+
+- name: ip_reverse_unassignation_check_task is changed
+ assert:
+ that:
+ - ip_reverse_unassignation_check_task is changed
+
+- name: Unassign reverse from an IP
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_task
+
+- debug: var=ip_reverse_unassignation_task
+
+- name: ip_reverse_unassignation_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_task is success
+
+- name: ip_reverse_unassignation_task is changed
+ assert:
+ that:
+ - ip_reverse_unassignation_task is changed
+
+- name: ip_reverse_unassignation_task.scaleway_ip.reverse is none
+ assert:
+ that:
+ - ip_reverse_unassignation_task.scaleway_ip.reverse is none
+
+- name: Unassign reverse from an IP (Confirmation)
+ scaleway_ip:
+ state: present
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: ip_reverse_unassignation_confirmation_task
+
+- debug: var=ip_reverse_unassignation_confirmation_task
+
+- name: ip_reverse_unassignation_confirmation_task is success
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task is success
+
+- name: ip_reverse_unassignation_confirmation_task is not changed
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task is not changed
+
+- name: ip_reverse_unassignation_confirmation_task.scaleway_ip.reverse is none
+ assert:
+ that:
+ - ip_reverse_unassignation_confirmation_task.scaleway_ip.reverse is none
+
+- name: Destroy a server
+ scaleway_compute:
+ name: '{{ scaleway_server_name }}'
+ state: absent
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ wait: true
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
+
+- name: server_destroy_task is success
+ assert:
+ that:
+ - server_destroy_task is success
+
+- name: server_destroy_task is changed
+ assert:
+ that:
+ - server_destroy_task is changed
+
+- name: Delete IP (Check)
+ check_mode: true
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_check_task
+
+- name: ip_deletion_check_task is success
+ assert:
+ that:
+ - ip_deletion_check_task is success
+
+- name: ip_deletion_check_task is changed
+ assert:
+ that:
+ - ip_deletion_check_task is changed
+
+- name: Delete IP
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_task
+
+- name: ip_deletion_task is success
+ assert:
+ that:
+ - ip_deletion_task is success
+
+- name: ip_deletion_task is changed
+ assert:
+ that:
+ - ip_deletion_task is changed
+
+- name: Delete IP (Confirmation)
+ scaleway_ip:
+ state: absent
+ region: '{{ scaleway_region }}'
+ id: '{{ ip_creation_task.scaleway_ip.id }}'
+ register: ip_deletion_confirmation_task
+
+- name: ip_deletion_confirmation_task is success
+ assert:
+ that:
+ - ip_deletion_confirmation_task is success
+
+- name: ip_deletion_confirmation_task is not changed
+ assert:
+ that:
+ - ip_deletion_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml
new file mode 100644
index 000000000..b560b5658
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_ip_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get IP information and register it in a variable
+ scaleway_ip_info:
+ region: par1
+ register: ips
+
+- name: Display ips variable
+ debug:
+ var: ips
+
+- name: Ensure retrieval of ips info is success
+ assert:
+ that:
+ - ips is success
+
+- name: Get IP information from ams1 and register it in a variable
+ scaleway_ip_info:
+ region: ams1
+ register: ips_ams1
+
+- name: Display ips variable from ams1
+ debug:
+ var: ips_ams1
+
+- name: Ensure retrieval of ips info is success
+ assert:
+ that:
+ - ips_ams1 is success
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml
new file mode 100644
index 000000000..c0d37800a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_region: fr-par
+name: lb_ansible_test
+description: Load-balancer used for testing scaleway_lb ansible module
+updated_description: Load-balancer used for testing scaleway_lb ansible module (Updated description)
+tags:
+ - first_tag
+ - second_tag
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml
new file mode 100644
index 000000000..2567abd07
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_lb/tasks/main.yml
@@ -0,0 +1,224 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a load-balancer (Check)
+ check_mode: true
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ description }}'
+ tags: '{{ tags }}'
+ register: lb_creation_check_task
+
+- debug: var=lb_creation_check_task
+
+- name: lb_creation_check_task is success
+ assert:
+ that:
+ - lb_creation_check_task is success
+
+- name: lb_creation_check_task is changed
+ assert:
+ that:
+ - lb_creation_check_task is changed
+
+- name: Create load-balancer
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ description }}'
+ tags: '{{ tags }}'
+ wait: true
+ register: lb_creation_task
+
+- debug: var=lb_creation_task
+
+- name: lb_creation_task is success
+ assert:
+ that:
+ - lb_creation_task is success
+
+- name: lb_creation_task is changed
+ assert:
+ that:
+ - lb_creation_task is changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_creation_task.scaleway_lb.status == "ready"
+
+- name: Create load-balancer (Confirmation)
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ tags: '{{ tags }}'
+ description: '{{ description }}'
+ register: lb_creation_confirmation_task
+
+- debug: var=lb_creation_confirmation_task
+
+- name: lb_creation_confirmation_task is success
+ assert:
+ that:
+ - lb_creation_confirmation_task is success
+
+- name: lb_creation_confirmation_task is not changed
+ assert:
+ that:
+ - lb_creation_confirmation_task is not changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_creation_confirmation_task.scaleway_lb.status == "ready"
+
+- name: Update load-balancer (Check)
+ check_mode: true
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ organization_id: '{{ scw_org }}'
+ tags: '{{ tags }}'
+ description: '{{ updated_description }}'
+ register: lb_update_check_task
+
+- debug: var=lb_update_check_task
+
+- name: lb_update_check_task is success
+ assert:
+ that:
+ - lb_update_check_task is success
+
+- name: lb_update_check_task is changed
+ assert:
+ that:
+ - lb_update_check_task is changed
+
+- name: Update load-balancer
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ tags: '{{ tags }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ updated_description }}'
+ wait: true
+ register: lb_update_task
+
+- debug: var=lb_update_task
+
+- name: lb_update_task is success
+ assert:
+ that:
+ - lb_update_task is success
+
+- name: lb_update_task is changed
+ assert:
+ that:
+ - lb_update_task is changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_update_task.scaleway_lb.status == "ready"
+
+- name: Update load-balancer (Confirmation)
+ scaleway_lb:
+ state: present
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ tags: '{{ tags }}'
+ organization_id: '{{ scw_org }}'
+ description: '{{ updated_description }}'
+ register: lb_update_confirmation_task
+
+- debug: var=lb_update_confirmation_task
+
+- name: lb_update_confirmation_task is success
+ assert:
+ that:
+ - lb_update_confirmation_task is success
+
+- name: lb_update_confirmation_task is not changed
+ assert:
+ that:
+ - lb_update_confirmation_task is not changed
+
+- name: Assert that the load-balancer is in a valid state
+ assert:
+ that:
+ - lb_update_confirmation_task.scaleway_lb.status == "ready"
+
+- name: Delete load-balancer (Check)
+ check_mode: true
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ register: lb_deletion_check_task
+
+- name: lb_deletion_check_task is success
+ assert:
+ that:
+ - lb_deletion_check_task is success
+
+- name: lb_deletion_check_task is changed
+ assert:
+ that:
+ - lb_deletion_check_task is changed
+
+- name: Delete load-balancer
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ wait: true
+ register: lb_deletion_task
+
+- name: lb_deletion_task is success
+ assert:
+ that:
+ - lb_deletion_task is success
+
+- name: lb_deletion_task is changed
+ assert:
+ that:
+ - lb_deletion_task is changed
+
+- name: Delete load-balancer (Confirmation)
+ scaleway_lb:
+ state: absent
+ name: '{{ name }}'
+ region: '{{ scaleway_region }}'
+ description: '{{ description }}'
+ organization_id: '{{ scw_org }}'
+ register: lb_deletion_confirmation_task
+
+- name: lb_deletion_confirmation_task is success
+ assert:
+ that:
+ - lb_deletion_confirmation_task is success
+
+- name: lb_deletion_confirmation_task is not changed
+ assert:
+ that:
+ - lb_deletion_confirmation_task is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml
new file mode 100644
index 000000000..7326ca226
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_organization_info/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get organization information and register it in a variable
+ scaleway_organization_info:
+ register: organizations
+
+- name: Display organizations variable
+ debug:
+ var: organizations
+
+- name: Ensure retrieval of organizations info is success
+ assert:
+ that:
+ - organizations is success
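# A sketch of consuming the registered result; the `scaleway_organization_info`
# return key is assumed from the module documentation:
- name: Show the first organization ID, if any
  debug:
    msg: "{{ (organizations.scaleway_organization_info | default([]) | first | default({})).id | default('none') }}"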
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml
new file mode 100644
index 000000000..ffada8080
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml
new file mode 100644
index 000000000..cab972ae5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group/tasks/main.yml
@@ -0,0 +1,139 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create security group check
+ check_mode: true
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+- debug: var=security_group_creation
+
+- name: Ensure security group creation check is successful
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is changed
+
+- block:
+ - name: Create security group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+ - debug: var=security_group_creation
+
+ - name: Ensure security group creation is successful
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is changed
+
+ - name: Create security group duplicate
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_creation
+
+ - debug: var=security_group_creation
+
+ - name: Ensure duplicate security group creation reports no change
+ assert:
+ that:
+ - security_group_creation is success
+ - security_group_creation is not changed
+
+ - name: Delete security group check
+ check_mode: true
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+ - debug: var=security_group_deletion
+
+ - name: Ensure security group deletion check is successful
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is changed
+
+ always:
+ - name: Delete security group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+ - debug: var=security_group_deletion
+
+ - name: Ensure security group deletion is successful
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is changed
+
+- name: Delete security group duplicate
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: security_group
+ description: 'my security group description'
+ organization: '{{ scaleway_organization }}'
+ stateful: false
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group_deletion
+
+- debug: var=security_group_deletion
+
+- name: Ensure duplicate security group deletion reports no change
+ assert:
+ that:
+ - security_group_deletion is success
+ - security_group_deletion is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml
new file mode 100644
index 000000000..8029a1e9a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get security group information and register it in a variable
+ scaleway_security_group_info:
+ region: par1
+ register: security_groups
+
+- name: Display security_groups variable
+ debug:
+ var: security_groups
+
+- name: Ensure retrieval of security groups info is success
+ assert:
+ that:
+ - security_groups is success
+
+- name: Get security group information and register it in a variable (AMS1)
+ scaleway_security_group_info:
+ region: ams1
+ register: ams1_security_groups
+
+- name: Display security_groups variable (AMS1)
+ debug:
+ var: ams1_security_groups
+
+- name: Ensure retrieval of security groups info is success (AMS1)
+ assert:
+ that:
+ - ams1_security_groups is success
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml
new file mode 100644
index 000000000..965ccf594
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/defaults/main.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: par1
+protocol: "TCP"
+port: 80
+ip_range: "0.0.0.0/0"
+direction: "inbound"
+action: "accept"
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
new file mode 100644
index 000000000..383942195
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_security_group_rule/tasks/main.yml
@@ -0,0 +1,252 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a scaleway security_group
+ scaleway_security_group:
+ state: present
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
+ register: security_group
+
+- debug: var=security_group
+
+- name: Create security_group_rule check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+- debug: var=security_group_rule_creation_task
+
+- assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+- block:
+ - name: Create security_group_rule
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+ - name: Create security_group_rule duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
+
+ - name: Delete security_group_rule check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+ always:
+ - name: Delete security_group_rule
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+- name: Delete security_group_rule duplicate
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: '{{ port }}'
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+- debug: var=security_group_rule_deletion_task
+
+- assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is not changed
+
+- block:
+ - name: Create security_group_rule with null port
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is changed
+
+ - name: Create security_group_rule with null port duplicate
+ scaleway_security_group_rule:
+ state: present
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_creation_task
+
+ - debug: var=security_group_rule_creation_task
+
+ - assert:
+ that:
+ - security_group_rule_creation_task is success
+ - security_group_rule_creation_task is not changed
+
+ - name: Delete security_group_rule with null port check
+ check_mode: true
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+ always:
+ - name: Delete security_group_rule with null port
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+ - debug: var=security_group_rule_deletion_task
+
+ - assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is changed
+
+- name: Delete security_group_rule with null port duplicate
+ scaleway_security_group_rule:
+ state: absent
+ region: '{{ scaleway_region }}'
+ protocol: '{{ protocol }}'
+ port: null
+ ip_range: '{{ ip_range }}'
+ direction: '{{ direction }}'
+ action: '{{ action }}'
+ security_group: '{{ security_group.scaleway_security_group.id }}'
+ register: security_group_rule_deletion_task
+
+- debug: var=security_group_rule_deletion_task
+
+- assert:
+ that:
+ - security_group_rule_deletion_task is success
+ - security_group_rule_deletion_task is not changed
+
+- name: Delete scaleway security_group
+ scaleway_security_group:
+ state: absent
+ region: '{{ scaleway_region }}'
+ name: test_compute
+ description: test_compute
+ organization: '{{ scaleway_organization }}'
+ stateful: true
+ inbound_default_policy: accept
+ outbound_default_policy: accept
+ organization_default: false
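# The block/always layout above guarantees cleanup even when an assertion
# inside the block fails; a minimal sketch of the pattern, with placeholder
# debug tasks standing in for the real resource operations:
- block:
    - name: Exercise the resource (assertions may fail here)
      debug:
        msg: test steps run inside the block
  always:
    - name: Clean up the resource
      debug:
        msg: cleanup runs whether or not the block failed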
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml
new file mode 100644
index 000000000..7274e8a85
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_server_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get server information and register it in a variable
+ scaleway_server_info:
+ region: par1
+ register: servers
+
+- name: Display servers variable
+ debug:
+ var: servers
+
+- name: Ensure retrieval of servers info is success
+ assert:
+ that:
+ - servers is success
+
+- name: Get server information and register it in a variable (AMS1)
+ scaleway_server_info:
+ region: ams1
+ register: ams1_servers
+
+- name: Display servers variable (AMS1)
+ debug:
+ var: ams1_servers
+
+- name: Ensure retrieval of servers info is success (AMS1)
+ assert:
+ that:
+ - ams1_servers is success
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml
new file mode 100644
index 000000000..44f15d515
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_snapshot_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get snapshot information and register it in a variable
+ scaleway_snapshot_info:
+ region: par1
+ register: snapshots
+
+- name: Display snapshots variable
+ debug:
+ var: snapshots
+
+- name: Ensure retrieval of snapshots info is success
+ assert:
+ that:
+ - snapshots is success
+
+- name: Get snapshot information and register it in a variable (AMS1)
+ scaleway_snapshot_info:
+ region: ams1
+ register: ams1_snapshots
+
+- name: Display snapshots variable (AMS1)
+ debug:
+ var: ams1_snapshots
+
+- name: Ensure retrieval of snapshots info is success (AMS1)
+ assert:
+ that:
+ - ams1_snapshots is success
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml
new file mode 100644
index 000000000..588745abd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_sshkey/tasks/main.yml
@@ -0,0 +1,49 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ check_mode: true
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ register: result1
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: present
+ register: result2
+
+- assert:
+ that:
+ - result1 is success and result1 is changed
+ - result2 is success and result2 is not changed
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ check_mode: true
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ register: result1
+
+- scaleway_sshkey:
+ ssh_pub_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDf29yyommeGyKSIgSmX0ISVXP+3x6RUY4JDGLoAMFh2efkfDaRVdsvkvnFuUywgP2RewrjTyLE8w0NpCBHVS5Fm1BAn3yvxOUtTMxTbsQcw6HQ8swJ02+1tewJYjHPwc4GrBqiDo3Nmlq354Us0zBOJg/bBzuEnVD5eJ3GO3gKaCSUYTVrYwO0U4eJE0D9OJeUP9J48kl4ULbCub976+mTHdBvlzRw0Tzfl2kxgdDwlks0l2NefY/uiTdz2oMt092bAY3wZHxjto/DXoChxvaf5s2k8Zb+J7CjimUYnzPlH+zA9F6ROjP5AUu6ZWPd0jOIBl1nDWWb2j/qfNLYM43l sieben@sieben-macbook.local"
+ state: absent
+ register: result2
+
+- assert:
+ that:
+ - result1 is success and result1 is changed
+ - result2 is success and result2 is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml
new file mode 100644
index 000000000..7c53696b6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/defaults/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud_init_script: |
+ #cloud-config
+
+ # final_message
+ # default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
+ # this message is written by cloud-final when the system has finished
+ # its first boot
+ final_message: "The system is finally up, after $UPTIME seconds"
+
+scaleway_image_id: 89ee4018-f8c3-4dc4-a6b5-bca14f985ebe
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
+scaleway_commercial_type: START1-S
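# Why the literal block scalar (`|`) above matters: #cloud-config is
# line-oriented, and a quoted multi-line scalar folds newlines into spaces.
# A minimal illustration with hypothetical keys:
folded: 'line one
  line two'    # parses as the single line "line one line two"
literal: |
  line one
  line two
# `literal` keeps both lines (and a trailing newline) intact.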
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml
new file mode 100644
index 000000000..ce4284127
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_user_data/tasks/main.yml
@@ -0,0 +1,86 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create a server
+ scaleway_compute:
+ name: foobar
+ state: present
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ region: '{{ scaleway_region }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ wait: true
+ register: server_creation_task
+
+- debug: var=server_creation_task
+
+- set_fact:
+ server_id: "{{ server_creation_task.msg.id }}"
+
+- debug: var=server_id
+
+- name: Patch user_data cloud-init configuration (Check)
+ check_mode: true
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_check_task
+
+- debug: var=user_data_check_task
+
+- assert:
+ that:
+ - user_data_check_task is success
+ - user_data_check_task is changed
+
+- name: Patch user_data cloud-init configuration
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_task
+
+- debug: var=user_data_task
+
+- assert:
+ that:
+ - user_data_task is success
+ - user_data_task is changed
+
+- name: Patch user_data cloud-init configuration (Confirmation)
+ scaleway_user_data:
+ region: '{{ scaleway_region }}'
+ server_id: "{{ server_id }}"
+ user_data:
+ cloud-init: "{{ cloud_init_script }}"
+ register: user_data_confirmation_task
+
+- debug: var=user_data_confirmation_task
+
+- assert:
+ that:
+ - user_data_confirmation_task is success
+ - user_data_confirmation_task is not changed
+
+- name: Destroy the server
+ scaleway_compute:
+ name: foobar
+ state: absent
+ region: '{{ scaleway_region }}'
+ image: '{{ scaleway_image_id }}'
+ organization: '{{ scaleway_organization }}'
+ commercial_type: '{{ scaleway_commercial_type }}'
+ wait: true
+ register: server_destroy_task
+
+- debug: var=server_destroy_task
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml
new file mode 100644
index 000000000..ffada8080
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+scaleway_organization: '{{ scw_org }}'
+scaleway_region: ams1
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml
new file mode 100644
index 000000000..2828a8502
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_volume/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Make sure volume is not there before tests
+ scaleway_volume:
+ name: ansible-test-volume
+ state: absent
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+
+- name: Create volume
+ scaleway_volume:
+ name: ansible-test-volume
+ state: present
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ "size": 10000000000
+ volume_type: l_ssd
+ register: server_creation_check_task
+
+- debug: var=server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
+
+- name: Make sure volume is deleted
+ scaleway_volume:
+ name: ansible-test-volume
+ state: absent
+ region: '{{ scaleway_region }}'
+ organization: '{{ scaleway_organization }}'
+ register: server_creation_check_task
+
+- assert:
+ that:
+ - server_creation_check_task is success
+ - server_creation_check_task is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases b/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases
new file mode 100644
index 000000000..b2267f631
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cloud/scaleway
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml
new file mode 100644
index 000000000..45995a54c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/scaleway_volume_info/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get volume information and register it in a variable
+ scaleway_volume_info:
+ region: par1
+ register: volumes
+
+- name: Display volumes variable
+ debug:
+ var: volumes
+
+- name: Ensure retrieval of volumes info is success
+ assert:
+ that:
+ - volumes is success
+
+- name: Get volume information and register it in a variable (AMS1)
+ scaleway_volume_info:
+ region: ams1
+ register: ams1_volumes
+
+- name: Display volumes variable (AMS1)
+ debug:
+ var: ams1_volumes
+
+- name: Ensure retrieval of volumes info is success (AMS1)
+ assert:
+ that:
+ - ams1_volumes is success
diff --git a/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases b/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases
new file mode 100644
index 000000000..d318128f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sefcontext/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+needs/root
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sefcontext/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml
new file mode 100644
index 000000000..04143d1cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# FIXME: Unfortunately, ansible_selinux can be either a boolean or a dictionary!
+- debug:
+ msg: SELinux is disabled
+ when: ansible_selinux is defined and ansible_selinux == False
+
+- debug:
+ msg: SELinux is {{ ansible_selinux.status }}
+ when: ansible_selinux is defined and ansible_selinux != False
+
+- include_tasks: sefcontext.yml
+ when: ansible_selinux is defined and ansible_selinux != False and ansible_selinux.status == 'enabled'
diff --git a/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml b/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml
new file mode 100644
index 000000000..258f1ace9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sefcontext/tasks/sefcontext.yml
@@ -0,0 +1,233 @@
+---
+# Copyright (c) 2016, Dag Wieers <dag@wieers.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install requirements for RHEL versions before 8
+ package:
+ name: policycoreutils-python
+ when:
+ - ansible_distribution == 'RedHat'
+ - ansible_distribution_major_version|int < 8
+
+- name: Install requirements for RHEL 8 and later
+ package:
+ name: python3-policycoreutils
+ when:
+ - ansible_distribution == 'RedHat'
+ - ansible_distribution_major_version|int >= 8
+
+- name: Ensure we start with a clean state
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: absent
+
+- name: Ensure we start with a clean state
+ sefcontext:
+ path: /tmp/foo
+ state: absent
+
+- name: Set SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: present
+ reload: false
+ register: first
+
+- assert:
+ that:
+ - first is changed
+ - first.setype == 'httpd_sys_content_t'
+
+- name: Set SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: present
+ reload: false
+ register: second
+
+- assert:
+ that:
+ - second is not changed
+ - second.setype == 'httpd_sys_content_t'
+
+- name: Change SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: present
+ reload: false
+ register: third
+
+- assert:
+ that:
+ - third is changed
+ - third.setype == 'unlabeled_t'
+
+- name: Change SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: present
+ reload: false
+ register: fourth
+
+- assert:
+ that:
+ - fourth is not changed
+ - fourth.setype == 'unlabeled_t'
+
+- name: Delete SELinux file context of foo/bar
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: httpd_sys_content_t
+ state: absent
+ reload: false
+ register: fifth
+
+- assert:
+ that:
+ - fifth is changed
+ - fifth.setype == 'httpd_sys_content_t'
+
+- name: Delete SELinux file context of foo/bar (again)
+ sefcontext:
+ path: '/tmp/foo/bar(/.*)?'
+ setype: unlabeled_t
+ state: absent
+ reload: false
+ register: sixth
+
+- assert:
+ that:
+ - sixth is not changed
+ - sixth.setype == 'unlabeled_t'
+
+- name: Set SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ substitute: /home
+ state: present
+ reload: false
+ register: subst_first
+
+- assert:
+ that:
+ - subst_first is changed
+ - subst_first.substitute == '/home'
+
+- name: Set SELinux file context path substitution of foo (again)
+ sefcontext:
+ path: /tmp/foo
+ substitute: /home
+ state: present
+ reload: false
+ register: subst_second
+
+- assert:
+ that:
+ - subst_second is not changed
+ - subst_second.substitute == '/home'
+
+- name: Change SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ substitute: /boot
+ state: present
+ reload: false
+ register: subst_third
+
+- assert:
+ that:
+ - subst_third is changed
+ - subst_third.substitute == '/boot'
+
+- name: Change SELinux file context path substitution of foo (again)
+ sefcontext:
+ path: /tmp/foo
+ substitute: /boot
+ state: present
+ reload: false
+ register: subst_fourth
+
+- assert:
+ that:
+ - subst_fourth is not changed
+ - subst_fourth.substitute == '/boot'
+
+- name: Try to delete non-existing SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ substitute: /dev
+ state: absent
+ reload: false
+ register: subst_fifth
+
+- assert:
+ that:
+ - subst_fifth is not changed
+ - subst_fifth.substitute == '/dev'
+
+- name: Delete SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ substitute: /boot
+ state: absent
+ reload: false
+ register: subst_sixth
+
+- assert:
+ that:
+ - subst_sixth is changed
+ - subst_sixth.substitute == '/boot'
+
+- name: Delete SELinux file context path substitution of foo (again)
+ sefcontext:
+ path: /tmp/foo
+ substitute: /boot
+ state: absent
+ reload: false
+ register: subst_seventh
+
+- assert:
+ that:
+ - subst_seventh is not changed
+ - subst_seventh.substitute == '/boot'
+
+- name: Set SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ substitute: /home
+ state: present
+ reload: false
+ register: subst_eighth
+
+- assert:
+ that:
+ - subst_eighth is changed
+ - subst_eighth.substitute == '/home'
+
+- name: Delete SELinux file context path substitution of foo
+ sefcontext:
+ path: /tmp/foo
+ state: absent
+ reload: false
+ register: subst_ninth
+
+- assert:
+ that:
+ - subst_ninth is changed
+
+- name: Delete SELinux file context path substitution of foo (again)
+ sefcontext:
+ path: /tmp/foo
+ state: absent
+ reload: false
+ register: subst_tenth
+
+- assert:
+ that:
+ - subst_tenth is not changed
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases b/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases
new file mode 100644
index 000000000..bca9905ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_client/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml
new file mode 100644
index 000000000..61e49cda0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_client/tasks/main.yml
@@ -0,0 +1,179 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Creating a client if the directory doesn't exist should work
+ sensu_client:
+ subscriptions:
+ - default
+
+- name: Set variable for client file
+ set_fact:
+ client_file: "/etc/sensu/conf.d/client.json"
+
+- name: Insert invalid JSON in the client file
+ lineinfile:
+ state: "present"
+ create: "yes"
+ path: "{{ client_file }}"
+ line: "{'foo' = bar}"
+
+- name: Configure Sensu client with an existing invalid file
+ sensu_client:
+ name: "client"
+ state: "present"
+ subscriptions:
+ - default
+ register: client
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ client_file }}"
+ register: client_config
+
+- name: Assert that client data was set successfully and properly
+ assert:
+ that:
+ - "client is successful"
+ - "client is changed"
+ - "client['config']['name'] == 'client'"
+ - "'default' in client['config']['subscriptions']"
+ - "client['file'] == client_file"
+
+- name: Assert that the client configuration file is actually configured properly
+ vars:
+ config: "{{ client_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "config['client']['keepalives'] == true"
+ - "config['client']['name'] == 'client'"
+ - "config['client']['safe_mode'] == false"
+ - "'default' in config['client']['subscriptions']"
+
+- name: Delete Sensu client configuration
+ sensu_client:
+ state: "absent"
+ register: client_delete
+
+- name: Delete Sensu client configuration (again)
+ sensu_client:
+ state: "absent"
+ register: client_delete_twice
+
+- name: Retrieve configuration file stat
+ stat:
+ path: "{{ client_file }}"
+ register: client_stat
+
+- name: Assert that client deletion was successful
+ assert:
+ that:
+ - "client_delete is successful"
+ - "client_delete is changed"
+ - "client_delete_twice is successful"
+ - "client_delete_twice is not changed"
+ - "client_stat.stat.exists == false"
+
+- name: Configuring a client without subscriptions should fail
+ sensu_client:
+ name: "failure"
+ register: failure
+ ignore_errors: true
+
+- name: Assert failure to create client
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: subscriptions' in failure['msg']"
+
+- name: Configure a new client from scratch with custom parameters
+ sensu_client:
+ name: "custom"
+ address: "host.fqdn"
+ subscriptions:
+ - "default"
+ - "webserver"
+ redact:
+ - "password"
+ socket:
+ bind: "127.0.0.1"
+ port: "3030"
+ keepalive:
+ thresholds:
+ warning: "180"
+ critical: "300"
+ handlers:
+ - "email"
+ custom:
+ - broadcast: "irc"
+ occurrences: "3"
+ register: client
+
+- name: Configure a new client from scratch with custom parameters (twice)
+ sensu_client:
+ name: "custom"
+ address: "host.fqdn"
+ subscriptions:
+ - "default"
+ - "webserver"
+ redact:
+ - "password"
+ socket:
+ bind: "127.0.0.1"
+ port: "3030"
+ keepalive:
+ thresholds:
+ warning: "180"
+ critical: "300"
+ handlers:
+ - "email"
+ custom:
+ - broadcast: "irc"
+ occurrences: "3"
+ register: client_twice
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ client_file }}"
+ register: client_config
+
+- name: Assert that client data was set successfully and properly
+ assert:
+ that:
+ - "client is successful"
+ - "client is changed"
+ - "client_twice is successful"
+ - "client_twice is not changed"
+ - "client['config']['name'] == 'custom'"
+ - "client['config']['address'] == 'host.fqdn'"
+ - "'default' in client['config']['subscriptions']"
+ - "'webserver' in client['config']['subscriptions']"
+ - "'password' in client['config']['redact']"
+ - "client['config']['keepalive']['thresholds']['warning'] == '180'"
+ - "client['config']['keepalive']['thresholds']['critical'] == '300'"
+ - "'email' in client['config']['keepalive']['handlers']"
+ - "client['config']['keepalive']['occurrences'] == '3'"
+ - "client['file'] == client_file"
+
+- name: Assert that the client configuration file is actually configured properly
+ vars:
+ config: "{{ client_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "config['client']['name'] == 'custom'"
+ - "config['client']['address'] == 'host.fqdn'"
+ - "config['client']['keepalives'] == true"
+ - "config['client']['safe_mode'] == false"
+ - "'default' in config['client']['subscriptions']"
+ - "'webserver' in config['client']['subscriptions']"
+ - "'password' in config['client']['redact']"
+ - "config['client']['keepalive']['thresholds']['warning'] == '180'"
+ - "config['client']['keepalive']['thresholds']['critical'] == '300'"
+ - "'email' in config['client']['keepalive']['handlers']"
+ - "config['client']['keepalive']['occurrences'] == '3'"
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases b/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases
new file mode 100644
index 000000000..bca9905ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/root
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml
new file mode 100644
index 000000000..ec73a14c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/main.yml
@@ -0,0 +1,129 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Creating a handler if the directory doesn't exist should work
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+
+- name: Insert junk JSON in a handlers file
+ lineinfile:
+ state: "present"
+ create: "yes"
+ path: "/etc/sensu/conf.d/handlers/handler.json"
+ line: "{'foo' = bar}"
+
+- name: Configure a handler with an existing invalid file
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+ register: handler
+
+- name: Configure a handler (again)
+ sensu_handler:
+ name: "handler"
+ type: "pipe"
+ command: "/bin/bash"
+ state: "present"
+ register: handler_twice
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Assert that handler data was set successfully and properly
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler_twice is successful"
+ - "handler_twice is not changed"
+ - "handler['name'] == 'handler'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/handler.json'"
+ - "handler['config']['type'] == 'pipe'"
+ - "handler['config']['command'] == '/bin/bash'"
+ - "handler['config']['timeout'] == 10"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'handler' in config['handlers']"
+ - "config['handlers']['handler']['type'] == 'pipe'"
+ - "config['handlers']['handler']['command'] == '/bin/bash'"
+ - "config['handlers']['handler']['timeout'] == 10"
+ - "config['handlers']['handler']['handle_flapping'] == false"
+ - "config['handlers']['handler']['handle_silenced'] == false"
+
+- name: Delete Sensu handler configuration
+ sensu_handler:
+ name: "handler"
+ state: "absent"
+ register: handler_delete
+
+- name: Delete Sensu handler configuration (again)
+ sensu_handler:
+ name: "handler"
+ state: "absent"
+ register: handler_delete_twice
+
+- name: Retrieve configuration file stat
+ stat:
+ path: "{{ handler['file'] }}"
+ register: handler_stat
+
+- name: Assert that handler deletion was successful
+ assert:
+ that:
+ - "handler_delete is successful"
+ - "handler_delete is changed"
+ - "handler_delete_twice is successful"
+ - "handler_delete_twice is not changed"
+ - "handler_stat.stat.exists == false"
+
+- name: Configuring a handler without a name should fail
+ sensu_handler:
+ type: "pipe"
+ command: "/bin/bash"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler without a name fails
+ assert:
+ that:
+ - failure is failed
+ - "'required arguments: name' in failure['msg']"
+
+- name: Configuring a handler without a type should fail
+ sensu_handler:
+ name: "pipe"
+ command: "/bin/bash"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler without a type fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: type' in failure['msg']"
+
+- include_tasks: pipe.yml
+- include_tasks: tcp.yml
+- include_tasks: udp.yml
+- include_tasks: set.yml
+- include_tasks: transport.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml
new file mode 100644
index 000000000..46fe24080
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/pipe.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Note: Pipe handlers are also tested and used as part of basic main.yml coverage
+- name: Configuring a handler with missing pipe parameters should fail
+ sensu_handler:
+ name: "pipe"
+ type: "pipe"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing pipe parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: command' in failure['msg']"
+
+- name: Configure a handler with pipe parameters
+ sensu_handler:
+ name: "pipe"
+ type: "pipe"
+ command: "/bin/bash"
+ register: handler
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml
new file mode 100644
index 000000000..e9a86057c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/set.yml
@@ -0,0 +1,53 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Configuring a handler with missing set parameters should fail
+ sensu_handler:
+ name: "set"
+ type: "set"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing set parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: handlers' in failure['msg']"
+
+- name: Configure a set handler
+ sensu_handler:
+ name: "set"
+ type: "set"
+ handlers:
+ - anotherhandler
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate set handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'set'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/set.json'"
+ - "handler['config']['type'] == 'set'"
+ - "'anotherhandler' in handler['config']['handlers']"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'set' in config['handlers']"
+ - "config['handlers']['set']['type'] == 'set'"
+ - "'anotherhandler' in config['handlers']['set']['handlers']"
+ - "config['handlers']['set']['handle_flapping'] == false"
+ - "config['handlers']['set']['handle_silenced'] == false"
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml
new file mode 100644
index 000000000..a5db1d397
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/tcp.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Configuring a handler with missing tcp parameters should fail
+ sensu_handler:
+ name: "tcp"
+ type: "tcp"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing tcp parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: socket' in failure['msg']"
+
+- name: Configure a tcp handler
+ sensu_handler:
+ name: "tcp"
+ type: "tcp"
+ socket:
+ host: 127.0.0.1
+ port: 8000
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate tcp handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'tcp'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/tcp.json'"
+ - "handler['config']['type'] == 'tcp'"
+ - "handler['config']['socket']['host'] == '127.0.0.1'"
+ - "handler['config']['socket']['port'] == 8000"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'tcp' in config['handlers']"
+ - "config['handlers']['tcp']['type'] == 'tcp'"
+ - "config['handlers']['tcp']['socket']['host'] == '127.0.0.1'"
+ - "config['handlers']['tcp']['socket']['port'] == 8000"
+ - "config['handlers']['tcp']['handle_flapping'] == false"
+ - "config['handlers']['tcp']['handle_silenced'] == false"
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml
new file mode 100644
index 000000000..fa2563fa9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/transport.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Configuring a handler with missing transport parameters should fail
+ sensu_handler:
+ name: "transport"
+ type: "transport"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing transport parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: pipe' in failure['msg']"
+
+- name: Configure a transport handler
+ sensu_handler:
+ name: "transport"
+ type: "transport"
+ pipe:
+ type: "topic"
+ name: "transport_handler"
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate transport handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'transport'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/transport.json'"
+ - "handler['config']['type'] == 'transport'"
+ - "handler['config']['pipe']['type'] == 'topic'"
+ - "handler['config']['pipe']['name'] == 'transport_handler'"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'transport' in config['handlers']"
+ - "config['handlers']['transport']['type'] == 'transport'"
+ - "config['handlers']['transport']['pipe']['type'] == 'topic'"
+ - "config['handlers']['transport']['pipe']['name'] == 'transport_handler'"
+ - "config['handlers']['transport']['handle_flapping'] == false"
+ - "config['handlers']['transport']['handle_silenced'] == false"
diff --git a/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml
new file mode 100644
index 000000000..60e88bb98
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sensu_handler/tasks/udp.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Configuring a handler with missing udp parameters should fail
+ sensu_handler:
+ name: "udp"
+ type: "udp"
+ register: failure
+ ignore_errors: true
+
+- name: Assert that configuring a handler with missing udp parameters fails
+ assert:
+ that:
+ - failure is failed
+ - "'the following are missing: socket' in failure['msg']"
+
+- name: Configure a udp handler
+ sensu_handler:
+ name: "udp"
+ type: "udp"
+ socket:
+ host: 127.0.0.1
+ port: 8000
+ register: handler
+
+- name: Retrieve configuration file
+ slurp:
+ src: "{{ handler['file'] }}"
+ register: handler_config
+
+- name: Validate udp handler return data
+ assert:
+ that:
+ - "handler is successful"
+ - "handler is changed"
+ - "handler['name'] == 'udp'"
+ - "handler['file'] == '/etc/sensu/conf.d/handlers/udp.json'"
+ - "handler['config']['type'] == 'udp'"
+ - "handler['config']['socket']['host'] == '127.0.0.1'"
+ - "handler['config']['socket']['port'] == 8000"
+ - "handler['config']['handle_flapping'] == false"
+ - "handler['config']['handle_silenced'] == false"
+
+- name: Assert that the handler configuration file is actually configured properly
+ vars:
+ config: "{{ handler_config.content | b64decode | from_json }}"
+ assert:
+ that:
+ - "'udp' in config['handlers']"
+ - "config['handlers']['udp']['type'] == 'udp'"
+ - "config['handlers']['udp']['socket']['host'] == '127.0.0.1'"
+ - "config['handlers']['udp']['socket']['port'] == 8000"
+ - "config['handlers']['udp']['handle_flapping'] == false"
+ - "config['handlers']['udp']['handle_silenced'] == false"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml
new file mode 100644
index 000000000..aa7de77fe
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+remote_dir: "{{ lookup('env', 'OUTPUT_DIR') }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml
new file mode 100644
index 000000000..cca7071a3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/tasks/main.yml
@@ -0,0 +1,75 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when:
+ - not (ansible_os_family == 'Alpine' and ansible_distribution_version is version('3.15', '<')) # TODO
+ block:
+ - name: Include distribution specific variables
+ include_vars: '{{ lookup(''first_found'', search) }}'
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution | lower }}.yml'
+ - '{{ ansible_os_family | lower }}.yml'
+ - '{{ ansible_system | lower }}.yml'
+ - default.yml
+ paths:
+ - vars
+ - name: install cron package
+ package:
+ name: '{{ cron_pkg }}'
+ when: cron_pkg | default(false, true)
+ register: cron_package_installed
+ until: cron_package_installed is success
+ - when: faketime_pkg | default(false, true)
+ block:
+ - name: install cron and faketime packages
+ package:
+ name: '{{ faketime_pkg }}'
+ register: faketime_package_installed
+ until: faketime_package_installed is success
+ - name: Find libfaketime path
+ shell: '{{ list_pkg_files }} {{ faketime_pkg }} | grep -F libfaketime.so.1'
+ register: libfaketime_path
+ - when: ansible_service_mgr == 'systemd'
+ block:
+ - name: create directory for cron drop-in file
+ file:
+ path: /etc/systemd/system/{{ cron_service }}.service.d
+ state: directory
+ owner: root
+ group: root
+ mode: '0755'
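+        # libfaketime's FAKETIME "+0y x10" keeps the date offset at zero but runs the clock
+        # ten times faster, so scheduled cron jobs fire within seconds during the test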
+ - name: Use faketime with cron service
+ copy:
+ content: '[Service]
+
+ Environment=LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }}
+
+ Environment="FAKETIME=+0y x10"
+
+ Environment=RANDOM_DELAY=0'
+ dest: /etc/systemd/system/{{ cron_service }}.service.d/faketime.conf
+ owner: root
+ group: root
+ mode: '0644'
+ - when: ansible_system == 'FreeBSD'
+ name: Use faketime with cron service
+ copy:
+ content: cron_env='LD_PRELOAD={{ libfaketime_path.stdout_lines[0].strip() }} FAKETIME="+0y x10"'
+ dest: /etc/rc.conf.d/cron
+ owner: root
+ group: wheel
+ mode: '0644'
+ - name: enable cron service
+ service:
+        daemon_reload: '{{ (ansible_service_mgr == ''systemd'') | ternary(true, omit) }}'
+ name: '{{ cron_service }}'
+ state: restarted
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/alpine.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/alpine.yml
new file mode 100644
index 000000000..7c28829c6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/alpine.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cronie
+cron_service: cronie
+list_pkg_files: apk info -L
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/archlinux.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/archlinux.yml
new file mode 100644
index 000000000..c714683ad
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/archlinux.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cronie
+cron_service: cronie
+list_pkg_files: pacman -Ql
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml
new file mode 100644
index 000000000..1eefe007c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/debian.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: dpkg -L
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml
new file mode 100644
index 000000000..f55df21f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml
new file mode 100644
index 000000000..0d17db6ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/fedora.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cronie
+cron_service: crond
+list_pkg_files: rpm -ql
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml
new file mode 100644
index 000000000..284871016
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/freebsd.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg:
+cron_service: cron
+list_pkg_files: pkg info --list-files
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml
new file mode 100644
index 000000000..2998f7b84
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/redhat.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cronie
+cron_service: crond
+faketime_pkg:
+list_pkg_files: rpm -ql
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml
new file mode 100644
index 000000000..77e7e09e3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_cron/vars/suse.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cron_pkg: cron
+cron_service: cron
+list_pkg_files: rpm -ql
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/README.md b/ansible_collections/community/general/tests/integration/targets/setup_docker/README.md
new file mode 100644
index 000000000..f2f26b7cc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/README.md
@@ -0,0 +1,73 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+Setup Docker
+============
+
+This role provides a mechanism to install docker automatically within the context of an integration test.
+
+For the time being (Apr 2023), it has been tested on Fedora 37 and Ubuntu Jammy.
+
+This role was largely based on the `setup_snap` one written by @felixfontein.
+
+
+Quickstart
+----------
+
+Add the file `meta/main.yml` to your integration test target if it does not yet have one, and add (or update) the `dependencies` block with `setup_docker`, as in:
+
+```yaml
+dependencies:
+ - setup_docker
+```
+
+In your integration test target, add something like the following to the beginning of `tasks/main.yml` (example from `mssql_script`):
+
+```yaml
+- name: Start container
+ community.docker.docker_container:
+ name: mssql-test
+ image: "mcr.microsoft.com/mssql/server:2019-latest"
+ env:
+ ACCEPT_EULA: "Y"
+ SA_PASSWORD: "{{ mssql_login_password }}"
+ MSSQL_PID: Developer
+ ports:
+ - "{{ mssql_port }}:1433"
+ detach: true
+ auto_remove: true
+ memory: 2200M
+```
+
+That's it! Your integration test will now use a docker container to support its execution.
+
+
+What it does
+------------
+
+The role will install `docker` on the test target, allowing the test to run a container to support its execution.
+
+Installing the package notifies an Ansible handler that removes `docker` from the system after the integration test target is done, as sketched below.
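+
+A condensed sketch of that install-and-notify pattern, mirroring the role's actual handler name (`Remove Docker packages`) and its `docker_packages` variable; the file split is indicated in comments:
+
+```yaml
+# tasks/main.yml (sketch)
+- name: Install docker
+  package:
+    name: "{{ docker_packages }}"
+    state: present
+  notify: Remove Docker packages  # queued now, runs once the play finishes
+
+# handlers/main.yml (sketch)
+- name: Remove Docker packages
+  package:
+    name: "{{ docker_packages }}"
+    state: absent
+```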
+
+This role assumes that developers will use the collection `community.docker` to manage the containers used in the test. To support that assumption, this role installs the `requests` package in the Python runtime environment in use, usually a *virtualenv* created for the test. That package is **not removed** from that environment after the test.
+
+The most common use case is to use `community.docker.docker_container` to start a container, as in the example above. It is likely that `community.docker.docker_compose` can be used as well, although this has **not been tested** yet.
+
+
+Recommendations
+---------------
+
+* Don't forget to publish the service ports when starting the container
+* Take into consideration that the services inside the container will take a while to start. Use `ansible.builtin.wait_for` to check that the network port is reachable, and/or `retries` on the first task that actually uses those services; see the sketch after this list
+* As a precautionary measure, start using the role in a test that is marked either `disabled` or `unsupported`, and move forward from there.
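+
+A minimal sketch of both techniques, assuming an HTTP service and a hypothetical `service_port` variable holding the port published when the container was started:
+
+```yaml
+- name: Wait for the published port to accept connections
+  ansible.builtin.wait_for:
+    host: 127.0.0.1
+    port: "{{ service_port }}"  # hypothetical: the port published for the container
+    timeout: 120
+
+- name: Probe the service, retrying while it finishes starting up
+  ansible.builtin.uri:
+    url: "http://127.0.0.1:{{ service_port }}/"
+  register: probe
+  retries: 6
+  delay: 10
+  until: probe is success
+```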
+
+
+Known Issues & Caveats
+----------------------
+
+* Supports only Ubuntu and Fedora, having been tested on Ubuntu Jammy and Fedora 37, respectively
+* Lacks a mechanism to choose or constrain the `docker` version to be used
+* Lacks an option to prevent `docker` from being removed at the end of the integration test
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases b/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases
new file mode 100644
index 000000000..0a430dff1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/setup_epel
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml
new file mode 100644
index 000000000..55dc6fdb2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/defaults/main.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
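+# Filenames tried in order by the first_found lookup; the most specific match
+# (distribution plus full version) wins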
+distro_lookup_names:
+ - "D-{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_version }}.yml"
+ - "D-{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "D-{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - "default.yml"
+
+has_docker: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml
new file mode 100644
index 000000000..283496714
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/handlers/main.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove Docker packages
+ package:
+ name: "{{ docker_packages }}"
+ state: absent
+
+- name: "D-Fedora : Remove repository"
+ file:
+ path: /etc/yum.repos.d/docker-ce.repo
+ state: absent
+
+- name: "D-Fedora : Remove dnf-plugins-core"
+ package:
+ name: dnf-plugins-core
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/D-Fedora.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/D-Fedora.yml
new file mode 100644
index 000000000..80d6e869d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/D-Fedora.yml
@@ -0,0 +1,33 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# dnf -y install dnf-plugins-core
+# dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+# sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+
+- name: Install dnf-plugins-core
+ become: true
+ package:
+ name: dnf-plugins-core
+ state: present
+ notify: "D-Fedora : Remove dnf-plugins-core"
+
+- name: Add docker repo
+ become: true
+ ansible.builtin.command:
+ cmd: dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+ notify: "D-Fedora : Remove repository"
+
+- name: Install docker
+ become: true
+ package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ docker_packages }}"
+ notify: Remove Docker packages
+
+- name: Inform that docker is installed
+ set_fact:
+ has_docker: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/default.yml
new file mode 100644
index 000000000..a628e074b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/default.yml
@@ -0,0 +1,21 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# dnf -y install dnf-plugins-core
+# dnf config-manager --add-repo https://download.docker.com/linux/fedora/docker-ce.repo
+# sudo dnf -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+
+- name: Install docker
+ become: true
+ package:
+ name: "{{ item }}"
+ state: present
+ loop: "{{ docker_packages }}"
+ notify:
+ - Remove Docker packages
+
+- name: Inform that docker is installed
+ set_fact:
+ has_docker: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml
new file mode 100644
index 000000000..4f41da31a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/tasks/main.yml
@@ -0,0 +1,55 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Print the facts used to distinguish between distributions
+ debug:
+ msg: "Distribution '{{ ansible_facts.distribution }}', version '{{ ansible_facts.distribution_version }}', OS family '{{ ansible_facts.os_family }}'"
+
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
+
+- name: Distribution specific
+ block:
+ - name: Include distribution specific vars
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files: "{{ distro_lookup_names }}"
+ paths:
+ - "{{ role_path }}/vars"
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files: "{{ distro_lookup_names }}"
+ paths:
+ - "{{ role_path }}/tasks"
+
+- name: Start docker service
+ become: true
+ ansible.builtin.service:
+ name: docker
+ state: started
+
+- name: Cheat on the docker socket permissions
+ become: true
+ ansible.builtin.file:
+ path: /var/run/docker.sock
+ mode: 0666
+
+- name: Install python "requests"
+ ansible.builtin.pip:
+ name:
+ - requests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Fedora.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Fedora.yml
new file mode 100644
index 000000000..f03626f4a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Fedora.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_packages:
+ - docker-ce
+ - docker-ce-cli
+ - containerd.io
+ - docker-compose-plugin
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Ubuntu.yml
new file mode 100644
index 000000000..260dc1d5e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_docker/vars/D-Ubuntu.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_packages:
+ - docker.io
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml
new file mode 100644
index 000000000..186d515f4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_epel/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install EPEL
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ disable_gpg_check: true
+ when:
+ - ansible_facts.distribution in ['RedHat', 'CentOS']
+ - ansible_facts.distribution_major_version == '6'
+
+- name: Install EPEL
+ yum:
+ name: https://ci-files.testing.ansible.com/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ disable_gpg_check: true
+ when:
+ - ansible_facts.distribution in ['RedHat', 'CentOS']
+ - ansible_facts.distribution_major_version != '6'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml
new file mode 100644
index 000000000..f185ef0c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+# setup etcd3 for integration tests on module/lookup
+# (c) 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+# Copyright (c) 2018, Ansible Project
+#
+etcd3_ver: "v3.2.14"
+etcd3_download_server: "https://storage.googleapis.com/etcd"
+#etcd3_download_server: "https://github.com/coreos/etcd/releases/download"
+etcd3_download_url: "{{ etcd3_download_server }}/{{ etcd3_ver }}/etcd-{{ etcd3_ver }}-linux-amd64.tar.gz"
+etcd3_download_location: /tmp/etcd-download-test
+etcd3_path: "{{ etcd3_download_location }}/etcd-{{ etcd3_ver }}-linux-amd64"
+
+etcd3_pip_module: etcd3>=0.12
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml
new file mode 100644
index 000000000..fe6b9cd02
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/tasks/main.yml
@@ -0,0 +1,104 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# setup etcd3 for integration tests on module/lookup
+# Copyright 2017, Jean-Philippe Evrard <jean-philippe@evrard.me>
+# Copyright 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ============================================================
+
+# setup etcd3 for supported distros
+- block:
+
+ - name: python 2
+ set_fact:
+ python_suffix: ""
+ when: ansible_python_version is version('3', '<')
+
+ - name: python 3
+ set_fact:
+ python_suffix: "-py3"
+ when: ansible_python_version is version('3', '>=')
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - 'default{{ python_suffix }}.yml'
+ - 'default.yml'
+ paths: '../vars'
+
+ - name: Upgrade setuptools python2 module
+ pip:
+ name: setuptools<45
+ extra_args: --upgrade
+ state: present
+ when: python_suffix == ''
+
+ - name: Install etcd3 python modules
+ pip:
+ name: "{{ etcd3_pip_module }}"
+ extra_args: --only-binary grpcio
+ state: present
+
+ # Check if re-installing etcd3 is required
+    - name: Check if etcdctl exists for re-use
+ shell: "ETCDCTL_API=3 {{ etcd3_path }}/etcdctl --endpoints=localhost:2379 get foo"
+ args:
+ executable: /bin/bash
+ changed_when: false
+ failed_when: false
+ register: _testetcd3ctl
+
+ - block:
+ # Installing etcd3
+        - name: Prepare download folder if etcd3 can't be re-used
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ register: _etcddownloadexists
+ when:
+ - _testetcd3ctl.rc != 0
+
+ - name: Delete download folder if already exists (to start clean)
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: absent
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Recreate download folder if purged
+ file:
+ path: "{{ etcd3_download_location }}"
+ state: directory
+ when:
+ - _etcddownloadexists is not changed
+
+ - name: Download etcd3
+ unarchive:
+ src: "{{ etcd3_download_url }}"
+ dest: "{{ etcd3_download_location }}"
+ remote_src: true
+
+ # Running etcd3 and kill afterwards if it wasn't running before.
+ - name: Run etcd3
+ shell: "{{ etcd3_path }}/etcd &"
+ register: _etcd3run
+ changed_when: true
+
+# - name: kill etcd3
+# command: "pkill etcd"
+
+ when:
+ - _testetcd3ctl.rc != 0
+
+ when:
+ - ansible_distribution | lower ~ "-" ~ ansible_distribution_major_version | lower != 'centos-6'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml
new file mode 100644
index 000000000..6e1017fe8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/RedHat-7.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+etcd3_pip_module: etcd3<0.12
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml
new file mode 100644
index 000000000..4e7c275b8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse-py3.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SuSE's python 3.6.10 ships six 1.11.0 as a distutils-installed package;
+# we restrict etcd3 to < 0.11 to keep pip from trying to upgrade six
+etcd3_pip_module: 'etcd3<0.11'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml
new file mode 100644
index 000000000..4e7c275b8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/Suse.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# SuSE's python 3.6.10 ships six 1.11.0 as a distutils-installed package;
+# we restrict etcd3 to < 0.11 to keep pip from trying to upgrade six
+etcd3_pip_module: 'etcd3<0.11'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml
new file mode 100644
index 000000000..f7e08fa31
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_etcd3/vars/default.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# the default is to not touch anything
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md
new file mode 100644
index 000000000..44dfeb0c6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/README.md
@@ -0,0 +1,144 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# Create a dummy flatpak repository remote
+
+This document describes how to create a local flatpak dummy repo, just like the one contained in the `files/repo.tar.xz` archive.
+
+
+## Create a hello world app
+
+Prerequisites:
+
+ - flathub
+
+Prepare the environment:
+
+```
+flatpak install --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+```
+
+Create a hello world executable:
+
+```
+echo $'#!/bin/sh\necho hello world' > hello.sh
+```
+
+To create dummy flatpaks, run this (defining a unique NUM for every flatpak to add):
+
+```
+export NUM=1
+flatpak build-init appdir$NUM com.dummy.App$NUM org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+flatpak build appdir$NUM mkdir /app/bin;
+flatpak build appdir$NUM install --mode=750 hello.sh /app/bin;
+flatpak build-finish --command=hello.sh appdir$NUM
+```
+
+## Create a repo and/or add the app to it
+
+Create a repo and add the file to it in one command:
+
+```
+flatpak build-export repo appdir$NUM stable
+```
+
+## Create flatpak*-files
+
+Put a flatpakref file under the repo folder (`repo/com.dummy.App1.flatpakref`):
+
+```
+[Flatpak Ref]
+Title=Dummy App$NUM
+Name=com.dummy.App$NUM
+Branch=stable
+Url=file:///tmp/flatpak/repo
+GPGKey={{ base64-encoded public KEY }}
+IsRuntime=false
+RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+```
+
+Add a `.flatpakrepo` file to the `repo` folder (`repo/dummy-repo.flatpakrepo`):
+
+```
+[Flatpak Repo]
+Title=Dummy Repo
+Url=file:///tmp/flatpak/repo
+Comment=Dummy repo for ansible module integration testing
+Description=Dummy repo for ansible module integration testing
+GPGKey={{ base64-encoded public KEY }}
+```
+
+## Sign the repo
+
+Create a new key in a new gpg home folder (on RedHat systems, the executable needs to be invoked as `gpg2`):
+
+```
+mkdir gpg
+gpg --homedir gpg --quick-gen-key test@dummy.com
+```
+
+Sign the repo and summary file; you need to redo this whenever you update the repository:
+
+```
+flatpak build-sign repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign=KEY_ID --gpg-homedir=gpg
+```
+
+Export the public key as a file:
+
+```
+gpg --homedir=gpg --export KEY_ID > dummy-repo.gpg
+```
+
+Create a base64-encoded string from the gpg file for the `GPGKey=` property in the flatpak*-files:
+
+```
+base64 dummy-repo.gpg | tr -d '\n'
+```
+
+## How to use the repo
+
+Now you can add the `repo` folder as a local repo:
+
+```
+flatpak --system remote-add --gpg-import=/tmp/flatpak/repo/dummy-repo.gpg dummy-repo /tmp/flatpak/repo
+```
+
+Or, via `.flatpakrepo` file:
+
+```
+flatpak --system remote-add dummy-repo /tmp/flatpak/repo/dummy-repo.flatpakrepo
+```
+
+And install the hello world flatpaks like this:
+
+```
+flatpak --system install dummy-repo com.dummy.App$NUM
+```
+
+Or from flatpakref:
+
+```
+flatpak --system install --from /tmp/flatpak/repo/com.dummy.App$NUM.flatpakref
+```
+
+Run the app:
+
+```
+flatpak run com.dummy.App$NUM
+```
+
+To install an app without any runtime dependencies (the app will be broken, but it is enough to test flatpak installation):
+
+```
+flatpak --system install --no-deps dummy-repo com.dummy.App$NUM
+```
+
+## Sources
+
+* https://blogs.gnome.org/alexl/2017/02/10/maintaining-a-flatpak-repository/
+
+* http://docs.flatpak.org/en/latest/first-build.html
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/create-repo.sh b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/create-repo.sh
new file mode 100755
index 000000000..11c762184
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/create-repo.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Delete traces from last run
+rm -rf appdir* dummy-repo.gpg gpg hello.sh repo
+
+# Create GPG key
+mkdir -p gpg
+chmod 0700 gpg
+gpg --homedir gpg --batch --passphrase '' --quick-gen-key test@dummy.com future-default default 10y
+KEY_ID=$(gpg --homedir=gpg --list-keys --with-colons test@dummy.com | grep fpr: | head -1 | cut -d ':' -f 10)
+gpg --homedir=gpg --export "${KEY_ID}" > dummy-repo.gpg
+BASE64_PUBLIC_KEY=$(base64 dummy-repo.gpg | tr -d '\n')
+
+# Install dependencies
+flatpak install -y --system flathub org.freedesktop.Platform//1.6 org.freedesktop.Sdk//1.6
+
+# Add individual flatpaks
+echo $'#!/bin/sh\necho hello world' > hello.sh
+
+for NUM in 1 2 3; do
+ flatpak build-init appdir${NUM} com.dummy.App${NUM} org.freedesktop.Sdk org.freedesktop.Platform 1.6;
+ flatpak build appdir${NUM} mkdir /app/bin;
+ flatpak build appdir${NUM} install --mode=750 hello.sh /app/bin;
+ flatpak build-finish --command=hello.sh appdir${NUM}
+
+ flatpak build-export repo appdir${NUM} stable
+
+ cat > repo/com.dummy.App${NUM}.flatpakref <<EOF
+ [Flatpak Ref]
+ Title=Dummy App${NUM}
+ Name=com.dummy.App${NUM}
+ Branch=stable
+ Url=file:///tmp/flatpak/repo
+ GPGKey=${BASE64_PUBLIC_KEY}
+ IsRuntime=false
+ RuntimeRepo=https://flathub.org/repo/flathub.flatpakrepo
+EOF
+done
+
+# Build repository
+cat > repo/dummy-repo.flatpakrepo <<EOF
+ [Flatpak Repo]
+ Title=Dummy Repo
+ Url=file:///tmp/flatpak/repo
+ Comment=Dummy repo for ansible module integration testing
+ Description=Dummy repo for ansible module integration testing
+ GPGKey=${BASE64_PUBLIC_KEY}
+EOF
+
+flatpak build-sign repo --gpg-sign="${KEY_ID}" --gpg-homedir=gpg
+flatpak build-update-repo repo --gpg-sign="${KEY_ID}" --gpg-homedir=gpg
+
+# Compress repository
+tar cvfJ repo.tar.xz repo/
+mv repo.tar.xz files/
+
+# Cleanup
+rm -rf appdir* dummy-repo.gpg gpg hello.sh repo
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz
new file mode 100644
index 000000000..609acaad7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz
Binary files differ
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz.license b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml
new file mode 100644
index 000000000..0c88d256e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/handlers/main.yaml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: remove temporary flatpak link
+ file:
+ state: absent
+ path: /tmp/flatpak
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
new file mode 100644
index 000000000..1b3d5b875
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/meta/main.yaml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
new file mode 100644
index 000000000..037784738
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_flatpak_remote/tasks/main.yaml
@@ -0,0 +1,32 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set up dummy flatpak repository remote
+ when: |
+    ansible_distribution == 'Fedora' or
+    (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version | int >= 16)
+ block:
+ - name: Copy repo into place
+ unarchive:
+ src: repo.tar.xz
+ dest: '{{ remote_tmp_dir }}'
+ owner: root
+ group: root
+ mode: '0644'
+ - name: Create deterministic link to temp directory
+ file:
+ state: link
+ src: '{{ remote_tmp_dir }}/'
+ path: /tmp/flatpak
+ owner: root
+ group: root
+ mode: '0644'
+ notify: remove temporary flatpak link
+ become: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml
new file mode 100644
index 000000000..f75354097
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_gnutar/handlers/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall gnu-tar
+ community.general.homebrew:
+ name: gnu-tar
+ state: absent
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml
new file mode 100644
index 000000000..8dbfebf6f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_gnutar/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- when: ansible_facts.distribution == 'MacOSX'
+ block:
+ - name: MACOS | Find brew binary
+ command: which brew
+ register: brew_which
+
+ - name: MACOS | Get owner of brew binary
+ stat:
+ path: "{{ brew_which.stdout }}"
+ register: brew_stat
+
+ - name: MACOS | Install gnu-tar
+ community.general.homebrew:
+ name: gnu-tar
+ state: present
+ become: true
+ become_user: "{{ brew_stat.stat.pw_name }}"
+ notify:
+ - uninstall gnu-tar
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml
new file mode 100644
index 000000000..bc64ab319
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: setup.yml
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'trusty'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml
new file mode 100644
index 000000000..205bd27d8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_influxdb/tasks/setup.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install https transport for apt and ca-certificates
+ apt: name={{ item }} state=latest force=yes
+ with_items:
+ - apt-transport-https
+ - ca-certificates
+
+- name: Install apt_key dependencies
+ pip: name={{ item }}
+ with_items:
+ - pyOpenSSL
+ - ndg-httpsclient
+ - pyasn1
+
+- name: Add InfluxDB public GPG key
+ apt_key: url=https://repos.influxdata.com/influxdb.key state=present
+
+- name: Add InfluxDB repository
+ apt_repository: repo='deb https://repos.influxdata.com/ubuntu trusty stable' filename='influxdb' state=present update_cache=yes
+
+- name: Install InfluxDB
+ apt: name=influxdb state=latest
+
+- name: Start InfluxDB service
+ service: name=influxdb state=started
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml
new file mode 100644
index 000000000..d4a5c7d05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml
new file mode 100644
index 000000000..2ab57d59d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/tasks/main.yml
@@ -0,0 +1,26 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- set_fact:
+ has_java_keytool: >-
+ {{
+ ansible_os_family not in ['Darwin', 'FreeBSD']
+ and not (ansible_distribution == "CentOS" and ansible_distribution_version is version("7.0", "<"))
+ }}
+
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: has_java_keytool
+
+- name: Install keytool
+ package:
+ name: '{{ keytool_package_name }}'
+ become: true
+ when: has_java_keytool
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Alpine.yml
new file mode 100644
index 000000000..4ff75ae8c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Alpine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_name: openjdk11-jre-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Archlinux.yml
new file mode 100644
index 000000000..9e29065b3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Archlinux.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_name: jre11-openjdk-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml
new file mode 100644
index 000000000..30ae5cd04
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_name: ca-certificates-java
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml
new file mode 100644
index 000000000..c200091f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/RedHat.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_name: java-11-openjdk-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml
new file mode 100644
index 000000000..c200091f8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_java_keytool/vars/Suse.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+keytool_package_name: java-11-openjdk-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf
new file mode 100644
index 000000000..450330293
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/files/mosquitto.conf
@@ -0,0 +1,39 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Plain MQTT protocol
+listener 1883
+
+# MQTT over TLS 1.1
+listener 8883
+tls_version tlsv1.1
+cafile /tls/ca_certificate.pem
+certfile /tls/server_certificate.pem
+keyfile /tls/server_key.pem
+
+# MQTT over TLS 1.2
+listener 8884
+tls_version tlsv1.2
+cafile /tls/ca_certificate.pem
+certfile /tls/server_certificate.pem
+keyfile /tls/server_key.pem
+
+# TODO: TLS 1.3 does not appear to be supported on Ubuntu 18.04; re-try on 20.04 or the next LTS release.
+# MQTT over TLS 1.3
+#
+# listener 8885
+# tls_version tlsv1.3
+# cafile /tls/ca_certificate.pem
+# certfile /tls/server_certificate.pem
+# keyfile /tls/server_key.pem
+
+log_dest syslog
+
+log_type error
+log_type warning
+log_type notice
+log_type information
+log_type debug
+
+connection_messages true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml
new file mode 100644
index 000000000..488c355d0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_tls
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml
new file mode 100644
index 000000000..2dd0674dc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: ubuntu.yml
+ when: ansible_distribution == 'Ubuntu'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml
new file mode 100644
index 000000000..8222aa175
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_mosquitto/tasks/ubuntu.yml
@@ -0,0 +1,29 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install https transport for apt
+ apt:
+ name: apt-transport-https
+ state: latest
+ force: true
+
+- name: Install Mosquitto Server
+ apt:
+ name: mosquitto
+ state: latest
+ register: result
+ until: result is success
+ delay: 3
+ retries: 10
+
+- name: Ensure TLS config
+ copy:
+ src: mosquitto.conf
+ dest: /etc/mosquitto/mosquitto.conf
+
+- name: Start Mosquitto service
+ service:
+ name: mosquitto
+ state: restarted
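With the broker restarted on the listeners configured above, a dependent test can publish through the collection's own module. A minimal smoke-test sketch, assuming the community.general.mqtt module and its server/port/topic/payload options (the topic and payload values are made up here):

    - name: Publish a test message over the plain MQTT listener (sketch)
      community.general.mqtt:
        server: localhost
        port: 1883
        topic: ansible/smoke  # hypothetical topic
        payload: hello
        qos: "1"
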
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif
new file mode 100644
index 000000000..8f8c537bd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif
@@ -0,0 +1,22 @@
+dn: ou=users,dc=example,dc=com
+objectClass: organizationalUnit
+objectClass: top
+ou: users
+
+dn: uid=ldaptest,ou=users,dc=example,dc=com
+uid: ldaptest
+uidNumber: 1111
+gidNumber: 100
+objectClass: top
+objectClass: posixAccount
+objectClass: shadowAccount
+objectClass: person
+objectClass: organizationalPerson
+objectClass: inetOrgPerson
+loginShell: /bin/sh
+homeDirectory: /home/ldaptest
+cn: LDAP Test
+gecos: LDAP Test
+displayName: LDAP Test
+mail: ldap.test@example.com
+sn: Test
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif.license b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/initial_config.ldif.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif
new file mode 100644
index 000000000..7fb34d93b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif
@@ -0,0 +1,4 @@
+dn: olcDatabase={0}config,cn=config
+changetype: modify
+replace: olcRootPW
+olcRootPW: "Test1234!"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif.license b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/files/rootpw_cnconfig.ldif.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml
new file mode 100644
index 000000000..25077de16
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/tasks/main.yml
@@ -0,0 +1,72 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Setup OpenLDAP on Debian or Ubuntu
+ block:
+ - name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+
+ - name: Install OpenLDAP server and tools
+ become: true
+ package:
+ name: '{{ item }}'
+ loop: '{{ openldap_packages_name }}'
+
+ - name: Install python-ldap (Python 3)
+ become: true
+ package:
+ name: '{{ python_ldap_package_name_python3 }}'
+ when: ansible_python_version is version('3.0', '>=')
+
+ - name: Install python-ldap (Python 2)
+ become: true
+ package:
+ name: '{{ python_ldap_package_name }}'
+ when: ansible_python_version is version('3.0', '<')
+
+ - name: Make sure OpenLDAP service is stopped
+ become: true
+ shell: 'cat /var/run/slapd/slapd.pid | xargs -r kill -9'
+
+ - name: Debconf
+ shell: 'echo "slapd {{ item.question }} {{ item.vtype }} {{ item.value }}" >> /root/debconf-slapd.conf'
+ loop: "{{ openldap_debconfs }}"
+
+ - name: Dpkg reconfigure
+ shell:
+ cmd: "export DEBIAN_FRONTEND=noninteractive; cat /root/debconf-slapd.conf | debconf-set-selections; dpkg-reconfigure -f noninteractive slapd"
+ creates: "/root/slapd_configured"
+
+ - name: Start OpenLDAP service
+ become: true
+ service:
+ name: '{{ openldap_service_name }}'
+ enabled: true
+ state: started
+
+ - name: Copy initial config ldif file
+ become: true
+ copy:
+ src: 'files/{{ item }}'
+ dest: '/tmp/{{ item }}'
+ owner: root
+ group: root
+ mode: '0644'
+ loop:
+ - rootpw_cnconfig.ldif
+ - initial_config.ldif
+
+ - name: Configure admin password for cn=config
+ shell: "ldapmodify -Y EXTERNAL -H ldapi:/// -f /tmp/rootpw_cnconfig.ldif"
+
+ - name: Add initial config
+ become: true
+ shell: 'ldapadd -H ldapi:/// -x -D "cn=admin,dc=example,dc=com" -w Test1234! -f /tmp/initial_config.ldif'
+ when: ansible_os_family in ['Ubuntu', 'Debian']
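Once slapd has been reconfigured and seeded, dependent targets can bind with the debconf-provisioned admin credentials. A lookup sketch for the seeded test user, assuming the community.general.ldap_search module (the register name is arbitrary):

    - name: Verify the seeded test user exists (sketch)
      community.general.ldap_search:
        dn: ou=users,dc=example,dc=com
        scope: onelevel
        filter: (uid=ldaptest)
        bind_dn: cn=admin,dc=example,dc=com
        bind_pw: Test1234!
      register: ldaptest_entry
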
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml
new file mode 100644
index 000000000..3b4f19810
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Debian.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+python_ldap_package_name: python-ldap
+python_ldap_package_name_python3: python3-ldap
+openldap_packages_name:
+ - slapd
+ - ldap-utils
+openldap_service_name: slapd
+openldap_debconfs:
+ - question: "shared/organization"
+ value: "Example Organization"
+ vtype: "string"
+ - question: "slapd/allow_ldap_v2"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/backend"
+ value: "MDB"
+ vtype: "select"
+ - question: "slapd/domain"
+ value: "example.com"
+ vtype: "string"
+ - question: "slapd/dump_database"
+ value: "when needed"
+ vtype: "select"
+ - question: "slapd/dump_database_destdir"
+ value: "/var/backups/slapd-VERSION"
+ vtype: "string"
+ - question: "slapd/internal/adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/internal/generated_adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/invalid_config"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/move_old_database"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/no_configuration"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/password1"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password2"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password_mismatch"
+ value: ""
+ vtype: "note"
+ - question: "slapd/purge_database"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/upgrade_slapcat_failure"
+ value: ""
+ vtype: "error"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml
new file mode 100644
index 000000000..3b4f19810
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openldap/vars/Ubuntu.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+python_ldap_package_name: python-ldap
+python_ldap_package_name_python3: python3-ldap
+openldap_packages_name:
+ - slapd
+ - ldap-utils
+openldap_service_name: slapd
+openldap_debconfs:
+ - question: "shared/organization"
+ value: "Example Organization"
+ vtype: "string"
+ - question: "slapd/allow_ldap_v2"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/backend"
+ value: "MDB"
+ vtype: "select"
+ - question: "slapd/domain"
+ value: "example.com"
+ vtype: "string"
+ - question: "slapd/dump_database"
+ value: "when needed"
+ vtype: "select"
+ - question: "slapd/dump_database_destdir"
+ value: "/var/backups/slapd-VERSION"
+ vtype: "string"
+ - question: "slapd/internal/adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/internal/generated_adminpw"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/invalid_config"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/move_old_database"
+ value: "true"
+ vtype: "boolean"
+ - question: "slapd/no_configuration"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/password1"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password2"
+ value: "Test1234!"
+ vtype: "password"
+ - question: "slapd/password_mismatch"
+ value: ""
+ vtype: "note"
+ - question: "slapd/purge_database"
+ value: "false"
+ vtype: "boolean"
+ - question: "slapd/upgrade_slapcat_failure"
+ value: ""
+ vtype: "error"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_opennebula/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/meta/main.yml
new file mode 100644
index 000000000..fe9e33681
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_opennebula/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/tasks/main.yml
new file mode 100644
index 000000000..b7babbaab
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/tasks/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required library
+ pip:
+ name: pyone
+ extra_args: "-c {{ remote_constraints }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml
new file mode 100644
index 000000000..39b48270a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_opennebula/vars/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+opennebula_test:
+ hosts:
+ - hv1
+ - hv2
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml
new file mode 100644
index 000000000..d4a5c7d05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml
new file mode 100644
index 000000000..b8e003710
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/tasks/main.yml
@@ -0,0 +1,69 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include OS-specific variables
+ include_vars: '{{ lookup("first_found", search) }}'
+ vars:
+ search:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}.yml'
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - default.yml
+ paths:
+ - vars
+
+- name: Install OpenSSL
+ become: true
+ package:
+ name: '{{ openssl_package_name }}'
+ when: ansible_os_family != 'Darwin'
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ block:
+ - name: Install cryptography (Python 3)
+ become: true
+ package:
+ name: '{{ cryptography_package_name_python3 }}'
+ when: not cryptography_from_pip and ansible_python_version is version('3.0', '>=')
+
+ - name: Install cryptography (Python 2)
+ become: true
+ package:
+ name: '{{ cryptography_package_name }}'
+ when: not cryptography_from_pip and ansible_python_version is version('3.0', '<')
+
+ - name: Install cryptography (pip)
+ become: true
+ pip:
+ name: cryptography>=3.3
+ extra_args: "-c {{ remote_constraints }}"
+ when: cryptography_from_pip
+
+- name: Install pyOpenSSL (Python 3)
+ become: true
+ package:
+ name: '{{ pyopenssl_package_name_python3 }}'
+ when: pyopenssl_package_name_python3 is defined and ansible_python_version is version('3.0', '>=')
+
+- name: Install pyOpenSSL (Python 2)
+ become: true
+ package:
+ name: '{{ pyopenssl_package_name }}'
+ when: pyopenssl_package_name is defined and ansible_python_version is version('3.0', '<')
+
+- name: Register OpenSSL version
+ shell: "openssl version | cut -d' ' -f2"
+ register: openssl_version
+
+- name: Register cryptography version
+ command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
+ register: cryptography_version
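The two registered results let dependent targets gate their tests on the detected versions with Ansible's version test. A sketch, with a hypothetical tasks file name:

    - name: Run tests that need cryptography >= 3.3 (sketch)
      include_tasks: cryptography_tests.yml  # hypothetical file
      when: cryptography_version.stdout is version('3.3', '>=')
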
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Alpine.yml
new file mode 100644
index 000000000..c5d4d23a4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Alpine.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: py-cryptography
+cryptography_package_name_python3: py3-cryptography
+pyopenssl_package_name: py-openssl
+pyopenssl_package_name_python3: py3-openssl
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Archlinux.yml
new file mode 100644
index 000000000..b6ae2fe10
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Archlinux.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python-cryptography
+pyopenssl_package_name: python-pyopenssl
+pyopenssl_package_name_python3: python-pyopenssl
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/CentOS-8.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/CentOS-8.yml
new file mode 100644
index 000000000..875a69718
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/CentOS-8.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+openssl_package_name: openssl
+cryptography_from_pip: '{{ ansible_python_version is version("3.8", ">=") }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Darwin.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Darwin.yml
new file mode 100644
index 000000000..b3dbd9811
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Darwin.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_from_pip: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml
new file mode 100644
index 000000000..6ef3df1a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Debian.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+pyopenssl_package_name: python-openssl
+pyopenssl_package_name_python3: python3-openssl
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
new file mode 100644
index 000000000..e5ad6812f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: py27-cryptography
+cryptography_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-cryptography"
+pyopenssl_package_name: py27-openssl
+pyopenssl_package_name_python3: "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-openssl"
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat-9.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat-9.yml
new file mode 100644
index 000000000..ac9b3344e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat-9.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml
new file mode 100644
index 000000000..ef78bab01
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/RedHat.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+pyopenssl_package_name: pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml
new file mode 100644
index 000000000..b7d246653
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_openssl/vars/Suse.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
+pyopenssl_package_name: python-pyOpenSSL
+pyopenssl_package_name_python3: python3-pyOpenSSL
+openssl_package_name: openssl
+cryptography_from_pip: false
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml b/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml
new file mode 100644
index 000000000..fc75f84df
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/archlinux.yml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Since Arch Linux is a rolling distribution, it regularly needs its packages upgraded, otherwise some tests might
+# stop working due to conflicts during package installation. Since there is no good way to do this at container
+# startup time, we use the setup_pkg_mgr setup role to do this once per CI run (hopefully). In case the Arch Linux
+# tests are run outside of a container, we use a date-based tag (see below) to avoid this running more than
+# once per day.
+
+- name: Create tag
+ copy:
+ dest: /tmp/.ansible_archlinux_sysupgrade_tag
+ content: |
+ Last ArchLinux system upgrade by integration tests was done on {{ ansible_facts.date_time.date }}.
+ register: archlinux_upgrade_tag
+
+- name: Upgrade all packages
+ pacman:
+ update_cache: true
+ upgrade: true
+ when: archlinux_upgrade_tag is changed
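The tag works because copy only reports changed when the file content (today's date) differs from what is already on disk, so the upgrade runs at most once per day across repeated invocations. The same pattern can throttle any expensive recurring task; a generic sketch with hypothetical paths and a placeholder command:

    - name: Refresh a date-stamped tag file (changes at most once per day)
      copy:
        dest: /tmp/.my_daily_tag  # hypothetical tag file
        content: "{{ ansible_facts.date_time.date }}"
      register: my_daily_tag

    - name: Expensive daily maintenance (placeholder command)
      command: /bin/true
      when: my_daily_tag is changed
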
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
new file mode 100644
index 000000000..5bff53b3b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
@@ -0,0 +1,39 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- set_fact:
+ pkg_mgr: community.general.pkgng
+ ansible_pkg_mgr: community.general.pkgng
+ cacheable: true
+ when: ansible_os_family == "FreeBSD"
+
+- set_fact:
+ pkg_mgr: community.general.zypper
+ ansible_pkg_mgr: community.general.zypper
+ cacheable: true
+ when: ansible_os_family == "Suse"
+
+- set_fact:
+ pkg_mgr: community.general.pacman
+ ansible_pkg_mgr: community.general.pacman
+ cacheable: true
+ when: ansible_os_family == "Archlinux"
+
+- shell:
+ cmd: |
+ sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*.repo
+ sed -i 's%#baseurl=http://mirror.centos.org/$contentdir/$releasever/%baseurl=https://vault.centos.org/8.4.2105/%g' /etc/yum.repos.d/CentOS-Linux-*.repo
+ ignore_errors: true # This fails for CentOS Stream 8
+ when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+
+- when: ansible_os_family == "Archlinux"
+ block:
+ - name: ArchLinux specific setup
+ include_tasks: archlinux.yml
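Because the pkg_mgr/ansible_pkg_mgr facts are set cacheable, the generic package action in later targets dispatches to the collection's own backends (pkgng, zypper, pacman) without per-OS branching. A sketch of a downstream task relying on that, with a hypothetical package name:

    - name: Install a test dependency via the redirected package manager (sketch)
      become: true
      package:
        name: rsync  # hypothetical package
        state: present
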
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml
new file mode 100644
index 000000000..1a33ecafa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/defaults/main.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_service: postgresql
+
+postgresql_packages:
+ - postgresql-server
+ - python-psycopg2
+
+pg_user: postgres
+pg_group: root
+
+locale_latin_suffix:
+locale_utf8_suffix:
+
+# defaults for test SSL
+ssl_db: 'ssl_db'
+ssl_user: 'ssl_user'
+ssl_pass: 'ssl_pass'
+ssl_rootcert: '~{{ pg_user }}/root.crt'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
new file mode 100644
index 000000000..89b318d77
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--1.0.sql
@@ -0,0 +1,6 @@
+-- Copyright (c) Ansible Project
+-- GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+-- SPDX-License-Identifier: GPL-3.0-or-later
+
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''1.0'')::text';
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
new file mode 100644
index 000000000..c9386cac4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--2.0.sql
@@ -0,0 +1,6 @@
+-- Copyright (c) Ansible Project
+-- GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+-- SPDX-License-Identifier: GPL-3.0-or-later
+
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''2.0'')::text';
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
new file mode 100644
index 000000000..a96bc8587
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy--3.0.sql
@@ -0,0 +1,6 @@
+-- Copyright (c) Ansible Project
+-- GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+-- SPDX-License-Identifier: GPL-3.0-or-later
+
+CREATE OR REPLACE FUNCTION dummy_display_ext_version()
+RETURNS text LANGUAGE SQL AS 'SELECT (''3.0'')::text';
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control
new file mode 100644
index 000000000..4f8553c22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control
@@ -0,0 +1,3 @@
+comment = 'dummy extension used to test postgresql_ext Ansible module'
+default_version = '3.0'
+relocatable = true
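Together with the versioned SQL scripts above and the empty upgrade-path files touched by the role's tasks, this control file yields an extension that can be created at one version and updated to another. A manual sketch of what the postgresql_ext tests exercise, run as the postgres user via psql:

    - name: Exercise the dummy extension by hand (sketch)
      become: true
      become_user: '{{ pg_user }}'
      shell: |
        psql --dbname postgres -c "CREATE EXTENSION dummy VERSION '1.0'"
        psql --dbname postgres -c "ALTER EXTENSION dummy UPDATE TO '3.0'"
        psql --dbname postgres -c "SELECT dummy_display_ext_version()"
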
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control.license b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/dummy.control.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
new file mode 100644
index 000000000..e6b14c4d7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/files/pg_hba.conf
@@ -0,0 +1,14 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# !!! This file managed by Ansible. Any local changes may be overwritten. !!!
+
+# Database administrative login by UNIX sockets
+# note: you may wish to restrict this further later
+local   all   {{ pg_user }}   trust
+
+# TYPE  DATABASE  USER  CIDR-ADDRESS  METHOD
+local   all       all                 md5
+host    all       all   127.0.0.1/32  md5
+host    all       all   ::1/128       md5
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml
new file mode 100644
index 000000000..3dac4a098
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/tasks/main.yml
@@ -0,0 +1,257 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Exit on Suse because it causes CI problems
+- meta: end_play
+ when: ansible_os_family == 'Suse'
+
+# To avoid hangs on postgres service start/stop during CI runs:
+- meta: end_play
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+
+# Temporarily disable Fedora 34
+- meta: end_play
+ when: ansible_facts.distribution == 'Fedora' and ansible_facts.distribution_major_version == '34'
+
+- name: python 2
+ set_fact:
+ python_suffix: ''
+ when: ansible_python_version is version('3', '<')
+
+- name: python 3
+ set_fact:
+ python_suffix: -py3
+ when: ansible_python_version is version('3', '>=')
+
+- name: Include distribution and Python version specific variables
+ include_vars: '{{ lookup("first_found", params) }}'
+ vars:
+ params:
+ files:
+ - '{{ ansible_distribution }}-{{ ansible_distribution_major_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_distribution }}-{{ ansible_distribution_version }}{{ python_suffix }}.yml'
+ - '{{ ansible_os_family }}{{ python_suffix }}.yml'
+ - default{{ python_suffix }}.yml
+ paths:
+ - '{{ role_path }}/vars'
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when: ansible_service_mgr == 'systemd' and ansible_distribution == 'Fedora'
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+
+- name: stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ ignore_errors: true
+
+- name: remove old db (RedHat or Suse)
+ file:
+ path: '{{ pg_dir }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "RedHat" or ansible_os_family == "Suse"
+
+- name: remove old db (FreeBSD)
+ file:
+ path: '{{ pg_dir }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "FreeBSD"
+
+- name: remove old db config and files (debian)
+ file:
+ path: '{{ loop_item }}'
+ state: absent
+ ignore_errors: true
+ when: ansible_os_family == "Debian"
+ loop:
+ - /etc/postgresql
+ - /var/lib/postgresql
+ loop_control:
+ loop_var: loop_item
+
+- name: install dependencies for postgresql test
+ package:
+ name: '{{ postgresql_package_item }}'
+ state: present
+ with_items: '{{ postgresql_packages }}'
+ loop_control:
+ loop_var: postgresql_package_item
+
+- name: initialize postgres (FreeBSD)
+ command: /usr/local/etc/rc.d/postgresql oneinitdb
+ when: ansible_os_family == "FreeBSD"
+
+- name: Initialize postgres (RedHat systemd)
+ command: postgresql-setup initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr == "systemd"
+
+- name: Initialize postgres (RedHat sysv)
+ command: /sbin/service postgresql initdb
+ when: ansible_os_family == "RedHat" and ansible_service_mgr != "systemd"
+
+- name: Initialize postgres (Archlinux)
+ command: su - postgres -c "initdb --locale en_US.UTF-8 -D '/var/lib/postgres/data'"
+ when: ansible_os_family == "Archlinux"
+
+- name: Initialize postgres (Alpine)
+ command: su - postgres -c "initdb --locale en_US.UTF-8 -D '/var/lib/postgresql/data'"
+ when: ansible_os_family == "Alpine"
+
+- name: Initialize postgres (Debian)
+ shell: . /usr/share/postgresql-common/maintscripts-functions && set_system_locale && /usr/bin/pg_createcluster -u postgres {{ pg_ver }} main
+ args:
+ creates: /etc/postgresql/{{ pg_ver }}/
+ when: ansible_os_family == 'Debian'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=stopped
+ when: ansible_os_family == 'Suse'
+
+- name: Pause between stop and start postgresql
+ pause:
+ seconds: 5
+ when: ansible_os_family == 'Suse'
+
+- name: Initialize postgres (Suse)
+ service: name=postgresql state=started
+ when: ansible_os_family == 'Suse'
+
+- name: Copy pg_hba into place
+ template:
+ src: files/pg_hba.conf
+ dest: '{{ pg_hba_location }}'
+ owner: '{{ pg_user }}'
+ group: '{{ pg_group }}'
+ mode: '0644'
+
+- name: Generate locales (Debian)
+ locale_gen:
+ name: '{{ item }}'
+ state: present
+ with_items:
+ - pt_BR
+ - es_ES
+ when: ansible_os_family == 'Debian'
+
+- block:
+ - name: Install langpacks (RHEL8)
+ yum:
+ name:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ - glibc-all-langpacks
+ state: present
+ when: ansible_distribution_major_version is version('8', '>=')
+
+ - name: Check if locales need to be generated (RedHat)
+ shell: localedef --list-archive | grep -a -q '^{{ locale }}$'
+ register: locale_present
+ ignore_errors: true
+ with_items:
+ - es_ES
+ - pt_BR
+ loop_control:
+ loop_var: locale
+
+ - name: Reinstall internationalization files
+ shell: yum -y reinstall glibc-common || yum -y install glibc-common
+ when: locale_present is failed
+
+ - name: Generate locale (RedHat)
+ command: localedef -f ISO-8859-1 -i {{ item.locale }} {{ item.locale }}
+ when: item is failed
+ with_items: '{{ locale_present.results }}'
+ when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora'
+
+- name: Install glibc langpacks (Fedora >= 24)
+ package:
+ name: '{{ item }}'
+ state: latest
+ with_items:
+ - glibc-langpack-es
+ - glibc-langpack-pt
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version is version('24', '>=')
+
+- name: enable postgresql service (FreeBSD)
+ lineinfile:
+ path: /etc/rc.conf
+ line: postgresql_enable="YES"
+ when: ansible_os_family == "FreeBSD"
+
+- name: start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: Pause between start and stop
+ pause:
+ seconds: 5
+
+- name: Kill all postgres processes
+ shell: 'pkill -u {{ pg_user }}'
+ become: true
+ when: ansible_facts.distribution == 'CentOS' and ansible_facts.distribution_major_version == '8'
+ ignore_errors: true
+ register: terminate
+
+- name: Stop postgresql service
+ service: name={{ postgresql_service }} state=stopped
+ when: terminate is not succeeded
+
+- name: Pause between stop and start
+ pause:
+ seconds: 5
+
+- name: Start postgresql service
+ service: name={{ postgresql_service }} state=started
+
+- name: copy control file for dummy ext
+ copy:
+ src: dummy.control
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/dummy.control
+ mode: '0444'
+ when: ansible_os_family == 'Debian'
+
+- name: copy version files for dummy ext
+ copy:
+ src: '{{ item }}'
+ dest: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ with_items:
+ - dummy--1.0.sql
+ - dummy--2.0.sql
+ - dummy--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: add update paths
+ file:
+ path: /usr/share/postgresql/{{ pg_ver }}/extension/{{ item }}
+ mode: '0444'
+ state: touch
+ with_items:
+ - dummy--1.0--2.0.sql
+ - dummy--2.0--3.0.sql
+ when: ansible_os_family == 'Debian'
+
+- name: Get PostgreSQL version
+ become_user: '{{ pg_user }}'
+ become: true
+ shell: echo 'SHOW SERVER_VERSION' | psql --tuples-only --no-align --dbname postgres
+ register: postgres_version_resp
+
+- name: Print PostgreSQL server version
+ debug:
+ msg: '{{ postgres_version_resp.stdout }}'
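The registered server version is printed for the CI log and can also gate version-dependent tests in targets that depend on this role. A sketch with a hypothetical tasks file:

    - name: Run tests that require PostgreSQL 10 or newer (sketch)
      include_tasks: postgres10_tests.yml  # hypothetical file
      when: postgres_version_resp.stdout is version('10', '>=')
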
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml
new file mode 100644
index 000000000..99a8a14b8
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Alpine-py3.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "py3-psycopg2"
+
+pg_hba_location: "/var/lib/postgresql/data/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml
new file mode 100644
index 000000000..40215d1ad
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Archlinux-py3.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/postgres/data/pg_hba.conf"
+pg_dir: "/var/lib/postgres/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml
new file mode 100644
index 000000000..1ffd257b2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-11-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/13/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/13/main"
+pg_ver: 13
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
new file mode 100644
index 000000000..87063214a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Debian-8.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.4/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.4/main"
+pg_ver: 9.4
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml
new file mode 100644
index 000000000..a92de47cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11-py3.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql95-server
+ - "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-psycopg2"
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml
new file mode 100644
index 000000000..c38f3b59b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-11.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql95-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
new file mode 100644
index 000000000..a92de47cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0-py3.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql95-server
+ - "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-psycopg2"
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.5
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml
new file mode 100644
index 000000000..7c5586f5b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.0.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql96-server
+ - py27-psycopg2
+
+pg_dir: /usr/local/pgsql/data
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 9.6
+pg_user: pgsql
+pg_group: pgsql
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
new file mode 100644
index 000000000..8e5ddfa13
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1-py3.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql11-server
+ - "py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-psycopg2"
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml
new file mode 100644
index 000000000..0cdc22e13
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/FreeBSD-12.1.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - postgresql11-server
+ - py27-psycopg2
+
+pg_dir: /var/db/postgres/data11
+pg_hba_location: "{{ pg_dir }}/pg_hba.conf"
+pg_ver: 11
+pg_user: postgres
+pg_group: postgres
+
+locale_latin_suffix: .ISO8859-1
+locale_utf8_suffix: .UTF-8
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
new file mode 100644
index 000000000..3892f2e45
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+ - "bzip2"
+ - "xz"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
new file mode 100644
index 000000000..5670a7fc9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/RedHat.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+ - "bzip2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml
new file mode 100644
index 000000000..1d850de84
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-12.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.1/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.1/main"
+pg_ver: 9.1
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml
new file mode 100644
index 000000000..881fc533e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-14.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.3/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.3/main"
+pg_ver: 9.3
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml
new file mode 100644
index 000000000..482982fe1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml
new file mode 100644
index 000000000..f2df72af7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-16.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python-psycopg2"
+
+pg_hba_location: "/etc/postgresql/9.5/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/9.5/main"
+pg_ver: 9.5
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml
new file mode 100644
index 000000000..19213f4d6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-18-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/10/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/10/main"
+pg_ver: 10
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml
new file mode 100644
index 000000000..58fb8a06f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-20-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/12/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/12/main"
+pg_ver: 12
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml
new file mode 100644
index 000000000..8ea444099
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/Ubuntu-22-py3.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql"
+ - "postgresql-common"
+ - "python3-psycopg2"
+
+pg_hba_location: "/etc/postgresql/14/main/pg_hba.conf"
+pg_dir: "/var/lib/postgresql/14/main"
+pg_ver: 14
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
new file mode 100644
index 000000000..6f96043a3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql-server"
+ - "python3-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml
new file mode 100644
index 000000000..9d64d969a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_postgresql_db/vars/default.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+postgresql_packages:
+ - "postgresql-server"
+ - "python-psycopg2"
+
+pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf"
+pg_dir: "/var/lib/pgsql/data"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml
new file mode 100644
index 000000000..46dae9898
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/defaults/main.yml
@@ -0,0 +1,56 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# General
+redis_packages:
+ Alpine:
+ - redis
+ Archlinux:
+ - redis
+ Debian:
+ - redis-server
+ Ubuntu:
+ - redis-server
+ openSUSE Leap:
+ - redis
+ Fedora:
+ - redis
+ CentOS:
+ - redis
+ FreeBSD:
+ - redis
+
+redis_bin:
+ Alpine: /usr/bin/redis-server
+ Archlinux: /usr/bin/redis-server
+ Debian: /usr/bin/redis-server
+ Ubuntu: /usr/bin/redis-server
+ openSUSE Leap: /usr/sbin/redis-server
+ Fedora: /usr/bin/redis-server
+ CentOS: /usr/bin/redis-server
+ FreeBSD: /usr/local/bin/redis-server
+
+redis_module: redis
+
+redis_password: PASS
+
+old_redis: >-
+ {{
+ (ansible_distribution == 'CentOS' and ansible_distribution_major_version|int <= 7) or
+ (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int <= 18) or
+ (ansible_os_family == 'FreeBSD' and ansible_distribution_major_version|int <= 12)
+ }}
+
+# Master
+master_port: 6379
+master_conf: /etc/redis-master.conf
+master_datadir: /var/lib/redis-master
+master_logdir: /var/log/redis-master
+
+# Replica
+replica_port: 6380
+replica_conf: /etc/redis-replica.conf
+replica_datadir: /var/lib/redis-replica
+replica_logdir: /var/log/redis-replica
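These defaults are keyed by ansible_distribution, so consuming tasks resolve the right package list and server binary with a plain dictionary lookup, while old_redis flips the replication keyword for releases that predate replicaof. A minimal sketch of the lookup:

- name: Resolve per-distribution values (illustrative)
  debug:
    msg: "Install {{ redis_packages[ansible_distribution] }}, run {{ redis_bin[ansible_distribution] }}"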
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml
new file mode 100644
index 000000000..a0595cbe3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/handlers/main.yml
@@ -0,0 +1,39 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: stop redis services
+ shell: |
+ kill -TERM $(cat /var/run/redis_{{ master_port }}.pid)
+ kill -TERM $(cat /var/run/redis_{{ replica_port }}.pid)
+ listen: cleanup redis
+
+- name: remove redis packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ redis_packages[ansible_distribution] }}"
+ listen: cleanup redis
+
+- name: remove pip packages
+ pip:
+ name: redis
+ state: absent
+ listen: cleanup redis
+
+- name: remove redis data
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - "{{ master_conf }}"
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - /var/run/redis_{{ master_port }}.pid
+ - "{{ replica_conf }}"
+ - "{{ replica_datadir }}"
+ - "{{ replica_logdir }}"
+ - /var/run/redis_{{ replica_port }}.pid
+ listen: cleanup redis
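Every handler above subscribes to the same topic via listen: cleanup redis, so one notify from any setup task fires the whole teardown chain (stop servers, remove packages and pip modules, delete data) in the order defined. A minimal sketch of a triggering task:

- name: Any setup step can schedule the whole teardown (illustrative)
  package:
    name: "{{ redis_packages[ansible_distribution] }}"
    state: present
  notify: cleanup redis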
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml
new file mode 100644
index 000000000..db2617f4c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+- setup_pkg_mgr
+- setup_remote_constraints
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml
new file mode 100644
index 000000000..076a47359
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/main.yml
@@ -0,0 +1,12 @@
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- import_tasks: setup_redis_cluster.yml
+ when:
+ - ansible_distribution in ['CentOS', 'Fedora', 'FreeBSD', 'openSUSE Leap', 'Ubuntu', 'Debian', 'Archlinux', 'Alpine']
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
new file mode 100644
index 000000000..dd48bf2b6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_redis_replication/tasks/setup_redis_cluster.yml
@@ -0,0 +1,78 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# We run two servers listening on different ports
+# to be able to check replication (one server for master, another for replica).
+
+- name: Install redis dependencies
+ package:
+ name: "{{ redis_packages[ansible_distribution] }}"
+ state: latest
+ policy_rc_d: "{{ 101 if ansible_facts.pkg_mgr == 'apt' else omit }}"
+ notify: cleanup redis
+
+- name: Install redis module
+ pip:
+ name: "{{ redis_module }}"
+ extra_args: "-c {{ remote_constraints }}"
+ state: present
+ notify: cleanup redis
+
+- name: Create redis directories
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: redis
+ group: redis
+ loop:
+ - "{{ master_datadir }}"
+ - "{{ master_logdir }}"
+ - "{{ replica_datadir }}"
+ - "{{ replica_logdir }}"
+
+- name: Create redis configs
+ copy:
+ dest: "{{ item.file }}"
+ content: |
+ daemonize yes
+ port {{ item.port }}
+ pidfile /var/run/redis_{{ item.port }}.pid
+ logfile {{ item.logdir }}/redis.log
+ dir {{ item.datadir }}
+ requirepass {{ redis_password }}
+ masterauth {{ redis_password }}
+ loop:
+ - file: "{{ master_conf }}"
+ port: "{{ master_port }}"
+ logdir: "{{ master_logdir }}"
+ datadir: "{{ master_datadir }}"
+ - file: "{{ replica_conf }}"
+ port: "{{ replica_port }}"
+ logdir: "{{ replica_logdir }}"
+ datadir: "{{ replica_datadir }}"
+
+- name: Start redis master
+ shell: "{{ redis_bin[ansible_distribution] }} {{ master_conf }}"
+
+- name: Start redis replica
+ shell: "{{ redis_bin[ansible_distribution] }} {{ replica_conf }} --{% if old_redis %}slaveof{% else %}replicaof{% endif %} 127.0.0.1 {{ master_port }}"
+
+- name: Wait for redis master to be started
+ ansible.builtin.wait_for:
+ host: 127.0.0.1
+ port: "{{ master_port }}"
+ state: started
+ delay: 1
+ connect_timeout: 5
+ timeout: 30
+
+- name: Wait for redis replica to be started
+ ansible.builtin.wait_for:
+ host: 127.0.0.1
+ port: "{{ replica_port }}"
+ state: started
+ delay: 1
+ connect_timeout: 5
+ timeout: 30
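With the role defaults, the rendered master config pins port 6379 and password PASS, and the replica on 6380 starts with --replicaof (or --slaveof on older releases) pointing at it. A hedged verification sketch, assuming redis-cli is on PATH:

- name: Query replication state on the replica (illustrative check)
  command: redis-cli -p 6380 -a PASS info replication
  register: repl_info
  changed_when: false

- name: Assert the replica follows the master (illustrative)
  assert:
    that:
      - "'role:slave' in repl_info.stdout"
      - "'master_port:6379' in repl_info.stdout"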
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases
new file mode 100644
index 000000000..27ce6b087
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/file/tests/utils/constraints.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml
new file mode 100644
index 000000000..a1ac1aead
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_constraints/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: record constraints.txt path on remote host
+ set_fact:
+ remote_constraints: "{{ remote_tmp_dir }}/constraints.txt"
+
+- name: copy constraints.txt to remote host
+ copy:
+ src: "{{ role_path }}/../../../utils/constraints.txt"
+ dest: "{{ remote_constraints }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 000000000..f1c55b04f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 000000000..cc74b70af
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 000000000..c9d871c69
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
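Note the shadowing pattern: remote_tmp_dir first registers the full tempfile result, then set_fact overwrites it with just the .path value, so consumers see a plain string while the notify still schedules cleanup at end of play. An illustrative check:

- name: The fact now holds just the path string (illustrative)
  assert:
    that:
      - remote_tmp_dir is string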
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 000000000..6632cc848
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files) }}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers/main.yml
new file mode 100644
index 000000000..f1c55b04f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/handlers/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default-cleanup.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default-cleanup.yml
new file mode 100644
index 000000000..cc74b70af
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default-cleanup.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default.yml
new file mode 100644
index 000000000..0aef57f99
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/default.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create ~/tmp
+ file:
+ path: '~/tmp'
+ state: directory
+
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ path: ~/tmp
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/main.yml
new file mode 100644
index 000000000..6632cc848
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_remote_tmp_dir_outside_tmp/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files) }}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/defaults/main.yml
new file mode 100644
index 000000000..c842901c0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+rundeck_war_url: https://packagecloud.io/pagerduty/rundeck/packages/java/org.rundeck/rundeck-3.4.4-20210920.war/artifacts/rundeck-3.4.4-20210920.war/download
+rundeck_cli_url: https://github.com/rundeck/rundeck-cli/releases/download/v1.3.10/rundeck-cli-1.3.10-all.jar
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/tasks/main.yml
new file mode 100644
index 000000000..ea8b35f65
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Skip unsupported platforms
+ meta: end_play
+ when: ansible_distribution not in ['CentOS', 'Fedora', 'Debian', 'Ubuntu']
+
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: ansible_os_family in ['Debian', 'RedHat']
+
+- name: Set Rundeck base dir
+ set_fact:
+ rdeck_base: /home/rundeck
+
+- name: Install OpenJDK
+ package:
+ name: "{{ openjdk_pkg }}"
+ state: present
+
+- name: Install Rundeck
+ shell: |
+ mkdir -p $RDECK_BASE;
+ curl -k -o $RDECK_BASE/rundeck.war -L '{{ rundeck_war_url }}';
+ curl -k -o $RDECK_BASE/rundeck-cli.jar -L '{{ rundeck_cli_url }}'
+ cd $RDECK_BASE;
+ java -Xmx4g -jar rundeck.war &
+ environment:
+ RDECK_BASE: "{{ rdeck_base }}"
+
+- name: Wait for Rundeck port 4440
+ wait_for:
+ host: localhost
+ port: 4440
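Because the war is launched in the background with &, the role only blocks on the TCP port; a consumer that needs the API itself up could poll it as well. A hedged sketch (the /api/40 path is an assumption, and 403 is accepted for unauthenticated calls):

- name: Poll the Rundeck API until it answers (illustrative; /api/40 assumed)
  uri:
    url: http://localhost:4440/api/40/system/info
    status_code: [200, 403]
  register: rd_api
  retries: 30
  delay: 10
  until: rd_api.status in [200, 403]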
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Alpine.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Alpine.yml
new file mode 100644
index 000000000..dbf16b747
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Alpine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: openjdk11-jre-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Archlinux.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Archlinux.yml
new file mode 100644
index 000000000..fd4429c0b
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Archlinux.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: jre11-openjdk-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Debian.yml
new file mode 100644
index 000000000..1407cede6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: openjdk-11-jre-headless
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/RedHat.yml
new file mode 100644
index 000000000..314f0ef41
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_rundeck/vars/RedHat.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+openjdk_pkg: java-1.8.0-openjdk
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/aliases b/ansible_collections/community/general/tests/integration/targets/setup_snap/aliases
new file mode 100644
index 000000000..0a430dff1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/setup_epel
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/defaults/main.yml
new file mode 100644
index 000000000..7b4ab8dc7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+has_snap: false
+
+snap_packages:
+ - snapd
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/handlers/main.yml
new file mode 100644
index 000000000..08c1a23f1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/handlers/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove snapd
+ package:
+ name: "{{ snap_packages }}"
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Fedora.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Fedora.yml
new file mode 100644
index 000000000..d0681fa38
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Fedora.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install snapd (default)
+ package:
+ name: "{{ snap_packages }}"
+ state: present
+ notify: Remove snapd
+
+- name: Make sure that snapd is running
+ service:
+ name: snapd
+ state: started
+
+- name: Create link /snap
+ file:
+ src: /var/lib/snapd/snap
+ dest: /snap
+ state: link
+
+- name: Record that snap is installed
+ set_fact:
+ has_snap: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml
new file mode 100644
index 000000000..5bbfaff12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.2.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Do nothing
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml
new file mode 100644
index 000000000..5bbfaff12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-8.3.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Do nothing
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.0.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.0.yml
new file mode 100644
index 000000000..5bbfaff12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.0.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Do nothing
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.1.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.1.yml
new file mode 100644
index 000000000..5bbfaff12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-RedHat-9.1.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Do nothing
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Ubuntu.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Ubuntu.yml
new file mode 100644
index 000000000..93958d38d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/D-Ubuntu.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install snapd (ubuntu)
+ package:
+ name: "{{ snap_packages }}"
+ state: present
+ notify: Remove snapd
+
+- name: Make sure that snapd is running
+ service:
+ name: snapd
+ state: started
+
+- name: Record that snap is installed
+ set_fact:
+ has_snap: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/Debian.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/Debian.yml
new file mode 100644
index 000000000..d0681fa38
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/Debian.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install snapd (default)
+ package:
+ name: "{{ snap_packages }}"
+ state: present
+ notify: Remove snapd
+
+- name: Make sure that snapd is running
+ service:
+ name: snapd
+ state: started
+
+- name: Create link /snap
+ file:
+ src: /var/lib/snapd/snap
+ dest: /snap
+ state: link
+
+- name: Record that snap is installed
+ set_fact:
+ has_snap: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/RedHat.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/RedHat.yml
new file mode 100644
index 000000000..d0681fa38
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/RedHat.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install snapd (default)
+ package:
+ name: "{{ snap_packages }}"
+ state: present
+ notify: Remove snapd
+
+- name: Make sure that snapd is running
+ service:
+ name: snapd
+ state: started
+
+- name: Create link /snap
+ file:
+ src: /var/lib/snapd/snap
+ dest: /snap
+ state: link
+
+- name: Record that snap is installed
+ set_fact:
+ has_snap: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/default.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/default.yml
new file mode 100644
index 000000000..d0681fa38
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/default.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install snapd (default)
+ package:
+ name: "{{ snap_packages }}"
+ state: present
+ notify: Remove snapd
+
+- name: Make sure that snapd is running
+ service:
+ name: snapd
+ state: started
+
+- name: Create link /snap
+ file:
+ src: /var/lib/snapd/snap
+ dest: /snap
+ state: link
+
+- name: Record that snap is installed
+ set_fact:
+ has_snap: true
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/main.yml
new file mode 100644
index 000000000..8f3744a70
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Print the facts used to distinguish the platform
+ debug:
+ msg: "Distribution '{{ ansible_facts.distribution }}', version '{{ ansible_facts.distribution_version }}', OS family '{{ ansible_facts.os_family }}'"
+
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
+
+- name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "D-{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_version }}.yml"
+ - "D-{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "D-{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - "nothing.yml"
+ paths:
+ - "{{ role_path }}/tasks"
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/nothing.yml b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/nothing.yml
new file mode 100644
index 000000000..5bbfaff12
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_snap/tasks/nothing.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Do nothing
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem
new file mode 100644
index 000000000..130e0a2da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_certificate.pem
@@ -0,0 +1,23 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN CERTIFICATE-----
+MIIDAjCCAeqgAwIBAgIJANguFROhaWocMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV
+BAMMF1RMU0dlblNlbGZTaWduZWR0Um9vdENBMQ0wCwYDVQQHDAQkJCQkMB4XDTE5
+MDExMTA4MzMxNVoXDTI5MDEwODA4MzMxNVowMTEgMB4GA1UEAwwXVExTR2VuU2Vs
+ZlNpZ25lZHRSb290Q0ExDTALBgNVBAcMBCQkJCQwggEiMA0GCSqGSIb3DQEBAQUA
+A4IBDwAwggEKAoIBAQDqVt84czSxWnWW4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp
+7PrBbYF05FOgSdJLvL6grlRSQK2VPsXdLfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4l
+JVpSDsBV2orR4pOIf1s1+iSwvcRQkX46SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy
+0K2MbRs7oG2rdKks8zisfT0ymKnrFTdVeUjIrg0sStaMnf9VVkcEeYkfNY0vWqdn
+CV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET+q+gOvjsEqzn7DvlPkmk86hIIWXKi3aM
+A9swknL3rnagJL6GioWRpYUwKdRKmZxdyr4I2JTTAgMBAAGjHTAbMAwGA1UdEwQF
+MAMBAf8wCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBCwUAA4IBAQACTpPBf5WSwZ7r
+hrbPUN3qVh70HI0ZNK2jlK6b5fpSdw3JI/GQl0Kw3eGICLzwTByWvhD62U7IigL5
+0UWxWuEod310Y/qo/7OxRVPp5PH/0oNGoKHhEzas2ii0heQYGsHQUKGzYNNyVfjy
+nqBFz5AcKf067LcXivYqod6JDQHqFq/5/hWlIsHHrZIeijqqtthPq39GlGAYO+AB
+U66nzlH7YQgmfYfy6l7O4LsjXf/bz9rWvueO3NqCsmXV+FacDkOkwWA5Kf6rcgNL
+3G+2HAVTRIXDnO4ShnK6aYMW+UklpYRlVYBBUOdwoNIp5gI+BlSc1IuF6PdLVt3q
+VdjN1MjY
+-----END CERTIFICATE-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem
new file mode 100644
index 000000000..d9dc5ca0f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/ca_key.pem
@@ -0,0 +1,32 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqVt84czSxWnWW
+4Ng6hmKE3NarbLsycwtjrYBokV7Kk7Mp7PrBbYF05FOgSdJLvL6grlRSQK2VPsXd
+LfEv5uFXX6gyd2WQwKCiGGf4UY4ZIl4lJVpSDsBV2orR4pOIf1s1+iSwvcRQkX46
+SVjoKWbDUc4VLo1uy8UvavQI+DMioYyy0K2MbRs7oG2rdKks8zisfT0ymKnrFTdV
+eUjIrg0sStaMnf9VVkcEeYkfNY0vWqdnCV5wPfDBlnnxGMgqGdLSpzfyJ7qafFET
++q+gOvjsEqzn7DvlPkmk86hIIWXKi3aMA9swknL3rnagJL6GioWRpYUwKdRKmZxd
+yr4I2JTTAgMBAAECggEBALpg9ZDUMCiOpc+mbNO/ZkP90M7u38Q0M+7HY8XHOPkt
+l+XUkWueSMRLhSeLDzMlnwf1HyN8RZLaJkzP6XAL1VXEwuXAiIskaZ4Cg07Arp/W
+8cHhf4CcMuUVuCtOZcC+ajD4Do5zn9vkm9yH0ap0o0LdoWa/a8WfU+luy0EHBsSW
+6qqI+nqNFmISluVbfWt7t3zp273+8sir6YeHQu9G91/jzggv8rHmu4EHhi3cnU0K
+vY6OPCGBL7nrg9Rv1LSFpH95TvlIM6/Cm0AjgW7m6XwWUTaI9p+GvKzrYUSLd9L/
+QxlmAwiu/sBTXLrsWyr8XEtj+lVGxQ6eFbf6E+lUm8ECgYEA+8Wgmhf3VsC3gvJz
+w2jApEoOioD5iGOWGClGVURkfaBhFELr4XCTVMdBuCtxT7LYTMHTAlBqIbdWDjB4
+m/E417hLGogSDy7j0R0Mx75OOGEitxYUhe0VGDNoytgCNd2UnTMt42lp+9vAHZag
+INhVDOnxRNdtNTf1yYkWUMEbh1sCgYEA7kZNJXPVYJtR78+km/Gcv64Umci7KUV+
+hYc7chR5xv3cXvXg5eojKa4G7CyMQTX7VnRa6CiQKdN73AbIAhS4Oy5UlCOKtmb8
+xnBiOAYwSpOfIeZhjq0RvEeZX0t6u7XsErBZ03rEPKXF2nNDo1x8byrlKPtlUzwJ
+gb5yjmK/mekCgYEA1TWQAs5m4+2Bun+tbv7nnHkmhT4hktGays0xRYYMf6Jwc6MU
+dC5MZg/zZI5Nf8uZhq7hDWWh6vmCA7QifxSxKWVlHIu8l2UDAhRSvVg4j2Aa8Obe
+7GdQZNUsWhLBFHKXpuQvaRTc7q8yqxvicM4igDQg4EZ6sgW4vDm+TxapRF8CgYAz
+n6mhPqpxRtWGxo8cdkmGwfmWpAXg2DykQ3teqQ8FTQUM0erLBWJe6mR3kONGUaLF
+xWnYuMkbNsW0EwgMY17S+6O5gMXR5RhJChpNlxGpZrhoiNiEJ/0atMyG9/x8ZNrj
+5a9ggU248hWe0bBK2YPgNgP2UBlQ4kYRBSkerkhi2QKBgF+tlpyqcU+0iY82qRS2
+wMf7oI2pWR8nX9LPAY/nnvwWvqwcAFJPMlSMTu8Ext6h7l9yu+7JGL6JWwsO57Lb
+Gm/RxbuZ/kG/13+lSNmZiyHrhj6hZhkAMeFM34fpT4+DBXqSxZuvdrmwBc5B2jYg
+F9Bv8gcmZlGhqONL23evr9Gu
+-----END PRIVATE KEY-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem
new file mode 100644
index 000000000..9e956e6b0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_certificate.pem
@@ -0,0 +1,24 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBAjANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MThaFw0yOTAxMDgwODMzMThaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCoM+OQ3HCnCUAAz9KGGTwWB9hQbUfAZXm/stlb2/uOAp3rNwxAlCs/giymBHE6
+Iu6mrK006Vn+Z9ibqIrD2LuCOxcu25y8goqG62TgdP5sa9wR+597s0XssnwnaY8y
+bJ3p2zWAJvMgqQ0iNW/ZynpWbO85K5SryUykF7FAeNU9ogGGlIwCPjHhPvnwjkqd
+yDqaA1VaJKDUWIF9joI7sV4VLgGhQvzXRrHULsTeIF2m0+ebL0PTNEWHQ0dtgLYX
+kW7YO4Y6+n3cjHNH4qTof8V30EK8pk8kTdJ/x6ubwf+klFCAyroOxNOaxUy299Oo
+yD6qIPJPnGkPhrKtWnWIhNzJAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAK214+VVXnGnsUlvd9Q6A2Ea6UGrr6b7xkmlnIaNd+6xoUsDsHob
+srHYm7UC0uLi1KwSunI7AU5ZELVEUfAmJzh3O4d6C5sQyqKYPqd5harWOQ3BOD0I
+plHpp7qMtsPDuJBtmE/bmvF85eto0H7pPz+cTTXRlOaVVeiHjMggFcXdy1MzGo9C
+X/4wLQmsFeypTfe+ZGqvDh99VV+ffNMIsMh+opWEloaKiHmDKB6S9aC/MsVVM4RR
+nHm/UKTOukaGE9QIPkSSaygv3sBkVnQ2SHMvvtnjPHVHlizNoq6+YTnuOvKpo4o5
+V7Bij+W7rkBQLsEfwv2IC+gzmRz2yxr2tXk=
+-----END CERTIFICATE-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem
new file mode 100644
index 000000000..3848ad7cf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/client_key.pem
@@ -0,0 +1,31 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAqDPjkNxwpwlAAM/Shhk8FgfYUG1HwGV5v7LZW9v7jgKd6zcM
+QJQrP4IspgRxOiLupqytNOlZ/mfYm6iKw9i7gjsXLtucvIKKhutk4HT+bGvcEfuf
+e7NF7LJ8J2mPMmyd6ds1gCbzIKkNIjVv2cp6VmzvOSuUq8lMpBexQHjVPaIBhpSM
+Aj4x4T758I5Kncg6mgNVWiSg1FiBfY6CO7FeFS4BoUL810ax1C7E3iBdptPnmy9D
+0zRFh0NHbYC2F5Fu2DuGOvp93IxzR+Kk6H/Fd9BCvKZPJE3Sf8erm8H/pJRQgMq6
+DsTTmsVMtvfTqMg+qiDyT5xpD4ayrVp1iITcyQIDAQABAoIBAHPszzpXs4xr46Cr
+mvyxB6hnX76OkpUXWwGz0fptcsI9K3mhRuB7PhNXNE53YVIgITreZ8G/0jZ0e+VM
+E9dG2HS5JRE2ap/BmJfERJIuD+vJqrL6KMCondi0arz/E6I9GdjDK+xW69nmqRaa
+nawM0KQgD//m+WAsLJYrfg5hORZwI2SHaahawnCp0QaMmz3bdDWKRacM3q0UFX46
+Ze6CaZkUn+e1rHsTMcZBvxQWIVzysFNXh150idIB/PxL5YfCQqTSAj1c/nxaxz6a
+BvHFlpaYR3tvXXlexxfjglCwsGyckbvTyP1cBZqpv5oES+VKt2PrOve9Zyax+CYT
+0uQf6cECgYEA09+46QHXLfWh6jiJYu9skC9UrLU5czfCNB6PrUtFcjPFMYjZDcw9
+inJmcuTPXmfplxc47YDfpwotU+szTJDF+R8kknnfw9zVr/sIwZ5wsFfUQl/56Svn
+AIOVvHHvcvMX95XKGiuTsoCIJZNjJN3l3ztu/bRciuiVLyizglwIVrMCgYEAyzvK
+PFlWilbp3GPJlnW7x1bUxe1ziLE/Um+ujZx96+fy34hJLFdNdNzpNUjoOf3IDTGq
+6xl+vXcf12gimWMFcD3qNIGKHBDM9cIB2RDbb6YcqI8lOqopsmOyGmVLPkRpCoUK
+72kacQwvw6M9xjmpiG3dN8lE881jDmZi+hyCnJMCgYEAoIQnQAhP8Jbeo2dP1q+T
+bS0elnX532uH6xqYOW8EXwAPznZiEw0ANspzCWqGHHzXQMusKmtvhcq1CpXvWHt6
+MUHB4GMK/wVosxmZya5yq3bu7ZZu7JOBQCdwosMi6NB5AO7vnaIUFLFB9E3UWBLw
+243YicdCMU8B7yeD0ChPfPcCgYA1dYHKBBn+g8Q6Y8lIGaoOUmnfsok8gJtOfPAm
+ce6xmi7J29iboE9QmTeC+62Sa44u4ky6UNeE0QwAJnVLcb+hebfcneKNZWH0l1bT
+GVsPcFuDfzvkxZP4R782sERtmaMj0EFDHpuE9xatWIhMVyigKX4SSZAorXML+6S3
+c75rnwKBgBR+WU934wS+DbwTLlUB2mJWqJMEbOH/CUwPC7+VN4h1h3/i455iAeiU
+BizLS0SlD+MoSbC7URcZuquqGkmMlnJXoxF+NdxoWZK78tYNftryWoR87TloiVc/
+LhkxZxje4tgW/mTLqH3zKDoyyzDzG6Q6tAUN2ZTjJFEws7qF30Qe
+-----END RSA PRIVATE KEY-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem
new file mode 100644
index 000000000..b714ddbfb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_certificate.pem
@@ -0,0 +1,24 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN CERTIFICATE-----
+MIIDRjCCAi6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH
+ZW5TZWxmU2lnbmVkdFJvb3RDQTENMAsGA1UEBwwEJCQkJDAeFw0xOTAxMTEwODMz
+MTZaFw0yOTAxMDgwODMzMTZaMC0xGjAYBgNVBAMMEWFuc2libGUudGxzLnRlc3Rz
+MQ8wDQYDVQQKDAZzZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQDIwErHwAesRBfd9HiZkmB3VYh28c1QkE9I8nYyHJKX2ZBUhAzK+h80BkcTJJ94
+265qWyACH/wl54Xe/ofFUFrGa4vz0qz4UkL/KI0OGw28Y4qnKdorb9DumbiIPB+9
+I9TJT9vhtXTxBNlBTpv3ONHL8EzdV6ZmuvELU11H27oQ4xoUYhfXPXLMLK0sOnXZ
+lt0BOMMd5fVpJVa8fvXiw3626a0aXCr4e/MWUsBFRnzrXfgoW+AjYoTjKKS2hLYo
+8//MM05h7ROIXrNe990sf9C1G+fOThmOMszK9sjMhu2xHranRcz5aA0UTfyOjTs8
+9WexUYhC5VorYyRWtVZu2mDjAgMBAAGjbTBrMAkGA1UdEwQCMAAwCwYDVR0PBAQD
+AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMDwGA1UdEQQ1MDOCEWFuc2libGUudGxz
+LnRlc3RzghNNYWNCb29rLVByby00LmxvY2Fsgglsb2NhbGhvc3QwDQYJKoZIhvcN
+AQELBQADggEBAFoPBeB6tQhFS1198sia5NDHDDrghDOIlE0QbaoA+MSKzsaIy8Mu
+mNcM2ewYpT600XXTBxcqF6/vuKL9OEbvivtRYQu1YfkifN1jzREoWTieUkR5ytzt
+8ATfFkgTWJmiRiOIb/fNgewvhd+aKxep0OGwDiSKKl1ab6F17Cp4iK8sDBWmnUb6
+0Wf7pfver1Gl0Gp8vRXGUuc8a7udA9a8mV70HJlLkMdMvR9U8Bqih0+iRaqNWXRZ
+7Lc6v5LbzrW/ntilmgU6F0lwxPydg49MY4UrSXcjYLZs9T4iYHwTfLxFjFMIgGwn
+peYMKRj18akP9i2mjj5O2mRu4K+ecuUSOGI=
+-----END CERTIFICATE-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem
new file mode 100644
index 000000000..ec0134993
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/files/server_key.pem
@@ -0,0 +1,31 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAyMBKx8AHrEQX3fR4mZJgd1WIdvHNUJBPSPJ2MhySl9mQVIQM
+yvofNAZHEySfeNuualsgAh/8JeeF3v6HxVBaxmuL89Ks+FJC/yiNDhsNvGOKpyna
+K2/Q7pm4iDwfvSPUyU/b4bV08QTZQU6b9zjRy/BM3VemZrrxC1NdR9u6EOMaFGIX
+1z1yzCytLDp12ZbdATjDHeX1aSVWvH714sN+tumtGlwq+HvzFlLARUZ86134KFvg
+I2KE4yiktoS2KPP/zDNOYe0TiF6zXvfdLH/QtRvnzk4ZjjLMyvbIzIbtsR62p0XM
++WgNFE38jo07PPVnsVGIQuVaK2MkVrVWbtpg4wIDAQABAoIBAHw3wA3pnNXTLJGC
+fD1KfbZZjp9K76gyI10X6lsHow2i6dPiAah3LGecms4VkzfNdxcIW7303Kj3obZh
++ND277RnR6oPakgdXqdUCDP6OX2gemMFWqIWBkodhDmIOntmeHw4le4LwdiBD42B
+frBy0B5JCsbLPYPDmPNRGh8krvVS+Eir4hb4tK95TPMSL0vEjvHYFbCxv7//Ri1p
+3CROGp2CGX0WZ+Zs0crRNoIhRRM6kLAhROcqejtnEy6o7l5CWpCAL2vxlE9y8/kL
+iRawSZRFZnz/zGnqpx0vswgvijkuPfcNGMSzdwaiDgQz8D0GkJ7s9VgzZJazNy+1
+ET/4YIECgYEA612rwP9Ar9qdYbmmMPaJzITnaIrNGfO2JvaQqZt+DG8sVgdxL7V5
+D6emcw406drKRZvFAxnW6ZW2bVpmit02osl0re2A/nOTXLNuo338Qkap/hG8YZrF
+bw7w75pFa/rwlDtedjBnGHO2KbRXeU5Hn5wLoKjYgJoF6Ht+PPdL0IsCgYEA2lnC
+pQEhM51iRMDqNdmVJyvsTNU1ikoO8HaXHq+LwOQETaKMnDwp4Bn14E815CTulAc/
+tsDTKSDk6umZ+IufG1a2v7CqgKVwkB4HkgxKFQs2gQdTFfoMi5eeHR+njuNtklp1
+9fWfKHsP/ddrg+iTVTRZBLWexgKK89IMHYalpAkCgYEAy0Q3a9NF81mTJ+3kOE8C
+zO1OyLtuzGXsvxOb9c6C+owctyNwPeq05a89EgqH6hr5K0qOx9HOCCcyyJgVDQJl
+CAuByB/gkmAQOTQBbhMFA9vxPanljknTDsnRjKwoHkw2712ig+Hjd3ufK79C+FGB
+i7eBVzva1p2uUowshsxv3mcCgYAOFiRciMofjlO8o8V4W+Undcn02vxtQ4HbOYte
+S2z0sMEmUQpJOghpkMMwCWwsn8VUf3M40w/MY3bhQNjSFA/br6hyjW8yhXnRkl5i
+qbBN0z9c66AMlukgSFPHBTfGHB4Bhxx9Fa+C6Q2LDs6839BBevMTPrRTie509GQb
+s4gUIQKBgAvE8wLcmozno0GLDnBdKRZP/C7tmVnAINuraITPUBTASwI+Qo8ILigQ
+LRLaDqF84BEpjb8vdzkYFQqRQSZ8BI8NydfuKEFSBfL27sBvSGMYQJVm6bryUmPq
+T3ayaeZ4Wb3FFDijgtM9dRKyf7p4hQPOqM44QrntAtb43b2Q5L7M
+-----END RSA PRIVATE KEY-----
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml
new file mode 100644
index 000000000..ea4b9ecaa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_tls/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Generated certificate with: https://github.com/michaelklishin/tls-gen
+# ~/tls-gen/basic# make PASSWORD=bunnies CN=ansible.tls.tests
+# verify with: make info
+
+- name: ensure target directory is present
+ file:
+ path: /tls
+ state: directory
+
+- name: ensure TLS files are present
+ copy:
+ src: "{{ item }}"
+ dest: "/tls/{{ item }}"
+ loop:
+ - ca_certificate.pem
+ - ca_key.pem
+ - client_certificate.pem
+ - client_key.pem
+ - server_certificate.pem
+ - server_key.pem
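After this role runs, the CA, client, and server pairs sit under /tls/ with predictable names, so tests can point any TLS-aware module at them. A hedged usage sketch (the endpoint and port are assumptions):

- name: Call a TLS endpoint with the staged client pair (illustrative)
  uri:
    url: https://localhost:8443/
    ca_path: /tls/ca_certificate.pem
    client_cert: /tls/client_certificate.pem
    client_key: /tls/client_key.pem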
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml
new file mode 100644
index 000000000..03b12c95c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/defaults/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+wf_tmp_dir: '{{ remote_tmp_dir }}/wildfly_tmp'
+wf_homedir: '{{ wf_tmp_dir }}/wildfly'
+wf_service_file_path: /etc/systemd/system/wildfly.service
+wf_version: 16.0.0.Final
+wf_user: wildfly
+jboss_root: '{{ wf_homedir }}'
+deploy_dir: '{{ jboss_root }}/standalone/deployments'
+default_deploy_root: /var/lib/jbossas/standalone/deployments
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf
new file mode 100644
index 000000000..684ce12a2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/files/wildfly.conf
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The configuration you want to run
+WILDFLY_CONFIG=standalone.xml
+
+# The mode you want to run
+WILDFLY_MODE=standalone
+
+# The address to bind to
+WILDFLY_BIND=0.0.0.0
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml
new file mode 100644
index 000000000..1383b1575
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/handlers/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Stop wildfly (jboss)
+ systemd:
+ name: wildfly
+ state: stopped
+ ignore_errors: true
+
+- name: Remove files
+ file:
+ path: '{{ item }}'
+ state: absent
+ loop:
+ - '{{ wf_service_file_path }}'
+ - '{{ default_deploy_root }}'
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml
new file mode 100644
index 000000000..2d29ebb67
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+- setup_pkg_mgr
+- setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml
new file mode 100644
index 000000000..26f5083b0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/tasks/main.yml
@@ -0,0 +1,107 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Skip unsupported platforms
+ meta: end_play
+ when: (ansible_distribution != 'CentOS') or
+ (ansible_distribution == 'CentOS' and ansible_distribution_major_version is not version('7', '>='))
+
+- name: Install java
+ package:
+ name: java-1.8.0-openjdk-devel
+
+- name: Create wf_tmp_dir
+ file:
+ path: '{{ wf_tmp_dir }}'
+ state: directory
+
+- name: Download wildfly
+ get_url:
+ url: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/setup_wildfly_server/wildfly-{{ wf_version }}.tar.gz'
+ dest: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+
+- name: Unarchive tar
+ unarchive:
+ src: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+ dest: '{{ wf_tmp_dir }}'
+ remote_src: true
+
+- name: Remove tar
+ file:
+ path: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}.tar.gz'
+ state: absent
+
+- name: Create symlink
+ file:
+ src: '{{ wf_tmp_dir }}/wildfly-{{ wf_version }}'
+ dest: '{{ wf_tmp_dir }}/wildfly'
+ state: link
+
+- name: Create group for wildfly
+ group:
+ name: '{{ wf_user }}'
+ system: true
+
+- name: Create user for wildfly
+ user:
+ name: '{{ wf_user }}'
+ system: true
+ group: '{{ wf_user }}'
+ home: '{{ wf_homedir }}'
+
+- name: Set permissions
+ file:
+ path: '{{ remote_tmp_dir }}'
+ state: directory
+ owner: '{{ wf_user }}'
+ group: '{{ wf_user }}'
+ recurse: true
+
+- name: Create config file
+ copy:
+ src: wildfly.conf
+ dest: '{{ wf_homedir }}/wildfly.conf'
+ mode: "0644"
+
+- name: Create launcher
+ template:
+ src: launch.sh.j2
+ dest: '{{ wf_homedir }}/bin/launch.sh'
+ mode: "0755"
+
+- name: Make scripts executable
+ shell: 'chmod +rx {{ wf_homedir }}/bin/*.sh'
+
+- name: Create service file
+ template:
+ src: wildfly.service.j2
+ dest: '{{ wf_service_file_path }}'
+ mode: "0644"
+
+- name: Create directories for testing the default deploy_path
+ become: true
+ file:
+ path: '{{ default_deploy_root }}'
+ state: directory
+ recurse: true
+ owner: '{{ wf_user }}'
+ group: '{{ wf_user }}'
+
+- name: Create symlink for testing the default deploy_path
+ file:
+ state: link
+ src: '{{ deploy_dir }}'
+ dest: '{{ default_deploy_root }}/deployments'
+
+- name: Reload systemd and start wildfly
+ systemd:
+ daemon_reload: true
+ name: wildfly
+ state: started
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2 b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2
new file mode 100644
index 000000000..7a80251a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/launch.sh.j2
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
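+# Launch WildFly in the mode given as $1 ("domain" runs domain.sh; anything
+# else runs standalone.sh), with server config $2 and bind address $3.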
+if [ "x$WILDFLY_HOME" = "x" ]; then
+ WILDFLY_HOME="{{ wf_homedir }}"
+fi
+
+if [[ "$1" == "domain" ]]; then
+ $WILDFLY_HOME/bin/domain.sh -c "$2" -b "$3"
+else
+ $WILDFLY_HOME/bin/standalone.sh -c "$2" -b "$3"
+fi
diff --git a/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2 b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2
new file mode 100644
index 000000000..ec1055132
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/setup_wildfly_server/templates/wildfly.service.j2
@@ -0,0 +1,20 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[Unit]
+Description=The WildFly Application Server
+After=syslog.target network.target
+Before=httpd.service
+
+[Service]
+Environment=LAUNCH_JBOSS_IN_BACKGROUND=1
+EnvironmentFile=-{{ wf_homedir }}/wildfly.conf
+User=wildfly
+LimitNOFILE=102642
+PIDFile=/var/run/wildfly/wildfly.pid
+ExecStart={{ wf_homedir }}/bin/launch.sh $WILDFLY_MODE $WILDFLY_CONFIG $WILDFLY_BIND
+StandardOutput=null
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible_collections/community/general/tests/integration/targets/shutdown/aliases b/ansible_collections/community/general/tests/integration/targets/shutdown/aliases
new file mode 100644
index 000000000..afda346c4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/shutdown/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
diff --git a/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml
new file mode 100644
index 000000000..dadeb6269
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/shutdown/tasks/main.yml
@@ -0,0 +1,93 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install systemd-sysv on Ubuntu 18 and Debian
+ apt:
+ name: systemd-sysv
+ state: present
+ when: (ansible_distribution == 'Ubuntu' and ansible_distribution_major_version is version('18', '>=')) or (ansible_distribution == 'Debian')
+ register: systemd_sysv_install
+
+- name: Execute shutdown with custom message and delay
+ community.general.shutdown:
+ delay: 100
+ msg: "Custom Message"
+ register: shutdown_result
+ check_mode: true
+
+- name: Execute shutdown with negative delay
+ community.general.shutdown:
+ delay: -100
+ register: shutdown_result_minus
+ check_mode: true
+
+- name: Verify custom message except on Alpine, AIX
+ assert:
+ that:
+ - '"Custom Message" in shutdown_result["shutdown_command"]'
+ - '"Shut down initiated by Ansible" in shutdown_result_minus["shutdown_command"]'
+ - '"Custom Message" not in shutdown_result_minus["shutdown_command"]'
+ when: ansible_os_family not in ['Alpine', 'AIX']
+
+- name: Verify shutdown command is present except Alpine, VMKernel
+ assert:
+ that: '"shutdown" in shutdown_result["shutdown_command"]'
+ when: ansible_os_family != 'Alpine' and ansible_system != 'VMKernel'
+
+- name: Verify shutdown command is present in Alpine
+ assert:
+ that: '"poweroff" in shutdown_result["shutdown_command"]'
+ when: ansible_os_family == 'Alpine'
+
+- name: Verify shutdown command is present in VMKernel
+ assert:
+ that: '"halt" in shutdown_result["shutdown_command"]'
+ when: ansible_system == 'VMKernel'
+
+- name: Verify shutdown delay is present in minutes in Linux
+ assert:
+ that:
+ - '"-h 1" in shutdown_result["shutdown_command"]'
+ - '"-h 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'Linux' and ansible_os_family != 'Alpine'
+
+- name: Verify shutdown delay is present in minutes in Void, MacOSX, OpenBSD
+ assert:
+ that:
+ - '"-h +1" in shutdown_result["shutdown_command"]'
+ - '"-h +0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system in ['Void', 'Darwin', 'OpenBSD']
+
+- name: Verify shutdown delay is present in seconds in FreeBSD
+ assert:
+ that:
+ - '"-h +100s" in shutdown_result["shutdown_command"]'
+ - '"-h +0s" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'FreeBSD'
+
+- name: Verify shutdown delay is present in seconds in Solaris, SunOS
+ assert:
+ that:
+ - '"-g 100" in shutdown_result["shutdown_command"]'
+ - '"-g 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system in ['Solaris', 'SunOS']
+
+- name: Verify shutdown delay is present in seconds in VMKernel
+ assert:
+ that:
+ - '"-d 100" in shutdown_result["shutdown_command"]'
+ - '"-d 0" in shutdown_result_minus["shutdown_command"]'
+ when: ansible_system == 'VMKernel'
+
+- name: Remove systemd-sysv on Ubuntu 18 in case it was installed by this test
+ apt:
+ name: systemd-sysv
+ state: absent
+ when: systemd_sysv_install is changed
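
For reference, the assertions above show that community.general.shutdown converts its delay (given in seconds) into each platform's native unit and exposes the final command in the shutdown_command return value. A minimal check-mode probe along these lines (a hedged sketch, not part of this patch; "probe" is an arbitrary register name) prints the command without shutting anything down:

    - name: Inspect the shutdown command without rebooting (sketch)
      community.general.shutdown:
        delay: 100  # seconds; asserted above to render as "-h 1" on Linux and "-h +100s" on FreeBSD
      check_mode: true
      register: probe

    - name: Print what would have been executed
      debug:
        var: probe.shutdown_command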
diff --git a/ansible_collections/community/general/tests/integration/targets/snap/aliases b/ansible_collections/community/general/tests/integration/targets/snap/aliases
new file mode 100644
index 000000000..b209bbc01
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+skip/aix
+skip/alpine
+skip/fedora
+skip/freebsd
+skip/osx
+skip/macos
+skip/docker
diff --git a/ansible_collections/community/general/tests/integration/targets/snap/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/snap/meta/main.yml
new file mode 100644
index 000000000..f36427f71
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_snap
diff --git a/ansible_collections/community/general/tests/integration/targets/snap/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/snap/tasks/main.yml
new file mode 100644
index 000000000..0f24e69f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap/tasks/main.yml
@@ -0,0 +1,243 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Has-snap block
+ when: has_snap
+ block:
+ - name: Make sure package is not installed (hello-world)
+ community.general.snap:
+ name: hello-world
+ state: absent
+
+ - name: Install package (hello-world) (check mode)
+ community.general.snap:
+ name: hello-world
+ state: present
+ register: install_check
+ check_mode: true
+
+ - name: Install package (hello-world)
+ community.general.snap:
+ name: hello-world
+ state: present
+ register: install
+
+ - name: Install package again (hello-world) (check mode)
+ community.general.snap:
+ name: hello-world
+ state: present
+ register: install_again_check
+ check_mode: true
+
+ - name: Install package again (hello-world)
+ community.general.snap:
+ name: hello-world
+ state: present
+ register: install_again
+
+ - name: Assert package has been installed just once (hello-world)
+ assert:
+ that:
+ - install is changed
+ - install_check is changed
+ - install_again is not changed
+ - install_again_check is not changed
+
+ - name: Check package has been installed correctly (hello-world)
+ command: hello-world
+ environment:
+ PATH: /snap/bin/
+
+ - name: Remove package (hello-world) (check mode)
+ community.general.snap:
+ name: hello-world
+ state: absent
+ register: remove_check
+ check_mode: true
+
+ - name: Remove package (hello-world)
+ community.general.snap:
+ name: hello-world
+ state: absent
+ register: remove
+
+ - name: Remove package again (hello-world) (check mode)
+ community.general.snap:
+ name: hello-world
+ state: absent
+ register: remove_again_check
+ check_mode: true
+
+ - name: Remove package again (hello-world)
+ community.general.snap:
+ name: hello-world
+ state: absent
+ register: remove_again
+
+ - name: Assert package has been removed just once (hello-world)
+ assert:
+ that:
+ - remove is changed
+ - remove_check is changed
+ - remove_again is not changed
+ - remove_again_check is not changed
+
+ - name: Make sure package from classic snap is not installed (nvim)
+ community.general.snap:
+ name: nvim
+ state: absent
+
+ - name: Install package from classic snap (nvim)
+ community.general.snap:
+ name: nvim
+ state: present
+ classic: true
+ register: classic_install
+
+ # testing classic idempotency
+ - name: Install package from classic snap again (nvim)
+ community.general.snap:
+ name: nvim
+ state: present
+ classic: true
+ register: classic_install_again
+
+ - name: Assert package has been installed just once (nvim)
+ assert:
+ that:
+ - classic_install is changed
+ - classic_install_again is not changed
+
+  # This tests whether a package that was installed with classic enabled
+  # can be removed without setting classic to true.
+ - name: Remove package from classic snap without setting classic to true (nvim)
+ community.general.snap:
+ name: nvim
+ state: absent
+ register: classic_remove_without_true_classic
+
+ - name: Remove package from classic snap with setting classic to true (nvim)
+ community.general.snap:
+ name: nvim
+ state: absent
+ classic: true
+ register: classic_remove_with_true_classic
+
+ - name: Assert package has been removed without setting classic to true (nvim)
+ assert:
+ that:
+ - classic_remove_without_true_classic is changed
+ - classic_remove_with_true_classic is not changed
+
+
+ - name: Make sure package is not installed (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: absent
+
+ - name: Install package (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: present
+ register: install
+
+  - name: Install package with option (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: present
+ options:
+ - "listening-port=8080"
+ register: install_with_option
+
+ - name: Install package again with option (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: present
+ options:
+ - "listening-port=8080"
+ register: install_with_option_again
+
+ - name: Install package again with different options (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: present
+ options:
+ - "listening-port=8088"
+ - "document-root-dir=/tmp"
+ register: install_with_option_changed
+
+ - name: Remove package (uhttpd)
+ community.general.snap:
+ name: uhttpd
+ state: absent
+ register: remove
+
+ - name: Assert package has been installed with options just once and only changed options trigger a change (uhttpd)
+ assert:
+ that:
+ - install is changed
+ - install_with_option is changed
+ - "install_with_option.options_changed[0] == 'uhttpd:listening-port=8080'"
+ - install_with_option_again is not changed
+ - install_with_option_changed is changed
+ - "'uhttpd:listening-port=8088' in install_with_option_changed.options_changed"
+ - "'uhttpd:document-root-dir=/tmp' in install_with_option_changed.options_changed"
+ - "'uhttpd:listening-port=8080' not in install_with_option_changed.options_changed"
+ - remove is changed
+
+ - name: Install two packages at the same time
+ community.general.snap:
+ name:
+ - hello-world
+ - uhttpd
+ state: present
+ register: install_two
+
+ - name: Install two packages at the same time (again)
+ community.general.snap:
+ name:
+ - hello-world
+ - uhttpd
+ state: present
+ register: install_two_again
+
+ - name: Remove packages (hello-world & uhttpd)
+ community.general.snap:
+ name:
+ - hello-world
+ - uhttpd
+ state: absent
+ register: install_two_remove
+
+ - name: Remove packages again (hello-world & uhttpd)
+ community.general.snap:
+ name:
+ - hello-world
+ - uhttpd
+ state: absent
+ register: install_two_remove_again
+
+ - name: Assert installation of two packages
+ assert:
+ that:
+ - install_two is changed
+ - "'hello-world' in install_two.snaps_installed"
+ - "'uhttpd' in install_two.snaps_installed"
+ - install_two.snaps_removed is not defined
+ - install_two_again is not changed
+ - install_two_again.snaps_installed is not defined
+ - install_two_again.snaps_removed is not defined
+ - install_two_remove is changed
+      - install_two_remove.snaps_installed is not defined
+ - "'hello-world' in install_two_remove.snaps_removed"
+ - "'uhttpd' in install_two_remove.snaps_removed"
+ - install_two_remove_again is not changed
+ - install_two_remove_again.snaps_installed is not defined
+ - install_two_remove_again.snaps_removed is not defined
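
The uhttpd tasks above pin down how snap options behave: each entry is a "key=value" string applied to the snap being installed, and only options whose values actually changed are echoed back in options_changed, prefixed with the snap name. A hedged sketch of that flow ("result" is an arbitrary register name):

    - name: Configure a snap at install time (sketch)
      community.general.snap:
        name: uhttpd
        state: present
        options:
          - "listening-port=8080"
      register: result

    - name: Changed entries come back as "<snap>:<key>=<value>"
      debug:
        var: result.options_changed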
diff --git a/ansible_collections/community/general/tests/integration/targets/snap_alias/aliases b/ansible_collections/community/general/tests/integration/targets/snap_alias/aliases
new file mode 100644
index 000000000..b209bbc01
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap_alias/aliases
@@ -0,0 +1,13 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+skip/aix
+skip/alpine
+skip/fedora
+skip/freebsd
+skip/osx
+skip/macos
+skip/docker
diff --git a/ansible_collections/community/general/tests/integration/targets/snap_alias/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/snap_alias/meta/main.yml
new file mode 100644
index 000000000..f36427f71
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap_alias/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_snap
diff --git a/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/main.yml
new file mode 100644
index 000000000..1934eeb9f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test
+ include_tasks: test.yml
+ when: has_snap
diff --git a/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/test.yml
new file mode 100644
index 000000000..50e6e33b4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/snap_alias/tasks/test.yml
@@ -0,0 +1,159 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Ensure snap 'hello-world' is not installed
+ community.general.snap:
+ name: hello-world
+ state: absent
+
+- name: Ensure snap 'hello-world' is installed fresh
+ community.general.snap:
+ name: hello-world
+
+################################################################################
+
+- name: Create snap alias (check mode)
+ community.general.snap_alias:
+ name: hello-world
+ alias: hw
+ check_mode: true
+ register: alias_single_0
+
+- name: Create snap alias
+ community.general.snap_alias:
+ name: hello-world
+ alias: hw
+ register: alias_single_1
+
+- name: Create snap alias (check mode idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ alias: hw
+ check_mode: true
+ register: alias_single_2
+
+- name: Create snap alias (idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ alias: hw
+ register: alias_single_3
+
+- name: Assert single alias
+ assert:
+ that:
+ - alias_single_0 is changed
+ - alias_single_1 is changed
+ - alias_single_2 is not changed
+ - alias_single_3 is not changed
+ - 'alias_single_1.snap_aliases["hello-world"] == ["hw"]'
+ - 'alias_single_3.snap_aliases["hello-world"] == ["hw"]'
+
+- name: Create multiple aliases (check mode)
+ community.general.snap_alias:
+ name: hello-world
+ aliases: [hw, hw2, hw3]
+ check_mode: true
+ register: alias_multi_0
+
+- name: Create multiple aliases
+ community.general.snap_alias:
+ name: hello-world
+ aliases: [hw, hw2, hw3]
+ register: alias_multi_1
+
+- name: Create multiple aliases (check mode idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ aliases: [hw, hw2, hw3]
+ check_mode: true
+ register: alias_multi_2
+
+- name: Create multiple aliases (idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ aliases: [hw, hw2, hw3]
+ register: alias_multi_3
+
+- name: Assert multi alias
+ assert:
+ that:
+ - alias_multi_0 is changed
+ - alias_multi_1 is changed
+ - alias_multi_2 is not changed
+ - alias_multi_3 is not changed
+ - 'alias_multi_1.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
+ - 'alias_multi_3.snap_aliases["hello-world"] == ["hw", "hw2", "hw3"]'
+
+- name: Remove one specific alias (check mode)
+ community.general.snap_alias:
+ alias: hw
+ state: absent
+ check_mode: true
+ register: alias_remove_0
+
+- name: Remove one specific alias
+ community.general.snap_alias:
+ alias: hw
+ state: absent
+ register: alias_remove_1
+
+- name: Remove one specific alias (check mode idempotent)
+ community.general.snap_alias:
+ alias: hw
+ state: absent
+ check_mode: true
+ register: alias_remove_2
+
+- name: Remove one specific alias (idempotent)
+ community.general.snap_alias:
+ alias: hw
+ state: absent
+ register: alias_remove_3
+
+- name: Assert remove alias
+ assert:
+ that:
+ - alias_remove_0 is changed
+ - alias_remove_1 is changed
+ - alias_remove_2 is not changed
+ - alias_remove_3 is not changed
+ - 'alias_remove_1.snap_aliases["hello-world"] == ["hw2", "hw3"]'
+ - 'alias_remove_3.snap_aliases["hello-world"] == ["hw2", "hw3"]'
+
+- name: Remove all aliases for snap (check mode)
+ community.general.snap_alias:
+ name: hello-world
+ state: absent
+ check_mode: true
+ register: alias_remove_all_0
+
+- name: Remove all aliases for snap
+ community.general.snap_alias:
+ name: hello-world
+ state: absent
+ register: alias_remove_all_1
+
+- name: Remove all aliases for snap (check mode idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ state: absent
+ check_mode: true
+ register: alias_remove_all_2
+
+- name: Remove all aliases for snap (idempotent)
+ community.general.snap_alias:
+ name: hello-world
+ state: absent
+ register: alias_remove_all_3
+
+- name: Assert remove_all alias
+ assert:
+ that:
+ - alias_remove_all_0 is changed
+ - alias_remove_all_1 is changed
+ - alias_remove_all_2 is not changed
+ - alias_remove_all_3 is not changed
+ - 'alias_remove_all_1.snap_aliases["hello-world"] == []'
+ - 'alias_remove_all_3.snap_aliases["hello-world"] == []'
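
Judging from the assertions above, snap_alias reports the resulting alias list per snap in its snap_aliases return value, for additions and removals alike. A minimal sketch ("result" is an arbitrary register name):

    - name: Replace the alias set for a snap (sketch)
      community.general.snap_alias:
        name: hello-world
        aliases: [hw, hw2]
      register: result

    - name: Show the aliases that remain for the snap
      debug:
        var: result.snap_aliases['hello-world']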
diff --git a/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/aliases b/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/aliases
new file mode 100644
index 000000000..bd1f02444
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unsupported
diff --git a/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/tasks/main.yml
new file mode 100644
index 000000000..42e53d7d7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/spectrum_model_attrs/tasks/main.yml
@@ -0,0 +1,78 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Verify required variables: model_name, model_type, oneclick_username, oneclick_password, oneclick_url"
+ fail:
+ msg: "One or more of the following variables are not set: model_name, model_type, oneclick_username, oneclick_password, oneclick_url"
+ when: >
+ model_name is not defined
+ or model_type is not defined
+ or oneclick_username is not defined
+ or oneclick_password is not defined
+ or oneclick_url is not defined
+
+- block:
+ - name: "001: Enforce maintenance mode for {{ model_name }} with a note about why [check_mode test]"
+ spectrum_model_attrs: &mm_enabled_args
+ url: "{{ oneclick_url }}"
+ username: "{{ oneclick_username }}"
+ password: "{{ oneclick_password }}"
+ name: "{{ model_name }}"
+ type: "{{ model_type }}"
+ validate_certs: false
+ attributes:
+ - name: "isManaged"
+ value: "false"
+ - name: "Notes"
+ value: "{{ note_mm_enabled }}"
+ check_mode: true
+ register: mm_enabled_check_mode
+
+ - name: "001: assert that changes were made"
+ assert:
+ that:
+ - mm_enabled_check_mode is changed
+
+ - name: "001: assert that changed_attrs is properly set"
+ assert:
+ that:
+ - mm_enabled_check_mode.changed_attrs.Notes == note_mm_enabled
+ - mm_enabled_check_mode.changed_attrs.isManaged == "false"
+
+ - name: "002: Enforce maintenance mode for {{ model_name }} with a note about why"
+ spectrum_model_attrs:
+ <<: *mm_enabled_args
+ register: mm_enabled
+ check_mode: false
+
+ - name: "002: assert that changes were made"
+ assert:
+ that:
+ - mm_enabled is changed
+
+ - name: "002: assert that changed_attrs is properly set"
+ assert:
+ that:
+ - mm_enabled.changed_attrs.Notes == note_mm_enabled
+ - mm_enabled.changed_attrs.isManaged == "false"
+
+ - name: "003: Enforce maintenance mode for {{ model_name }} with a note about why [idempontence test]"
+ spectrum_model_attrs:
+ <<: *mm_enabled_args
+ register: mm_enabled_idp
+ check_mode: false
+
+ - name: "003: assert that changes were not made"
+ assert:
+ that:
+ - mm_enabled_idp is not changed
+
+ - name: "003: assert that changed_attrs is not set"
+ assert:
+ that:
+ - mm_enabled_idp.changed_attrs == {}
+
+ vars:
+ note_mm_enabled: "MM set via CO #1234 by OJ Simpson"
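
The tasks above use a plain YAML anchor (&mm_enabled_args) plus merge key (<<:) so that the check-mode, real, and idempotence runs share identical module arguments. The same pattern in isolation (a sketch; &base_args and the register names are arbitrary):

    - name: Define the arguments once via an anchor (sketch)
      spectrum_model_attrs: &base_args
        url: "{{ oneclick_url }}"
        username: "{{ oneclick_username }}"
        password: "{{ oneclick_password }}"
        name: "{{ model_name }}"
        type: "{{ model_type }}"
        validate_certs: false
        attributes:
          - name: "isManaged"
            value: "false"
      check_mode: true
      register: first_run

    - name: Re-run with identical arguments via the merge key
      spectrum_model_attrs:
        <<: *base_args
      register: second_run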
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/aliases b/ansible_collections/community/general/tests/integration/targets/ssh_config/aliases
new file mode 100644
index 000000000..6011128da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/python2.6 # stormssh only supports python3
+skip/python2.7 # stormssh only supports python3
+skip/freebsd # stormssh installation fails on freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/files/fake_id_rsa b/ansible_collections/community/general/tests/integration/targets/ssh_config/files/fake_id_rsa
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/files/fake_id_rsa
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/files/ssh_config_test b/ansible_collections/community/general/tests/integration/targets/ssh_config/files/ssh_config_test
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/files/ssh_config_test
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/ssh_config/meta/main.yml
new file mode 100644
index 000000000..4cdaaefba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/main.yml
new file mode 100644
index 000000000..c8b96d0c0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/main.yml
@@ -0,0 +1,245 @@
+# Test code for ssh_config module
+# Copyright (c) 2021, Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install required libs
+ pip:
+ name:
+ - 'paramiko<3.0.0'
+ state: present
+ extra_args: "-c {{ remote_constraints }}"
+
+- set_fact:
+ output_test_dir: '{{ remote_tmp_dir }}/test_ssh_config'
+
+- set_fact:
+ ssh_config_test: '{{ output_test_dir }}/ssh_config_test'
+ ssh_private_key: '{{ output_test_dir }}/fake_id_rsa'
+
+- name: Create a temporary directory
+ file:
+ path: "{{ output_test_dir }}"
+ state: directory
+
+- name: Copy sample config file
+ copy:
+ src: 'files/ssh_config_test'
+ dest: '{{ ssh_config_test }}'
+
+- name: Copy sample private key file
+ copy:
+ src: 'files/fake_id_rsa'
+ dest: '{{ ssh_private_key }}'
+
+- name: Fail for required argument
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ ignore_errors: true
+ register: host_required
+
+- name: Check if ssh_config fails for required parameter host
+ assert:
+ that:
+ - not host_required.changed
+
+- name: Add a host in check mode
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: host_add
+ check_mode: true
+
+- name: Check if changes are made in check mode
+ assert:
+ that:
+ - host_add.changed
+ - "'example.com' in host_add.hosts_added"
+ - host_add.hosts_changed is defined
+ - host_add.hosts_removed is defined
+
+- name: Add a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: host_add
+
+- name: Check if changes are made
+ assert:
+ that:
+ - host_add.changed
+ - "'example.com' in host_add.hosts_added"
+ - host_add.hosts_changed is defined
+ - host_add.hosts_removed is defined
+
+- name: Add same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: host_add_again
+
+- name: Check for idempotency
+ assert:
+ that:
+ - not host_add_again.changed
+ - host_add.hosts_changed is defined
+ - host_add.hosts_removed is defined
+ - host_add.hosts_added is defined
+
+- name: Update host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2224'
+ state: present
+ register: host_update
+
+- name: Check for update operation
+ assert:
+ that:
+ - host_update.changed
+ - host_update.hosts_changed is defined
+ - "'example.com' in host_update.hosts_changed"
+ - host_update.hosts_removed is defined
+ - host_update.hosts_added is defined
+ - host_update.hosts_change_diff is defined
+
+- name: Update host again
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2224'
+ state: present
+ register: host_update
+
+- name: Check update operation for idempotency
+ assert:
+ that:
+ - not host_update.changed
+ - host_update.hosts_changed is defined
+ - host_update.hosts_removed is defined
+ - host_update.hosts_added is defined
+ - host_update.hosts_change_diff is defined
+
+- name: Delete a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ state: absent
+ register: host_delete
+
+- name: Check if changes are made
+ assert:
+ that:
+ - host_delete.changed
+ - "'example.com' in host_delete.hosts_removed"
+ - host_delete.hosts_changed is defined
+ - host_delete.hosts_added is defined
+
+- name: Delete same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ state: absent
+ register: host_delete_again
+
+- name: Check for idempotency
+ assert:
+ that:
+ - not host_delete_again.changed
+ - host_delete_again.hosts_changed is defined
+ - host_delete_again.hosts_removed is defined
+ - host_delete_again.hosts_added is defined
+
+- name: Check if user and ssh_config_file are mutually exclusive
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ user: root
+ host: "example.com"
+ hostname: github.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: mut_ex
+ ignore_errors: true
+
+- name: Check mutual exclusive test - user and ssh_config_file
+ assert:
+ that:
+ - not mut_ex.changed
+ - "'parameters are mutually exclusive' in mut_ex.msg"
+
+- name: Check if proxycommand and proxyjump are mutually exclusive
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "example.com"
+ hostname: github.com
+ proxycommand: "ssh jumphost.example.com -W %h:%p"
+ proxyjump: "jumphost.example.com"
+ identity_file: '{{ ssh_private_key }}'
+ port: '2224'
+ state: present
+ register: proxy_mut_ex
+ ignore_errors: true
+
+- name: Check mutual exclusive test - proxycommand and proxyjump
+ assert:
+ that:
+ - not proxy_mut_ex.changed
+ - "'parameters are mutually exclusive' in proxy_mut_ex.msg"
+
+- name: Add a full name host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "full_name"
+ hostname: full_name.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: full_name
+
+- name: Check if changes are made
+ assert:
+ that:
+ - full_name is changed
+ - full_name.hosts_added == ["full_name"]
+ - full_name.hosts_changed == []
+ - full_name.hosts_removed == []
+
+- name: Add a host with name which is contained in full name host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "full"
+ hostname: full.com
+ identity_file: '{{ ssh_private_key }}'
+ port: '2223'
+ state: present
+ register: short_name
+
+- name: Check that short name host is added and full name host is not updated
+ assert:
+ that:
+ - short_name is changed
+ - short_name.hosts_added == ["full"]
+ - short_name.hosts_changed == []
+ - short_name.hosts_removed == []
+
+- name: Include integration tests for additional options (e.g. proxycommand, proxyjump)
+ include_tasks: 'options.yml'
diff --git a/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/options.yml b/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/options.yml
new file mode 100644
index 000000000..406de6831
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ssh_config/tasks/options.yml
@@ -0,0 +1,422 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Reset ssh_config before testing options
+- name: Copy sample config file
+ copy:
+ src: 'files/ssh_config_test'
+ dest: '{{ ssh_config_test }}'
+
+- name: Options - Add in check mode
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxycommand: "ssh jumphost.example.com -W %h:%p"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add
+ check_mode: true
+
+- name: Options - Check if changes are made in check mode
+ assert:
+ that:
+ - options_add.changed
+ - "'options.example.com' in options_add.hosts_added"
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Options - Verify that nothign was added to {{ ssh_config_test }} during change mode"
+ assert:
+ that:
+ - "'options.example.com' not in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Add a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxycommand: "ssh jumphost.example.com -W %h:%p"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add
+
+- name: Options - Check if changes are made
+ assert:
+ that:
+ - options_add.changed
+ - "'options.example.com' in options_add.hosts_added"
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+
+- name: Options - Add same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxycommand: "ssh jumphost.example.com -W %h:%p"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add_again
+
+- name: Options - Check for idempotency
+ assert:
+ that:
+ - not options_add_again.changed
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+ - options_add.hosts_added is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} contains added options"
+ assert:
+ that:
+ - "'proxycommand ssh jumphost.example.com -W %h:%p' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent yes' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-rsa' in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Update host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxycommand: "ssh new-jumphost.example.com -W %h:%p"
+ forward_agent: false
+ host_key_algorithms: "+ssh-ed25519"
+ state: present
+ register: options_update
+
+- name: Options - Check for update operation
+ assert:
+ that:
+ - options_update.changed
+ - options_update.hosts_changed is defined
+ - "'options.example.com' in options_update.hosts_changed"
+ - options_update.hosts_removed is defined
+ - options_update.hosts_added is defined
+ - options_update.hosts_change_diff is defined
+
+- name: Options - Update host again
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxycommand: "ssh new-jumphost.example.com -W %h:%p"
+ forward_agent: false
+ host_key_algorithms: "+ssh-ed25519"
+ state: present
+ register: options_update
+
+- name: Options - Check update operation for idempotency
+ assert:
+ that:
+ - not options_update.changed
+ - options_update.hosts_changed is defined
+ - options_update.hosts_removed is defined
+ - options_update.hosts_added is defined
+ - options_update.hosts_change_diff is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} contains changed options"
+ assert:
+ that:
+ - "'proxycommand ssh new-jumphost.example.com -W %h:%p' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Ensure no update when an option exists in the ssh_config file but is not defined in the playbook
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: present
+ register: options_no_update
+
+- name: Options - Check that no update took place
+ assert:
+ that:
+      - not options_no_update.changed
+      - options_no_update.hosts_changed is defined
+      - options_no_update.hosts_removed is defined
+      - options_no_update.hosts_added is defined
+      - options_no_update.hosts_change_diff is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} wasn't changed"
+ assert:
+ that:
+ - "'proxycommand ssh new-jumphost.example.com -W %h:%p' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' in slurp_ssh_config['content'] | b64decode"
+
+- name: Debug
+ debug:
+ msg: "{{ slurp_ssh_config['content'] | b64decode }}"
+
+- name: Options - Delete a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: absent
+ register: options_delete
+
+- name: Options - Check if host was removed
+ assert:
+ that:
+ - options_delete.changed
+ - "'options.example.com' in options_delete.hosts_removed"
+ - options_delete.hosts_changed is defined
+ - options_delete.hosts_added is defined
+
+- name: Options - Delete same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: absent
+ register: options_delete_again
+
+- name: Options - Check delete operation for idempotency
+ assert:
+ that:
+ - not options_delete_again.changed
+ - options_delete_again.hosts_changed is defined
+ - options_delete_again.hosts_removed is defined
+ - options_delete_again.hosts_added is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} does not contains deleted options"
+ assert:
+ that:
+ - "'proxycommand ssh new-jumphost.example.com -W %h:%p' not in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' not in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' not in slurp_ssh_config['content'] | b64decode"
+
+# Proxycommand and ProxyJump are mutually exclusive.
+# Reset ssh_config before testing options with proxyjump
+
+- name: Copy sample config file
+ copy:
+ src: 'files/ssh_config_test'
+ dest: '{{ ssh_config_test }}'
+
+- name: Options - Add in check mode
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxyjump: "jumphost.example.com"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add
+ check_mode: true
+
+- name: Options - Check if changes are made in check mode
+ assert:
+ that:
+ - options_add.changed
+ - "'options.example.com' in options_add.hosts_added"
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Options - Verify that nothign was added to {{ ssh_config_test }} during change mode"
+ assert:
+ that:
+ - "'options.example.com' not in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Add a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxyjump: "jumphost.example.com"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add
+
+- name: Options - Check if changes are made
+ assert:
+ that:
+ - options_add.changed
+ - "'options.example.com' in options_add.hosts_added"
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+
+- name: Options - Add same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxyjump: "jumphost.example.com"
+ forward_agent: true
+ host_key_algorithms: "+ssh-rsa"
+ state: present
+ register: options_add_again
+
+- name: Options - Check for idempotency
+ assert:
+ that:
+ - not options_add_again.changed
+ - options_add.hosts_changed is defined
+ - options_add.hosts_removed is defined
+ - options_add.hosts_added is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} contains added options"
+ assert:
+ that:
+ - "'proxyjump jumphost.example.com' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent yes' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-rsa' in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Update host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxyjump: "new-jumphost.example.com"
+ forward_agent: false
+ host_key_algorithms: "+ssh-ed25519"
+ state: present
+ register: options_update
+
+- name: Options - Check for update operation
+ assert:
+ that:
+ - options_update.changed
+ - options_update.hosts_changed is defined
+ - "'options.example.com' in options_update.hosts_changed"
+ - options_update.hosts_removed is defined
+ - options_update.hosts_added is defined
+ - options_update.hosts_change_diff is defined
+
+- name: Options - Update host again
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ proxyjump: "new-jumphost.example.com"
+ forward_agent: false
+ host_key_algorithms: "+ssh-ed25519"
+ state: present
+ register: options_update
+
+- name: Options - Check update operation for idempotency
+ assert:
+ that:
+ - not options_update.changed
+ - options_update.hosts_changed is defined
+ - options_update.hosts_removed is defined
+ - options_update.hosts_added is defined
+ - options_update.hosts_change_diff is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} contains changed options"
+ assert:
+ that:
+ - "'proxyjump new-jumphost.example.com' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' in slurp_ssh_config['content'] | b64decode"
+
+- name: Options - Ensure no update when an option exists in the ssh_config file but is not defined in the playbook
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: present
+ register: options_no_update
+
+- name: Options - Check that no update took place
+ assert:
+ that:
+      - not options_no_update.changed
+      - options_no_update.hosts_changed is defined
+      - options_no_update.hosts_removed is defined
+      - options_no_update.hosts_added is defined
+      - options_no_update.hosts_change_diff is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} wasn't changed"
+ assert:
+ that:
+ - "'proxyjump new-jumphost.example.com' in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' in slurp_ssh_config['content'] | b64decode"
+
+- name: Debug
+ debug:
+ msg: "{{ slurp_ssh_config['content'] | b64decode }}"
+
+- name: Options - Delete a host
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: absent
+ register: options_delete
+
+- name: Options - Check if host was removed
+ assert:
+ that:
+ - options_delete.changed
+ - "'options.example.com' in options_delete.hosts_removed"
+ - options_delete.hosts_changed is defined
+ - options_delete.hosts_added is defined
+
+- name: Options - Delete same host again for idempotency
+ community.general.ssh_config:
+ ssh_config_file: "{{ ssh_config_test }}"
+ host: "options.example.com"
+ state: absent
+ register: options_delete_again
+
+- name: Options - Check delete operation for idempotency
+ assert:
+ that:
+ - not options_delete_again.changed
+ - options_delete_again.hosts_changed is defined
+ - options_delete_again.hosts_removed is defined
+ - options_delete_again.hosts_added is defined
+
+- name: "Options - Get content of {{ ssh_config_test }}"
+ slurp:
+ src: "{{ ssh_config_test }}"
+ register: slurp_ssh_config
+
+- name: "Verify that {{ ssh_config_test }} does not contains deleted options"
+ assert:
+ that:
+ - "'proxyjump new-jumphost.example.com' not in slurp_ssh_config['content'] | b64decode"
+ - "'forwardagent no' not in slurp_ssh_config['content'] | b64decode"
+ - "'hostkeyalgorithms +ssh-ed25519' not in slurp_ssh_config['content'] | b64decode"
diff --git a/ansible_collections/community/general/tests/integration/targets/sudoers/aliases b/ansible_collections/community/general/tests/integration/targets/sudoers/aliases
new file mode 100644
index 000000000..12d1d6617
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sudoers/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
diff --git a/ansible_collections/community/general/tests/integration/targets/sudoers/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/sudoers/tasks/main.yml
new file mode 100644
index 000000000..dd62025d5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sudoers/tasks/main.yml
@@ -0,0 +1,279 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Initialise environment
+
+- name: Register variables
+ set_fact:
+ sudoers_path: /etc/sudoers.d
+ alt_sudoers_path: /etc/sudoers_alt
+
+- name: Install sudo package
+ ansible.builtin.package:
+ name: sudo
+ when: ansible_os_family != 'Darwin'
+
+- name: Ensure sudoers directory exists
+ ansible.builtin.file:
+ path: "{{ sudoers_path }}"
+ state: directory
+ recurse: true
+
+- name: Ensure alternative sudoers directory exists
+ ansible.builtin.file:
+ path: "{{ alt_sudoers_path }}"
+ state: directory
+ recurse: true
+
+
+# Run module and collect data
+
+- name: Create first rule
+ community.general.sudoers:
+ name: my-sudo-rule-1
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ register: rule_1
+
+- name: Stat my-sudo-rule-1 file
+ ansible.builtin.stat:
+ path: "{{ sudoers_path }}/my-sudo-rule-1"
+ register: rule_1_stat
+
+- name: Grab contents of my-sudo-rule-1
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-1"
+ register: rule_1_contents
+
+- name: Create first rule again
+ community.general.sudoers:
+ name: my-sudo-rule-1
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ register: rule_1_again
+
+
+- name: Create second rule with two commands
+ community.general.sudoers:
+ name: my-sudo-rule-2
+ state: present
+ user: alice
+ commands:
+ - /usr/local/bin/command1
+ - /usr/local/bin/command2
+ register: rule_2
+
+- name: Grab contents of my-sudo-rule-2
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-2"
+ register: rule_2_contents
+
+
+- name: Create rule requiring a password
+ community.general.sudoers:
+ name: my-sudo-rule-3
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ nopassword: false
+ register: rule_3
+
+- name: Grab contents of my-sudo-rule-3
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-3"
+ register: rule_3_contents
+
+
+- name: Create rule using a group
+ community.general.sudoers:
+ name: my-sudo-rule-4
+ state: present
+ group: students
+ commands: /usr/local/bin/command
+ register: rule_4
+
+- name: Grab contents of my-sudo-rule-4
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-4"
+ register: rule_4_contents
+
+
+- name: Create rule in an alternative directory
+ community.general.sudoers:
+ name: my-sudo-rule-5
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ sudoers_path: "{{ alt_sudoers_path }}"
+ register: rule_5
+
+- name: Grab contents of my-sudo-rule-5 (in alternative directory)
+ ansible.builtin.slurp:
+ src: "{{ alt_sudoers_path }}/my-sudo-rule-5"
+ register: rule_5_contents
+
+- name: Create rule to runas another user
+ community.general.sudoers:
+ name: my-sudo-rule-6
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ runas: bob
+ sudoers_path: "{{ sudoers_path }}"
+ register: rule_6
+
+- name: Grab contents of my-sudo-rule-6 (in alternative directory)
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-6"
+ register: rule_6_contents
+
+- name: Create rule to allow user to sudo just on host-1
+ community.general.sudoers:
+ name: my-sudo-rule-7
+ state: present
+ user: alice
+ host: host-1
+ commands: /usr/local/bin/command
+ register: rule_7
+
+- name: Grab contents of my-sudo-rule-7
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-7"
+ register: rule_7_contents
+
+- name: Create rule with setenv parameters
+ community.general.sudoers:
+ name: my-sudo-rule-8
+ state: present
+ user: alice
+ commands: /usr/local/bin/command
+ setenv: true
+ register: rule_8
+
+- name: Grab contents of my-sudo-rule-8
+ ansible.builtin.slurp:
+ src: "{{ sudoers_path }}/my-sudo-rule-8"
+ register: rule_8_contents
+
+- name: Revoke rule 1
+ community.general.sudoers:
+ name: my-sudo-rule-1
+ state: absent
+ register: revoke_rule_1
+
+- name: Stat rule 1
+ ansible.builtin.stat:
+ path: "{{ sudoers_path }}/my-sudo-rule-1"
+ register: revoke_rule_1_stat
+
+
+# Validation testing
+
+- name: Attempt command without full path to executable
+ community.general.sudoers:
+ name: edge-case-1
+ state: present
+ user: alice
+ commands: systemctl
+ ignore_errors: true
+ register: edge_case_1
+
+
+- name: Attempt command without full path to executable, but disabling validation
+ community.general.sudoers:
+ name: edge-case-2
+ state: present
+ user: alice
+ commands: systemctl
+ validation: absent
+ sudoers_path: "{{ alt_sudoers_path }}"
+ register: edge_case_2
+
+- name: Find visudo
+ command:
+ cmd: which visudo
+ register: which_visudo
+ when: ansible_os_family != 'Darwin'
+
+- name: Prevent visudo being executed
+ file:
+ path: "{{ which_visudo.stdout }}"
+ mode: '-x'
+ when: ansible_os_family != 'Darwin'
+
+- name: Attempt command without full path to executable, but enforcing validation with no visudo present
+ community.general.sudoers:
+ name: edge-case-3
+ state: present
+ user: alice
+ commands: systemctl
+ validation: required
+ ignore_errors: true
+ when: ansible_os_family != 'Darwin'
+ register: edge_case_3
+
+- name: Revoke non-existing rule
+ community.general.sudoers:
+ name: non-existing-rule
+ state: absent
+ register: revoke_non_existing_rule
+
+- name: Stat non-existing rule
+ ansible.builtin.stat:
+ path: "{{ sudoers_path }}/non-existing-rule"
+ register: revoke_non_existing_rule_stat
+
+
+# Run assertions
+
+- name: Check rule 1 file stat
+ ansible.builtin.assert:
+ that:
+ - rule_1_stat.stat.exists
+ - rule_1_stat.stat.isreg
+ - rule_1_stat.stat.mode == '0440'
+
+- name: Check changed status
+ ansible.builtin.assert:
+ that:
+ - rule_1 is changed
+ - rule_1_again is not changed
+ - rule_5 is changed
+ - revoke_rule_1 is changed
+ - revoke_non_existing_rule is not changed
+
+- name: Check contents
+ ansible.builtin.assert:
+ that:
+ - "rule_1_contents['content'] | b64decode == 'alice ALL=NOPASSWD: /usr/local/bin/command\n'"
+ - "rule_2_contents['content'] | b64decode == 'alice ALL=NOPASSWD: /usr/local/bin/command1, /usr/local/bin/command2\n'"
+ - "rule_3_contents['content'] | b64decode == 'alice ALL= /usr/local/bin/command\n'"
+ - "rule_4_contents['content'] | b64decode == '%students ALL=NOPASSWD: /usr/local/bin/command\n'"
+ - "rule_5_contents['content'] | b64decode == 'alice ALL=NOPASSWD: /usr/local/bin/command\n'"
+ - "rule_6_contents['content'] | b64decode == 'alice ALL=(bob)NOPASSWD: /usr/local/bin/command\n'"
+ - "rule_7_contents['content'] | b64decode == 'alice host-1=NOPASSWD: /usr/local/bin/command\n'"
+ - "rule_8_contents['content'] | b64decode == 'alice ALL=NOPASSWD:SETENV: /usr/local/bin/command\n'"
+
+- name: Check revocation stat
+ ansible.builtin.assert:
+ that:
+ - not revoke_rule_1_stat.stat.exists
+ - not revoke_non_existing_rule_stat.stat.exists
+
+- name: Check edge case responses
+ ansible.builtin.assert:
+ that:
+ - edge_case_1 is failed
+ - "'Failed to validate sudoers rule' in edge_case_1.msg"
+ - edge_case_2 is not failed
+
+- name: Check missing validation edge case
+ ansible.builtin.assert:
+ that:
+ - edge_case_3 is failed
+ - "'Failed to find required executable' in edge_case_3.msg"
+ when: ansible_os_family != 'Darwin'
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases b/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases
new file mode 100644
index 000000000..58524f1fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+destructive
+skip/python3
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py b/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py
new file mode 100644
index 000000000..8635b0749
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/files/sendProcessStdin.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -*- coding: utf-8 -*-
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+proc = sys.argv[1]
+value = sys.argv[2]
+username = sys.argv[3]
+password = sys.argv[4]
+
+if sys.version_info[0] == 2:
+ from xmlrpclib import ServerProxy
+ from urllib import quote
+else:
+ from xmlrpc.client import ServerProxy
+ from urllib.parse import quote
+
+if username:
+ url = 'http://%s:%s@127.0.0.1:9001/RPC2' % (quote(username, safe=''), quote(password, safe=''))
+else:
+ url = 'http://127.0.0.1:9001/RPC2'
+
+server = ServerProxy(url, verbose=True)
+server.supervisor.sendProcessStdin(proc, 'import sys; print(%s); sys.stdout.flush();\n' % value)
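+
+# Usage sketch (values illustrative; the integration tasks invoke this via the
+# `script` module, see tasks/test_start.yml):
+#
+#   python sendProcessStdin.py 'pys:py1' 2 'user' 'pass'
+#
+# This connects to supervisord's inet_http_server on 127.0.0.1:9001 and writes a
+# small Python snippet to the stdin of the targeted interactive Python process.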
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml
new file mode 100644
index 000000000..b1d3bd779
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Darwin.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml
new file mode 100644
index 000000000..b1d3bd779
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_FreeBSD.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml
new file mode 100644
index 000000000..2f70b284c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Linux.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisor
+ package:
+ name: supervisor
+ state: present
+
+- name: disable supervisord system service
+ service:
+ name: '{{ supervisor_service_name }}'
+ state: stopped
+ enabled: false
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml
new file mode 100644
index 000000000..b1d3bd779
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_RedHat.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml
new file mode 100644
index 000000000..b1d3bd779
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_Suse.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml
new file mode 100644
index 000000000..b1d3bd779
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/install_pip.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: install supervisord
+ pip:
+ name: supervisor<4.0.0 # supervisor version 4.0.0 fails tests
+ state: present
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml
new file mode 100644
index 000000000..6f8c7968c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - tempfile:
+ state: directory
+ suffix: supervisorctl-tests
+ register: supervisord_sock_path
+
+ - command: 'echo {{ remote_tmp_dir }}'
+ register: echo
+ - set_fact:
+ remote_dir: '{{ echo.stdout }}'
+
+ - include_vars: '{{ item }}'
+ with_first_found:
+ - files:
+ - '{{ ansible_distribution }}.yml'
+ - '{{ ansible_os_family }}.yml'
+ - 'defaults.yml'
+
+ - include_tasks: '{{ item }}'
+ with_first_found:
+ - files:
+ - 'install_{{ ansible_distribution }}.yml' # CentOS
+ - 'install_{{ ansible_os_family }}.yml' # RedHat
+ - 'install_{{ ansible_system }}.yml' # Linux
+
+ - include_tasks: test.yml
+ with_items:
+ - { username: '', password: '' }
+ - { username: 'testétest', password: 'passéword' } # non-ASCII credentials
+ loop_control:
+ loop_var: credentials
+
+ # setuptools is too old on RHEL/CentOS 6 (https://github.com/Supervisor/meld3/issues/23)
+ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6
+
+ always:
+ - include_tasks: '{{ item }}'
+ when: ansible_os_family != 'RedHat' or ansible_distribution_major_version|int > 6
+ with_first_found:
+ - files:
+ - 'uninstall_{{ ansible_distribution }}.yml' # CentOS
+ - 'uninstall_{{ ansible_os_family }}.yml' # RedHat
+ - 'uninstall_{{ ansible_system }}.yml' # Linux
+
+ - file:
+ path: '{{ supervisord_sock_path.path }}'
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml
new file mode 100644
index 000000000..906d7aca4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/start_supervisord.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: start supervisord
+ command: 'supervisord -c {{ remote_dir }}/supervisord.conf'
+
+- name: wait_for supervisord
+ ansible.builtin.wait_for:
+ port: 9001
+ host: 127.0.0.1
+ timeout: 15
+ state: started
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml
new file mode 100644
index 000000000..52e064d15
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/stop_supervisord.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: stop supervisord
+ command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} shutdown"
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml
new file mode 100644
index 000000000..5d1a867ed
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: generate supervisor configuration
+ template:
+ src: supervisord.conf
+ dest: '{{ remote_dir }}/supervisord.conf'
+
+- block:
+ - import_tasks: start_supervisord.yml
+
+ - import_tasks: test_start.yml
+ - import_tasks: test_stop.yml
+ always:
+ - import_tasks: stop_supervisord.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml
new file mode 100644
index 000000000..b814486cd
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_start.yml
@@ -0,0 +1,140 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: start py1 service (without auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: start py1 service (with auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status"
+
+- name: check that service is started
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+- name: check that service is running (part 1) # py1.log content is checked below
+ script: "files/sendProcessStdin.py 'pys:py1' 2 \
+ '{{ credentials.username }}' '{{ credentials.password }}'"
+
+- name: try again to start py1 service (without auth)
+ supervisorctl:
+ name: pys:py1
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: try again to start py1 service (with auth)
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is already running
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is not changed and result_with_auth is skip) or (result is skip and result_with_auth is not changed)
+
+- import_tasks: stop_supervisord.yml
+
+# supervisord has been stopped, check logfile
+- name: check that service did what was expected (part 2)
+ shell: 'test "$(tail -2 {{ remote_dir }}/py1.log | head -1)" = ">>> 2"'
+
+# restart supervisord and py1 service for next tasks
+- import_tasks: start_supervisord.yml
+
+- name: start py1 service (without auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ when: credentials.username == ''
+
+- name: start py1 service (with auth)
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is started
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+#############################################################
+
+- name: Check an error occurs when wrong credentials are used
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9001
+ username: '{{ credentials.username }}wrong_creds'
+ password: '{{ credentials.password }}same_here'
+ register: result
+ failed_when: result is not skip and (result is success or result is not failed)
+ when: credentials.username != ''
+
+- name: Check an error occurs when wrong URL is used
+ supervisorctl:
+ name: pys:py1
+ state: started
+ server_url: http://127.0.0.1:9002
+ register: result
+ failed_when: result is success or result is not failed
+
+- name: Check an error occurs when wrong config path is used
+ supervisorctl:
+ name: 'pys:py1'
+ state: started
+ config: '{{ remote_dir }}/supervisord_not_here.conf'
+ register: result
+ failed_when: result is success or result is not failed
+
+- name: Check an error occurs when wrong name is used (without auth)
+ supervisorctl:
+ name: 'invalid'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ register: result
+ failed_when: result is skip or (result is success or result is not failed)
+ when: credentials.username == ''
+
+- name: Check an error occurs when wrong name is used (with auth)
+ supervisorctl:
+ name: 'invalid'
+ state: started
+ config: '{{ remote_dir }}/supervisord.conf'
+ username: '{{ credentials.username }}wrong_creds'
+ password: '{{ credentials.password }}same_here'
+ register: result
+ failed_when: result is skip or (result is success or result is not failed)
+ when: credentials.username != ''
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml
new file mode 100644
index 000000000..8d8fdd42a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/test_stop.yml
@@ -0,0 +1,64 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: stop py1 service
+ supervisorctl:
+ name: 'pys:py1'
+ state: stopped
+ # test with 'server_url' parameter
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ register: result
+ when: credentials.username == ''
+
+- name: stop py1 service
+ supervisorctl:
+ name: 'pys:py1'
+ state: stopped
+ # test with unix socket
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- command: "supervisorctl -c {{ remote_dir }}/supervisord.conf {% if credentials.username %}-u {{ credentials.username }} -p {{ credentials.password }}{% endif %} status"
+
+- name: check that service is stopped
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is changed and result_with_auth is skip) or (result is skip and result_with_auth is changed)
+
+- name: "check that service isn't running"
+ script: "files/sendProcessStdin.py 'pys:py1' 1 \
+ '{{ credentials.username }}' '{{ credentials.password }}'"
+ register: is_py1_alive
+ failed_when: is_py1_alive is success
+
+- name: try again to stop py1 service (without auth)
+ supervisorctl:
+ name: pys:py1
+ state: stopped
+ # test with 'server_url' parameter
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ register: result
+ when: credentials.username == ''
+
+- name: try again to stop py1 service (with auth)
+ supervisorctl:
+ name: pys:py1
+ state: stopped
+ # test with unix socket
+ server_url: 'unix://{{ supervisord_sock_path.path }}/supervisord.sock'
+ username: '{{ credentials.username }}'
+ password: '{{ credentials.password }}'
+ register: result_with_auth
+ when: credentials.username != ''
+
+- name: check that service is already stopped
+ assert:
+ that:
+ - (result is success and result_with_auth is skip) or (result is skip and result_with_auth is success)
+ - (result is not changed and result_with_auth is skip) or (result is skip and result_with_auth is not changed)
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml
new file mode 100644
index 000000000..cf339dfd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Darwin.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml
new file mode 100644
index 000000000..cf339dfd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_FreeBSD.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml
new file mode 100644
index 000000000..442c61b72
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Linux.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisor
+ package:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml
new file mode 100644
index 000000000..cf339dfd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_RedHat.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml
new file mode 100644
index 000000000..cf339dfd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_Suse.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml
new file mode 100644
index 000000000..cf339dfd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/tasks/uninstall_pip.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: uninstall supervisord
+ pip:
+ name: supervisor
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf b/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf
new file mode 100644
index 000000000..f3d36b92e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/templates/supervisord.conf
@@ -0,0 +1,48 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+
+[supervisord]
+pidfile={{ remote_dir }}/supervisord.pid
+logfile={{ remote_dir }}/supervisord.log
+
+[program:py1]
+command={{ ansible_python.executable }} -i -u -
+user={{ ansible_user_id }}
+autostart=false
+autorestart=false
+stdout_logfile={{ remote_dir }}/py1.log
+redirect_stderr=yes
+
+[program:py2]
+command={{ ansible_python.executable }} -i -u -
+user={{ ansible_user_id }}
+autostart=false
+autorestart=false
+stdout_logfile={{ remote_dir }}/py2.log
+redirect_stderr=yes
+
+[group:pys]
+programs=py1,py2
+
+[unix_http_server]
+file={{ supervisord_sock_path.path }}/supervisord.sock
+{% if credentials.username is defined and credentials.username|default(false, boolean=true) %}
+username = {{ credentials.username }}
+password = {{ credentials.password }}
+{% endif %}
+
+[inet_http_server]
+port=127.0.0.1:9001
+{% if credentials.username is defined and credentials.username|default(false, boolean=true) %}
+username = {{ credentials.username }}
+password = {{ credentials.password }}
+{% endif %}
+
+[supervisorctl]
+serverurl=unix://{{ supervisord_sock_path.path }}/supervisord.sock
+
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml
new file mode 100644
index 000000000..cba575a7c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/Debian.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+supervisor_service_name: supervisor
diff --git a/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml b/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml
new file mode 100644
index 000000000..1df9ae250
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/supervisorctl/vars/defaults.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+supervisor_service_name: supervisord
diff --git a/ansible_collections/community/general/tests/integration/targets/sysrc/aliases b/ansible_collections/community/general/tests/integration/targets/sysrc/aliases
new file mode 100644
index 000000000..e13fde32c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sysrc/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+needs/root
+skip/docker
+skip/osx
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/main.yml
new file mode 100644
index 000000000..2c45c3b1c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/main.yml
@@ -0,0 +1,343 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Test on FreeBSD VMs
+ when:
+ - ansible_facts.virtualization_type != 'docker'
+ - ansible_facts.distribution == 'FreeBSD'
+ block:
+ - name: Cache original contents of /etc/rc.conf
+ shell: "cat /etc/rc.conf"
+ register: cached_etc_rcconf_content
+
+ - name: Cache original contents of /boot/loader.conf
+ shell: "cat /boot/loader.conf"
+ register: cached_boot_loaderconf_content
+
+ ##
+ ## sysrc - example - set mysqlpidfile
+ ##
+ - name: Configure mysql pid file
+ sysrc:
+ name: mysql_pidfile
+ value: /tmp/mysql.pid
+ register: sysrc_example1
+
+ - name: Configure mysql pid file (checkmode)
+ sysrc:
+ name: mysql_pidfile
+ value: checkmode
+ check_mode: true
+ register: sysrc_example1_checkmode
+
+ - name: Configure mysql pid file (idempotent)
+ sysrc:
+ name: mysql_pidfile
+ value: /tmp/mysql.pid
+ register: sysrc_example1_idempotent
+
+ - name: Get file content
+ shell: "cat /etc/rc.conf | egrep -v ^\\#"
+ register: sysrc_example1_content
+
+ - name: Ensure sysrc updates rc.conf properly
+ assert:
+ that:
+ - sysrc_example1.changed
+ - sysrc_example1_checkmode.changed
+ - not sysrc_example1_idempotent.changed
+ - "'mysql_pidfile=\"/tmp/mysql.pid\"' in sysrc_example1_content.stdout_lines"
+ - "'mysql_pidfile=\"checkmode\"' not in sysrc_example1_content.stdout_lines"
+
+ ##
+ ## sysrc - example - Enable accf_http kld in /boot/loader.conf
+ ##
+ - name: Enable accf_http kld in /boot/loader.conf
+ sysrc:
+ name: accf_http_load
+ state: present
+ value: "YES"
+ path: /boot/loader.conf
+ register: sysrc_example2
+
+ - name: Enable accf_http kld in /boot/loader.conf (checkmode)
+ sysrc:
+ name: accf_http_load
+ state: present
+ value: "NO"
+ path: /boot/loader.conf
+ check_mode: true
+ register: sysrc_example2_checkmode
+
+ - name: Enable accf_http kld in /boot/loader.conf (idempotent)
+ sysrc:
+ name: accf_http_load
+ state: present
+ value: "YES"
+ path: /boot/loader.conf
+ register: sysrc_example2_idempotent
+
+ - name: Get file content
+ shell: "cat /boot/loader.conf | egrep -v ^\\#"
+ register: sysrc_example2_content
+
+ - name: Ensure sysrc did not change the file, but marked as changed
+ assert:
+ that:
+ - sysrc_example2.changed
+ - sysrc_example2_checkmode.changed
+ - not sysrc_example2_idempotent.changed
+ - "'accf_http_load=\"YES\"' in sysrc_example2_content.stdout_lines"
+ - "'accf_http_load=\"NO\"' not in sysrc_example2_content.stdout_lines"
+
+ ##
+ ## sysrc - example - Add gif0 interface
+ ##
+ - name: Set cloned_interfaces
+ sysrc:
+ name: cloned_interfaces
+ value: "lo0"
+
+ - name: Add gif0 interface
+ sysrc:
+ name: cloned_interfaces
+ state: value_present
+ value: "gif0"
+ register: sysrc_example3
+
+ - name: Add gif1 interface (checkmode)
+ sysrc:
+ name: cloned_interfaces
+ state: value_present
+ value: "gif1"
+ check_mode: true
+ register: sysrc_example3_checkmode
+
+ - name: Add gif0 interface (idempotent)
+ sysrc:
+ name: cloned_interfaces
+ state: value_present
+ value: "gif0"
+ register: sysrc_example3_idempotent
+
+ - name: Get file content
+ shell: "cat /etc/rc.conf | egrep -v ^\\#"
+ register: sysrc_example3_content
+
+ - name: Ensure sysrc did not change the file, but marked as changed
+ assert:
+ that:
+ - sysrc_example3.changed
+ - sysrc_example3_checkmode.changed
+ - not sysrc_example3_idempotent.changed
+ - "'cloned_interfaces=\"lo0 gif0\"' in sysrc_example3_content.stdout_lines"
+
+ ##
+ ## sysrc - example - Enable nginx in testjail
+ ##
+ - name: Test within jail
+ #
+ # NOTE: currently fails with FreeBSD 12 with minor version less than 4
+ # NOTE: currently fails with FreeBSD 13 with minor version less than 1
+ #
+ when: >-
+ ansible_distribution_version is version('12.4', '>=') and ansible_distribution_version is version('13', '<')
+ or ansible_distribution_version is version('13.1', '>=')
+ block:
+ - name: Setup testjail
+ include_tasks: setup-testjail.yml
+
+ - name: Enable nginx in test jail
+ sysrc:
+ name: nginx_enable
+ value: "YES"
+ jail: testjail
+ register: sysrc_example4
+
+ - name: Enable nginx in test jail (checkmode)
+ sysrc:
+ name: nginx_enable
+ value: "NO"
+ jail: testjail
+ check_mode: true
+ register: sysrc_example4_checkmode
+
+ - name: Enable nginx in test jail (idempotent)
+ sysrc:
+ name: nginx_enable
+ value: "YES"
+ jail: testjail
+ register: sysrc_example4_idempotent
+
+ - name: Get file content
+ shell: "cat /usr/jails/testjail/etc/rc.conf | grep nginx_enable"
+ register: sysrc_example4_content
+
+ - name: Ensure sysrc worked in testjail
+ assert:
+ that:
+ - sysrc_example4.changed
+ - sysrc_example4_checkmode.changed
+ - not sysrc_example4_idempotent.changed
+ - "'nginx_enable=\"YES\"' in sysrc_example4_content.stdout_lines"
+ always:
+ - name: Stop and remove testjail
+ failed_when: false
+ changed_when: false
+ command: "ezjail-admin delete -wf testjail"
+
+ ##
+ ## sysrc - Test Absent
+ ##
+ - name: Set sysrc_absent to test removal
+ sysrc:
+ name: sysrc_absent
+ value: test
+
+ - name: Remove sysrc_absent (checkmode)
+ sysrc:
+ name: sysrc_absent
+ state: absent
+ check_mode: true
+ register: sysrc_absent_checkmode
+
+ - name: Remove sysrc_absent
+ sysrc:
+ name: sysrc_absent
+ state: absent
+ register: sysrc_absent
+
+ - name: Remove sysrc_absent (idempotent)
+ sysrc:
+ name: sysrc_absent
+ state: absent
+ register: sysrc_absent_idempotent
+
+ - name: Get file content
+ shell: "cat /etc/rc.conf | egrep -v ^\\#"
+ register: sysrc_absent_content
+
+ - name: Ensure sysrc did as intended
+ assert:
+ that:
+ - sysrc_absent_checkmode.changed
+ - sysrc_absent.changed
+ - not sysrc_absent_idempotent.changed
+ - "'sysrc_absent=\"test\"' not in sysrc_absent_content.stdout_lines"
+
+ ##
+ ## sysrc - Test alternate delimiter
+ ##
+ - name: Set sysrc_delim to known value
+ sysrc:
+ name: sysrc_delim
+ value: "t1,t2"
+
+ - name: Add to value with delimiter (not-exists)
+ sysrc:
+ name: sysrc_delim_create
+ state: value_present
+ delim: ","
+ value: t3
+ register: sysrc_delim_create
+
+ - name: Add to value with delimiter
+ sysrc:
+ name: sysrc_delim
+ state: value_present
+ delim: ","
+ value: t3
+ register: sysrc_delim
+
+ - name: Add to value with delimiter (checkmode)
+ sysrc:
+ name: sysrc_delim
+ state: value_present
+ delim: ","
+ value: t4
+ check_mode: true
+ register: sysrc_delim_checkmode
+
+ - name: Add to value with delimiter (idempotent)
+ sysrc:
+ name: sysrc_delim
+ state: value_present
+ delim: ","
+ value: t3
+ register: sysrc_delim_idempotent
+
+ - name: Get file content
+ shell: "cat /etc/rc.conf | egrep -v ^\\#"
+ register: sysrc_delim_content
+
+ - name: Ensure sysrc did as intended
+ assert:
+ that:
+ - sysrc_delim_create.changed
+ - sysrc_delim.changed
+ - sysrc_delim_checkmode.changed
+ - not sysrc_delim_idempotent.changed
+ - "'sysrc_delim=\"t1,t2,t3\"' in sysrc_delim_content.stdout_lines"
+ - "'sysrc_delim_create=\"t3\"' in sysrc_delim_content.stdout_lines"
+
+ ##
+ ## sysrc - value_absent
+ ##
+ - name: Remove value (when not exists)
+ sysrc:
+ name: sysrc_value_absent_delete
+ state: value_absent
+ delim: ","
+ value: t3
+ register: sysrc_value_absent_ignored
+
+ - name: Remove value from sysrc_delim
+ sysrc:
+ name: sysrc_delim
+ state: value_absent
+ value: t3
+ delim: ","
+ register: sysrc_value_absent
+
+ - name: Remove value from sysrc_delim (checkmode)
+ sysrc:
+ name: sysrc_delim
+ state: value_absent
+ value: t2
+ delim: ","
+ check_mode: true
+ register: sysrc_value_absent_checkmode
+
+ - name: Remove value from sysrc_delim (idempotent)
+ sysrc:
+ name: sysrc_delim
+ state: value_absent
+ value: t3
+ delim: ","
+ register: sysrc_value_absent_idempotent
+
+ - name: Get file content
+ shell: "cat /etc/rc.conf | egrep -v ^\\#"
+ register: sysrc_delim_content
+
+ - name: Ensure sysrc did as intended with value_absent
+ assert:
+ that:
+ - not sysrc_value_absent_ignored.changed
+ - sysrc_value_absent.changed
+ - sysrc_value_absent_checkmode.changed
+ - not sysrc_value_absent_idempotent.changed
+ - "'sysrc_delim=\"t1,t2\"' in sysrc_delim_content.stdout_lines"
+ - "'sysrc_delim_delete' not in sysrc_delim_content.stdout_lines"
+ always:
+ - name: Restore /etc/rc.conf
+ copy:
+ content: "{{ cached_etc_rcconf_content }}"
+ dest: /etc/rc.conf
+
+ - name: Restore /boot/loader.conf
+ copy:
+ content: "{{ cached_boot_loaderconf_content }}"
+ dest: /boot/loader.conf
diff --git a/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/setup-testjail.yml b/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/setup-testjail.yml
new file mode 100644
index 000000000..8aac7a430
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/sysrc/tasks/setup-testjail.yml
@@ -0,0 +1,73 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+#
+# Instructions for setting up a jail
+# https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-ezjail.html
+#
+- name: Setup cloned interfaces
+ lineinfile:
+ dest: /etc/rc.conf
+ regexp: ^cloned_interfaces=lo1
+ line: cloned_interfaces=lo1
+
+- name: Activate cloned interfaces
+ command: "service netif cloneup"
+ changed_when: false
+
+- name: Install ezjail
+ pkgng:
+ name: ezjail
+
+- name: Configure ezjail to use http
+ when: ansible_distribution_version is version('11.01', '>')
+ lineinfile:
+ dest: /usr/local/etc/ezjail.conf
+ regexp: ^ezjail_ftphost
+ line: ezjail_ftphost=http://ftp.freebsd.org
+
+- name: Configure ezjail to use archive for old FreeBSD releases
+ when: ansible_distribution_version is version('11.01', '<=')
+ lineinfile:
+ dest: /usr/local/etc/ezjail.conf
+ regexp: ^ezjail_ftphost
+ line: ezjail_ftphost=http://ftp-archive.freebsd.org
+
+- name: Start ezjail
+ ignore_errors: true
+ service:
+ name: ezjail
+ state: started
+ enabled: true
+
+- name: Has ezjail
+ register: ezjail_base_jail
+ stat:
+ path: /usr/jails/basejail
+
+- name: Setup ezjail base
+ when: not ezjail_base_jail.stat.exists
+ shell: "ezjail-admin install >> /tmp/ezjail.log"
+ changed_when: false
+
+- name: Has testjail
+ register: ezjail_test_jail
+ stat:
+ path: /usr/jails/testjail
+
+- name: Create testjail
+ when: not ezjail_test_jail.stat.exists
+ shell: "ezjail-admin create testjail 'lo1|127.0.1.1' >> /tmp/ezjail.log"
+ changed_when: false
+
+- name: Is testjail running
+ shell: "jls | grep testjail"
+ changed_when: false
+ failed_when: false
+ register: is_testjail_up
+
+- name: Start testjail
+ when: is_testjail_up.rc == 1
+ command: "ezjail-admin start testjail"
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/.gitignore b/ansible_collections/community/general/tests/integration/targets/terraform/.gitignore
new file mode 100644
index 000000000..c477f5db7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/.gitignore
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+**/.terraform/*
+*.tfstate
+*.tfstate.*
+.terraform.lock.hcl
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/aliases b/ansible_collections/community/general/tests/integration/targets/terraform/aliases
new file mode 100644
index 000000000..1b6e4a26d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/windows
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/python2
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/main.tf b/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/main.tf
new file mode 100644
index 000000000..8b7956ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/main.tf
@@ -0,0 +1,35 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+resource "null_resource" "mynullresource" {
+ triggers = {
+ # plain dictionaries
+ dict_name = var.dictionaries.name
+ dict_age = var.dictionaries.age
+
+ # list of dicts
+ join_dic_name = join(",", var.list_of_objects.*.name)
+
+ # list-of-strings
+ join_list = join(",", var.list_of_strings.*)
+
+ # testing boolean
+ name = var.boolean ? var.dictionaries.name : var.list_of_objects[0].name
+
+ # top level string
+ sample_string_1 = var.string_type
+
+ # nested lists
+ num_from_matrix = var.list_of_lists[1][2]
+ }
+
+}
+
+output "string_type" {
+ value = var.string_type
+}
+
+output "multiline_string" {
+ value = var.multiline_string
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/variables.tf b/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/variables.tf
new file mode 100644
index 000000000..34b050747
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/files/complex_variables/variables.tf
@@ -0,0 +1,62 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+variable "dictionaries" {
+ type = object({
+ name = string
+ age = number
+ })
+ description = "Same as ansible Dict"
+ default = {
+ age = 1
+ name = "value"
+ }
+}
+
+variable "list_of_strings" {
+ type = list(string)
+ description = "list of strings"
+ validation {
+ condition = (var.list_of_strings[1] == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
+ error_message = "Strings do not match."
+ }
+}
+
+variable "list_of_objects" {
+ type = list(object({
+ name = string
+ age = number
+ }))
+ validation {
+ condition = (var.list_of_objects[1].name == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
+ error_message = "Strings do not match."
+ }
+}
+
+variable "boolean" {
+ type = bool
+ description = "boolean"
+
+}
+
+variable "string_type" {
+ type = string
+ validation {
+ condition = (var.string_type == "cli specials\"&$%@#*!(){}[]:\"\" \\\\")
+ error_message = "Strings do not match."
+ }
+}
+
+variable "multiline_string" {
+ type = string
+ validation {
+ condition = (var.multiline_string == "one\ntwo\n")
+ error_message = "Strings do not match."
+ }
+}
+
+variable "list_of_lists" {
+ type = list(list(any))
+ default = [[1], [1, 2, 3], [3]]
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/terraform/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/tasks/complex_variables.yml b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/complex_variables.yml
new file mode 100644
index 000000000..9788a3eed
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/complex_variables.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create terraform project directory (complex variables)
+ ansible.builtin.file:
+ path: "{{ terraform_project_dir }}/complex_vars"
+ state: directory
+ mode: 0755
+
+- name: copy terraform files to work space
+ ansible.builtin.copy:
+ src: "complex_variables/{{ item }}"
+ dest: "{{ terraform_project_dir }}/complex_vars/{{ item }}"
+ with_items:
+ - main.tf
+ - variables.tf
+
+# This task exercises the various complex variable structures supported by the module
+# against the Terraform null_resource.
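+#
+# A rough sketch of the resulting invocation (illustrative, not a verbatim command
+# line): with complex_vars enabled, each top-level variable is passed as its own
+# -var argument in HCL-style syntax, for example:
+#   -var 'dictionaries={name="kosala", age=99}' -var 'boolean=true'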
+- name: test complex variables
+ community.general.terraform:
+ project_path: "{{ terraform_project_dir }}/complex_vars"
+ binary_path: "{{ terraform_binary_path }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ dictionaries:
+ name: "kosala"
+ age: 99
+ list_of_strings:
+ - "kosala"
+ - 'cli specials"&$%@#*!(){}[]:"" \\'
+ - "xxx"
+ - "zzz"
+ list_of_objects:
+ - name: "kosala"
+ age: 99
+ - name: 'cli specials"&$%@#*!(){}[]:"" \\'
+ age: 0.1
+ - name: "zzz"
+ age: 9.789
+ - name: "lll"
+ age: 1000
+ boolean: true
+ string_type: 'cli specials"&$%@#*!(){}[]:"" \\'
+ multiline_string: |
+ one
+ two
+ list_of_lists:
+ - [ 1 ]
+ - [ 11, 12, 13 ]
+ - [ 2 ]
+ - [ 3 ]
+ state: present
+ register: terraform_init_result
+
+- assert:
+ that: terraform_init_result is not failed
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/main.yml
new file mode 100644
index 000000000..1c66990be
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/main.yml
@@ -0,0 +1,67 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+# This block checks and registers Terraform version of the binary found in path.
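+# For reference, `terraform version` prints something like "Terraform v1.1.7" on its
+# first line; the regex below extracts the bare version number, e.g. "1.1.7".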
+
+- name: Check for existing Terraform in path
+ block:
+ - name: Check if terraform is present in path
+ ansible.builtin.command: "command -v terraform"
+ register: terraform_binary_path
+ ignore_errors: true
+
+ - name: Check Terraform version
+ ansible.builtin.command: terraform version
+ register: terraform_version_output
+ when: terraform_binary_path.rc == 0
+
+ - name: Set terraform version
+ ansible.builtin.set_fact:
+ terraform_version_installed: "{{ terraform_version_output.stdout | regex_search('(?!Terraform.*v)([0-9]+\\.[0-9]+\\.[0-9]+)') }}"
+ when: terraform_version_output.changed
+
+# This block handles the tasks of installing the Terraform binary. This happens if there is no existing
+# terraform in $PATH OR version does not match `terraform_version`.
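+# With the defaults from vars/main.yml on an x86_64 Linux host, terraform_url resolves
+# to https://releases.hashicorp.com/terraform/1.1.7/terraform_1.1.7_linux_amd64.zip.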
+
+- name: Execute Terraform install tasks
+ block:
+
+ - name: Install Terraform
+ ansible.builtin.debug:
+ msg: "Installing terraform {{ terraform_version }}, found: {{ terraform_version_installed | default('no terraform binary found') }}."
+
+ - name: Ensure unzip is present
+ ansible.builtin.package:
+ name: unzip
+ state: present
+
+ - name: Install Terraform binary
+ ansible.builtin.unarchive:
+ src: "{{ terraform_url }}"
+ dest: "{{ remote_tmp_dir }}"
+ mode: 0755
+ remote_src: true
+ validate_certs: "{{ validate_certs }}"
+
+ when: terraform_version_installed is not defined or terraform_version_installed != terraform_version
+
+# This sets `terraform_binary_path` to the first non-empty value: the path found by the
+# 'Check if terraform is present in path' task or, failing that, the downloaded binary under remote_tmp_dir.
+
+- name: Set path to terraform binary
+ ansible.builtin.set_fact:
+ terraform_binary_path: "{{ terraform_binary_path.stdout or remote_tmp_dir ~ '/terraform' }}"
+
+- name: Loop over provider upgrade test tasks
+ ansible.builtin.include_tasks: test_provider_upgrade.yml
+ vars:
+ tf_provider: "{{ terraform_provider_versions[provider_index] }}"
+ loop: "{{ terraform_provider_versions }}"
+ loop_control:
+ index_var: provider_index
+
+- name: Test complex variables
+ ansible.builtin.include_tasks: complex_variables.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml
new file mode 100644
index 000000000..b20182c9f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/tasks/test_provider_upgrade.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create terraform project directory (provider upgrade)
+ file:
+ path: "{{ terraform_project_dir }}/{{ item['name'] }}"
+ state: directory
+ mode: 0755
+ loop: "{{ terraform_provider_versions }}"
+ loop_control:
+ index_var: provider_index
+
+- name: Output terraform provider test project
+ ansible.builtin.template:
+ src: templates/provider_test/main.tf.j2
+ dest: "{{ terraform_project_dir }}/{{ tf_provider['name'] }}/main.tf"
+ force: true
+ register: terraform_provider_hcl
+
+# The purpose of this task is to init terraform multiple times with different provider module
+# versions, so that we can verify that provider upgrades during init work as intended.
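+#
+# For the first entry in terraform_provider_versions, the template renders roughly:
+#
+#   terraform {
+#     required_providers {
+#       null = {
+#         source  = "hashicorp/null"
+#         version = ">=2.0.0, < 3.0.0"
+#       }
+#     }
+#   }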
+
+- name: Init Terraform configuration with pinned provider version
+ community.general.terraform:
+ project_path: "{{ terraform_provider_hcl.dest | dirname }}"
+ binary_path: "{{ terraform_binary_path }}"
+ force_init: true
+ provider_upgrade: "{{ terraform_provider_upgrade }}"
+ state: present
+ register: terraform_init_result
+
+- assert:
+ that: terraform_init_result is not failed
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/templates/provider_test/main.tf.j2 b/ansible_collections/community/general/tests/integration/targets/terraform/templates/provider_test/main.tf.j2
new file mode 100644
index 000000000..886a0c2de
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/templates/provider_test/main.tf.j2
@@ -0,0 +1,13 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+terraform {
+ required_providers {
+ {{ tf_provider['name'] }} = {
+ source = "{{ tf_provider['source'] }}"
+ version = "{{ tf_provider['version'] }}"
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/terraform/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/terraform/vars/main.yml
new file mode 100644
index 000000000..1032adee4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/terraform/vars/main.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Terraform version that will be downloaded
+terraform_version: 1.1.7
+
+# Architecture of the downloaded Terraform release (needs to match target testing platform)
+
+terraform_arch: "{{ ansible_system | lower }}_{{terraform_arch_map[ansible_architecture] }}"
+
+# URL of where the Terraform binary will be downloaded from
+terraform_url: "https://releases.hashicorp.com/terraform/{{ terraform_version }}/terraform_{{ terraform_version }}_{{ terraform_arch }}.zip"
+
+# Controls whether the unarchive task will validate TLS certs of the Terraform binary host
+validate_certs: true
+
+# Directory where Terraform tests will be created
+terraform_project_dir: "{{ remote_tmp_dir }}/tf_provider_test"
+
+# Controls whether terraform init will use the `-upgrade` flag
+terraform_provider_upgrade: true
+
+# list of dicts containing Terraform providers that will be tested
+# The null provider is a good candidate, as it's small and has no external dependencies
+terraform_provider_versions:
+ - name: "null"
+ source: "hashicorp/null"
+ version: ">=2.0.0, < 3.0.0"
+ - name: "null"
+ source: "hashicorp/null"
+ version: ">=3.0.0"
+
+# mapping between values returned from ansible_architecture and arch names used by golang builds of Terraform
+# see https://www.terraform.io/downloads
+
+terraform_arch_map:
+ x86_64: amd64
+ arm64: arm64
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/aliases b/ansible_collections/community/general/tests/integration/targets/test_a_module/aliases
new file mode 100644
index 000000000..343f119da
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml b/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml
new file mode 100644
index 000000000..2243e0dba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/galaxy.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+namespace: testns
+name: testcoll
+version: 0.0.1
+authors:
+ - Ansible (https://github.com/ansible)
+description: null
+tags: [community]
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py b/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py
new file mode 100644
index 000000000..e7f1a987a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/collections/ansible_collections/testns/testcoll/plugins/modules/collection_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: collection_module
+short_description: Test collection module
+description:
+ - This is a test module in a local collection.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/library/local_module.py b/ansible_collections/community/general/tests/integration/targets/test_a_module/library/local_module.py
new file mode 100644
index 000000000..9e9e649cb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/library/local_module.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: local_module
+short_description: Test local module
+description:
+ - This is a test module locally next to a playbook.
+author: "Felix Fontein (@felixfontein)"
+options: {}
+'''
+
+EXAMPLES = ''' # '''
+
+RETURN = ''' # '''
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ AnsibleModule(argument_spec={}).exit_json()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.sh b/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.sh
new file mode 100755
index 000000000..118abbc29
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+source virtualenv.sh
+
+# The collection loader ignores paths that contain more than one ansible_collections directory.
+# That's why we have to copy this directory to a temporary place and run the test there.
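+# (This target itself contains a nested collections/ansible_collections/ tree,
+# which the loader would otherwise skip.)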
+
+# Create temporary folder
+TEMPDIR=$(mktemp -d)
+trap '{ rm -rf ${TEMPDIR}; }' EXIT
+
+cp -r . "${TEMPDIR}"
+cd "${TEMPDIR}"
+
+ansible-playbook runme.yml "$@"
diff --git a/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.yml b/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.yml
new file mode 100644
index 000000000..4b7a5ec2c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/test_a_module/runme.yml
@@ -0,0 +1,42 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ tasks:
+ - name: Test a_module
+ assert:
+ that:
+ # Modules/actions that do not exist
+ - "'foo_bar' is not community.general.a_module"
+ - "'foo.bar.baz' is not community.general.a_module"
+ # Short name and FQCN for builtin and other collections
+ - "'file' is community.general.a_module"
+ - "'set_fact' is community.general.a_module"
+ - "'ansible.builtin.file' is community.general.a_module"
+ - "'ansible.builtin.set_fact' is community.general.a_module"
+ - "'ansible.builtin.foo_bar' is not community.general.a_module"
+ - "'community.crypto.acme_certificate' is community.general.a_module"
+ - "'community.crypto.openssl_privatekey_pipe' is community.general.a_module"
+ - "'community.crypto.foo_bar' is not community.general.a_module"
+ # Modules from this collection (that exist or not)
+ - "'community.general.ufw' is community.general.a_module"
+ - "'community.general.foooo_really_does_not_exist' is not community.general.a_module"
+ # Local module
+ - "'local_module' is community.general.a_module"
+ # Local collection modules (that exist or not)
+ - "'testns.testcoll.collection_module' is community.general.a_module"
+ - "'testns.testcoll.foobar' is not community.general.a_module"
+
+ - name: Test a_module with module routing
+ assert:
+ that:
+ # Redirected module
+ - "'ufw' is community.general.a_module"
+ # Redirected module where target collection does not exist
+ # (the target collection must not have been installed in CI!)
+ - "'onyx_pfc_interface' is not community.general.a_module"
+ # Tombstoned module
+ - "'community.general.docker_image_facts' is not community.general.a_module"
+ when: ansible_version.string is version('2.10.0', '>=')
diff --git a/ansible_collections/community/general/tests/integration/targets/timezone/aliases b/ansible_collections/community/general/tests/integration/targets/timezone/aliases
new file mode 100644
index 000000000..007bed538
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/timezone/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/osx
+skip/macos
diff --git a/ansible_collections/community/general/tests/integration/targets/timezone/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/timezone/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/timezone/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml
new file mode 100644
index 000000000..3644eeafa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/timezone/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Because hwclock usually isn't available inside Docker containers in Shippable,
+# these tasks detect whether hwclock works and only run the hwclock tests if it
+# is supported. That is why it is recommended to run these tests locally with
+# `--docker-privileged` on centos6, centos7 and ubuntu1404 images. Example
+# command to run on centos6:
+#
+# ansible-test integration --docker centos6 --docker-privileged -v timezone
+
+##
+## set path to timezone config files
+##
+
+- name: set timezone config file path on Debian
+ set_fact:
+ timezone_config_file: '/etc/timezone'
+ when: ansible_os_family == 'Debian'
+
+- name: set timezone config file path on RedHat
+ set_fact:
+ timezone_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
+
+##
+## set path to hwclock config files
+##
+
+- name: set hwclock config file path on Debian
+ set_fact:
+ hwclock_config_file: '/etc/default/rcS'
+ when: ansible_os_family == 'Debian'
+
+- name: set hwclock config file path on RedHat
+ set_fact:
+ hwclock_config_file: '/etc/sysconfig/clock'
+ when: ansible_os_family == 'RedHat'
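+
+# Note: on systemd-based systems these files are not authoritative (timedatectl
+# manages both settings), which is why the file-based checks in test.yml are
+# skipped when ansible_service_mgr == 'systemd'.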
+
+####
+#### timezone tests
+####
+
+- name: make sure diffutils are installed on ArchLinux
+ package:
+ name: diffutils
+ state: present
+ when: ansible_distribution == 'Archlinux'
+
+- name: make sure tzdata is installed on Alpine
+ package:
+ name: tzdata
+ state: present
+ when: ansible_distribution == 'Alpine'
+
+- name: make sure the dbus service is started under systemd
+ systemd:
+ name: dbus
+ state: started
+ when:
+ - ansible_service_mgr == 'systemd'
+ - ansible_distribution == 'Fedora'
+ - ansible_facts.distribution_major_version is version('31', '<')
+
+
+- name: Run tests
+ # Skip tests on Fedora 31 and 32 because dbus fails to start unless the container is run in privileged mode.
+ # Even then, it starts unreliably. This may be due to the move to cgroup v2 in Fedora 31 and 32.
+ # https://www.redhat.com/sysadmin/fedora-31-control-group-v2
+ when:
+ - ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['Fedora31', 'Fedora32']
+ - not (ansible_os_family == 'Alpine') # TODO
+ block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: original_timezone
+
+ - name: Value of original_timezone
+ debug:
+ msg: "{{ original_timezone }}"
+
+ - block:
+ - include_tasks: test.yml
+ always:
+ - name: Restore original system timezone - {{ original_timezone.diff.before.name }}
+ timezone:
+ name: "{{ original_timezone.diff.before.name }}"
+ when: original_timezone is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml
new file mode 100644
index 000000000..975526800
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/timezone/tasks/test.yml
@@ -0,0 +1,612 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+##
+## test setting timezone, idempotency and checkmode
+##
+
+- name: set timezone to Australia/Brisbane (checkmode)
+ timezone:
+ name: Australia/Brisbane
+ check_mode: true
+ register: timezone_set_checkmode
+
+- name: ensure timezone reported as changed in checkmode
+ assert:
+ that:
+ - timezone_set_checkmode.changed
+ - timezone_set_checkmode.diff.after.name == 'Australia/Brisbane'
+ - timezone_set_checkmode.diff.before.name == 'Etc/UTC'
+
+- name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ register: result
+ failed_when: result is not failed
+ changed_when: false
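+# (cmp exits non-zero when the files differ, so the inverted failed_when above
+# asserts that check mode did not actually switch /etc/localtime)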
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_set
+
+- name: ensure timezone changed
+ assert:
+ that:
+ - timezone_set.changed
+ - timezone_set.diff.after.name == 'Australia/Brisbane'
+ - timezone_set.diff.before.name == 'Etc/UTC'
+
+- name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Australia/Brisbane
+ changed_when: false
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Australia/Brisbane"' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'RedHat'
+
+- name: ensure that the timezone is updated in the config file
+ command: egrep '^Australia/Brisbane' {{ timezone_config_file }}
+ when:
+ - ansible_service_mgr != 'systemd'
+ - ansible_os_family == 'Debian'
+
+- name: set timezone to Australia/Brisbane again
+ timezone:
+ name: Australia/Brisbane
+ register: timezone_again
+
+- name: ensure timezone idempotency
+ assert:
+ that:
+ - not timezone_again.changed
+
+- name: set timezone to Australia/Brisbane again in checkmode
+ timezone:
+ name: Australia/Brisbane
+ check_mode: true
+ register: timezone_again_checkmode
+
+- name: set timezone idempotency (checkmode)
+ assert:
+ that:
+ - not timezone_again_checkmode.changed
+
+##
+## tests for same timezones with different names
+##
+
+- name: check dpkg-reconfigure
+ shell: type dpkg-reconfigure
+ register: check_dpkg_reconfigure
+ ignore_errors: true
+ changed_when: false
+
+- name: check timedatectl
+ shell: type timedatectl && timedatectl
+ register: check_timedatectl
+ ignore_errors: true
+ changed_when: false
+
+- block:
+ - name: set timezone to Etc/UTC
+ timezone:
+ name: Etc/UTC
+
+ - name: change timezone from Etc/UTC to UTC
+ timezone:
+ name: UTC
+ register: timezone_etcutc_to_utc
+
+ - name: check timezone changed from Etc/UTC to UTC
+ assert:
+ that:
+ - timezone_etcutc_to_utc.changed
+ - timezone_etcutc_to_utc.diff.before.name == 'Etc/UTC'
+ - timezone_etcutc_to_utc.diff.after.name == 'UTC'
+
+ - name: change timezone from UTC to Etc/UTC
+ timezone:
+ name: Etc/UTC
+ register: timezone_utc_to_etcutc
+
+ - name: check timezone changed from UTC to Etc/UTC
+ assert:
+ that:
+ - timezone_utc_to_etcutc.changed
+ - timezone_utc_to_etcutc.diff.before.name == 'UTC'
+ - timezone_utc_to_etcutc.diff.after.name == 'Etc/UTC'
+
+ when:
+ # FIXME: Due to a bug in dpkg-reconfigure, these tests fail on non-systemd Debian,
+ # so run them only when dpkg-reconfigure is absent or timedatectl is available.
+ - check_dpkg_reconfigure.rc != 0 or check_timedatectl.rc == 0
+
+##
+## no systemd tests for timezone
+##
+
+- block:
+ ##
+ ## test with empty config file
+ ##
+
+ - name: empty config file
+ command: cp /dev/null {{ timezone_config_file }}
+
+ - name: set timezone to Europe/Belgrade (empty config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_empty_conf
+
+ - name: check if timezone set (empty config file)
+ assert:
+ that:
+ - timezone_empty_conf.changed
+ - timezone_empty_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_empty_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (empty config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+
+
+ ##
+ ## test with deleted config file
+ ##
+
+ - name: remove config file
+ file:
+ path: '{{ timezone_config_file }}'
+ state: absent
+
+ - name: set timezone to Europe/Belgrade (no config file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_missing_conf
+
+ - name: check if timezone set (no config file)
+ assert:
+ that:
+ - timezone_missing_conf.changed
+ - timezone_missing_conf.diff.after.name == 'Europe/Belgrade'
+ - timezone_missing_conf.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (no config file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+
+
+ ##
+ ## test with /etc/localtime as symbolic link to a zoneinfo file
+ ##
+
+ - name: create symlink /etc/localtime -> /usr/share/zoneinfo/Etc/UTC
+ file:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ state: link
+ force: true
+
+ - name: set timezone to Europe/Belgrade (over symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symlink
+
+ - name: check if timezone set (over symlink)
+ assert:
+ that:
+ - timezone_symlink.changed
+ - timezone_symlink.diff.after.name == 'Europe/Belgrade'
+ - timezone_symlink.diff.before.name == 'Etc/UTC'
+
+ - name: check if the timezone is actually set (over symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+
+
+ ##
+ ## test with /etc/localtime as broken symbolic link
+ ##
+
+ - name: point /etc/localtime at a broken symlink
+ file:
+ src: /tmp/foo
+ dest: /etc/localtime
+ state: link
+ force: true
+
+ - name: set timezone to Europe/Belgrade (over broken symlink)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_symlink_broken
+
+ - name: check if timezone set (over broken symlink)
+ assert:
+ that:
+ - timezone_symlink_broken.changed
+ - timezone_symlink_broken.diff.after.name == 'Europe/Belgrade'
+ - timezone_symlink_broken.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over broken symlink)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+
+
+ ##
+ ## test with /etc/localtime set manually using copy
+ ##
+
+ - name: set timezone manually by copying zone info file to /etc/localtime
+ copy:
+ src: /usr/share/zoneinfo/Etc/UTC
+ dest: /etc/localtime
+ remote_src: true
+
+ - name: set timezone to Europe/Belgrade (over copied file)
+ timezone:
+ name: Europe/Belgrade
+ register: timezone_copied
+
+ - name: check if timezone set (over copied file)
+ assert:
+ that:
+ - timezone_copied.changed
+ - timezone_copied.diff.after.name == 'Europe/Belgrade'
+ - timezone_copied.diff.before.name == 'n/a'
+
+ - name: check if the timezone is actually set (over copied file)
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+ when:
+ - ansible_service_mgr != 'systemd'
+ - timezone_config_file is defined
+
+
+####
+#### hwclock tests
+####
+
+- name: check if hwclock is supported in the environment
+ command: hwclock --test
+ register: hwclock_test
+ ignore_errors: true
+
+- name: check if timedatectl works in the environment
+ command: timedatectl
+ register: timedatectl_test
+ ignore_errors: true
+
+- name: set hwclock_supported fact
+ set_fact:
+ hwclock_supported: '{{ hwclock_test is successful or (timedatectl_test is successful and "RTC time: n/a" not in timedatectl_test.stdout) }}'
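+# hwclock is treated as supported when `hwclock --test` succeeds, or when
+# timedatectl runs and reports a real RTC (i.e. not "RTC time: n/a").
+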
+##
+## test set hwclock, idempotency and checkmode
+##
+
+- block:
+ - name: set hwclock to local
+ timezone:
+ hwclock: local
+
+ - name: set hwclock to UTC (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: true
+ register: hwclock_set_checkmode
+
+ - name: ensure hwclock reported as changed (checkmode)
+ assert:
+ that:
+ - hwclock_set_checkmode.changed
+ - hwclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - hwclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock in the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC
+ timezone:
+ hwclock: UTC
+ register: hwclock_set
+
+ - name: ensure hwclock changed
+ assert:
+ that:
+ - hwclock_set.changed
+ - hwclock_set.diff.after.hwclock == 'UTC'
+ - hwclock_set.diff.before.hwclock == 'local'
+
+ - block:
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set hwclock to UTC again
+ timezone:
+ hwclock: UTC
+ register: hwclock_again
+
+ - name: set hwclock idempotency
+ assert:
+ that:
+ - not hwclock_again.changed
+
+ - name: set hwclock to UTC again (checkmode)
+ timezone:
+ hwclock: UTC
+ check_mode: true
+ register: hwclock_again_checkmode
+
+ - name: set hwclock idempotency (checkmode)
+ assert:
+ that:
+ - not hwclock_again_checkmode.changed
+
+
+ ##
+ ## no systemd tests for hwclock
+ ##
+
+ - block:
+ ##
+ ## test set hwclock with both /etc/adjtime and conf file deleted
+ ##
+
+ - name: remove /etc/adjtime and conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted /etc/adjtime and conf file
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_and_conf
+
+ - name: ensure hwclock changed with deleted /etc/adjtime and conf
+ assert:
+ that:
+ - hwclock_set_utc_deleted_adjtime_and_conf.changed
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_and_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime deleted
+ ##
+
+ - name: remove /etc/adjtime
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - /etc/adjtime
+
+ - name: set hwclock to UTC with deleted /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_adjtime_utc
+
+ - name: ensure hwclock is not reported as changed with deleted /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_deleted_adjtime_utc.changed
+ - hwclock_set_utc_deleted_adjtime_utc.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_adjtime_utc.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with deleted /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_deleted_adjtime_local
+
+ - name: ensure hwclock changed to LOCAL with deleted /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_deleted_adjtime_local.changed
+ - hwclock_set_local_deleted_adjtime_local.diff.after.hwclock == 'local'
+ - hwclock_set_local_deleted_adjtime_local.diff.before.hwclock == 'UTC'
+
+
+ ##
+ ## test set hwclock with conf file deleted
+ ##
+
+ - name: remove conf file
+ file:
+ path: '{{ item }}'
+ state: absent
+ with_items:
+ - '{{ hwclock_config_file }}'
+
+ - name: set hwclock to UTC with deleted conf
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_deleted_conf
+
+ - name: ensure hwclock changed with deleted conf file
+ assert:
+ that:
+ - hwclock_set_utc_deleted_conf.changed
+ - hwclock_set_utc_deleted_conf.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_deleted_conf.diff.before.hwclock == 'n/a'
+
+
+ ##
+ ## test set hwclock with /etc/adjtime missing UTC/LOCAL strings
+ ##
+
+ - name: create /etc/adjtime without UTC/LOCAL
+ copy:
+ content: "0.0 0 0\n0"  # double quotes so \n becomes a real newline
+ dest: /etc/adjtime
+
+ - name: set hwclock to UTC with broken /etc/adjtime
+ timezone:
+ hwclock: UTC
+ register: hwclock_set_utc_broken_adjtime
+
+ - name: ensure hwclock doesn't report changed with broken /etc/adjtime
+ assert:
+ that:
+ - not hwclock_set_utc_broken_adjtime.changed
+ - hwclock_set_utc_broken_adjtime.diff.after.hwclock == 'UTC'
+ - hwclock_set_utc_broken_adjtime.diff.before.hwclock == 'UTC'
+
+ - name: set hwclock to LOCAL with broken /etc/adjtime
+ timezone:
+ hwclock: local
+ register: hwclock_set_local_broken_adjtime
+
+ - name: ensure hwclock changed to LOCAL with broken /etc/adjtime
+ assert:
+ that:
+ - hwclock_set_local_broken_adjtime.changed
+ - hwclock_set_local_broken_adjtime.diff.after.hwclock == 'local'
+ - hwclock_set_local_broken_adjtime.diff.before.hwclock == 'UTC'
+ when:
+ - ansible_service_mgr != 'systemd'
+ - hwclock_config_file is defined
+
+ ####
+ #### timezone + hwclock tests
+ ####
+
+ ##
+ ## test set timezone and hwclock, idempotency and checkmode
+ ##
+
+ - name: set timezone to Etc/UTC and hwclock to local
+ timezone:
+ name: Etc/UTC
+ hwclock: local
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ check_mode: true
+ register: tzclock_set_checkmode
+
+ - name: ensure timezone and hwclock reported as changed in checkmode
+ assert:
+ that:
+ - tzclock_set_checkmode.changed
+ - tzclock_set_checkmode.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set_checkmode.diff.before.name == 'Etc/UTC'
+ - tzclock_set_checkmode.diff.after.hwclock == 'UTC'
+ - tzclock_set_checkmode.diff.before.hwclock == 'local'
+
+ - name: ensure checkmode didn't change the timezone
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ register: result
+ failed_when: result is not failed
+ changed_when: false
+
+ - block:
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^(TIME)?ZONE="Etc/UTC"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that checkmode didn't update the timezone in the config file
+ command: egrep '^Etc/UTC' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that checkmode didn't update hwclock in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+ register: result
+ failed_when: result is not failed
+
+ - name: ensure that checkmode didn't update hwclock in the config file
+ command: grep ^UTC=no {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set
+
+ - name: ensure timezone and hwclock changed
+ assert:
+ that:
+ - tzclock_set.changed
+ - tzclock_set.diff.after.name == 'Europe/Belgrade'
+ - tzclock_set.diff.before.name == 'Etc/UTC'
+ - tzclock_set.diff.after.hwclock == 'UTC'
+ - tzclock_set.diff.before.hwclock == 'local'
+
+ - name: ensure that the timezone is actually set
+ command: cmp /etc/localtime /usr/share/zoneinfo/Europe/Belgrade
+ changed_when: false
+
+ - block:
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^(TIME)?ZONE="Europe/Belgrade"' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'RedHat'
+
+ - name: ensure that the timezone is updated in the config file
+ command: egrep '^Europe/Belgrade' {{ timezone_config_file }}
+ when:
+ - ansible_os_family == 'Debian'
+
+ - name: ensure that hwclock is updated in /etc/adjtime
+ command: grep ^UTC /etc/adjtime
+
+ - name: ensure that hwclock is updated in the config file
+ command: grep ^UTC=yes {{ hwclock_config_file }}
+ when: ansible_service_mgr != 'systemd'
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set_again
+
+ - name: set timezone and hwclock idempotency
+ assert:
+ that:
+ - not tzclock_set_again.changed
+
+ - name: set timezone to Europe/Belgrade and hwclock to UTC again (checkmode)
+ timezone:
+ name: Europe/Belgrade
+ hwclock: UTC
+ register: tzclock_set_again_checkmode
+
+ - name: set timezone and hwclock idempotency in checkmode
+ assert:
+ that:
+ - not tzclock_set_again_checkmode.changed
+
+ when:
+ - ansible_system == 'Linux'
+ - hwclock_supported
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/aliases b/ansible_collections/community/general/tests/integration/targets/ufw/aliases
new file mode 100644
index 000000000..2ef1a4133
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/aliases
@@ -0,0 +1,17 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+azp/posix/vm
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
+skip/rhel8.0 # FIXME
+skip/rhel9.0 # FIXME
+skip/rhel9.1 # FIXME
+skip/docker
+needs/root
+needs/target/setup_epel
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml
new file mode 100644
index 000000000..5fba2fa4d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make sure ufw is installed
+- name: Install EPEL repository (RHEL only)
+ include_role:
+ name: setup_epel
+ when:
+ - ansible_distribution in ['RedHat', 'CentOS']
+ - ansible_distribution_major_version is version('9', '<')
+- name: Install iptables (SuSE only)
+ package:
+ name: iptables
+ become: true
+ when: ansible_os_family == 'Suse'
+- name: Install ufw
+ become: true
+ package:
+ name: ufw
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+ become: true
+
+ # Cleanup
+ always:
+ - pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+ - name: Reset ufw to factory defaults and disable
+ ufw:
+ state: reset
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml
new file mode 100644
index 000000000..e9e7b33f5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/run-test.yml
@@ -0,0 +1,25 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset ufw to factory defaults
+ ufw:
+ state: reset
+- name: Disable ufw
+ ufw:
+ # Some versions of ufw have a bug where reset does not disable the firewall.
+ # That's why we explicitly deactivate here. See
+ # https://bugs.launchpad.net/ufw/+bug/1810082
+ state: disabled
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
+- name: Reset to factory defaults
+ ufw:
+ state: reset
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml
new file mode 100644
index 000000000..8c179d7ae
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/basic.yml
@@ -0,0 +1,406 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ############################################
+- name: Make sure it is off
+ ufw:
+ state: disabled
+- name: Enable (check mode)
+ ufw:
+ state: enabled
+ check_mode: true
+ register: enable_check
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+- name: Enable (idempotency)
+ ufw:
+ state: enabled
+ register: enable_idem
+- name: Enable (idempotency, check mode)
+ ufw:
+ state: enabled
+ check_mode: true
+ register: enable_idem_check
+- assert:
+ that:
+ - enable_check is changed
+ - enable is changed
+ - enable_idem is not changed
+ - enable_idem_check is not changed
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: true
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: true
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ check_mode: true
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ check_mode: true
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: true
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: true
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ check_mode: true
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ check_mode: true
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+
+# ############################################
+- name: ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: true
+ register: ipv4_allow_check
+- name: ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow
+- name: ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ register: ipv4_allow_idem
+- name: ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ check_mode: true
+ register: ipv4_allow_idem_check
+- assert:
+ that:
+ - ipv4_allow_check is changed
+ - ipv4_allow is changed
+ - ipv4_allow_idem is not changed
+ - ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv4 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ check_mode: true
+ register: delete_ipv4_allow_check
+- name: delete ipv4 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ register: delete_ipv4_allow
+- name: delete ipv4 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ register: delete_ipv4_allow_idem
+- name: delete ipv4 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: 0.0.0.0
+ delete: true
+ check_mode: true
+ register: delete_ipv4_allow_idem_check
+- assert:
+ that:
+ - delete_ipv4_allow_check is changed
+ - delete_ipv4_allow is changed
+ - delete_ipv4_allow_idem is not changed
+ - delete_ipv4_allow_idem_check is not changed
+
+# ############################################
+- name: ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: true
+ register: ipv6_allow_check
+- name: ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow
+- name: ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ register: ipv6_allow_idem
+- name: ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ check_mode: true
+ register: ipv6_allow_idem_check
+- assert:
+ that:
+ - ipv6_allow_check is changed
+ - ipv6_allow is changed
+ - ipv6_allow_idem is not changed
+ - ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: delete ipv6 allow (check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ check_mode: true
+ register: delete_ipv6_allow_check
+- name: delete ipv6 allow
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ register: delete_ipv6_allow
+- name: delete ipv6 allow (idempotency)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ register: delete_ipv6_allow_idem
+- name: delete ipv6 allow (idempotency, check mode)
+ ufw:
+ rule: allow
+ port: 23
+ to_ip: "::"
+ delete: true
+ check_mode: true
+ register: delete_ipv6_allow_idem_check
+- assert:
+ that:
+ - delete_ipv6_allow_check is changed
+ - delete_ipv6_allow is changed
+ - delete_ipv6_allow_idem is not changed
+ - delete_ipv6_allow_idem_check is not changed
+
+# ############################################
+- name: Reload ufw
+ ufw:
+ state: reloaded
+ register: reload
+- name: Reload ufw (check mode)
+ ufw:
+ state: reloaded
+ check_mode: true
+ register: reload_check
+- assert:
+ that:
+ - reload is changed
+ - reload_check is changed
+
+# ############################################
+- name: Disable (check mode)
+ ufw:
+ state: disabled
+ check_mode: true
+ register: disable_check
+- name: Disable
+ ufw:
+ state: disabled
+ register: disable
+- name: Disable (idempotency)
+ ufw:
+ state: disabled
+ register: disable_idem
+- name: Disable (idempotency, check mode)
+ ufw:
+ state: disabled
+ check_mode: true
+ register: disable_idem_check
+- assert:
+ that:
+ - disable_check is changed
+ - disable is changed
+ - disable_idem is not changed
+ - disable_idem_check is not changed
+
+# ############################################
+- name: Re-enable
+ ufw:
+ state: enabled
+- name: Reset (check mode)
+ ufw:
+ state: reset
+ check_mode: true
+ register: reset_check
+- pause:
+ # Should not be needed, but since ufw ignores --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset
+ ufw:
+ state: reset
+ register: reset
+- pause:
+ # ufw creates backups of the rule files with a timestamp; if reset is called
+ # twice in a row fast enough (so that both timestamps are taken in the same second),
+ # the second call will notice that the backup files are already there and fail.
+ # Waiting one second fixes this problem.
+ seconds: 1
+- name: Reset (idempotency)
+ ufw:
+ state: reset
+ register: reset_idem
+- pause:
+ # Should not be needed, but since ufw ignores --dry-run for reset
+ # (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
+ seconds: 1
+- name: Reset (idempotency, check mode)
+ ufw:
+ state: reset
+ check_mode: true
+ register: reset_idem_check
+- assert:
+ that:
+ - reset_check is changed
+ - reset is changed
+ - reset_idem is changed
+ - reset_idem_check is changed
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml
new file mode 100644
index 000000000..f5f100751
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/global-state.yml
@@ -0,0 +1,154 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Enable ufw
+ ufw:
+ state: enabled
+
+# ############################################
+- name: Make sure logging is off
+ ufw:
+ logging: false
+- name: Logging (check mode)
+ ufw:
+ logging: true
+ check_mode: true
+ register: logging_check
+- name: Logging
+ ufw:
+ logging: true
+ register: logging
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging
+ environment:
+ LC_ALL: C
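+# LC_ALL=C forces untranslated ufw output so the exact-string assertions below
+# can match reliably.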
+- name: Logging (idempotency)
+ ufw:
+ logging: true
+ register: logging_idem
+- name: Logging (idempotency, check mode)
+ ufw:
+ logging: true
+ check_mode: true
+ register: logging_idem_check
+- name: Logging (change, check mode)
+ ufw:
+ logging: full
+ check_mode: true
+ register: logging_change_check
+- name: Logging (change)
+ ufw:
+ logging: full
+ register: logging_change
+- name: Get logging
+ shell: |
+ ufw status verbose | grep "^Logging:"
+ register: ufw_logging_change
+ environment:
+ LC_ALL: C
+- assert:
+ that:
+ - logging_check is changed
+ - logging is changed
+ - "ufw_logging.stdout == 'Logging: on (low)'"
+ - logging_idem is not changed
+ - logging_idem_check is not changed
+ - "ufw_logging_change.stdout == 'Logging: on (full)'"
+ - logging_change is changed
+ - logging_change_check is changed
+
+# ############################################
+- name: Default (check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: true
+ register: default_check
+- name: Default
+ ufw:
+ default: reject
+ direction: incoming
+ register: default
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults
+ environment:
+ LC_ALL: C
+- name: Default (idempotency)
+ ufw:
+ default: reject
+ direction: incoming
+ register: default_idem
+- name: Default (idempotency, check mode)
+ ufw:
+ default: reject
+ direction: incoming
+ check_mode: true
+ register: default_idem_check
+- name: Default (change, check mode)
+ ufw:
+ default: allow
+ direction: incoming
+ check_mode: true
+ register: default_change_check
+- name: Default (change)
+ ufw:
+ default: allow
+ direction: incoming
+ register: default_change
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change
+ environment:
+ LC_ALL: C
+- name: Default (change again)
+ ufw:
+ default: deny
+ direction: incoming
+ register: default_change_2
+- name: Default (change incoming implicitly, check mode)
+ ufw:
+ default: allow
+ check_mode: true
+ register: default_change_implicit_check
+- name: Default (change incoming implicitly)
+ ufw:
+ default: allow
+ register: default_change_implicit
+- name: Get defaults
+ shell: |
+ ufw status verbose | grep "^Default:"
+ register: ufw_defaults_change_implicit
+ environment:
+ LC_ALL: C
+- name: Default (change incoming implicitly, idempotent, check mode)
+ ufw:
+ default: allow
+ check_mode: true
+ register: default_change_implicit_idem_check
+- name: Default (change incoming implicitly, idempotent)
+ ufw:
+ default: allow
+ register: default_change_implicit_idem
+- assert:
+ that:
+ - default_check is changed
+ - default is changed
+ - "'reject (incoming)' in ufw_defaults.stdout"
+ - default_idem is not changed
+ - default_idem_check is not changed
+ - default_change_check is changed
+ - default_change is changed
+ - "'allow (incoming)' in ufw_defaults_change.stdout"
+ - default_change_2 is changed
+ - default_change_implicit_check is changed
+ - default_change_implicit is changed
+ - default_change_implicit_idem_check is not changed
+ - default_change_implicit_idem is not changed
+ - "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
new file mode 100644
index 000000000..67328a0e3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/insert_relative_to.yml
@@ -0,0 +1,84 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Enable
+ ufw:
+ state: enabled
+ register: enable
+
+# ## CREATE RULES ############################
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 22
+ to_ip: 0.0.0.0
+- name: ipv4
+ ufw:
+ rule: deny
+ port: 23
+ to_ip: 0.0.0.0
+
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 122
+ to_ip: "::"
+- name: ipv6
+ ufw:
+ rule: deny
+ port: 123
+ to_ip: "::"
+
+- name: first-ipv4
+ ufw:
+ rule: deny
+ port: 10
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: first-ipv4
+- name: last-ipv4
+ ufw:
+ rule: deny
+ port: 11
+ to_ip: 0.0.0.0
+ insert: 0
+ insert_relative_to: last-ipv4
+
+- name: first-ipv6
+ ufw:
+ rule: deny
+ port: 110
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: first-ipv6
+- name: last-ipv6
+ ufw:
+ rule: deny
+ port: 111
+ to_ip: "::"
+ insert: 0
+ insert_relative_to: last-ipv6
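+
+# insert: 0 together with insert_relative_to places each new rule directly at
+# the position of the referenced anchor rule (first/last IPv4/IPv6 rule),
+# producing the interleaved ordering asserted below.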
+
+# ## CHECK RESULT ############################
+- name: Get rules
+ shell: |
+ ufw status | grep DENY | cut -f 1-2 -d ' ' | grep -E "^(0\.0\.0\.0|::) [123]+"
+ # Note that there was also a rule "ff02::fb mDNS" on at least one CI run;
+ # to ignore these, the extra filtering (grepping for DENY and the regex) makes
+ # sure to remove all rules not added here.
+ register: ufw_status
+- assert:
+ that:
+ - ufw_status.stdout_lines == expected_stdout
+ vars:
+ expected_stdout:
+ - "0.0.0.0 10"
+ - "0.0.0.0 22"
+ - "0.0.0.0 11"
+ - "0.0.0.0 23"
+ - ":: 110"
+ - ":: 122"
+ - ":: 111"
+ - ":: 123"
diff --git a/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml
new file mode 100644
index 000000000..1ec3568aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/ufw/tasks/tests/interface.yml
@@ -0,0 +1,86 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Enable
+ ufw:
+ state: enabled
+
+- name: Route with interface in and out
+ ufw:
+ rule: allow
+ route: true
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ to_ip: 8.8.8.8
+ from_port: 1111
+ to_port: 2222
+
+- name: Route with interface in
+ ufw:
+ rule: allow
+ route: true
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Route with interface out
+ ufw:
+ rule: allow
+ route: true
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+
+- name: Non-route with interface in
+ ufw:
+ rule: allow
+ interface_in: foo
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 3333
+
+- name: Non-route with interface out
+ ufw:
+ rule: allow
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 4444
+
+- name: Check result
+ shell: ufw status | grep -E '(ALLOW|DENY|REJECT|LIMIT)' | sed -E 's/[ \t]+/ /g'
+ register: ufw_status
+
+- assert:
+ that:
+ - '"8.8.8.8 2222/tcp on bar ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
+ - '"Anywhere on bar ALLOW FWD 1.1.1.1 1111/tcp " in stdout'
+ - '"Anywhere on foo ALLOW 1.1.1.1 3333/tcp " in stdout'
+ - '"Anywhere ALLOW OUT 1.1.1.1 4444/tcp on bar " in stdout'
+ vars:
+ stdout: '{{ ufw_status.stdout_lines }}'
+
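+# ufw only allows combining interface_in and interface_out on route (forwarding)
+# rules, so the following non-route rule is expected to fail.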
+- name: Non-route with interface_in and interface_out
+ ufw:
+ rule: allow
+ interface_in: foo
+ interface_out: bar
+ proto: tcp
+ from_ip: 1.1.1.1
+ from_port: 1111
+ to_ip: 8.8.8.8
+ to_port: 2222
+ ignore_errors: true
+ register: ufw_non_route_iface
+
+- assert:
+ that:
+ - ufw_non_route_iface is failed
+ - '"Only route rules" in ufw_non_route_iface.msg'
diff --git a/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases b/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases
new file mode 100644
index 000000000..dadd9f37a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/wakeonlan/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml
new file mode 100644
index 000000000..059748031
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/wakeonlan/tasks/main.yml
@@ -0,0 +1,58 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
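+# wakeonlan only broadcasts UDP magic packets and does not wait for a target to
+# come up, so these tasks can safely use documentation-range addresses
+# (00:00:5E:00:53:xx, 192.0.2.0/24).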
+- name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66
+ wakeonlan:
+ mac: 00:00:5E:00:53:66
+ broadcast: 192.0.2.255
+
+- name: Send a magic Wake-on-LAN packet on port 9 to 00-00-5E-00-53-66
+ wakeonlan:
+ mac: 00-00-5E-00-53-66
+ port: 9
+
+- name: Provide an incorrect MAC length
+ wakeonlan:
+ mac: 00-00-5E-00-53-66-AB
+ port: 9
+ ignore_errors: true
+ register: incorrect_mac_length
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_mac_length is failed
+ - incorrect_mac_length.msg is search('Incorrect MAC address length')
+
+- name: Provide an incorrect MAC format
+ wakeonlan:
+ mac: ZW-YX-WV-UT-SR-QP
+ port: 9
+ ignore_errors: true
+ register: incorrect_mac_format
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_mac_format is failed
+ - incorrect_mac_format.msg is search('Incorrect MAC address format')
+
+- name: Cause a socket error
+ wakeonlan:
+ mac: 00-00-5E-00-53-66
+ broadcast: 345.567.678.890
+ ignore_errors: true
+ register: incorrect_broadcast_address
+
+- name: Check error message
+ assert:
+ that:
+ - incorrect_broadcast_address is failed
+ - incorrect_broadcast_address.msg is search('not known|Name does not resolve')
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/aliases b/ansible_collections/community/general/tests/integration/targets/xattr/aliases
new file mode 100644
index 000000000..5cd9c012e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/2
+azp/posix/vm
+skip/aix
+skip/docker
+skip/freebsd
+skip/osx
+skip/macos
+destructive
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml
new file mode 100644
index 000000000..29c6d5d15
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+test_file: "{{ remote_tmp_dir }}/foo.txt"
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml
new file mode 100644
index 000000000..ca1915e05
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml
new file mode 100644
index 000000000..6c1c02b3e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Setup
+ include_tasks: setup.yml
+
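+# Not every environment supports user xattrs (e.g. some container filesystems),
+# so probe with setfattr first and skip the tests when it fails.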
+- name: Check availability of xattr support
+ command: setfattr -n user.foo {{ test_file }}
+ ignore_errors: true
+ register: xattr
+
+- name: Test
+ include_tasks: test.yml
+ when: xattr is not failed
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml
new file mode 100644
index 000000000..0eda72d8c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/setup.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install attr package
+ package:
+ name: attr
+ state: present
+
+- name: Create file
+ file:
+ path: "{{ test_file }}"
+ state: touch
diff --git a/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml
new file mode 100644
index 000000000..7fe852d77
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xattr/tasks/test.yml
@@ -0,0 +1,72 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Set attributes
+ xattr:
+ path: "{{ test_file }}"
+ key: user.foo
+ value: bar
+ register: xattr_set_result
+
+- name: Get attributes
+ xattr:
+ path: "{{ test_file }}"
+ register: xattr_get_all_result
+
+- name: Get specific attribute
+ xattr:
+ path: "{{ test_file }}"
+ key: foo
+ register: xattr_get_specific_result
+
+- assert:
+ that:
+ - "xattr_set_result.changed"
+ - "xattr_get_all_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_all_result.changed"
+ - "xattr_get_specific_result['xattr']['user.foo'] == 'bar'"
+ - "not xattr_get_specific_result.changed"
+
+- name: Set attribute again
+ xattr:
+ path: "{{ test_file }}"
+ namespace: user
+ key: foo
+ value: bar
+ register: xattr_set_again_result
+
+- assert:
+ that:
+ - "not xattr_set_again_result.changed"
+
+- name: Unset attribute
+ xattr:
+ path: "{{ test_file }}"
+ key: foo
+ state: absent
+ register: xattr_unset_result
+
+- name: Get attributes
+ xattr:
+ path: "{{ test_file }}"
+ register: xattr_get_after_unset_result
+
+- assert:
+ that:
+ - "xattr_unset_result.changed"
+ - "xattr_get_after_unset_result['xattr'] == {}"
+ - "not xattr_get_after_unset_result.changed"
+
+- name: Unset attribute again
+ xattr:
+ path: "{{ test_file }}"
+ namespace: user
+ key: foo
+ state: absent
+ register: xattr_unset_again_result
+
+- assert:
+ that:
+ - "not xattr_set_again_result.changed"
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases b/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases
new file mode 100644
index 000000000..d9f5f0fa3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/aliases
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+azp/posix/vm
+needs/privileged
+needs/root
+skip/aix
+skip/osx
+skip/macos
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml
new file mode 100644
index 000000000..b209f949a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/defaults/main.yml
@@ -0,0 +1,46 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
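+# Limit naming: b* = blocks, i* = inodes, rtb* = realtime blocks; the
+# *soft/*hard suffixes are the corresponding soft and hard quota limits.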
+uquota_default_bsoft: 1m
+uquota_default_bhard: 2m
+uquota_default_isoft: 100
+uquota_default_ihard: 200
+uquota_default_rtbsoft: 1m
+uquota_default_rtbhard: 2m
+
+uquota_user_bsoft: 2m
+uquota_user_bhard: 3m
+uquota_user_isoft: 300
+uquota_user_ihard: 400
+uquota_user_rtbsoft: 3m
+uquota_user_rtbhard: 4m
+
+gquota_default_bsoft: 1m
+gquota_default_bhard: 2m
+gquota_default_isoft: 100
+gquota_default_ihard: 200
+gquota_default_rtbsoft: 1m
+gquota_default_rtbhard: 2m
+
+gquota_group_bsoft: 2m
+gquota_group_bhard: 3m
+gquota_group_isoft: 300
+gquota_group_ihard: 400
+gquota_group_rtbsoft: 3m
+gquota_group_rtbhard: 4m
+
+pquota_default_bsoft: 1m
+pquota_default_bhard: 2m
+pquota_default_isoft: 100
+pquota_default_ihard: 200
+pquota_default_rtbsoft: 1m
+pquota_default_rtbhard: 2m
+
+pquota_project_bsoft: 2m
+pquota_project_bhard: 3m
+pquota_project_isoft: 300
+pquota_project_ihard: 400
+pquota_project_rtbsoft: 3m
+pquota_project_rtbhard: 4m
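
Note: the variable names follow xfs_quota(8) conventions: bsoft/bhard are soft and hard block limits, isoft/ihard are inode limits, and rtbsoft/rtbhard are realtime block limits. The task files below compare the module's returned byte counts against these human-readable sizes with the human_to_bytes filter; as a sanity check of that conversion:

  - name: Sanity-check the human_to_bytes conversion used in the asserts (illustrative)
    assert:
      that:
        - "'1m' | human_to_bytes == 1048576"   # 1 MiB
        - "'2m' | human_to_bytes == 2097152"   # 2 MiB
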
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml
new file mode 100644
index 000000000..caca1d341
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/gquota.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create disk image
+  command: 'dd if=/dev/zero of={{ remote_tmp_dir }}/img-gquota bs=1M count=400'
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-gquota'
+ fstype: xfs
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-gquota'
+ path: '{{ remote_tmp_dir }}/gquota'
+ fstype: xfs
+ opts: gquota
+ state: mounted
+ - name: Apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_before
+ - name: Assert default group limits results
+ assert:
+ that:
+ - test_gquota_default_before.changed
+ - test_gquota_default_before.bsoft == gquota_default_bsoft|human_to_bytes
+ - test_gquota_default_before.bhard == gquota_default_bhard|human_to_bytes
+ - test_gquota_default_before.isoft == gquota_default_isoft
+ - test_gquota_default_before.ihard == gquota_default_ihard
+ - test_gquota_default_before.rtbsoft == gquota_default_rtbsoft|human_to_bytes
+ - test_gquota_default_before.rtbhard == gquota_default_rtbhard|human_to_bytes
+ - name: Apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_before
+ - name: Assert group limits results for xfsquotauser
+ assert:
+ that:
+ - test_gquota_group_before.changed
+ - test_gquota_group_before.bsoft == gquota_group_bsoft|human_to_bytes
+ - test_gquota_group_before.bhard == gquota_group_bhard|human_to_bytes
+ - test_gquota_group_before.isoft == gquota_group_isoft
+ - test_gquota_group_before.ihard == gquota_group_ihard
+ - test_gquota_group_before.rtbsoft == gquota_group_rtbsoft|human_to_bytes
+ - test_gquota_group_before.rtbhard == gquota_group_rtbhard|human_to_bytes
+ - name: Re-apply default group limits
+ xfs_quota:
+ bsoft: '{{ gquota_default_bsoft }}'
+ bhard: '{{ gquota_default_bhard }}'
+ isoft: '{{ gquota_default_isoft }}'
+ ihard: '{{ gquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ rtbsoft: '{{ gquota_default_rtbsoft }}'
+ rtbhard: '{{ gquota_default_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_default_after
+ - name: Assert default group limits results after re-apply
+ assert:
+ that:
+ - not test_gquota_default_after.changed
+ - name: Re-apply group limits
+ xfs_quota:
+ bsoft: '{{ gquota_group_bsoft }}'
+ bhard: '{{ gquota_group_bhard }}'
+ isoft: '{{ gquota_group_isoft }}'
+ ihard: '{{ gquota_group_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ rtbsoft: '{{ gquota_group_rtbsoft }}'
+ rtbhard: '{{ gquota_group_rtbhard }}'
+ type: group
+ become: true
+ register: test_gquota_group_after
+ - name: Assert group limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_gquota_group_after.changed
+ - name: Reset default group limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_default
+ - name: Assert reset of default group limits results
+ assert:
+ that:
+ - test_reset_gquota_default.changed
+ - test_reset_gquota_default.bsoft == 0
+ - test_reset_gquota_default.bhard == 0
+ - test_reset_gquota_default.isoft == 0
+ - test_reset_gquota_default.ihard == 0
+ - test_reset_gquota_default.rtbsoft == 0
+ - test_reset_gquota_default.rtbhard == 0
+ - name: Reset group limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/gquota'
+ name: xfsquotauser
+ state: absent
+ type: group
+ become: true
+ register: test_reset_gquota_group
+  - name: Assert reset of group limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_gquota_group.changed
+ - test_reset_gquota_group.bsoft == 0
+ - test_reset_gquota_group.bhard == 0
+ - test_reset_gquota_group.isoft == 0
+ - test_reset_gquota_group.ihard == 0
+ - test_reset_gquota_group.rtbsoft == 0
+ - test_reset_gquota_group.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/gquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-gquota'
+ state: absent
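
Note: the mount tasks above point ansible.posix.mount at a private fstab under remote_tmp_dir, so the entry is recorded there and the real /etc/fstab is never edited; since src is a regular file image, a recent util-linux attaches a loop device automatically. A minimal standalone sketch with hypothetical paths:

  - name: Mount an XFS image file without touching /etc/fstab (illustrative)
    become: true
    ansible.posix.mount:
      fstab: /tmp/test-fstab   # hypothetical private fstab
      src: /tmp/img-demo       # regular file; libmount sets up the loop device
      path: /tmp/demo
      fstype: xfs
      opts: gquota
      state: mounted
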
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml
new file mode 100644
index 000000000..8977cf9ec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/main.yml
@@ -0,0 +1,37 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Setup Alpine
+ when: ansible_distribution == 'Alpine'
+ package:
+ name:
+ - xfsprogs
+ - xfsprogs-extra
+ - mount
+ - umount
+ state: latest
+
+- block:
+ - name: Create test user
+ user:
+ name: xfsquotauser
+ state: present
+ become: true
+
+ - include_tasks: uquota.yml
+ - include_tasks: gquota.yml
+ - include_tasks: pquota.yml
+
+ always:
+    - name: Cleanup test user
+ user:
+ name: xfsquotauser
+ state: absent
+ become: true
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml
new file mode 100644
index 000000000..db364ffd5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/pquota.yml
@@ -0,0 +1,182 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create disk image
+  command: 'dd if=/dev/zero of={{ remote_tmp_dir }}/img-pquota bs=1M count=400'
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-pquota'
+ fstype: xfs
+- name: Create xfs related files
+ file:
+ path: /etc/{{ item }}
+ state: touch
+ become: true
+ loop:
+ - projid
+ - projects
+- name: Add test xfs quota project id
+ lineinfile:
+ path: /etc/projid
+ line: xft_quotaval:99999
+ state: present
+ become: true
+- name: Add test xfs quota project path
+ lineinfile:
+ path: /etc/projects
+ line: 99999:{{ remote_tmp_dir }}/pquota/test
+ state: present
+ become: true
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-pquota'
+ path: '{{ remote_tmp_dir }}/pquota'
+ fstype: xfs
+ opts: pquota
+ state: mounted
+ - name: Create test directory
+ file:
+ path: '{{ remote_tmp_dir }}/pquota/test'
+ state: directory
+ become: true
+ - name: Apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_before
+ - name: Assert default project limits results
+ assert:
+ that:
+ - test_pquota_default_before.changed
+ - test_pquota_default_before.bsoft == pquota_default_bsoft|human_to_bytes
+ - test_pquota_default_before.bhard == pquota_default_bhard|human_to_bytes
+ - test_pquota_default_before.isoft == pquota_default_isoft
+ - test_pquota_default_before.ihard == pquota_default_ihard
+ - test_pquota_default_before.rtbsoft == pquota_default_rtbsoft|human_to_bytes
+ - test_pquota_default_before.rtbhard == pquota_default_rtbhard|human_to_bytes
+ - name: Apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_before
+ - name: Assert project limits results for xft_quotaval
+ assert:
+ that:
+ - test_pquota_project_before.changed
+ - test_pquota_project_before.bsoft == pquota_project_bsoft|human_to_bytes
+ - test_pquota_project_before.bhard == pquota_project_bhard|human_to_bytes
+ - test_pquota_project_before.isoft == pquota_project_isoft
+ - test_pquota_project_before.ihard == pquota_project_ihard
+ - test_pquota_project_before.rtbsoft == pquota_project_rtbsoft|human_to_bytes
+ - test_pquota_project_before.rtbhard == pquota_project_rtbhard|human_to_bytes
+ - name: Re-apply default project limits
+ xfs_quota:
+ bsoft: '{{ pquota_default_bsoft }}'
+ bhard: '{{ pquota_default_bhard }}'
+ isoft: '{{ pquota_default_isoft }}'
+ ihard: '{{ pquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ rtbsoft: '{{ pquota_default_rtbsoft }}'
+ rtbhard: '{{ pquota_default_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_default_after
+ - name: Assert default project limits results after re-apply
+ assert:
+ that:
+ - not test_pquota_default_after.changed
+ - name: Re-apply project limits
+ xfs_quota:
+ bsoft: '{{ pquota_project_bsoft }}'
+ bhard: '{{ pquota_project_bhard }}'
+ isoft: '{{ pquota_project_isoft }}'
+ ihard: '{{ pquota_project_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ rtbsoft: '{{ pquota_project_rtbsoft }}'
+ rtbhard: '{{ pquota_project_rtbhard }}'
+ type: project
+ become: true
+ register: test_pquota_project_after
+ - name: Assert project limits results for xft_quotaval after re-apply
+ assert:
+ that:
+      - not test_pquota_project_after.changed
+ - name: Reset default project limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_default
+  - name: Assert reset of default project limits results
+ assert:
+ that:
+ - test_reset_pquota_default.changed
+ - test_reset_pquota_default.bsoft == 0
+ - test_reset_pquota_default.bhard == 0
+ - test_reset_pquota_default.isoft == 0
+ - test_reset_pquota_default.ihard == 0
+ - test_reset_pquota_default.rtbsoft == 0
+ - test_reset_pquota_default.rtbhard == 0
+ - name: Reset project limits for xft_quotaval
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/pquota'
+ name: xft_quotaval
+ state: absent
+ type: project
+ become: true
+ register: test_reset_pquota_project
+ - name: Assert reset of project limits results for xft_quotaval
+ assert:
+ that:
+ - test_reset_pquota_project.changed
+ - test_reset_pquota_project.bsoft == 0
+ - test_reset_pquota_project.bhard == 0
+ - test_reset_pquota_project.isoft == 0
+ - test_reset_pquota_project.ihard == 0
+ - test_reset_pquota_project.rtbsoft == 0
+ - test_reset_pquota_project.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/pquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-pquota'
+ state: absent
+ - name: Remove xfs quota project id
+ lineinfile:
+ path: /etc/projid
+ regexp: ^xft_quotaval:99999$
+ state: absent
+ become: true
+ - name: Remove xfs quota project path
+ lineinfile:
+ path: /etc/projects
+ regexp: ^99999:.*$
+ state: absent
+ become: true
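
Note: the lineinfile tasks above follow the xfs_quota(8) project configuration format: /etc/projid maps a project name to a numeric id (name:id) and /etc/projects maps that id to a directory tree (id:path). For this test the two files end up containing:

  # /etc/projid - project name to numeric id
  xft_quotaval:99999

  # /etc/projects - numeric id to managed directory ({{ remote_tmp_dir }} expands at runtime)
  99999:{{ remote_tmp_dir }}/pquota/test
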
diff --git a/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml
new file mode 100644
index 000000000..36a7eff76
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xfs_quota/tasks/uquota.yml
@@ -0,0 +1,145 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create disk image
+  command: 'dd if=/dev/zero of={{ remote_tmp_dir }}/img-uquota bs=1M count=400'
+- name: Create XFS filesystem
+ filesystem:
+ dev: '{{ remote_tmp_dir }}/img-uquota'
+ fstype: xfs
+- block:
+ - name: Mount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ src: '{{ remote_tmp_dir }}/img-uquota'
+ path: '{{ remote_tmp_dir }}/uquota'
+ fstype: xfs
+ opts: uquota
+ state: mounted
+ - name: Apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_before
+ - name: Assert default user limits results
+ assert:
+ that:
+ - test_uquota_default_before.changed
+ - test_uquota_default_before.bsoft == uquota_default_bsoft|human_to_bytes
+ - test_uquota_default_before.bhard == uquota_default_bhard|human_to_bytes
+ - test_uquota_default_before.isoft == uquota_default_isoft
+ - test_uquota_default_before.ihard == uquota_default_ihard
+ - test_uquota_default_before.rtbsoft == uquota_default_rtbsoft|human_to_bytes
+ - test_uquota_default_before.rtbhard == uquota_default_rtbhard|human_to_bytes
+ - name: Apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_before
+ - name: Assert user limits results
+ assert:
+ that:
+ - test_uquota_user_before.changed
+ - test_uquota_user_before.bsoft == uquota_user_bsoft|human_to_bytes
+ - test_uquota_user_before.bhard == uquota_user_bhard|human_to_bytes
+ - test_uquota_user_before.isoft == uquota_user_isoft
+ - test_uquota_user_before.ihard == uquota_user_ihard
+ - test_uquota_user_before.rtbsoft == uquota_user_rtbsoft|human_to_bytes
+ - test_uquota_user_before.rtbhard == uquota_user_rtbhard|human_to_bytes
+ - name: Re-apply default user limits
+ xfs_quota:
+ bsoft: '{{ uquota_default_bsoft }}'
+ bhard: '{{ uquota_default_bhard }}'
+ isoft: '{{ uquota_default_isoft }}'
+ ihard: '{{ uquota_default_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ rtbsoft: '{{ uquota_default_rtbsoft }}'
+ rtbhard: '{{ uquota_default_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_default_after
+ - name: Assert default user limits results after re-apply
+ assert:
+ that:
+ - not test_uquota_default_after.changed
+ - name: Re-apply user limits
+ xfs_quota:
+ bsoft: '{{ uquota_user_bsoft }}'
+ bhard: '{{ uquota_user_bhard }}'
+ isoft: '{{ uquota_user_isoft }}'
+ ihard: '{{ uquota_user_ihard }}'
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ rtbsoft: '{{ uquota_user_rtbsoft }}'
+ rtbhard: '{{ uquota_user_rtbhard }}'
+ type: user
+ become: true
+ register: test_uquota_user_after
+ - name: Assert user limits results for xfsquotauser after re-apply
+ assert:
+ that:
+ - not test_uquota_user_after.changed
+ - name: Reset default user limits
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_default
+ - name: Assert reset of default user limits results
+ assert:
+ that:
+ - test_reset_uquota_default.changed
+ - test_reset_uquota_default.bsoft == 0
+ - test_reset_uquota_default.bhard == 0
+ - test_reset_uquota_default.isoft == 0
+ - test_reset_uquota_default.ihard == 0
+ - test_reset_uquota_default.rtbsoft == 0
+ - test_reset_uquota_default.rtbhard == 0
+ - name: Reset user limits for xfsquotauser
+ xfs_quota:
+ mountpoint: '{{ remote_tmp_dir }}/uquota'
+ name: xfsquotauser
+ state: absent
+ type: user
+ become: true
+ register: test_reset_uquota_user
+  - name: Assert reset of user limits results for xfsquotauser
+ assert:
+ that:
+ - test_reset_uquota_user.changed
+ - test_reset_uquota_user.bsoft == 0
+ - test_reset_uquota_user.bhard == 0
+ - test_reset_uquota_user.isoft == 0
+ - test_reset_uquota_user.ihard == 0
+ - test_reset_uquota_user.rtbsoft == 0
+ - test_reset_uquota_user.rtbhard == 0
+ always:
+ - name: Unmount filesystem
+ become: true
+ ansible.posix.mount:
+ fstab: '{{ remote_tmp_dir }}/fstab'
+ path: '{{ remote_tmp_dir }}/uquota'
+ state: unmounted
+ - name: Remove disk image
+ file:
+ path: '{{ remote_tmp_dir }}/img-uquota'
+ state: absent
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/aliases b/ansible_collections/community/general/tests/integration/targets/xml/aliases
new file mode 100644
index 000000000..0d1324b22
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/3
+destructive
+skip/aix
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml
new file mode 100644
index 000000000..d0e3e39af
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Толстый бар</name>
+ <beers>
+ <beer>Окское</beer>
+ <beer>Невское</beer>
+ </beers>
+ <rating subjective="да">десять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tolstyybar.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml
new file mode 100644
index 000000000..f47909ac6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-beers.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml
new file mode 100644
index 000000000..acaca7f59
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
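
Note: this fixture is exercised through the xml module's namespaces option, which binds XPath prefixes to the URIs declared above. A sketch of addressing the namespaced rating attribute (the /tmp path is a hypothetical copy of the fixture):

  - name: Flip the namespaced subjective attribute (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-namespaced-beers.xml
      xpath: /bus:business/rat:rating/@attr:subjective
      namespaces:
        bus: http://test.business
        rat: http://test.rating
        attr: http://test.attribute
      value: "false"
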
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/fixtures/ansible-xml-namespaced-beers.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml
new file mode 100644
index 000000000..2fcd152f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml
new file mode 100644
index 000000000..ebf02ecf5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Окское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml
new file mode 100644
index 000000000..3fff3d0d2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-elements.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml
new file mode 100644
index 000000000..e9b59a6ac
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Natty Lite</beer><beer>Miller Lite</beer><beer>Coors Lite</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
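
Note: this expected result corresponds to passing a list variable straight to add_children; the three light beers come from group vars in the actual test. A sketch under that assumption (the variable name and /tmp path are hypothetical):

  # somewhere in group_vars (hypothetical):
  # bad_beers:
  #   - beer: Natty Lite
  #   - beer: Miller Lite
  #   - beer: Coors Lite

  - name: Add children from a list variable (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: /business/beers
      add_children: '{{ bad_beers }}'
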
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-from-groupvars.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml
new file mode 100644
index 000000000..8da963363
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
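
Note: the ordering in this result (the three Old-* beers between St. Bernardus and Schlitz) is produced by combining add_children with insertafter, which places the new elements after the element matched by xpath instead of appending them. A sketch (the /tmp path is a hypothetical copy of the fixture):

  - name: Insert beers after a specific sibling (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
      insertafter: true
      add_children:
        - beer: Old Rasputin
        - beer: Old Motor Oil
        - beer: Old Curmudgeon
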
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertafter.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml
new file mode 100644
index 000000000..c409e54bf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml
@@ -0,0 +1,17 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>Old Rasputin</beer>
+ <beer>Old Motor Oil</beer>
+ <beer>Old Curmudgeon</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-insertbefore.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml
new file mode 100644
index 000000000..f206af231
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Окское" type="экстра"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml
new file mode 100644
index 000000000..a4471b7f1
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer name="Ansible Brew" type="light"/></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-children-with-attributes.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml
new file mode 100644
index 000000000..fa1ddfca2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml
@@ -0,0 +1,32 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer color="red">George Killian's Irish Red</beer>
+ <beer origin="CZ" color="blonde">Pilsner Urquell</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ <validxhtml validateon=""/>
+ </website>
+ <phonenumber>555-555-1234</phonenumber>
+ <owner dob="1976-04-12">
+ <name>
+ <last>Smith</last>
+ <first>John</first>
+ <middle>Q</middle>
+ </name>
+ </owner>
+ <website_bis>
+ <validxhtml validateon=""/>
+ </website_bis>
+ <testnormalelement>xml tag with no special characters</testnormalelement>
+ <test-with-dash>xml tag with dashes</test-with-dash>
+ <test-with-dash.and.dot>xml tag with dashes and dots</test-with-dash.and.dot>
+ <test-with.dash_and.dot_and-underscores>xml tag with dashes, dots and underscores</test-with.dash_and.dot_and-underscores>
+</business>
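
Note: several elements in this result (phonenumber, the owner subtree, the dashed and dotted test tags) exist only because the xml module creates a missing element named in the XPath when setting its value, so no explicit add step is needed. For example (the /tmp path is a hypothetical copy of the fixture):

  - name: Create /business/phonenumber implicitly by setting its value (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: /business/phonenumber
      value: 555-555-1234
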
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-element-implicitly.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml
new file mode 100644
index 000000000..dc53d2f8a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer></beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-add-namespaced-children-elements.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml
new file mode 100644
index 000000000..f47909ac6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print-only.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml
new file mode 100644
index 000000000..b5c38262f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ <beer>Old Rasputin</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
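
Note: compare this result with test-add-children-elements.xml above: the same beer is added, but with pretty_print the module reindents the output instead of leaving the new element on the closing line. A sketch (the /tmp path is a hypothetical copy of the fixture):

  - name: Add a child and reindent the document (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: /business/beers
      add_children:
        - beer: Old Rasputin
      pretty_print: true
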
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-pretty-print.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml
new file mode 100644
index 000000000..579741918
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating>10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
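
Note: this result drops only the subjective attribute while keeping the rating element and its text, which the xml module expresses as state: absent on an attribute XPath. A sketch (the /tmp path is a hypothetical copy of the fixture):

  - name: Remove the subjective attribute from rating (illustrative)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: /business/rating/@subjective
      state: absent
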
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-attribute.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml
new file mode 100644
index 000000000..b7b1a1a5c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-element.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml
new file mode 100644
index 000000000..4c4dcb180
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-attribute.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml
new file mode 100644
index 000000000..e72312032
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml
@@ -0,0 +1,13 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-remove-namespaced-element.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml
new file mode 100644
index 000000000..df50daba4
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="нет">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml
new file mode 100644
index 000000000..28dcff81a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="false">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-attribute-value.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml
new file mode 100644
index 000000000..51eb98f16
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
\ No newline at end of file
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-empty-list.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml
new file mode 100644
index 000000000..b985090d7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer alcohol="0.5" name="90 Minute IPA"><Water liter="0.2" quantity="200g"/><Starch quantity="10g"/><Hops quantity="50g"/><Yeast quantity="20g"/></beer><beer alcohol="0.3" name="Harvest Pumpkin Ale"><Water liter="0.2" quantity="200g"/><Hops quantity="25g"/><Yeast quantity="20g"/></beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
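
Note: this result replaces every child of beers with structured elements that carry attributes and nested sub-elements via set_children. As far as I recall the module's dict form, attributes are plain keys and sub-elements go under the special "_" key; treat the exact shape below as an assumption (the /tmp path is also hypothetical):

  - name: Replace all beers with structured children (illustrative sketch)
    community.general.xml:
      path: /tmp/ansible-xml-beers.xml
      xpath: /business/beers
      set_children:
        - beer:
            name: 90 Minute IPA
            alcohol: "0.5"
            _:                    # assumption: '_' nests sub-elements in dict form
              - Water:
                  liter: "0.2"
                  quantity: 200g
              - Starch:
                  quantity: 10g
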
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-level.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml
new file mode 100644
index 000000000..3cd586dd0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Окское</beer><beer>Невское</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml
new file mode 100644
index 000000000..6348c3e8f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml
@@ -0,0 +1,11 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>90 Minute IPA</beer><beer>Harvest Pumpkin Ale</beer></beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-children-elements.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml
new file mode 100644
index 000000000..5ecab798e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">10</rating>
+ <website>
+ <mobilefriendly/>
+ <address></address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-empty.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml
new file mode 100644
index 000000000..6c5ca8dd9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">пять</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>пять</rating></business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value-unicode.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml
new file mode 100644
index 000000000..59fb1e516
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers>
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating subjective="true">5</rating>
+ <website>
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+<rating>5</rating></business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-element-value.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml
new file mode 100644
index 000000000..229b31ad7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="false">10</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-attribute-value.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml
new file mode 100644
index 000000000..f78873e7a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml
@@ -0,0 +1,14 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<business xmlns="http://test.business" xmlns:attr="http://test.attribute" type="bar">
+ <name>Tasty Beverage Co.</name>
+ <beers xmlns="http://test.beers">
+ <beer>Rochefort 10</beer>
+ <beer>St. Bernardus Abbot 12</beer>
+ <beer>Schlitz</beer>
+ </beers>
+ <rating xmlns="http://test.rating" attr:subjective="true">11</rating>
+ <website xmlns="http://test.website">
+ <mobilefriendly/>
+ <address>http://tastybeverageco.com</address>
+ </website>
+</business>
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml.license b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/results/test-set-namespaced-element-value.xml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml
new file mode 100644
index 000000000..fe46b3ae5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/main.yml
@@ -0,0 +1,77 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install lxml (FreeBSD)
+ package:
+ name: 'py{{ ansible_python.version.major }}{{ ansible_python.version.minor }}-lxml'
+ state: present
+ when: ansible_os_family == "FreeBSD"
+
+# Needed for macOS
+- name: Install lxml
+ pip:
+ name: lxml
+ state: present
+# when: ansible_os_family == "Darwin"
+
+- name: Get lxml version
+ command: "{{ ansible_python_interpreter }} -c 'from lxml import etree; print(\".\".join(str(v) for v in etree.LXML_VERSION))'"
+ register: lxml_version
+
+- name: Set lxml capabilities as variables
+ set_fact:
+ # NOTE: Some tests require predictable element attribute order,
+ # which is only guaranteed starting from lxml v3.0alpha1
+ lxml_predictable_attribute_order: '{{ lxml_version.stdout is version("3", ">=") }}'
+
+ # NOTE: The xml module requires at least lxml v2.3.0
+ lxml_xpath_attribute_result_attrname: '{{ lxml_version.stdout is version("2.3.0", ">=") }}'
+
+- name: Only run the tests when lxml is v2.3.0 or newer
+ when: lxml_xpath_attribute_result_attrname
+ block:
+
+ - include_tasks: test-add-children-elements.yml
+ - include_tasks: test-add-children-from-groupvars.yml
+ - include_tasks: test-add-children-insertafter.yml
+ - include_tasks: test-add-children-insertbefore.yml
+ - include_tasks: test-add-children-with-attributes.yml
+ - include_tasks: test-add-element-implicitly.yml
+ - include_tasks: test-count.yml
+ - include_tasks: test-mutually-exclusive-attributes.yml
+ - include_tasks: test-remove-attribute.yml
+ - include_tasks: test-remove-attribute-nochange.yml
+ - include_tasks: test-remove-element.yml
+ - include_tasks: test-remove-element-nochange.yml
+ - include_tasks: test-set-attribute-value.yml
+ - include_tasks: test-set-children-elements.yml
+ - include_tasks: test-set-children-elements-level.yml
+ - include_tasks: test-set-element-value.yml
+ - include_tasks: test-set-element-value-empty.yml
+ - include_tasks: test-pretty-print.yml
+ - include_tasks: test-pretty-print-only.yml
+ - include_tasks: test-add-namespaced-children-elements.yml
+ - include_tasks: test-remove-namespaced-attribute.yml
+ - include_tasks: test-remove-namespaced-attribute-nochange.yml
+ - include_tasks: test-set-namespaced-attribute-value.yml
+ - include_tasks: test-set-namespaced-element-value.yml
+ - include_tasks: test-set-namespaced-children-elements.yml
+ - include_tasks: test-get-element-content.yml
+ - include_tasks: test-xmlstring.yml
+ - include_tasks: test-children-elements-xml.yml
+
+ # Unicode tests
+ - include_tasks: test-add-children-elements-unicode.yml
+ - include_tasks: test-add-children-with-attributes-unicode.yml
+ - include_tasks: test-set-attribute-value-unicode.yml
+ - include_tasks: test-count-unicode.yml
+  - include_tasks: test-get-element-content-unicode.yml
+ - include_tasks: test-set-children-elements-unicode.yml
+ - include_tasks: test-set-element-value-unicode.yml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
new file mode 100644
index 000000000..e15ac5fd9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements-unicode.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Окское
+ register: add_children_elements_unicode
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements_unicode is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml
new file mode 100644
index 000000000..29467f6d6
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-elements.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_children_elements
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_elements is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
new file mode 100644
index 000000000..2b232b6d0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-from-groupvars.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children: '{{ bad_beers }}'
+ register: add_children_from_groupvars
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-from-groupvars.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_from_groupvars is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-from-groupvars.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
new file mode 100644
index 000000000..7795c8966
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertafter.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertafter: true
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: true
+ register: add_children_insertafter
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertafter.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertafter is changed
+ - comparison is not changed # identical
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
new file mode 100644
index 000000000..b14c5e06f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-insertbefore.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: '/business/beers/beer[text()="St. Bernardus Abbot 12"]'
+ insertbefore: true
+ add_children:
+ - beer: Old Rasputin
+ - beer: Old Motor Oil
+ - beer: Old Curmudgeon
+ pretty_print: true
+ register: add_children_insertbefore
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-insertbefore.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_insertbefore is changed
+ - comparison is not changed # identical
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
new file mode 100644
index 000000000..07905aa15
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes-unicode.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Окское
+ type: экстра
+ register: add_children_with_attributes_unicode
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes_unicode is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
new file mode 100644
index 000000000..fede24395
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-children-with-attributes.yml
@@ -0,0 +1,42 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ add_children:
+ - beer:
+ name: Ansible Brew
+ type: light
+ register: add_children_with_attributes
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-with-attributes.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ # NOTE: This test may fail if lxml does not support predictable element attribute order
+ # So we filter the failure out for these platforms (e.g. CentOS 6)
+ # The module still works fine, we simply are not comparing as smart as we should.
+ - name: Test expected result
+ assert:
+ that:
+ - add_children_with_attributes is changed
+ - comparison is not changed # identical
+ when: lxml_predictable_attribute_order
+ #command: diff -u {{ role_path }}/results/test-add-children-with-attributes.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
new file mode 100644
index 000000000..b1718e452
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-element-implicitly.yml
@@ -0,0 +1,242 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+
+
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/phonenumber
+ value: 555-555-1234
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/last
+ value: Smith
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name/first
+ value: John
+
+- name: Add a validxhtml element to the website element. Note that ensure/state defaults to present, and value defaults to null for elements, so omitting value simply ensures the element exists.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website/validxhtml/@validateon
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/website_bis/validxhtml/@validateon
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/@dob='1976-04-12'
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer/text()="George Killian's Irish Red" # note the quote within an XPath string thing
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: true
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: true
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: true
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: true
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="George Killian's Irish Red"]/@color='red'
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/beers/beer[text()="Pilsner Urquell" and @origin='CZ']/@color='blonde'
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/owner/name[first/text()='John']/middle
+ value: Q
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: true
+
+- name: Compare to expected result
+ copy:
+ src: results/test-add-element-implicitly.xml
+ dest: /tmp/ansible-xml-beers-implicit.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+- name: Test expected result
+ assert:
+ that:
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-element-implicitly.xml /tmp/ansible-xml-beers-implicit.xml
+
+
+# Now we repeat the same, just to ensure proper use of namespaces
+- name: Add a phonenumber element to the business element. Implicit mkdir -p behavior where applicable
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:phonenumber
+ value: 555-555-1234
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 1/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:last
+ value: Smith
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 2/2
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name/a:first
+ value: John
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add a validxhtml element to the website element. Note that ensure/state defaults to present, and value defaults to null for elements, so omitting value simply ensures the element exists.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. This actually makes the previous example redundant because of the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an empty validateon attribute to the validxhtml element. Actually verifies the implicit parent-node creation behavior.
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:website_bis/a:validxhtml/@a:validateon
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute with a value
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/@a:dob='1976-04-12'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with a value, alternate syntax
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer/text()="George Killian's Irish Red" # note the quote within an XPath string thing
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an attribute on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="George Killian's Irish Red"]/@a:color='red'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add two attributes on a conditional element
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:beers/a:beer[text()="Pilsner Urquell" and @a:origin='CZ']/@a:color='blonde'
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an owner element to the business element, testing implicit mkdir -p behavior 3/2 -- complex lookup
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/a:owner/a:name[a:first/text()='John']/a:middle
+ value: Q
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element without special characters
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/testnormalelement
+ value: xml tag with no special characters
+ pretty_print: true
+ namespaces:
+ a: http://example.com/some/namespace
+
+
+- name: Add an element with dash
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash
+ value: xml tag with dashes
+ pretty_print: true
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with dot
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with-dash.and.dot
+ value: xml tag with dashes and dots
+ pretty_print: true
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Add an element with underscore
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ xpath: /business/test-with.dash_and.dot_and-underscores
+ value: xml tag with dashes, dots and underscores
+ pretty_print: true
+ namespaces:
+ a: http://example.com/some/namespace
+
+- name: Pretty Print this!
+ xml:
+ file: /tmp/ansible-xml-beers-implicit.xml
+ pretty_print: true
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
new file mode 100644
index 000000000..2a9daab78
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-add-namespaced-children-elements.yml
@@ -0,0 +1,39 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Add namespaced child element
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ add_children:
+ - beer: Old Rasputin
+ register: add_namespaced_children_elements
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-namespaced-children-elements.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - add_namespaced_children_elements is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-namespaced-children-elements.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
new file mode 100644
index 000000000..1c8c2b804
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-children-elements-xml.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element with xml format
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ input_type: xml
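+     # With input_type set to xml, add_children items are raw XML snippets rather than name/attribute dicts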
+ add_children:
+ - '<beer>Old Rasputin</beer>'
+ register: children_elements
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-add-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - children_elements is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-add-children-elements.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml
new file mode 100644
index 000000000..118e2986d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count-unicode.yml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Count child element
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/beers/beer
+ count: true
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers is not changed
+ - beers.count == 2
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml
new file mode 100644
index 000000000..79be9402f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-count.yml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add child element
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers/beer
+ count: true
+ register: beers
+
+ - name: Test expected result
+ assert:
+ that:
+ - beers is not changed
+ - beers.count == 3
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
new file mode 100644
index 000000000..475f962eb
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content-unicode.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers-unicode.xml
+ dest: /tmp/ansible-xml-beers-unicode.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute is not changed
+ - get_element_attribute.matches[0]['rating'] is defined and get_element_attribute.matches[0]['rating']['subjective'] == 'да'
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers-unicode.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text is not changed
+ - get_element_text.matches[0]['rating'] == 'десять'
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml
new file mode 100644
index 000000000..c75bdb223
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-get-element-content.yml
@@ -0,0 +1,51 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Get element attributes
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ register: get_element_attribute
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute is not changed
+ - get_element_attribute.matches[0]['rating'] is defined
+ - get_element_attribute.matches[0]['rating']['subjective'] == 'true'
+
+ - name: Get element attributes (should fail)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: attribute
+ attribute: subjective
+ register: get_element_attribute_wrong
+ ignore_errors: true
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_attribute_wrong is failed
+
+ - name: Get element text
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ content: text
+ register: get_element_text
+
+ - name: Test expected result
+ assert:
+ that:
+ - get_element_text is not changed
+ - get_element_text.matches[0]['rating'] == '10'
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
new file mode 100644
index 000000000..33f129e2e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-mutually-exclusive-attributes.yml
@@ -0,0 +1,26 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Specify both children to add and a value
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ add_children:
+ - child01
+ - child02
+ value: conflict!
+ register: module_output
+ ignore_errors: true
+
+ - name: Test expected result
+ assert:
+ that:
+ - module_output is not changed
+ - module_output is failed
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
new file mode 100644
index 000000000..03d3299aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print-only.yml
@@ -0,0 +1,33 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml.orig
+
+ - name: Remove spaces from test fixture
+ shell: sed 's/^[ ]*//g' < /tmp/ansible-xml-beers.xml.orig > /tmp/ansible-xml-beers.xml
+
+ - name: Pretty print without modification
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ pretty_print: true
+ register: pretty_print_only
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print-only.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print_only is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml
new file mode 100644
index 000000000..51b34502d
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-pretty-print.yml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Pretty print
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ pretty_print: true
+ add_children:
+ - beer: Old Rasputin
+ register: pretty_print
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-pretty-print.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - pretty_print is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
new file mode 100644
index 000000000..3222bd436
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute-nochange.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove non-existing '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml
new file mode 100644
index 000000000..e8952a655
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-attribute.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating/@subjective'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating/@subjective
+ state: absent
+ register: remove_attribute
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-attribute.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_attribute is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-attribute.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
new file mode 100644
index 000000000..c1312c5a7
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element-nochange.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove non-existing '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml
new file mode 100644
index 000000000..bea376ba9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-element.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Remove '/business/rating'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ state: absent
+ register: remove_element
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_element is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
new file mode 100644
index 000000000..61b7179ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute-nochange.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove non-existing namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_attribute is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
new file mode 100644
index 000000000..a725ee79c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-attribute.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating/@attr:subjective'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating/@attr:subjective
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_attribute
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-namespaced-attribute.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_attribute is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-namespaced-attribute.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
new file mode 100644
index 000000000..fd83c54c3
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element-nochange.yml
@@ -0,0 +1,37 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove non-existing namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
new file mode 100644
index 000000000..c4129f33e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-remove-namespaced-element.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Remove namespaced '/bus:business/rat:rating'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ state: absent
+ register: remove_namespaced_element
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-remove-element.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - remove_namespaced_element is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-remove-element.xml /tmp/ansible-xml-namespaced-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
new file mode 100644
index 000000000..bf35bfdd9
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value-unicode.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'нет'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: нет
+ register: set_attribute_value_unicode
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value_unicode is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
new file mode 100644
index 000000000..2908e00aa
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-attribute-value.yml
@@ -0,0 +1,36 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/rating/@subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ attribute: subjective
+ value: 'false'
+ register: set_attribute_value
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-attribute-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_attribute_value is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-attribute-value.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
new file mode 100644
index 000000000..648f5b25a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-level.yml
@@ -0,0 +1,82 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer:
+ alcohol: "0.5"
+ name: 90 Minute IPA
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Starch:
+ quantity: 10g
+ - Hops:
+ quantity: 50g
+ - Yeast:
+ quantity: 20g
+ - beer:
+ alcohol: "0.3"
+ name: Harvest Pumpkin Ale
+ _:
+ - Water:
+ liter: "0.2"
+ quantity: 200g
+ - Hops:
+ quantity: 25g
+ - Yeast:
+ quantity: 20g
+ register: set_children_elements_level
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_level is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-level.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-level.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again is not changed
+ - comparison is not changed # identical
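
The idempotency check above relies on a plain YAML anchor/alias pair: set_children: &children stores the child list where it is first written, and set_children: *children replays byte-identical data on the second run, so the two task definitions cannot drift apart. A minimal sketch of the pattern, with illustrative element names:

    - name: Set children (first run defines the anchor)
      xml:
        path: /tmp/example.xml
        xpath: /catalog/items
        set_children: &items
          - item: first
          - item: second
      register: first_run

    - name: Set the same children again (alias replays identical data)
      xml:
        path: /tmp/example.xml
        xpath: /catalog/items
        set_children: *items
      register: second_run

    - name: Verify the second run was a no-op
      assert:
        that:
          - first_run is changed
          - second_run is not changed
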
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
new file mode 100644
index 000000000..8c4fc1094
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements-unicode.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: Окское
+ - beer: Невское
+ register: set_children_elements_unicode
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements_unicode is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml
new file mode 100644
index 000000000..ed9e4a54e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-children-elements.yml
@@ -0,0 +1,86 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+ - name: Set child elements - empty list
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: []
+ register: set_children_elements
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements-empty-list.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements-empty-list.xml /tmp/ansible-xml-beers.xml
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: &children
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_elements
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_elements is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
+
+
+ - name: Set child elements (again)
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/beers
+ set_children: *children
+ register: set_children_again
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-children-elements.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-children-elements.xml /tmp/ansible-xml-beers.xml
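
As the first part of this file shows, an empty list is a meaningful value for set_children: it deletes every existing child of the matched element instead of being a no-op. A short sketch, assuming /catalog/items currently has children:

    - name: Remove all child elements, leaving an empty <items/> element
      xml:
        path: /tmp/example.xml
        xpath: /catalog/items
        set_children: []
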
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
new file mode 100644
index 000000000..4041bf910
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-empty.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Set '/business/website/address' to empty string.
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/website/address
+ value: ''
+ register: set_element_value_empty
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-empty.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_value_empty is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-empty.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
new file mode 100644
index 000000000..616f26ddc
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value-unicode.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: пять
+
+ - name: Set '/business/rating' to 'пять'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to 'пять'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: пять
+ register: set_element_second_run
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value-unicode.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run is changed
+ - set_element_second_run is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value-unicode.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml
new file mode 100644
index 000000000..b563b2576
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-element-value.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-beers.xml
+ dest: /tmp/ansible-xml-beers.xml
+
+
+ - name: Add 2nd '/business/rating' with value '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business
+ add_children:
+ - rating: '5'
+
+ - name: Set '/business/rating' to '5'
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_first_run
+
+ - name: Set '/business/rating' to '5'... again
+ xml:
+ path: /tmp/ansible-xml-beers.xml
+ xpath: /business/rating
+ value: '5'
+ register: set_element_second_run
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-element-value.xml
+ dest: /tmp/ansible-xml-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run is changed
+ - set_element_second_run is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-element-value.xml /tmp/ansible-xml-beers.xml
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
new file mode 100644
index 000000000..7c1bbd237
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-attribute-value.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating/@attr:subjective' to 'false'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/rat:rating
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ attribute: attr:subjective
+ value: 'false'
+ register: set_namespaced_attribute_value
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-attribute-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_namespaced_attribute_value is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-attribute-value.xml /tmp/ansible-xml-namespaced-beers.xml
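
For namespaced documents, the namespaces option maps local prefixes to namespace URIs; the prefixes are then usable both in the xpath expression and in a prefixed attribute name, exactly as above. A minimal sketch with illustrative URIs:

    - name: Set a namespaced attribute
      xml:
        path: /tmp/example.xml
        xpath: /ns1:root/ns2:child
        namespaces:
          ns1: http://example.com/root    # prefix used in the xpath
          ns2: http://example.com/child
          att: http://example.com/attrs   # prefix used in the attribute name
        attribute: att:flag
        value: 'false'
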
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
new file mode 100644
index 000000000..e6ed1bdec
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-children-elements.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Set child elements
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+
+ - name: Copy state after first set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-1.xml
+ remote_src: true
+
+ - name: Set child elements again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ xpath: /bus:business/ber:beers
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ set_children:
+ - beer: 90 Minute IPA
+ - beer: Harvest Pumpkin Ale
+ register: set_children_again
+
+ - name: Copy state after second set_children
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: true
+
+ - name: Compare to expected result
+ copy:
+ src: /tmp/ansible-xml-namespaced-beers-1.xml
+ dest: /tmp/ansible-xml-namespaced-beers-2.xml
+ remote_src: true
+ check_mode: true
+ diff: true
+ register: comparison
+ #command: diff /tmp/ansible-xml-namespaced-beers-1.xml /tmp/ansible-xml-namespaced-beers-2.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_children_again is not changed # idempotency
+ - comparison is not changed # identical
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
new file mode 100644
index 000000000..9944da8a5
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-set-namespaced-element-value.yml
@@ -0,0 +1,53 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Setup test fixture
+ copy:
+ src: fixtures/ansible-xml-namespaced-beers.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11'
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_first_run
+
+ - name: Set namespaced '/bus:business/rat:rating' to '11' again
+ xml:
+ path: /tmp/ansible-xml-namespaced-beers.xml
+ namespaces:
+ bus: http://test.business
+ ber: http://test.beers
+ rat: http://test.rating
+ attr: http://test.attribute
+ xpath: /bus:business/rat:rating
+ value: '11'
+ register: set_element_second_run
+
+ - name: Add trailing newline
+ shell: echo "" >> /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Compare to expected result
+ copy:
+ src: results/test-set-namespaced-element-value.xml
+ dest: /tmp/ansible-xml-namespaced-beers.xml
+ check_mode: true
+ diff: true
+ register: comparison
+ #command: diff -u {{ role_path }}/results/test-set-namespaced-element-value.xml /tmp/ansible-xml-namespaced-beers.xml
+
+ - name: Test expected result
+ assert:
+ that:
+ - set_element_first_run is changed
+ - set_element_second_run is not changed
+ - comparison is not changed # identical
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml
new file mode 100644
index 000000000..1c2e4de4a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/tasks/test-xmlstring.yml
@@ -0,0 +1,85 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ - name: Copy expected results to remote
+ copy:
+ src: "results/{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - test-pretty-print.xml
+ - test-pretty-print-only.xml
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (not using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: .
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: "{{ xmlresponse.xmlstring }}\n"
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: true
+ diff: true
+ register: comparison
+
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse is not changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Read from xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ pretty_print: true
+ register: xmlresponse
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse.xmlstring }}'
+ dest: '/tmp/test-pretty-print-only.xml'
+ check_mode: true
+ diff: true
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print-only.xml /tmp/ansible-xml-beers.xml
+
+
+ # NOTE: Jinja2 templating eats trailing newlines
+ - name: Add a child via xmlstring (using pretty_print)
+ xml:
+ xmlstring: "{{ lookup('file', '{{ role_path }}/fixtures/ansible-xml-beers.xml') }}"
+ xpath: /business/beers
+ pretty_print: true
+ add_children:
+ - beer: Old Rasputin
+ register: xmlresponse_modification
+
+ - name: Compare to expected result
+ copy:
+ content: '{{ xmlresponse_modification.xmlstring }}'
+ dest: '/tmp/test-pretty-print.xml'
+ check_mode: true
+ diff: true
+ register: comparison
+
+ # FIXME: This change is related to the newline added by pretty_print
+ - name: Test expected result
+ assert:
+ that:
+ - xmlresponse_modification is changed
+ - comparison is not changed # identical
+ #command: diff -u {{ role_path }}/results/test-pretty-print.xml /tmp/ansible-xml-beers.xml
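
Unlike the file-based tests, these tasks feed the document in through xmlstring instead of path, and the (possibly modified) document comes back in the module's xmlstring return value instead of being written to disk. The NOTE comments matter here: the file lookup eats the trailing newline, which is why the first comparison appends one and why enabling pretty_print (which emits one) registers as a change. A small sketch of the in-memory round trip, with illustrative paths:

    - name: Add a child to an XML document held in a variable
      xml:
        xmlstring: "{{ lookup('file', 'fixtures/example.xml') }}"
        xpath: /catalog/items
        add_children:
          - item: extra
        pretty_print: true
      register: xmlresponse

    - name: Persist the modified document
      copy:
        content: "{{ xmlresponse.xmlstring }}"
        dest: /tmp/modified.xml
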
diff --git a/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml b/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml
new file mode 100644
index 000000000..a8dfc2396
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/xml/vars/main.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# -*- mode: yaml -*-
+
+bad_beers:
+- beer: "Natty Lite"
+- beer: "Miller Lite"
+- beer: "Coors Lite"
diff --git a/ansible_collections/community/general/tests/integration/targets/yarn/aliases b/ansible_collections/community/general/tests/integration/targets/yarn/aliases
new file mode 100644
index 000000000..cb1bc115a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yarn/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/freebsd
diff --git a/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml
new file mode 100644
index 000000000..6147ad33e
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yarn/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_pkg_mgr
+ - setup_gnutar
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml
new file mode 100644
index 000000000..e12d891c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yarn/tasks/main.yml
@@ -0,0 +1,23 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Yarn package manager integration tests
+# Copyright (c) 2018 David Gunter, <david.gunter@tivix.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# ============================================================
+
+- include_tasks: run.yml
+ vars:
+ nodejs_version: '{{ item.node_version }}'
+ nodejs_path: 'node-v{{ nodejs_version }}-{{ ansible_system|lower }}-x{{ ansible_userspace_bits }}'
+ yarn_version: '{{ item.yarn_version }}'
+ with_items:
+ - {node_version: 4.8.0, yarn_version: 1.6.0} # Lowest compatible nodejs version
+ - {node_version: 8.0.0, yarn_version: 1.6.0}
+ when:
+ - not (ansible_os_family == 'Alpine') # TODO
diff --git a/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml b/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml
new file mode 100644
index 000000000..0d7d6fb42
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yarn/tasks/run.yml
@@ -0,0 +1,233 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: 'Create directory for Node'
+ file:
+ path: /usr/local/lib/nodejs
+ state: directory
+
+- name: 'Download Nodejs'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/{{ nodejs_path }}.tar.gz'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ creates: '{{ remote_tmp_dir }}/{{ nodejs_path }}.tar.gz'
+
+- name: 'Download Yarn'
+ unarchive:
+ src: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/yarn/yarn-v{{yarn_version}}.tar.gz'
+ dest: '{{ remote_tmp_dir }}'
+ remote_src: true
+ creates: '{{ remote_tmp_dir }}/yarn-v{{yarn_version}}_pkg.tar.gz'
+
+- name: 'Copy node to directory created earlier'
+ command: "mv {{ remote_tmp_dir }}/{{ nodejs_path }} /usr/local/lib/nodejs/{{nodejs_path}}"
+
+# Clean up before running tests
+- name: Remove any previous Nodejs modules
+ file:
+ path: '{{remote_tmp_dir}}/node_modules'
+ state: absent
+
+# Set vars for our test harness
+- vars:
+ #node_bin_path: "/usr/local/lib/nodejs/node-v{{nodejs_version}}/bin"
+ node_bin_path: "/usr/local/lib/nodejs/{{ nodejs_path }}/bin"
+ yarn_bin_path: "{{ remote_tmp_dir }}/yarn-v{{ yarn_version }}/bin"
+ package: 'iconv-lite'
+ environment:
+ PATH: "{{ node_bin_path }}:{{ansible_env.PATH}}"
+ YARN_IGNORE_ENGINES: true
+ block:
+
+ # Get the Yarn version; registered under a distinct name so the yarn_version var used in yarn_bin_path is not shadowed
+ - shell: '{{ yarn_bin_path }}/yarn --version'
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_installed_version
+
+ - name: 'Create dummy package.json'
+ template:
+ src: package.j2
+ dest: '{{ remote_tmp_dir }}/package.json'
+
+ - name: 'Install all packages.'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+
+ - name: 'Install the same package from package.json again.'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: '{{ package }}'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_install
+
+ - assert:
+ that:
+ - not (yarn_install is changed)
+
+ - name: 'Install all packages in check mode.'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ check_mode: true
+ register: yarn_install_check
+
+ - name: verify test yarn global installation in check mode
+ assert:
+ that:
+ - yarn_install_check.err is defined
+ - yarn_install_check.out is defined
+ - yarn_install_check.err is none
+ - yarn_install_check.out is none
+
+ - name: 'Install package with explicit version (older version of package)'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ version: 1.1.0
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_install_old_package
+
+ - assert:
+ that:
+ - yarn_install_old_package is changed
+
+ - name: 'Again but without explicit executable path'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ name: left-pad
+ version: 1.1.0
+ state: present
+ environment:
+ PATH: '{{ yarn_bin_path }}:{{ node_bin_path }}:{{ ansible_env.PATH }}'
+
+ - name: 'Upgrade old package'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ state: latest
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_update_old_package
+
+ - assert:
+ that:
+ - yarn_update_old_package is changed
+
+ - name: 'Remove a package'
+ yarn:
+ path: '{{ remote_tmp_dir }}'
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: '{{ package }}'
+ state: absent
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_uninstall_package
+
+ - name: 'Assert package removed'
+ assert:
+ that:
+ - yarn_uninstall_package is changed
+
+ - name: 'Global install binary with explicit version (older version of package)'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: prettier
+ version: 2.0.0
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_install_old_binary
+
+ - assert:
+ that:
+ - yarn_global_install_old_binary is changed
+
+ - name: 'Global upgrade old binary'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: prettier
+ state: latest
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_update_old_binary
+
+ - assert:
+ that:
+ - yarn_global_update_old_binary is changed
+
+ - name: 'Global remove a binary'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: prettier
+ state: absent
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_uninstall_binary
+
+ - assert:
+ that:
+ - yarn_global_uninstall_binary is changed
+
+ - name: 'Global install package with no binary with explicit version (older version of package)'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ version: 1.1.0
+ state: present
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_install_old_package
+
+ - assert:
+ that:
+ - yarn_global_install_old_package is changed
+
+ - name: 'Global upgrade old package with no binary'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ state: latest
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_update_old_package
+
+ - assert:
+ that:
+ - yarn_global_update_old_package is changed
+
+ - name: 'Global remove a package with no binary'
+ yarn:
+ global: true
+ executable: '{{ yarn_bin_path }}/yarn'
+ name: left-pad
+ state: absent
+ environment:
+ PATH: '{{ node_bin_path }}:{{ ansible_env.PATH }}'
+ register: yarn_global_uninstall_package
+
+ - assert:
+ that:
+ - yarn_global_uninstall_package is changed
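
Taken together, the tasks above exercise the yarn module's main modes: installing everything from package.json (state: present with no name), pinning an explicit version, upgrading with state: latest, removing with state: absent, and the same cycle with global: true. A condensed sketch of the per-project calls, with an illustrative project path:

    - name: Install all dependencies listed in package.json
      yarn:
        path: /srv/app
        state: present

    - name: Pin one package at a specific version
      yarn:
        path: /srv/app
        name: left-pad
        version: 1.1.0
        state: present

    - name: Upgrade it to the newest release
      yarn:
        path: /srv/app
        name: left-pad
        state: latest
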
diff --git a/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2 b/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2
new file mode 100644
index 000000000..3f5456ad2
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yarn/templates/package.j2
@@ -0,0 +1,14 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+{
+ "name": "ansible-yarn-testing",
+ "version": "1.0.0",
+ "license": "MIT",
+ "dependencies": {
+ "iconv-lite": "^0.4.21",
+ "@types/node": "^12.0.0"
+ }
+}
diff --git a/ansible_collections/community/general/tests/integration/targets/yum_versionlock/aliases b/ansible_collections/community/general/tests/integration/targets/yum_versionlock/aliases
new file mode 100644
index 000000000..ca3f7e796
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yum_versionlock/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel8.4 # TODO make sure that tests work on 8.4 as well!
+disabled # TODO
diff --git a/ansible_collections/community/general/tests/integration/targets/yum_versionlock/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/yum_versionlock/tasks/main.yml
new file mode 100644
index 000000000..05f1f7495
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/yum_versionlock/tasks/main.yml
@@ -0,0 +1,87 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Update procps-ng as a temporary workaround until issue (#2539) is fixed
+ yum:
+ name: procps-ng
+ state: latest
+ when: ansible_distribution == 'Fedora' and ansible_distribution_major_version == '34'
+
+- block:
+ - name: Install necessary packages to test yum_versionlock
+ yum:
+ name: yum-plugin-versionlock
+ state: present
+ register: yum_versionlock_install
+
+ - name: Yum checkupdate
+ yum:
+ list: updates
+ register: yum_updates
+
+ - block:
+ - name: Lock all packages
+ community.general.yum_versionlock:
+ name: "{{ yum_updates.results | map(attribute='name') | list }}"
+ state: present
+ register: lock_all_packages
+
+ - name: Lock all packages again
+ community.general.yum_versionlock:
+ name: "{{ yum_updates.results | map(attribute='name') | list }}"
+ state: present
+ register: lock_all_packages_again
+
+ - name: Lock packages wildcard
+ community.general.yum_versionlock:
+ name: "nss*"
+ state: present
+ register: lock_nss_wildcard
+
+ # This should fail if it needs user interaction; the missing -y is on purpose.
+ - name: Update all packages (not really)
+ command: yum update --setopt=obsoletes=0
+ register: update_all_locked_packages
+ changed_when:
+ - '"No packages marked for update" not in update_all_locked_packages.stdout'
+ - '"Nothing to do" not in update_all_locked_packages.stdout'
+
+ - name: Unlock all packages
+ community.general.yum_versionlock:
+ name: "{{ yum_updates.results | map(attribute='name') | list }}"
+ state: absent
+ register: unlock_all_packages
+
+ - name: Update all packages
+ yum:
+ name: '*'
+ state: latest
+ check_mode: true
+ register: update_all_packages
+ when: yum_updates.results | length != 0
+
+ - name: Assert everything is fine
+ assert:
+ that:
+ - lock_all_packages is changed
+ - lock_all_packages_again is not changed
+ - lock_nss_wildcard is not changed
+ - update_all_locked_packages is not changed
+ - unlock_all_packages is changed
+ - update_all_packages is changed
+ when: yum_updates.results | length != 0
+
+ - name: Remove yum-plugin-versionlock if it was not installed before the test
+ yum:
+ name: yum-plugin-versionlock
+ state: absent
+ when: yum_versionlock_install is changed
+ when: (ansible_distribution in ['CentOS', 'RedHat'] and ansible_distribution_major_version is version('7', '>=')) or
+ (ansible_distribution == 'Fedora')
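
The test above drives yum_versionlock through a full cycle: lock every available update, confirm a second lock is a no-op, confirm yum update then has nothing to do, and unlock again. The module itself needs only a package list (wildcards allowed) and a state; a minimal sketch:

    - name: Prevent nss packages from being updated
      community.general.yum_versionlock:
        name: "nss*"
        state: present

    - name: Allow updates again
      community.general.yum_versionlock:
        name: "nss*"
        state: absent
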
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/aliases b/ansible_collections/community/general/tests/integration/targets/zypper/aliases
new file mode 100644
index 000000000..e0a62a673
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec b/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec
new file mode 100644
index 000000000..044ea3a54
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec
@@ -0,0 +1,12 @@
+Summary: Empty RPM
+Name: empty
+Version: 1
+Release: 0
+License: GPLv3
+Group: Applications/System
+BuildArch: noarch
+
+%description
+Empty RPM
+
+%files
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec.license b/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/files/empty.spec.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml
new file mode 100644
index 000000000..185f2f90a
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the zypper module
+# Copyright 2015, Guido Günther <agx@sigxcpu.org>
+# heavily based on the yum tests which are
+# Copyright 2014, James Tanner <tanner.jc@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: 'zypper.yml'
+ when: ansible_os_family == 'Suse'
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml b/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml
new file mode 100644
index 000000000..3eefddbdf
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/tasks/zypper.yml
@@ -0,0 +1,530 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: get hello package version
+ shell: zypper --xmlout se -svx hello | grep 'name="hello"' | grep 'repository="Main Repository"' | sed 's/.*edition="\([^ ]*\)".*/\1/'
+ register: hello_version
+
+- name: set URL of test package
+ set_fact:
+ hello_package_url: https://download.opensuse.org/distribution/leap/{{ ansible_distribution_version }}/repo/oss/x86_64/hello-{{ hello_version.stdout }}.x86_64.rpm
+
+- debug: var=hello_package_url
+
+# UNINSTALL
+- name: uninstall hello
+ zypper:
+ name: hello
+ state: removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: false
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify uninstallation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "rpm_result.rc == 1"
+
+# UNINSTALL AGAIN
+- name: uninstall hello again
+ zypper:
+ name: hello
+ state: removed
+ register: zypper_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# INSTALL
+- name: install hello
+ zypper:
+ name: hello
+ state: present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: false
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+# INSTALL AGAIN
+- name: install hello again
+ zypper:
+ name: hello
+ state: present
+ register: zypper_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# Multiple packages
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: false
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: false
+ register: rpm_metamail_result
+
+- name: verify packages uninstalled
+ assert:
+ that:
+ - "rpm_hello_result.rc != 0"
+ - "rpm_metamail_result.rc != 0"
+
+- name: install hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: false
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: false
+ register: rpm_metamail_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_hello_result.rc == 0"
+ - "rpm_metamail_result.rc == 0"
+
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+
+# INSTALL nonexistent package
+- name: install nonexistent package
+ zypper:
+ name: doesnotexist
+ state: present
+ register: zypper_result
+ ignore_errors: true
+
+- name: verify package installation failed
+ assert:
+ that:
+ - "zypper_result.rc == 104"
+ - "zypper_result.msg.startswith('No provider of')"
+
+# INSTALL broken local package
+- name: create directory
+ file:
+ path: "{{remote_tmp_dir | expanduser}}/zypper1"
+ state: directory
+
+- name: fake rpm package
+ file:
+ path: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm"
+ state: touch
+
+- name: install broken rpm
+ zypper:
+ name: "{{remote_tmp_dir | expanduser}}/zypper1/broken.rpm"
+ state: present
+ register: zypper_result
+ ignore_errors: true
+
+- debug: var=zypper_result
+
+- name: verify we failed installation of broken rpm
+ assert:
+ that:
+ - "zypper_result.rc == 3"
+ - "'Problem reading the RPM header' in zypper_result.stdout"
+
+# Build and install an empty rpm
+- name: uninstall empty
+ zypper:
+ name: empty
+ state: removed
+
+- name: install rpmbuild
+ zypper:
+ name: rpmbuild
+ state: present
+
+- name: clean zypper RPM cache
+ file:
+ name: /var/cache/zypper/RPMS
+ state: absent
+
+- name: create directory
+ file:
+ path: "{{remote_tmp_dir | expanduser}}/zypper2"
+ state: directory
+
+- name: copy spec file
+ copy:
+ src: empty.spec
+ dest: "{{ remote_tmp_dir | expanduser }}/zypper2/empty.spec"
+
+- name: build rpm
+ command: |
+ rpmbuild -bb \
+ --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build"
+ --define "_builddir %{_topdir}" \
+ --define "_rpmdir %{_topdir}" \
+ --define "_srcrpmdir %{_topdir}" \
+ --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \
+ --define "_sourcedir %{_topdir}" \
+ {{ remote_tmp_dir }}/zypper2/empty.spec
+ register: rpm_build_result
+
+- name: install empty rpm
+ zypper:
+ name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm"
+ disable_gpg_check: true
+ register: zypper_result
+
+- name: check empty with rpm
+ shell: rpm -q empty
+ failed_when: false
+ register: rpm_result
+
+- name: verify installation of empty
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: uninstall empty
+ zypper:
+ name: empty
+ state: removed
+
+- name: install empty rpm into an alternate root (extract only)
+ zypper:
+ name: "{{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/empty-1-0.noarch.rpm"
+ state: installed
+ disable_gpg_check: true
+ extra_args_precommand: --root {{ remote_tmp_dir | expanduser }}/testdir/
+
+- name: check that the var directory exists under testdir
+ stat: path={{ remote_tmp_dir | expanduser }}/testdir/var
+ register: stat_result
+
+- name: check that the rpm was extracted into the testdir folder and the var folder exists
+ assert:
+ that:
+ - "stat_result.stat.exists == true"
+
+
+# test simultaneous remove and install using +- prefixes
+
+- name: install hello to prep next task
+ zypper:
+ name: hello
+ state: present
+
+- name: remove metamail to prep next task
+ zypper:
+ name: metamail
+ state: absent
+
+- name: install and remove in the same run, with +- prefix
+ zypper:
+ name:
+ - -hello
+ - +metamail
+ state: present
+ register: zypper_res1
+
+- name: install and remove again, leave out plus
+ zypper:
+ name:
+ - metamail
+ - -hello
+ state: present
+ register: zypper_res1a
+
+- name: install and remove swapped
+ zypper:
+ name:
+ - -metamail
+ - hello
+ state: present
+ register: zypper_res1b
+
+- name: remove metamail (again)
+ zypper:
+ name: metamail
+ state: absent
+ register: zypper_res2
+
+- name: install hello (again)
+ zypper:
+ name: hello
+ state: present
+ register: zypper_res3
+
+- name: verify simultaneous install/remove worked
+ assert:
+ that:
+ - zypper_res1 is successful
+ - zypper_res1 is changed
+ - zypper_res1a is not changed
+ - zypper_res1b is changed
+ - zypper_res2 is not changed
+ - zypper_res3 is not changed
+
+
+- name: install and remove with state=absent
+ zypper:
+ name:
+ - metamail
+ - +hello
+ state: absent
+ register: zypper_res
+ ignore_errors: true
+
+- name: verify simultaneous install/remove failed with absent
+ assert:
+ that:
+ - zypper_res is failed
+ - zypper_res.msg == "Can not combine '+' prefix with state=remove/absent."
+
+- name: try rm patch
+ zypper:
+ name: openSUSE-2016-128
+ type: patch
+ state: absent
+ ignore_errors: true
+ register: zypper_patch
+- assert:
+ that:
+ - zypper_patch is failed
+ - zypper_patch.msg.startswith('Can not remove patches.')
+
+- name: try rm URL
+ zypper:
+ name: "{{ hello_package_url }}"
+ state: absent
+ ignore_errors: true
+ register: zypper_rm
+- assert:
+ that:
+ - zypper_rm is failed
+ - zypper_rm.msg.startswith('Can not remove via URL.')
+
+- name: remove pattern update_test
+ zypper:
+ name: update_test
+ type: pattern
+ state: absent
+
+- name: install pattern update_test
+ zypper:
+ name: update_test
+ type: pattern
+ state: present
+ register: zypper_install_pattern1
+
+- name: install pattern update_test again
+ zypper:
+ name: update_test
+ type: pattern
+ state: present
+ register: zypper_install_pattern2
+
+- assert:
+ that:
+ - zypper_install_pattern1 is changed
+ - zypper_install_pattern2 is not changed
+
+- name: remove hello
+ zypper:
+ name: hello
+ state: absent
+
+- name: install via URL
+ zypper:
+ state: present
+ name: "{{ hello_package_url }}"
+ register: zypperin1
+
+- name: test install
+ zypper:
+ name: hello
+ state: present
+ register: zypperin2
+
+- assert:
+ that:
+ - zypperin1 is succeeded
+ - zypperin1 is changed
+ - zypperin2 is not changed
+
+# check for https://github.com/ansible/ansible/issues/20139
+- name: run updatecache
+ zypper:
+ name: hello
+ state: present
+ update_cache: true
+ register: zypper_result_update_cache
+
+- name: run updatecache in check mode
+ zypper:
+ name: hello
+ state: present
+ update_cache: true
+ check_mode: true
+ register: zypper_result_update_cache_check
+
+
+- assert:
+ that:
+ - zypper_result_update_cache is successful
+ - zypper_result_update_cache_check is successful
+ - zypper_result_update_cache_check is not changed
+
+# - name: ensure no previous netcat package still exists
+# zypper:
+# name:
+# - netcat-openbsd
+# - gnu-netcat
+# state: absent
+#
+# - name: install netcat-openbsd which conflicts with gnu-netcat
+# zypper:
+# name: netcat-openbsd
+# state: present
+#
+# - name: try installation of gnu-netcat which should fail due to the conflict
+# zypper:
+# name: gnu-netcat
+# state: present
+# ignore_errors: true
+# register: zypper_pkg_conflict
+#
+# - assert:
+# that:
+# - zypper_pkg_conflict is failed
+# - "'conflicts with netcat-openbsd provided' in zypper_pkg_conflict.stdout"
+#
+# - name: retry installation of gnu-netcat with force_resolution set to choose a resolution
+# zypper:
+# name: gnu-netcat
+# state: present
+# force_resolution: True
+
+- name: duplicate rpms block
+ vars:
+ looplist:
+ - 1
+ - 2
+ block:
+ - name: Deploy spec files to build 2 packages with duplicate files.
+ template:
+ src: duplicate.spec.j2
+ dest: "{{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec"
+ loop: "{{ looplist }}"
+
+ - name: build rpms with duplicate files
+ command: |
+ rpmbuild -bb \
+ --define "_topdir {{remote_tmp_dir | expanduser }}/zypper2/rpm-build"
+ --define "_builddir %{_topdir}" \
+ --define "_rpmdir %{_topdir}" \
+ --define "_srcrpmdir %{_topdir}" \
+ --define "_specdir {{remote_tmp_dir | expanduser}}/zypper2" \
+ --define "_sourcedir %{_topdir}" \
+ {{ remote_tmp_dir | expanduser }}/zypper2/duplicate{{ item }}.spec
+ loop: "{{ looplist }}"
+
+ - name: install duplicate rpms
+ zypper:
+ name: >-
+ {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm
+ disable_gpg_check: true
+ ignore_errors: true
+ register: zypper_duplicate_result
+ loop: "{{ looplist }}"
+
+ - name: Read in duplicate file contents
+ slurp:
+ src: /usr/lib/duplicate/duplicate.txt
+ register: duplicate_out
+
+ - name: Check failure when installing rpms with duplicate files without replacefiles option
+ assert:
+ that:
+ - zypper_duplicate_result.results[0] is successful
+ - zypper_duplicate_result.results[1] is failed
+ - '"fileconflict" in zypper_duplicate_result.results[1].stdout'
+ - '"/usr/lib/duplicate/duplicate.txt" in zypper_duplicate_result.results[1].stdout'
+ - '"duplicate1" in duplicate_out.content | b64decode'
+
+ - name: install duplicate rpms
+ zypper:
+ name: >-
+ {{ remote_tmp_dir | expanduser }}/zypper2/rpm-build/noarch/duplicate{{ item }}-1-0.noarch.rpm
+ disable_gpg_check: true
+ replacefiles: true
+ ignore_errors: true
+ register: zypper_duplicate_result
+ loop: "{{ looplist }}"
+
+ - name: Read in duplicate file contents
+ slurp:
+ src: /usr/lib/duplicate/duplicate.txt
+ register: duplicate_out
+
+ - name: Check success installing rpms with duplicate files using replacefiles option
+ assert:
+ that:
+ - zypper_duplicate_result is successful
+ - zypper_duplicate_result is changed
+ - '"duplicate2" in duplicate_out.content | b64decode'
+
+ - name: Remove installed duplicate rpms
+ zypper:
+ name: "duplicate{{ item }}-1-0"
+ state: absent
+ loop: "{{ looplist }}"
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2 b/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2
new file mode 100644
index 000000000..6f63b665c
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper/templates/duplicate.spec.j2
@@ -0,0 +1,24 @@
+{#
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+#}
+
+Summary: Duplicate{{ item }} RPM. Installs one file that is a duplicate of other Duplicate# RPMs
+Name: duplicate{{ item }}
+Version: 1
+Release: 0
+License: GPLv3
+Group: Applications/System
+BuildArch: noarch
+
+%description
+Duplicate {{ item }} RPM. Package one file that will be a duplicate of other Duplicate RPM contents.
+This is only for testing of the replacefiles zypper option.
+
+%install
+mkdir -p "%{buildroot}/usr/lib/duplicate"
+echo "%{name}" > "%{buildroot}/usr/lib/duplicate/duplicate.txt"
+
+%files
+/usr/lib/duplicate/duplicate.txt
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases b/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases
new file mode 100644
index 000000000..e0a62a673
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/aliases
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/posix/1
+destructive
+skip/aix
+skip/freebsd
+skip/osx
+skip/macos
+skip/rhel
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo b/ansible_collections/community/general/tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo
new file mode 100644
index 000000000..aaa486d92
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/files/systemsmanagement_Uyuni_Utils.repo
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[systemsmanagement_Uyuni_Utils]
+name=Several utilities to develop, build or release Uyuni (openSUSE_Leap_15.3)
+type=rpm-md
+baseurl=https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Utils/openSUSE_Leap_15.3/
+gpgcheck=1
+gpgkey=https://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Utils/openSUSE_Leap_15.3/repodata/repomd.xml.key
+enabled=1
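
This fixture is an ordinary .repo definition. The zypper_repository module can consume such a file directly: pointing repo at a .repo file URL adds the repository it describes, and auto_import_keys accepts its signing key. A hedged sketch, assuming the file is served at an illustrative URL:

    - name: Add the Uyuni Utils repository from a .repo file
      community.general.zypper_repository:
        repo: https://example.com/systemsmanagement_Uyuni_Utils.repo  # illustrative URL
        auto_import_keys: true
        state: present
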
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml b/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml
new file mode 100644
index 000000000..982de6eb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml
new file mode 100644
index 000000000..1d655a56f
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# test code for the zypper repository module
+# Copyright (c) 2016, Guido Günther <agx@sigxcpu.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- include_tasks: 'test.yml'
+ when: ansible_os_family == 'Suse'
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml
new file mode 100644
index 000000000..739b4c264
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/test.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: collect repo configuration before test
+ shell: "grep . /etc/zypp/repos.d/*"
+ register: before
+
+- name: ensure zypper ref works
+ command: zypper -n ref
+
+- block:
+ - include_tasks: 'zypper_repository.yml'
+ always:
+ - name: remove repositories added during test
+ community.general.zypper_repository:
+ name: "{{item}}"
+ state: absent
+ with_items:
+ - chrome1
+ - chrome2
+ - test
+ - testrefresh
+ - testprio
+ - Apache_PHP_Modules
+ - systemsmanagement_Uyuni_Stable
+ - systemsmanagement_Uyuni_Utils
+
+ - name: collect repo configuration after test
+ shell: "grep . /etc/zypp/repos.d/*"
+ register: after
+
+ - name: verify repo configuration has been restored
+ assert:
+ that:
+ - before.stdout == after.stdout
+
+ - name: ensure zypper ref still works
+ command: zypper -n ref
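
The wrapper above is the standard cleanup pattern for destructive tests: the real checks run inside block, and the always section removes every repository the test might have added, then verifies /etc/zypp/repos.d matches its pre-test contents. The same shape, reduced to its skeleton with illustrative names:

    - name: capture state before test
      shell: "grep . /etc/zypp/repos.d/*"
      register: before

    - block:
        - include_tasks: the_actual_tests.yml   # illustrative file name
      always:
        - name: undo everything the tests added
          community.general.zypper_repository:
            name: test                          # illustrative repo name
            state: absent

        - name: collect state after test
          shell: "grep . /etc/zypp/repos.d/*"
          register: after

        - name: verify state was restored
          assert:
            that:
              - before.stdout == after.stdout
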
diff --git a/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
new file mode 100644
index 000000000..ec362af10
--- /dev/null
+++ b/ansible_collections/community/general/tests/integration/targets/zypper_repository/tasks/zypper_repository.yml
@@ -0,0 +1,289 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Delete test repo
+ community.general.zypper_repository:
+ name: test
+ state: absent
+ register: zypper_result
+
+- name: verify no change on test repo deletion
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+- name: Add test repo
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ register: zypper_result
+
+- name: verify repo addition
+ assert:
+ that:
+ - "zypper_result.changed"
+
+- name: Add same repo again
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ register: zypper_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+- name: Change repo URL
+ community.general.zypper_repository:
+ name: test
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+ register: zypper_result
+
+- name: verify change when only the URL changes
+ assert:
+ that:
+ - "zypper_result.changed"
+
+- name: use refresh option
+ community.general.zypper_repository:
+ name: testrefresh
+ refresh: false
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+
+- name: check refresh option
+ command: zypper -x lr testrefresh
+ register: zypper_result
+
+- name: verify autorefresh option set properly
+ assert:
+ that:
+ - '"autorefresh=\"0\"" in zypper_result.stdout'
+
+- name: set repo priority
+ community.general.zypper_repository:
+ name: testprio
+ priority: 55
+ state: present
+ repo: http://download.videolan.org/pub/vlc/SuSE/Leap_{{ ansible_distribution_version }}/
+
+- name: check priority option
+ command: zypper -x lr testprio
+ register: zypper_result
+
+- name: verify priority option set properly
+ assert:
+ that:
+ - '"priority=\"55\"" in zypper_result.stdout'
+
+- name: add two repos with same url
+ community.general.zypper_repository:
+ name: "{{item}}"
+ state: present
+ repo: http://dl.google.com/linux/chrome/rpm/stable/x86_64
+ with_items:
+ - chrome1
+ - chrome2
+
+- name: check old repo chrome1 is gone after update by url
+ command: zypper lr chrome1
+ register: zypper_result1
+ ignore_errors: true
+
+- name: check repo chrome2 exists after update by url
+ command: zypper lr chrome2
+ register: zypper_result2
+
+- name: ensure same url causes update of existing repo even if names differ
+ assert:
+ that:
+ - "zypper_result1.rc != 0"
+ - "'not found' in zypper_result1.stderr"
+ - "zypper_result2.rc == 0"
+ - "'http://dl.google.com/linux/chrome/rpm/stable/x86_64' in zypper_result2.stdout"
+
+- name: add two repos with same name
+ community.general.zypper_repository:
+ name: samename
+ state: present
+ repo: "{{ item }}"
+ with_items:
+ - http://download.opensuse.org/repositories/science/openSUSE_Leap_{{ ansible_distribution_version }}/
+ - http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/
+
+- name: check repo is updated by name
+ command: zypper lr samename
+ register: zypper_result
+
+- name: ensure url gets updated on repo with same name
+ assert:
+ that:
+ - "'/science/' not in zypper_result.stdout"
+ - "'/devel:/languages:/ruby/' in zypper_result.stdout"
+
+- name: remove last added repos (by URL to test that)
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/
+ state: absent
+
+# FIXME: this currently fails with `Repository 'Apache_PHP_Modules' is invalid.`
+# - name: "Test adding a repo with custom GPG key"
+# community.general.zypper_repository:
+# name: "Apache_PHP_Modules"
+# repo: "http://download.opensuse.org/repositories/server:/php:/applications/openSUSE_Tumbleweed/"
+# priority: 100
+# auto_import_keys: true
+# state: "present"
+
+- name: add a repo by releasever
+ community.general.zypper_repository:
+ name: releaseverrepo
+ repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/
+ state: present
+ register: add_repo
+
+- name: add a repo by releasever again
+ community.general.zypper_repository:
+ name: releaseverrepo
+ repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_$releasever/
+ state: present
+ register: add_repo_again
+
+- name: no update in case of $releasever usage in url
+ assert:
+ that:
+ - add_repo is changed
+ - add_repo_again is not changed
+
+- name: remove added repo
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/devel:/languages:/ruby/openSUSE_Leap_{{ ansible_distribution_version }}/
+ state: absent
+ register: remove_repo
+
+- name: verify repo was removed
+ assert:
+ that:
+ - remove_repo is changed
+
+- name: get list of files in /etc/zypp/repos.d/
+ command: ls /etc/zypp/repos.d/
+ changed_when: false
+ register: releaseverrepo_etc_zypp_reposd
+
+- name: verify removal of file releaseverrepo.repo in /etc/zypp/repos.d/
+ assert:
+ that:
+ - "'releaseverrepo' not in releaseverrepo_etc_zypp_reposd.stdout"
+
+- name: add a repo by basearch
+ community.general.zypper_repository:
+ name: basearchrepo
+ repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch
+ state: present
+ register: add_repo
+
+- name: add a repo by basearch again
+ community.general.zypper_repository:
+ name: basearchrepo
+ repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/$basearch
+ state: present
+ register: add_repo_again
+
+- name: no update in case of $basearch usage in url
+ assert:
+ that:
+ - add_repo is changed
+ - add_repo_again is not changed
+
+- name: remove added repo
+ community.general.zypper_repository:
+ repo: https://packagecloud.io/netdata/netdata/opensuse/13.2/x86_64
+ state: absent
+ register: remove_repo
+
+- name: verify repo was removed
+ assert:
+ that:
+ - remove_repo is changed
+
+# For now, the URL does not work for 15.4
+# FIXME: Try to get this working with newer versions
+# (Maybe 'Uyuni' needs to be replaced with something else?)
+- when: ansible_distribution_version is version('15.4', '<')
+ block:
+ - name: add new repository via url to .repo file
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: present
+ register: added_by_repo_file
+
+ - name: get repository details from zypper
+ command: zypper lr systemsmanagement_Uyuni_Stable
+ register: get_repository_details_from_zypper
+
+ - name: verify adding via .repo file was successful
+ assert:
+ that:
+ - "added_by_repo_file is changed"
+ - "get_repository_details_from_zypper.rc == 0"
+ - "'/systemsmanagement:/Uyuni:/Stable/' in get_repository_details_from_zypper.stdout"
+
+ - name: add same repository via url to .repo file again to verify idempotency
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: present
+ register: added_again_by_repo_file
+
+ - name: verify nothing was changed adding a repo with the same .repo file
+ assert:
+ that:
+ - added_again_by_repo_file is not changed
+
+ - name: remove repository via url to .repo file
+ community.general.zypper_repository:
+ repo: http://download.opensuse.org/repositories/systemsmanagement:/Uyuni:/Stable/openSUSE_Leap_{{ ansible_distribution_version }}/systemsmanagement:Uyuni:Stable.repo
+ state: absent
+ register: removed_by_repo_file
+
+ - name: get list of files in /etc/zypp/repos.d/
+ command: ls /etc/zypp/repos.d/
+ changed_when: false
+ register: etc_zypp_reposd
+
+ - name: verify removal via .repo file was successful, including cleanup of local .repo file in /etc/zypp/repos.d/
+ assert:
+ that:
+      - "removed_by_repo_file is changed"
+ - "'/systemsmanagement:/Uyuni:/Stable/' not in etc_zypp_reposd.stdout"
+
+# FIXME: THIS DOESN'T SEEM TO WORK ANYMORE WITH ANY OPENSUSE VERSION IN CI!
+- when: false
+ block:
+ - name: Copy test .repo file
+ copy:
+ src: 'files/systemsmanagement_Uyuni_Utils.repo'
+ dest: '{{ remote_tmp_dir }}'
+
+ - name: add new repository via local path to .repo file
+ community.general.zypper_repository:
+ repo: "{{ remote_tmp_dir }}/systemsmanagement_Uyuni_Utils.repo"
+ state: present
+ register: added_by_repo_local_file
+
+ - name: get repository details for systemsmanagement_Uyuni_Utils from zypper
+ command: zypper lr systemsmanagement_Uyuni_Utils
+ register: get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils
+
+ - name: verify adding repository via local .repo file was successful
+ assert:
+ that:
+ - "added_by_repo_local_file is changed"
+ - "get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.rc == 0"
+ - "'/systemsmanagement:/Uyuni:/Utils/' in get_repository_details_from_zypper_for_systemsmanagement_Uyuni_Utils.stdout"
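The checks above match escaped substrings such as "autorefresh=\"0\"" in the raw output of zypper -x lr <alias>. A sketch of the same verification done by parsing the XML instead; the sample document mimics zypper's usual XML shape (a repo element carrying alias/autorefresh/priority attributes), which is an assumption here, not something the tasks guarantee:

    import xml.etree.ElementTree as ET

    SAMPLE = '''<?xml version="1.0"?>
    <stream>
      <repo-list>
        <repo alias="testprio" name="testprio" enabled="1" autorefresh="0" priority="55"/>
      </repo-list>
    </stream>'''

    root = ET.fromstring(SAMPLE)
    repo = root.find(".//repo[@alias='testprio']")
    assert repo is not None
    assert repo.get('autorefresh') == '0'   # what the autorefresh substring assert tests
    assert repo.get('priority') == '55'     # what the priority substring assert tests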
diff --git a/ansible_collections/community/general/tests/sanity/extra/aliases.json b/ansible_collections/community/general/tests/sanity/extra/aliases.json
new file mode 100644
index 000000000..dabdcd6a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/aliases.json
@@ -0,0 +1,11 @@
+{
+ "include_symlinks": false,
+ "prefixes": [
+ ".azure-pipelines/azure-pipelines.yml",
+ "tests/integration/targets/"
+ ],
+ "output": "path-message",
+ "requirements": [
+ "PyYAML"
+ ]
+}
diff --git a/ansible_collections/community/general/tests/sanity/extra/aliases.json.license b/ansible_collections/community/general/tests/sanity/extra/aliases.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/aliases.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/extra/aliases.py b/ansible_collections/community/general/tests/sanity/extra/aliases.py
new file mode 100755
index 000000000..c1dcba0df
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/aliases.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Check extra collection docs with antsibull-docs."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import yaml
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+ paths = [path for path in paths if path.endswith('/aliases')]
+
+ with open('.azure-pipelines/azure-pipelines.yml', 'rb') as f:
+ azp = yaml.safe_load(f)
+
+ allowed_targets = set(['azp/generic/1'])
+ for stage in azp['stages']:
+ if stage['stage'].startswith(('Sanity', 'Unit', 'Generic', 'Summary')):
+ continue
+ for job in stage['jobs']:
+ for group in job['parameters']['groups']:
+ allowed_targets.add('azp/posix/{0}'.format(group))
+
+ for path in paths:
+ targets = []
+ skip = False
+ with open(path, 'r') as f:
+ for line in f:
+ if '#' in line:
+ line = line[:line.find('#')]
+ line = line.strip()
+ if line.startswith('needs/'):
+ continue
+ if line.startswith('skip/'):
+ continue
+ if line.startswith('cloud/'):
+ continue
+ if line.startswith('context/'):
+ continue
+ if line in ('unsupported', 'disabled', 'hidden'):
+ skip = True
+ if line in ('destructive', ):
+ continue
+ if '/' not in line:
+ continue
+ targets.append(line)
+ if skip:
+ continue
+ if not targets:
+ if 'targets/setup_' in path:
+ continue
+ print('%s: %s' % (path, 'found no targets'))
+ for target in targets:
+ if target not in allowed_targets:
+ print('%s: %s' % (path, 'found invalid target "{0}"'.format(target)))
+
+
+if __name__ == '__main__':
+ main()
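To make the derivation of allowed_targets concrete, here is a self-contained sketch running the same loop over a hand-written stand-in for the parsed azure-pipelines.yml (a plain dict, so PyYAML is not required; the stage names and groups are illustrative, not the real matrix):

    # Mirrors the allowed-target derivation in aliases.py.
    azp = {
        'stages': [
            {'stage': 'Sanity', 'jobs': []},   # skipped by the startswith() filter
            {'stage': 'Remote', 'jobs': [
                {'parameters': {'groups': ['1', '2', '3']}},
            ]},
            {'stage': 'Docker', 'jobs': [
                {'parameters': {'groups': ['4', '5']}},
            ]},
        ],
    }

    allowed_targets = set(['azp/generic/1'])
    for stage in azp['stages']:
        if stage['stage'].startswith(('Sanity', 'Unit', 'Generic', 'Summary')):
            continue
        for job in stage['jobs']:
            for group in job['parameters']['groups']:
                allowed_targets.add('azp/posix/{0}'.format(group))

    print(sorted(allowed_targets))
    # ['azp/generic/1', 'azp/posix/1', 'azp/posix/2', 'azp/posix/3', 'azp/posix/4', 'azp/posix/5']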
diff --git a/ansible_collections/community/general/tests/sanity/extra/botmeta.json b/ansible_collections/community/general/tests/sanity/extra/botmeta.json
new file mode 100644
index 000000000..c546ab5fd
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/botmeta.json
@@ -0,0 +1,8 @@
+{
+ "include_symlinks": false,
+ "output": "path-line-column-message",
+ "requirements": [
+ "PyYAML",
+ "voluptuous==0.12.1"
+ ]
+}
diff --git a/ansible_collections/community/general/tests/sanity/extra/botmeta.json.license b/ansible_collections/community/general/tests/sanity/extra/botmeta.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/botmeta.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/extra/botmeta.py b/ansible_collections/community/general/tests/sanity/extra/botmeta.py
new file mode 100755
index 000000000..3b6c34834
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/botmeta.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Check BOTMETA file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import os
+import re
+import sys
+
+import yaml
+
+from voluptuous import Any, MultipleInvalid, PREVENT_EXTRA, Schema
+from voluptuous.humanize import humanize_error
+
+
+IGNORE_NO_MAINTAINERS = [
+ 'plugins/cache/memcached.py',
+ 'plugins/cache/redis.py',
+ 'plugins/callback/cgroup_memory_recap.py',
+ 'plugins/callback/context_demo.py',
+ 'plugins/callback/counter_enabled.py',
+ 'plugins/callback/hipchat.py',
+ 'plugins/callback/jabber.py',
+ 'plugins/callback/log_plays.py',
+ 'plugins/callback/logdna.py',
+ 'plugins/callback/logentries.py',
+ 'plugins/callback/null.py',
+ 'plugins/callback/selective.py',
+ 'plugins/callback/slack.py',
+ 'plugins/callback/splunk.py',
+ 'plugins/callback/yaml.py',
+ 'plugins/inventory/nmap.py',
+ 'plugins/inventory/virtualbox.py',
+ 'plugins/connection/chroot.py',
+ 'plugins/connection/iocage.py',
+ 'plugins/connection/lxc.py',
+ 'plugins/lookup/cartesian.py',
+ 'plugins/lookup/chef_databag.py',
+ 'plugins/lookup/consul_kv.py',
+ 'plugins/lookup/credstash.py',
+ 'plugins/lookup/cyberarkpassword.py',
+ 'plugins/lookup/flattened.py',
+ 'plugins/lookup/keyring.py',
+ 'plugins/lookup/lastpass.py',
+ 'plugins/lookup/passwordstore.py',
+ 'plugins/lookup/shelvefile.py',
+ 'plugins/filter/json_query.py',
+ 'plugins/filter/random_mac.py',
+]
+
+FILENAME = '.github/BOTMETA.yml'
+
+LIST_ENTRIES = frozenset(('supershipit', 'maintainers', 'labels', 'keywords', 'notify', 'ignore'))
+
+AUTHOR_REGEX = re.compile(r'^\w.*\(@([\w-]+)\)(?![\w.])')
+
+
+def read_authors(filename):
+ data = {}
+ try:
+ with open(filename, 'rb') as b_module_data:
+ M = ast.parse(b_module_data.read())
+
+ for child in M.body:
+ if isinstance(child, ast.Assign):
+ for t in child.targets:
+ try:
+ theid = t.id
+ except AttributeError:
+                        # skip; an AttributeError here means the assignment target is not a plain name
+ continue
+
+ if theid == 'DOCUMENTATION':
+ if isinstance(child.value, ast.Dict):
+ data = ast.literal_eval(child.value)
+ else:
+ data = yaml.safe_load(child.value.s)
+
+ except Exception as e:
+ print('%s:%d:%d: Cannot load DOCUMENTATION: %s' % (filename, 0, 0, e))
+ return []
+
+ author = data.get('author') or []
+ if isinstance(author, str):
+ author = [author]
+ return author
+
+
+def extract_author_name(author):
+ m = AUTHOR_REGEX.match(author)
+ if m:
+ return m.group(1)
+ if author == 'Ansible Core Team':
+ return '$team_ansible_core'
+ return None
+
+
+def validate(filename, filedata):
+ if not filename.startswith('plugins/'):
+ return
+ if filename.startswith(('plugins/doc_fragments/', 'plugins/module_utils/')):
+ return
+    # Compile list of all active and inactive maintainers
+ all_maintainers = filedata['maintainers'] + filedata['ignore']
+ if not filename.startswith('plugins/filter/'):
+ maintainers = read_authors(filename)
+ for maintainer in maintainers:
+ maintainer = extract_author_name(maintainer)
+ if maintainer is not None and maintainer not in all_maintainers:
+ msg = 'Author %s not mentioned as active or inactive maintainer for %s (mentioned are: %s)' % (
+ maintainer, filename, ', '.join(all_maintainers))
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, msg))
+ should_have_no_maintainer = filename in IGNORE_NO_MAINTAINERS
+ if not all_maintainers and not should_have_no_maintainer:
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'No (active or inactive) maintainer mentioned for %s' % filename))
+ if all_maintainers and should_have_no_maintainer:
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Please remove %s from the ignore list of %s' % (filename, sys.argv[0])))
+
+
+def main():
+ """Main entry point."""
+ try:
+ with open(FILENAME, 'rb') as f:
+ botmeta = yaml.safe_load(f)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (FILENAME, ex.context_mark.line +
+ 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ return
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' %
+ (FILENAME, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+ return
+
+ # Validate schema
+
+ MacroSchema = Schema({
+ (str): Any(str, None),
+ }, extra=PREVENT_EXTRA)
+
+ FilesSchema = Schema({
+ (str): {
+ ('supershipit'): str,
+ ('support'): Any('community'),
+ ('maintainers'): str,
+ ('labels'): str,
+ ('keywords'): str,
+ ('notify'): str,
+ ('ignore'): str,
+ },
+ }, extra=PREVENT_EXTRA)
+
+ schema = Schema({
+ ('notifications'): bool,
+ ('automerge'): bool,
+ ('macros'): MacroSchema,
+ ('files'): FilesSchema,
+ }, extra=PREVENT_EXTRA)
+
+ try:
+ schema(botmeta)
+ except MultipleInvalid as ex:
+ for error in ex.errors:
+ # No way to get line/column numbers
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, humanize_error(botmeta, error)))
+ return
+
+ # Preprocess (substitute macros, convert to lists)
+ macros = botmeta.get('macros') or {}
+ macro_re = re.compile(r'\$([a-zA-Z_]+)')
+
+ def convert_macros(text, macros):
+ def f(m):
+ macro = m.group(1)
+ replacement = (macros[macro] or '')
+ if macro == 'team_ansible_core':
+ return '$team_ansible_core %s' % replacement
+ return replacement
+
+ return macro_re.sub(f, text)
+
+ files = {}
+ try:
+ for file, filedata in (botmeta.get('files') or {}).items():
+ file = convert_macros(file, macros)
+ filedata = dict((k, convert_macros(v, macros)) for k, v in filedata.items())
+ files[file] = filedata
+ for k, v in filedata.items():
+ if k in LIST_ENTRIES:
+ filedata[k] = v.split()
+ except KeyError as e:
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Found unknown macro %s' % e))
+ return
+
+ # Scan all files
+ unmatched = set(files)
+ for dirs in ('plugins', 'tests', 'changelogs'):
+ for dirpath, dirnames, filenames in os.walk(dirs):
+ for file in sorted(filenames):
+ if file.endswith('.pyc'):
+ continue
+ filename = os.path.join(dirpath, file)
+ if os.path.islink(filename):
+ continue
+ if os.path.isfile(filename):
+ matching_files = []
+ for file, filedata in files.items():
+ if filename.startswith(file):
+ matching_files.append((file, filedata))
+ if file in unmatched:
+ unmatched.remove(file)
+ if not matching_files:
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Did not find any entry for %s' % filename))
+
+ matching_files.sort(key=lambda kv: kv[0])
+ filedata = dict()
+ for k in LIST_ENTRIES:
+ filedata[k] = []
+ for dummy, data in matching_files:
+ for k, v in data.items():
+ if k in LIST_ENTRIES:
+ v = filedata[k] + v
+ filedata[k] = v
+ validate(filename, filedata)
+
+ for file in unmatched:
+ print('%s:%d:%d: %s' % (FILENAME, 0, 0, 'Entry %s was not used' % file))
+
+
+if __name__ == '__main__':
+ main()
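The macro substitution above is easiest to see in isolation. A stand-alone sketch with an invented macros table; note how $team_ansible_core keeps its literal name in the output so validate() can still match it against 'Ansible Core Team' authors, and how an unknown macro raises the KeyError that main() reports:

    import re

    macro_re = re.compile(r'\$([a-zA-Z_]+)')

    def convert_macros(text, macros):
        # Same substitution rule as in botmeta.py's main().
        def f(m):
            macro = m.group(1)
            replacement = (macros[macro] or '')
            if macro == 'team_ansible_core':
                return '$team_ansible_core %s' % replacement
            return replacement
        return macro_re.sub(f, text)

    macros = {'modules': 'plugins/modules', 'team_ansible_core': 'gundalow'}
    print(convert_macros('$modules/zypper_repository.py', macros))  # plugins/modules/zypper_repository.py
    print(convert_macros('$team_ansible_core', macros))             # $team_ansible_core gundalow
    try:
        convert_macros('$nonexistent', macros)
    except KeyError as e:
        print('Found unknown macro %s' % e)                         # what main() reports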
diff --git a/ansible_collections/community/general/tests/sanity/extra/extra-docs.json b/ansible_collections/community/general/tests/sanity/extra/extra-docs.json
new file mode 100644
index 000000000..9a28d174f
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/extra-docs.json
@@ -0,0 +1,13 @@
+{
+ "include_symlinks": false,
+ "prefixes": [
+ "docs/docsite/",
+ "plugins/",
+ "roles/"
+ ],
+ "output": "path-line-column-message",
+ "requirements": [
+ "ansible-core",
+ "antsibull-docs"
+ ]
+}
diff --git a/ansible_collections/community/general/tests/sanity/extra/extra-docs.json.license b/ansible_collections/community/general/tests/sanity/extra/extra-docs.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/extra-docs.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/extra/extra-docs.py b/ansible_collections/community/general/tests/sanity/extra/extra-docs.py
new file mode 100755
index 000000000..c636beb08
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/extra-docs.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Check extra collection docs with antsibull-docs."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import subprocess
+
+
+def main():
+ """Main entry point."""
+ env = os.environ.copy()
+ suffix = ':{env}'.format(env=env["ANSIBLE_COLLECTIONS_PATH"]) if 'ANSIBLE_COLLECTIONS_PATH' in env else ''
+ env['ANSIBLE_COLLECTIONS_PATH'] = '{root}{suffix}'.format(root=os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))), suffix=suffix)
+ p = subprocess.run(
+ ['antsibull-docs', 'lint-collection-docs', '--plugin-docs', '--disallow-semantic-markup', '--skip-rstcheck', '.'],
+ env=env,
+ check=False,
+ )
+ if p.returncode not in (0, 3):
+ print('{0}:0:0: unexpected return code {1}'.format(sys.argv[0], p.returncode))
+
+
+if __name__ == '__main__':
+ main()
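The ANSIBLE_COLLECTIONS_PATH arithmetic above assumes the script runs from the collection root, so that three dirname() calls land on the directory containing ansible_collections/. A worked example with an invented checkout path:

    import os

    cwd = '/work/ansible_collections/community/general'         # hypothetical checkout
    root = os.path.dirname(os.path.dirname(os.path.dirname(cwd)))
    print(root)                                                  # /work
    suffix = ':/opt/collections'                                 # pre-existing ANSIBLE_COLLECTIONS_PATH, if any
    print('{root}{suffix}'.format(root=root, suffix=suffix))     # /work:/opt/collections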
diff --git a/ansible_collections/community/general/tests/sanity/extra/licenses.json b/ansible_collections/community/general/tests/sanity/extra/licenses.json
new file mode 100644
index 000000000..50e47ca88
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/licenses.json
@@ -0,0 +1,4 @@
+{
+ "include_symlinks": false,
+ "output": "path-message"
+}
diff --git a/ansible_collections/community/general/tests/sanity/extra/licenses.json.license b/ansible_collections/community/general/tests/sanity/extra/licenses.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/licenses.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/extra/licenses.py b/ansible_collections/community/general/tests/sanity/extra/licenses.py
new file mode 100755
index 000000000..6227ee22f
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/licenses.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Prevent files without a correct license identifier from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import glob
+import sys
+
+
+def format_license_list(licenses):
+ if not licenses:
+ return '(empty)'
+ return ', '.join(['"%s"' % license for license in licenses])
+
+
+def find_licenses(filename, relax=False):
+ spdx_license_identifiers = []
+ other_license_identifiers = []
+ has_copyright = False
+ try:
+ with open(filename, 'r', encoding='utf-8') as f:
+ for line in f:
+ line = line.rstrip()
+ if 'Copyright ' in line:
+ has_copyright = True
+ if 'Copyright: ' in line:
+ print('%s: found copyright line with "Copyright:". Please remove the colon.' % (filename, ))
+ if 'SPDX-FileCopyrightText: ' in line:
+ has_copyright = True
+ idx = line.find('SPDX-License-Identifier: ')
+ if idx >= 0:
+ lic_id = line[idx + len('SPDX-License-Identifier: '):]
+ spdx_license_identifiers.extend(lic_id.split(' OR '))
+ if 'GNU General Public License' in line:
+ if 'v3.0+' in line:
+ other_license_identifiers.append('GPL-3.0-or-later')
+ if 'version 3 or later' in line:
+ other_license_identifiers.append('GPL-3.0-or-later')
+ if 'Simplified BSD License' in line:
+ other_license_identifiers.append('BSD-2-Clause')
+ if 'Apache License 2.0' in line:
+ other_license_identifiers.append('Apache-2.0')
+ if 'PSF License' in line or 'Python-2.0' in line:
+ other_license_identifiers.append('PSF-2.0')
+ if 'MIT License' in line:
+ other_license_identifiers.append('MIT')
+ except Exception as exc:
+ print('%s: error while processing file: %s' % (filename, exc))
+ if len(set(spdx_license_identifiers)) < len(spdx_license_identifiers):
+        print('%s: found duplicate SPDX-License-Identifier values' % (filename, ))
+ if other_license_identifiers and set(other_license_identifiers) != set(spdx_license_identifiers):
+ print('%s: SPDX-License-Identifier yielded the license list %s, while manual guessing yielded the license list %s' % (
+ filename, format_license_list(spdx_license_identifiers), format_license_list(other_license_identifiers)))
+ if not has_copyright and not relax:
+ print('%s: found no copyright notice' % (filename, ))
+ return sorted(spdx_license_identifiers)
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ # The following paths are allowed to have no license identifier
+ no_comments_allowed = [
+ 'changelogs/fragments/*.yml',
+ 'changelogs/fragments/*.yaml',
+ ]
+
+ # These files are completely ignored
+ ignore_paths = [
+ '.ansible-test-timeout.json',
+ '.reuse/dep5',
+ 'LICENSES/*.txt',
+ 'COPYING',
+ ]
+
+ no_comments_allowed = [fn for pattern in no_comments_allowed for fn in glob.glob(pattern)]
+ ignore_paths = [fn for pattern in ignore_paths for fn in glob.glob(pattern)]
+
+ valid_licenses = [license_file[len('LICENSES/'):-len('.txt')] for license_file in glob.glob('LICENSES/*.txt')]
+
+ for path in paths:
+ if path.startswith('./'):
+ path = path[2:]
+ if path in ignore_paths or path.startswith('tests/output/'):
+ continue
+ if os.stat(path).st_size == 0:
+ continue
+ if not path.endswith('.license') and os.path.exists(path + '.license'):
+ path = path + '.license'
+ valid_licenses_for_path = valid_licenses
+ if path.startswith('plugins/') and not path.startswith(('plugins/modules/', 'plugins/module_utils/', 'plugins/doc_fragments/')):
+ valid_licenses_for_path = [license for license in valid_licenses if license == 'GPL-3.0-or-later']
+ licenses = find_licenses(path, relax=path in no_comments_allowed)
+ if not licenses:
+ if path not in no_comments_allowed:
+ print('%s: must have at least one license' % (path, ))
+ else:
+ for license in licenses:
+ if license not in valid_licenses_for_path:
+ print('%s: found not allowed license "%s", must be one of %s' % (
+ path, license, format_license_list(valid_licenses_for_path)))
+
+
+if __name__ == '__main__':
+ main()
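A condensed, in-memory walk-through of what find_licenses() extracts from a file header, showing how the SPDX line and the human-readable license line must agree (the header text is invented):

    HEADER = '''\
    # Copyright (c) 2022, Jane Doe <jane@example.com>
    # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
    # SPDX-License-Identifier: GPL-3.0-or-later
    '''

    spdx, guessed, has_copyright = [], [], False
    for line in HEADER.splitlines():
        if 'Copyright ' in line or 'SPDX-FileCopyrightText: ' in line:
            has_copyright = True
        idx = line.find('SPDX-License-Identifier: ')
        if idx >= 0:
            spdx.extend(line[idx + len('SPDX-License-Identifier: '):].split(' OR '))
        if 'GNU General Public License' in line and 'v3.0+' in line:
            guessed.append('GPL-3.0-or-later')

    assert has_copyright
    assert set(spdx) == set(guessed)    # a mismatch here is what the check reports
    print(sorted(spdx))                 # ['GPL-3.0-or-later']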
diff --git a/ansible_collections/community/general/tests/sanity/extra/licenses.py.license b/ansible_collections/community/general/tests/sanity/extra/licenses.py.license
new file mode 100644
index 000000000..6c4958feb
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/licenses.py.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Felix Fontein <felix@fontein.de>
diff --git a/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json
new file mode 100644
index 000000000..c789a7fd3
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "plugins/"
+ ],
+ "output": "path-message"
+}
diff --git a/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json.license b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py
new file mode 100755
index 000000000..b39df83a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/extra/no-unwanted-files.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = (
+ '.cs',
+ '.ps1',
+ '.psm1',
+ '.py',
+ )
+
+ skip_paths = set([
+ ])
+
+ skip_directories = (
+ )
+
+ yaml_directories = (
+ 'plugins/test/',
+ 'plugins/filter/',
+ )
+
+ for path in paths:
+ if path in skip_paths:
+ continue
+
+ if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+ continue
+
+ if os.path.islink(path):
+ print('%s: is a symbolic link' % (path, ))
+ elif not os.path.isfile(path):
+ print('%s: is not a regular file' % (path, ))
+
+ ext = os.path.splitext(path)[1]
+
+ if ext in ('.yml', ) and any(path.startswith(yaml_directory) for yaml_directory in yaml_directories):
+ continue
+
+ if ext not in allowed_extensions:
+ print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+ main()
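All of these extra sanity scripts share one driving convention: candidate paths arrive as argv entries or newline-separated on stdin, and each violation is printed as a 'path: message' line. A hedged usage sketch (paths invented; assumes it is run from the collection root):

    import subprocess
    import sys

    paths = 'plugins/modules/foo.yml\nplugins/module_utils/bar.py\n'
    p = subprocess.run(
        [sys.executable, 'tests/sanity/extra/no-unwanted-files.py'],
        input=paths, capture_output=True, text=True, check=False,
    )
    print(p.stdout, end='')
    # a violating path would produce e.g.:
    # plugins/modules/foo.yml: extension must be one of: .cs, .ps1, .psm1, .py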
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.11.txt b/ansible_collections/community/general/tests/sanity/ignore-2.11.txt
new file mode 100644
index 000000000..f2c30270c
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,28 @@
+.azure-pipelines/scripts/publish-codecov.py compile-2.6!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-2.7!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py future-import-boilerplate
+.azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/rax.py use-argspec-type-path # fix needed
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/xfconf.py validate-modules:return-syntax-error
+tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.6 # django generated code
+tests/integration/targets/django_manage/files/base_test/simple_project/p1/manage.py compile-2.7 # django generated code
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.11.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.11.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.11.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.12.txt b/ansible_collections/community/general/tests/sanity/ignore-2.12.txt
new file mode 100644
index 000000000..a8e04ff30
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,21 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/rax.py use-argspec-type-path # fix needed
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/xfconf.py validate-modules:return-syntax-error
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.12.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.12.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.12.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.13.txt b/ansible_collections/community/general/tests/sanity/ignore-2.13.txt
new file mode 100644
index 000000000..a8e04ff30
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,21 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/rax.py use-argspec-type-path # fix needed
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/xfconf.py validate-modules:return-syntax-error
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.13.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.13.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.13.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.14.txt b/ansible_collections/community/general/tests/sanity/ignore-2.14.txt
new file mode 100644
index 000000000..7e00143a6
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,23 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/rax.py use-argspec-type-path # fix needed
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/xfconf.py validate-modules:return-syntax-error
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.14.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.14.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.14.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.15.txt b/ansible_collections/community/general/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..7e00143a6
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,23 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice
+plugins/modules/rax.py use-argspec-type-path # fix needed
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/xfconf.py validate-modules:return-syntax-error
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.15.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.15.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.15.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.16.txt b/ansible_collections/community/general/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..5fa3d90ba
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,23 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/consul.py validate-modules:doc-missing-type
+plugins/modules/consul.py validate-modules:undocumented-parameter
+plugins/modules/consul_session.py validate-modules:parameter-state-invalid-choice
+plugins/modules/gconftool2.py validate-modules:parameter-state-invalid-choice # state=get - removed in 8.0.0
+plugins/modules/homectl.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/iptables_state.py validate-modules:undocumented-parameter # params _back and _timeout used by action plugin
+plugins/modules/lxc_container.py validate-modules:use-run-command-not-popen
+plugins/modules/manageiq_policies.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/manageiq_provider.py validate-modules:doc-choices-do-not-match-spec # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:doc-missing-type # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:parameter-type-not-in-doc # missing docs on suboptions
+plugins/modules/manageiq_provider.py validate-modules:undocumented-parameter # missing docs on suboptions
+plugins/modules/manageiq_tags.py validate-modules:parameter-state-invalid-choice # state=list - removed in 8.0.0
+plugins/modules/osx_defaults.py validate-modules:parameter-state-invalid-choice
+plugins/modules/parted.py validate-modules:parameter-state-invalid-choice
+plugins/modules/puppet.py validate-modules:parameter-invalid # invalid alias - removed in 7.0.0
+plugins/modules/rax_files_objects.py use-argspec-type-path # module deprecated - removed in 9.0.0
+plugins/modules/rax_files.py validate-modules:parameter-state-invalid-choice # module deprecated - removed in 9.0.0
+plugins/modules/rax.py use-argspec-type-path # module deprecated - removed in 9.0.0
+plugins/modules/rhevm.py validate-modules:parameter-state-invalid-choice
+plugins/modules/udm_user.py import-3.11 # Uses deprecated stdlib library 'crypt'
+plugins/modules/xfconf.py validate-modules:return-syntax-error
diff --git a/ansible_collections/community/general/tests/sanity/ignore-2.16.txt.license b/ansible_collections/community/general/tests/sanity/ignore-2.16.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/sanity/ignore-2.16.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/compat/__init__.py b/ansible_collections/community/general/tests/unit/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/compat/__init__.py
diff --git a/ansible_collections/community/general/tests/unit/compat/builtins.py b/ansible_collections/community/general/tests/unit/compat/builtins.py
new file mode 100644
index 000000000..d548601d4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/compat/builtins.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__ # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
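What the BUILTINS constant is for: patching a builtin by the module name that differs between Python 2 and 3. A small sketch, assuming the collection is on the import path (the patched call itself is arbitrary):

    from unittest.mock import patch

    from ansible_collections.community.general.tests.unit.compat.builtins import BUILTINS

    with patch('%s.open' % BUILTINS) as mock_open_call:
        open('/tmp/whatever')                    # resolves through the patched builtins module
        mock_open_call.assert_called_once_with('/tmp/whatever')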
diff --git a/ansible_collections/community/general/tests/unit/compat/mock.py b/ansible_collections/community/general/tests/unit/compat/mock.py
new file mode 100644
index 000000000..bdbea945e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/compat/mock.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import * # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import * # noqa: F401, pylint: disable=unused-import
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
+
+
+# Prior to 3.4.4, mock_open cannot handle binary read_data
+if sys.version_info >= (3,) and sys.version_info < (3, 4, 4):
+ file_spec = None
+
+ def _iterate_read_data(read_data):
+ # Helper for mock_open:
+ # Retrieve lines from read_data via a generator so that separate calls to
+ # readline, read, and readlines are properly interleaved
+ sep = b'\n' if isinstance(read_data, bytes) else '\n'
+        data_as_list = [line + sep for line in read_data.split(sep)]
+
+ if data_as_list[-1] == sep:
+ # If the last line ended in a newline, the list comprehension will have an
+ # extra entry that's just a newline. Remove this.
+ data_as_list = data_as_list[:-1]
+ else:
+ # If there wasn't an extra newline by itself, then the file being
+ # emulated doesn't have a newline to end the last line remove the
+ # newline that our naive format() added
+ data_as_list[-1] = data_as_list[-1][:-1]
+
+ for line in data_as_list:
+ yield line
+
+ def mock_open(mock=None, read_data=''):
+ """
+ A helper function to create a mock to replace the use of `open`. It works
+ for `open` called directly or used as a context manager.
+
+ The `mock` argument is the mock object to configure. If `None` (the
+ default) then a `MagicMock` will be created for you, with the API limited
+ to methods or attributes available on standard file handles.
+
+        `read_data` is a string for the `read`, `readline`, and `readlines`
+        methods of the file handle to return.  This is an empty string by default.
+ """
+ def _readlines_side_effect(*args, **kwargs):
+ if handle.readlines.return_value is not None:
+ return handle.readlines.return_value
+ return list(_data)
+
+ def _read_side_effect(*args, **kwargs):
+ if handle.read.return_value is not None:
+ return handle.read.return_value
+ return type(read_data)().join(_data)
+
+ def _readline_side_effect():
+ if handle.readline.return_value is not None:
+ while True:
+ yield handle.readline.return_value
+ for line in _data:
+ yield line
+
+ global file_spec
+ if file_spec is None:
+ import _io
+ file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
+
+ if mock is None:
+ mock = MagicMock(name='open', spec=open)
+
+ handle = MagicMock(spec=file_spec)
+ handle.__enter__.return_value = handle
+
+ _data = _iterate_read_data(read_data)
+
+ handle.write.return_value = None
+ handle.read.return_value = None
+ handle.readline.return_value = None
+ handle.readlines.return_value = None
+
+ handle.read.side_effect = _read_side_effect
+ handle.readline.side_effect = _readline_side_effect()
+ handle.readlines.side_effect = _readlines_side_effect
+
+ mock.return_value = handle
+ return mock
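On Python 3 this shim simply re-exports unittest.mock, so mock_open can be used directly. A minimal sketch of the interleaved read behaviour that the fallback above reimplements for old interpreters (file name and contents invented):

    from unittest.mock import mock_open, patch

    m = mock_open(read_data='line one\nline two\n')
    with patch('builtins.open', m):
        with open('/etc/fake.conf') as f:        # open() is now the mock
            assert f.readline() == 'line one\n'
            assert f.read() == 'line two\n'      # read() continues after readline()
    m.assert_called_once_with('/etc/fake.conf')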
diff --git a/ansible_collections/community/general/tests/unit/compat/unittest.py b/ansible_collections/community/general/tests/unit/compat/unittest.py
new file mode 100644
index 000000000..d50bab86f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/compat/unittest.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import * # noqa: F401, pylint: disable=unused-import
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import * # noqa: F401, pylint: disable=unused-import
diff --git a/ansible_collections/community/general/tests/unit/mock/loader.py b/ansible_collections/community/general/tests/unit/mock/loader.py
new file mode 100644
index 000000000..948f4eecd
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/mock/loader.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.dataloader import DataLoader
+from ansible.module_utils.common.text.converters import to_bytes, to_text
+
+
+class DictDataLoader(DataLoader):
+
+ def __init__(self, file_mapping=None):
+ file_mapping = {} if file_mapping is None else file_mapping
+        assert isinstance(file_mapping, dict)
+
+ super(DictDataLoader, self).__init__()
+
+ self._file_mapping = file_mapping
+ self._build_known_directories()
+ self._vault_secrets = None
+
+ def load_from_file(self, path, cache=True, unsafe=False):
+ path = to_text(path)
+ if path in self._file_mapping:
+ return self.load(self._file_mapping[path], path)
+ return None
+
+ # TODO: the real _get_file_contents returns a bytestring, so we actually convert the
+ # unicode/text it's created with to utf-8
+ def _get_file_contents(self, file_name):
+ path = to_text(file_name)
+ if path in self._file_mapping:
+ return (to_bytes(self._file_mapping[path]), False)
+ else:
+ raise AnsibleParserError("file not found: %s" % path)
+
+ def path_exists(self, path):
+ path = to_text(path)
+ return path in self._file_mapping or path in self._known_directories
+
+ def is_file(self, path):
+ path = to_text(path)
+ return path in self._file_mapping
+
+ def is_directory(self, path):
+ path = to_text(path)
+ return path in self._known_directories
+
+ def list_directory(self, path):
+ ret = []
+ path = to_text(path)
+ for x in (list(self._file_mapping.keys()) + self._known_directories):
+ if x.startswith(path):
+ if os.path.dirname(x) == path:
+ ret.append(os.path.basename(x))
+ return ret
+
+ def is_executable(self, path):
+ # FIXME: figure out a way to make paths return true for this
+ return False
+
+ def _add_known_directory(self, directory):
+ if directory not in self._known_directories:
+ self._known_directories.append(directory)
+
+ def _build_known_directories(self):
+ self._known_directories = []
+ for path in self._file_mapping:
+ dirname = os.path.dirname(path)
+ while dirname not in ('/', ''):
+ self._add_known_directory(dirname)
+ dirname = os.path.dirname(dirname)
+
+ def push(self, path, content):
+ rebuild_dirs = False
+ if path not in self._file_mapping:
+ rebuild_dirs = True
+
+ self._file_mapping[path] = content
+
+ if rebuild_dirs:
+ self._build_known_directories()
+
+ def pop(self, path):
+ if path in self._file_mapping:
+ del self._file_mapping[path]
+ self._build_known_directories()
+
+ def clear(self):
+ self._file_mapping = dict()
+ self._known_directories = []
+
+ def get_basedir(self):
+ return os.getcwd()
+
+ def set_vault_secrets(self, vault_secrets):
+ self._vault_secrets = vault_secrets
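A quick usage sketch for DictDataLoader; the paths and file content here are made up for illustration:

    # hypothetical usage of the DictDataLoader mock above
    from ansible_collections.community.general.tests.unit.mock.loader import DictDataLoader

    fake_loader = DictDataLoader({
        '/etc/ansible/roles/x/tasks/main.yml': '- debug: msg=hello',
    })
    assert fake_loader.path_exists('/etc/ansible/roles/x/tasks/main.yml')
    assert fake_loader.is_directory('/etc/ansible/roles')            # parent dirs are derived
    assert fake_loader.list_directory('/etc/ansible/roles/x/tasks') == ['main.yml']
    fake_loader.push('/tmp/extra.yml', 'key: value')  # adding a file rebuilds the directory set
    fake_loader.pop('/tmp/extra.yml')                 # removing it rebuilds the set again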
diff --git a/ansible_collections/community/general/tests/unit/mock/path.py b/ansible_collections/community/general/tests/unit/mock/path.py
new file mode 100644
index 000000000..62ae02343
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/mock/path.py
@@ -0,0 +1,12 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock
+from ansible.utils.path import unfrackpath
+
+
+mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x)
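A hedged sketch of wiring mock_unfrackpath_noop into a test; the patch target is illustrative and depends on where the code under test imports unfrackpath from:

    # hypothetical: make unfrackpath a no-op for a path-sensitive test
    from ansible_collections.community.general.tests.unit.compat.mock import patch
    from ansible_collections.community.general.tests.unit.mock.path import mock_unfrackpath_noop

    @patch('ansible.utils.path.unfrackpath', mock_unfrackpath_noop)
    def test_paths_are_left_alone():
        # within this test, unfrackpath(x) simply returns x unchanged
        ...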
diff --git a/ansible_collections/community/general/tests/unit/mock/procenv.py b/ansible_collections/community/general/tests/unit/mock/procenv.py
new file mode 100644
index 000000000..4646d7f35
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/mock/procenv.py
@@ -0,0 +1,77 @@
+# Copyright (c) 2016, Matt Davis <mdavis@ansible.com>
+# Copyright (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import json
+
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils.six import PY3
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+@contextmanager
+def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
+ """
+ context manager that temporarily masks the test runner's values for stdin and argv
+ """
+ real_stdin = sys.stdin
+ real_argv = sys.argv
+
+ if PY3:
+ fake_stream = StringIO(stdin_data)
+ fake_stream.buffer = BytesIO(to_bytes(stdin_data))
+ else:
+ fake_stream = BytesIO(to_bytes(stdin_data))
+
+ try:
+ sys.stdin = fake_stream
+ sys.argv = argv_data
+
+ yield
+ finally:
+ sys.stdin = real_stdin
+ sys.argv = real_argv
+
+
+@contextmanager
+def swap_stdout():
+ """
+ context manager that temporarily replaces stdout for tests that need to verify output
+ """
+ old_stdout = sys.stdout
+
+ if PY3:
+ fake_stream = StringIO()
+ else:
+ fake_stream = BytesIO()
+
+ try:
+ sys.stdout = fake_stream
+
+ yield fake_stream
+ finally:
+ sys.stdout = old_stdout
+
+
+class ModuleTestCase(unittest.TestCase):
+ def setUp(self, module_args=None):
+ if module_args is None:
+ module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
+
+ args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
+
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
+ self.stdin_swap.__enter__()
+
+ def tearDown(self):
+ # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
+ self.stdin_swap.__exit__(None, None, None)
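A minimal sketch of using these context managers directly rather than through ModuleTestCase; run_module_under_test is a hypothetical stand-in for code that reads module args from stdin:

    # hypothetical: feed fake module args on stdin and capture stdout
    import json

    from ansible_collections.community.general.tests.unit.mock.procenv import (
        swap_stdin_and_argv,
        swap_stdout,
    )

    args = json.dumps(dict(ANSIBLE_MODULE_ARGS={'_ansible_remote_tmp': '/tmp',
                                                '_ansible_keep_remote_files': False}))
    with swap_stdin_and_argv(stdin_data=args):
        with swap_stdout() as fake_out:
            run_module_under_test()       # hypothetical; reads args from the faked stdin
        output = fake_out.getvalue()      # everything the module printed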
diff --git a/ansible_collections/community/general/tests/unit/mock/vault_helper.py b/ansible_collections/community/general/tests/unit/mock/vault_helper.py
new file mode 100644
index 000000000..2b116129f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/mock/vault_helper.py
@@ -0,0 +1,29 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.common.text.converters import to_bytes
+
+from ansible.parsing.vault import VaultSecret
+
+
+class TextVaultSecret(VaultSecret):
+    '''A secret piece of text, i.e. a password. Tracks text encoding.
+
+    The text may not use the default text encoding, so we keep track of
+    the encoding in order to encode it back to the same bytes.'''
+
+ def __init__(self, text, encoding=None, errors=None, _bytes=None):
+ super(TextVaultSecret, self).__init__()
+ self.text = text
+ self.encoding = encoding or 'utf-8'
+ self._bytes = _bytes
+ self.errors = errors or 'strict'
+
+ @property
+ def bytes(self):
+ '''The text encoded with encoding, unless we specifically set _bytes.'''
+ return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
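A short usage sketch; fake_loader refers back to the DictDataLoader sketch earlier in this diff, and the vault id 'default' is illustrative:

    # hypothetical: hand a TextVaultSecret to a DataLoader-style mock
    from ansible_collections.community.general.tests.unit.mock.vault_helper import TextVaultSecret

    secret = TextVaultSecret(u'hunter2')                  # utf-8 / strict by default
    assert secret.bytes == b'hunter2'
    fake_loader.set_vault_secrets([('default', secret)])  # list of (vault_id, secret) pairs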
diff --git a/ansible_collections/community/general/tests/unit/mock/yaml_helper.py b/ansible_collections/community/general/tests/unit/mock/yaml_helper.py
new file mode 100644
index 000000000..ce1bd719b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/mock/yaml_helper.py
@@ -0,0 +1,128 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import yaml
+
+from ansible.module_utils.six import PY3
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.dumper import AnsibleDumper
+
+
+class YamlTestUtils(object):
+ """Mixin class to combine with a unittest.TestCase subclass."""
+ def _loader(self, stream):
+ """Vault related tests will want to override this.
+
+ Vault cases should setup a AnsibleLoader that has the vault password."""
+ return AnsibleLoader(stream)
+
+ def _dump_stream(self, obj, stream, dumper=None):
+ """Dump to a py2-unicode or py3-string stream."""
+ if PY3:
+ return yaml.dump(obj, stream, Dumper=dumper)
+ else:
+ return yaml.dump(obj, stream, Dumper=dumper, encoding=None)
+
+ def _dump_string(self, obj, dumper=None):
+ """Dump to a py2-unicode or py3-string"""
+ if PY3:
+ return yaml.dump(obj, Dumper=dumper)
+ else:
+ return yaml.dump(obj, Dumper=dumper, encoding=None)
+
+ def _dump_load_cycle(self, obj):
+        # Each pass through a dump or load revs the 'generation'
+ # obj to yaml string
+ string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper)
+
+ # wrap a stream/file like StringIO around that yaml
+ stream_from_object_dump = io.StringIO(string_from_object_dump)
+ loader = self._loader(stream_from_object_dump)
+ # load the yaml stream to create a new instance of the object (gen 2)
+ obj_2 = loader.get_data()
+
+        # dump the gen 2 object directly to a string
+ string_from_object_dump_2 = self._dump_string(obj_2,
+ dumper=AnsibleDumper)
+
+ # The gen 1 and gen 2 yaml strings
+ self.assertEqual(string_from_object_dump, string_from_object_dump_2)
+ # the gen 1 (orig) and gen 2 py object
+ self.assertEqual(obj, obj_2)
+
+ # again! gen 3... load strings into py objects
+ stream_3 = io.StringIO(string_from_object_dump_2)
+ loader_3 = self._loader(stream_3)
+ obj_3 = loader_3.get_data()
+
+ string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper)
+
+ self.assertEqual(obj, obj_3)
+ # should be transitive, but...
+ self.assertEqual(obj_2, obj_3)
+ self.assertEqual(string_from_object_dump, string_from_object_dump_3)
+
+ def _old_dump_load_cycle(self, obj):
+ '''Dump the passed in object to yaml, load it back up, dump again, compare.'''
+ stream = io.StringIO()
+
+ yaml_string = self._dump_string(obj, dumper=AnsibleDumper)
+ self._dump_stream(obj, stream, dumper=AnsibleDumper)
+
+ yaml_string_from_stream = stream.getvalue()
+
+ # reset stream
+ stream.seek(0)
+
+ loader = self._loader(stream)
+ # loader = AnsibleLoader(stream, vault_password=self.vault_password)
+ obj_from_stream = loader.get_data()
+
+ stream_from_string = io.StringIO(yaml_string)
+ loader2 = self._loader(stream_from_string)
+ # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password)
+ obj_from_string = loader2.get_data()
+
+ stream_obj_from_stream = io.StringIO()
+ stream_obj_from_string = io.StringIO()
+
+ if PY3:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue()
+ yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue()
+
+ stream_obj_from_stream.seek(0)
+ stream_obj_from_string.seek(0)
+
+ if PY3:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper)
+ else:
+ yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None)
+ yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None)
+
+ assert yaml_string == yaml_string_obj_from_stream
+ assert yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream ==
+ yaml_string_stream_obj_from_string)
+ assert obj == obj_from_stream
+ assert obj == obj_from_string
+ assert obj == yaml_string_obj_from_stream
+ assert obj == yaml_string_obj_from_string
+ assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string
+ return {'obj': obj,
+ 'yaml_string': yaml_string,
+ 'yaml_string_from_stream': yaml_string_from_stream,
+ 'obj_from_stream': obj_from_stream,
+ 'obj_from_string': obj_from_string,
+ 'yaml_string_obj_from_string': yaml_string_obj_from_string}
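A minimal sketch of mixing YamlTestUtils into a test case, reusing the compat shim from earlier in this diff:

    # hypothetical round-trip test built on the mixin above
    from ansible_collections.community.general.tests.unit.compat import unittest
    from ansible_collections.community.general.tests.unit.mock.yaml_helper import YamlTestUtils

    class TestRoundTrip(unittest.TestCase, YamlTestUtils):
        def test_dict_round_trip(self):
            # dumps with AnsibleDumper, reloads, dumps again, and asserts equality throughout
            self._dump_load_cycle({'key': u'value'})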
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/conftest.py b/ansible_collections/community/general/tests/unit/plugins/become/conftest.py
new file mode 100644
index 000000000..93b593bdf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/conftest.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.cli.arguments import option_helpers as opt_help
+from ansible.utils import context_objects as co
+
+
+@pytest.fixture
+def parser():
+ parser = opt_help.create_base_parser('testparser')
+
+ opt_help.add_runas_options(parser)
+ opt_help.add_meta_options(parser)
+ opt_help.add_runtask_options(parser)
+ opt_help.add_vault_options(parser)
+ opt_help.add_async_options(parser)
+ opt_help.add_connect_options(parser)
+ opt_help.add_subset_options(parser)
+ opt_help.add_check_options(parser)
+ opt_help.add_inventory_options(parser)
+
+ return parser
+
+
+@pytest.fixture
+def reset_cli_args():
+ co.GlobalCLIArgs._Singleton__instance = None
+ yield
+ co.GlobalCLIArgs._Singleton__instance = None
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/helper.py b/ansible_collections/community/general/tests/unit/plugins/become/helper.py
new file mode 100644
index 000000000..9949e1bef
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/helper.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.loader import become_loader, get_shell_plugin
+
+
+def call_become_plugin(task, var_options, cmd, executable=None):
+ """Helper function to call become plugin simiarly on how Ansible itself handles this."""
+ plugin = become_loader.get(task['become_method'])
+ plugin.set_options(task_keys=task, var_options=var_options)
+ shell = get_shell_plugin(executable=executable)
+ return plugin.build_become_command(cmd, shell)
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py b/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py
new file mode 100644
index 000000000..4a922e9f2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_doas.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_doas_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.doas',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, default_exe, success,
+ default_cmd), cmd) is not None)
+
+
+def test_doas(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.doas',
+ 'become_flags': doas_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, task['become_user'], default_exe, success,
+ default_cmd), cmd) is not None)
+
+
+def test_doas_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ doas_exe = 'doas'
+ doas_flags = '-n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.doas',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': doas_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, var_options['ansible_become_user'], default_exe, success,
+ default_cmd), cmd) is not None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py b/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py
new file mode 100644
index 000000000..24af2b50c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_dzdo.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_dzdo_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = '-H -S -n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.dzdo',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, default_exe,
+ success, default_cmd), cmd) is not None
+
+
+def test_dzdo(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.dzdo',
+ 'become_flags': dzdo_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, task['become_user'], default_exe,
+ success, default_cmd), cmd) is not None
+ task['become_pass'] = 'testpass'
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"',
+ task['become_user'], default_exe, success, default_cmd), cmd) is not None
+
+
+def test_dzdo_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ dzdo_exe = 'dzdo'
+ dzdo_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.dzdo',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': dzdo_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, var_options['ansible_become_user'], default_exe,
+ success, default_cmd), cmd) is not None
+ var_options['ansible_become_pass'] = 'testpass'
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, dzdo_flags, r'\"\[dzdo via ansible, key=.+?\] password:\"',
+ var_options['ansible_become_user'], default_exe, success, default_cmd), cmd) is not None
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py b/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
new file mode 100644
index 000000000..3ec171661
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_ksu.py
@@ -0,0 +1,86 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_ksu_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, task['become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
+
+
+def test_ksu(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ 'become_flags': ksu_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, task['become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
+
+
+def test_ksu_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ ksu_exe = 'ksu'
+ ksu_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.ksu',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': ksu_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, var_options['ansible_become_user'], ksu_flags,
+ default_exe, success, default_cmd), cmd) is not None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py b/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py
new file mode 100644
index 000000000..eceea2e66
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_pbrun.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_pbrun_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.pbrun',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags,
+ success, default_cmd), cmd) is not None
+
+
+def test_pbrun(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pbrun',
+ 'become_flags': pbrun_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, task['become_user'],
+ success, default_cmd), cmd) is not None
+
+
+def test_pbrun_var_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pbrun_exe = 'pbrun'
+ pbrun_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pbrun',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': pbrun_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, var_options['ansible_become_user'],
+ success, default_cmd), cmd) is not None
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py b/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py
new file mode 100644
index 000000000..350cd5ad2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_pfexec.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2020 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_pfexec_basic(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = '-H -S -n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_method': 'community.general.pfexec',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s 'echo %s; %s'""" % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
+
+
+def test_pfexec(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pfexec',
+ 'become_flags': pfexec_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s 'echo %s; %s'""" % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
+
+
+def test_pfexec_varoptions(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ pfexec_exe = 'pfexec'
+ pfexec_flags = ''
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.pfexec',
+ 'become_flags': 'xxx',
+ }
+ var_options = {
+ 'ansible_become_user': 'bar',
+ 'ansible_become_flags': pfexec_flags,
+ }
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert re.match("""%s %s 'echo %s; %s'""" % (pfexec_exe, pfexec_flags, success, default_cmd), cmd) is not None
diff --git a/ansible_collections/community/general/tests/unit/plugins/become/test_sudosu.py b/ansible_collections/community/general/tests/unit/plugins/become/test_sudosu.py
new file mode 100644
index 000000000..f63f48df7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/become/test_sudosu.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2021 Ansible Project
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from ansible import context
+
+from .helper import call_become_plugin
+
+
+def test_sudosu(mocker, parser, reset_cli_args):
+ options = parser.parse_args([])
+ context._init_global_context(options)
+
+ default_cmd = "/bin/foo"
+ default_exe = "/bin/bash"
+ sudo_exe = 'sudo'
+ sudo_flags = '-H -s -n'
+
+ success = 'BECOME-SUCCESS-.+?'
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.sudosu',
+ 'become_flags': sudo_flags,
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, task['become_user'],
+ default_exe, success, default_cmd), cmd) is not None)
+
+ task = {
+ 'become_user': 'foo',
+ 'become_method': 'community.general.sudosu',
+ 'become_flags': sudo_flags,
+ 'become_pass': 'testpass',
+ }
+ var_options = {}
+ cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
+ print(cmd)
+ assert (re.match("""%s %s -p "%s" su -l %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''),
+ r"\[sudo via ansible, key=.+?\] password:", task['become_user'],
+ default_exe, success, default_cmd), cmd) is not None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py b/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py
new file mode 100644
index 000000000..8e203cfab
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/cache/test_memcached.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+pytest.importorskip('memcache')
+
+from ansible.plugins.loader import cache_loader
+from ansible_collections.community.general.plugins.cache.memcached import CacheModule as MemcachedCache
+
+
+def test_memcached_cachemodule():
+ assert isinstance(cache_loader.get('community.general.memcached'), MemcachedCache)
diff --git a/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py b/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py
new file mode 100644
index 000000000..81ae9293e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/cache/test_redis.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2012-2015, Michael DeHaan <michael.dehaan@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+pytest.importorskip('redis')
+
+from ansible.plugins.loader import cache_loader
+from ansible_collections.community.general.plugins.cache.redis import CacheModule as RedisCache
+
+
+def test_redis_cachemodule():
+ # The _uri option is required for the redis plugin
+ connection = '127.0.0.1:6379:1'
+ assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache)
+
+
+def test_redis_cachemodule_ipv6():
+    # The _uri option is required for the redis plugin; this variant uses an IPv6 address
+ connection = '[::1]:6379:1'
+ assert isinstance(cache_loader.get('community.general.redis', **{'_uri': connection}), RedisCache)
diff --git a/ansible_collections/community/general/tests/unit/plugins/callback/test_elastic.py b/ansible_collections/community/general/tests/unit/plugins/callback/test_elastic.py
new file mode 100644
index 000000000..73f4a6c27
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/callback/test_elastic.py
@@ -0,0 +1,127 @@
+# Copyright (c) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.task import Task
+from ansible.executor.task_result import TaskResult
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from ansible_collections.community.general.plugins.callback.elastic import ElasticSource, TaskData
+from collections import OrderedDict
+import sys
+
+ELASTIC_MINIMUM_PYTHON_VERSION = (3, 6)
+
+
+class TestElastic(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.callback.elastic.socket')
+ def setUp(self, mock_socket):
+ if sys.version_info < ELASTIC_MINIMUM_PYTHON_VERSION:
+            self.skipTest("Python %s+ is needed for Elastic" %
+                          ".".join(map(str, ELASTIC_MINIMUM_PYTHON_VERSION)))
+ mock_socket.gethostname.return_value = 'my-host'
+ mock_socket.gethostbyname.return_value = '1.2.3.4'
+ self.elastic = ElasticSource(display=None)
+ self.task_fields = {'args': {}}
+ self.mock_host = Mock('MockHost')
+ self.mock_host.name = 'myhost'
+ self.mock_host._uuid = 'myhost_uuid'
+ self.mock_task = Task()
+ self.mock_task.action = 'myaction'
+ self.mock_task.no_log = False
+ self.mock_task._role = 'myrole'
+ self.mock_task._uuid = 'myuuid'
+ self.mock_task.args = {}
+ self.mock_task.get_name = MagicMock(return_value='mytask')
+ self.mock_task.get_path = MagicMock(return_value='/mypath')
+ self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '')
+ self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ def test_start_task(self):
+ tasks_data = OrderedDict()
+
+ self.elastic.start_task(
+ tasks_data,
+ False,
+ 'myplay',
+ self.mock_task
+ )
+
+ task_data = tasks_data['myuuid']
+ self.assertEqual(task_data.uuid, 'myuuid')
+ self.assertEqual(task_data.name, 'mytask')
+ self.assertEqual(task_data.path, '/mypath')
+ self.assertEqual(task_data.play, 'myplay')
+ self.assertEqual(task_data.action, 'myaction')
+ self.assertEqual(task_data.args, '')
+
+ def test_finish_task_with_a_host_match(self):
+ tasks_data = OrderedDict()
+ tasks_data['myuuid'] = self.my_task
+
+ self.elastic.finish_task(
+ tasks_data,
+ 'ok',
+ self.my_task_result
+ )
+
+ task_data = tasks_data['myuuid']
+ host_data = task_data.host_data['myhost_uuid']
+ self.assertEqual(host_data.uuid, 'myhost_uuid')
+ self.assertEqual(host_data.name, 'myhost')
+ self.assertEqual(host_data.status, 'ok')
+
+ def test_finish_task_without_a_host_match(self):
+ result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+ tasks_data = OrderedDict()
+ tasks_data['myuuid'] = self.my_task
+
+ self.elastic.finish_task(
+ tasks_data,
+ 'ok',
+ result
+ )
+
+ task_data = tasks_data['myuuid']
+ host_data = task_data.host_data['include']
+ self.assertEqual(host_data.uuid, 'include')
+ self.assertEqual(host_data.name, 'include')
+ self.assertEqual(host_data.status, 'ok')
+
+ def test_get_error_message(self):
+ test_cases = (
+ ('my-exception', 'my-msg', None, 'my-exception'),
+ (None, 'my-msg', None, 'my-msg'),
+ (None, None, None, 'failed'),
+ )
+
+ for tc in test_cases:
+ result = self.elastic.get_error_message(generate_test_data(tc[0], tc[1], tc[2]))
+ self.assertEqual(result, tc[3])
+
+ def test_enrich_error_message(self):
+ test_cases = (
+ ('my-exception', 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'),
+ ('my-exception', None, 'my-stderr', 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'),
+ (None, 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'),
+ ('my-exception', 'my-msg', None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'),
+ ('my-exception', 'my-msg', '\nline1\nline2', 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"')
+ )
+
+ for tc in test_cases:
+ result = self.elastic.enrich_error_message(generate_test_data(tc[0], tc[1], tc[2]))
+ self.assertEqual(result, tc[3])
+
+
+def generate_test_data(exception=None, msg=None, stderr=None):
+ res_data = OrderedDict()
+ if exception:
+ res_data['exception'] = exception
+ if msg:
+ res_data['msg'] = msg
+ if stderr:
+ res_data['stderr'] = stderr
+ return res_data
diff --git a/ansible_collections/community/general/tests/unit/plugins/callback/test_loganalytics.py b/ansible_collections/community/general/tests/unit/plugins/callback/test_loganalytics.py
new file mode 100644
index 000000000..f9fef3c5d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/callback/test_loganalytics.py
@@ -0,0 +1,66 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.executor.task_result import TaskResult
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock
+from ansible_collections.community.general.plugins.callback.loganalytics import AzureLogAnalyticsSource
+from datetime import datetime
+
+import json
+
+
+class TestAzureLogAnalytics(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.callback.loganalytics.socket')
+ def setUp(self, mock_socket):
+ mock_socket.gethostname.return_value = 'my-host'
+ mock_socket.gethostbyname.return_value = '1.2.3.4'
+ self.loganalytics = AzureLogAnalyticsSource()
+ self.mock_task = Mock('MockTask')
+ self.mock_task._role = 'myrole'
+ self.mock_task._uuid = 'myuuid'
+ self.task_fields = {'args': {}}
+ self.mock_host = Mock('MockHost')
+ self.mock_host.name = 'myhost'
+
+ @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime')
+ @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url')
+ def test_overall(self, open_url_mock, mock_datetime):
+ mock_datetime.utcnow.return_value = datetime(2020, 12, 1)
+ result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a',
+ shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==',
+ state='OK',
+ result=result,
+ runtime=100)
+
+ args, kwargs = open_url_mock.call_args
+ sent_data = json.loads(args[1])
+
+ self.assertEqual(sent_data['event']['timestamp'], 'Tue, 01 Dec 2020 00:00:00 GMT')
+ self.assertEqual(sent_data['event']['host'], 'my-host')
+ self.assertEqual(sent_data['event']['uuid'], 'myuuid')
+ self.assertEqual(args[0], 'https://01234567-0123-0123-0123-01234567890a.ods.opinsights.azure.com/api/logs?api-version=2016-04-01')
+
+ @patch('ansible_collections.community.general.plugins.callback.loganalytics.datetime')
+ @patch('ansible_collections.community.general.plugins.callback.loganalytics.open_url')
+ def test_auth_headers(self, open_url_mock, mock_datetime):
+ mock_datetime.utcnow.return_value = datetime(2020, 12, 1)
+ result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ self.loganalytics.send_event(workspace_id='01234567-0123-0123-0123-01234567890a',
+ shared_key='dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA==',
+ state='OK',
+ result=result,
+ runtime=100)
+
+ args, kwargs = open_url_mock.call_args
+ headers = kwargs['headers']
+
+        self.assertRegex(headers['Authorization'], r'^SharedKey 01234567-0123-0123-0123-01234567890a:.*=$')
+ self.assertEqual(headers['Log-Type'], 'ansible_playbook')
diff --git a/ansible_collections/community/general/tests/unit/plugins/callback/test_opentelemetry.py b/ansible_collections/community/general/tests/unit/plugins/callback/test_opentelemetry.py
new file mode 100644
index 000000000..dea2e29d4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/callback/test_opentelemetry.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Victor Martinez <VictorMartinezRubio@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.task import Task
+from ansible.executor.task_result import TaskResult
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock, Mock
+from ansible_collections.community.general.plugins.callback.opentelemetry import OpenTelemetrySource, TaskData
+from collections import OrderedDict
+import sys
+
+OPENTELEMETRY_MINIMUM_PYTHON_VERSION = (3, 7)
+
+
+class TestOpentelemetry(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.callback.opentelemetry.socket')
+ def setUp(self, mock_socket):
+        # TODO: this python version validation won't be needed once the _time_ns call is mocked.
+ if sys.version_info < OPENTELEMETRY_MINIMUM_PYTHON_VERSION:
+            self.skipTest("Python %s+ is needed for OpenTelemetry" %
+                          ".".join(map(str, OPENTELEMETRY_MINIMUM_PYTHON_VERSION)))
+
+ mock_socket.gethostname.return_value = 'my-host'
+ mock_socket.gethostbyname.return_value = '1.2.3.4'
+ self.opentelemetry = OpenTelemetrySource(display=None)
+ self.task_fields = {'args': {}}
+ self.mock_host = Mock('MockHost')
+ self.mock_host.name = 'myhost'
+ self.mock_host._uuid = 'myhost_uuid'
+ self.mock_task = Task()
+ self.mock_task.action = 'myaction'
+ self.mock_task.no_log = False
+ self.mock_task._role = 'myrole'
+ self.mock_task._uuid = 'myuuid'
+ self.mock_task.args = {}
+ self.mock_task.get_name = MagicMock(return_value='mytask')
+ self.mock_task.get_path = MagicMock(return_value='/mypath')
+ self.my_task = TaskData('myuuid', 'mytask', '/mypath', 'myplay', 'myaction', '')
+ self.my_task_result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ def test_start_task(self):
+ tasks_data = OrderedDict()
+
+ self.opentelemetry.start_task(
+ tasks_data,
+ False,
+ 'myplay',
+ self.mock_task
+ )
+
+ task_data = tasks_data['myuuid']
+ self.assertEqual(task_data.uuid, 'myuuid')
+ self.assertEqual(task_data.name, 'mytask')
+ self.assertEqual(task_data.path, '/mypath')
+ self.assertEqual(task_data.play, 'myplay')
+ self.assertEqual(task_data.action, 'myaction')
+ self.assertEqual(task_data.args, {})
+
+ def test_finish_task_with_a_host_match(self):
+ tasks_data = OrderedDict()
+ tasks_data['myuuid'] = self.my_task
+
+ self.opentelemetry.finish_task(
+ tasks_data,
+ 'ok',
+ self.my_task_result,
+ ""
+ )
+
+ task_data = tasks_data['myuuid']
+ host_data = task_data.host_data['myhost_uuid']
+ self.assertEqual(host_data.uuid, 'myhost_uuid')
+ self.assertEqual(host_data.name, 'myhost')
+ self.assertEqual(host_data.status, 'ok')
+
+ def test_finish_task_without_a_host_match(self):
+ result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+ tasks_data = OrderedDict()
+ tasks_data['myuuid'] = self.my_task
+
+ self.opentelemetry.finish_task(
+ tasks_data,
+ 'ok',
+ result,
+ ""
+ )
+
+ task_data = tasks_data['myuuid']
+ host_data = task_data.host_data['include']
+ self.assertEqual(host_data.uuid, 'include')
+ self.assertEqual(host_data.name, 'include')
+ self.assertEqual(host_data.status, 'ok')
+ self.assertEqual(self.opentelemetry.ansible_version, None)
+
+ def test_finish_task_include_with_ansible_version(self):
+ task_fields = {'args': {'_ansible_version': '1.2.3'}}
+ result = TaskResult(host=None, task=self.mock_task, return_data={}, task_fields=task_fields)
+ tasks_data = OrderedDict()
+ tasks_data['myuuid'] = self.my_task
+
+ self.opentelemetry.finish_task(
+ tasks_data,
+ 'ok',
+ result,
+ ""
+ )
+
+ self.assertEqual(self.opentelemetry.ansible_version, '1.2.3')
+
+ def test_get_error_message(self):
+ test_cases = (
+ ('my-exception', 'my-msg', None, 'my-exception'),
+ (None, 'my-msg', None, 'my-msg'),
+ (None, None, None, 'failed'),
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.get_error_message(generate_test_data(tc[0], tc[1], tc[2]))
+ self.assertEqual(result, tc[3])
+
+ def test_get_error_message_from_results(self):
+ test_cases = (
+ ('my-exception', 'my-msg', None, False, None),
+ (None, 'my-msg', None, False, None),
+ (None, None, None, False, None),
+ ('my-exception', 'my-msg', None, True, 'shell(none) - my-exception'),
+ (None, 'my-msg', None, True, 'shell(none) - my-msg'),
+ (None, None, None, True, 'shell(none) - failed'),
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.get_error_message_from_results([generate_test_data(tc[0], tc[1], tc[2], tc[3])], 'shell')
+ self.assertEqual(result, tc[4])
+
+ def test_enrich_error_message(self):
+ test_cases = (
+ ('my-exception', 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"'),
+ ('my-exception', None, 'my-stderr', 'message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"'),
+ (None, 'my-msg', 'my-stderr', 'message: "my-msg"\nexception: "None"\nstderr: "my-stderr"'),
+ ('my-exception', 'my-msg', None, 'message: "my-msg"\nexception: "my-exception"\nstderr: "None"'),
+ ('my-exception', 'my-msg', '\nline1\nline2', 'message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"')
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.enrich_error_message(generate_test_data(tc[0], tc[1], tc[2]))
+ self.assertEqual(result, tc[3])
+
+ def test_enrich_error_message_from_results(self):
+ test_cases = (
+ ('my-exception', 'my-msg', 'my-stderr', False, ''),
+ ('my-exception', None, 'my-stderr', False, ''),
+ (None, 'my-msg', 'my-stderr', False, ''),
+ ('my-exception', 'my-msg', None, False, ''),
+ ('my-exception', 'my-msg', '\nline1\nline2', False, ''),
+ ('my-exception', 'my-msg', 'my-stderr', True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "my-stderr"\n'),
+ ('my-exception', None, 'my-stderr', True, 'shell(none) - message: "failed"\nexception: "my-exception"\nstderr: "my-stderr"\n'),
+ (None, 'my-msg', 'my-stderr', True, 'shell(none) - message: "my-msg"\nexception: "None"\nstderr: "my-stderr"\n'),
+ ('my-exception', 'my-msg', None, True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "None"\n'),
+ ('my-exception', 'my-msg', '\nline1\nline2', True, 'shell(none) - message: "my-msg"\nexception: "my-exception"\nstderr: "\nline1\nline2"\n')
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.enrich_error_message_from_results([generate_test_data(tc[0], tc[1], tc[2], tc[3])], 'shell')
+ self.assertEqual(result, tc[4])
+
+ def test_url_from_args(self):
+ test_cases = (
+ ({}, ""),
+ ({'url': 'my-url'}, 'my-url'),
+ ({'url': 'my-url', 'api_url': 'my-api_url'}, 'my-url'),
+ ({'api_url': 'my-api_url'}, 'my-api_url'),
+ ({'api_url': 'my-api_url', 'chart_repo_url': 'my-chart_repo_url'}, 'my-api_url')
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.url_from_args(tc[0])
+ self.assertEqual(result, tc[1])
+
+ def test_parse_and_redact_url_if_possible(self):
+ test_cases = (
+ ({}, None),
+ ({'url': 'wrong'}, None),
+ ({'url': 'https://my-url'}, 'https://my-url'),
+ ({'url': 'https://user:pass@my-url'}, 'https://my-url'),
+ ({'url': 'https://my-url:{{ my_port }}'}, 'https://my-url:{{ my_port }}'),
+ ({'url': 'https://{{ my_hostname }}:{{ my_port }}'}, None),
+ ({'url': '{{my_schema}}{{ my_hostname }}:{{ my_port }}'}, None)
+ )
+
+ for tc in test_cases:
+ result = self.opentelemetry.parse_and_redact_url_if_possible(tc[0])
+ if tc[1]:
+ self.assertEqual(result.geturl(), tc[1])
+ else:
+ self.assertEqual(result, tc[1])
+
+
+def generate_test_data(exception=None, msg=None, stderr=None, failed=False):
+ res_data = OrderedDict()
+ if exception:
+ res_data['exception'] = exception
+ if msg:
+ res_data['msg'] = msg
+ if stderr:
+ res_data['stderr'] = stderr
+ res_data['failed'] = failed
+ return res_data
diff --git a/ansible_collections/community/general/tests/unit/plugins/callback/test_splunk.py b/ansible_collections/community/general/tests/unit/plugins/callback/test_splunk.py
new file mode 100644
index 000000000..ddcdae24c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/callback/test_splunk.py
@@ -0,0 +1,64 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.executor.task_result import TaskResult
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, Mock
+from ansible_collections.community.general.plugins.callback.splunk import SplunkHTTPCollectorSource
+from datetime import datetime
+
+import json
+
+
+class TestSplunkClient(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.callback.splunk.socket')
+ def setUp(self, mock_socket):
+ mock_socket.gethostname.return_value = 'my-host'
+ mock_socket.gethostbyname.return_value = '1.2.3.4'
+ self.splunk = SplunkHTTPCollectorSource()
+ self.mock_task = Mock('MockTask')
+ self.mock_task._role = 'myrole'
+ self.mock_task._uuid = 'myuuid'
+ self.task_fields = {'args': {}}
+ self.mock_host = Mock('MockHost')
+ self.mock_host.name = 'myhost'
+
+ @patch('ansible_collections.community.general.plugins.callback.splunk.datetime')
+ @patch('ansible_collections.community.general.plugins.callback.splunk.open_url')
+ def test_timestamp_with_milliseconds(self, open_url_mock, mock_datetime):
+ mock_datetime.utcnow.return_value = datetime(2020, 12, 1)
+ result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ self.splunk.send_event(
+ url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=True,
+ batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100
+ )
+
+ args, kwargs = open_url_mock.call_args
+ sent_data = json.loads(args[1])
+
+ self.assertEqual(sent_data['event']['timestamp'], '2020-12-01 00:00:00.000000 +0000')
+ self.assertEqual(sent_data['event']['host'], 'my-host')
+ self.assertEqual(sent_data['event']['ip_address'], '1.2.3.4')
+
+ @patch('ansible_collections.community.general.plugins.callback.splunk.datetime')
+ @patch('ansible_collections.community.general.plugins.callback.splunk.open_url')
+ def test_timestamp_without_milliseconds(self, open_url_mock, mock_datetime):
+ mock_datetime.utcnow.return_value = datetime(2020, 12, 1)
+ result = TaskResult(host=self.mock_host, task=self.mock_task, return_data={}, task_fields=self.task_fields)
+
+ self.splunk.send_event(
+ url='endpoint', authtoken='token', validate_certs=False, include_milliseconds=False,
+ batch="abcefghi-1234-5678-9012-abcdefghijkl", state='OK', result=result, runtime=100
+ )
+
+ args, kwargs = open_url_mock.call_args
+ sent_data = json.loads(args[1])
+
+ self.assertEqual(sent_data['event']['timestamp'], '2020-12-01 00:00:00 +0000')
+ self.assertEqual(sent_data['event']['host'], 'my-host')
+ self.assertEqual(sent_data['event']['ip_address'], '1.2.3.4')
diff --git a/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py b/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py
new file mode 100644
index 000000000..8733a92e0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/connection/test_lxc.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2020 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.connection import lxc
+from ansible.playbook.play_context import PlayContext
+
+
+class TestLXCConnectionClass(unittest.TestCase):
+
+ def test_lxc_connection_module(self):
+ play_context = PlayContext()
+ play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ in_stream = StringIO()
+
+ self.assertIsInstance(lxc.Connection(play_context, in_stream), lxc.Connection)
diff --git a/ansible_collections/community/general/tests/unit/plugins/filter/test_crc32.py b/ansible_collections/community/general/tests/unit/plugins/filter/test_crc32.py
new file mode 100644
index 000000000..820104513
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/filter/test_crc32.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Julien Riou <julien@riou.xyz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.filter.crc32 import crc32s
+
+
+class TestFilterCrc32(unittest.TestCase):
+
+ def test_checksum(self):
+ self.assertEqual(crc32s('test'), 'd87f7e0c')
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd
new file mode 100644
index 000000000..dd6baa885
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd
@@ -0,0 +1,174 @@
+{
+ "instances":{
+ "vlantest":{
+ "instances":{
+ "metadata":{
+ "config":{
+ "image.os":"ubuntu",
+ "image.release":"focal",
+ "image.version":"20.04",
+ "volatile.last_state.power":"RUNNING"
+ },
+ "devices":{
+ "eth0":{
+ "name":"eth0",
+ "network":"my-macvlan",
+ "type":"nic"
+ }
+ },
+ "profiles":[
+ "default"
+ ],
+ "expanded_devices":{
+ "eth0":{
+ "name":"eth0",
+ "network":"my-macvlan",
+ "type":"nic"
+ }
+ },
+ "name":"vlantest",
+ "status":"Running",
+ "location":"Berlin"
+ }
+ },
+ "state":{
+ "metadata":{
+ "status":"Running",
+ "network":{
+ "eth0":{
+ "addresses":[
+ {
+ "family":"inet",
+ "address":"10.98.143.199",
+ "netmask":"24",
+ "scope":"global"
+ },
+ {
+ "family":"inet6",
+ "address":"fd42:bd00:7b11:2167:216:3eff:fe78:2ef3",
+ "netmask":"64",
+ "scope":"global"
+ },
+ {
+ "family":"inet6",
+ "address":"fe80::216:3eff:fed3:7af3",
+ "netmask":"64",
+ "scope":"link"
+ }
+ ]
+ },
+ "lo":{
+ "addresses":[
+ {
+ "family":"inet",
+ "address":"127.0.0.1",
+ "netmask":"8",
+ "scope":"local"
+ },
+ {
+ "family":"inet6",
+ "address":"::1",
+ "netmask":"128",
+ "scope":"local"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+
+ "networks":{
+ "my-macvlan":{
+ "state":{
+ "metadata":{
+ "addresses":[
+ {
+ "family":"inet",
+ "address":"192.168.178.199",
+ "netmask":"24",
+ "scope":"global"
+ },
+ {
+ "family":"inet6",
+ "address":"fd42:bd00:7b11:2167:216:3eff:fe78:2ef3",
+ "netmask":"64",
+ "scope":"global"
+ },
+ {
+ "family":"inet6",
+ "address":"fe80::216:3eff:fed3:7af3",
+ "netmask":"64",
+ "scope":"link"
+ }
+ ],
+ "vlan":{
+ "lower_device":"eno1",
+ "vid":666
+ }
+ }
+ }
+ },
+ "lo":{
+ "state":{
+ "metadata":{
+ "addresses":[
+ {
+ "family":"inet",
+ "address":"127.0.0.1",
+ "netmask":"8",
+ "scope":"local"
+ },
+ {
+ "family":"inet6",
+ "address":"::1",
+ "netmask":"128",
+ "scope":"local"
+ }
+ ],
+ "vlan":null
+ }
+ }
+ },
+ "eno1":{
+ "state":{
+ "metadata":{
+ "addresses":[
+ {
+ "family":"inet",
+ "address":"192.168.178.126",
+ "netmask":"24",
+ "scope":"global"
+ },
+ {
+ "family":"inet6",
+ "address":"fe80::3c0b:7da9:3cc7:9e40",
+ "netmask":"64",
+ "scope":"link"
+ }
+ ],
+ "vlan":null
+ }
+ }
+ },
+ "eno1.666":{
+ "state":{
+ "metadata":{
+ "addresses":[
+ {
+ "family":"inet6",
+ "address":"fe80::de4a:3eff:fe8d:f356",
+ "netmask":"64",
+ "scope":"link"
+ }
+ ],
+ "vlan":{
+ "lower_device":"eno1",
+ "vid":666
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd.license b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/lxd_inventory.atd.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json
new file mode 100644
index 000000000..f7be74f90
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json
@@ -0,0 +1,222 @@
+[
+ {
+ "DEPLOY_ID": "bcfec9d9-c0d0-4523-b5e7-62993947e94c",
+ "ETIME": 0,
+ "GID": 105,
+ "GNAME": "SW",
+ "HISTORY_RECORDS": {},
+ "ID": 451,
+ "LAST_POLL": 0,
+ "LCM_STATE": 3,
+ "MONITORING": {},
+ "NAME": "terraform_demo_00",
+ "RESCHED": 0,
+ "STATE": 3,
+ "STIME": 1649886492,
+ "TEMPLATE": {
+ "NIC": [
+ {
+ "AR_ID": "0",
+ "BRIDGE": "mgmt0",
+ "BRIDGE_TYPE": "linux",
+ "CLUSTER_ID": "0",
+ "IP": "192.168.11.248",
+ "MAC": "02:00:c0:a8:2b:bb",
+ "MODEL": "virtio",
+ "NAME": "NIC0",
+ "NETWORK": "Infrastructure",
+ "NETWORK_ID": "0",
+ "NIC_ID": "0",
+ "SECURITY_GROUPS": "0,101",
+ "TARGET": "one-453-0",
+ "VLAN_ID": "12",
+ "VN_MAD": "802.1Q"
+ }
+ ],
+ "NIC_DEFAULT": {
+ "MODEL": "virtio"
+ },
+ "TEMPLATE_ID": "28",
+ "TM_MAD_SYSTEM": "shared",
+ "VCPU": "4",
+ "VMID": "453"
+ },
+ "USER_TEMPLATE": {
+ "GUEST_OS": "linux",
+ "INPUTS_ORDER": "",
+ "LABELS": "foo,bench",
+ "LOGO": "images/logos/linux.png",
+ "MEMORY_UNIT_COST": "MB",
+ "SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
+ "TGROUP": "bench_clients"
+ }
+ },
+ {
+ "DEPLOY_ID": "25895435-5e3a-4d50-a025-e03a7a463abd",
+ "ETIME": 0,
+ "GID": 105,
+ "GNAME": "SW",
+ "HISTORY_RECORDS": {},
+ "ID": 451,
+ "LAST_POLL": 0,
+ "LCM_STATE": 3,
+ "MONITORING": {},
+ "NAME": "terraform_demo_01",
+ "RESCHED": 0,
+ "STATE": 3,
+ "STIME": 1649886492,
+ "TEMPLATE": {
+ "NIC": [
+ {
+ "AR_ID": "0",
+ "BRIDGE": "mgmt0",
+ "BRIDGE_TYPE": "linux",
+ "CLUSTER_ID": "0",
+ "IP": "192.168.11.241",
+ "MAC": "02:00:c0:a8:4b:bb",
+ "MODEL": "virtio",
+ "NAME": "NIC0",
+ "NETWORK": "Infrastructure",
+ "NETWORK_ID": "0",
+ "NIC_ID": "0",
+ "SECURITY_GROUPS": "0,101",
+ "TARGET": "one-451-0",
+ "VLAN_ID": "12",
+ "VN_MAD": "802.1Q"
+ }
+ ],
+ "NIC_DEFAULT": {
+ "MODEL": "virtio"
+ },
+ "TEMPLATE_ID": "28",
+ "TM_MAD_SYSTEM": "shared",
+ "VCPU": "4",
+ "VMID": "451"
+ },
+ "USER_TEMPLATE": {
+ "GUEST_OS": "linux",
+ "INPUTS_ORDER": "",
+ "LABELS": "foo,bench",
+ "LOGO": "images/logos/linux.png",
+ "MEMORY_UNIT_COST": "MB",
+ "SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
+ "TESTATTR": "testvar",
+ "TGROUP": "bench_clients"
+ }
+ },
+ {
+ "DEPLOY_ID": "2b00c379-3601-45ee-acf5-e7b3ff2b7bca",
+ "ETIME": 0,
+ "GID": 105,
+ "GNAME": "SW",
+ "HISTORY_RECORDS": {},
+ "ID": 451,
+ "LAST_POLL": 0,
+ "LCM_STATE": 3,
+ "MONITORING": {},
+ "NAME": "terraform_demo_srv_00",
+ "RESCHED": 0,
+ "STATE": 3,
+ "STIME": 1649886492,
+ "TEMPLATE": {
+ "NIC": [
+ {
+ "AR_ID": "0",
+ "BRIDGE": "mgmt0",
+ "BRIDGE_TYPE": "linux",
+ "CLUSTER_ID": "0",
+ "IP": "192.168.11.247",
+ "MAC": "02:00:c0:a8:0b:cc",
+ "MODEL": "virtio",
+ "NAME": "NIC0",
+ "NETWORK": "Infrastructure",
+ "NETWORK_ID": "0",
+ "NIC_ID": "0",
+ "SECURITY_GROUPS": "0,101",
+ "TARGET": "one-452-0",
+ "VLAN_ID": "12",
+ "VN_MAD": "802.1Q"
+ }
+ ],
+ "NIC_DEFAULT": {
+ "MODEL": "virtio"
+ },
+ "TEMPLATE_ID": "28",
+ "TM_MAD_SYSTEM": "shared",
+ "VCPU": "4",
+ "VMID": "452"
+ },
+ "USER_TEMPLATE": {
+ "GUEST_OS": "linux",
+ "INPUTS_ORDER": "",
+ "LABELS": "serv,bench",
+ "LOGO": "images/logos/linux.png",
+ "MEMORY_UNIT_COST": "MB",
+ "SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
+ "TGROUP": "bench_server"
+ }
+ },
+ {
+ "DEPLOY_ID": "97037f55-dd2c-4549-8d24-561a6569e870",
+ "ETIME": 0,
+ "GID": 105,
+ "GNAME": "SW",
+ "HISTORY_RECORDS": {},
+ "ID": 311,
+ "LAST_POLL": 0,
+ "LCM_STATE": 3,
+ "MONITORING": {},
+ "NAME": "bs-windows",
+ "RESCHED": 0,
+ "STATE": 3,
+ "STIME": 1648076254,
+ "TEMPLATE": {
+ "NIC": [
+ {
+ "AR_ID": "0",
+ "BRIDGE": "mgmt0",
+ "BRIDGE_TYPE": "linux",
+ "CLUSTER_ID": "0",
+ "IP": "192.168.11.209",
+ "MAC": "02:00:c0:a8:0b:dd",
+ "MODEL": "virtio",
+ "NAME": "NIC0",
+ "NETWORK": "Infrastructure",
+ "NETWORK_ID": "0",
+ "NETWORK_UNAME": "admin",
+ "NIC_ID": "0",
+ "SECURITY_GROUPS": "0,101",
+ "TARGET": "one-311-0",
+ "VLAN_ID": "12",
+ "VN_MAD": "802.1Q"
+ },
+ [
+ "TEMPLATE_ID",
+ "23"
+ ],
+ [
+ "TM_MAD_SYSTEM",
+ "shared"
+ ],
+ [
+ "VCPU",
+ "4"
+ ],
+ [
+ "VMID",
+ "311"
+ ]
+ ]
+ },
+ "UID": 22,
+ "UNAME": "bsanders",
+ "USER_TEMPLATE": {
+ "GUEST_OS": "windows",
+ "INPUTS_ORDER": "",
+ "LABELS": "serv",
+ "HYPERVISOR": "kvm",
+ "SCHED_REQUIREMENTS": "ARCH=\"x86_64\"",
+ "SET_HOSTNAME": "windows"
+ }
+ }
+]
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json.license b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/fixtures/opennebula_inventory.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py
new file mode 100644
index 000000000..a09001ad6
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_cobbler.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Orion Poplawski <orion@nwra.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.inventory.cobbler import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ return InventoryModule()
+
+
+def test_init_cache(inventory):
+ inventory._init_cache()
+ assert inventory._cache[inventory.cache_key] == {}
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.cobbler.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.cobbler.yml') is False
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_icinga2.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_icinga2.py
new file mode 100644
index 000000000..e3928b0db
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_icinga2.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Cliff Hults <cliff.hlts@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The host data used in these tests is modelled on Icinga 2 API responses.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.icinga2 import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.icinga2.yml') is False
+
+
+def check_api():
+ return True
+
+
+# NOTE: when updating/adding replies to this function,
+# be sure to add only the _contents_ of the 'data' dict in the API reply
+def query_hosts(hosts=None, attrs=None, joins=None, host_filter=None):
+ # _get_hosts - list of dicts
+ json_host_data = [
+ {
+ 'attrs': {
+ 'address': 'test-host1.home.local',
+ 'groups': ['home_servers', 'servers_dell'],
+ 'display_name': 'Test Host 1',
+ 'state': 0.0,
+ 'state_type': 1.0
+ },
+ 'joins': {},
+ 'meta': {},
+ 'name': 'test-host1',
+ 'type': 'Host'
+ },
+ {
+ 'attrs': {
+ 'address': 'test-host2.home.local',
+ 'display_name': 'Test Host 2',
+ 'groups': ['home_servers', 'servers_hp'],
+ 'state': 1.0,
+ 'state_type': 1.0
+ },
+ 'joins': {},
+ 'meta': {},
+ 'name': 'test-host2',
+ 'type': 'Host'
+ },
+ {
+ 'attrs': {
+ 'address': '',
+ 'display_name': 'Test Host 3',
+ 'groups': ['not_home_servers', 'servers_hp'],
+ 'state': 1.0,
+ 'state_type': 1.0
+ },
+ 'joins': {},
+ 'meta': {},
+ 'name': 'test-host3.example.com',
+ 'type': 'Host'
+ }
+ ]
+ return json_host_data
+
+
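+# minimal stand-in for the plugin's get_option(): the constructed-inventory
+# options (groups, keyed_groups, compose, strict) get empty defaults,
+# everything else returns None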
+def get_option(option):
+ if option == 'groups':
+ return {}
+ elif option == 'keyed_groups':
+ return []
+ elif option == 'compose':
+ return {}
+ elif option == 'strict':
+ return False
+ else:
+ return None
+
+
+def test_populate(inventory, mocker):
+ # module settings
+ inventory.icinga2_user = 'ansible'
+ inventory.icinga2_password = 'password'
+    inventory.icinga2_url = 'https://localhost:5665/v1'
+ inventory.inventory_attr = "address"
+
+ # bypass authentication and API fetch calls
+ inventory._check_api = mocker.MagicMock(side_effect=check_api)
+ inventory._query_hosts = mocker.MagicMock(side_effect=query_hosts)
+ inventory.get_option = mocker.MagicMock(side_effect=get_option)
+ inventory._populate()
+
+ # get different hosts
+ host1_info = inventory.inventory.get_host('test-host1.home.local')
+ print(host1_info)
+ host2_info = inventory.inventory.get_host('test-host2.home.local')
+ print(host2_info)
+ host3_info = inventory.inventory.get_host('test-host3.example.com')
+ assert inventory.inventory.get_host('test-host3.example.com') is not None
+ print(host3_info)
+
+ # check if host in the home_servers group
+ assert 'home_servers' in inventory.inventory.groups
+ group1_data = inventory.inventory.groups['home_servers']
+ group1_test_data = [host1_info, host2_info]
+ print(group1_data.hosts)
+ print(group1_test_data)
+ assert group1_data.hosts == group1_test_data
+ # Test servers_hp group
+ group2_data = inventory.inventory.groups['servers_hp']
+ group2_test_data = [host2_info, host3_info]
+ print(group2_data.hosts)
+ print(group2_test_data)
+ assert group2_data.hosts == group2_test_data
+
+ # check if host state rules apply properly
+ assert host1_info.get_vars()['state'] == 'on'
+ assert host1_info.get_vars()['display_name'] == "Test Host 1"
+ assert host2_info.get_vars()['state'] == 'off'
+ assert host3_info.get_vars().get('ansible_host') is None
+
+    # Confirm attribute options switcher: use 'name' as the inventory hostname
+ inventory.inventory_attr = "name"
+ inventory._populate()
+ assert inventory.inventory.get_host('test-host3.example.com') is not None
+ host2_info = inventory.inventory.get_host('test-host2')
+ assert host2_info is not None
+ assert host2_info.get_vars().get('ansible_host') == 'test-host2.home.local'
+
+    # Confirm attribute options switcher: use 'display_name' as the inventory hostname
+ inventory.inventory_attr = "display_name"
+ inventory._populate()
+ assert inventory.inventory.get_host('Test Host 3') is not None
+ host2_info = inventory.inventory.get_host('Test Host 2')
+ assert host2_info is not None
+ assert host2_info.get_vars().get('ansible_host') == 'test-host2.home.local'
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py
new file mode 100644
index 000000000..a4f556761
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_linode.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 Luke Murphy <lukewm@riseup.net>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import sys
+
+linode_apiv4 = pytest.importorskip('linode_api4')
+mandatory_py_version = pytest.mark.skipif(
+ sys.version_info < (2, 7),
+ reason='The linode_api4 dependency requires python2.7 or higher'
+)
+
+
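+# these imports are intentionally placed after the importorskip call above, so
+# the whole module is skipped cleanly when linode_api4 is not installed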
+from ansible.errors import AnsibleError
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+from ansible_collections.community.general.plugins.inventory.linode import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ plugin = InventoryModule()
+ plugin.templar = Templar(loader=DataLoader())
+ return plugin
+
+
+def test_missing_access_token_lookup(inventory):
+ loader = DataLoader()
+ inventory._options = {'access_token': None}
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._build_client(loader)
+    assert 'Could not retrieve Linode access token' in str(error_message.value)
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.linode.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.linode.yml') is False
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_lxd.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_lxd.py
new file mode 100644
index 000000000..a1f31fdc5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_lxd.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Frank Dornheim <dornheim@posteo.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.lxd import InventoryModule
+
+
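+# expected host vars and group membership derived from the lxd_inventory.atd
+# fixture; GROUP_CONFIG below drives the groupby settings under test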
+HOST_COMPARATIVE_DATA = {
+ 'ansible_connection': 'ssh', 'ansible_host': '10.98.143.199', 'ansible_lxd_os': 'ubuntu', 'ansible_lxd_release': 'focal',
+ 'ansible_lxd_profile': ['default'], 'ansible_lxd_state': 'running', 'ansible_lxd_location': 'Berlin',
+ 'ansible_lxd_vlan_ids': {'my-macvlan': 666}, 'inventory_hostname': 'vlantest', 'inventory_hostname_short': 'vlantest'}
+GROUP_COMPARATIVE_DATA = {
+ 'all': [], 'ungrouped': [], 'testpattern': ['vlantest'], 'vlan666': ['vlantest'], 'locationBerlin': ['vlantest'],
+ 'osUbuntu': ['vlantest'], 'releaseFocal': ['vlantest'], 'releaseBionic': [], 'profileDefault': ['vlantest'],
+ 'profileX11': [], 'netRangeIPv4': ['vlantest'], 'netRangeIPv6': ['vlantest']}
+GROUP_CONFIG = {
+ 'testpattern': {'type': 'pattern', 'attribute': 'test'},
+ 'vlan666': {'type': 'vlanid', 'attribute': 666},
+ 'locationBerlin': {'type': 'location', 'attribute': 'Berlin'},
+ 'osUbuntu': {'type': 'os', 'attribute': 'ubuntu'},
+ 'releaseFocal': {'type': 'release', 'attribute': 'focal'},
+ 'releaseBionic': {'type': 'release', 'attribute': 'bionic'},
+ 'profileDefault': {'type': 'profile', 'attribute': 'default'},
+ 'profileX11': {'type': 'profile', 'attribute': 'x11'},
+ 'netRangeIPv4': {'type': 'network_range', 'attribute': '10.98.143.0/24'},
+ 'netRangeIPv6': {'type': 'network_range', 'attribute': 'fd42:bd00:7b11:2167:216:3eff::/96'}}
+
+
+@pytest.fixture
+def inventory():
+ inv = InventoryModule()
+ inv.inventory = InventoryData()
+
+ # Test Values
+ inv.data = inv.load_json_data('tests/unit/plugins/inventory/fixtures/lxd_inventory.atd') # Load Test Data
+    inv.groupby = GROUP_CONFIG
+ inv.prefered_instance_network_interface = 'eth'
+ inv.prefered_instance_network_family = 'inet'
+ inv.filter = 'running'
+ inv.dump_data = False
+ inv.type_filter = 'both'
+
+ return inv
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.lxd.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.lxd.yml') is False
+
+
+def test_build_inventory_hosts(inventory):
+ """Load example data and start the inventoryto test the host generation.
+
+ After the inventory plugin has run with the test data, the result of the host is checked."""
+ inventory._populate()
+ generated_data = inventory.inventory.get_host('vlantest').get_vars()
+
+    for key, value in HOST_COMPARATIVE_DATA.items():
+        assert generated_data[key] == value
+
+
+def test_build_inventory_groups(inventory):
+ """Load example data and start the inventory to test the group generation.
+
+ After the inventory plugin has run with the test data, the result of the host is checked."""
+ inventory._populate()
+ generated_data = inventory.inventory.get_groups_dict()
+
+    for key, value in GROUP_COMPARATIVE_DATA.items():
+        assert generated_data[key] == value
+
+
+def test_build_inventory_groups_with_no_groupselection(inventory):
+ """Load example data and start the inventory to test the group generation with groupby is none.
+
+ After the inventory plugin has run with the test data, the result of the host is checked."""
+ inventory.groupby = None
+ inventory._populate()
+ generated_data = inventory.inventory.get_groups_dict()
+ group_comparative_data = {'all': [], 'ungrouped': []}
+
+    print("data: {0}".format(generated_data))
+    for key, value in group_comparative_data.items():
+        assert generated_data[key] == value
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_opennebula.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_opennebula.py
new file mode 100644
index 000000000..bbc2fe699
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_opennebula.py
@@ -0,0 +1,346 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ <support@feldhost.cz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The API responses used in these tests were recorded from OpenNebula version 5.10.
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from collections import OrderedDict
+import json
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+from ansible.parsing.dataloader import DataLoader
+from ansible.template import Templar
+from ansible_collections.community.general.plugins.inventory.opennebula import InventoryModule
+from ansible_collections.community.general.tests.unit.compat.mock import create_autospec
+
+
+@pytest.fixture
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.opennebula.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.opennebula.yml') is False
+
+
+def get_vm_pool_json():
+ with open('tests/unit/plugins/inventory/fixtures/opennebula_inventory.json', 'r') as json_file:
+ jsondata = json.load(json_file)
+
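+    # build a stand-in for the pyone VM_POOL binding: a dynamic type() whose
+    # VM list entries expose each fixture dict's keys as attributes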
+ data = type('pyone.bindings.VM_POOLSub', (object,), {'VM': []})()
+
+ for fake_server in jsondata:
+ data.VM.append(type('pyone.bindings.VMType90Sub', (object,), fake_server)())
+
+ return data
+
+
+def get_vm_pool():
+ data = type('pyone.bindings.VM_POOLSub', (object,), {'VM': []})()
+
+ vm = type('pyone.bindings.VMType90Sub', (object,), {
+ 'DEPLOY_ID': 'one-7157',
+ 'ETIME': 0,
+ 'GID': 132,
+ 'GNAME': 'CSApparelVDC',
+ 'HISTORY_RECORDS': {},
+ 'ID': 7157,
+ 'LAST_POLL': 1632762935,
+ 'LCM_STATE': 3,
+ 'MONITORING': {},
+ 'NAME': 'sam-691-sam',
+ 'RESCHED': 0,
+ 'SNAPSHOTS': [],
+ 'STATE': 3,
+ 'STIME': 1632755245,
+ 'TEMPLATE': OrderedDict({
+ 'NIC': OrderedDict({
+ 'AR_ID': '0',
+ 'BRIDGE': 'onebr80',
+ 'BRIDGE_TYPE': 'linux',
+ 'CLUSTER_ID': '0',
+ 'IP': '172.22.4.187',
+ 'MAC': '02:00:ac:16:04:bb',
+ 'MTU': '8192',
+ 'NAME': 'NIC0',
+ 'NETWORK': 'Private Net CSApparel',
+ 'NETWORK_ID': '80',
+ 'NETWORK_UNAME': 'CSApparelVDC-admin',
+ 'NIC_ID': '0',
+ 'PHYDEV': 'team0',
+ 'SECURITY_GROUPS': '0',
+ 'TARGET': 'one-7157-0',
+ 'VLAN_ID': '480',
+ 'VN_MAD': '802.1Q'
+ })
+ }),
+ 'USER_TEMPLATE': OrderedDict({
+ 'HYPERVISOR': 'kvm',
+ 'INPUTS_ORDER': '',
+ 'LOGO': 'images/logos/centos.png',
+ 'MEMORY_UNIT_COST': 'MB',
+ 'SCHED_REQUIREMENTS': 'CLUSTER_ID="0"'
+ })
+ })()
+ data.VM.append(vm)
+
+ vm = type('pyone.bindings.VMType90Sub', (object,), {
+ 'DEPLOY_ID': 'one-327',
+ 'ETIME': 0,
+ 'GID': 0,
+ 'GNAME': 'oneadmin',
+ 'HISTORY_RECORDS': {},
+ 'ID': 327,
+ 'LAST_POLL': 1632763543,
+ 'LCM_STATE': 3,
+ 'MONITORING': {},
+ 'NAME': 'zabbix-327',
+ 'RESCHED': 0,
+ 'SNAPSHOTS': [],
+ 'STATE': 3,
+ 'STIME': 1575410106,
+ 'TEMPLATE': OrderedDict({
+ 'NIC': [
+ OrderedDict({
+ 'AR_ID': '0',
+ 'BRIDGE': 'onerb.103',
+ 'BRIDGE_TYPE': 'linux',
+ 'IP': '185.165.1.1',
+ 'IP6_GLOBAL': '2000:a001::b9ff:feae:aa0d',
+ 'IP6_LINK': 'fe80::b9ff:feae:aa0d',
+ 'MAC': '02:00:b9:ae:aa:0d',
+ 'NAME': 'NIC0',
+ 'NETWORK': 'Public',
+ 'NETWORK_ID': '7',
+ 'NIC_ID': '0',
+ 'PHYDEV': 'team0',
+ 'SECURITY_GROUPS': '0',
+ 'TARGET': 'one-327-0',
+ 'VLAN_ID': '100',
+ 'VN_MAD': '802.1Q'
+ }),
+ OrderedDict({
+ 'AR_ID': '0',
+ 'BRIDGE': 'br0',
+ 'BRIDGE_TYPE': 'linux',
+ 'CLUSTER_ID': '0',
+ 'IP': '192.168.1.1',
+ 'MAC': '02:00:c0:a8:3b:01',
+ 'NAME': 'NIC1',
+ 'NETWORK': 'Management',
+ 'NETWORK_ID': '11',
+ 'NIC_ID': '1',
+ 'SECURITY_GROUPS': '0',
+ 'TARGET': 'one-327-1',
+ 'VN_MAD': 'bridge'
+ })
+ ]
+ }),
+ 'USER_TEMPLATE': OrderedDict({
+ 'HYPERVISOR': 'kvm',
+ 'INPUTS_ORDER': '',
+ 'LABELS': 'Oracle Linux',
+ 'LOGO': 'images/logos/centos.png',
+ 'MEMORY_UNIT_COST': 'MB',
+ 'SAVED_TEMPLATE_ID': '29'
+ })
+ })()
+ data.VM.append(vm)
+
+ vm = type('pyone.bindings.VMType90Sub', (object,), {
+ 'DEPLOY_ID': 'one-107',
+ 'ETIME': 0,
+ 'GID': 0,
+ 'GNAME': 'oneadmin',
+ 'HISTORY_RECORDS': {},
+ 'ID': 107,
+ 'LAST_POLL': 1632764186,
+ 'LCM_STATE': 3,
+ 'MONITORING': {},
+ 'NAME': 'gitlab-107',
+ 'RESCHED': 0,
+ 'SNAPSHOTS': [],
+ 'STATE': 3,
+ 'STIME': 1572485522,
+ 'TEMPLATE': OrderedDict({
+ 'NIC': OrderedDict({
+ 'AR_ID': '0',
+ 'BRIDGE': 'onerb.103',
+ 'BRIDGE_TYPE': 'linux',
+ 'IP': '185.165.1.3',
+ 'IP6_GLOBAL': '2000:a001::b9ff:feae:aa03',
+ 'IP6_LINK': 'fe80::b9ff:feae:aa03',
+ 'MAC': '02:00:b9:ae:aa:03',
+ 'NAME': 'NIC0',
+ 'NETWORK': 'Public',
+ 'NETWORK_ID': '7',
+ 'NIC_ID': '0',
+ 'PHYDEV': 'team0',
+ 'SECURITY_GROUPS': '0',
+ 'TARGET': 'one-107-0',
+ 'VLAN_ID': '100',
+ 'VN_MAD': '802.1Q'
+ })
+ }),
+ 'USER_TEMPLATE': OrderedDict({
+ 'HYPERVISOR': 'kvm',
+ 'INPUTS_ORDER': '',
+ 'LABELS': 'Gitlab,Centos',
+ 'LOGO': 'images/logos/centos.png',
+ 'MEMORY_UNIT_COST': 'MB',
+ 'SCHED_REQUIREMENTS': 'ID="0" | ID="1" | ID="2"',
+ 'SSH_PORT': '8822'
+ })
+ })()
+ data.VM.append(vm)
+
+ return data
+
+
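+# baseline plugin options used by the tests; options_constructable_test below
+# layers constructed-groups settings (compose, groups, keyed_groups) on top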
+options_base_test = {
+ 'api_url': 'https://opennebula:2633/RPC2',
+ 'api_username': 'username',
+ 'api_password': 'password',
+ 'api_authfile': '~/.one/one_auth',
+ 'hostname': 'v4_first_ip',
+ 'group_by_labels': True,
+ 'filter_by_label': None,
+}
+
+options_constructable_test = options_base_test.copy()
+options_constructable_test.update({
+ 'compose': {'is_linux': "GUEST_OS == 'linux'"},
+ 'filter_by_label': 'bench',
+ 'groups': {
+ 'benchmark_clients': "TGROUP.endswith('clients')",
+ 'lin': 'is_linux == True'
+ },
+ 'keyed_groups': [{'key': 'TGROUP', 'prefix': 'tgroup'}],
+
+})
+
+
+# given a dictionary `opts_dict`, return a function that behaves like ansible's inventory get_option
+def mk_get_options(opts_dict):
+ def inner(opt):
+ return opts_dict.get(opt, False)
+ return inner
+
+
+def test_get_connection_info(inventory, mocker):
+ inventory.get_option = mocker.MagicMock(side_effect=mk_get_options(options_base_test))
+
+ auth = inventory._get_connection_info()
+ assert (auth.username and auth.password)
+
+
+def test_populate_constructable_templating(inventory, mocker):
+ # bypass API fetch call
+ inventory._get_vm_pool = mocker.MagicMock(side_effect=get_vm_pool_json)
+ inventory.get_option = mocker.MagicMock(side_effect=mk_get_options(options_constructable_test))
+
+ # the templating engine is needed for the constructable groups/vars
+ # so give that some fake data and instantiate it.
+ fake_config_filepath = '/fake/opennebula.yml'
+ fake_cache = {fake_config_filepath: options_constructable_test.copy()}
+ fake_cache[fake_config_filepath]['plugin'] = 'community.general.opennebula'
+ dataloader = create_autospec(DataLoader, instance=True)
+ dataloader._FILE_CACHE = fake_cache
+ inventory.templar = Templar(loader=dataloader)
+
+ inventory._populate()
+
+    # note the vm_pool (and json data file) has four hosts, but
+    # options_constructable_test filters one of them out via filter_by_label
+ assert len(get_vm_pool_json().VM) == 4
+ assert set([vm.NAME for vm in get_vm_pool_json().VM]) == set([
+ 'terraform_demo_00',
+ 'terraform_demo_01',
+ 'terraform_demo_srv_00',
+ 'bs-windows',
+ ])
+ assert set(inventory.inventory.hosts) == set(['terraform_demo_00', 'terraform_demo_01', 'terraform_demo_srv_00'])
+
+ host_demo00 = inventory.inventory.get_host('terraform_demo_00')
+ host_demo01 = inventory.inventory.get_host('terraform_demo_01')
+ host_demosrv = inventory.inventory.get_host('terraform_demo_srv_00')
+
+ assert 'benchmark_clients' in inventory.inventory.groups
+ assert 'lin' in inventory.inventory.groups
+ assert inventory.inventory.groups['benchmark_clients'].hosts == [host_demo00, host_demo01]
+ assert inventory.inventory.groups['lin'].hosts == [host_demo00, host_demo01, host_demosrv]
+
+ # test group by label:
+ assert 'bench' in inventory.inventory.groups
+ assert 'foo' in inventory.inventory.groups
+ assert inventory.inventory.groups['bench'].hosts == [host_demo00, host_demo01, host_demosrv]
+ assert inventory.inventory.groups['serv'].hosts == [host_demosrv]
+ assert inventory.inventory.groups['foo'].hosts == [host_demo00, host_demo01]
+
+    # test that `compose` turns GUEST_OS == 'linux' into is_linux == True
+ assert host_demo00.get_vars()['GUEST_OS'] == 'linux'
+ assert host_demo00.get_vars()['is_linux'] is True
+
+ # test `keyed_groups`
+ assert inventory.inventory.groups['tgroup_bench_clients'].hosts == [host_demo00, host_demo01]
+ assert inventory.inventory.groups['tgroup_bench_server'].hosts == [host_demosrv]
+
+
+def test_populate(inventory, mocker):
+ # bypass API fetch call
+ inventory._get_vm_pool = mocker.MagicMock(side_effect=get_vm_pool)
+ inventory.get_option = mocker.MagicMock(side_effect=mk_get_options(options_base_test))
+ inventory._populate()
+
+ # get different hosts
+ host_sam = inventory.inventory.get_host('sam-691-sam')
+ host_zabbix = inventory.inventory.get_host('zabbix-327')
+ host_gitlab = inventory.inventory.get_host('gitlab-107')
+
+ # test if groups exists
+ assert 'Gitlab' in inventory.inventory.groups
+ assert 'Centos' in inventory.inventory.groups
+ assert 'Oracle_Linux' in inventory.inventory.groups
+
+ # check if host_zabbix is in Oracle_Linux group
+ group_oracle_linux = inventory.inventory.groups['Oracle_Linux']
+ assert group_oracle_linux.hosts == [host_zabbix]
+
+ # check if host_gitlab is in Gitlab and Centos group
+ group_gitlab = inventory.inventory.groups['Gitlab']
+ group_centos = inventory.inventory.groups['Centos']
+ assert group_gitlab.hosts == [host_gitlab]
+ assert group_centos.hosts == [host_gitlab]
+
+ # check IPv4 address
+ assert '172.22.4.187' == host_sam.get_vars()['v4_first_ip']
+
+ # check IPv6 address
+ assert '2000:a001::b9ff:feae:aa0d' == host_zabbix.get_vars()['v6_first_ip']
+
+ # check ansible_hosts
+ assert '172.22.4.187' == host_sam.get_vars()['ansible_host']
+ assert '185.165.1.1' == host_zabbix.get_vars()['ansible_host']
+ assert '185.165.1.3' == host_gitlab.get_vars()['ansible_host']
+
+ # check for custom ssh port
+ assert '8822' == host_gitlab.get_vars()['ansible_port']
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py
new file mode 100644
index 000000000..13832c938
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_proxmox.py
@@ -0,0 +1,747 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Jeffrey van Pelt <jeff@vanpelt.one>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The API responses used in these tests were recorded from PVE version 6.2.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.proxmox import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.proxmox.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.proxmox.yml') is False
+
+
+def get_auth():
+ return True
+
+
+# NOTE: when updating/adding replies to this function,
+# be sure to add only the _contents_ of the 'data' dict in the API reply
+def get_json(url):
+ if url == "https://localhost:8006/api2/json/nodes":
+ # _get_nodes
+ return [{"type": "node",
+ "cpu": 0.01,
+ "maxdisk": 500,
+ "mem": 500,
+ "node": "testnode",
+ "id": "node/testnode",
+ "maxcpu": 1,
+ "status": "online",
+ "ssl_fingerprint": "xx",
+ "disk": 1000,
+ "maxmem": 1000,
+ "uptime": 10000,
+ "level": ""},
+ {"type": "node",
+ "node": "testnode2",
+ "id": "node/testnode2",
+ "status": "offline",
+ "ssl_fingerprint": "yy"}]
+ elif url == "https://localhost:8006/api2/json/pools":
+ # _get_pools
+ return [{"poolid": "test"}]
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc":
+ # _get_lxc_per_node
+ return [{"cpus": 1,
+ "name": "test-lxc",
+ "cpu": 0.01,
+ "diskwrite": 0,
+ "lock": "",
+ "maxmem": 1000,
+ "template": "",
+ "diskread": 0,
+ "mem": 1000,
+ "swap": 0,
+ "type": "lxc",
+ "maxswap": 0,
+ "maxdisk": "1000",
+ "netout": 1000,
+ "pid": "1000",
+ "netin": 1000,
+ "status": "running",
+ "vmid": "100",
+ "disk": "1000",
+ "uptime": 1000}]
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu":
+ # _get_qemu_per_node
+ return [{"name": "test-qemu",
+ "cpus": 1,
+ "mem": 1000,
+ "template": "",
+ "diskread": 0,
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 1000,
+ "pid": "1001",
+ "netin": 1000,
+ "maxdisk": 1000,
+ "vmid": "101",
+ "uptime": 1000,
+ "disk": 0,
+ "status": "running"},
+ {"name": "test-qemu-windows",
+ "cpus": 1,
+ "mem": 1000,
+ "template": "",
+ "diskread": 0,
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 1000,
+ "pid": "1001",
+ "netin": 1000,
+ "maxdisk": 1000,
+ "vmid": "102",
+ "uptime": 1000,
+ "disk": 0,
+ "status": "running"},
+ {"name": "test-qemu-multi-nic",
+ "cpus": 1,
+ "mem": 1000,
+ "template": "",
+ "diskread": 0,
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 1000,
+ "pid": "1001",
+ "netin": 1000,
+ "maxdisk": 1000,
+ "vmid": "103",
+ "uptime": 1000,
+ "disk": 0,
+ "status": "running"},
+ {"name": "test-qemu-template",
+ "cpus": 1,
+ "mem": 0,
+ "template": 1,
+ "diskread": 0,
+ "cpu": 0,
+ "maxmem": 1000,
+ "diskwrite": 0,
+ "netout": 0,
+ "pid": "1001",
+ "netin": 0,
+ "maxdisk": 1000,
+ "vmid": "9001",
+ "uptime": 0,
+ "disk": 0,
+ "status": "stopped"}]
+ elif url == "https://localhost:8006/api2/json/pools/test":
+ # _get_members_per_pool
+ return {"members": [{"uptime": 1000,
+ "template": 0,
+ "id": "qemu/101",
+ "mem": 1000,
+ "status": "running",
+ "cpu": 0.01,
+ "maxmem": 1000,
+ "diskwrite": 1000,
+ "name": "test-qemu",
+ "netout": 1000,
+ "netin": 1000,
+ "vmid": 101,
+ "node": "testnode",
+ "maxcpu": 1,
+ "type": "qemu",
+ "maxdisk": 1000,
+ "disk": 0,
+ "diskread": 1000}]}
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/network":
+ # _get_node_ip
+ return [{"families": ["inet"],
+ "priority": 3,
+ "active": 1,
+ "cidr": "10.1.1.2/24",
+ "iface": "eth0",
+ "method": "static",
+ "exists": 1,
+ "type": "eth",
+ "netmask": "24",
+ "gateway": "10.1.1.1",
+ "address": "10.1.1.2",
+ "method6": "manual",
+ "autostart": 1},
+ {"method6": "manual",
+ "autostart": 1,
+ "type": "OVSPort",
+ "exists": 1,
+ "method": "manual",
+ "iface": "eth1",
+ "ovs_bridge": "vmbr0",
+ "active": 1,
+ "families": ["inet"],
+ "priority": 5,
+ "ovs_type": "OVSPort"},
+ {"type": "OVSBridge",
+ "method": "manual",
+ "iface": "vmbr0",
+ "families": ["inet"],
+ "priority": 4,
+ "ovs_ports": "eth1",
+ "ovs_type": "OVSBridge",
+ "method6": "manual",
+ "autostart": 1,
+ "active": 1}]
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/config":
+ # _get_vm_config (lxc)
+ return {
+ "console": 1,
+ "rootfs": "local-lvm:vm-100-disk-0,size=4G",
+ "cmode": "tty",
+ "description": "A testnode",
+ "cores": 1,
+ "hostname": "test-lxc",
+ "arch": "amd64",
+ "tty": 2,
+ "swap": 0,
+ "cpulimit": "0",
+ "net0": "name=eth0,bridge=vmbr0,gw=10.1.1.1,hwaddr=FF:FF:FF:FF:FF:FF,ip=10.1.1.3/24,type=veth",
+ "ostype": "ubuntu",
+ "digest": "123456789abcdef0123456789abcdef01234567890",
+ "protection": 0,
+ "memory": 1000,
+ "onboot": 0,
+ "cpuunits": 1024,
+ "tags": "one, two, three",
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/config":
+ # _get_vm_config (qemu)
+ return {
+ "tags": "one, two, three",
+ "cores": 1,
+ "ide2": "none,media=cdrom",
+ "memory": 1000,
+ "kvm": 1,
+ "digest": "0123456789abcdef0123456789abcdef0123456789",
+ "description": "A test qemu",
+ "sockets": 1,
+ "onboot": 1,
+ "vmgenid": "ffffffff-ffff-ffff-ffff-ffffffffffff",
+ "numa": 0,
+ "bootdisk": "scsi0",
+ "cpu": "host",
+ "name": "test-qemu",
+ "ostype": "l26",
+ "hotplug": "network,disk,usb",
+ "scsi0": "local-lvm:vm-101-disk-0,size=8G",
+ "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0,firewall=1",
+ "agent": "1,fstrim_cloned_disks=1",
+ "bios": "seabios",
+ "ide0": "local-lvm:vm-101-cloudinit,media=cdrom,size=4M",
+ "boot": "cdn",
+ "scsihw": "virtio-scsi-pci",
+ "smbios1": "uuid=ffffffff-ffff-ffff-ffff-ffffffffffff"
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/config":
+ # _get_vm_config (qemu)
+ return {
+ "numa": 0,
+ "digest": "460add1531a7068d2ae62d54f67e8fb9493dece9",
+ "ide2": "none,media=cdrom",
+ "bootdisk": "sata0",
+ "name": "test-qemu-windows",
+ "balloon": 0,
+ "cpulimit": "4",
+ "agent": "1",
+ "cores": 6,
+ "sata0": "storage:vm-102-disk-0,size=100G",
+ "memory": 10240,
+ "smbios1": "uuid=127301fc-0122-48d5-8fc5-c04fa78d8146",
+ "scsihw": "virtio-scsi-pci",
+ "sockets": 1,
+ "ostype": "win8",
+ "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0",
+ "onboot": 1
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/config":
+ # _get_vm_config (qemu)
+ return {
+ 'scsi1': 'storage:vm-103-disk-3,size=30G',
+ 'sockets': 1,
+ 'memory': 8192,
+ 'ostype': 'l26',
+ 'scsihw': 'virtio-scsi-pci',
+ "net0": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr0",
+ "net1": "virtio=ff:ff:ff:ff:ff:ff,bridge=vmbr1",
+ 'bootdisk': 'scsi0',
+ 'scsi0': 'storage:vm-103-disk-0,size=10G',
+ 'name': 'test-qemu-multi-nic',
+ 'cores': 4,
+ 'digest': '51b7599f869b9a3f564804a0aed290f3de803292',
+ 'smbios1': 'uuid=863b31c3-42ca-4a92-aed7-4111f342f70a',
+ 'agent': '1,type=virtio',
+ 'ide2': 'none,media=cdrom',
+ 'balloon': 0,
+ 'numa': 0,
+ 'scsi2': 'storage:vm-103-disk-2,size=10G',
+ 'serial0': 'socket',
+ 'vmgenid': 'ddfb79b2-b484-4d66-88e7-6e76f2d1be77',
+ 'onboot': 1,
+ 'tablet': 0
+ }
+
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/agent/network-get-interfaces":
+ # _get_agent_network_interfaces
+ return {"result": [
+ {
+ "hardware-address": "00:00:00:00:00:00",
+ "ip-addresses": [
+ {
+ "prefix": 8,
+ "ip-address-type": "ipv4",
+ "ip-address": "127.0.0.1"
+ },
+ {
+ "ip-address-type": "ipv6",
+ "ip-address": "::1",
+ "prefix": 128
+ }],
+ "statistics": {
+ "rx-errs": 0,
+ "rx-bytes": 163244,
+ "rx-packets": 1623,
+ "rx-dropped": 0,
+ "tx-dropped": 0,
+ "tx-packets": 1623,
+ "tx-bytes": 163244,
+ "tx-errs": 0},
+ "name": "lo"},
+ {
+ "statistics": {
+ "rx-packets": 4025,
+ "rx-dropped": 12,
+ "rx-bytes": 324105,
+ "rx-errs": 0,
+ "tx-errs": 0,
+ "tx-bytes": 368860,
+ "tx-packets": 3479,
+ "tx-dropped": 0},
+ "name": "eth0",
+ "ip-addresses": [
+ {
+ "prefix": 24,
+ "ip-address-type": "ipv4",
+ "ip-address": "10.1.2.3"
+ },
+ {
+ "prefix": 64,
+ "ip-address": "fd8c:4687:e88d:1be3:5b70:7b88:c79c:293",
+ "ip-address-type": "ipv6"
+ }],
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "ip-addresses": [
+ {
+ "prefix": 16,
+ "ip-address": "10.10.2.3",
+ "ip-address-type": "ipv4"
+ }],
+ "name": "docker0",
+ "statistics": {
+ "rx-bytes": 0,
+ "rx-errs": 0,
+ "rx-dropped": 0,
+ "rx-packets": 0,
+ "tx-packets": 0,
+ "tx-dropped": 0,
+ "tx-errs": 0,
+ "tx-bytes": 0
+ }}]}
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/agent/network-get-interfaces":
+ # _get_agent_network_interfaces
+ return {"result": {'error': {'desc': 'this feature or command is not currently supported', 'class': 'Unsupported'}}}
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/agent/network-get-interfaces":
+ # _get_agent_network_interfaces
+ return {
+ "result": [
+ {
+ "statistics": {
+ "tx-errs": 0,
+ "rx-errs": 0,
+ "rx-dropped": 0,
+ "tx-bytes": 48132932372,
+ "tx-dropped": 0,
+ "rx-bytes": 48132932372,
+ "tx-packets": 178578980,
+ "rx-packets": 178578980
+ },
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "ip-addresses": [
+ {
+ "ip-address-type": "ipv4",
+ "prefix": 8,
+ "ip-address": "127.0.0.1"
+ }
+ ],
+ "name": "lo"
+ },
+ {
+ "name": "eth0",
+ "ip-addresses": [
+ {
+ "ip-address-type": "ipv4",
+ "prefix": 24,
+ "ip-address": "172.16.0.143"
+ }
+ ],
+ "statistics": {
+ "rx-errs": 0,
+ "tx-errs": 0,
+ "rx-packets": 660028,
+ "tx-packets": 304599,
+ "tx-dropped": 0,
+ "rx-bytes": 1846743499,
+ "tx-bytes": 1287844926,
+ "rx-dropped": 0
+ },
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "name": "eth1",
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "statistics": {
+ "rx-bytes": 235717091946,
+ "tx-dropped": 0,
+ "rx-dropped": 0,
+ "tx-bytes": 123411636251,
+ "rx-packets": 540431277,
+ "tx-packets": 468411864,
+ "rx-errs": 0,
+ "tx-errs": 0
+ },
+ "ip-addresses": [
+ {
+ "ip-address": "10.0.0.133",
+ "prefix": 24,
+ "ip-address-type": "ipv4"
+ }
+ ]
+ },
+ {
+ "name": "docker0",
+ "ip-addresses": [
+ {
+ "ip-address": "172.17.0.1",
+ "prefix": 16,
+ "ip-address-type": "ipv4"
+ }
+ ],
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "statistics": {
+ "rx-errs": 0,
+ "tx-errs": 0,
+ "rx-packets": 0,
+ "tx-packets": 0,
+ "tx-dropped": 0,
+ "rx-bytes": 0,
+ "rx-dropped": 0,
+ "tx-bytes": 0
+ }
+ },
+ {
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "name": "datapath"
+ },
+ {
+ "name": "weave",
+ "ip-addresses": [
+ {
+ "ip-address": "10.42.0.1",
+ "ip-address-type": "ipv4",
+ "prefix": 16
+ }
+ ],
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "statistics": {
+ "rx-bytes": 127289123306,
+ "tx-dropped": 0,
+ "rx-dropped": 0,
+ "tx-bytes": 43827573343,
+ "rx-packets": 132750542,
+ "tx-packets": 74218762,
+ "rx-errs": 0,
+ "tx-errs": 0
+ }
+ },
+ {
+ "name": "vethwe-datapath",
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "name": "vethwe-bridge",
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "name": "vxlan-6784"
+ },
+ {
+ "name": "vethwepl0dfe1fe",
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "name": "vethweplf1e7715",
+ "hardware-address": "ff:ff:ff:ff:ff:ff"
+ },
+ {
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "name": "vethwepl9d244a1"
+ },
+ {
+ "hardware-address": "ff:ff:ff:ff:ff:ff",
+ "name": "vethwepl2ca477b"
+ },
+ {
+ "name": "nomacorip",
+ }
+ ]
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/lxc/100/status/current":
+ # _get_vm_status (lxc)
+ return {
+ "swap": 0,
+ "name": "test-lxc",
+ "diskread": 0,
+ "vmid": 100,
+ "diskwrite": 0,
+ "pid": 9000,
+ "mem": 89980928,
+ "netin": 1950776396424,
+ "disk": 4998168576,
+ "cpu": 0.00163430613110039,
+ "type": "lxc",
+ "uptime": 6793736,
+ "maxmem": 1073741824,
+ "status": "running",
+ "cpus": "1",
+ "ha": {
+ "group": 'null',
+ "state": "started",
+ "managed": 1
+ },
+ "maxdisk": 3348329267200,
+ "netout": 1947793356037,
+ "maxswap": 1073741824
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/101/status/current":
+ # _get_vm_status (qemu)
+ return {
+ "status": "stopped",
+ "uptime": 0,
+ "maxmem": 5364514816,
+ "maxdisk": 34359738368,
+ "netout": 0,
+ "cpus": 2,
+ "ha": {
+ "managed": 0
+ },
+ "diskread": 0,
+ "vmid": 101,
+ "diskwrite": 0,
+ "name": "test-qemu",
+ "cpu": 0,
+ "disk": 0,
+ "netin": 0,
+ "mem": 0,
+ "qmpstatus": "stopped"
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/102/status/current":
+ # _get_vm_status (qemu)
+ return {
+ "status": "stopped",
+ "uptime": 0,
+ "maxmem": 5364514816,
+ "maxdisk": 34359738368,
+ "netout": 0,
+ "cpus": 2,
+ "ha": {
+ "managed": 0
+ },
+ "diskread": 0,
+ "vmid": 102,
+ "diskwrite": 0,
+ "name": "test-qemu-windows",
+ "cpu": 0,
+ "disk": 0,
+ "netin": 0,
+ "mem": 0,
+ "qmpstatus": "prelaunch"
+ }
+ elif url == "https://localhost:8006/api2/json/nodes/testnode/qemu/103/status/current":
+ # _get_vm_status (qemu)
+ return {
+ "status": "stopped",
+ "uptime": 0,
+ "maxmem": 5364514816,
+ "maxdisk": 34359738368,
+ "netout": 0,
+ "cpus": 2,
+ "ha": {
+ "managed": 0
+ },
+ "diskread": 0,
+ "vmid": 103,
+ "diskwrite": 0,
+ "name": "test-qemu-multi-nic",
+ "cpu": 0,
+ "disk": 0,
+ "netin": 0,
+ "mem": 0,
+ "qmpstatus": "paused"
+ }
+
+
+def get_vm_snapshots(node, properties, vmtype, vmid, name):
+ return [
+ {"description": "",
+ "name": "clean",
+ "snaptime": 1000,
+ "vmstate": 0
+ },
+ {"name": "current",
+ "digest": "1234689abcdf",
+ "running": 0,
+ "description": "You are here!",
+ "parent": "clean"
+ }]
+
+
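+# given a dict of plugin options, return a callable that mimics get_option();
+# a 'default' entry in the dict, if present, is the fallback value (else False)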
+def get_option(opts):
+ def fn(option):
+ default = opts.get('default', False)
+ return opts.get(option, default)
+ return fn
+
+
+def test_populate(inventory, mocker):
+ # module settings
+ inventory.proxmox_user = 'root@pam'
+ inventory.proxmox_password = 'password'
+ inventory.proxmox_url = 'https://localhost:8006'
+ inventory.group_prefix = 'proxmox_'
+ inventory.facts_prefix = 'proxmox_'
+ inventory.strict = False
+
+ opts = {
+ 'group_prefix': 'proxmox_',
+ 'facts_prefix': 'proxmox_',
+ 'want_facts': True,
+ 'want_proxmox_nodes_ansible_host': True,
+ 'qemu_extended_statuses': True
+ }
+
+ # bypass authentication and API fetch calls
+ inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
+ inventory._get_json = mocker.MagicMock(side_effect=get_json)
+ inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
+ inventory.get_option = mocker.MagicMock(side_effect=get_option(opts))
+ inventory._can_add_host = mocker.MagicMock(return_value=True)
+ inventory._populate()
+
+ # get different hosts
+ host_qemu = inventory.inventory.get_host('test-qemu')
+ host_qemu_windows = inventory.inventory.get_host('test-qemu-windows')
+ host_qemu_multi_nic = inventory.inventory.get_host('test-qemu-multi-nic')
+ host_qemu_template = inventory.inventory.get_host('test-qemu-template')
+ host_lxc = inventory.inventory.get_host('test-lxc')
+
+    # check if test-qemu is in the proxmox_pool_test group
+ assert 'proxmox_pool_test' in inventory.inventory.groups
+ group_qemu = inventory.inventory.groups['proxmox_pool_test']
+ assert group_qemu.hosts == [host_qemu]
+
+    # check if test-qemu has eth0 interface in agent_interfaces fact
+ assert 'eth0' in [d['name'] for d in host_qemu.get_vars()['proxmox_agent_interfaces']]
+
+ # check if qemu-multi-nic has multiple network interfaces
+ for iface_name in ['eth0', 'eth1', 'weave']:
+ assert iface_name in [d['name'] for d in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']]
+
+ # check if interface with no mac-address or ip-address defaults correctly
+ assert [iface for iface in host_qemu_multi_nic.get_vars()['proxmox_agent_interfaces']
+ if iface['name'] == 'nomacorip'
+ and iface['mac-address'] == ''
+ and iface['ip-addresses'] == []
+ ]
+
+ # check to make sure qemu-windows doesn't have proxmox_agent_interfaces
+ assert "proxmox_agent_interfaces" not in host_qemu_windows.get_vars()
+
+    # check if test-lxc has been discovered correctly
+ group_lxc = inventory.inventory.groups['proxmox_all_lxc']
+ assert group_lxc.hosts == [host_lxc]
+
+ # check if qemu template is not present
+ assert host_qemu_template is None
+
+ # check that offline node is in inventory
+ assert inventory.inventory.get_host('testnode2')
+
+ # make sure that ['prelaunch', 'paused'] are in the group list
+ for group in ['paused', 'prelaunch']:
+ assert ('%sall_%s' % (inventory.group_prefix, group)) in inventory.inventory.groups
+
+ # check if qemu-windows is in the prelaunch group
+ group_prelaunch = inventory.inventory.groups['proxmox_all_prelaunch']
+ assert group_prelaunch.hosts == [host_qemu_windows]
+
+ # check if qemu-multi-nic is in the paused group
+ group_paused = inventory.inventory.groups['proxmox_all_paused']
+ assert group_paused.hosts == [host_qemu_multi_nic]
+
+
+def test_populate_missing_qemu_extended_groups(inventory, mocker):
+ # module settings
+ inventory.proxmox_user = 'root@pam'
+ inventory.proxmox_password = 'password'
+ inventory.proxmox_url = 'https://localhost:8006'
+ inventory.group_prefix = 'proxmox_'
+ inventory.facts_prefix = 'proxmox_'
+ inventory.strict = False
+
+ opts = {
+ 'group_prefix': 'proxmox_',
+ 'facts_prefix': 'proxmox_',
+ 'want_facts': True,
+ 'want_proxmox_nodes_ansible_host': True,
+ 'qemu_extended_statuses': False
+ }
+
+ # bypass authentication and API fetch calls
+ inventory._get_auth = mocker.MagicMock(side_effect=get_auth)
+ inventory._get_json = mocker.MagicMock(side_effect=get_json)
+ inventory._get_vm_snapshots = mocker.MagicMock(side_effect=get_vm_snapshots)
+ inventory.get_option = mocker.MagicMock(side_effect=get_option(opts))
+ inventory._can_add_host = mocker.MagicMock(return_value=True)
+ inventory._populate()
+
+ # make sure that ['prelaunch', 'paused'] are not in the group list
+ for group in ['paused', 'prelaunch']:
+ assert ('%sall_%s' % (inventory.group_prefix, group)) not in inventory.inventory.groups
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py
new file mode 100644
index 000000000..781db50b7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_stackpath_compute.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2020 Shay Rybak <shay.rybak@stackpath.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.errors import AnsibleError
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.stackpath_compute import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_get_stack_slugs(inventory):
+ stacks = [
+ {
+ 'status': 'ACTIVE',
+ 'name': 'test1',
+ 'id': 'XXXX',
+ 'updatedAt': '2020-07-08T01:00:00.000000Z',
+ 'slug': 'test1',
+ 'createdAt': '2020-07-08T00:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'ACTIVE',
+ 'name': 'test2',
+ 'id': 'XXXX',
+ 'updatedAt': '2019-10-22T18:00:00.000000Z',
+ 'slug': 'test2',
+ 'createdAt': '2019-10-22T18:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'DISABLED',
+ 'name': 'test3',
+ 'id': 'XXXX',
+ 'updatedAt': '2020-01-16T20:00:00.000000Z',
+ 'slug': 'test3',
+ 'createdAt': '2019-10-15T13:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }, {
+ 'status': 'ACTIVE',
+ 'name': 'test4',
+ 'id': 'XXXX',
+ 'updatedAt': '2019-11-20T22:00:00.000000Z',
+ 'slug': 'test4',
+ 'createdAt': '2019-11-20T22:00:00.000000Z',
+ 'accountId': 'XXXX',
+ }
+ ]
+ inventory._get_stack_slugs(stacks)
+ assert len(inventory.stack_slugs) == 4
+ assert inventory.stack_slugs == [
+ "test1",
+ "test2",
+ "test3",
+ "test4"
+ ]
+
+
+def test_verify_file(tmp_path, inventory):
+ file = tmp_path / "foobar.stackpath_compute.yml"
+ file.touch()
+ assert inventory.verify_file(str(file)) is True
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.stackpath_compute.yml') is False
+
+
+def test_validate_config(inventory):
+ config = {
+ "client_secret": "short_client_secret",
+ "use_internal_ip": False,
+ "stack_slugs": ["test1"],
+ "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "plugin": "community.general.stackpath_compute",
+ }
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._validate_config(config)
+    assert "client_secret must be 64 characters long" in str(error_message.value)
+
+ config = {
+ "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "use_internal_ip": True,
+ "stack_slugs": ["test1"],
+ "client_id": "short_client_id",
+ "plugin": "community.general.stackpath_compute",
+ }
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._validate_config(config)
+    assert "client_id must be 32 characters long" in str(error_message.value)
+
+ config = {
+ "use_internal_ip": True,
+ "stack_slugs": ["test1"],
+ "client_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "plugin": "community.general.stackpath_compute",
+ }
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._validate_config(config)
+    assert "config missing client_secret, a required parameter" in str(error_message.value)
+
+ config = {
+ "client_secret": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+ "use_internal_ip": False,
+ "plugin": "community.general.stackpath_compute",
+ }
+    with pytest.raises(AnsibleError) as error_message:
+        inventory._validate_config(config)
+    assert "config missing client_id, a required parameter" in str(error_message.value)
+
+
+def test_populate(inventory):
+ instances = [
+ {
+ "name": "instance1",
+ "countryCode": "SE",
+ "workloadSlug": "wokrload1",
+ "continent": "Europe",
+ "workloadId": "id1",
+ "cityCode": "ARN",
+ "externalIpAddress": "20.0.0.1",
+ "target": "target1",
+ "stackSlug": "stack1",
+ "ipAddress": "10.0.0.1",
+ },
+ {
+ "name": "instance2",
+ "countryCode": "US",
+ "workloadSlug": "wokrload2",
+ "continent": "America",
+ "workloadId": "id2",
+ "cityCode": "JFK",
+ "externalIpAddress": "20.0.0.2",
+ "target": "target2",
+ "stackSlug": "stack1",
+ "ipAddress": "10.0.0.2",
+ },
+ {
+ "name": "instance3",
+ "countryCode": "SE",
+ "workloadSlug": "workload3",
+ "continent": "Europe",
+ "workloadId": "id3",
+ "cityCode": "ARN",
+ "externalIpAddress": "20.0.0.3",
+ "target": "target1",
+ "stackSlug": "stack2",
+ "ipAddress": "10.0.0.3",
+ },
+ {
+ "name": "instance4",
+ "countryCode": "US",
+ "workloadSlug": "workload3",
+ "continent": "America",
+ "workloadId": "id4",
+ "cityCode": "JFK",
+ "externalIpAddress": "20.0.0.4",
+ "target": "target2",
+ "stackSlug": "stack2",
+ "ipAddress": "10.0.0.4",
+ },
+ ]
+ inventory.hostname_key = "externalIpAddress"
+ inventory._populate(instances)
+ # get different hosts
+ host1 = inventory.inventory.get_host('20.0.0.1')
+ host2 = inventory.inventory.get_host('20.0.0.2')
+ host3 = inventory.inventory.get_host('20.0.0.3')
+ host4 = inventory.inventory.get_host('20.0.0.4')
+
+ # get different groups
+ assert 'citycode_arn' in inventory.inventory.groups
+ group_citycode_arn = inventory.inventory.groups['citycode_arn']
+ assert 'countrycode_se' in inventory.inventory.groups
+ group_countrycode_se = inventory.inventory.groups['countrycode_se']
+ assert 'continent_america' in inventory.inventory.groups
+ group_continent_america = inventory.inventory.groups['continent_america']
+ assert 'name_instance1' in inventory.inventory.groups
+ group_name_instance1 = inventory.inventory.groups['name_instance1']
+ assert 'stackslug_stack1' in inventory.inventory.groups
+ group_stackslug_stack1 = inventory.inventory.groups['stackslug_stack1']
+ assert 'target_target1' in inventory.inventory.groups
+ group_target_target1 = inventory.inventory.groups['target_target1']
+ assert 'workloadslug_workload3' in inventory.inventory.groups
+ group_workloadslug_workload3 = inventory.inventory.groups['workloadslug_workload3']
+ assert 'workloadid_id1' in inventory.inventory.groups
+ group_workloadid_id1 = inventory.inventory.groups['workloadid_id1']
+
+ assert group_citycode_arn.hosts == [host1, host3]
+ assert group_countrycode_se.hosts == [host1, host3]
+ assert group_continent_america.hosts == [host2, host4]
+ assert group_name_instance1.hosts == [host1]
+ assert group_stackslug_stack1.hosts == [host1, host2]
+ assert group_target_target1.hosts == [host1, host3]
+ assert group_workloadslug_workload3.hosts == [host3, host4]
+ assert group_workloadid_id1.hosts == [host1]
diff --git a/ansible_collections/community/general/tests/unit/plugins/inventory/test_xen_orchestra.py b/ansible_collections/community/general/tests/unit/plugins/inventory/test_xen_orchestra.py
new file mode 100644
index 000000000..bae038e80
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/inventory/test_xen_orchestra.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Jeffrey van Pelt <jeff@vanpelt.one>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# The object data used in these tests is modelled on Xen Orchestra API responses.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+from ansible_collections.community.general.plugins.inventory.xen_orchestra import InventoryModule
+
+objects = {
+ 'vms': {
+ '0e64588-2bea-2d82-e922-881654b0a48f':
+ {
+ 'type': 'VM',
+ 'addresses': {},
+ 'CPUs': {'max': 4, 'number': 4},
+ 'memory': {'dynamic': [1073741824, 2147483648], 'static': [536870912, 4294967296], 'size': 2147483648},
+ 'name_description': '',
+ 'name_label': 'XCP-NG lab 2',
+ 'os_version': {},
+ 'parent': 'd3af89b2-d846-0874-6acb-031ccf11c560',
+ 'power_state': 'Running',
+ 'tags': [],
+ 'id': '0e645898-2bea-2d82-e922-881654b0a48f',
+ 'uuid': '0e645898-2bea-2d82-e922-881654b0a48f',
+ '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$container': '222d8594-9426-468a-ad69-7a6f02330fa3'
+ },
+ 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331':
+ {
+ 'type': 'VM',
+ 'addresses': {'0/ipv4/0': '192.168.1.55', '1/ipv4/0': '10.0.90.1'},
+ 'CPUs': {'max': 4, 'number': 4},
+ 'mainIpAddress': '192.168.1.55',
+ 'memory': {'dynamic': [2147483648, 2147483648], 'static': [134217728, 2147483648], 'size': 2147483648},
+ 'name_description': '',
+ 'name_label': 'XCP-NG lab 3',
+ 'os_version': {'name': 'FreeBSD 11.3-STABLE', 'uname': '11.3-STABLE', 'distro': 'FreeBSD'},
+ 'power_state': 'Halted',
+ 'tags': [],
+ 'id': 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331',
+ 'uuid': 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331',
+ '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$container': 'c96ec4dd-28ac-4df4-b73c-4371bd202728',
+ }
+ },
+ 'pools': {
+ '3d315997-73bd-5a74-8ca7-289206cb03ab': {
+ 'master': '222d8594-9426-468a-ad69-7a6f02330fa3',
+ 'tags': [],
+ 'name_description': '',
+ 'name_label': 'Storage Lab',
+ 'cpus': {'cores': 120, 'sockets': 6},
+ 'id': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ 'type': 'pool',
+ 'uuid': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab'
+ }
+ },
+ 'hosts': {
+ 'c96ec4dd-28ac-4df4-b73c-4371bd202728': {
+ 'type': 'host',
+ 'uuid': 'c96ec4dd-28ac-4df4-b73c-4371bd202728',
+ 'enabled': True,
+ 'CPUs': {
+ 'cpu_count': '40',
+ 'socket_count': '2',
+ 'vendor': 'GenuineIntel',
+ 'speed': '1699.998',
+ 'modelname': 'Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz',
+ 'family': '6',
+ 'model': '62',
+ 'stepping': '4'
+ },
+ 'address': '172.16.210.14',
+ 'build': 'release/stockholm/master/7',
+ 'cpus': {'cores': 40, 'sockets': 2},
+ 'hostname': 'r620-s1',
+ 'name_description': 'Default install',
+ 'name_label': 'R620-S1',
+ 'memory': {'usage': 45283590144, 'size': 137391292416},
+ 'power_state': 'Running',
+ 'tags': [],
+ 'version': '8.2.0',
+ 'productBrand': 'XCP-ng',
+ 'id': 'c96ec4dd-28ac-4df4-b73c-4371bd202728',
+ '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab'
+ },
+ '222d8594-9426-468a-ad69-7a6f02330fa3': {
+ 'type': 'host',
+ 'uuid': '222d8594-9426-468a-ad69-7a6f02330fa3',
+ 'enabled': True,
+ 'CPUs': {
+ 'cpu_count': '40',
+ 'socket_count': '2',
+ 'vendor': 'GenuineIntel',
+ 'speed': '1700.007',
+ 'modelname': 'Intel(R) Xeon(R) CPU E5-2650L v2 @ 1.70GHz',
+ 'family': '6',
+ 'model': '62',
+ 'stepping': '4'
+ },
+ 'address': '172.16.210.16',
+ 'build': 'release/stockholm/master/7',
+ 'cpus': {'cores': 40, 'sockets': 2},
+ 'hostname': 'r620-s2',
+ 'name_description': 'Default install',
+ 'name_label': 'R620-S2',
+ 'memory': {'usage': 10636521472, 'size': 137391292416},
+ 'power_state': 'Running',
+ 'tags': ['foo', 'bar', 'baz'],
+ 'version': '8.2.0',
+ 'productBrand': 'XCP-ng',
+ 'id': '222d8594-9426-468a-ad69-7a6f02330fa3',
+ '$pool': '3d315997-73bd-5a74-8ca7-289206cb03ab',
+ '$poolId': '3d315997-73bd-5a74-8ca7-289206cb03ab'
+ }
+ }
+}
+
+
+def get_option(option):
+ if option == 'groups':
+ return {}
+ elif option == 'keyed_groups':
+ return []
+ elif option == 'compose':
+ return {}
+ elif option == 'strict':
+ return False
+ else:
+ return None
+
+
+def serialize_groups(groups):
+ return list(map(str, groups))
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+def test_verify_file_bad_config(inventory):
+ assert inventory.verify_file('foobar.xen_orchestra.yml') is False
+
+
+def test_populate(inventory, mocker):
+ inventory.get_option = mocker.MagicMock(side_effect=get_option)
+ inventory._populate(objects)
+ actual = sorted(inventory.inventory.hosts.keys())
+ expected = sorted(['c96ec4dd-28ac-4df4-b73c-4371bd202728', '222d8594-9426-468a-ad69-7a6f02330fa3',
+ '0e64588-2bea-2d82-e922-881654b0a48f', 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331'])
+
+ assert actual == expected
+
+ # Assertions for the host with an IP address
+ host_with_ip = inventory.inventory.get_host(
+ 'b0d25e70-019d-6182-2f7c-b0f5d8ef9331')
+ host_with_ip_vars = host_with_ip.vars
+
+ assert host_with_ip_vars['ansible_host'] == '192.168.1.55'
+ assert host_with_ip_vars['power_state'] == 'halted'
+ assert host_with_ip_vars['type'] == 'VM'
+
+ assert host_with_ip in inventory.inventory.groups['with_ip'].hosts
+
+ # Assertions for the host without an IP address
+ host_without_ip = inventory.inventory.get_host(
+ '0e64588-2bea-2d82-e922-881654b0a48f')
+ host_without_ip_vars = host_without_ip.vars
+
+ assert host_without_ip_vars['ansible_host'] is None
+ assert host_without_ip_vars['power_state'] == 'running'
+
+ assert host_without_ip in inventory.inventory.groups['without_ip'].hosts
+
+ assert host_with_ip in inventory.inventory.groups['xo_host_r620_s1'].hosts
+ assert host_without_ip in inventory.inventory.groups['xo_host_r620_s2'].hosts
+
+ r620_s1 = inventory.inventory.get_host(
+ 'c96ec4dd-28ac-4df4-b73c-4371bd202728')
+ r620_s2 = inventory.inventory.get_host(
+ '222d8594-9426-468a-ad69-7a6f02330fa3')
+
+ assert r620_s1.vars['address'] == '172.16.210.14'
+ assert r620_s1.vars['tags'] == []
+ assert r620_s2.vars['address'] == '172.16.210.16'
+ assert r620_s2.vars['tags'] == ['foo', 'bar', 'baz']
+
+ storage_lab = inventory.inventory.groups['xo_pool_storage_lab']
+
+ # Check that hosts are in their corresponding pool
+ assert r620_s1 in storage_lab.hosts
+ assert r620_s2 in storage_lab.hosts
+
+ # Check that the VMs are in their corresponding pool
+ assert host_without_ip in storage_lab.hosts
+ assert host_with_ip in storage_lab.hosts
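The assertions above read back Ansible's in-memory InventoryData object, which is what `_populate()` fills. A minimal sketch of the kind of calls the plugin makes against that API, in isolation, using one of the host UUIDs from the fixtures and the `with_ip` group name the plugin creates:

    from ansible.inventory.data import InventoryData

    # Sketch of the InventoryData calls an inventory plugin typically performs.
    inv = InventoryData()
    inv.add_group('with_ip')
    inv.add_host('b0d25e70-019d-6182-2f7c-b0f5d8ef9331', group='with_ip')
    inv.set_variable('b0d25e70-019d-6182-2f7c-b0f5d8ef9331', 'ansible_host', '192.168.1.55')

    host = inv.get_host('b0d25e70-019d-6182-2f7c-b0f5d8ef9331')
    assert host.vars['ansible_host'] == '192.168.1.55'
    assert host in inv.groups['with_ip'].hosts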
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_common.py b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_common.py
new file mode 100644
index 000000000..092979225
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_common.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+
+from ansible_collections.community.general.plugins.lookup.onepassword import (
+ OnePassCLIv1,
+ OnePassCLIv2,
+)
+
+
+def load_file(file):
+ with open((os.path.join(os.path.dirname(__file__), "onepassword_fixtures", file)), "r") as f:
+ return json.loads(f.read())
+
+
+# Metadata leaf nodes that real CLI output would contain are intentionally omitted where they are not relevant to these tests.
+MOCK_ENTRIES = {
+ OnePassCLIv1: [
+ {
+ 'vault_name': 'Acme "Quot\'d" Servers',
+ 'queries': [
+ '0123456789',
+ 'Mock "Quot\'d" Server'
+ ],
+ 'expected': ['t0pS3cret', 't0pS3cret'],
+ 'output': load_file("v1_out_01.json"),
+ },
+ {
+ 'vault_name': 'Acme Logins',
+ 'queries': [
+ '9876543210',
+ 'Mock Website',
+ 'acme.com'
+ ],
+ 'expected': ['t0pS3cret', 't0pS3cret', 't0pS3cret'],
+ 'output': load_file("v1_out_02.json"),
+ },
+ {
+ 'vault_name': 'Acme Logins',
+ 'queries': [
+ '864201357'
+ ],
+ 'expected': ['vauxhall'],
+ 'output': load_file("v1_out_03.json"),
+ },
+ ],
+ OnePassCLIv2: [
+ {
+ "vault_name": "Test Vault",
+ "queries": [
+ "ywvdbojsguzgrgnokmcxtydgdv",
+ "Authy Backup",
+ ],
+ "expected": ["OctoberPoppyNuttyDraperySabbath", "OctoberPoppyNuttyDraperySabbath"],
+ "output": load_file("v2_out_01.json"),
+ },
+ {
+ # Request a custom field where ID and label are different
+ "vault_name": "Test Vault",
+ "queries": ["Dummy Login"],
+ "kwargs": {
+ "field": "password1",
+ },
+ "expected": ["data in custom field"],
+ "output": load_file("v2_out_02.json")
+ },
+ {
+ # Request data from a custom section
+ "vault_name": "Test Vault",
+ "queries": ["Duplicate Sections"],
+ "kwargs": {
+ "field": "s2 text",
+ "section": "Section 2",
+ },
+ "expected": ["first value"],
+ "output": load_file("v2_out_03.json")
+ },
+ ],
+}
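MOCK_ENTRIES is a table keyed by CLI class, with each case pairing its queries to expected values one-to-one. A small structural self-check (a sketch, independent of the lookup's parsing internals) makes that invariant explicit:

    # Sketch: structural invariants of the MOCK_ENTRIES table above.
    def test_mock_entries_are_well_formed():
        for cli_class, cases in MOCK_ENTRIES.items():
            for case in cases:
                # Every query must map to exactly one expected value.
                assert len(case['queries']) == len(case['expected'])
                assert isinstance(case['output'], dict)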
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_conftest.py b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_conftest.py
new file mode 100644
index 000000000..18afae1a3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_conftest.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.lookup.onepassword import OnePass
+
+
+OP_VERSION_FIXTURES = [
+ "opv1",
+ "opv2"
+]
+
+
+@pytest.fixture
+def fake_op(mocker):
+ def _fake_op(version):
+ mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase.get_current_version", return_value=version)
+ op = OnePass(None, None, None, None, None)
+ op._config._config_file_path = "/home/jin/.op/config"
+ mocker.patch.object(op._cli, "_run")
+
+ return op
+
+ return _fake_op
+
+
+@pytest.fixture
+def opv1(fake_op):
+ return fake_op("1.17.2")
+
+
+@pytest.fixture
+def opv2(fake_op):
+ return fake_op("2.27.2")
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json
new file mode 100644
index 000000000..57eab09c5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json
@@ -0,0 +1,18 @@
+{
+ "uuid": "0123456789",
+ "vaultUuid": "2468",
+ "overview": {
+ "title": "Mock \"Quot'd\" Server"
+ },
+ "details": {
+ "sections": [{
+ "title": "",
+ "fields": [
+ {"t": "username", "v": "jamesbond"},
+ {"t": "password", "v": "t0pS3cret"},
+ {"t": "notes", "v": "Test note with\nmultiple lines and trailing space.\n\n"},
+ {"t": "tricksy \"quot'd\" field\\", "v": "\"quot'd\" value"}
+ ]
+ }]
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_01.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json
new file mode 100644
index 000000000..da133fe59
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json
@@ -0,0 +1,18 @@
+{
+ "uuid": "9876543210",
+ "vaultUuid": "1357",
+ "overview": {
+ "title": "Mock Website",
+ "URLs": [
+ {"l": "website", "u": "https://acme.com/login"}
+ ]
+ },
+ "details": {
+ "sections": [{
+ "title": "",
+ "fields": [
+ {"t": "password", "v": "t0pS3cret"}
+ ]
+ }]
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_02.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json
new file mode 100644
index 000000000..57c7d0f3d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json
@@ -0,0 +1,20 @@
+{
+ "uuid": "864201357",
+ "vaultUuid": "1357",
+ "overview": {
+ "title": "Mock Something"
+ },
+ "details": {
+ "fields": [
+ {
+ "value": "jbond@mi6.gov.uk",
+ "name": "emailAddress"
+ },
+ {
+ "name": "password",
+ "value": "vauxhall"
+ },
+ {}
+ ]
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v1_out_03.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json
new file mode 100644
index 000000000..7ef0bb0c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json
@@ -0,0 +1,35 @@
+{
+ "id": "ywvdbojsguzgrgnokmcxtydgdv",
+ "title": "Authy Backup",
+ "version": 1,
+ "vault": {
+ "id": "bcqxysvcnejjrwzoqrwzcqjqxc",
+ "name": "test vault"
+ },
+ "category": "PASSWORD",
+ "last_edited_by": "7FUPZ8ZNE02KSHMAIMKHIVUE17",
+ "created_at": "2015-01-18T13:13:38Z",
+ "updated_at": "2016-02-20T16:23:54Z",
+ "additional_information": "Jan 18, 2015, 08:13:38",
+ "fields": [
+ {
+ "id": "password",
+ "type": "CONCEALED",
+ "purpose": "PASSWORD",
+ "label": "password",
+ "value": "OctoberPoppyNuttyDraperySabbath",
+ "reference": "op://Test Vault/Authy Backup/password",
+ "password_details": {
+ "strength": "FANTASTIC"
+ }
+ },
+ {
+ "id": "notesPlain",
+ "type": "STRING",
+ "purpose": "NOTES",
+ "label": "notesPlain",
+ "value": "Backup password to restore Authy",
+ "reference": "op://Test Vault/Authy Backup/notesPlain"
+ }
+ ]
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_01.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json
new file mode 100644
index 000000000..5da2a16d1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json
@@ -0,0 +1,85 @@
+{
+ "id": "awk4s2u44fhnrgppszcsvc663i",
+ "title": "Dummy Login",
+ "version": 4,
+ "vault": {
+ "id": "stpebbaccrq72xulgouxsk4p7y",
+ "name": "Personal"
+ },
+ "category": "LOGIN",
+ "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU",
+ "created_at": "2018-04-25T21:55:19Z",
+ "updated_at": "2022-09-02T17:51:21Z",
+ "additional_information": "agent.smith",
+ "urls": [
+ {
+ "primary": true,
+ "href": "https://acme.com"
+ }
+ ],
+ "sections": [
+ {
+ "id": "add more"
+ },
+ {
+ "id": "gafaeg7vnqmgrklw5r6yrufyxy",
+ "label": "COMMANDS"
+ },
+ {
+ "id": "linked items",
+ "label": "Related Items"
+ }
+ ],
+ "fields": [
+ {
+ "id": "username",
+ "type": "STRING",
+ "purpose": "USERNAME",
+ "label": "username",
+ "value": "agent.smith",
+ "reference": "op://Personal/Dummy Login/username"
+ },
+ {
+ "id": "password",
+ "type": "CONCEALED",
+ "purpose": "PASSWORD",
+ "label": "password",
+ "value": "FootworkDegreeReverence",
+ "entropy": 159.60836791992188,
+ "reference": "op://Personal/Dummy Login/password",
+ "password_details": {
+ "entropy": 159,
+ "generated": true,
+ "strength": "FANTASTIC"
+ }
+ },
+ {
+ "id": "notesPlain",
+ "type": "STRING",
+ "purpose": "NOTES",
+ "label": "notesPlain",
+ "reference": "op://Personal/Dummy Login/notesPlain"
+ },
+ {
+ "id": "7gyjekelk24ghgd4rvafspjbli",
+ "section": {
+ "id": "add more"
+ },
+ "type": "STRING",
+ "label": "title",
+ "value": "value of the field",
+ "reference": "op://Personal/Dummy Login/add more/title"
+ },
+ {
+ "id": "fx4wpzokrxn7tlb3uwpdjfptgm",
+ "section": {
+ "id": "gafaeg7vnqmgrklw5r6yrufyxy",
+ "label": "COMMANDS"
+ },
+ "type": "CONCEALED",
+ "label": "password1",
+ "value": "data in custom field",
+ "reference": "op://Personal/Dummy Login/COMMANDS/password1"
+ }
+ ]
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_02.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json
new file mode 100644
index 000000000..22fbc3f29
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json
@@ -0,0 +1,103 @@
+{
+ "id": "7t7qu2r35qyvqj3crujd4dqxmy",
+ "title": "Duplicate Sections",
+ "version": 3,
+ "vault": {
+ "id": "stpebbaccrq72xulgouxsk4p7y",
+ "name": "Personal"
+ },
+ "category": "LOGIN",
+ "last_edited_by": "LSGPJERUYBH7BFPHMZ2KKGL6AU",
+ "created_at": "2022-11-04T17:09:18Z",
+ "updated_at": "2022-11-04T17:22:19Z",
+ "additional_information": "flora",
+ "urls": [
+ {
+ "label": "website",
+ "primary": true,
+ "href": "https://acme.com/login"
+ }
+ ],
+ "sections": [
+ {
+ "id": "add more"
+ },
+ {
+ "id": "7osqcvd43i75teocdzbb6d7mie",
+ "label": "Section 2"
+ }
+ ],
+ "fields": [
+ {
+ "id": "username",
+ "type": "STRING",
+ "purpose": "USERNAME",
+ "label": "username",
+ "value": "flora",
+ "reference": "op://Personal/Duplicate Sections/username"
+ },
+ {
+ "id": "password",
+ "type": "CONCEALED",
+ "purpose": "PASSWORD",
+ "label": "password",
+ "value": "PtZGFLAibx-erTo7ywywEvh-n4syas97n-tuF2D.b8DdqA2vCjrvRGkNQxj!Gi9R",
+ "entropy": 379.564697265625,
+ "reference": "op://Personal/Duplicate Sections/password",
+ "password_details": {
+ "entropy": 379,
+ "generated": true,
+ "strength": "FANTASTIC"
+ }
+ },
+ {
+ "id": "notesPlain",
+ "type": "STRING",
+ "purpose": "NOTES",
+ "label": "notesPlain",
+ "reference": "op://Personal/Duplicate Sections/notesPlain"
+ },
+ {
+ "id": "4saaazkb7arwisj6ysctb4jmm4",
+ "section": {
+ "id": "add more"
+ },
+ "type": "STRING",
+ "label": "text",
+ "value": "text field the first",
+ "reference": "op://Personal/Duplicate Sections/add more/text"
+ },
+ {
+ "id": "4vtfkj4bwcmg7d5uf62wnpkp3a",
+ "section": {
+ "id": "add more"
+ },
+ "type": "STRING",
+ "label": "text",
+ "value": "text field the second",
+ "reference": "op://Personal/Duplicate Sections/add more/text"
+ },
+ {
+ "id": "wbrjnowkrgavpooomtht36gjqu",
+ "section": {
+ "id": "7osqcvd43i75teocdzbb6d7mie",
+ "label": "Section 2"
+ },
+ "type": "STRING",
+ "label": "s2 text",
+ "value": "first value",
+ "reference": "op://Personal/Duplicate Sections/Section 2/s2 text"
+ },
+ {
+ "id": "bddlz2fj2pebmtfhksbmcexy7m",
+ "section": {
+ "id": "7osqcvd43i75teocdzbb6d7mie",
+ "label": "Section 2"
+ },
+ "type": "STRING",
+ "label": "s2 text",
+ "value": "second value",
+ "reference": "op://Personal/Duplicate Sections/Section 2/s2 text"
+ }
+ ]
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json.license b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json.license
new file mode 100644
index 000000000..969b956c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/onepassword_fixtures/v2_out_03.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_bitwarden.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_bitwarden.py
new file mode 100644
index 000000000..d45263965
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_bitwarden.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Jonathan Lung <lungj@heresjono.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+from ansible.errors import AnsibleError
+from ansible.module_utils import six
+from ansible.plugins.loader import lookup_loader
+from ansible_collections.community.general.plugins.lookup.bitwarden import Bitwarden
+
+
+MOCK_RECORDS = [
+ {
+ "collectionIds": [],
+ "deletedDate": None,
+ "favorite": False,
+ "fields": [
+ {
+ "linkedId": None,
+ "name": "a_new_secret",
+ "type": 1,
+ "value": "this is a new secret"
+ },
+ {
+ "linkedId": None,
+ "name": "not so secret",
+ "type": 0,
+ "value": "not secret"
+ }
+ ],
+ "folderId": "3b12a9da-7c49-40b8-ad33-aede017a7ead",
+ "id": "90992f63-ddb6-4e76-8bfc-aede016ca5eb",
+ "login": {
+ "password": "passwordA3",
+ "passwordRevisionDate": "2022-07-26T23:03:23.399Z",
+ "totp": None,
+ "username": "userA"
+ },
+ "name": "a_test",
+ "notes": None,
+ "object": "item",
+ "organizationId": None,
+ "passwordHistory": [
+ {
+ "lastUsedDate": "2022-07-26T23:03:23.405Z",
+ "password": "a_new_secret: this is secret"
+ },
+ {
+ "lastUsedDate": "2022-07-26T23:03:23.399Z",
+ "password": "passwordA2"
+ },
+ {
+ "lastUsedDate": "2022-07-26T22:59:52.885Z",
+ "password": "passwordA"
+ }
+ ],
+ "reprompt": 0,
+ "revisionDate": "2022-07-26T23:03:23.743Z",
+ "type": 1
+ },
+ {
+ "collectionIds": [],
+ "deletedDate": None,
+ "favorite": False,
+ "folderId": None,
+ "id": "5ebd4d31-104c-49fc-a09c-aedf003d28ad",
+ "login": {
+ "password": "b",
+ "passwordRevisionDate": None,
+ "totp": None,
+ "username": "a"
+ },
+ "name": "dupe_name",
+ "notes": None,
+ "object": "item",
+ "organizationId": None,
+ "reprompt": 0,
+ "revisionDate": "2022-07-27T03:42:40.353Z",
+ "type": 1
+ },
+ {
+ "collectionIds": [],
+ "deletedDate": None,
+ "favorite": False,
+ "folderId": None,
+ "id": "90657653-6695-496d-9431-aedf003d3015",
+ "login": {
+ "password": "d",
+ "passwordRevisionDate": None,
+ "totp": None,
+ "username": "c"
+ },
+ "name": "dupe_name",
+ "notes": None,
+ "object": "item",
+ "organizationId": None,
+ "reprompt": 0,
+ "revisionDate": "2022-07-27T03:42:46.673Z",
+ "type": 1
+ }
+]
+
+
+class MockBitwarden(Bitwarden):
+
+ unlocked = True
+
+ def _get_matches(self, search_value, search_field="name", collection_id=None):
+ return list(filter(lambda record: record[search_field] == search_value, MOCK_RECORDS))
+
+
+class LoggedOutMockBitwarden(MockBitwarden):
+
+ unlocked = False
+
+
+class TestLookupModule(unittest.TestCase):
+
+ def setUp(self):
+ self.lookup = lookup_loader.get('community.general.bitwarden')
+
+ @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+ def test_bitwarden_plugin_no_match(self):
+ # Entry 0, "a_test" of the test input should have no duplicates.
+ self.assertEqual([], self.lookup.run(['not_here'], field='password')[0])
+
+ @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+ def test_bitwarden_plugin_fields(self):
+ # Entry 0, "a_test" of the test input should have no duplicates.
+ record = MOCK_RECORDS[0]
+ record_name = record['name']
+ for k, v in six.iteritems(record['login']):
+ self.assertEqual([v],
+ self.lookup.run([record_name], field=k)[0])
+
+ @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+ def test_bitwarden_plugin_duplicates(self):
+ # There are two records named dupe_name, so the comparison of what was
+ # retrieved must be order-insensitive.
+ self.assertEqual(set(['b', 'd']),
+ set(self.lookup.run(['dupe_name'], field='password')[0]))
+
+ @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', new=MockBitwarden())
+ def test_bitwarden_plugin_full_item(self):
+ # Try to retrieve the full record of the first entry, whose name is "a_test".
+ self.assertEqual([MOCK_RECORDS[0]],
+ self.lookup.run(['a_test'])[0])
+
+ @patch('ansible_collections.community.general.plugins.lookup.bitwarden._bitwarden', LoggedOutMockBitwarden())
+ def test_bitwarden_plugin_locked(self):
+ record = MOCK_RECORDS[0]
+ record_name = record['name']
+ with self.assertRaises(AnsibleError):
+ self.lookup.run([record_name], field='password')
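MockBitwarden replaces the real `bw` CLI with a plain filter over MOCK_RECORDS. The same predicate written standalone (a sketch, runnable without the lookup machinery) shows that the filter preserves record order, which is why only the dupe_name test needs a set comparison:

    # Sketch of the filtering MockBitwarden substitutes for real `bw` calls.
    def get_matches(records, search_value, search_field="name"):
        return [r for r in records if r.get(search_field) == search_value]

    assert [r["id"] for r in get_matches(MOCK_RECORDS, "dupe_name")] == [
        "5ebd4d31-104c-49fc-a09c-aedf003d28ad",
        "90657653-6695-496d-9431-aedf003d3015",
    ]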
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_dependent.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_dependent.py
new file mode 100644
index 000000000..74d7c4123
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_dependent.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020-2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from ansible_collections.community.internal_test_tools.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.internal_test_tools.tests.unit.compat.mock import (
+ MagicMock,
+)
+
+from ansible.plugins.loader import lookup_loader
+
+
+class TestLookupModule(TestCase):
+ def setUp(self):
+ templar = MagicMock()
+ templar._loader = None
+ self.lookup = lookup_loader.get("community.general.dependent", templar=templar)
+
+ def test_empty(self):
+ self.assertListEqual(self.lookup.run([], None), [])
+
+ def test_simple(self):
+ self.assertListEqual(
+ self.lookup.run(
+ [
+ {'a': '[1, 2]'},
+ {'b': '[item.a + 3, item.a + 6]'},
+ {'c': '[item.a + item.b * 10]'},
+ ],
+ {},
+ ),
+ [
+ {'a': 1, 'b': 4, 'c': 41},
+ {'a': 1, 'b': 7, 'c': 71},
+ {'a': 2, 'b': 5, 'c': 52},
+ {'a': 2, 'b': 8, 'c': 82},
+ ],
+ )
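The expected list in test_simple follows from the lookup's evaluation order: each term is rendered with the bindings produced by the previous terms, yielding a nested product. The same expansion, hand-rolled as a sketch:

    # Hand-rolled sketch of the expansion the lookup performs for this input.
    results = []
    for a in [1, 2]:
        for b in [a + 3, a + 6]:
            for c in [a + b * 10]:
                results.append({'a': a, 'b': b, 'c': c})

    assert results == [
        {'a': 1, 'b': 4, 'c': 41},
        {'a': 1, 'b': 7, 'c': 71},
        {'a': 2, 'b': 5, 'c': 52},
        {'a': 2, 'b': 8, 'c': 82},
    ]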
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py
new file mode 100644
index 000000000..a9a2d30ee
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_dsv.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.general.tests.unit.compat.mock import (
+ patch,
+ MagicMock,
+)
+from ansible_collections.community.general.plugins.lookup import dsv
+from ansible.plugins.loader import lookup_loader
+
+
+class MockSecretsVault(MagicMock):
+ RESPONSE = '{"foo": "bar"}'
+
+ def get_secret_json(self, path):
+ return self.RESPONSE
+
+
+class TestLookupModule(TestCase):
+ def setUp(self):
+ dsv.sdk_is_missing = False
+ self.lookup = lookup_loader.get("community.general.dsv")
+
+ @patch(
+ "ansible_collections.community.general.plugins.lookup.dsv.LookupModule.Client",
+ MockSecretsVault(),
+ )
+ def test_get_secret_json(self):
+ self.assertListEqual(
+ [MockSecretsVault.RESPONSE],
+ self.lookup.run(
+ ["/dummy"],
+ [],
+ **{"tenant": "dummy", "client_id": "dummy", "client_secret": "dummy", }
+ ),
+ )
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py
new file mode 100644
index 000000000..e9ac777eb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_etcd3.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2020, SCC France, Eric Belhomme <ebelhomme@fr.scc.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.general.plugins.lookup import etcd3
+from ansible.plugins.loader import lookup_loader
+
+
+class FakeKVMetadata:
+
+ def __init__(self, keyvalue, header):
+ self.key = keyvalue
+ self.create_revision = ''
+ self.mod_revision = ''
+ self.version = ''
+ self.lease_id = ''
+ self.response_header = header
+
+
+class FakeEtcd3Client(MagicMock):
+
+ def get_prefix(self, key):
+ for i in range(1, 4):
+ yield self.get('{0}_{1}'.format(key, i))
+
+ def get(self, key):
+ return ("{0} value".format(key), FakeKVMetadata(key, None))
+
+
+class TestLookupModule(unittest.TestCase):
+
+ def setUp(self):
+ etcd3.HAS_ETCD = True
+ self.lookup = lookup_loader.get('community.general.etcd3')
+
+ @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client())
+ def test_key(self):
+ expected_result = [{'key': 'a_key', 'value': 'a_key value'}]
+ self.assertListEqual(expected_result, self.lookup.run(['a_key'], []))
+
+ @patch('ansible_collections.community.general.plugins.lookup.etcd3.etcd3_client', FakeEtcd3Client())
+ def test_key_prefix(self):
+ expected_result = [
+ {'key': 'a_key_1', 'value': 'a_key_1 value'},
+ {'key': 'a_key_2', 'value': 'a_key_2 value'},
+ {'key': 'a_key_3', 'value': 'a_key_3 value'},
+ ]
+ self.assertListEqual(expected_result, self.lookup.run(['a_key'], [], **{'prefix': True}))
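FakeEtcd3Client mirrors the two call shapes the lookup relies on: get() for an exact key and get_prefix() for prefix mode. Exercising the fake directly (a sketch) shows where the three expected prefix results come from:

    # Sketch: the fake derives three (value, metadata) pairs from one prefix.
    client = FakeEtcd3Client()
    pairs = list(client.get_prefix('a_key'))
    assert [meta.key for value, meta in pairs] == ['a_key_1', 'a_key_2', 'a_key_3']
    assert pairs[0][0] == 'a_key_1 value'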
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py
new file mode 100644
index 000000000..5f65c9f63
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_lastpass.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2016 Andrew Zenk <azenk@umn.edu>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from argparse import ArgumentParser
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+from ansible.errors import AnsibleError
+from ansible.module_utils import six
+from ansible.plugins.loader import lookup_loader
+from ansible_collections.community.general.plugins.lookup.lastpass import LPass, LPassException
+
+
+MOCK_ENTRIES = [{'username': 'user',
+ 'name': 'Mock Entry',
+ 'password': 't0pS3cret passphrase entry!',
+ 'url': 'https://localhost/login',
+ 'notes': 'Test\nnote with multiple lines.\n',
+ 'id': '0123456789'}]
+
+
+class MockLPass(LPass):
+
+ _mock_logged_out = False
+ _mock_disconnected = False
+
+ def _lookup_mock_entry(self, key):
+ for entry in MOCK_ENTRIES:
+ if key == entry['id'] or key == entry['name']:
+ return entry
+
+ def _run(self, args, stdin=None, expected_rc=0):
+ # Mock behavior of lpass executable
+ base_options = ArgumentParser(add_help=False)
+ base_options.add_argument('--color', default="auto", choices=['auto', 'always', 'never'])
+
+ p = ArgumentParser()
+ sp = p.add_subparsers(help='command', dest='subparser_name')
+
+ logout_p = sp.add_parser('logout', parents=[base_options], help='logout')
+ show_p = sp.add_parser('show', parents=[base_options], help='show entry details')
+
+ field_group = show_p.add_mutually_exclusive_group(required=True)
+ for field in MOCK_ENTRIES[0].keys():
+ field_group.add_argument("--{0}".format(field), default=False, action='store_true')
+ field_group.add_argument('--field', default=None)
+ show_p.add_argument('selector', help='Unique Name or ID')
+
+ args = p.parse_args(args)
+
+ def mock_exit(output='', error='', rc=0):
+ if rc != expected_rc:
+ raise LPassException(error)
+ return output, error
+
+ if args.color != 'never':
+ return mock_exit(error='Error: Mock only supports --color=never', rc=1)
+
+ if args.subparser_name == 'logout':
+ if self._mock_logged_out:
+ return mock_exit(error='Error: Not currently logged in', rc=1)
+
+ logged_in_error = 'Are you sure you would like to log out? [Y/n]'
+ if stdin and stdin.lower() == 'n\n':
+ return mock_exit(output='Log out: aborted.', error=logged_in_error, rc=1)
+ elif stdin and stdin.lower() == 'y\n':
+ return mock_exit(output='Log out: complete.', error=logged_in_error, rc=0)
+ else:
+ return mock_exit(error='Error: aborted response', rc=1)
+
+ if args.subparser_name == 'show':
+ if self._mock_logged_out:
+ return mock_exit(error='Error: Could not find decryption key.' +
+ ' Perhaps you need to login with `lpass login`.', rc=1)
+
+ if self._mock_disconnected:
+ return mock_exit(error='Error: Couldn\'t resolve host name.', rc=1)
+
+ mock_entry = self._lookup_mock_entry(args.selector)
+
+ if args.field:
+ return mock_exit(output=mock_entry.get(args.field, ''))
+ elif args.password:
+ return mock_exit(output=mock_entry.get('password', ''))
+ elif args.username:
+ return mock_exit(output=mock_entry.get('username', ''))
+ elif args.url:
+ return mock_exit(output=mock_entry.get('url', ''))
+ elif args.name:
+ return mock_exit(output=mock_entry.get('name', ''))
+ elif args.id:
+ return mock_exit(output=mock_entry.get('id', ''))
+ elif args.notes:
+ return mock_exit(output=mock_entry.get('notes', ''))
+
+ raise LPassException('We should never get here')
+
+
+class DisconnectedMockLPass(MockLPass):
+
+ _mock_disconnected = True
+
+
+class LoggedOutMockLPass(MockLPass):
+
+ _mock_logged_out = True
+
+
+class TestLPass(unittest.TestCase):
+
+ def setUp(self):
+ self.lookup = lookup_loader.get('community.general.lastpass')
+
+ def test_lastpass_cli_path(self):
+ lp = MockLPass(path='/dev/null')
+ self.assertEqual('/dev/null', lp.cli_path)
+
+ def test_lastpass_build_args_logout(self):
+ lp = MockLPass()
+ self.assertEqual(['logout', '--color=never'], lp._build_args("logout"))
+
+ def test_lastpass_logged_in_true(self):
+ lp = MockLPass()
+ self.assertTrue(lp.logged_in)
+
+ def test_lastpass_logged_in_false(self):
+ lp = LoggedOutMockLPass()
+ self.assertFalse(lp.logged_in)
+
+ def test_lastpass_show_disconnected(self):
+ lp = DisconnectedMockLPass()
+
+ with self.assertRaises(LPassException):
+ lp.get_field('0123456789', 'username')
+
+ def test_lastpass_show(self):
+ lp = MockLPass()
+ for entry in MOCK_ENTRIES:
+ entry_id = entry.get('id')
+ for k, v in six.iteritems(entry):
+ self.assertEqual(v.strip(), lp.get_field(entry_id, k))
+
+
+class TestLastpassPlugin(unittest.TestCase):
+
+ def setUp(self):
+ self.lookup = lookup_loader.get('community.general.lastpass')
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', new=MockLPass)
+ def test_lastpass_plugin_normal(self):
+ for entry in MOCK_ENTRIES:
+ entry_id = entry.get('id')
+ for k, v in six.iteritems(entry):
+ self.assertEqual(v.strip(),
+ self.lookup.run([entry_id], field=k)[0])
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', LoggedOutMockLPass)
+ def test_lastpass_plugin_logged_out(self):
+ entry = MOCK_ENTRIES[0]
+ entry_id = entry.get('id')
+ with self.assertRaises(AnsibleError):
+ self.lookup.run([entry_id], field='password')
+
+ @patch('ansible_collections.community.general.plugins.lookup.lastpass.LPass', DisconnectedMockLPass)
+ def test_lastpass_plugin_disconnected(self):
+ entry = MOCK_ENTRIES[0]
+ entry_id = entry.get('id')
+ with self.assertRaises(AnsibleError):
+ self.lookup.run([entry_id], field='password')
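MockLPass._run emulates the `lpass` executable by re-parsing the argument vector with argparse, so it accepts exactly the argument shapes the plugin builds. Calling it directly (a sketch) shows the round trip for a password lookup:

    # Sketch: the fake accepts the same argv the plugin would build.
    lp = MockLPass()
    output, error = lp._run(['show', '--password', '--color=never', '0123456789'])
    assert output == 't0pS3cret passphrase entry!'
    assert error == ''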
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py
new file mode 100644
index 000000000..4fa356276
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_manifold.py
@@ -0,0 +1,537 @@
+# Copyright (c) 2018, Arigato Machine Inc.
+# Copyright (c) 2018, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch, call
+from ansible.errors import AnsibleError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils import six
+from ansible.plugins.loader import lookup_loader
+from ansible_collections.community.general.plugins.lookup.manifold import ManifoldApiClient, ApiError
+import json
+import os
+
+
+API_FIXTURES = {
+ 'https://api.marketplace.manifold.co/v1/resources':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ },
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?label=resource-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?label=resource-2':
+ [
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-1':
+ [
+ {
+ "body": {
+ "label": "resource-2",
+ "name": "Resource 2"
+ },
+ "id": "rid-2"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?project_id=pid-2':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ },
+ {
+ "body": {
+ "label": "resource-3",
+ "name": "Resource 3"
+ },
+ "id": "rid-3"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1':
+ [
+ {
+ "body": {
+ "label": "resource-1",
+ "name": "Resource 1"
+ },
+ "id": "rid-1"
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/projects':
+ [
+ {
+ "body": {
+ "label": "project-1",
+ "name": "Project 1",
+ },
+ "id": "pid-1",
+ },
+ {
+ "body": {
+ "label": "project-2",
+ "name": "Project 2",
+ },
+ "id": "pid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/projects?label=project-2':
+ [
+ {
+ "body": {
+ "label": "project-2",
+ "name": "Project 2",
+ },
+ "id": "pid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1':
+ [
+ {
+ "body": {
+ "resource_id": "rid-1",
+ "values": {
+ "RESOURCE_TOKEN_1": "token-1",
+ "RESOURCE_TOKEN_2": "token-2"
+ }
+ },
+ "id": "cid-1",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-2':
+ [
+ {
+ "body": {
+ "resource_id": "rid-2",
+ "values": {
+ "RESOURCE_TOKEN_3": "token-3",
+ "RESOURCE_TOKEN_4": "token-4"
+ }
+ },
+ "id": "cid-2",
+ }
+ ],
+ 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-3':
+ [
+ {
+ "body": {
+ "resource_id": "rid-3",
+ "values": {
+ "RESOURCE_TOKEN_1": "token-5",
+ "RESOURCE_TOKEN_2": "token-6"
+ }
+ },
+ "id": "cid-3",
+ }
+ ],
+ 'https://api.identity.manifold.co/v1/teams':
+ [
+ {
+ "id": "tid-1",
+ "body": {
+ "name": "Team 1",
+ "label": "team-1"
+ }
+ },
+ {
+ "id": "tid-2",
+ "body": {
+ "name": "Team 2",
+ "label": "team-2"
+ }
+ }
+ ]
+}
+
+
+def mock_fixture(open_url_mock, fixture=None, data=None, headers=None):
+ if not headers:
+ headers = {}
+ if fixture:
+ data = json.dumps(API_FIXTURES[fixture])
+ if 'content-type' not in headers:
+ headers['content-type'] = 'application/json'
+
+ open_url_mock.return_value.read.return_value = data
+ open_url_mock.return_value.headers = headers
+
+
+class TestManifoldApiClient(unittest.TestCase):
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_sends_default_headers(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint')
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_decodes_json(self, open_url_mock):
+ mock_fixture(open_url_mock, fixture='https://api.marketplace.manifold.co/v1/resources')
+ client = ManifoldApiClient('token-123')
+ self.assertIsInstance(client.request('marketplace', 'resources'), list)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_streams_text(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello', headers={'content-type': "text/plain"})
+ client = ManifoldApiClient('token-123')
+ self.assertEqual('hello', client.request('test', 'endpoint'))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_processes_parameterized_headers(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint', headers={'X-HEADER': 'MANIFOLD'})
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123',
+ 'X-HEADER': 'MANIFOLD'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_passes_arbitrary_parameters(self, open_url_mock):
+ mock_fixture(open_url_mock, data='hello')
+ client = ManifoldApiClient('token-123')
+ client.request('test', 'endpoint', use_proxy=False, timeout=5)
+ open_url_mock.assert_called_with('https://api.test.manifold.co/v1/endpoint',
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0',
+ use_proxy=False, timeout=5)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_incorrect_json(self, open_url_mock):
+ mock_fixture(open_url_mock, data='noJson', headers={'content-type': "application/json"})
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('JSON response can\'t be parsed while requesting https://api.test.manifold.co/v1/endpoint:\n'
+ 'noJson',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_status_500(self, open_url_mock):
+ open_url_mock.side_effect = HTTPError('https://api.test.manifold.co/v1/endpoint',
+ 500, 'Server error', {}, six.StringIO('ERROR'))
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Server returned: HTTP Error 500: Server error while requesting '
+ 'https://api.test.manifold.co/v1/endpoint:\nERROR',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_bad_url(self, open_url_mock):
+ open_url_mock.side_effect = URLError('URL is invalid')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Failed lookup url for https://api.test.manifold.co/v1/endpoint : <url'
+ 'open error URL is invalid>',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_ssl_error(self, open_url_mock):
+ open_url_mock.side_effect = SSLValidationError('SSL Error')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Error validating the server\'s certificate for https://api.test.manifold.co/v1/endpoint: '
+ 'SSL Error',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_request_raises_on_connection_error(self, open_url_mock):
+ open_url_mock.side_effect = ConnectionError('Unknown connection error')
+ client = ManifoldApiClient('token-123')
+ with self.assertRaises(ApiError) as context:
+ client.request('test', 'endpoint')
+ self.assertEqual('Error connecting to https://api.test.manifold.co/v1/endpoint: Unknown connection error',
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_get_all(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_filter_label(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources?label=resource-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources(label='resource-1'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_resources_filter_team_and_project(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_resources(team_id='tid-1', project_id='pid-1'))
+ args, kwargs = open_url_mock.call_args
+ url_called = args[0]
+ # Dict ordering is not guaranteed, so the querystring parameters may appear in any order
+ self.assertIn('team_id=tid-1', url_called)
+ self.assertIn('project_id=pid-1', url_called)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_teams_get_all(self, open_url_mock):
+ url = 'https://api.identity.manifold.co/v1/teams'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_teams())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_teams_filter_label(self, open_url_mock):
+ url = 'https://api.identity.manifold.co/v1/teams'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url][1:2], client.get_teams(label='team-2'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_projects_get_all(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/projects'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_projects())
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_projects_filter_label(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/projects?label=project-2'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_projects(label='project-2'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.open_url')
+ def test_get_credentials(self, open_url_mock):
+ url = 'https://api.marketplace.manifold.co/v1/credentials?resource_id=rid-1'
+ mock_fixture(open_url_mock, fixture=url)
+ client = ManifoldApiClient('token-123')
+ self.assertListEqual(API_FIXTURES[url], client.get_credentials(resource_id='rid-1'))
+ open_url_mock.assert_called_with(url,
+ headers={'Accept': '*/*', 'Authorization': 'Bearer token-123'},
+ http_agent='python-manifold-ansible-1.0.0')
+
+
+class TestLookupModule(unittest.TestCase):
+ def setUp(self):
+ self.lookup = lookup_loader.get('community.general.manifold')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_all(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2',
+ 'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_one_resource(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?label=resource-2']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run(['resource-2'], api_token='token-123'))
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None, label='resource-2')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_two_resources(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2',
+ 'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run(['resource-1', 'resource-2'], api_token='token-123'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.display')
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_get_resources_with_same_credential_names(self, client_mock, display_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-5',
+ 'RESOURCE_TOKEN_2': 'token-6'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-2']
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects?label=project-2']
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-2'))
+ client_mock.assert_called_with('token-123')
+ display_mock.warning.assert_has_calls([
+ call("'RESOURCE_TOKEN_1' with label 'resource-1' was replaced by resource data with label 'resource-3'"),
+ call("'RESOURCE_TOKEN_2' with label 'resource-1' was replaced by resource data with label 'resource-3'")],
+ any_order=True
+ )
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-2')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_team(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1']
+ client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id=None)
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_project(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_3': 'token-3',
+ 'RESOURCE_TOKEN_4': 'token-4'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?project_id=pid-1']
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', project='project-1'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id=None, project_id='pid-1')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_filter_by_team_and_project(self, client_mock):
+ expected_result = [{'RESOURCE_TOKEN_1': 'token-1',
+ 'RESOURCE_TOKEN_2': 'token-2'
+ }]
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources?team_id=tid-1&project_id=pid-1']
+ client_mock.return_value.get_teams.return_value = API_FIXTURES['https://api.identity.manifold.co/v1/teams'][0:1]
+ client_mock.return_value.get_projects.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/projects'][0:1]
+ client_mock.return_value.get_credentials.side_effect = lambda x: API_FIXTURES['https://api.marketplace.manifold.co/v1/'
+ 'credentials?resource_id={0}'.format(x)]
+ self.assertListEqual(expected_result, self.lookup.run([], api_token='token-123', team='team-1', project='project-1'))
+ client_mock.assert_called_with('token-123')
+ client_mock.return_value.get_resources.assert_called_with(team_id='tid-1', project_id='pid-1')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_team_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_teams.return_value = []
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123', team='no-team')
+ self.assertEqual("Team 'no-team' does not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_project_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_projects.return_value = []
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123', project='no-project')
+ self.assertEqual("Project 'no-project' does not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raise_resource_doesnt_exist(self, client_mock):
+ client_mock.return_value.get_resources.return_value = API_FIXTURES['https://api.marketplace.manifold.co/v1/resources']
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run(['resource-1', 'no-resource-1', 'no-resource-2'], api_token='token-123')
+ self.assertEqual("Resource(s) no-resource-1, no-resource-2 do not exist",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_catch_api_error(self, client_mock):
+ client_mock.side_effect = ApiError('Generic error')
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123')
+ self.assertEqual("API Error: Generic error",
+ str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_catch_unhandled_exception(self, client_mock):
+ client_mock.side_effect = Exception('Unknown error')
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([], api_token='token-123')
+ self.assertTrue('Exception: Unknown error' in str(context.exception))
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_falls_back_to_env_var(self, client_mock):
+ client_mock.return_value.get_resources.return_value = []
+ client_mock.return_value.get_credentials.return_value = []
+ try:
+ os.environ['MANIFOLD_API_TOKEN'] = 'token-321'
+ self.lookup.run([])
+ finally:
+ os.environ.pop('MANIFOLD_API_TOKEN', None)
+ client_mock.assert_called_with('token-321')
+
+ @patch('ansible_collections.community.general.plugins.lookup.manifold.ManifoldApiClient')
+ def test_raises_on_no_token(self, client_mock):
+ client_mock.return_value.get_resources.return_value = []
+ client_mock.return_value.get_credentials.return_value = []
+ os.environ.pop('MANIFOLD_API_TOKEN', None)
+ with self.assertRaises(AnsibleError) as context:
+ self.lookup.run([])
+ assert 'api_token' in str(context.exception)
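+
+# For reference, the lookup exercised above is typically called from a playbook
+# like this (hypothetical token and project names):
+#
+#   vars:
+#     resources: "{{ lookup('community.general.manifold', api_token='token-123', project='project-1') }}"
+#
+# and it falls back to the MANIFOLD_API_TOKEN environment variable when no
+# api_token argument is given, as the last two tests check.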
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_merge_variables.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_merge_variables.py
new file mode 100644
index 000000000..5085797b3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_merge_variables.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Thales Netherlands
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.mock.loader import DictDataLoader
+
+from ansible.plugins import AnsiblePlugin
+from ansible.template import Templar
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+from ansible_collections.community.general.plugins.lookup import merge_variables
+
+
+class TestMergeVariablesLookup(unittest.TestCase):
+ def setUp(self):
+ self.loader = DictDataLoader({})
+ self.templar = Templar(loader=self.loader, variables={})
+ self.merge_vars_lookup = merge_variables.LookupModule(loader=self.loader, templar=self.templar)
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[['item1'], ['item3']])
+ def test_merge_list(self, mock_template, mock_get_option, mock_set_options):
+ results = self.merge_vars_lookup.run(['__merge_list'], {
+ 'testlist1__merge_list': ['item1'],
+ 'testlist2': ['item2'],
+ 'testlist3__merge_list': ['item3']
+ })
+
+ self.assertEqual(results, [['item1', 'item3']])
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[['initial_item'], 'ignore', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[['item1'], ['item3']])
+ def test_merge_list_with_initial_value(self, mock_template, mock_get_option, mock_set_options):
+ results = self.merge_vars_lookup.run(['__merge_list'], {
+ 'testlist1__merge_list': ['item1'],
+ 'testlist2': ['item2'],
+ 'testlist3__merge_list': ['item3']
+ })
+
+ self.assertEqual(results, [['initial_item', 'item1', 'item3']])
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
+ {'item2': 'test', 'list_item': ['test2']}])
+ def test_merge_dict(self, mock_template, mock_get_option, mock_set_options):
+ results = self.merge_vars_lookup.run(['__merge_dict'], {
+ 'testdict1__merge_dict': {
+ 'item1': 'test',
+ 'list_item': ['test1']
+ },
+ 'testdict2__merge_dict': {
+ 'item2': 'test',
+ 'list_item': ['test2']
+ }
+ })
+
+ self.assertEqual(results, [
+ {
+ 'item1': 'test',
+ 'item2': 'test',
+ 'list_item': ['test1', 'test2']
+ }
+ ])
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[{'initial_item': 'random value', 'list_item': ['test0']},
+ 'ignore', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
+ {'item2': 'test', 'list_item': ['test2']}])
+ def test_merge_dict_with_initial_value(self, mock_template, mock_get_option, mock_set_options):
+ results = self.merge_vars_lookup.run(['__merge_dict'], {
+ 'testdict1__merge_dict': {
+ 'item1': 'test',
+ 'list_item': ['test1']
+ },
+ 'testdict2__merge_dict': {
+ 'item2': 'test',
+ 'list_item': ['test2']
+ }
+ })
+
+ self.assertEqual(results, [
+ {
+ 'initial_item': 'random value',
+ 'item1': 'test',
+ 'item2': 'test',
+ 'list_item': ['test0', 'test1', 'test2']
+ }
+ ])
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'warn', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}])
+ @patch.object(Display, 'warning')
+ def test_merge_dict_non_unique_warning(self, mock_display, mock_template, mock_get_option, mock_set_options):
+ results = self.merge_vars_lookup.run(['__merge_non_unique'], {
+ 'testdict1__merge_non_unique': {'item': 'value1'},
+ 'testdict2__merge_non_unique': {'item': 'value2'}
+ })
+
+ self.assertTrue(mock_display.called)
+ self.assertEqual(results, [{'item': 'value2'}])
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'error', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[{'item': 'value1'}, {'item': 'value2'}])
+ def test_merge_dict_non_unique_error(self, mock_template, mock_get_option, mock_set_options):
+ with self.assertRaises(AnsibleError):
+ self.merge_vars_lookup.run(['__merge_non_unique'], {
+ 'testdict1__merge_non_unique': {'item': 'value1'},
+ 'testdict2__merge_non_unique': {'item': 'value2'}
+ })
+
+ @patch.object(AnsiblePlugin, 'set_options')
+ @patch.object(AnsiblePlugin, 'get_option', side_effect=[None, 'ignore', 'suffix'])
+ @patch.object(Templar, 'template', side_effect=[{'item1': 'test', 'list_item': ['test1']},
+ ['item2', 'item3']])
+ def test_merge_list_and_dict(self, mock_template, mock_get_option, mock_set_options):
+ with self.assertRaises(AnsibleError):
+ self.merge_vars_lookup.run(['__merge_var'], {
+ 'testlist__merge_var': {
+ 'item1': 'test',
+ 'list_item': ['test1']
+ },
+ 'testdict__merge_var': ['item2', 'item3']
+ })
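+
+# The suffix convention exercised above, sketched as playbook variables
+# (hypothetical names): every variable whose name ends in the suffix passed to
+# the lookup is merged, and all other variables are ignored.
+#
+#   admin_users__merge_list: ['alice']
+#   extra_users__merge_list: ['bob']
+#   merged: "{{ lookup('community.general.merge_variables', '__merge_list') }}"  # -> ['alice', 'bob']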
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py
new file mode 100644
index 000000000..ab7f3def2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_onepassword.py
@@ -0,0 +1,268 @@
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+import itertools
+import json
+import pytest
+
+from .onepassword_conftest import ( # noqa: F401, pylint: disable=unused-import
+ OP_VERSION_FIXTURES,
+ fake_op,
+ opv1,
+ opv2,
+)
+from .onepassword_common import MOCK_ENTRIES
+
+from ansible.errors import AnsibleLookupError
+from ansible.plugins.loader import lookup_loader
+from ansible_collections.community.general.plugins.lookup.onepassword import (
+ OnePassCLIv1,
+ OnePassCLIv2,
+)
+
+
+@pytest.mark.parametrize(
+ ("args", "rc", "expected_call_args", "expected_call_kwargs", "expected"),
+ (
+ ([], 0, ["get", "account"], {"ignore_errors": True}, True,),
+ ([], 1, ["get", "account"], {"ignore_errors": True}, False,),
+ (["acme"], 1, ["get", "account", "--account", "acme.1password.com"], {"ignore_errors": True}, False,),
+ )
+)
+def test_assert_logged_in_v1(mocker, args, rc, expected_call_args, expected_call_kwargs, expected):
+ mocker.patch.object(OnePassCLIv1, "_run", return_value=[rc, "", ""])
+
+ op_cli = OnePassCLIv1(*args)
+ result = op_cli.assert_logged_in()
+
+ op_cli._run.assert_called_with(expected_call_args, **expected_call_kwargs)
+ assert result == expected
+
+
+def test_full_signin_v1(mocker):
+ mocker.patch.object(OnePassCLIv1, "_run", return_value=[0, "", ""])
+
+ op_cli = OnePassCLIv1(
+ subdomain="acme",
+ username="bob@acme.com",
+ secret_key="SECRET",
+ master_password="ONEKEYTORULETHEMALL",
+ )
+ result = op_cli.full_signin()
+
+ op_cli._run.assert_called_with([
+ "signin",
+ "acme.1password.com",
+ b"bob@acme.com",
+ b"SECRET",
+ "--raw",
+ ], command_input=b"ONEKEYTORULETHEMALL")
+ assert result == [0, "", ""]
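+
+# Note: both CLI versions hand the master password to the op binary via
+# command_input (stdin, as bytes) rather than as a command-line argument, so
+# the secret never shows up in the process argument list.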
+
+
+@pytest.mark.parametrize(
+ ("args", "out", "expected_call_args", "expected_call_kwargs", "expected"),
+ (
+ ([], "list of accounts", ["account", "get"], {"ignore_errors": True}, True,),
+ (["acme"], "list of accounts", ["account", "get", "--account", "acme.1password.com"], {"ignore_errors": True}, True,),
+ ([], "", ["account", "list"], {}, False,),
+ )
+)
+def test_assert_logged_in_v2(mocker, args, out, expected_call_args, expected_call_kwargs, expected):
+ mocker.patch.object(OnePassCLIv2, "_run", return_value=[0, out, ""])
+ op_cli = OnePassCLIv2(*args)
+ result = op_cli.assert_logged_in()
+
+ op_cli._run.assert_called_with(expected_call_args, **expected_call_kwargs)
+ assert result == expected
+
+
+def test_full_signin_v2(mocker):
+ mocker.patch.object(OnePassCLIv2, "_run", return_value=[0, "", ""])
+
+ op_cli = OnePassCLIv2(
+ subdomain="acme",
+ username="bob@acme.com",
+ secret_key="SECRET",
+ master_password="ONEKEYTORULETHEMALL",
+ )
+ result = op_cli.full_signin()
+
+ op_cli._run.assert_called_with(
+ [
+ "account", "add", "--raw",
+ "--address", "acme.1password.com",
+ "--email", b"bob@acme.com",
+ "--signin",
+ ],
+ command_input=b"ONEKEYTORULETHEMALL",
+ environment_update={'OP_SECRET_KEY': 'SECRET'},
+ )
+ assert result == [0, "", ""]
+
+
+@pytest.mark.parametrize(
+ ("version", "version_class"),
+ (
+ ("1.17.2", OnePassCLIv1),
+ ("2.27.4", OnePassCLIv2),
+ )
+)
+def test_op_correct_cli_class(fake_op, version, version_class):
+ op = fake_op(version)
+ assert op._cli.version == version
+ assert isinstance(op._cli, version_class)
+
+
+def test_op_unsupported_cli_version(fake_op):
+ with pytest.raises(AnsibleLookupError, match="is unsupported"):
+ fake_op("99.77.77")
+
+
+@pytest.mark.parametrize("op_fixture", OP_VERSION_FIXTURES)
+def test_op_set_token_with_config(op_fixture, mocker, request):
+ op = request.getfixturevalue(op_fixture)
+ token = "F5417F77529B41B595D7F9D6F76EC057"
+ mocker.patch("os.path.isfile", return_value=True)
+ mocker.patch.object(op._cli, "signin", return_value=(0, token + "\n", ""))
+
+ op.set_token()
+
+ assert op.token == token
+
+
+@pytest.mark.parametrize(
+ ("op_fixture", "message"),
+ [
+ (op, value)
+ for op in OP_VERSION_FIXTURES
+ for value in
+ (
+ "Missing required parameters",
+ "The operation is unauthorized",
+ )
+ ]
+)
+def test_op_set_token_with_config_missing_args(op_fixture, message, request, mocker):
+ op = request.getfixturevalue(op_fixture)
+ mocker.patch("os.path.isfile", return_value=True)
+ mocker.patch.object(op._cli, "signin", return_value=(99, "", ""), side_effect=AnsibleLookupError(message))
+ mocker.patch.object(op._cli, "full_signin", return_value=(0, "", ""))
+
+ with pytest.raises(AnsibleLookupError, match=message):
+ op.set_token()
+
+ op._cli.full_signin.assert_not_called()
+
+
+@pytest.mark.parametrize("op_fixture", OP_VERSION_FIXTURES)
+def test_op_set_token_with_config_full_signin(op_fixture, request, mocker):
+ op = request.getfixturevalue(op_fixture)
+ mocker.patch("os.path.isfile", return_value=True)
+ mocker.patch.object(op._cli, "signin", return_value=(99, "", ""), side_effect=AnsibleLookupError("Raised intentionally"))
+ mocker.patch.object(op._cli, "full_signin", return_value=(0, "", ""))
+
+ op.set_token()
+
+ op._cli.full_signin.assert_called()
+
+
+@pytest.mark.parametrize("op_fixture", OP_VERSION_FIXTURES)
+def test_op_set_token_without_config(op_fixture, request, mocker):
+ op = request.getfixturevalue(op_fixture)
+ token = "B988E8A2680A4A348962751A96861FA1"
+ mocker.patch("os.path.isfile", return_value=False)
+ mocker.patch.object(op._cli, "signin", return_value=(99, "", ""))
+ mocker.patch.object(op._cli, "full_signin", return_value=(0, token + "\n", ""))
+
+ op.set_token()
+
+ op._cli.signin.assert_not_called()
+ assert op.token == token
+
+
+@pytest.mark.parametrize(
+ ("op_fixture", "login_status"),
+ [(op, value) for op in OP_VERSION_FIXTURES for value in [False, True]]
+)
+def test_op_assert_logged_in(mocker, login_status, op_fixture, request):
+ op = request.getfixturevalue(op_fixture)
+ mocker.patch.object(op._cli, "assert_logged_in", return_value=login_status)
+ mocker.patch.object(op, "set_token")
+
+ op.assert_logged_in()
+
+ op._cli.assert_logged_in.assert_called_once()
+ assert op.logged_in == login_status
+
+ if not login_status:
+ op.set_token.assert_called_once()
+
+
+@pytest.mark.parametrize("op_fixture", OP_VERSION_FIXTURES)
+def test_op_get_raw(mocker, op_fixture, request):
+ op = request.getfixturevalue(op_fixture)
+ mocker.patch.object(op._cli, "get_raw", return_value=[99, "RAW OUTPUT", ""])
+
+ result = op.get_raw("some item")
+
+ assert result == "RAW OUTPUT"
+ op._cli.get_raw.assert_called_once()
+
+
+@pytest.mark.parametrize(
+ ("op_fixture", "output", "expected"),
+ (
+ list(itertools.chain([op], d))
+ for op in OP_VERSION_FIXTURES
+ for d in [
+ ("RAW OUTPUT", "RAW OUTPUT"),
+ (None, ""),
+ ("", ""),
+ ]
+ )
+)
+def test_op_get_field(mocker, op_fixture, output, expected, request):
+ op = request.getfixturevalue(op_fixture)
+ mocker.patch.object(op, "get_raw", return_value=output)
+ mocker.patch.object(op._cli, "_parse_field", return_value=output)
+
+ result = op.get_field("some item", "some field")
+
+ assert result == expected
+
+
+# This test sometimes fails on older Python versions because the collected
+# parametrized tests can differ between workers; sort the fixture data to keep
+# collection deterministic.
+# https://github.com/pytest-dev/pytest-xdist/issues/432
+@pytest.mark.parametrize(
+ ("cli_class", "vault", "queries", "kwargs", "output", "expected"),
+ (
+ (_cli_class, item["vault_name"], item["queries"], item.get("kwargs", {}), item["output"], item["expected"])
+ for _cli_class in sorted(MOCK_ENTRIES, key=operator.attrgetter("__name__"))
+ for item in MOCK_ENTRIES[_cli_class]
+ )
+)
+def test_op_lookup(mocker, cli_class, vault, queries, kwargs, output, expected):
+ mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass._get_cli_class", cli_class)
+ mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePass.assert_logged_in", return_value=True)
+ mocker.patch("ansible_collections.community.general.plugins.lookup.onepassword.OnePassCLIBase._run", return_value=(0, json.dumps(output), ""))
+
+ op_lookup = lookup_loader.get("community.general.onepassword")
+ result = op_lookup.run(queries, vault=vault, **kwargs)
+
+ assert result == expected
+
+
+@pytest.mark.parametrize("op_fixture", OP_VERSION_FIXTURES)
+def test_signin(op_fixture, request):
+ op = request.getfixturevalue(op_fixture)
+ op._cli.master_password = "master_pass"
+ op._cli.signin()
+ op._cli._run.assert_called_once_with(['signin', '--raw'], command_input=b"master_pass")
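+
+# For reference, the lookup driven by test_op_lookup above is typically
+# invoked as (hypothetical item and vault names):
+#
+#   "{{ lookup('community.general.onepassword', 'My Item', vault='Private') }}"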
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_revbitspss.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_revbitspss.py
new file mode 100644
index 000000000..510999206
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_revbitspss.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, RevBits <info@revbits.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.general.tests.unit.compat.mock import (
+ patch,
+ MagicMock,
+)
+from ansible_collections.community.general.plugins.lookup import revbitspss
+from ansible.plugins.loader import lookup_loader
+
+
+class MockPamSecrets(MagicMock):
+ RESPONSE = 'dummy value'
+
+ def get_pam_secret(self, path):
+ return self.RESPONSE
+
+
+class TestLookupModule(TestCase):
+ def setUp(self):
+ revbitspss.ANOTHER_LIBRARY_IMPORT_ERROR = None
+ self.lookup = lookup_loader.get("community.general.revbitspss")
+
+ @patch(
+ "ansible_collections.community.general.plugins.lookup.revbitspss.LookupModule.Client",
+ MockPamSecrets(),
+ )
+ def test_get_pam_secret(self):
+ terms = ['dummy secret']
+ variables = []
+ kwargs = {
+ "base_url": 'https://dummy.url',
+ "api_key": 'dummy'
+ }
+ self.assertListEqual(
+ [{'dummy secret': 'dummy value'}],
+ self.lookup.run(terms, variables, **kwargs)
+ )
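+
+# The lookup resolves every term through get_pam_secret and returns one
+# {term: secret_value} mapping per term, which is what the assertion above
+# relies on.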
diff --git a/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py b/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py
new file mode 100644
index 000000000..47ca79a69
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/lookup/test_tss.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Adam Migus <adam@migus.org>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.unittest import TestCase
+from ansible_collections.community.general.tests.unit.compat.mock import (
+ patch,
+ DEFAULT,
+ MagicMock,
+)
+from ansible_collections.community.general.plugins.lookup import tss
+from ansible.plugins.loader import lookup_loader
+
+
+TSS_IMPORT_PATH = 'ansible_collections.community.general.plugins.lookup.tss'
+
+
+def make_absolute(name):
+ return '.'.join([TSS_IMPORT_PATH, name])
+
+
+class SecretServerError(Exception):
+ def __init__(self):
+ self.message = ''
+
+
+class MockSecretServer(MagicMock):
+ RESPONSE = '{"foo": "bar"}'
+
+ def get_secret_json(self, path):
+ return self.RESPONSE
+
+
+class MockFaultySecretServer(MagicMock):
+ def get_secret_json(self, path):
+ raise SecretServerError
+
+
+@patch(make_absolute('SecretServer'), MockSecretServer())
+class TestTSSClient(TestCase):
+ def setUp(self):
+ self.server_params = {
+ 'base_url': '',
+ 'username': '',
+ 'domain': '',
+ 'password': '',
+ 'api_path_uri': '',
+ 'token_path_uri': '',
+ }
+
+ def test_from_params(self):
+ with patch(make_absolute('HAS_TSS_AUTHORIZER'), False):
+ self.assert_client_version('v0')
+
+ with patch.dict(self.server_params, {'domain': 'foo'}):
+ with self.assertRaises(tss.AnsibleError):
+ self._get_client()
+
+ with patch.multiple(TSS_IMPORT_PATH,
+ HAS_TSS_AUTHORIZER=True,
+ PasswordGrantAuthorizer=DEFAULT,
+ DomainPasswordGrantAuthorizer=DEFAULT):
+
+ self.assert_client_version('v1')
+
+ with patch.dict(self.server_params, {'domain': 'foo'}):
+ self.assert_client_version('v1')
+
+ def assert_client_version(self, version):
+ version_to_class = {
+ 'v0': tss.TSSClientV0,
+ 'v1': tss.TSSClientV1
+ }
+
+ client = self._get_client()
+ self.assertIsInstance(client, version_to_class[version])
+
+ def _get_client(self):
+ return tss.TSSClient.from_params(**self.server_params)
+
+
+class TestLookupModule(TestCase):
+ VALID_TERMS = [1]
+ INVALID_TERMS = ['foo']
+
+ def setUp(self):
+ self.lookup = lookup_loader.get("community.general.tss")
+
+ @patch.multiple(TSS_IMPORT_PATH,
+ HAS_TSS_SDK=False,
+ SecretServer=MockSecretServer)
+ def test_missing_sdk(self):
+ with self.assertRaises(tss.AnsibleError):
+ self._run_lookup(self.VALID_TERMS)
+
+ @patch.multiple(TSS_IMPORT_PATH,
+ HAS_TSS_SDK=True,
+ SecretServerError=SecretServerError)
+ def test_get_secret_json(self):
+ with patch(make_absolute('SecretServer'), MockSecretServer):
+ self.assertListEqual([MockSecretServer.RESPONSE], self._run_lookup(self.VALID_TERMS))
+
+ with self.assertRaises(tss.AnsibleOptionsError):
+ self._run_lookup(self.INVALID_TERMS)
+
+ with patch(make_absolute('SecretServer'), MockFaultySecretServer):
+ with self.assertRaises(tss.AnsibleError):
+ self._run_lookup(self.VALID_TERMS)
+
+ def _run_lookup(self, terms, variables=None, **kwargs):
+ variables = variables or []
+ kwargs = kwargs or {"base_url": "dummy", "username": "dummy", "password": "dummy"}
+
+ return self.lookup.run(terms, variables, **kwargs)
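+
+# What test_from_params pins down: without the SDK authorizer classes
+# (HAS_TSS_AUTHORIZER False) TSSClient.from_params builds the v0 client and a
+# domain raises an error; with the authorizers available it builds the v1
+# client, with or without a domain.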
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py
new file mode 100644
index 000000000..5a5188669
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_backoff.py
@@ -0,0 +1,54 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import random
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.cloud import _exponential_backoff, \
+ _full_jitter_backoff
+
+
+class ExponentialBackoffStrategyTestCase(unittest.TestCase):
+ def test_no_retries(self):
+ strategy = _exponential_backoff(retries=0)
+ result = list(strategy())
+ self.assertEqual(result, [], 'list should be empty')
+
+ def test_exponential_backoff(self):
+ strategy = _exponential_backoff(retries=5, delay=1, backoff=2)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16])
+
+ def test_max_delay(self):
+ strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=60)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16, 32, 60])
+
+ def test_max_delay_none(self):
+ strategy = _exponential_backoff(retries=7, delay=1, backoff=2, max_delay=None)
+ result = list(strategy())
+ self.assertEqual(result, [1, 2, 4, 8, 16, 32, 64])
+
+
+class FullJitterBackoffStrategyTestCase(unittest.TestCase):
+ def test_no_retries(self):
+ strategy = _full_jitter_backoff(retries=0)
+ result = list(strategy())
+ self.assertEqual(result, [], 'list should be empty')
+
+ def test_full_jitter(self):
+ retries = 5
+ seed = 1
+
+ r = random.Random(seed)
+ expected = [r.randint(0, 2**i) for i in range(0, retries)]
+
+ strategy = _full_jitter_backoff(
+ retries=retries, delay=1, _random=random.Random(seed))
+ result = list(strategy())
+
+ self.assertEqual(result, expected)
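+
+# The sequences asserted above follow delay * backoff**i, capped at max_delay
+# when one is given; a minimal sketch of the same progression (an assumed
+# helper for illustration, not the module's API):
+#
+#   def backoff_delays(retries, delay=1, backoff=2, max_delay=None):
+#       for i in range(retries):
+#           d = delay * backoff ** i
+#           yield d if max_delay is None else min(d, max_delay)
+#
+# The full-jitter variant instead draws randint(0, 2**i) per retry, which is
+# why test_full_jitter reproduces the expectation with an identically seeded
+# Random instance.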
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_scaleway.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_scaleway.py
new file mode 100644
index 000000000..dc53bc126
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/cloud/test_scaleway.py
@@ -0,0 +1,126 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.scaleway import SecretVariables, argon2
+
+
+class SecretVariablesTestCase(unittest.TestCase):
+ def test_dict_to_list(self):
+ source = dict(
+ attribute1="value1",
+ attribute2="value2"
+ )
+ expect = [
+ dict(key="attribute1", value="value1"),
+ dict(key="attribute2", value="value2")
+ ]
+
+ result = SecretVariables.dict_to_list(source)
+ result = sorted(result, key=lambda el: el['key'])
+ self.assertEqual(result, expect)
+
+ def test_list_to_dict_hashed(self):
+ source = [
+ dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"),
+ dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI")
+ ]
+ expect = dict(
+ secret1="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc",
+ secret2="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"
+ )
+
+ self.assertEqual(SecretVariables.list_to_dict(source, hashed=True), expect)
+
+ def test_list_to_dict(self):
+ source = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="value2")
+ ]
+ expect = dict(
+ secret1="value1",
+ secret2="value2"
+ )
+
+ self.assertEqual(SecretVariables.list_to_dict(source, hashed=False), expect)
+
+ @unittest.skipIf(argon2 is None, "Missing required 'argon2' library")
+ def test_decode_full(self):
+ source_secret = [
+ dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"),
+ dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"),
+ ]
+ source_value = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="value2"),
+ ]
+
+ expect = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="value2"),
+ ]
+
+ result = SecretVariables.decode(source_secret, source_value)
+ result = sorted(result, key=lambda el: el['key'])
+ self.assertEqual(result, expect)
+
+ @unittest.skipIf(argon2 is None, "Missing required 'argon2' library")
+ def test_decode_dict_divergent_values(self):
+ source_secret = [
+ dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"),
+ dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"),
+ ]
+ source_value = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="diverged_value2"),
+ ]
+
+ expect = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"),
+ ]
+
+ result = SecretVariables.decode(source_secret, source_value)
+ result = sorted(result, key=lambda el: el['key'])
+ self.assertEqual(result, expect)
+
+ @unittest.skipIf(argon2 is None, "Missing required 'argon2' library")
+ def test_decode_dict_missing_values_left(self):
+ source_secret = [
+ dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"),
+ ]
+ source_value = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="value2"),
+ ]
+
+ expect = [
+ dict(key="secret1", value="value1"),
+ ]
+
+ result = SecretVariables.decode(source_secret, source_value)
+ result = sorted(result, key=lambda el: el['key'])
+ self.assertEqual(result, expect)
+
+ @unittest.skipIf(argon2 is None, "Missing required 'argon2' library")
+ def test_decode_dict_missing_values_right(self):
+ source_secret = [
+ dict(key="secret1", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$NuZk+6UATHNFV78nFRXFvA$3kivcXfzNHI1c/4ZBpP8BeBSGhhI82NfOh4Dd48JJgc"),
+ dict(key="secret2", hashed_value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"),
+ ]
+ source_value = [
+ dict(key="secret1", value="value1"),
+ ]
+
+ expect = [
+ dict(key="secret1", value="value1"),
+ dict(key="secret2", value="$argon2id$v=19$m=65536,t=1,p=2$etGO/Z8ImYDeKr6uFsyPAQ$FbL5+hG/duDEpa8UCYqXpEUQ5EacKg6i2iAs+Dq4dAI"),
+ ]
+
+ result = SecretVariables.decode(source_secret, source_value)
+ result = sorted(result, key=lambda el: el['key'])
+ self.assertEqual(result, expect)
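+
+# The behaviour pinned down above: SecretVariables.decode keeps the plaintext
+# candidate when it still verifies against the stored argon2 hash, falls back
+# to the hashed value when the plaintext diverged or is missing, and drops
+# keys that exist only on the plaintext side.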
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py
new file mode 100644
index 000000000..2217dd39f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/conftest.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+from io import BytesIO
+
+import pytest
+
+import ansible.module_utils.basic
+from ansible.module_utils.six import PY3, string_types
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def stdin(mocker, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the stdin pytest fixture')
+
+ fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict'))
+ if PY3:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock())
+ mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin)
+ else:
+ mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin)
+
+ yield fake_stdin
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
+
+
+@pytest.fixture
+def am(stdin, request):
+ old_args = ansible.module_utils.basic._ANSIBLE_ARGS
+ ansible.module_utils.basic._ANSIBLE_ARGS = None
+ old_argv = sys.argv
+ sys.argv = ['ansible_unittest']
+
+ argspec = {}
+ if hasattr(request, 'param'):
+ if isinstance(request.param, dict):
+ argspec = request.param
+
+ am = ansible.module_utils.basic.AnsibleModule(
+ argument_spec=argspec,
+ )
+ am._name = 'ansible_unittest'
+
+ yield am
+
+ ansible.module_utils.basic._ANSIBLE_ARGS = old_args
+ sys.argv = old_argv
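+
+# Typical use of these fixtures in a test module (a minimal sketch; the
+# parametrized dict becomes the module arguments fed in on stdin):
+#
+#   @pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
+#   def test_something(am):
+#       assert am._name == 'ansible_unittest'
+#
+# Parametrizing 'am' indirectly with a dict instead feeds that dict to
+# AnsibleModule as the argument_spec.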
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py
new file mode 100644
index 000000000..037305d3f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_dict_comparison.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+# 2018.07.26 --- use DictComparison instead of GcpRequest
+#
+# Copyright (c) 2016, Tom Melendez <tom@supertom.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import are_different_dicts
+
+
+class HwcDictComparisonTestCase(unittest.TestCase):
+ def test_simple_no_difference(self):
+ value1 = {
+ 'foo': 'bar',
+ 'test': 'original'
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_simple_different(self):
+ value1 = {
+ 'foo': 'bar',
+ 'test': 'original'
+ }
+ value2 = {
+ 'foo': 'bar',
+ 'test': 'different'
+ }
+ value3 = {
+ 'test': 'original'
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_nested_dictionaries_no_difference(self):
+ value1 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ },
+ 'test': 'original'
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_nested_dictionaries_with_difference(self):
+ value1 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ },
+ 'test': 'original'
+ }
+ value2 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'baz'
+ },
+ 'bar': 'hello'
+ },
+ 'test': 'original'
+ }
+ value3 = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test'
+ },
+ 'bar': 'baz'
+ }
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_arrays_strings_no_difference(self):
+ value1 = {
+ 'foo': [
+ 'baz',
+ 'bar'
+ ]
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_arrays_strings_with_difference(self):
+ value1 = {
+ 'foo': [
+ 'baz',
+ 'bar',
+ ]
+ }
+
+ value2 = {
+ 'foo': [
+ 'baz',
+ 'hello'
+ ]
+ }
+ value3 = {
+ 'foo': [
+ 'bar',
+ ]
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
+
+ def test_arrays_dicts_with_no_difference(self):
+ value1 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ },
+ {
+ 'different': 'dict'
+ }
+ ]
+ }
+
+ self.assertFalse(are_different_dicts(value1, value1))
+
+ def test_arrays_dicts_with_difference(self):
+ value1 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ },
+ {
+ 'different': 'dict'
+ }
+ ]
+ }
+ value2 = {
+ 'foo': [
+ {
+ 'test': 'value2',
+ 'foo': 'bar2'
+ },
+ ]
+ }
+ value3 = {
+ 'foo': [
+ {
+ 'test': 'value',
+ 'foo': 'bar'
+ }
+ ]
+ }
+
+ self.assertTrue(are_different_dicts(value1, value2))
+ self.assertTrue(are_different_dicts(value1, value3))
+ self.assertTrue(are_different_dicts(value2, value3))
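+
+# Taken together these cases pin down that are_different_dicts flags a changed
+# nested value, a missing key, and any change in list contents, on either side
+# of the comparison.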
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py
new file mode 100644
index 000000000..1344496b1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/hwc/test_hwc_utils.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.hwc_utils import (HwcModuleException, navigate_value)
+
+
+class HwcUtilsTestCase(unittest.TestCase):
+ def test_navigate_value(self):
+ value = {
+ 'foo': {
+ 'quiet': {
+ 'tree': 'test',
+ "trees": [0, 1]
+ },
+ }
+ }
+
+ self.assertEqual(navigate_value(value, ["foo", "quiet", "tree"]),
+ "test")
+
+ self.assertEqual(
+ navigate_value(value, ["foo", "quiet", "trees"],
+ {"foo.quiet.trees": 1}),
+ 1)
+
+ self.assertRaisesRegexp(HwcModuleException,
+ r".* key\(q\) is not exist in dict",
+ navigate_value, value, ["foo", "q", "tree"])
+
+ self.assertRaisesRegexp(HwcModuleException,
+ r".* the index is out of list",
+ navigate_value, value,
+ ["foo", "quiet", "trees"],
+ {"foo.quiet.trees": 2})
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py
new file mode 100644
index 000000000..9a816cfe2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_connect.py
@@ -0,0 +1,165 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from itertools import count
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import (
+ get_token,
+ KeycloakError,
+)
+from ansible.module_utils.six import StringIO
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+
+module_params_creds = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'validate_certs': True,
+ 'auth_realm': 'master',
+ 'client_id': 'admin-cli',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'client_secret': None,
+}
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ try:
+ call_number = get_id_call_count.__next__()
+ except AttributeError:
+ # Python 2 iterators expose next() instead of __next__().
+ call_number = get_id_call_count.next()
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+@pytest.fixture()
+def mock_good_connection(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_connect_to_keycloak_with_creds(mock_good_connection):
+ keycloak_header = get_token(module_params_creds)
+ assert keycloak_header == {
+ 'Authorization': 'Bearer alongtoken',
+ 'Content-Type': 'application/json'
+ }
+
+
+def test_connect_to_keycloak_with_token(mock_good_connection):
+ module_params_token = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'validate_certs': True,
+ 'client_id': 'admin-cli',
+ 'token': "alongtoken"
+ }
+ keycloak_header = get_token(module_params_token)
+ assert keycloak_header == {
+ 'Authorization': 'Bearer alongtoken',
+ 'Content-Type': 'application/json'
+ }
+
+
+@pytest.fixture()
+def mock_bad_json_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token":'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_bad_json_returned(mock_bad_json_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(module_params_creds)
+ # Only the message prefix is checked: the wrapped ValueError text differs
+ # between Python 2.6, 2.7 and 3.x.
+ assert (
+ 'API returned invalid JSON when trying to obtain access token from '
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token: '
+ ) in str(raised_error.value)
+
+
+def raise_401(url):
+ def _raise_401():
+ raise HTTPError(url=url, code=401, msg='Unauthorized', hdrs='', fp=StringIO(''))
+ return _raise_401
+
+
+@pytest.fixture()
+def mock_401_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': raise_401(
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token'),
+ }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_error_returned(mock_401_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(module_params_creds)
+ assert str(raised_error.value) == (
+ 'Could not obtain access token from http://keycloak.url'
+ '/auth/realms/master/protocol/openid-connect/token: '
+ 'HTTP Error 401: Unauthorized'
+ )
+
+
+@pytest.fixture()
+def mock_json_without_token_returned(mocker):
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"not_token": "It is not a token"}'), }
+ return mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+def test_json_without_token_returned(mock_json_without_token_returned):
+ with pytest.raises(KeycloakError) as raised_error:
+ get_token(module_params_creds)
+ assert str(raised_error.value) == (
+ 'Could not obtain access token from http://keycloak.url'
+ '/auth/realms/master/protocol/openid-connect/token'
+ )
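+
+# The mock plumbing above lets one URL yield different responses per call:
+# build_mocked_request dispatches on the requested URL, get_response then
+# resolves callables, per-method dicts and per-call lists, and create_wrapper
+# hands out a fresh StringIO each time so repeated reads do not come up empty.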
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py
new file mode 100644
index 000000000..dc0f8d3f9
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/identity/keycloak/test_keycloak_module_utils.py
@@ -0,0 +1,103 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+
+from ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak import is_struct_included
+
+
+class KeycloakIsStructIncludedTestCase(unittest.TestCase):
+ dict1 = dict(
+ test1='test1',
+ test2=dict(
+ test1='test1',
+ test2='test2'
+ ),
+ test3=['test1', dict(test='test1', test2='test2')]
+ )
+ dict2 = dict(
+ test1='test1',
+ test2=dict(
+ test1='test1',
+ test2='test2',
+ test3='test3'
+ ),
+ test3=['test1', dict(test='test1', test2='test2'), 'test3'],
+ test4='test4'
+ )
+ dict3 = dict(
+ test1='test1',
+ test2=dict(
+ test1='test1',
+ test2='test23',
+ test3='test3'
+ ),
+ test3=['test1', dict(test='test1', test2='test23'), 'test3'],
+ test4='test4'
+ )
+
+ dict5 = dict(
+ test1='test1',
+ test2=dict(
+ test1=True,
+ test2='test23',
+ test3='test3'
+ ),
+ test3=['test1', dict(test='test1', test2='test23'), 'test3'],
+ test4='test4'
+ )
+
+ dict6 = dict(
+ test1='test1',
+ test2=dict(
+ test1='true',
+ test2='test23',
+ test3='test3'
+ ),
+ test3=['test1', dict(test='test1', test2='test23'), 'test3'],
+ test4='test4'
+ )
+ dict7 = [
+ {
+ 'roles': ['view-clients', 'view-identity-providers', 'view-users', 'query-realms', 'manage-users'],
+ 'clientid': 'master-realm'
+ },
+ {
+ 'roles': ['manage-account', 'view-profile', 'manage-account-links'],
+ 'clientid': 'account'
+ }
+ ]
+ dict8 = [
+ {
+ 'roles': ['view-clients', 'query-realms', 'view-users'],
+ 'clientid': 'master-realm'
+ },
+ {
+ 'roles': ['manage-account-links', 'view-profile', 'manage-account'],
+ 'clientid': 'account'
+ }
+ ]
+
+ def test_trivial(self):
+ self.assertTrue(is_struct_included(self.dict1, self.dict1))
+
+ def test_equals_with_dict2_bigger_than_dict1(self):
+ self.assertTrue(is_struct_included(self.dict1, self.dict2))
+
+ def test_not_equals_with_dict2_bigger_than_dict1(self):
+ self.assertFalse(is_struct_included(self.dict2, self.dict1))
+
+ def test_not_equals_with_dict1_different_than_dict3(self):
+ self.assertFalse(is_struct_included(self.dict1, self.dict3))
+
+ def test_equals_with_dict5_contain_bool_and_dict6_contain_true_string(self):
+ self.assertFalse(is_struct_included(self.dict5, self.dict6))
+ self.assertFalse(is_struct_included(self.dict6, self.dict5))
+
+ def test_not_equals_dict7_dict8_compare_dict7_with_list_bigger_than_dict8_but_reverse_equals(self):
+ self.assertFalse(is_struct_included(self.dict7, self.dict8))
+ self.assertTrue(is_struct_included(self.dict8, self.dict7))
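+
+# The asymmetry exercised by the last test is the point of is_struct_included:
+# it checks that the first structure is recursively contained in the second
+# (extra keys and list items on the right are fine), so dict8 fits inside
+# dict7 while the reverse does not.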
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py
new file mode 100644
index 000000000..6ddc827a1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/net_tools/pritunl/test_api.py
@@ -0,0 +1,632 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+
+import pytest
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.module_utils.net_tools.pritunl import (
+ api,
+)
+from mock import MagicMock
+
+
+# Pritunl Mocks
+
+PRITUNL_ORGS = [
+ {
+ "auth_api": False,
+ "name": "Foo",
+ "auth_token": None,
+ "user_count": 0,
+ "auth_secret": None,
+ "id": "csftwlu6uhralzi2dpmhekz3",
+ },
+ {
+ "auth_api": False,
+ "name": "GumGum",
+ "auth_token": None,
+ "user_count": 3,
+ "auth_secret": None,
+ "id": "58070daee63f3b2e6e472c36",
+ },
+ {
+ "auth_api": False,
+ "name": "Bar",
+ "auth_token": None,
+ "user_count": 0,
+ "auth_secret": None,
+ "id": "v1sncsxxybnsylc8gpqg85pg",
+ },
+]
+
+NEW_PRITUNL_ORG = {
+ "auth_api": False,
+ "name": "NewOrg",
+ "auth_token": None,
+ "user_count": 0,
+ "auth_secret": None,
+ "id": "604a140ae63f3b36bc34c7bd",
+}
+
+PRITUNL_USERS = [
+ {
+ "auth_type": "google",
+ "dns_servers": None,
+ "pin": True,
+ "dns_suffix": None,
+ "servers": [
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "580711322bb66c1d59b9568f",
+ "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27",
+ "virt_address": "192.168.101.27",
+ "name": "vpn-A",
+ "real_address": None,
+ "connected_since": None,
+ "id": "580711322bb66c1d59b9568f",
+ "device_name": None,
+ },
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "5dad2cc6e63f3b3f4a6dfea5",
+ "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37",
+ "virt_address": "192.168.201.37",
+ "name": "vpn-B",
+ "real_address": None,
+ "connected_since": None,
+ "id": "5dad2cc6e63f3b3f4a6dfea5",
+ "device_name": None,
+ },
+ ],
+ "disabled": False,
+ "network_links": [],
+ "port_forwarding": [],
+ "id": "58070dafe63f3b2e6e472c3b",
+ "organization_name": "GumGum",
+ "type": "server",
+ "email": "bot@company.com",
+ "status": True,
+ "dns_mapping": None,
+ "otp_secret": "123456789ABCDEFG",
+ "client_to_client": False,
+ "sso": "google",
+ "bypass_secondary": False,
+ "groups": ["admin", "multiregion"],
+ "audit": False,
+ "name": "bot",
+ "gravatar": True,
+ "otp_auth": True,
+ "organization": "58070daee63f3b2e6e472c36",
+ },
+ {
+ "auth_type": "google",
+ "dns_servers": None,
+ "pin": True,
+ "dns_suffix": None,
+ "servers": [
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "580711322bb66c1d59b9568f",
+ "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27",
+ "virt_address": "192.168.101.27",
+ "name": "vpn-A",
+ "real_address": None,
+ "connected_since": None,
+ "id": "580711322bb66c1d59b9568f",
+ "device_name": None,
+ },
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "5dad2cc6e63f3b3f4a6dfea5",
+ "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37",
+ "virt_address": "192.168.201.37",
+ "name": "vpn-B",
+ "real_address": None,
+ "connected_since": None,
+ "id": "5dad2cc6e63f3b3f4a6dfea5",
+ "device_name": None,
+ },
+ ],
+ "disabled": False,
+ "network_links": [],
+ "port_forwarding": [],
+ "id": "58070dafe63f3b2e6e472c3b",
+ "organization_name": "GumGum",
+ "type": "client",
+ "email": "florian@company.com",
+ "status": True,
+ "dns_mapping": None,
+ "otp_secret": "123456789ABCDEFG",
+ "client_to_client": False,
+ "sso": "google",
+ "bypass_secondary": False,
+ "groups": ["web", "database"],
+ "audit": False,
+ "name": "florian",
+ "gravatar": True,
+ "otp_auth": True,
+ "organization": "58070daee63f3b2e6e472c36",
+ },
+ {
+ "auth_type": "google",
+ "dns_servers": None,
+ "pin": True,
+ "dns_suffix": None,
+ "servers": [
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "580711322bb66c1d59b9568f",
+ "virt_address6": "fd00:c0a8: 9700: 0: 192: 168: 101: 27",
+ "virt_address": "192.168.101.27",
+ "name": "vpn-A",
+ "real_address": None,
+ "connected_since": None,
+ "id": "580711322bb66c1d59b9568f",
+ "device_name": None,
+ },
+ {
+ "status": False,
+ "platform": None,
+ "server_id": "5dad2cc6e63f3b3f4a6dfea5",
+ "virt_address6": "fd00:c0a8:f200: 0: 192: 168: 201: 37",
+ "virt_address": "192.168.201.37",
+ "name": "vpn-B",
+ "real_address": None,
+ "connected_since": None,
+ "id": "5dad2cc6e63f3b3f4a6dfea5",
+ "device_name": None,
+ },
+ ],
+ "disabled": False,
+ "network_links": [],
+ "port_forwarding": [],
+ "id": "58070dafe63f3b2e6e472c3b",
+ "organization_name": "GumGum",
+ "type": "server",
+ "email": "ops@company.com",
+ "status": True,
+ "dns_mapping": None,
+ "otp_secret": "123456789ABCDEFG",
+ "client_to_client": False,
+ "sso": "google",
+ "bypass_secondary": False,
+ "groups": ["web", "database"],
+ "audit": False,
+ "name": "ops",
+ "gravatar": True,
+ "otp_auth": True,
+ "organization": "58070daee63f3b2e6e472c36",
+ },
+]
+
+NEW_PRITUNL_USER = {
+ "auth_type": "local",
+ "disabled": False,
+ "dns_servers": None,
+ "otp_secret": "6M4UWP2BCJBSYZAT",
+ "name": "alice",
+ "pin": False,
+ "dns_suffix": None,
+ "client_to_client": False,
+ "email": "alice@company.com",
+ "organization_name": "GumGum",
+ "bypass_secondary": False,
+ "groups": ["a", "b"],
+ "organization": "58070daee63f3b2e6e472c36",
+ "port_forwarding": [],
+ "type": "client",
+ "id": "590add71e63f3b72d8bb951a",
+}
+
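+# dict_merge performs a recursive merge and returns a new dict, so the
+# overrides below layer on top of NEW_PRITUNL_USER without mutating it.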
+NEW_PRITUNL_USER_UPDATED = dict_merge(
+ NEW_PRITUNL_USER,
+ {
+ "disabled": True,
+ "name": "bob",
+ "email": "bob@company.com",
+ "groups": ["c", "d"],
+ },
+)
+
+
+class PritunlEmptyOrganizationMock(MagicMock):
+ """Pritunl API Mock for organization GET API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps([])
+
+
+class PritunlListOrganizationMock(MagicMock):
+ """Pritunl API Mock for organization GET API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps(PRITUNL_ORGS)
+
+
+class PritunlListUserMock(MagicMock):
+ """Pritunl API Mock for user GET API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps(PRITUNL_USERS)
+
+
+class PritunlErrorMock(MagicMock):
+ """Pritunl API Mock for API call failures."""
+
+ def getcode(self):
+ return 500
+
+ def read(self):
+ return "{}"
+
+
+class PritunlPostOrganizationMock(MagicMock):
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps(NEW_PRITUNL_ORG)
+
+
+class PritunlListOrganizationAfterPostMock(MagicMock):
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps(PRITUNL_ORGS + [NEW_PRITUNL_ORG])
+
+
+class PritunlPostUserMock(MagicMock):
+ """Pritunl API Mock for POST API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps([NEW_PRITUNL_USER])
+
+
+class PritunlPutUserMock(MagicMock):
+ """Pritunl API Mock for PUT API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return json.dumps(NEW_PRITUNL_USER_UPDATED)
+
+
+class PritunlDeleteOrganizationMock(MagicMock):
+ """Pritunl API Mock for DELETE API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return "{}"
+
+
+class PritunlDeleteUserMock(MagicMock):
+ """Pritunl API Mock for DELETE API calls."""
+
+ def getcode(self):
+ return 200
+
+ def read(self):
+ return "{}"
+
+
+# Ansible Module Mock and Pytest mock fixtures
+
+
+class ModuleFailException(Exception):
+ def __init__(self, msg, **kwargs):
+ super(ModuleFailException, self).__init__(msg)
+ self.fail_msg = msg
+ self.fail_kwargs = kwargs
+
+
+@pytest.fixture
+def pritunl_settings():
+ return {
+ "api_token": "token",
+ "api_secret": "secret",
+ "base_url": "https://pritunl.domain.com",
+ "validate_certs": True,
+ }
+
+
+@pytest.fixture
+def pritunl_organization_data():
+ return {
+ "name": NEW_PRITUNL_ORG["name"],
+ }
+
+
+@pytest.fixture
+def pritunl_user_data():
+ return {
+ "name": NEW_PRITUNL_USER["name"],
+ "email": NEW_PRITUNL_USER["email"],
+ "groups": NEW_PRITUNL_USER["groups"],
+ "disabled": NEW_PRITUNL_USER["disabled"],
+ "type": NEW_PRITUNL_USER["type"],
+ }
+
+
+@pytest.fixture
+def get_pritunl_organization_mock():
+ return PritunlListOrganizationMock()
+
+
+@pytest.fixture
+def get_pritunl_user_mock():
+ return PritunlListUserMock()
+
+
+@pytest.fixture
+def get_pritunl_error_mock():
+ return PritunlErrorMock()
+
+
+@pytest.fixture
+def post_pritunl_organization_mock():
+ return PritunlPostOrganizationMock()
+
+
+@pytest.fixture
+def post_pritunl_user_mock():
+ return PritunlPostUserMock()
+
+
+@pytest.fixture
+def put_pritunl_user_mock():
+ return PritunlPutUserMock()
+
+
+@pytest.fixture
+def delete_pritunl_organization_mock():
+ return PritunlDeleteOrganizationMock()
+
+
+@pytest.fixture
+def delete_pritunl_user_mock():
+ return PritunlDeleteUserMock()
+
+
+class TestPritunlApi:
+ """
+ Test class to validate CRUD operations on Pritunl.
+ """
+
+ # Test for GET / list operation on Pritunl API
+ @pytest.mark.parametrize(
+ "org_id,org_user_count",
+ [
+ ("58070daee63f3b2e6e472c36", 3),
+ ("v1sncsxxybnsylc8gpqg85pg", 0),
+ ],
+ )
+ def test_list_all_pritunl_organization(
+ self,
+ pritunl_settings,
+ get_pritunl_organization_mock,
+ org_id,
+ org_user_count,
+ ):
+ api._get_pritunl_organizations = get_pritunl_organization_mock()
+
+ response = api.list_pritunl_organizations(**pritunl_settings)
+
+ assert len(response) == 3
+
+ for org in response:
+ if org["id"] == org_id:
+ org["user_count"] == org_user_count
+
+ @pytest.mark.parametrize(
+ "org_filters,org_expected",
+ [
+ ({"id": "58070daee63f3b2e6e472c36"}, "GumGum"),
+ ({"name": "GumGum"}, "GumGum"),
+ ],
+ )
+ def test_list_filtered_pritunl_organization(
+ self,
+ pritunl_settings,
+ get_pritunl_organization_mock,
+ org_filters,
+ org_expected,
+ ):
+ api._get_pritunl_organizations = get_pritunl_organization_mock()
+
+ response = api.list_pritunl_organizations(
+ **dict_merge(pritunl_settings, {"filters": org_filters})
+ )
+
+ assert len(response) == 1
+ assert response[0]["name"] == org_expected
+
+ @pytest.mark.parametrize(
+ "org_id,org_user_count",
+ [("58070daee63f3b2e6e472c36", 3)],
+ )
+ def test_list_all_pritunl_user(
+ self, pritunl_settings, get_pritunl_user_mock, org_id, org_user_count
+ ):
+ api._get_pritunl_users = get_pritunl_user_mock()
+
+ response = api.list_pritunl_users(
+ **dict_merge(pritunl_settings, {"organization_id": org_id})
+ )
+
+ assert len(response) == org_user_count
+
+ @pytest.mark.parametrize(
+ "org_id,user_filters,user_expected",
+ [
+ ("58070daee63f3b2e6e472c36", {"email": "bot@company.com"}, "bot"),
+ ("58070daee63f3b2e6e472c36", {"name": "florian"}, "florian"),
+ ],
+ )
+ def test_list_filtered_pritunl_user(
+ self,
+ pritunl_settings,
+ get_pritunl_user_mock,
+ org_id,
+ user_filters,
+ user_expected,
+ ):
+ api._get_pritunl_users = get_pritunl_user_mock()
+
+ response = api.list_pritunl_users(
+ **dict_merge(
+ pritunl_settings, {"organization_id": org_id, "filters": user_filters}
+ )
+ )
+
+ assert len(response) > 0
+
+ for user in response:
+ assert user["organization"] == org_id
+ assert user["name"] == user_expected
+
+ # Test for POST operation on Pritunl API
+ def test_add_pritunl_organization(
+ self,
+ pritunl_settings,
+ pritunl_organization_data,
+ post_pritunl_organization_mock,
+ ):
+ api._post_pritunl_organization = post_pritunl_organization_mock()
+
+ create_response = api.post_pritunl_organization(
+ **dict_merge(
+ pritunl_settings,
+ {"organization_name": pritunl_organization_data["name"]},
+ )
+ )
+
+ # Ensure the provided settings match those returned by Pritunl
+ for k, v in iteritems(pritunl_organization_data):
+ assert create_response[k] == v
+
+ @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")])
+ def test_add_and_update_pritunl_user(
+ self,
+ pritunl_settings,
+ pritunl_user_data,
+ post_pritunl_user_mock,
+ put_pritunl_user_mock,
+ org_id,
+ ):
+ api._post_pritunl_user = post_pritunl_user_mock()
+ api._put_pritunl_user = put_pritunl_user_mock()
+
+ create_response = api.post_pritunl_user(
+ **dict_merge(
+ pritunl_settings,
+ {
+ "organization_id": org_id,
+ "user_data": pritunl_user_data,
+ },
+ )
+ )
+
+ # Ensure the provided settings match those returned by Pritunl
+ for k, v in iteritems(pritunl_user_data):
+ assert create_response[k] == v
+
+ # Update the newly created user to ensure only certain settings are changed
+
+ user_updates = {
+ "name": "bob",
+ "email": "bob@company.com",
+ "disabled": True,
+ }
+
+ update_response = api.post_pritunl_user(
+ **dict_merge(
+ pritunl_settings,
+ {
+ "organization_id": org_id,
+ "user_id": create_response["id"],
+ "user_data": dict_merge(pritunl_user_data, user_updates),
+ },
+ )
+ )
+
+ # Ensure only the updated settings changed while the rest remained untouched.
+ for k, v in iteritems(update_response):
+ if k in user_updates:
+ assert v == user_updates[k]
+ else:
+ assert v == create_response[k]
+
+ # Test for DELETE operation on Pritunl API
+
+ @pytest.mark.parametrize("org_id", [("58070daee63f3b2e6e472c36")])
+ def test_delete_pritunl_organization(
+ self, pritunl_settings, org_id, delete_pritunl_organization_mock
+ ):
+ api._delete_pritunl_organization = delete_pritunl_organization_mock()
+
+ response = api.delete_pritunl_organization(
+ **dict_merge(
+ pritunl_settings,
+ {
+ "organization_id": org_id,
+ },
+ )
+ )
+
+ assert response == {}
+
+ @pytest.mark.parametrize(
+ "org_id,user_id", [("58070daee63f3b2e6e472c36", "590add71e63f3b72d8bb951a")]
+ )
+ def test_delete_pritunl_user(
+ self, pritunl_settings, org_id, user_id, delete_pritunl_user_mock
+ ):
+ api._delete_pritunl_user = delete_pritunl_user_mock()
+
+ response = api.delete_pritunl_user(
+ **dict_merge(
+ pritunl_settings,
+ {
+ "organization_id": org_id,
+ "user_id": user_id,
+ },
+ )
+ )
+
+ assert response == {}
+
+ # Test API call errors
+ def test_pritunl_error(self, pritunl_settings, get_pritunl_error_mock):
+ api.pritunl_auth_request = get_pritunl_error_mock()
+
+ with pytest.raises(api.PritunlException):
+ api.list_pritunl_organizations(**pritunl_settings)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_cmd_runner.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_cmd_runner.py
new file mode 100644
index 000000000..7cec215a7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_cmd_runner.py
@@ -0,0 +1,374 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Alexei Znamensky <russoz@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from sys import version_info
+
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, PropertyMock
+from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, fmt
+
+
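+# Each case is (formatter factory, factory args, runtime value, expected
+# argv fragment).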
+TC_FORMATS = dict(
+ simple_boolean__true=(fmt.as_bool, ("--superflag",), True, ["--superflag"]),
+ simple_boolean__false=(fmt.as_bool, ("--superflag",), False, []),
+ simple_boolean__none=(fmt.as_bool, ("--superflag",), None, []),
+ simple_boolean_both__true=(fmt.as_bool, ("--superflag", "--falseflag"), True, ["--superflag"]),
+ simple_boolean_both__false=(fmt.as_bool, ("--superflag", "--falseflag"), False, ["--falseflag"]),
+ simple_boolean_both__none=(fmt.as_bool, ("--superflag", "--falseflag"), None, ["--falseflag"]),
+ simple_boolean_both__none_ig=(fmt.as_bool, ("--superflag", "--falseflag", True), None, []),
+ simple_boolean_not__true=(fmt.as_bool_not, ("--superflag",), True, []),
+ simple_boolean_not__false=(fmt.as_bool_not, ("--superflag",), False, ["--superflag"]),
+ simple_boolean_not__none=(fmt.as_bool_not, ("--superflag",), None, ["--superflag"]),
+ simple_optval__str=(fmt.as_optval, ("-t",), "potatoes", ["-tpotatoes"]),
+ simple_optval__int=(fmt.as_optval, ("-t",), 42, ["-t42"]),
+ simple_opt_val__str=(fmt.as_opt_val, ("-t",), "potatoes", ["-t", "potatoes"]),
+ simple_opt_val__int=(fmt.as_opt_val, ("-t",), 42, ["-t", "42"]),
+ simple_opt_eq_val__str=(fmt.as_opt_eq_val, ("--food",), "potatoes", ["--food=potatoes"]),
+ simple_opt_eq_val__int=(fmt.as_opt_eq_val, ("--answer",), 42, ["--answer=42"]),
+ simple_list_potato=(fmt.as_list, (), "literal_potato", ["literal_potato"]),
+ simple_list_42=(fmt.as_list, (), 42, ["42"]),
+ simple_map=(fmt.as_map, ({'a': 1, 'b': 2, 'c': 3},), 'b', ["2"]),
+ simple_default_type__list=(fmt.as_default_type, ("list",), [1, 2, 3, 5, 8], ["--1", "--2", "--3", "--5", "--8"]),
+ simple_default_type__bool_true=(fmt.as_default_type, ("bool", "what"), True, ["--what"]),
+ simple_default_type__bool_false=(fmt.as_default_type, ("bool", "what"), False, []),
+ simple_default_type__potato=(fmt.as_default_type, ("any-other-type", "potato"), "42", ["--potato", "42"]),
+ simple_fixed_true=(fmt.as_fixed, [("--always-here", "--forever")], True, ["--always-here", "--forever"]),
+ simple_fixed_false=(fmt.as_fixed, [("--always-here", "--forever")], False, ["--always-here", "--forever"]),
+ simple_fixed_none=(fmt.as_fixed, [("--always-here", "--forever")], None, ["--always-here", "--forever"]),
+ simple_fixed_str=(fmt.as_fixed, [("--always-here", "--forever")], "something", ["--always-here", "--forever"]),
+)
+if tuple(version_info) >= (3, 1):
+ from collections import OrderedDict
+
+ # needs OrderedDict to provide a consistent key order
+ TC_FORMATS["simple_default_type__dict"] = ( # type: ignore
+ fmt.as_default_type,
+ ("dict",),
+ OrderedDict((('a', 1), ('b', 2))),
+ ["--a=1", "--b=2"]
+ )
+TC_FORMATS_IDS = sorted(TC_FORMATS.keys())
+
+
+@pytest.mark.parametrize('func, fmt_opt, value, expected',
+ (TC_FORMATS[tc] for tc in TC_FORMATS_IDS),
+ ids=TC_FORMATS_IDS)
+def test_arg_format(func, fmt_opt, value, expected):
+ fmt_func = func(*fmt_opt)
+ actual = fmt_func(value, ctx_ignore_none=True)
+ print("formatted string = {0}".format(actual))
+ assert actual == expected, "actual = {0}".format(actual)
+
+
+TC_RUNNER = dict(
+ # SAMPLE: This shows all possible elements of a test case. It does not actually run.
+ #
+ # testcase_name=(
+ # # input
+ # dict(
+ # args_bundle = dict(
+ # param1=dict(
+ # type="int",
+ # value=11,
+ # fmt_func=fmt.as_opt_eq_val,
+ # fmt_arg="--answer",
+ # ),
+ # param2=dict(
+ # fmt_func=fmt.as_bool,
+ # fmt_arg="--bb-here",
+ # )
+ # ),
+ # runner_init_args = dict(
+ # command="testing",
+ # default_args_order=(),
+ # check_rc=False,
+ # force_lang="C",
+ # path_prefix=None,
+ # environ_update=None,
+ # ),
+ # runner_ctx_args = dict(
+ # args_order=['aa', 'bb'],
+ # output_process=None,
+ # ignore_value_none=True,
+ # ),
+ # ),
+ # # command execution
+ # dict(
+ # runner_ctx_run_args = dict(bb=True),
+ # rc = 0,
+ # out = "",
+ # err = "",
+ # ),
+ # # expected
+ # dict(
+ # results=(),
+ # run_info=dict(
+ # cmd=['/mock/bin/testing', '--answer=11', '--bb-here'],
+ # environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ # ),
+ # exc=None,
+ # ),
+ # ),
+ #
+ aa_bb=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(),
+ runner_ctx_args=dict(args_order=['aa', 'bb']),
+ ),
+ dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', '--bb-here'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('aa', 'bb'),
+ ),
+ ),
+ ),
+ aa_bb_default_order=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(default_args_order=['bb', 'aa']),
+ runner_ctx_args=dict(),
+ ),
+ dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--bb-here', '--answer=11'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('bb', 'aa'),
+ ),
+ ),
+ ),
+ aa_bb_default_order_args_order=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(default_args_order=['bb', 'aa']),
+ runner_ctx_args=dict(args_order=['aa', 'bb']),
+ ),
+ dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', '--bb-here'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('aa', 'bb'),
+ ),
+ ),
+ ),
+ aa_bb_dup_in_args_order=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(),
+ runner_ctx_args=dict(args_order=['aa', 'bb', 'aa']),
+ ),
+ dict(runner_ctx_run_args=dict(bb=True), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', '--bb-here', '--answer=11'],
+ ),
+ ),
+ ),
+ aa_bb_process_output=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(default_args_order=['bb', 'aa']),
+ runner_ctx_args=dict(
+ args_order=['aa', 'bb'],
+ output_process=lambda rc, out, err: '-/-'.join([str(rc), out, err])
+ ),
+ ),
+ dict(runner_ctx_run_args=dict(bb=True), rc=0, out="ni", err="nu"),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', '--bb-here'],
+ ),
+ results="0-/-ni-/-nu"
+ ),
+ ),
+ aa_bb_ignore_none_with_none=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=49, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(default_args_order=['bb', 'aa']),
+ runner_ctx_args=dict(
+ args_order=['aa', 'bb'],
+ ignore_value_none=True, # default
+ ),
+ ),
+ dict(runner_ctx_run_args=dict(bb=None), rc=0, out="ni", err="nu"),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=49'],
+ ),
+ ),
+ ),
+ aa_bb_ignore_not_none_with_none=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=49, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_bool, fmt_arg="--bb-here"),
+ ),
+ runner_init_args=dict(default_args_order=['bb', 'aa']),
+ runner_ctx_args=dict(
+ args_order=['aa', 'bb'],
+ ignore_value_none=False,
+ ),
+ ),
+ dict(runner_ctx_run_args=dict(aa=None, bb=True), rc=0, out="ni", err="nu"),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=None', '--bb-here'],
+ ),
+ ),
+ ),
+ aa_bb_fixed=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_fixed, fmt_arg=["fixed", "args"]),
+ ),
+ runner_init_args=dict(),
+ runner_ctx_args=dict(args_order=['aa', 'bb']),
+ ),
+ dict(runner_ctx_run_args=dict(), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', 'fixed', 'args'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('aa', 'bb'),
+ ),
+ ),
+ ),
+ aa_bb_map=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_map, fmt_arg={"v1": 111, "v2": 222}),
+ ),
+ runner_init_args=dict(),
+ runner_ctx_args=dict(args_order=['aa', 'bb']),
+ ),
+ dict(runner_ctx_run_args=dict(bb="v2"), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11', '222'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('aa', 'bb'),
+ ),
+ ),
+ ),
+ aa_bb_map_default=(
+ dict(
+ args_bundle=dict(
+ aa=dict(type="int", value=11, fmt_func=fmt.as_opt_eq_val, fmt_arg="--answer"),
+ bb=dict(fmt_func=fmt.as_map, fmt_arg={"v1": 111, "v2": 222}),
+ ),
+ runner_init_args=dict(),
+ runner_ctx_args=dict(args_order=['aa', 'bb']),
+ ),
+ dict(runner_ctx_run_args=dict(bb="v123456789"), rc=0, out="", err=""),
+ dict(
+ run_info=dict(
+ cmd=['/mock/bin/testing', '--answer=11'],
+ environ_update={'LANGUAGE': 'C', 'LC_ALL': 'C'},
+ args_order=('aa', 'bb'),
+ ),
+ ),
+ ),
+)
+TC_RUNNER_IDS = sorted(TC_RUNNER.keys())
+
+
+@pytest.mark.parametrize('runner_input, cmd_execution, expected',
+ (TC_RUNNER[tc] for tc in TC_RUNNER_IDS),
+ ids=TC_RUNNER_IDS)
+def test_runner_context(runner_input, cmd_execution, expected):
+ arg_spec = {}
+ params = {}
+ arg_formats = {}
+ for k, v in runner_input['args_bundle'].items():
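+ # Every bundle key is optional: a missing 'type', 'value', or 'fmt_func'
+ # entry is simply skipped rather than defaulted.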
+ try:
+ arg_spec[k] = {'type': v['type']}
+ except KeyError:
+ pass
+ try:
+ params[k] = v['value']
+ except KeyError:
+ pass
+ try:
+ arg_formats[k] = v['fmt_func'](v['fmt_arg'])
+ except KeyError:
+ pass
+
+ orig_results = tuple(cmd_execution[x] for x in ('rc', 'out', 'err'))
+
+ print("arg_spec={0}\nparams={1}\narg_formats={2}\n".format(
+ arg_spec,
+ params,
+ arg_formats,
+ ))
+
+ module = MagicMock()
+ type(module).argument_spec = PropertyMock(return_value=arg_spec)
+ type(module).params = PropertyMock(return_value=params)
+ module.get_bin_path.return_value = '/mock/bin/testing'
+ module.run_command.return_value = orig_results
+
+ runner = CmdRunner(
+ module=module,
+ command="testing",
+ arg_formats=arg_formats,
+ **runner_input['runner_init_args']
+ )
+
+ def _assert_run_info(actual, expected):
+ reduced = dict((k, actual[k]) for k in expected.keys())
+ assert reduced == expected, "{0}".format(reduced)
+
+ def _assert_run(runner_input, cmd_execution, expected, ctx, results):
+ _assert_run_info(ctx.run_info, expected['run_info'])
+ assert results == expected.get('results', orig_results)
+
+ exc = expected.get("exc")
+ if exc:
+ with pytest.raises(exc):
+ with runner.context(**runner_input['runner_ctx_args']) as ctx:
+ results = ctx.run(**cmd_execution['runner_ctx_run_args'])
+ _assert_run(runner_input, cmd_execution, expected, ctx, results)
+
+ with pytest.raises(exc):
+ with runner(**runner_input['runner_ctx_args']) as ctx2:
+ results2 = ctx2.run(**cmd_execution['runner_ctx_run_args'])
+ _assert_run(runner_input, cmd_execution, expected, ctx2, results2)
+
+ else:
+ with runner.context(**runner_input['runner_ctx_args']) as ctx:
+ results = ctx.run(**cmd_execution['runner_ctx_run_args'])
+ _assert_run(runner_input, cmd_execution, expected, ctx, results)
+
+ with runner(**runner_input['runner_ctx_args']) as ctx2:
+ results2 = ctx2.run(**cmd_execution['runner_ctx_run_args'])
+ _assert_run(runner_input, cmd_execution, expected, ctx2, results2)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_csv.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_csv.py
new file mode 100644
index 000000000..8b83908e7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_csv.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils import csv
+
+
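+# Each case is (dialect name, dialect params, fieldnames override, raw CSV
+# text, expected parsed rows).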
+VALID_CSV = [
+ (
+ 'excel',
+ {},
+ None,
+ "id,name,role\n1,foo,bar\n2,bar,baz",
+ [
+ {
+ "id": "1",
+ "name": "foo",
+ "role": "bar",
+ },
+ {
+ "id": "2",
+ "name": "bar",
+ "role": "baz",
+ },
+ ]
+ ),
+ (
+ 'excel',
+ {"skipinitialspace": True},
+ None,
+ "id,name,role\n1, foo, bar\n2, bar, baz",
+ [
+ {
+ "id": "1",
+ "name": "foo",
+ "role": "bar",
+ },
+ {
+ "id": "2",
+ "name": "bar",
+ "role": "baz",
+ },
+ ]
+ ),
+ (
+ 'excel',
+ {"delimiter": '|'},
+ None,
+ "id|name|role\n1|foo|bar\n2|bar|baz",
+ [
+ {
+ "id": "1",
+ "name": "foo",
+ "role": "bar",
+ },
+ {
+ "id": "2",
+ "name": "bar",
+ "role": "baz",
+ },
+ ]
+ ),
+ (
+ 'unix',
+ {},
+ None,
+ "id,name,role\n1,foo,bar\n2,bar,baz",
+ [
+ {
+ "id": "1",
+ "name": "foo",
+ "role": "bar",
+ },
+ {
+ "id": "2",
+ "name": "bar",
+ "role": "baz",
+ },
+ ]
+ ),
+ (
+ 'excel',
+ {},
+ ['id', 'name', 'role'],
+ "1,foo,bar\n2,bar,baz",
+ [
+ {
+ "id": "1",
+ "name": "foo",
+ "role": "bar",
+ },
+ {
+ "id": "2",
+ "name": "bar",
+ "role": "baz",
+ },
+ ]
+ ),
+]
+
+INVALID_CSV = [
+ (
+ 'excel',
+ {'strict': True},
+ None,
+ 'id,name,role\n1,"f"oo",bar\n2,bar,baz',
+ ),
+]
+
+INVALID_DIALECT = [
+ (
+ 'invalid',
+ {},
+ None,
+ "id,name,role\n1,foo,bar\n2,bar,baz",
+ ),
+]
+
+
+@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data,expected", VALID_CSV)
+def test_valid_csv(data, dialect, dialect_params, fieldnames, expected):
+ dialect = csv.initialize_dialect(dialect, **dialect_params)
+ reader = csv.read_csv(data, dialect, fieldnames)
+ result = True
+
+ for idx, row in enumerate(reader):
+ for k, v in row.items():
+ if expected[idx][k] != v:
+ result = False
+ break
+
+ assert result
+
+
+@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data", INVALID_CSV)
+def test_invalid_csv(data, dialect, dialect_params, fieldnames):
+ dialect = csv.initialize_dialect(dialect, **dialect_params)
+ reader = csv.read_csv(data, dialect, fieldnames)
+ result = False
+
+ try:
+ for row in reader:
+ continue
+ except csv.CSVError:
+ result = True
+
+ assert result
+
+
+@pytest.mark.parametrize("dialect,dialect_params,fieldnames,data", INVALID_DIALECT)
+def test_invalid_dialect(data, dialect, dialect_params, fieldnames):
+ result = False
+
+ try:
+ dialect = csv.initialize_dialect(dialect, **dialect_params)
+ except csv.DialectNotAvailableError:
+ result = True
+
+ assert result
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py
new file mode 100644
index 000000000..c76671202
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_database.py
@@ -0,0 +1,143 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.database import (
+ is_input_dangerous,
+ pg_quote_identifier,
+ SQLParseError,
+)
+
+# These are all valid strings
+# The results are based on interpreting the identifier as a table name
+VALID = {
+ # User quoted
+ '"public.table"': '"public.table"',
+ '"public"."table"': '"public"."table"',
+ '"schema test"."table test"': '"schema test"."table test"',
+
+ # We quote part
+ 'public.table': '"public"."table"',
+ '"public".table': '"public"."table"',
+ 'public."table"': '"public"."table"',
+ 'schema test.table test': '"schema test"."table test"',
+ '"schema test".table test': '"schema test"."table test"',
+ 'schema test."table test"': '"schema test"."table test"',
+
+ # Embedded double quotes
+ 'table "test"': '"table ""test"""',
+ 'public."table ""test"""': '"public"."table ""test"""',
+ 'public.table "test"': '"public"."table ""test"""',
+ 'schema "test".table': '"schema ""test"""."table"',
+ '"schema ""test""".table': '"schema ""test"""."table"',
+ '"""wat"""."""test"""': '"""wat"""."""test"""',
+ # Sigh, handle these as well:
+ '"no end quote': '"""no end quote"',
+ 'schema."table': '"schema"."""table"',
+ '"schema.table': '"""schema"."table"',
+ 'schema."table.something': '"schema"."""table"."something"',
+
+ # Embedded dots
+ '"schema.test"."table.test"': '"schema.test"."table.test"',
+ '"schema.".table': '"schema."."table"',
+ '"schema."."table"': '"schema."."table"',
+ 'schema.".table"': '"schema".".table"',
+ '"schema".".table"': '"schema".".table"',
+ '"schema.".".table"': '"schema.".".table"',
+ # These are valid but maybe not what the user intended
+ '."table"': '".""table"""',
+ 'table.': '"table."',
+}
+
+INVALID = {
+ ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
+ ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
+ ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
+ ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
+ ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
+ ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
+ ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema."table"', 'table'): 'User escaped identifiers must escape extra quotes',
+ ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
+}
+
+HOW_MANY_DOTS = (
+ ('role', 'role', '"role"',
+ 'PostgreSQL does not support role with more than 1 dots'),
+ ('db', 'database', '"db"',
+ 'PostgreSQL does not support database with more than 1 dots'),
+ ('db.schema', 'schema', '"db"."schema"',
+ 'PostgreSQL does not support schema with more than 2 dots'),
+ ('db.schema.table', 'table', '"db"."schema"."table"',
+ 'PostgreSQL does not support table with more than 3 dots'),
+ ('db.schema.table.column', 'column', '"db"."schema"."table"."column"',
+ 'PostgreSQL does not support column with more than 4 dots'),
+)
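+# Each entry is used twice below: the identifier must quote cleanly, and the
+# same identifier with one extra dotted component must raise SQLParseError.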
+
+VALID_QUOTES = ((test, VALID[test]) for test in sorted(VALID))
+INVALID_QUOTES = ((test[0], test[1], INVALID[test]) for test in sorted(INVALID))
+
+IS_STRINGS_DANGEROUS = (
+ (u'', False),
+ (u' ', False),
+ (u'alternative database', False),
+ (u'backup of TRUNCATED table', False),
+ (u'bob.dropper', False),
+ (u'd\'artagnan', False),
+ (u'user_with_select_update_truncate_right', False),
+ (u';DROP DATABASE fluffy_pets_photos', True),
+ (u';drop DATABASE fluffy_pets_photos', True),
+ (u'; TRUNCATE TABLE his_valuable_table', True),
+ (u'; truncate TABLE his_valuable_table', True),
+ (u'\'--', True),
+ (u'"--', True),
+ (u'\' union select username, password from admin_credentials', True),
+ (u'\' UNION SELECT username, password from admin_credentials', True),
+ (u'\' intersect select', True),
+ (u'\' INTERSECT select', True),
+ (u'\' except select', True),
+ (u'\' EXCEPT select', True),
+ (u';ALTER TABLE prices', True),
+ (u';alter table prices', True),
+ (u"; UPDATE products SET price = '0'", True),
+ (u";update products SET price = '0'", True),
+ (u"; DELETE FROM products", True),
+ (u"; delete FROM products", True),
+ (u"; SELECT * FROM products", True),
+ (u" ; select * from products", True),
+)
+
+
+@pytest.mark.parametrize("identifier, quoted_identifier", VALID_QUOTES)
+def test_valid_quotes(identifier, quoted_identifier):
+ assert pg_quote_identifier(identifier, 'table') == quoted_identifier
+
+
+@pytest.mark.parametrize("identifier, id_type, msg", INVALID_QUOTES)
+def test_invalid_quotes(identifier, id_type, msg):
+ with pytest.raises(SQLParseError) as ex:
+ pg_quote_identifier(identifier, id_type)
+
+ ex.match(msg)
+
+
+@pytest.mark.parametrize("identifier, id_type, quoted_identifier, msg", HOW_MANY_DOTS)
+def test_how_many_dots(identifier, id_type, quoted_identifier, msg):
+ assert pg_quote_identifier(identifier, id_type) == quoted_identifier
+
+ with pytest.raises(SQLParseError) as ex:
+ pg_quote_identifier('%s.more' % identifier, id_type)
+
+ ex.match(msg)
+
+
+@pytest.mark.parametrize("string, result", IS_STRINGS_DANGEROUS)
+def test_is_input_dangerous(string, result):
+ assert is_input_dangerous(string) == result
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py
new file mode 100644
index 000000000..25e76b66f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_known_hosts.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Michael Scherer <mscherer@redhat.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils import known_hosts
+
+
+URLS = {
+ 'ssh://one.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'one.example.org',
+ 'add_host_key_cmd': " -t rsa one.example.org",
+ 'port': None,
+ },
+ 'ssh+git://two.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'two.example.org',
+ 'add_host_key_cmd': " -t rsa two.example.org",
+ 'port': None,
+ },
+ 'rsync://three.example.org/user/example.git': {
+ 'is_ssh_url': False,
+ 'get_fqdn': 'three.example.org',
+ 'add_host_key_cmd': None, # not called for non-ssh urls
+ 'port': None,
+ },
+ 'git@four.example.org:user/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'four.example.org',
+ 'add_host_key_cmd': " -t rsa four.example.org",
+ 'port': None,
+ },
+ 'git+ssh://five.example.org/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'five.example.org',
+ 'add_host_key_cmd': " -t rsa five.example.org",
+ 'port': None,
+ },
+ 'ssh://six.example.org:21/example.org': {
+ # ssh on FTP Port?
+ 'is_ssh_url': True,
+ 'get_fqdn': 'six.example.org',
+ 'add_host_key_cmd': " -t rsa -p 21 six.example.org",
+ 'port': '21',
+ },
+ 'ssh://[2001:DB8::abcd:abcd]/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'ssh://[2001:DB8::abcd:abcd]:22/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa -p 22 [2001:DB8::abcd:abcd]",
+ 'port': '22',
+ },
+ 'username@[2001:DB8::abcd:abcd]/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'username@[2001:DB8::abcd:abcd]:path/example.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': '[2001:DB8::abcd:abcd]',
+ 'add_host_key_cmd': " -t rsa [2001:DB8::abcd:abcd]",
+ 'port': None,
+ },
+ 'ssh://internal.git.server:7999/repos/repo.git': {
+ 'is_ssh_url': True,
+ 'get_fqdn': 'internal.git.server',
+ 'add_host_key_cmd': " -t rsa -p 7999 internal.git.server",
+ 'port': '7999',
+ },
+}
+
+
+@pytest.mark.parametrize('url, is_ssh_url', ((k, URLS[k]['is_ssh_url']) for k in sorted(URLS)))
+def test_is_ssh_url(url, is_ssh_url):
+ assert known_hosts.is_ssh_url(url) == is_ssh_url
+
+
+@pytest.mark.parametrize('url, fqdn, port', ((k, URLS[k]['get_fqdn'], URLS[k]['port']) for k in sorted(URLS)))
+def test_get_fqdn_and_port(url, fqdn, port):
+ assert known_hosts.get_fqdn_and_port(url) == (fqdn, port)
+
+
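+# The 'am' and 'stdin' fixtures used below are assumed to come from the unit
+# tests' shared conftest (not part of this diff); 'stdin' is fed indirectly
+# via parametrize.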
+@pytest.mark.parametrize('fqdn, port, add_host_key_cmd, stdin',
+ ((URLS[k]['get_fqdn'], URLS[k]['port'], URLS[k]['add_host_key_cmd'], {})
+ for k in sorted(URLS) if URLS[k]['is_ssh_url']),
+ indirect=['stdin'])
+def test_add_host_key(am, mocker, fqdn, port, add_host_key_cmd):
+ get_bin_path = mocker.MagicMock()
+ get_bin_path.return_value = keyscan_cmd = "/custom/path/ssh-keyscan"
+ am.get_bin_path = get_bin_path
+
+ run_command = mocker.MagicMock()
+ run_command.return_value = (0, "Needs output, otherwise thinks ssh-keyscan timed out'", "")
+ am.run_command = run_command
+
+ append_to_file = mocker.MagicMock()
+ append_to_file.return_value = (None,)
+ am.append_to_file = append_to_file
+
+ mocker.patch('os.path.isdir', return_value=True)
+ mocker.patch('os.path.exists', return_value=True)
+
+ known_hosts.add_host_key(am, fqdn, port=port)
+ run_command.assert_called_with(keyscan_cmd + add_host_key_cmd)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py
new file mode 100644
index 000000000..3d8a4b654
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_module_helper.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 -*-
+# (c) 2020, Alexei Znamensky <russoz@gmail.com>
+# Copyright (c) 2020 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.module_helper import (
+ ArgFormat, DependencyCtxMgr, VarMeta, VarDict, cause_changes
+)
+
+
+def single_lambda_2star(x, y, z):
+ return ["piggies=[{0},{1},{2}]".format(x, y, z)]
+
+
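+# Each case is (fmt spec, ArgFormat style, star-unpacking level, input value,
+# expected argv list).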
+ARG_FORMATS = dict(
+ simple_boolean_true=("--superflag", ArgFormat.BOOLEAN, 0,
+ True, ["--superflag"]),
+ simple_boolean_false=("--superflag", ArgFormat.BOOLEAN, 0,
+ False, []),
+ simple_boolean_none=("--superflag", ArgFormat.BOOLEAN, 0,
+ None, []),
+ simple_boolean_not_true=("--superflag", ArgFormat.BOOLEAN_NOT, 0,
+ True, []),
+ simple_boolean_not_false=("--superflag", ArgFormat.BOOLEAN_NOT, 0,
+ False, ["--superflag"]),
+ simple_boolean_not_none=("--superflag", ArgFormat.BOOLEAN_NOT, 0,
+ None, ["--superflag"]),
+ single_printf=("--param=%s", ArgFormat.PRINTF, 0,
+ "potatoes", ["--param=potatoes"]),
+ single_printf_no_substitution=("--param", ArgFormat.PRINTF, 0,
+ "potatoes", ["--param"]),
+ single_printf_none=("--param=%s", ArgFormat.PRINTF, 0,
+ None, []),
+ multiple_printf=(["--param", "free-%s"], ArgFormat.PRINTF, 0,
+ "potatoes", ["--param", "free-potatoes"]),
+ single_format=("--param={0}", ArgFormat.FORMAT, 0,
+ "potatoes", ["--param=potatoes"]),
+ single_format_none=("--param={0}", ArgFormat.FORMAT, 0,
+ None, []),
+ single_format_no_substitution=("--param", ArgFormat.FORMAT, 0,
+ "potatoes", ["--param"]),
+ multiple_format=(["--param", "free-{0}"], ArgFormat.FORMAT, 0,
+ "potatoes", ["--param", "free-potatoes"]),
+ multiple_format_none=(["--param", "free-{0}"], ArgFormat.FORMAT, 0,
+ None, []),
+ single_lambda_0star=((lambda v: ["piggies=[{0},{1},{2}]".format(v[0], v[1], v[2])]), None, 0,
+ ['a', 'b', 'c'], ["piggies=[a,b,c]"]),
+ single_lambda_0star_none=((lambda v: ["piggies=[{0},{1},{2}]".format(v[0], v[1], v[2])]), None, 0,
+ None, []),
+ single_lambda_1star=((lambda a, b, c: ["piggies=[{0},{1},{2}]".format(a, b, c)]), None, 1,
+ ['a', 'b', 'c'], ["piggies=[a,b,c]"]),
+ single_lambda_1star_none=((lambda a, b, c: ["piggies=[{0},{1},{2}]".format(a, b, c)]), None, 1,
+ None, []),
+ single_lambda_2star=(single_lambda_2star, None, 2,
+ dict(z='c', x='a', y='b'), ["piggies=[a,b,c]"]),
+ single_lambda_2star_none=(single_lambda_2star, None, 2,
+ None, []),
+)
+ARG_FORMATS_IDS = sorted(ARG_FORMATS.keys())
+
+
+@pytest.mark.parametrize('fmt, style, stars, value, expected',
+ (ARG_FORMATS[tc] for tc in ARG_FORMATS_IDS),
+ ids=ARG_FORMATS_IDS)
+def test_arg_format(fmt, style, stars, value, expected):
+ af = ArgFormat('name', fmt, style, stars)
+ actual = af.to_text(value)
+ print("formatted string = {0}".format(actual))
+ assert actual == expected, "actual = {0}".format(actual)
+
+
+ARG_FORMATS_FAIL = dict(
+ int_fmt=(3, None, 0, "", [""]),
+ bool_fmt=(True, None, 0, "", [""]),
+)
+ARG_FORMATS_FAIL_IDS = sorted(ARG_FORMATS_FAIL.keys())
+
+
+@pytest.mark.parametrize('fmt, style, stars, value, expected',
+ (ARG_FORMATS_FAIL[tc] for tc in ARG_FORMATS_FAIL_IDS),
+ ids=ARG_FORMATS_FAIL_IDS)
+def test_arg_format_fail(fmt, style, stars, value, expected):
+ with pytest.raises(TypeError):
+ af = ArgFormat('name', fmt, style, stars)
+ actual = af.to_text(value)
+ print("formatted string = {0}".format(actual))
+
+
+def test_dependency_ctxmgr():
+ ctx = DependencyCtxMgr("POTATOES", "Potatoes must be installed")
+ with ctx:
+ import potatoes_that_will_never_be_there # noqa: F401, pylint: disable=unused-import
+ print("POTATOES: ctx.text={0}".format(ctx.text))
+ assert ctx.text == "Potatoes must be installed"
+ assert not ctx.has_it
+
+ ctx = DependencyCtxMgr("POTATOES2")
+ with ctx:
+ import potatoes_that_will_never_be_there_again # noqa: F401, pylint: disable=unused-import
+ assert not ctx.has_it
+ print("POTATOES2: ctx.text={0}".format(ctx.text))
+ assert ctx.text.startswith("No module named")
+ assert "potatoes_that_will_never_be_there_again" in ctx.text
+
+ ctx = DependencyCtxMgr("TYPING")
+ with ctx:
+ import sys # noqa: F401, pylint: disable=unused-import
+ assert ctx.has_it
+
+
+def test_variable_meta():
+ meta = VarMeta()
+ assert meta.output is True
+ assert meta.diff is False
+ assert meta.value is None
+ meta.set_value("abc")
+ assert meta.initial_value == "abc"
+ assert meta.value == "abc"
+ assert meta.diff_result is None
+ meta.set_value("def")
+ assert meta.initial_value == "abc"
+ assert meta.value == "def"
+ assert meta.diff_result is None
+
+
+def test_variable_meta_diff():
+ meta = VarMeta(diff=True)
+ assert meta.output is True
+ assert meta.diff is True
+ assert meta.value is None
+ meta.set_value("abc")
+ assert meta.initial_value == "abc"
+ assert meta.value == "abc"
+ assert meta.diff_result is None
+ meta.set_value("def")
+ assert meta.initial_value == "abc"
+ assert meta.value == "def"
+ assert meta.diff_result == {"before": "abc", "after": "def"}
+ meta.set_value("ghi")
+ assert meta.initial_value == "abc"
+ assert meta.value == "ghi"
+ assert meta.diff_result == {"before": "abc", "after": "ghi"}
+
+
+def test_vardict():
+ vd = VarDict()
+ vd.set('a', 123)
+ assert vd['a'] == 123
+ assert vd.a == 123
+ assert 'a' in vd._meta
+ assert vd.meta('a').output is True
+ assert vd.meta('a').diff is False
+ assert vd.meta('a').change is False
+ vd['b'] = 456
+ assert vd.meta('b').output is True
+ assert vd.meta('b').diff is False
+ assert vd.meta('b').change is False
+ vd.set_meta('a', diff=True, change=True)
+ vd.set_meta('b', diff=True, output=False)
+ vd['c'] = 789
+ assert vd.has_changed('c') is False
+ vd['a'] = 'new_a'
+ assert vd.has_changed('a') is True
+ vd['c'] = 'new_c'
+ assert vd.has_changed('c') is False
+ vd['b'] = 'new_b'
+ assert vd.has_changed('b') is False
+ assert vd.a == 'new_a'
+ assert vd.c == 'new_c'
+ assert vd.output() == {'a': 'new_a', 'c': 'new_c'}
+ assert vd.diff() == {'before': {'a': 123}, 'after': {'a': 'new_a'}}, "diff={0}".format(vd.diff())
+
+
+def test_variable_meta_change():
+ vd = VarDict()
+ vd.set('a', 123, change=True)
+ vd.set('b', [4, 5, 6], change=True)
+ vd.set('c', {'m': 7, 'n': 8, 'o': 9}, change=True)
+ vd.set('d', {'a1': {'a11': 33, 'a12': 34}}, change=True)
+
+ vd.a = 1234
+ assert vd.has_changed('a') is True
+ vd.b.append(7)
+ assert vd.b == [4, 5, 6, 7]
+ assert vd.has_changed('b')
+ vd.c.update({'p': 10})
+ assert vd.c == {'m': 7, 'n': 8, 'o': 9, 'p': 10}
+ assert vd.has_changed('c')
+ vd.d['a1'].update({'a13': 35})
+ assert vd.d == {'a1': {'a11': 33, 'a12': 34, 'a13': 35}}
+ assert vd.has_changed('d')
+
+
+class MockMH(object):
+ changed = None
+
+ def _div(self, x, y):
+ return x / y
+
+ func_none = cause_changes()(_div)
+ func_onsucc = cause_changes(on_success=True)(_div)
+ func_onfail = cause_changes(on_failure=True)(_div)
+ func_onboth = cause_changes(on_success=True, on_failure=True)(_div)
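+ # cause_changes drives MockMH.changed: on_success sets it to True when the
+ # wrapped call returns, on_failure sets it to True when the call raises.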
+
+
+CAUSE_CHG_DECO_PARAMS = ['method', 'expect_exception', 'expect_changed']
+CAUSE_CHG_DECO = dict(
+ none_succ=dict(method='func_none', expect_exception=False, expect_changed=None),
+ none_fail=dict(method='func_none', expect_exception=True, expect_changed=None),
+ onsucc_succ=dict(method='func_onsucc', expect_exception=False, expect_changed=True),
+ onsucc_fail=dict(method='func_onsucc', expect_exception=True, expect_changed=None),
+ onfail_succ=dict(method='func_onfail', expect_exception=False, expect_changed=None),
+ onfail_fail=dict(method='func_onfail', expect_exception=True, expect_changed=True),
+ onboth_succ=dict(method='func_onboth', expect_exception=False, expect_changed=True),
+ onboth_fail=dict(method='func_onboth', expect_exception=True, expect_changed=True),
+)
+CAUSE_CHG_DECO_IDS = sorted(CAUSE_CHG_DECO.keys())
+
+
+@pytest.mark.parametrize(CAUSE_CHG_DECO_PARAMS,
+ [[CAUSE_CHG_DECO[tc][param]
+ for param in CAUSE_CHG_DECO_PARAMS]
+ for tc in CAUSE_CHG_DECO_IDS],
+ ids=CAUSE_CHG_DECO_IDS)
+def test_cause_changes_deco(method, expect_exception, expect_changed):
+ mh = MockMH()
+ if expect_exception:
+ with pytest.raises(Exception):
+ getattr(mh, method)(1, 0)
+ else:
+ getattr(mh, method)(9, 3)
+
+ assert mh.changed == expect_changed
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_ocapi_utils.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_ocapi_utils.py
new file mode 100644
index 000000000..3c939b558
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_ocapi_utils.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shutil
+import tempfile
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.module_utils.ocapi_utils import OcapiUtils
+
+
+class TestOcapiUtils(unittest.TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mkdtemp()
+ self.utils = OcapiUtils(creds={"user": "a_user", "pswd": "a_password"},
+ base_uri="fakeUri",
+ proxy_slot_number=None,
+ timeout=30,
+ module=None)
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ def test_prepare_multipart_firmware_upload(self):
+ # Generate a binary file and save it
+ filename = "fake_firmware.bin"
+ filepath = os.path.join(self.tempdir, filename)
+ file_contents = b'\x00\x01\x02\x03\x04'
+ with open(filepath, 'wb+') as f:
+ f.write(file_contents)
+
+ # Call prepare_multipart_firmware_upload
+ content_type, b_form_data = self.utils.prepare_multipart_firmware_upload(filepath)
+
+ # Check the returned content-type
+ content_type_pattern = r"multipart/form-data; boundary=(.*)"
+ m = re.match(content_type_pattern, content_type)
+ self.assertIsNotNone(m)
+
+ # Check the returned binary data
+ boundary = m.group(1)
+ expected_content_text = '--%s\r\n' % boundary
+ expected_content_text += 'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"\r\n' % filename
+ expected_content_text += 'Content-Type: application/octet-stream\r\n\r\n'
+ expected_content_bytes = bytearray(expected_content_text, 'utf-8')
+ expected_content_bytes += file_contents
+ expected_content_bytes += bytearray('\r\n--%s--' % boundary, 'utf-8')
+ self.assertEqual(expected_content_bytes, b_form_data)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_onepassword.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_onepassword.py
new file mode 100644
index 000000000..dbe391835
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_onepassword.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig
+
+
+@pytest.fixture
+def os_expanduser(mocker):
+ def _os_expanduser(path):
+ return path.replace("~", "/home/testuser")
+
+ mocker.patch("os.path.expanduser", side_effect=_os_expanduser)
+
+
+@pytest.fixture
+def exists(mocker):
+ def _exists(path):
+ if "op/" in path:
+ return True
+
+ return os.path.exists(path)
+
+ # Patch so the fixture actually takes effect when requested by a test.
+ mocker.patch("os.path.exists", side_effect=_exists)
+
+
+def test_op_config(mocker, os_expanduser):
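+ # The first candidate config path is reported missing and the second one
+ # exists, so config_file_path should resolve to the second candidate.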
+ mocker.patch("os.path.exists", side_effect=[False, True])
+ op_config = OnePasswordConfig()
+
+ assert "/home/testuser/.config/op/config" == op_config.config_file_path
+
+
+def test_op_no_config(mocker, os_expanduser):
+ mocker.patch("os.path.exists", return_value=False)
+ op_config = OnePasswordConfig()
+
+ assert op_config.config_file_path is None
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_opennebula.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_opennebula.py
new file mode 100644
index 000000000..dd6516984
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_opennebula.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Michal Opala <mopala@opennebula.io>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import textwrap
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.opennebula import flatten, render
+
+
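+# Each case is (input, extract flag, expected result): with extract=True a
+# single-element result is unwrapped to the bare element.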
+FLATTEN_VALID = [
+ (
+ [[[1]], [2], 3],
+ False,
+ [1, 2, 3]
+ ),
+ (
+ [[[1]], [2], 3],
+ True,
+ [1, 2, 3]
+ ),
+ (
+ [[1]],
+ False,
+ [1]
+ ),
+ (
+ [[1]],
+ True,
+ 1
+ ),
+ (
+ 1,
+ False,
+ [1]
+ ),
+ (
+ 1,
+ True,
+ 1
+ ),
+]
+
+RENDER_VALID = [
+ (
+ {
+ "NIC": {"NAME": "NIC0", "NETWORK_ID": 0},
+ "CPU": 1,
+ "MEMORY": 1024,
+ },
+ textwrap.dedent('''
+ CPU="1"
+ MEMORY="1024"
+ NIC=[NAME="NIC0",NETWORK_ID="0"]
+ ''').strip()
+ ),
+ (
+ {
+ "NIC": [
+ {"NAME": "NIC0", "NETWORK_ID": 0},
+ {"NAME": "NIC1", "NETWORK_ID": 1},
+ ],
+ "CPU": 1,
+ "MEMORY": 1024,
+ },
+ textwrap.dedent('''
+ CPU="1"
+ MEMORY="1024"
+ NIC=[NAME="NIC0",NETWORK_ID="0"]
+ NIC=[NAME="NIC1",NETWORK_ID="1"]
+ ''').strip()
+ ),
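+ # render() omits keys whose value is None, doubles backslashes, and
+ # backslash-escapes embedded double quotes, as the next case shows: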
+ (
+ {
+ 'EMPTY_VALUE': None,
+ 'SCHED_REQUIREMENTS': 'CLUSTER_ID="100"',
+ 'BACKSLASH_ESCAPED': "this is escaped: \\n; this isn't: \"\nend",
+ },
+ textwrap.dedent('''
+ BACKSLASH_ESCAPED="this is escaped: \\\\n; this isn't: \\"
+ end"
+ SCHED_REQUIREMENTS="CLUSTER_ID=\\"100\\""
+ ''').strip()
+ ),
+]
+
+
+@pytest.mark.parametrize('to_flatten,extract,expected_result', FLATTEN_VALID)
+def test_flatten(to_flatten, extract, expected_result):
+ result = flatten(to_flatten, extract)
+ assert result == expected_result, repr(result)
+
+
+@pytest.mark.parametrize('to_render,expected_result', RENDER_VALID)
+def test_render(to_render, expected_result):
+ result = render(to_render)
+ assert result == expected_result, repr(result)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py
new file mode 100644
index 000000000..d7a302248
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_saslprep.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Andrey Tuzhilin <andrei.tuzhilin@gmail.com>
+# Copyright (c) 2020, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.saslprep import saslprep
+
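+# saslprep implements the SASLprep stringprep profile (RFC 4013): non-ASCII
+# spaces map to ASCII space, "mapped to nothing" characters are stripped,
+# NFKC normalization is applied, and prohibited code points raise ValueError.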
+
+VALID = [
+ (u'', u''),
+ (u'\u00A0', u' '),
+ (u'a', u'a'),
+ (u'й', u'й'),
+ (u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9', u'\u30DE\u30C8\u30EA\u30C3\u30AF\u30B9'),
+ (u'The\u00ADM\u00AAtr\u2168', u'TheMatrIX'),
+ (u'I\u00ADX', u'IX'),
+ (u'user', u'user'),
+ (u'USER', u'USER'),
+ (u'\u00AA', u'a'),
+ (u'\u2168', u'IX'),
+ (u'\u05BE\u00A0\u05BE', u'\u05BE\u0020\u05BE'),
+]
+
+INVALID = [
+ (None, TypeError),
+ (b'', TypeError),
+ (u'\u0221', ValueError),
+ (u'\u0007', ValueError),
+ (u'\u0627\u0031', ValueError),
+ (u'\uE0001', ValueError),
+ (u'\uE0020', ValueError),
+ (u'\uFFF9', ValueError),
+ (u'\uFDD0', ValueError),
+ (u'\u0000', ValueError),
+ (u'\u06DD', ValueError),
+ (u'\uFFFFD', ValueError),
+ (u'\uD800', ValueError),
+ (u'\u200E', ValueError),
+ (u'\u05BE\u00AA\u05BE', ValueError),
+]
+
+
+@pytest.mark.parametrize('source,target', VALID)
+def test_saslprep_conversions(source, target):
+ assert saslprep(source) == target
+
+
+@pytest.mark.parametrize('source,exception', INVALID)
+def test_saslprep_exceptions(source, exception):
+ with pytest.raises(exception):
+ saslprep(source)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py
new file mode 100644
index 000000000..1cab58d63
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/test_utm_utils.py
@@ -0,0 +1,48 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c) 2018, Johannes Brunswicker <johannes.brunswicker@gmail.com>
+#
+# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)
+# SPDX-License-Identifier: BSD-2-Clause
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM
+
+
+class FakeModule:
+ def __init__(self, params):
+ self.params = params
+
+
+def test_combine_headers_returns_only_default():
+ expected = {"Accept": "application/json", "Content-type": "application/json"}
+ module = FakeModule(
+ params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token',
+ 'name': 'FakeName', 'headers': {}})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
+
+
+def test_combine_headers_returns_only_default2():
+ expected = {"Accept": "application/json", "Content-type": "application/json"}
+ module = FakeModule(
+ params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234, 'utm_token': 'utm_token',
+ 'name': 'FakeName'})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
+
+
+def test_combine_headers_returns_combined():
+ expected = {"Accept": "application/json", "Content-type": "application/json",
+ "extraHeader": "extraHeaderValue"}
+ module = FakeModule(params={'utm_protocol': 'utm_protocol', 'utm_host': 'utm_host', 'utm_port': 1234,
+ 'utm_token': 'utm_token', 'name': 'FakeName',
+ "headers": {"extraHeader": "extraHeaderValue"}})
+ result = UTM(module, "endpoint", [])._combine_headers()
+ assert result == expected
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py
new file mode 100644
index 000000000..bdcc21793
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeAnsibleModule.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class AnsibleModuleException(Exception):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class ExitJsonException(AnsibleModuleException):
+ pass
+
+
+class FailJsonException(AnsibleModuleException):
+ pass
+
+
+class FakeAnsibleModule:
+ def __init__(self, params=None, check_mode=False):
+ self.params = params
+ self.check_mode = check_mode
+
+ def exit_json(self, *args, **kwargs):
+ raise ExitJsonException(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ raise FailJsonException(*args, **kwargs)
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py
new file mode 100644
index 000000000..bc9d69c77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/FakeXenAPI.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+FAKE_API_VERSION = "1.1"
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+class Session(object):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,
+ allow_none=1, ignore_ssl=False):
+
+ self.transport = transport
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def _get_api_version(self):
+ return FAKE_API_VERSION
+
+ def _login(self, method, params):
+ self._session = "OpaqueRef:fake-xenapi-session-ref"
+ self.last_login_method = method
+ self.last_login_params = params
+ self.API_version = self._get_api_version()
+
+ def _logout(self):
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ # Should be patched with mocker.patch().
+ return None
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ # Should be patched with mocker.patch().
+ return None
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name == 'logout':
+ return self._logout
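+ # Any other attribute name falls through and implicitly returns None.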
+
+
+def xapi_local():
+ return Session("http://_var_lib_xcp_xapi/")
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py
new file mode 100644
index 000000000..0aee3197e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/common.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def fake_xenapi_ref(xenapi_class):
+ return "OpaqueRef:fake-xenapi-%s-ref" % xenapi_class
+
+
+testcase_bad_xenapi_refs = {
+ "params": [
+ None,
+ '',
+ 'OpaqueRef:NULL',
+ ],
+ "ids": [
+ 'none',
+ 'empty',
+ 'ref-null',
+ ],
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py
new file mode 100644
index 000000000..3fcea5561
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/conftest.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import sys
+import importlib
+import os
+import json
+import pytest
+
+from .FakeAnsibleModule import FakeAnsibleModule
+from ansible.module_utils import six
+from mock import MagicMock
+
+
+@pytest.fixture
+def fake_ansible_module(request):
+ """Returns fake AnsibleModule with fake module params."""
+ if hasattr(request, 'param'):
+ return FakeAnsibleModule(request.param)
+ else:
+ params = {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ }
+
+ return FakeAnsibleModule(params)
+
+
+@pytest.fixture(autouse=True)
+def XenAPI():
+ """Imports and returns fake XenAPI module."""
+
+ # Import of fake XenAPI module is wrapped by fixture so that it does not
+ # affect other unit tests which could potentially also use XenAPI module.
+
+ # First we use importlib.import_module() to import the module and assign
+ # it to a local symbol.
+ fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.module_utils.xenserver.FakeXenAPI')
+
+ # Now we populate Python module cache with imported fake module using the
+ # original module name (XenAPI). That way, any 'import XenAPI' statement
+ # will just load already imported fake module from the cache.
+ sys.modules['XenAPI'] = fake_xenapi
+
+ return fake_xenapi
+
+
+@pytest.fixture(autouse=True)
+def xenserver(XenAPI):
+ """Imports and returns xenserver module util."""
+
+    # Since the fake XenAPI module is wrapped in a fixture, all modules that
+    # depend on it have to be imported inside a test function. To make this
+    # easier to handle and to avoid code repetition, we also wrap the import
+    # of the xenserver module util in a fixture.
+ from ansible_collections.community.general.plugins.module_utils import xenserver
+
+ return xenserver
+
+
+@pytest.fixture
+def mock_xenapi_failure(XenAPI, mocker):
+ """
+    Returns a mock object that raises XenAPI.Failure on any XenAPI
+ method call.
+ """
+ fake_error_msg = "Fake XAPI method call error!"
+
+    # We need to use our MagicMock-based class that passes side_effect to its
+    # children because calls to xenapi methods can generate an arbitrary
+    # hierarchy of mock objects. Any such object, when called, should use the
+    # same side_effect as its parent mock object.
+ class MagicMockSideEffect(MagicMock):
+ def _get_child_mock(self, **kw):
+ child_mock = super(MagicMockSideEffect, self)._get_child_mock(**kw)
+ child_mock.side_effect = self.side_effect
+ return child_mock
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', new=MagicMockSideEffect(), create=True)
+ mocked_xenapi.side_effect = XenAPI.Failure(fake_error_msg)
+
+ return mocked_xenapi, fake_error_msg
+
+
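+# Illustrative usage (an assumption, not part of the original file): a test
+# that requests the mock_xenapi_failure fixture unpacks the returned tuple as
+#
+#   mocked_xenapi, fake_error_msg = mock_xenapi_failure
+#
+# and then asserts that the code under test surfaces fake_error_msg when any
+# XenAPI method call raises XenAPI.Failure.
+
+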
+@pytest.fixture
+def fixture_data_from_file(request):
+ """Loads fixture data from files."""
+ if not hasattr(request, 'param'):
+ return {}
+
+ fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
+ fixture_data = {}
+
+ if isinstance(request.param, six.string_types):
+ request.param = [request.param]
+
+ for fixture_name in request.param:
+ path = os.path.join(fixture_path, fixture_name)
+
+ with open(path) as f:
+ data = f.read()
+
+ try:
+ data = json.loads(data)
+ except Exception:
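+                # The fixture file is not valid JSON; keep its content as raw text.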
+ pass
+
+ fixture_data[fixture_name] = data
+
+ return fixture_data
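+
+
+# Illustrative usage (an assumption, not part of the original file): tests
+# select fixture files through indirect parametrization, e.g.:
+#
+#   @pytest.mark.parametrize('fixture_data_from_file',
+#                            ['ansible-test-vm-1-facts.json'],
+#                            indirect=True)
+#   def test_facts(fixture_data_from_file):
+#       facts = fixture_data_from_file['ansible-test-vm-1-facts.json']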
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json
new file mode 100644
index 000000000..add2dcf4b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json
@@ -0,0 +1,73 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "native",
+ "disks": [
+ {
+ "name": "ansible-test-vm-1-C",
+ "name_desc": "C:\\",
+ "os_device": "xvda",
+ "size": 42949672960,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ }
+ ],
+ "domid": "143",
+ "folder": "/Ansible/Test",
+ "hardware": {
+ "memory_mb": 2048,
+ "num_cpu_cores_per_socket": 2,
+ "num_cpus": 2
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "ansible-test-vm-1",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "10.0.0.1",
+ "gateway6": "",
+ "ip": "10.0.0.2",
+ "ip6": [
+ "fe80:0000:0000:0000:11e1:12c9:ef3b:75a0"
+ ],
+ "mac": "7a:a6:48:1e:31:46",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "255.255.255.0",
+ "prefix": "24",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "366fe8e0-878b-4320-8731-90d1ed3c0b93"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-28800",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "state": "poweredon",
+ "uuid": "81c373d7-a407-322f-911b-31386eb5215d",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-facts.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json
new file mode 100644
index 000000000..709769668
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json
@@ -0,0 +1,707 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:1c0a7c6d-09e5-9b2c-bbe3-9a73aadcff9f": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "insert",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdd",
+ "empty": true,
+ "metrics": "OpaqueRef:1a36eae4-87c8-0945-cee9-c85a71fd843f",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "e6aacd53-a2c8-649f-b405-93fcb811411a"
+ },
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3": {
+ "VDI": "OpaqueRef:fd20510d-e9ca-b966-3b98-4ae547dacf9a",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "unplug",
+ "unplug_force",
+ "pause"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:ddbd70d4-7dde-b51e-6208-eb434b300009",
+ "mode": "RW",
+ "other_config": {
+ "owner": "true"
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": true,
+ "userdevice": "0",
+ "uuid": "ffd6de9c-c416-1d52-3e9d-3bcbf567245e"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:fd20510d-e9ca-b966-3b98-4ae547dacf9a": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "b807f67b-3f37-4a6e-ad6c-033f812ab093",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "C:\\",
+ "name_label": "ansible-test-vm-1-C",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "43041947648",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770": "RW",
+ "read-caching-enabled-on-92ac8132-276b-4d0f-9d3a-54db51e4a438": "false",
+ "read-caching-reason-92ac8132-276b-4d0f-9d3a-54db51e4a438": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "b807f67b-3f37-4a6e-ad6c-033f812ab093",
+ "virtual_size": "42949672960",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:38da2120-6086-5043-8383-ab0a53ede42a": {
+ "MAC": "7a:a6:48:1e:31:46",
+ "MAC_autogenerated": false,
+ "MTU": "1500",
+ "VM": "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c",
+ "allowed_operations": [
+ "attach",
+ "unplug"
+ ],
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "0",
+ "ipv4_addresses": [
+ "10.0.0.2/24"
+ ],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "Static",
+ "ipv4_gateway": "10.0.0.1",
+ "ipv6_addresses": [
+ ""
+ ],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:15502939-df0f-0095-1ce3-e51367199d27",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "bd108d25-488a-f9b5-4c7b-02d40f1e38a8"
+ }
+ },
+ "VM": {
+ "OpaqueRef:43a1b8d4-da96-cb08-10f5-fb368abed19c": {
+ "HVM_boot_params": {
+ "order": "dc"
+ },
+ "HVM_boot_policy": "BIOS order",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "",
+ "PV_bootloader": "",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:1c0a7c6d-09e5-9b2c-bbe3-9a73aadcff9f",
+ "OpaqueRef:ea4a4088-19c3-6db6-ebdf-c3c0ee4405a3"
+ ],
+ "VCPUs_at_startup": "2",
+ "VCPUs_max": "2",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:38da2120-6086-5043-8383-ab0a53ede42a"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:NULL",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "migrate_send",
+ "pool_migrate",
+ "changing_VCPUs_live",
+ "suspend",
+ "hard_reboot",
+ "hard_shutdown",
+ "clean_reboot",
+ "clean_shutdown",
+ "pause",
+ "checkpoint",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [
+ "OpaqueRef:4fa7d34e-1fb6-9e88-1b21-41a3c6550d8b"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "143",
+ "generation_id": "3274224479562869847:6952848762503845513",
+ "guest_metrics": "OpaqueRef:453f21be-954d-2ca8-e38e-09741e91350c",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "2147483648",
+ "memory_dynamic_min": "2147483648",
+ "memory_overhead": "20971520",
+ "memory_static_max": "2147483648",
+ "memory_static_min": "1073741824",
+ "memory_target": "2147483648",
+ "metrics": "OpaqueRef:6eede779-4e55-7cfb-8b8a-e4b9becf770b",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-1",
+ "order": "0",
+ "other_config": {
+ "base_template_name": "Windows Server 2016 (64-bit)",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
+ "install-methods": "cdrom",
+ "instant": "true",
+ "mac_seed": "366fe8e0-878b-4320-8731-90d1ed3c0b93"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "2",
+ "device_id": "0002",
+ "hpet": "true",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "-28800",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "true",
+ "viridian_reference_tsc": "true",
+ "viridian_time_ref_count": "true"
+ },
+ "power_state": "Running",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"1649267441664\"/><restriction field=\"vcpus-max\" max=\"32\"/><restriction field=\"has-vendor-device\" value=\"true\"/><restriction max=\"255\" property=\"number-of-vbds\"/><restriction max=\"7\" property=\"number-of-vifs\"/></restrictions>",
+ "reference_label": "windows-server-2016-64bit",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "81c373d7-a407-322f-911b-31386eb5215d",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": ""
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:453f21be-954d-2ca8-e38e-09741e91350c": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "1020",
+ "major": "7",
+ "micro": "0",
+ "minor": "1"
+ },
+ "can_use_hotplug_vbd": "yes",
+ "can_use_hotplug_vif": "yes",
+ "disks": {},
+ "last_updated": "20190113T19:40:34Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "10.0.0.2",
+ "0/ipv6/0": "fe80:0000:0000:0000:11e1:12c9:ef3b:75a0"
+ },
+ "os_version": {
+ "distro": "windows",
+ "major": "6",
+ "minor": "2",
+ "name": "Microsoft Windows Server 2016 Standard|C:\\Windows|\\Device\\Harddisk0\\Partition2",
+ "spmajor": "0",
+ "spminor": "0"
+ },
+ "other": {
+ "data-ts": "1",
+ "error": "WTSQueryUserToken : 1008 failed.",
+ "feature-balloon": "1",
+ "feature-poweroff": "1",
+ "feature-reboot": "1",
+ "feature-s3": "1",
+ "feature-s4": "1",
+ "feature-setcomputername": "1",
+ "feature-static-ip-setting": "1",
+ "feature-suspend": "1",
+ "feature-ts": "1",
+ "feature-ts2": "1",
+ "feature-xs-batcmd": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "9ea6803f-12ca-3d6a-47b7-c90a33b67b98"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:6eede779-4e55-7cfb-8b8a-e4b9becf770b": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "2",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {},
+ "hvm": true,
+ "install_time": "20190113T19:31:47Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "2147475456",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "20190113T19:38:59Z",
+ "state": [],
+ "uuid": "c67fadf7-8143-0c92-c772-cd3901c18e70"
+ }
+ },
+ "host": {
+ "OpaqueRef:07a8da76-f1cf-f3b5-a531-6b751384f770": {
+ "API_version_major": "2",
+ "API_version_minor": "7",
+ "API_version_vendor": "XenSource",
+ "API_version_vendor_implementation": {},
+ "PBDs": [],
+ "PCIs": [],
+ "PGPUs": [],
+ "PIFs": [],
+ "address": "10.0.0.1",
+ "allowed_operations": [
+ "vm_migrate",
+ "provision",
+ "vm_resume",
+ "evacuate",
+ "vm_start"
+ ],
+ "bios_strings": {},
+ "blobs": {},
+ "capabilities": [
+ "xen-3.0-x86_64",
+ "xen-3.0-x86_32p",
+ "hvm-3.0-x86_32",
+ "hvm-3.0-x86_32p",
+ "hvm-3.0-x86_64",
+ ""
+ ],
+ "chipset_info": {
+ "iommu": "true"
+ },
+ "control_domain": "OpaqueRef:a2a31555-f232-822b-8f36-10d75d44b79c",
+ "cpu_configuration": {},
+ "cpu_info": {
+ "cpu_count": "40",
+ "family": "6",
+ "features": "7ffefbff-bfebfbff-00000021-2c100800",
+ "features_hvm": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "features_pv": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "flags": "fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ht syscall nx lm constant_tsc arch_perfmon rep_good nopl nonstop_tsc eagerfpu pni pclmulqdq monitor est ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm ida arat epb pln pts dtherm fsgsbase bmi1 avx2 bmi2 erms xsaveopt cqm_llc cqm_occup_llc",
+ "model": "63",
+ "modelname": "Intel(R) Xeon(R) CPU E5-2660 v3 @ 2.60GHz",
+ "socket_count": "2",
+ "speed": "2597.064",
+ "stepping": "2",
+ "vendor": "GenuineIntel"
+ },
+ "crash_dump_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "crashdumps": [],
+ "current_operations": {},
+ "display": "enabled",
+ "edition": "free",
+ "enabled": true,
+ "external_auth_configuration": {},
+ "external_auth_service_name": "",
+ "external_auth_type": "",
+ "features": [],
+ "guest_VCPUs_params": {},
+ "ha_network_peers": [],
+ "ha_statefiles": [],
+ "host_CPUs": [
+ "OpaqueRef:f7e744f6-a6f9-c460-999a-c27e1395e2e0",
+ "OpaqueRef:f6e5dcf0-0453-8f3f-88c1-7ad6e2ef3dd1",
+ "OpaqueRef:f27a52fb-5feb-173d-1a07-d1735a83c2cc",
+ "OpaqueRef:ed65327a-508a-ccfc-dba6-2a0175cb2432",
+ "OpaqueRef:e41d2f2a-fe9e-72cb-8104-b22d6d314b13",
+ "OpaqueRef:e1988469-b814-5d10-17a6-bfd7c62d2b5f",
+ "OpaqueRef:d73967dc-b8d8-b47b-39f4-d599fdcabf55",
+ "OpaqueRef:cba9ebd9-40dc-0611-d1bb-aa661bd0bf70",
+ "OpaqueRef:c53d3110-4085-60af-8300-d879818789f7",
+ "OpaqueRef:bee0cf87-7df6-79a6-94e8-36f98e69ad20",
+ "OpaqueRef:bde28e83-213f-0e65-b6ad-0ae1ecebb98d",
+ "OpaqueRef:bbfefe67-f65f-98cb-c3fc-cb8ea0588006",
+ "OpaqueRef:b38ac595-afea-0ca0-49a0-9f5ef2368e3b",
+ "OpaqueRef:b14ef333-78b1-193d-02da-dc9bfed36912",
+ "OpaqueRef:afd478bf-57b9-0c79-f257-50aeb81504f1",
+ "OpaqueRef:a307cd3a-2132-2e42-4ebc-cc1c7780736d",
+ "OpaqueRef:a1a9df7d-88ba-64fd-a55c-0f6472e1753f",
+ "OpaqueRef:a0e39c9c-3e0b-fa03-e5d0-93a09aa77393",
+ "OpaqueRef:9fd5719b-36ab-8e25-7756-20a496ccb331",
+ "OpaqueRef:9ac4195d-ac07-cfe2-bc19-27ee54cf91fb",
+ "OpaqueRef:98c5c00c-1e2d-e22b-842e-79e85ce07873",
+ "OpaqueRef:961129bf-e695-f206-7297-64f9007a64f3",
+ "OpaqueRef:64368b4c-3488-2808-f0b3-42f2a656df2b",
+ "OpaqueRef:620dabc0-d7c5-0dc8-52df-3be25194c2fb",
+ "OpaqueRef:5cee2759-dd8e-7e1a-0727-21e196584030",
+ "OpaqueRef:58f70163-863d-5787-ffbb-2416cb16ca1e",
+ "OpaqueRef:4462f848-f396-653d-67f9-2bed13be2c58",
+ "OpaqueRef:40e800c2-19db-7cd8-c045-5ae93f908cae",
+ "OpaqueRef:3f84278b-dec6-ded0-1a33-4daa0ce75a2f",
+ "OpaqueRef:3ef14992-62f6-e1f0-5715-0ee02a834a9c",
+ "OpaqueRef:3e274c24-c55b-06f5-2c8f-415421043ab2",
+ "OpaqueRef:35ff27da-f286-7b70-adc1-a200880bb79f",
+ "OpaqueRef:2511aa53-8660-e442-3cd2-305982d1f751",
+ "OpaqueRef:21d234e3-138c-81ca-9ed8-febc81b874e9",
+ "OpaqueRef:1f9b4ee3-dcc7-114e-b401-dc3e94c07efa",
+ "OpaqueRef:1b94a981-d340-dd07-41c2-b3ff3c545fed",
+ "OpaqueRef:197ad104-64a8-5af3-8c7a-95f3d301aadd",
+ "OpaqueRef:1672e747-dc4b-737b-ddcf-0a373f966012",
+ "OpaqueRef:12ced494-a225-7584-456b-739331bb5114",
+ "OpaqueRef:0139ff72-62ac-1a6a-8f6f-cb01d8a4ee92"
+ ],
+ "hostname": "ansible-test-host-1",
+ "license_params": {
+ "address1": "",
+ "address2": "",
+ "city": "",
+ "company": "",
+ "country": "",
+ "enable_xha": "true",
+ "expiry": "20291231T23:00:00Z",
+ "grace": "no",
+ "license_type": "",
+ "name": "",
+ "platform_filter": "false",
+ "postalcode": "",
+ "productcode": "",
+ "regular_nag_dialog": "false",
+ "restrict_ad": "false",
+ "restrict_batch_hotfix_apply": "true",
+ "restrict_checkpoint": "false",
+ "restrict_cifs": "true",
+ "restrict_connection": "false",
+ "restrict_cpu_masking": "false",
+ "restrict_dmc": "false",
+ "restrict_dr": "false",
+ "restrict_email_alerting": "false",
+ "restrict_equalogic": "false",
+ "restrict_export_resource_data": "true",
+ "restrict_gpu": "false",
+ "restrict_guest_agent_auto_update": "true",
+ "restrict_guest_ip_setting": "false",
+ "restrict_health_check": "false",
+ "restrict_historical_performance": "false",
+ "restrict_hotfix_apply": "false",
+ "restrict_integrated_gpu_passthrough": "false",
+ "restrict_intellicache": "false",
+ "restrict_lab": "false",
+ "restrict_live_patching": "true",
+ "restrict_marathon": "false",
+ "restrict_nested_virt": "true",
+ "restrict_netapp": "false",
+ "restrict_pci_device_for_auto_update": "true",
+ "restrict_pool_attached_storage": "false",
+ "restrict_pooling": "false",
+ "restrict_pvs_proxy": "true",
+ "restrict_qos": "false",
+ "restrict_rbac": "false",
+ "restrict_read_caching": "true",
+ "restrict_set_vcpus_number_live": "true",
+ "restrict_ssl_legacy_switch": "false",
+ "restrict_stage": "false",
+ "restrict_storage_xen_motion": "false",
+ "restrict_storagelink": "false",
+ "restrict_storagelink_site_recovery": "false",
+ "restrict_vgpu": "true",
+ "restrict_vif_locking": "false",
+ "restrict_vlan": "false",
+ "restrict_vm_memory_introspection": "true",
+ "restrict_vmpr": "false",
+ "restrict_vmss": "false",
+ "restrict_vss": "false",
+ "restrict_vswitch_controller": "false",
+ "restrict_web_selfservice": "true",
+ "restrict_web_selfservice_manager": "true",
+ "restrict_wlb": "true",
+ "restrict_xcm": "true",
+ "restrict_xen_motion": "false",
+ "serialnumber": "",
+ "sku_marketing_name": "Citrix XenServer",
+ "sku_type": "free",
+ "sockets": "2",
+ "state": "",
+ "version": ""
+ },
+ "license_server": {
+ "address": "localhost",
+ "port": "27000"
+ },
+ "local_cache_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "logging": {},
+ "memory_overhead": "4606619648",
+ "metrics": "OpaqueRef:82b6937a-60c2-96d8-4e78-9f9a1143033f",
+ "name_description": "",
+ "name_label": "ansible-test-host-1",
+ "other_config": {
+ "agent_start_time": "1532019557.",
+ "boot_time": "1530023264.",
+ "iscsi_iqn": "iqn.2018-06.com.example:c8bac750",
+ "last_blob_sync_time": "1547394076.36",
+ "multipathhandle": "dmp",
+ "multipathing": "true"
+ },
+ "patches": [
+ "OpaqueRef:f74ca18d-cfb7-e4fe-e5c4-819843de11e2",
+ "OpaqueRef:f53ff05e-8dd8-3a15-d3b0-8dcf6004fbe2",
+ "OpaqueRef:ed7f38da-1a50-a48b-60bf-933cabe8d7bc",
+ "OpaqueRef:e7bb1462-51a5-1aaf-3b56-11b8ebd83a94",
+ "OpaqueRef:d87b343b-6ba3-db8b-b80e-e02319ba5924",
+ "OpaqueRef:ccb00450-ed04-4eaa-e6d7-130ef3722374",
+ "OpaqueRef:b79b8864-11d9-1d5f-09e5-a66d7b64b9e2",
+ "OpaqueRef:9bebcc7d-61ae-126b-3be0-9156026e586f",
+ "OpaqueRef:740a1156-b991-00b8-ef50-fdbb22a4d911",
+ "OpaqueRef:71def430-754b-2bfb-6c93-ec3b67b754e4",
+ "OpaqueRef:6c73b00d-df66-1740-9578-2b14e46297ba",
+ "OpaqueRef:6a53d2ae-3d6b-32ed-705f-fd53f1304470",
+ "OpaqueRef:35a67684-b094-1c77-beff-8237d87c7a27",
+ "OpaqueRef:33da42c2-c421-9859-79b7-ce9b6c394a1b",
+ "OpaqueRef:2baa6b4b-9bbe-c1b2-23ce-c8c831ac581d",
+ "OpaqueRef:2ac3beea-dee2-44e7-9f67-5fd216e593a0",
+ "OpaqueRef:1bd8f24b-3190-6e7a-b36e-e2998197d062",
+ "OpaqueRef:1694ea26-4930-6ca1-036e-273438375de9",
+ "OpaqueRef:09813f03-0c6f-a6af-768f-ef4cdde2c641"
+ ],
+ "power_on_config": {},
+ "power_on_mode": "",
+ "resident_VMs": [],
+ "sched_policy": "credit",
+ "software_version": {
+ "build_number": "release/falcon/master/8",
+ "date": "2017-05-11",
+ "db_schema": "5.120",
+ "dbv": "2017.0517",
+ "hostname": "f7d02093adae",
+ "linux": "4.4.0+10",
+ "network_backend": "openvswitch",
+ "platform_name": "XCP",
+ "platform_version": "2.3.0",
+ "product_brand": "XenServer",
+ "product_version": "7.2.0",
+ "product_version_text": "7.2",
+ "product_version_text_short": "7.2",
+ "xapi": "1.9",
+ "xen": "4.7.5-2.12",
+ "xencenter_max": "2.7",
+ "xencenter_min": "2.7"
+ },
+ "ssl_legacy": true,
+ "supported_bootloaders": [
+ "pygrub",
+ "eliloader"
+ ],
+ "suspend_image_sr": "OpaqueRef:ed72d7bf-4e53-67fc-17f5-e27b203042ba",
+ "tags": [],
+ "updates": [
+ "OpaqueRef:b71938bf-4c4f-eb17-7e78-588e71297a74",
+ "OpaqueRef:91cfa47b-52f9-a4e3-4e78-52e3eb3e5141",
+ "OpaqueRef:e2209ae9-5362-3a20-f691-9294144e49f2",
+ "OpaqueRef:6ac77a0f-f079-8067-85cc-c9ae2f8dcca9",
+ "OpaqueRef:a17e721d-faf4-6ad1-c617-dd4899279534",
+ "OpaqueRef:6c9b814c-e1c2-b8be-198f-de358686b10a",
+ "OpaqueRef:fbaabbfe-88d5-d89b-5b3f-d6374601ca71",
+ "OpaqueRef:9eccc765-9726-d220-96b1-2e85adf77ecc",
+ "OpaqueRef:204558d7-dce0-2304-bdc5-80ec5fd7e3c3",
+ "OpaqueRef:65b14ae7-f440-0c4d-4af9-c7946b90fd2f",
+ "OpaqueRef:0760c608-b02e-743a-18a1-fa8f205374d6",
+ "OpaqueRef:1ced32ca-fec4-8b44-0e8f-753c97f2d93f",
+ "OpaqueRef:3fffd7c7-f4d1-6b03-a5b8-d75211bb7b8f",
+ "OpaqueRef:01befb95-412e-e9dd-5b5d-edd50df61cb1",
+ "OpaqueRef:a3f9481e-fe3d-1f00-235f-44d404f51128",
+ "OpaqueRef:507ee5fc-59d3-e635-21d5-98a5cace4bf2",
+ "OpaqueRef:7b4b5da1-54af-d0c4-3fea-394b4257bffe",
+ "OpaqueRef:f61edc83-91d9-a161-113f-00c110196238",
+ "OpaqueRef:7efce157-9b93-d116-f3f8-7eb0c6fb1a79"
+ ],
+ "updates_requiring_reboot": [],
+ "uuid": "92ac8132-276b-4d0f-9d3a-54db51e4a438",
+ "virtual_hardware_platform_versions": [
+ "0",
+ "1",
+ "2"
+ ]
+ }
+ },
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-1-params.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json
new file mode 100644
index 000000000..607212c05
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json
@@ -0,0 +1,87 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "custom",
+ "disks": [
+ {
+ "name": "ansible-test-vm-2-root",
+ "name_desc": "/",
+ "os_device": "xvda",
+ "size": 10737418240,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ },
+ {
+ "name": "ansible-test-vm-2-mysql",
+ "name_desc": "/var/lib/mysql",
+ "os_device": "xvdb",
+ "size": 1073741824,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "1"
+ }
+ ],
+ "domid": "140",
+ "folder": "/Ansible/Test",
+ "hardware": {
+ "memory_mb": 1024,
+ "num_cpu_cores_per_socket": 1,
+ "num_cpus": 1
+ },
+ "home_server": "ansible-test-host-2",
+ "is_template": false,
+ "name": "ansible-test-vm-2",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "10.0.0.1",
+ "gateway6": "",
+ "ip": "169.254.0.2",
+ "ip6": [],
+ "mac": "16:87:31:70:d6:31",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "255.255.255.0",
+ "prefix": "24",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "base_template_name": "CentOS 7",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:cf1402d3-b6c1-d908-fe62-06502e3b311a",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "instant": "true",
+ "linux_template": "true",
+ "mac_seed": "0ab46664-f519-5383-166e-e4ea485ede7d"
+ },
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "device_id": "0001",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "0",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "false"
+ },
+ "state": "poweredon",
+ "uuid": "0a05d5ad-3e4b-f0dc-6101-8c56623958bc",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/gateway": "10.0.0.1",
+ "vm-data/networks/0/ip": "10.0.0.3",
+ "vm-data/networks/0/mac": "16:87:31:70:d6:31",
+ "vm-data/networks/0/name": "Host internal management network",
+ "vm-data/networks/0/netmask": "255.255.255.0",
+ "vm-data/networks/0/prefix": "24",
+ "vm-data/networks/0/type": "static"
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-facts.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json
new file mode 100644
index 000000000..10615f40a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json
@@ -0,0 +1,771 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:510e214e-f0ba-3bc9-7834-a4f4d3fa33ef": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "insert",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdd",
+ "empty": true,
+ "metrics": "OpaqueRef:1075bebe-ba71-66ef-ba30-8afbc83bc6b5",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "79ee1d8e-944b-3bfd-ba4c-a0c165d84f3d"
+ },
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760": {
+ "VDI": "OpaqueRef:102bef39-b134-d23a-9a50-490e1dbca8f7",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "pause"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:1c71ccde-d7e9-10fb-569c-993b880fa790",
+ "mode": "RW",
+ "other_config": {
+ "owner": ""
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": false,
+ "userdevice": "0",
+ "uuid": "932fdf6d-7ac5-45e8-a48e-694af75726f1"
+ },
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151": {
+ "VDI": "OpaqueRef:87b45ac6-af36-f4fd-6ebd-a08bed9001e4",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unpause",
+ "unplug",
+ "unplug_force",
+ "pause"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "xvdb",
+ "empty": false,
+ "metrics": "OpaqueRef:b8424146-d3ea-4850-db9a-47f0059c10ac",
+ "mode": "RW",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": true,
+ "userdevice": "1",
+ "uuid": "c0c1e648-3690-e1fb-9f47-24b4df0cb458"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:102bef39-b134-d23a-9a50-490e1dbca8f7": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "fa1202b8-326f-4235-802e-fafbed66b26b",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/",
+ "name_label": "ansible-test-vm-2-root",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "10766778368",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": "RW",
+ "read-caching-enabled-on-dff6702e-bcb6-4704-8dd4-952e8c883365": "false",
+ "read-caching-reason-dff6702e-bcb6-4704-8dd4-952e8c883365": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "fa1202b8-326f-4235-802e-fafbed66b26b",
+ "virtual_size": "10737418240",
+ "xenstore_data": {}
+ },
+ "OpaqueRef:87b45ac6-af36-f4fd-6ebd-a08bed9001e4": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "clone",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "ab3a4d72-f498-4687-86ce-ca937046db76",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/var/lib/mysql",
+ "name_label": "ansible-test-vm-2-mysql",
+ "on_boot": "persist",
+ "other_config": {},
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "1082130432",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "host_OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": "RW",
+ "read-caching-enabled-on-dff6702e-bcb6-4704-8dd4-952e8c883365": "false",
+ "read-caching-reason-dff6702e-bcb6-4704-8dd4-952e8c883365": "LICENSE_RESTRICTION",
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "user",
+ "uuid": "ab3a4d72-f498-4687-86ce-ca937046db76",
+ "virtual_size": "1073741824",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": {
+ "MAC": "16:87:31:70:d6:31",
+ "MAC_autogenerated": false,
+ "MTU": "1500",
+ "VM": "OpaqueRef:08632af0-473e-5106-f400-7910229e49be",
+ "allowed_operations": [
+ "attach",
+ "unplug"
+ ],
+ "current_operations": {},
+ "currently_attached": true,
+ "device": "0",
+ "ipv4_addresses": [],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "None",
+ "ipv4_gateway": "",
+ "ipv6_addresses": [],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:d74d5f20-f0ab-ee36-9a74-496ffb994232",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "07b70134-9396-94fc-5105-179b430ce4f8"
+ }
+ },
+ "VM": {
+ "OpaqueRef:08632af0-473e-5106-f400-7910229e49be": {
+ "HVM_boot_params": {
+ "order": "cdn"
+ },
+ "HVM_boot_policy": "BIOS order",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "",
+ "PV_bootloader": "",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:510e214e-f0ba-3bc9-7834-a4f4d3fa33ef",
+ "OpaqueRef:9bd6decd-2e55-b55e-387d-c40aa67ff151",
+ "OpaqueRef:6bc2c353-f132-926d-6e9b-e4d1d55a3760"
+ ],
+ "VCPUs_at_startup": "1",
+ "VCPUs_max": "1",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "migrate_send",
+ "pool_migrate",
+ "changing_VCPUs_live",
+ "suspend",
+ "hard_reboot",
+ "hard_shutdown",
+ "clean_reboot",
+ "clean_shutdown",
+ "pause",
+ "checkpoint",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [
+ "OpaqueRef:2a24e023-a856-de30-aea3-2024bacdc71f"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "140",
+ "generation_id": "",
+ "guest_metrics": "OpaqueRef:150d2dfa-b634-7965-92ab-31fc26382683",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "1073741824",
+ "memory_dynamic_min": "1073741824",
+ "memory_overhead": "11534336",
+ "memory_static_max": "1073741824",
+ "memory_static_min": "1073741824",
+ "memory_target": "1073741824",
+ "metrics": "OpaqueRef:b56b460b-6476-304d-b143-ce543ffab828",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-2",
+ "order": "0",
+ "other_config": {
+ "base_template_name": "CentOS 7",
+ "folder": "/Ansible/Test",
+ "import_task": "OpaqueRef:cf1402d3-b6c1-d908-fe62-06502e3b311a",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "instant": "true",
+ "linux_template": "true",
+ "mac_seed": "0ab46664-f519-5383-166e-e4ea485ede7d"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "1",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "device_id": "0001",
+ "nx": "true",
+ "pae": "true",
+ "timeoffset": "0",
+ "vga": "std",
+ "videoram": "8",
+ "viridian": "false"
+ },
+ "power_state": "Running",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"549755813888\" /><restriction field=\"vcpus-max\" max=\"16\" /><restriction property=\"number-of-vbds\" max=\"16\" /><restriction property=\"number-of-vifs\" max=\"7\" /><restriction field=\"allow-gpu-passthrough\" value=\"0\" /></restrictions>",
+ "reference_label": "",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "0a05d5ad-3e4b-f0dc-6101-8c56623958bc",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/gateway": "10.0.0.1",
+ "vm-data/networks/0/ip": "10.0.0.3",
+ "vm-data/networks/0/mac": "16:87:31:70:d6:31",
+ "vm-data/networks/0/name": "Host internal management network",
+ "vm-data/networks/0/netmask": "255.255.255.0",
+ "vm-data/networks/0/prefix": "24",
+ "vm-data/networks/0/type": "static"
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:150d2dfa-b634-7965-92ab-31fc26382683": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "90977",
+ "major": "6",
+ "micro": "0",
+ "minor": "5"
+ },
+ "can_use_hotplug_vbd": "unspecified",
+ "can_use_hotplug_vif": "unspecified",
+ "disks": {},
+ "last_updated": "20190113T19:36:26Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "169.254.0.2"
+ },
+ "os_version": {
+ "distro": "centos",
+ "major": "7",
+ "minor": "2",
+ "name": "CentOS Linux release 7.2.1511 (Core)",
+ "uname": "3.10.0-327.22.2.el7.x86_64"
+ },
+ "other": {
+ "feature-balloon": "1",
+ "feature-shutdown": "1",
+ "feature-suspend": "1",
+ "feature-vcpu-hotplug": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "5c9d1be5-7eee-88f2-46c3-df1d44f9cdb5"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:b56b460b-6476-304d-b143-ce543ffab828": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "1",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {},
+ "hvm": true,
+ "install_time": "20190113T19:32:46Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "1073729536",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "20190113T19:35:15Z",
+ "state": [],
+ "uuid": "876dd44c-aad1-97bf-9ee5-4cd58eac7163"
+ }
+ },
+ "host": {
+ "OpaqueRef:e87be804-57a1-532e-56ac-6c4910957be0": {
+ "API_version_major": "2",
+ "API_version_minor": "7",
+ "API_version_vendor": "XenSource",
+ "API_version_vendor_implementation": {},
+ "PBDs": [],
+ "PCIs": [],
+ "PGPUs": [],
+ "PIFs": [],
+ "address": "10.0.0.1",
+ "allowed_operations": [
+ "vm_migrate",
+ "provision",
+ "vm_resume",
+ "evacuate",
+ "vm_start"
+ ],
+ "bios_strings": {},
+ "blobs": {},
+ "capabilities": [
+ "xen-3.0-x86_64",
+ "xen-3.0-x86_32p",
+ "hvm-3.0-x86_32",
+ "hvm-3.0-x86_32p",
+ "hvm-3.0-x86_64",
+ ""
+ ],
+ "chipset_info": {
+ "iommu": "true"
+ },
+ "control_domain": "OpaqueRef:ffcc92a1-8fde-df6f-a501-44b37811286b",
+ "cpu_configuration": {},
+ "cpu_info": {
+ "cpu_count": "40",
+ "family": "6",
+ "features": "7ffefbff-bfebfbff-00000021-2c100800",
+ "features_hvm": "17cbfbff-f7fa3223-2d93fbff-00000023-00000001-000007ab-00000000-00000000-00001000-0c000000",
+ "features_pv": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "flags": "fpu de tsc msr pae mce cx8 apic sep mca cmov pat clflush acpi mmx fxsr sse sse2 ht syscall nx lm constant_tsc arch_perfmon rep_good nopl nonstop_tsc eagerfpu pni pclmulqdq monitor est ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm ida arat epb pln pts dtherm fsgsbase bmi1 avx2 bmi2 erms xsaveopt cqm_llc cqm_occup_llc",
+ "model": "63",
+ "modelname": "Intel(R) Xeon(R) CPU E5-2660 v3 @ 2.60GHz",
+ "socket_count": "2",
+ "speed": "2597.070",
+ "stepping": "2",
+ "vendor": "GenuineIntel"
+ },
+ "crash_dump_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "crashdumps": [],
+ "current_operations": {},
+ "display": "enabled",
+ "edition": "free",
+ "enabled": true,
+ "external_auth_configuration": {},
+ "external_auth_service_name": "",
+ "external_auth_type": "",
+ "features": [],
+ "guest_VCPUs_params": {},
+ "ha_network_peers": [],
+ "ha_statefiles": [],
+ "host_CPUs": [
+ "OpaqueRef:ec3ba9c4-9b57-236b-3eaa-b157affc1621",
+ "OpaqueRef:e6de7ab3-f4ad-f271-e51b-e3d8c041d3fb",
+ "OpaqueRef:e519ef88-bf41-86ac-16b3-c178cb4b78b1",
+ "OpaqueRef:e48f1bc1-98ba-89e5-ab69-821c625f7f82",
+ "OpaqueRef:e2659936-3de6-dbca-cc44-4af50960b2b7",
+ "OpaqueRef:d0da1e31-20ac-4aff-8897-e80df8200648",
+ "OpaqueRef:cec473ba-41a8-439d-b397-be0c60467b5d",
+ "OpaqueRef:ce88014d-b06c-c959-0624-04d79b791885",
+ "OpaqueRef:c656ca58-41fe-3689-d322-174aa5798beb",
+ "OpaqueRef:c0a21f14-8f46-19de-1cf4-530a34c4aa17",
+ "OpaqueRef:bf70c061-7b45-0497-7ef6-65a236e898e8",
+ "OpaqueRef:b7a2ba0f-f11b-3633-ad47-4f5f76a600a8",
+ "OpaqueRef:b4fef1fa-3aae-9790-f47e-6a17f645339c",
+ "OpaqueRef:b4594721-f8f4-4475-61c5-4efeec1733f1",
+ "OpaqueRef:9dcba36f-c29f-478f-f578-d1ea347410a6",
+ "OpaqueRef:987897e8-1184-917e-6a5f-e205d0c739e5",
+ "OpaqueRef:90f06d64-be18-7fdf-36ba-bbd696a26cf3",
+ "OpaqueRef:90150bc1-e604-4cd4-35ad-9cfa8e985de3",
+ "OpaqueRef:838f4ad4-8ad2-0d6c-a74e-26baa461de3d",
+ "OpaqueRef:736fb523-d347-e8c0-089b-c9811d3c1195",
+ "OpaqueRef:7137b479-87d4-9097-a684-e54cc4de5d09",
+ "OpaqueRef:6e08fa1d-7d7b-d9be-1574-ffe95bd515fd",
+ "OpaqueRef:6b9e6ecd-54e5-4248-5aea-ee5b99248818",
+ "OpaqueRef:65d56b24-3445-b444-5125-c91e6966fd29",
+ "OpaqueRef:60908eca-1e5c-c938-5b76-e8ff9d8899ab",
+ "OpaqueRef:46e96878-c076-2164-2373-6cdd108c2436",
+ "OpaqueRef:40ccdaf4-6008-2b83-92cb-ca197f73433f",
+ "OpaqueRef:3bc8133a-ccb2-6790-152f-b3f577517751",
+ "OpaqueRef:38c8edd8-0621-76de-53f6-86bef2a9e05c",
+ "OpaqueRef:342c1bab-a211-a0eb-79a5-780bd5ad1f23",
+ "OpaqueRef:1e20e6d0-5502-0dff-4f17-5d35eb833af1",
+ "OpaqueRef:176baafa-0e63-7000-f754-25e2a6b74959",
+ "OpaqueRef:16cab1a2-0111-b2af-6dfe-3724b79e6b6b",
+ "OpaqueRef:0f213647-8362-9c5e-e99b-0ebaefc609ce",
+ "OpaqueRef:0e019819-b41f-0bfb-d4ee-dd5484fea9b6",
+ "OpaqueRef:0d39212f-82ba-190c-b304-19b3fa491fff",
+ "OpaqueRef:087ce3ad-3b66-ae1e-3130-3ae640dcc638",
+ "OpaqueRef:0730f24c-87ed-8296-8f14-3036e5ad2357",
+ "OpaqueRef:04c27426-4895-39a7-9ade-ef33d3721c26",
+ "OpaqueRef:017b27bf-0270-19e7-049a-5a9b3bb54898"
+ ],
+ "hostname": "ansible-test-host-2",
+ "license_params": {
+ "address1": "",
+ "address2": "",
+ "city": "",
+ "company": "",
+ "country": "",
+ "enable_xha": "true",
+ "expiry": "20291231T23:00:00Z",
+ "grace": "no",
+ "license_type": "",
+ "name": "",
+ "platform_filter": "false",
+ "postalcode": "",
+ "productcode": "",
+ "regular_nag_dialog": "false",
+ "restrict_ad": "false",
+ "restrict_batch_hotfix_apply": "true",
+ "restrict_checkpoint": "false",
+ "restrict_cifs": "true",
+ "restrict_connection": "false",
+ "restrict_cpu_masking": "false",
+ "restrict_dmc": "false",
+ "restrict_dr": "false",
+ "restrict_email_alerting": "false",
+ "restrict_equalogic": "false",
+ "restrict_export_resource_data": "true",
+ "restrict_gpu": "false",
+ "restrict_guest_agent_auto_update": "true",
+ "restrict_guest_ip_setting": "false",
+ "restrict_health_check": "false",
+ "restrict_historical_performance": "false",
+ "restrict_hotfix_apply": "false",
+ "restrict_integrated_gpu_passthrough": "false",
+ "restrict_intellicache": "false",
+ "restrict_lab": "false",
+ "restrict_live_patching": "true",
+ "restrict_marathon": "false",
+ "restrict_nested_virt": "true",
+ "restrict_netapp": "false",
+ "restrict_pci_device_for_auto_update": "true",
+ "restrict_pool_attached_storage": "false",
+ "restrict_pooling": "false",
+ "restrict_pvs_proxy": "true",
+ "restrict_qos": "false",
+ "restrict_rbac": "false",
+ "restrict_read_caching": "true",
+ "restrict_set_vcpus_number_live": "true",
+ "restrict_ssl_legacy_switch": "false",
+ "restrict_stage": "false",
+ "restrict_storage_xen_motion": "false",
+ "restrict_storagelink": "false",
+ "restrict_storagelink_site_recovery": "false",
+ "restrict_vgpu": "true",
+ "restrict_vif_locking": "false",
+ "restrict_vlan": "false",
+ "restrict_vm_memory_introspection": "true",
+ "restrict_vmpr": "false",
+ "restrict_vmss": "false",
+ "restrict_vss": "false",
+ "restrict_vswitch_controller": "false",
+ "restrict_web_selfservice": "true",
+ "restrict_web_selfservice_manager": "true",
+ "restrict_wlb": "true",
+ "restrict_xcm": "true",
+ "restrict_xen_motion": "false",
+ "serialnumber": "",
+ "sku_marketing_name": "Citrix XenServer",
+ "sku_type": "free",
+ "sockets": "2",
+ "state": "",
+ "version": ""
+ },
+ "license_server": {
+ "address": "localhost",
+ "port": "27000"
+ },
+ "local_cache_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "logging": {},
+ "memory_overhead": "4865126400",
+ "metrics": "OpaqueRef:f55653cb-92eb-8257-f2ee-7a2d1c2d6aef",
+ "name_description": "",
+ "name_label": "ansible-test-host-2",
+ "other_config": {
+ "agent_start_time": "1532019582.",
+ "boot_time": "1528986759.",
+ "iscsi_iqn": "iqn.2018-06.com.example:87b7637d",
+ "last_blob_sync_time": "1547394065.41",
+ "multipathhandle": "dmp",
+ "multipathing": "true"
+ },
+ "patches": [
+ "OpaqueRef:f5bd18b6-1423-893a-5d7f-7095338e6a2d",
+ "OpaqueRef:eecb0b95-87fb-a53e-651c-9741efd18bb6",
+ "OpaqueRef:e92c9ef3-2e51-1a36-d400-9e237982b782",
+ "OpaqueRef:cc98226c-2c08-799e-5f15-7761a398e4a0",
+ "OpaqueRef:c4f35e66-d064-55a7-6946-7f4b145275a6",
+ "OpaqueRef:c3794494-f894-6141-b811-f37a8fe60094",
+ "OpaqueRef:bcf61af7-63a9-e430-5b7c-a740ba470596",
+ "OpaqueRef:b58ac71e-797e-6f66-71ad-fe298c94fd10",
+ "OpaqueRef:a2ea18fd-5343-f8db-718d-f059c2a8cce0",
+ "OpaqueRef:929db459-6861-c588-158f-70f763331d6d",
+ "OpaqueRef:92962d94-2205-f6e1-12f9-b55a99fd824d",
+ "OpaqueRef:65dfb07a-f90d-dad9-9ab8-1cc2b1e79afb",
+ "OpaqueRef:537a87c4-3bf4-969f-f06a-2dd8d3a018a2",
+ "OpaqueRef:32dd1de3-c9c8-bcbb-27a0-83d4a930876d",
+ "OpaqueRef:30a8ccc8-74a9-b31f-0403-66b117e281b6",
+ "OpaqueRef:24545c44-ffd1-8a28-18c6-3d008bf4d63e",
+ "OpaqueRef:1fcef81b-7c44-a4db-f59a-c4a147da9c49",
+ "OpaqueRef:1e98a240-514b-1863-5518-c771d0ebf579",
+ "OpaqueRef:1632cab2-b268-6ce8-4f7b-ce7fd4bfa1eb"
+ ],
+ "power_on_config": {},
+ "power_on_mode": "",
+ "resident_VMs": [],
+ "sched_policy": "credit",
+ "software_version": {
+ "build_number": "release/falcon/master/8",
+ "date": "2017-05-11",
+ "db_schema": "5.120",
+ "dbv": "2017.0517",
+ "hostname": "f7d02093adae",
+ "linux": "4.4.0+10",
+ "network_backend": "openvswitch",
+ "platform_name": "XCP",
+ "platform_version": "2.3.0",
+ "product_brand": "XenServer",
+ "product_version": "7.2.0",
+ "product_version_text": "7.2",
+ "product_version_text_short": "7.2",
+ "xapi": "1.9",
+ "xen": "4.7.5-2.12",
+ "xencenter_max": "2.7",
+ "xencenter_min": "2.7"
+ },
+ "ssl_legacy": true,
+ "supported_bootloaders": [
+ "pygrub",
+ "eliloader"
+ ],
+ "suspend_image_sr": "OpaqueRef:0b984cec-a36c-ce84-7b34-9f0088352d55",
+ "tags": [],
+ "updates": [
+ "OpaqueRef:7b4b5da1-54af-d0c4-3fea-394b4257bffe",
+ "OpaqueRef:fbaabbfe-88d5-d89b-5b3f-d6374601ca71",
+ "OpaqueRef:507ee5fc-59d3-e635-21d5-98a5cace4bf2",
+ "OpaqueRef:6c9b814c-e1c2-b8be-198f-de358686b10a",
+ "OpaqueRef:a17e721d-faf4-6ad1-c617-dd4899279534",
+ "OpaqueRef:6ac77a0f-f079-8067-85cc-c9ae2f8dcca9",
+ "OpaqueRef:f61edc83-91d9-a161-113f-00c110196238",
+ "OpaqueRef:b71938bf-4c4f-eb17-7e78-588e71297a74",
+ "OpaqueRef:01befb95-412e-e9dd-5b5d-edd50df61cb1",
+ "OpaqueRef:a3f9481e-fe3d-1f00-235f-44d404f51128",
+ "OpaqueRef:0760c608-b02e-743a-18a1-fa8f205374d6",
+ "OpaqueRef:204558d7-dce0-2304-bdc5-80ec5fd7e3c3",
+ "OpaqueRef:9eccc765-9726-d220-96b1-2e85adf77ecc",
+ "OpaqueRef:91cfa47b-52f9-a4e3-4e78-52e3eb3e5141",
+ "OpaqueRef:3fffd7c7-f4d1-6b03-a5b8-d75211bb7b8f",
+ "OpaqueRef:7efce157-9b93-d116-f3f8-7eb0c6fb1a79",
+ "OpaqueRef:e2209ae9-5362-3a20-f691-9294144e49f2",
+ "OpaqueRef:1ced32ca-fec4-8b44-0e8f-753c97f2d93f",
+ "OpaqueRef:65b14ae7-f440-0c4d-4af9-c7946b90fd2f"
+ ],
+ "updates_requiring_reboot": [],
+ "uuid": "dff6702e-bcb6-4704-8dd4-952e8c883365",
+ "virtual_hardware_platform_versions": [
+ "0",
+ "1",
+ "2"
+ ]
+ }
+ },
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-2-params.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json
new file mode 100644
index 000000000..5ed7df7f1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json
@@ -0,0 +1,75 @@
+{
+ "cdrom": {
+ "type": "none"
+ },
+ "customization_agent": "custom",
+ "disks": [
+ {
+ "name": "ansible-test-vm-3-root",
+ "name_desc": "/",
+ "os_device": "xvda",
+ "size": 8589934592,
+ "sr": "Ansible Test Storage 1",
+ "sr_uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "vbd_userdevice": "0"
+ }
+ ],
+ "domid": "-1",
+ "folder": "",
+ "hardware": {
+ "memory_mb": 1024,
+ "num_cpu_cores_per_socket": 1,
+ "num_cpus": 1
+ },
+ "home_server": "",
+ "is_template": false,
+ "name": "ansible-test-vm-3",
+ "name_desc": "Created by Ansible",
+ "networks": [
+ {
+ "gateway": "",
+ "gateway6": "",
+ "ip": "169.254.0.3",
+ "ip6": [],
+ "mac": "72:fb:c7:ac:b9:97",
+ "mtu": "1500",
+ "name": "Host internal management network",
+ "netmask": "",
+ "prefix": "",
+ "prefix6": "",
+ "vif_device": "0"
+ }
+ ],
+ "other_config": {
+ "auto_poweron": "true",
+ "base_template_name": "zatemplate",
+ "import_task": "OpaqueRef:9948fd82-6d79-8882-2f01-4edc8795e361",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "install-repository": "http://mirror.centos.org/centos-6/6.2/os/x86_64/",
+ "instant": "true",
+ "last_shutdown_action": "Destroy",
+ "last_shutdown_initiator": "external",
+ "last_shutdown_reason": "halted",
+ "last_shutdown_time": "20140314T21:16:41Z",
+ "linux_template": "true",
+ "mac_seed": "06e27068-70c2-4c69-614b-7c54b5a4a781",
+ "rhel6": "true"
+ },
+ "platform": {
+ "acpi": "true",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "nx": "false",
+ "pae": "true",
+ "viridian": "true"
+ },
+ "state": "poweredoff",
+ "uuid": "8f5bc97c-42fa-d619-aba4-d25eced735e0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/mac": "72:fb:c7:ac:b9:97",
+ "vm-data/networks/0/name": "Host internal management network"
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-facts.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json
new file mode 100644
index 000000000..02e224bf0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json
@@ -0,0 +1,420 @@
+{
+ "SR": {
+ "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f": {
+ "PBDs": [],
+ "VDIs": [],
+ "allowed_operations": [
+ "unplug",
+ "plug",
+ "pbd_create",
+ "update",
+ "pbd_destroy",
+ "vdi_resize",
+ "vdi_clone",
+ "scan",
+ "vdi_snapshot",
+ "vdi_mirror",
+ "vdi_create",
+ "vdi_destroy"
+ ],
+ "blobs": {},
+ "clustered": false,
+ "content_type": "",
+ "current_operations": {},
+ "introduced_by": "OpaqueRef:NULL",
+ "is_tools_sr": false,
+ "local_cache_enabled": false,
+ "name_description": "",
+ "name_label": "Ansible Test Storage 1",
+ "other_config": {
+ "auto-scan": "false"
+ },
+ "physical_size": "2521133219840",
+ "physical_utilisation": "1551485632512",
+ "shared": true,
+ "sm_config": {
+ "allocation": "thick",
+ "devserial": "scsi-3600a098038302d353624495242443848",
+ "multipathable": "true",
+ "use_vhd": "true"
+ },
+ "tags": [],
+ "type": "lvmohba",
+ "uuid": "767b30e4-f8db-a83d-8ba7-f5e6e732e06f",
+ "virtual_allocation": "1556925644800"
+ }
+ },
+ "VBD": {
+ "OpaqueRef:024b722e-8d0f-65e6-359e-f301a009b683": {
+ "VDI": "OpaqueRef:NULL",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach",
+ "insert"
+ ],
+ "bootable": false,
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "",
+ "empty": true,
+ "metrics": "OpaqueRef:81509584-b22f-bc71-3c4e-e6c3bdca71f0",
+ "mode": "RO",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "CD",
+ "unpluggable": true,
+ "userdevice": "3",
+ "uuid": "38d850d0-c402-490e-6b97-1d23558c4e0e"
+ },
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c": {
+ "VDI": "OpaqueRef:4d3e9fc7-ae61-b312-e0a8-b53bee06282e",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach"
+ ],
+ "bootable": true,
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "xvda",
+ "empty": false,
+ "metrics": "OpaqueRef:529f6071-5627-28c5-1f41-ee8c0733f1da",
+ "mode": "RW",
+ "other_config": {
+ "owner": ""
+ },
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "storage_lock": false,
+ "type": "Disk",
+ "unpluggable": false,
+ "userdevice": "0",
+ "uuid": "3fd7d35c-cb9d-f0c4-726b-e188ef0dc446"
+ }
+ },
+ "VDI": {
+ "OpaqueRef:4d3e9fc7-ae61-b312-e0a8-b53bee06282e": {
+ "SR": "OpaqueRef:f746e964-e0fe-c36d-d60b-6897cfde583f",
+ "VBDs": [
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c"
+ ],
+ "allow_caching": false,
+ "allowed_operations": [
+ "forget",
+ "generate_config",
+ "update",
+ "resize",
+ "destroy",
+ "clone",
+ "copy",
+ "snapshot"
+ ],
+ "crash_dumps": [],
+ "current_operations": {},
+ "is_a_snapshot": false,
+ "is_tools_iso": false,
+ "location": "bdd0baeb-5447-4963-9e71-a5ff6e85fa59",
+ "managed": true,
+ "metadata_latest": false,
+ "metadata_of_pool": "",
+ "missing": false,
+ "name_description": "/",
+ "name_label": "ansible-test-vm-3-root",
+ "on_boot": "persist",
+ "other_config": {
+ "content_id": "cd8e8b2b-f158-c519-02f0-81d130fe83c5"
+ },
+ "parent": "OpaqueRef:NULL",
+ "physical_utilisation": "8615100416",
+ "read_only": false,
+ "sharable": false,
+ "sm_config": {
+ "vdi_type": "vhd"
+ },
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "storage_lock": false,
+ "tags": [],
+ "type": "system",
+ "uuid": "bdd0baeb-5447-4963-9e71-a5ff6e85fa59",
+ "virtual_size": "8589934592",
+ "xenstore_data": {}
+ }
+ },
+ "VIF": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": {
+ "MAC": "72:fb:c7:ac:b9:97",
+ "MAC_autogenerated": true,
+ "MTU": "1500",
+ "VM": "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2",
+ "allowed_operations": [
+ "attach"
+ ],
+ "current_operations": {},
+ "currently_attached": false,
+ "device": "0",
+ "ipv4_addresses": [],
+ "ipv4_allowed": [],
+ "ipv4_configuration_mode": "None",
+ "ipv4_gateway": "",
+ "ipv6_addresses": [],
+ "ipv6_allowed": [],
+ "ipv6_configuration_mode": "None",
+ "ipv6_gateway": "",
+ "locking_mode": "network_default",
+ "metrics": "OpaqueRef:e5b53fb1-3e99-4bf5-6b00-95fdba1f2610",
+ "network": "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724",
+ "other_config": {},
+ "qos_algorithm_params": {},
+ "qos_algorithm_type": "",
+ "qos_supported_algorithms": [],
+ "runtime_properties": {},
+ "status_code": "0",
+ "status_detail": "",
+ "uuid": "94bd4913-4940-437c-a1c3-50f7eb354c55"
+ }
+ },
+ "VM": {
+ "OpaqueRef:957f576a-2347-1789-80db-4beb50466bc2": {
+ "HVM_boot_params": {
+ "order": ""
+ },
+ "HVM_boot_policy": "",
+ "HVM_shadow_multiplier": 1.0,
+ "PCI_bus": "",
+ "PV_args": "graphical utf8",
+ "PV_bootloader": "pygrub",
+ "PV_bootloader_args": "",
+ "PV_kernel": "",
+ "PV_legacy_args": "",
+ "PV_ramdisk": "",
+ "VBDs": [
+ "OpaqueRef:235f4f04-1dc9-9fa5-c229-a1df187ba48c",
+ "OpaqueRef:024b722e-8d0f-65e6-359e-f301a009b683"
+ ],
+ "VCPUs_at_startup": "1",
+ "VCPUs_max": "1",
+ "VCPUs_params": {},
+ "VGPUs": [],
+ "VIFs": [
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab"
+ ],
+ "VTPMs": [],
+ "actions_after_crash": "restart",
+ "actions_after_reboot": "restart",
+ "actions_after_shutdown": "destroy",
+ "affinity": "OpaqueRef:NULL",
+ "allowed_operations": [
+ "changing_dynamic_range",
+ "changing_shadow_memory",
+ "changing_static_range",
+ "make_into_template",
+ "migrate_send",
+ "destroy",
+ "export",
+ "start_on",
+ "start",
+ "clone",
+ "copy",
+ "snapshot"
+ ],
+ "appliance": "OpaqueRef:NULL",
+ "attached_PCIs": [],
+ "bios_strings": {
+ "bios-vendor": "Xen",
+ "bios-version": "",
+ "hp-rombios": "",
+ "oem-1": "Xen",
+ "oem-2": "MS_VM_CERT/SHA1/bdbeb6e0a816d43fa6d3fe8aaef04c2bad9d3e3d",
+ "system-manufacturer": "Xen",
+ "system-product-name": "HVM domU",
+ "system-serial-number": "",
+ "system-version": ""
+ },
+ "blobs": {},
+ "blocked_operations": {},
+ "children": [],
+ "consoles": [],
+ "crash_dumps": [],
+ "current_operations": {},
+ "domarch": "",
+ "domid": "-1",
+ "generation_id": "",
+ "guest_metrics": "OpaqueRef:6a8acd85-4cab-4e52-27d5-5f4a51c1bf69",
+ "ha_always_run": false,
+ "ha_restart_priority": "",
+ "hardware_platform_version": "0",
+ "has_vendor_device": false,
+ "is_a_snapshot": false,
+ "is_a_template": false,
+ "is_control_domain": false,
+ "is_default_template": false,
+ "is_snapshot_from_vmpp": false,
+ "is_vmss_snapshot": false,
+ "last_boot_CPU_flags": {
+ "features": "17c9cbf5-f6f83203-2191cbf5-00000023-00000001-00000329-00000000-00000000-00001000-0c000000",
+ "vendor": "GenuineIntel"
+ },
+ "last_booted_record": "",
+ "memory_dynamic_max": "1073741824",
+ "memory_dynamic_min": "1073741824",
+ "memory_overhead": "10485760",
+ "memory_static_max": "1073741824",
+ "memory_static_min": "536870912",
+ "memory_target": "0",
+ "metrics": "OpaqueRef:87fc5829-478b-1dcd-989f-50e8ba58a87d",
+ "name_description": "Created by Ansible",
+ "name_label": "ansible-test-vm-3",
+ "order": "0",
+ "other_config": {
+ "auto_poweron": "true",
+ "base_template_name": "zatemplate",
+ "import_task": "OpaqueRef:9948fd82-6d79-8882-2f01-4edc8795e361",
+ "install-methods": "cdrom,nfs,http,ftp",
+ "install-repository": "http://mirror.centos.org/centos-6/6.2/os/x86_64/",
+ "instant": "true",
+ "last_shutdown_action": "Destroy",
+ "last_shutdown_initiator": "external",
+ "last_shutdown_reason": "halted",
+ "last_shutdown_time": "20140314T21:16:41Z",
+ "linux_template": "true",
+ "mac_seed": "06e27068-70c2-4c69-614b-7c54b5a4a781",
+ "rhel6": "true"
+ },
+ "parent": "OpaqueRef:NULL",
+ "platform": {
+ "acpi": "true",
+ "apic": "true",
+ "cores-per-socket": "1",
+ "nx": "false",
+ "pae": "true",
+ "viridian": "true"
+ },
+ "power_state": "Halted",
+ "protection_policy": "OpaqueRef:NULL",
+ "recommendations": "<restrictions><restriction field=\"memory-static-max\" max=\"17179869184\" /><restriction field=\"vcpus-max\" max=\"8\" /><restriction property=\"number-of-vbds\" max=\"7\" /><restriction property=\"number-of-vifs\" max=\"7\" /></restrictions>",
+ "reference_label": "",
+ "requires_reboot": false,
+ "resident_on": "OpaqueRef:NULL",
+ "shutdown_delay": "0",
+ "snapshot_info": {},
+ "snapshot_metadata": "",
+ "snapshot_of": "OpaqueRef:NULL",
+ "snapshot_schedule": "OpaqueRef:NULL",
+ "snapshot_time": "19700101T00:00:00Z",
+ "snapshots": [],
+ "start_delay": "0",
+ "suspend_SR": "OpaqueRef:NULL",
+ "suspend_VDI": "OpaqueRef:NULL",
+ "tags": [
+ "web-frontend"
+ ],
+ "transportable_snapshot_id": "",
+ "user_version": "1",
+ "uuid": "8f5bc97c-42fa-d619-aba4-d25eced735e0",
+ "version": "0",
+ "xenstore_data": {
+ "vm-data": "",
+ "vm-data/networks": "",
+ "vm-data/networks/0": "",
+ "vm-data/networks/0/mac": "72:fb:c7:ac:b9:97",
+ "vm-data/networks/0/name": "Host internal management network"
+ }
+ }
+ },
+ "VM_guest_metrics": {
+ "OpaqueRef:6a8acd85-4cab-4e52-27d5-5f4a51c1bf69": {
+ "PV_drivers_detected": true,
+ "PV_drivers_up_to_date": true,
+ "PV_drivers_version": {
+ "build": "46676",
+ "major": "5",
+ "micro": "100",
+ "minor": "6"
+ },
+ "can_use_hotplug_vbd": "unspecified",
+ "can_use_hotplug_vif": "unspecified",
+ "disks": {},
+ "last_updated": "20190113T19:36:07Z",
+ "live": true,
+ "memory": {},
+ "networks": {
+ "0/ip": "169.254.0.3"
+ },
+ "os_version": {
+ "distro": "centos",
+ "major": "6",
+ "minor": "10",
+ "name": "CentOS release 6.10 (Final)",
+ "uname": "2.6.32-754.6.3.el6.x86_64"
+ },
+ "other": {
+ "feature-balloon": "1",
+ "has-vendor-device": "0",
+ "platform-feature-multiprocessor-suspend": "1"
+ },
+ "other_config": {},
+ "uuid": "3928a6a4-1acd-c134-ed35-eb0ccfaed65c"
+ }
+ },
+ "VM_metrics": {
+ "OpaqueRef:87fc5829-478b-1dcd-989f-50e8ba58a87d": {
+ "VCPUs_CPU": {},
+ "VCPUs_flags": {},
+ "VCPUs_number": "0",
+ "VCPUs_params": {},
+ "VCPUs_utilisation": {
+ "0": 0.0
+ },
+ "hvm": false,
+ "install_time": "20190113T19:35:05Z",
+ "last_updated": "19700101T00:00:00Z",
+ "memory_actual": "1073741824",
+ "nested_virt": false,
+ "nomigrate": false,
+ "other_config": {},
+ "start_time": "19700101T00:00:00Z",
+ "state": [],
+ "uuid": "6cb05fe9-b83e-34c8-29e0-3b793e1da661"
+ }
+ },
+ "host": {},
+ "network": {
+ "OpaqueRef:8a404c5e-5673-ab69-5d6f-5a35a33b8724": {
+ "MTU": "1500",
+ "PIFs": [],
+ "VIFs": [],
+ "allowed_operations": [],
+ "assigned_ips": {
+ "OpaqueRef:8171dad1-f902-ec00-7ba2-9f92d8aa75ab": "169.254.0.3",
+ "OpaqueRef:9754a0ed-e100-d224-6a70-a55a9c2cedf9": "169.254.0.2"
+ },
+ "blobs": {},
+ "bridge": "xenapi",
+ "current_operations": {},
+ "default_locking_mode": "unlocked",
+ "managed": true,
+ "name_description": "Network on which guests will be assigned a private link-local IP address which can be used to talk XenAPI",
+ "name_label": "Host internal management network",
+ "other_config": {
+ "ip_begin": "169.254.0.1",
+ "ip_end": "169.254.255.254",
+ "is_guest_installer_network": "true",
+ "is_host_internal_management_network": "true",
+ "netmask": "255.255.0.0"
+ },
+ "tags": [],
+ "uuid": "dbb96525-944f-0d1a-54ed-e65cb6d07450"
+ }
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json.license b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/fixtures/ansible-test-vm-3-params.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py
new file mode 100644
index 000000000..37e54b2b5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_gather_vm_params_and_facts.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .common import testcase_bad_xenapi_refs
+
+
+testcase_gather_vm_params_and_facts = {
+ "params": [
+ ["ansible-test-vm-1-params.json", "ansible-test-vm-1-facts.json"],
+ ["ansible-test-vm-2-params.json", "ansible-test-vm-2-facts.json"],
+ ["ansible-test-vm-3-params.json", "ansible-test-vm-3-facts.json"],
+ ],
+ "ids": [
+ "ansible-test-vm-1",
+ "ansible-test-vm-2",
+ "ansible-test-vm-3",
+ ],
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_gather_vm_params_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests return of empty dict on bad vm_ref."""
+ assert xenserver.gather_vm_params(fake_ansible_module, vm_ref) == {}
+
+
+def test_gather_vm_facts_no_vm_params(fake_ansible_module, xenserver):
+    """Tests return of empty facts dict when vm_params is not available."""
+ assert xenserver.gather_vm_facts(fake_ansible_module, None) == {}
+ assert xenserver.gather_vm_facts(fake_ansible_module, {}) == {}
+
+
+@pytest.mark.parametrize('fixture_data_from_file',
+ testcase_gather_vm_params_and_facts['params'],
+ ids=testcase_gather_vm_params_and_facts['ids'],
+ indirect=True)
+def test_gather_vm_params_and_facts(mocker, fake_ansible_module, XenAPI, xenserver, fixture_data_from_file):
+ """Tests proper parsing of VM parameters and facts."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ if "params" in list(fixture_data_from_file.keys())[0]:
+ params_file = list(fixture_data_from_file.keys())[0]
+ facts_file = list(fixture_data_from_file.keys())[1]
+ else:
+ params_file = list(fixture_data_from_file.keys())[1]
+ facts_file = list(fixture_data_from_file.keys())[0]
+
+ mocked_returns = {
+ "VM.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM'][obj_ref],
+ "VM_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_metrics'][obj_ref],
+ "VM_guest_metrics.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VM_guest_metrics'][obj_ref],
+ "VBD.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VBD'][obj_ref],
+ "VDI.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VDI'][obj_ref],
+ "SR.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['SR'][obj_ref],
+ "VIF.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['VIF'][obj_ref],
+ "network.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['network'][obj_ref],
+ "host.get_record.side_effect": lambda obj_ref: fixture_data_from_file[params_file]['host'][obj_ref],
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ vm_ref = list(fixture_data_from_file[params_file]['VM'].keys())[0]
+
+ assert xenserver.gather_vm_facts(fake_ansible_module, xenserver.gather_vm_params(fake_ansible_module, vm_ref)) == fixture_data_from_file[facts_file]
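
A note on the mock wiring used throughout these tests: MagicMock's configure_mock() treats dotted keys as nested child mocks, so each "X.get_record.side_effect" entry above turns an OpaqueRef into the matching fixture record. A minimal standalone sketch of that mechanism, using a hypothetical one-entry fixture dict instead of the JSON files:

    from unittest.mock import MagicMock

    fixture = {
        "VM": {"OpaqueRef:vm-1": {"name_label": "ansible-test-vm-3"}},
    }

    xenapi = MagicMock()
    # Dotted keys create nested child mocks, so xenapi.VM.get_record
    # becomes a callable that dispatches into the fixture dict.
    xenapi.configure_mock(**{
        "VM.get_record.side_effect": lambda ref: fixture["VM"][ref],
    })

    assert xenapi.VM.get_record("OpaqueRef:vm-1")["name_label"] == "ansible-test-vm-3"
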
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py
new file mode 100644
index 000000000..242e1debd
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_get_object_ref.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref
+
+
+def test_get_object_ref_xenapi_failure(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests catching of XenAPI failures."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI method call error!'))
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name")
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: Fake XAPI method call error!"
+
+
+def test_get_object_ref_bad_uuid_and_name(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests failure on bad object uuid and/or name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request')
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, None, msg_prefix="Test: ")
+
+    mocked_xenapi.assert_not_called()
+ assert exc_info.value.kwargs['msg'] == "Test: no valid name or UUID supplied for VM!"
+
+
+def test_get_object_ref_uuid_not_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when object is not found by uuid."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', side_effect=XenAPI.Failure('Fake XAPI not found error!'))
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == "Test: VM with UUID 'fake-uuid' not found!"
+ assert xenserver.get_object_ref(fake_ansible_module, "name", uuid="fake-uuid", fail=False, msg_prefix="Test: ") is None
+
+
+def test_get_object_ref_name_not_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when object is not found by name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[])
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == "Test: VM with name 'name' not found!"
+ assert xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ") is None
+
+
+def test_get_object_ref_name_multiple_found(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests when multiple objects are found by name."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi_request', return_value=[fake_xenapi_ref('VM'), fake_xenapi_ref('VM')])
+
+ error_msg = "Test: multiple VMs with name 'name' found! Please use UUID."
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.get_object_ref(fake_ansible_module, "name", fail=False, msg_prefix="Test: ")
+
+ assert exc_info.value.kwargs['msg'] == error_msg
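
Taken together, these tests pin down the get_object_ref() contract: missing name and UUID fails immediately, a failed lookup either fails or returns None depending on the fail argument, and an ambiguous name fails even with fail=False. A hedged sketch of that control flow for the name-lookup path only (helper and parameter names are illustrative; this is not the real module_utils implementation):

    def find_vm_by_name(session, module, name, fail=True, msg_prefix=""):
        # 'session' stands in for a connected XenAPI session and 'module'
        # for an AnsibleModule providing fail_json().
        if not name:
            module.fail_json(msg="%sno valid name or UUID supplied for VM!" % msg_prefix)
        refs = session.xenapi.VM.get_by_name_label(name)
        if len(refs) > 1:
            # Ambiguity is fatal even with fail=False, per the last test above.
            module.fail_json(msg="%smultiple VMs with name '%s' found! Please use UUID." % (msg_prefix, name))
        if not refs:
            if fail:
                module.fail_json(msg="%sVM with name '%s' not found!" % (msg_prefix, name))
            return None
        return refs[0]
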
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py
new file mode 100644
index 000000000..b22e4aa35
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_misc.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def test_xapi_to_module_vm_power_state_bad_power_state(xenserver):
+ """Tests that None is returned on bad power state."""
+ assert xenserver.xapi_to_module_vm_power_state("bad") is None
+
+
+def test_module_to_xapi_vm_power_state_bad_power_state(xenserver):
+ """Tests that None is returned on bad power state."""
+ assert xenserver.module_to_xapi_vm_power_state("bad") is None
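
Both helpers act as lookup tables that yield None for unknown input. A sketch of the XAPI-to-module direction, with entries inferred from the power states exercised elsewhere in these tests (the 'running' row is an assumption; the module-to-XAPI direction would be the symmetric table):

    XAPI_TO_MODULE_POWER_STATE = {
        "running": "poweredon",   # assumed; not asserted directly in this suite
        "halted": "poweredoff",
        "suspended": "suspended",
        "paused": "paused",
    }

    def xapi_to_module_vm_power_state_sketch(xapi_state):
        # dict.get() returns None for unknown states such as "bad".
        return XAPI_TO_MODULE_POWER_STATE.get(xapi_state)
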
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py
new file mode 100644
index 000000000..d072ce207
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_netaddr_functions.py
@@ -0,0 +1,183 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from ansible.module_utils.common.network import is_mac
+
+testcase_is_valid_mac_addr = [
+ ('A4-23-8D-F8-C9-E5', True),
+ ('35:71:F4:11:0B:D8', True),
+ ('b3-bd-20-59-0c-cf', True),
+ ('32:61:ca:65:f1:f4', True),
+ ('asdf', False),
+ ('A4-23-8D-G8-C9-E5', False),
+ ('A4-3-8D-F8-C9-E5', False),
+ ('A4-23-88D-F8-C9-E5', False),
+ ('A4-23-8D-F8-C9_E5', False),
+ ('A4-23--8D-F8-C9-E5', False),
+]
+
+testcase_is_valid_ip_addr = [
+ ('0.0.0.0', True),
+ ('10.0.0.1', True),
+ ('192.168.0.1', True),
+ ('255.255.255.255', True),
+ ('asdf', False),
+ ('a.b.c.d', False),
+ ('345.345.345.345', False),
+ ('-10.0.0.1', False),
+]
+
+testcase_is_valid_ip_netmask = [
+ ('240.0.0.0', True),
+ ('255.224.0.0', True),
+ ('255.255.248.0', True),
+ ('255.255.255.255', True),
+ ('asdf', False),
+ ('a.b.c.d', False),
+ ('192.168.0.1', False),
+ ('255.0.248.0', False),
+]
+
+testcase_is_valid_ip_prefix = [
+ ('0', True),
+ ('16', True),
+ ('24', True),
+ ('32', True),
+ ('asdf', False),
+ ('-10', False),
+ ('60', False),
+ ('60s', False),
+]
+
+testcase_ip_prefix_to_netmask = {
+ "params": [
+ ('0', '0.0.0.0'),
+ ('8', '255.0.0.0'),
+ ('11', '255.224.0.0'),
+ ('16', '255.255.0.0'),
+ ('21', '255.255.248.0'),
+ ('24', '255.255.255.0'),
+ ('26', '255.255.255.192'),
+ ('32', '255.255.255.255'),
+ ('a', ''),
+ ('60', ''),
+ ],
+ "ids": [
+ '0',
+ '8',
+ '11',
+ '16',
+ '21',
+ '24',
+ '26',
+ '32',
+ 'a',
+ '60',
+ ],
+}
+
+testcase_ip_netmask_to_prefix = {
+ "params": [
+ ('0.0.0.0', '0'),
+ ('255.0.0.0', '8'),
+ ('255.224.0.0', '11'),
+ ('255.255.0.0', '16'),
+ ('255.255.248.0', '21'),
+ ('255.255.255.0', '24'),
+ ('255.255.255.192', '26'),
+ ('255.255.255.255', '32'),
+ ('a', ''),
+ ('60', ''),
+ ],
+ "ids": [
+ '0.0.0.0',
+ '255.0.0.0',
+ '255.224.0.0',
+ '255.255.0.0',
+ '255.255.248.0',
+ '255.255.255.0',
+ '255.255.255.192',
+ '255.255.255.255',
+ 'a',
+ '60',
+ ],
+}
+
+testcase_is_valid_ip6_addr = [
+ ('::1', True),
+ ('2001:DB8:0:0:8:800:200C:417A', True),
+ ('2001:DB8::8:800:200C:417A', True),
+ ('FF01::101', True),
+ ('asdf', False),
+ ('2001:DB8:0:0:8:800:200C:417A:221', False),
+ ('FF01::101::2', False),
+ ('2001:db8:85a3::8a2e:370k:7334', False),
+]
+
+testcase_is_valid_ip6_prefix = [
+ ('0', True),
+ ('56', True),
+ ('78', True),
+ ('128', True),
+ ('asdf', False),
+ ('-10', False),
+ ('345', False),
+ ('60s', False),
+]
+
+
+@pytest.mark.parametrize('mac_addr, result', testcase_is_valid_mac_addr)
+def test_is_valid_mac_addr(xenserver, mac_addr, result):
+ """Tests against examples of valid and invalid mac addresses."""
+ assert is_mac(mac_addr) is result
+
+
+@pytest.mark.parametrize('ip_addr, result', testcase_is_valid_ip_addr)
+def test_is_valid_ip_addr(xenserver, ip_addr, result):
+ """Tests against examples of valid and invalid ip addresses."""
+ assert xenserver.is_valid_ip_addr(ip_addr) is result
+
+
+@pytest.mark.parametrize('ip_netmask, result', testcase_is_valid_ip_netmask)
+def test_is_valid_ip_netmask(xenserver, ip_netmask, result):
+ """Tests against examples of valid and invalid ip netmasks."""
+ assert xenserver.is_valid_ip_netmask(ip_netmask) is result
+
+
+@pytest.mark.parametrize('ip_prefix, result', testcase_is_valid_ip_prefix)
+def test_is_valid_ip_prefix(xenserver, ip_prefix, result):
+ """Tests against examples of valid and invalid ip prefixes."""
+ assert xenserver.is_valid_ip_prefix(ip_prefix) is result
+
+
+@pytest.mark.parametrize('ip_prefix, ip_netmask', testcase_ip_prefix_to_netmask['params'], ids=testcase_ip_prefix_to_netmask['ids'])
+def test_ip_prefix_to_netmask(xenserver, ip_prefix, ip_netmask):
+ """Tests ip prefix to netmask conversion."""
+ assert xenserver.ip_prefix_to_netmask(ip_prefix) == ip_netmask
+
+
+@pytest.mark.parametrize('ip_netmask, ip_prefix', testcase_ip_netmask_to_prefix['params'], ids=testcase_ip_netmask_to_prefix['ids'])
+def test_ip_netmask_to_prefix(xenserver, ip_netmask, ip_prefix):
+ """Tests ip netmask to prefix conversion."""
+ assert xenserver.ip_netmask_to_prefix(ip_netmask) == ip_prefix
+
+
+@pytest.mark.parametrize('ip6_addr, result', testcase_is_valid_ip6_addr)
+def test_is_valid_ip6_addr(xenserver, ip6_addr, result):
+ """Tests against examples of valid and invalid ip6 addresses."""
+ assert xenserver.is_valid_ip6_addr(ip6_addr) is result
+
+
+@pytest.mark.parametrize('ip6_prefix, result', testcase_is_valid_ip6_prefix)
+def test_is_valid_ip6_prefix(xenserver, ip6_prefix, result):
+ """Tests against examples of valid and invalid ip6 prefixes."""
+ assert xenserver.is_valid_ip6_prefix(ip6_prefix) is result
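
The conversion tables above encode plain bit arithmetic: a /N prefix means N leading one bits, and a netmask is valid only when its one bits are contiguous. A minimal sketch of both conversions that reproduces the tabled results, including the '' returns for invalid input such as 'a' and '60' (an illustration, not the module_utils implementation):

    import socket
    import struct

    def prefix_to_netmask(prefix):
        # '21' -> '255.255.248.0'; anything outside 0..32 yields ''.
        try:
            bits = int(prefix)
        except ValueError:
            return ''
        if not 0 <= bits <= 32:
            return ''
        mask = (0xFFFFFFFF << (32 - bits)) & 0xFFFFFFFF
        return socket.inet_ntoa(struct.pack('!I', mask))

    def netmask_to_prefix(netmask):
        # '255.255.248.0' -> '21'; malformed or non-contiguous masks yield ''.
        try:
            mask = struct.unpack('!I', socket.inet_aton(netmask))[0]
        except OSError:
            return ''
        prefix = bin(mask).count('1')
        if mask != (0xFFFFFFFF << (32 - prefix)) & 0xFFFFFFFF:
            return ''
        return str(prefix)
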
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py
new file mode 100644
index 000000000..279dc9912
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_set_vm_power_state.py
@@ -0,0 +1,414 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref, testcase_bad_xenapi_refs
+
+
+testcase_set_vm_power_state_bad_transitions = {
+ "params": [
+ ('restarted', 'Halted', "Cannot restart VM in state 'poweredoff'!"),
+ ('restarted', 'Suspended', "Cannot restart VM in state 'suspended'!"),
+ ('suspended', 'Halted', "Cannot suspend VM in state 'poweredoff'!"),
+ ('suspended', 'Paused', "Cannot suspend VM in state 'paused'!"),
+ ('shutdownguest', 'Halted', "Cannot shutdown guest when VM is in state 'poweredoff'!"),
+ ('shutdownguest', 'Suspended', "Cannot shutdown guest when VM is in state 'suspended'!"),
+ ('shutdownguest', 'Paused', "Cannot shutdown guest when VM is in state 'paused'!"),
+ ('rebootguest', 'Halted', "Cannot reboot guest when VM is in state 'poweredoff'!"),
+ ('rebootguest', 'Suspended', "Cannot reboot guest when VM is in state 'suspended'!"),
+ ('rebootguest', 'Paused', "Cannot reboot guest when VM is in state 'paused'!"),
+ ],
+ "ids": [
+ "poweredoff->restarted",
+ "suspended->restarted",
+ "poweredoff->suspended",
+ "paused->suspended",
+ "poweredoff->shutdownguest",
+ "suspended->shutdownguest",
+ "paused->shutdownguest",
+ "poweredoff->rebootguest",
+ "suspended->rebootguest",
+ "paused->rebootguest",
+ ],
+}
+
+testcase_set_vm_power_state_task_timeout = {
+ "params": [
+ ('shutdownguest', "Guest shutdown task failed: 'timeout'!"),
+ ('rebootguest', "Guest reboot task failed: 'timeout'!"),
+ ],
+ "ids": [
+ "shutdownguest-timeout",
+ "rebootguest-timeout",
+ ],
+}
+
+testcase_set_vm_power_state_no_transitions = {
+ "params": [
+ ('poweredon', "Running"),
+ ('Poweredon', "Running"),
+ ('powered-on', "Running"),
+ ('Powered_on', "Running"),
+ ('poweredoff', "Halted"),
+ ('Poweredoff', "Halted"),
+ ('powered-off', "Halted"),
+ ('powered_off', "Halted"),
+ ('suspended', "Suspended"),
+ ('Suspended', "Suspended"),
+ ],
+ "ids": [
+ "poweredon",
+ "poweredon-cap",
+ "poweredon-dash",
+ "poweredon-under",
+ "poweredoff",
+ "poweredoff-cap",
+ "poweredoff-dash",
+ "poweredoff-under",
+ "suspended",
+ "suspended-cap",
+ ],
+}
+
+testcase_set_vm_power_state_transitions = {
+ "params": [
+ ('poweredon', 'Halted', 'running', 'VM.start'),
+ ('Poweredon', 'Halted', 'running', 'VM.start'),
+ ('powered-on', 'Halted', 'running', 'VM.start'),
+ ('Powered_on', 'Halted', 'running', 'VM.start'),
+ ('poweredon', 'Suspended', 'running', 'VM.resume'),
+ ('Poweredon', 'Suspended', 'running', 'VM.resume'),
+ ('powered-on', 'Suspended', 'running', 'VM.resume'),
+ ('Powered_on', 'Suspended', 'running', 'VM.resume'),
+ ('poweredon', 'Paused', 'running', 'VM.unpause'),
+ ('Poweredon', 'Paused', 'running', 'VM.unpause'),
+ ('powered-on', 'Paused', 'running', 'VM.unpause'),
+ ('Powered_on', 'Paused', 'running', 'VM.unpause'),
+ ('poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Running', 'halted', 'VM.hard_shutdown'),
+ ('poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Suspended', 'halted', 'VM.hard_shutdown'),
+ ('poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('Poweredoff', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('powered-off', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('powered_off', 'Paused', 'halted', 'VM.hard_shutdown'),
+ ('restarted', 'Running', 'running', 'VM.hard_reboot'),
+ ('Restarted', 'Running', 'running', 'VM.hard_reboot'),
+ ('restarted', 'Paused', 'running', 'VM.hard_reboot'),
+ ('Restarted', 'Paused', 'running', 'VM.hard_reboot'),
+ ('suspended', 'Running', 'suspended', 'VM.suspend'),
+ ('Suspended', 'Running', 'suspended', 'VM.suspend'),
+ ('shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('Shutdownguest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('shutdown-guest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('shutdown_guest', 'Running', 'halted', 'VM.clean_shutdown'),
+ ('rebootguest', 'Running', 'running', 'VM.clean_reboot'),
+        ('Rebootguest', 'Running', 'running', 'VM.clean_reboot'),
+ ('reboot-guest', 'Running', 'running', 'VM.clean_reboot'),
+ ('reboot_guest', 'Running', 'running', 'VM.clean_reboot'),
+ ],
+ "ids": [
+ "poweredoff->poweredon",
+ "poweredoff->poweredon-cap",
+ "poweredoff->poweredon-dash",
+ "poweredoff->poweredon-under",
+ "suspended->poweredon",
+ "suspended->poweredon-cap",
+ "suspended->poweredon-dash",
+ "suspended->poweredon-under",
+ "paused->poweredon",
+ "paused->poweredon-cap",
+ "paused->poweredon-dash",
+ "paused->poweredon-under",
+ "poweredon->poweredoff",
+ "poweredon->poweredoff-cap",
+ "poweredon->poweredoff-dash",
+ "poweredon->poweredoff-under",
+ "suspended->poweredoff",
+ "suspended->poweredoff-cap",
+ "suspended->poweredoff-dash",
+ "suspended->poweredoff-under",
+ "paused->poweredoff",
+ "paused->poweredoff-cap",
+ "paused->poweredoff-dash",
+ "paused->poweredoff-under",
+ "poweredon->restarted",
+ "poweredon->restarted-cap",
+ "paused->restarted",
+ "paused->restarted-cap",
+ "poweredon->suspended",
+ "poweredon->suspended-cap",
+ "poweredon->shutdownguest",
+ "poweredon->shutdownguest-cap",
+ "poweredon->shutdownguest-dash",
+ "poweredon->shutdownguest-under",
+ "poweredon->rebootguest",
+ "poweredon->rebootguest-cap",
+ "poweredon->rebootguest-dash",
+ "poweredon->rebootguest-under",
+ ],
+}
+
+testcase_set_vm_power_state_transitions_async = {
+ "params": [
+ ('shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('Shutdownguest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('shutdown-guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('shutdown_guest', 'Running', 'halted', 'Async.VM.clean_shutdown'),
+ ('rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
+        ('Rebootguest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ('reboot-guest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ('reboot_guest', 'Running', 'running', 'Async.VM.clean_reboot'),
+ ],
+ "ids": [
+ "poweredon->shutdownguest",
+ "poweredon->shutdownguest-cap",
+ "poweredon->shutdownguest-dash",
+ "poweredon->shutdownguest-under",
+ "poweredon->rebootguest",
+ "poweredon->rebootguest-cap",
+ "poweredon->rebootguest-dash",
+ "poweredon->rebootguest-under",
+ ],
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_set_vm_power_state_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests failure on bad vm_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, vm_ref, None)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot set VM power state. Invalid VM reference supplied!"
+
+
+def test_set_vm_power_state_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "poweredon")
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_set_vm_power_state_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests failure on unsupported power state."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), "bad")
+
+    # Besides VM.get_power_state(), no other method should
+    # have been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert exc_info.value.kwargs['msg'] == "Requested VM power state 'bad' is unsupported!"
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, error_msg',
+ testcase_set_vm_power_state_bad_transitions['params'],
+ ids=testcase_set_vm_power_state_bad_transitions['ids'])
+def test_set_vm_power_state_bad_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current, error_msg):
+ """Tests failure on bad power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
+
+    # Besides VM.get_power_state(), no other method should
+    # have been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+
+@pytest.mark.parametrize('power_state, error_msg',
+ testcase_set_vm_power_state_task_timeout['params'],
+ ids=testcase_set_vm_power_state_task_timeout['ids'])
+def test_set_vm_power_state_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver, power_state, error_msg):
+ """Tests failure on async task timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "Async.VM.clean_shutdown.return_value": fake_xenapi_ref('task'),
+ "Async.VM.clean_reboot.return_value": fake_xenapi_ref('task'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="timeout")
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state, timeout=1)
+
+    # Besides VM.get_power_state(), only one of Async.VM.clean_shutdown
+    # or Async.VM.clean_reboot should have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert exc_info.value.kwargs['msg'] == error_msg
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current',
+ testcase_set_vm_power_state_no_transitions['params'],
+ ids=testcase_set_vm_power_state_no_transitions['ids'])
+def test_set_vm_power_state_no_transition(mocker, fake_ansible_module, XenAPI, xenserver, power_state_desired, power_state_current):
+ """Tests regular invocation without power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired)
+
+    # Besides VM.get_power_state(), no other method should
+    # have been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert result[0] is False
+ assert result[1] == power_state_current.lower()
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions['params'],
+ ids=testcase_set_vm_power_state_transitions['ids'])
+def test_set_vm_power_state_transition(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """Tests regular invocation with power state transition."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_called_once()
+
+    # Besides VM.get_power_state(), only activated_xenapi_method
+    # should have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions_async['params'],
+ ids=testcase_set_vm_power_state_transitions_async['ids'])
+def test_set_vm_power_state_transition_async(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """
+ Tests regular invocation with async power state transition
+ (shutdownguest and rebootguest only).
+ """
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ "%s.return_value" % activated_xenapi_method: fake_xenapi_ref('task'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.wait_for_task', return_value="")
+
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=1)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_called_once()
+
+    # Besides VM.get_power_state(), only activated_xenapi_method
+    # should have been called.
+ assert len(mocked_xenapi.method_calls) == 2
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
+
+
+@pytest.mark.parametrize('power_state_desired, power_state_current, power_state_resulting, activated_xenapi_method',
+ testcase_set_vm_power_state_transitions['params'],
+ ids=testcase_set_vm_power_state_transitions['ids'])
+def test_set_vm_power_state_transition_check_mode(mocker,
+ fake_ansible_module,
+ XenAPI,
+ xenserver,
+ power_state_desired,
+ power_state_current,
+ power_state_resulting,
+ activated_xenapi_method):
+ """Tests regular invocation with power state transition in check mode."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": power_state_current,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ fake_ansible_module.check_mode = True
+ result = xenserver.set_vm_power_state(fake_ansible_module, fake_xenapi_ref('VM'), power_state_desired, timeout=0)
+
+ mocked_xenapi_method = mocked_xenapi
+
+ for activated_xenapi_class in activated_xenapi_method.split('.'):
+ mocked_xenapi_method = getattr(mocked_xenapi_method, activated_xenapi_class)
+
+ mocked_xenapi_method.assert_not_called()
+
+    # Besides VM.get_power_state(), no other method should
+    # have been called.
+ assert len(mocked_xenapi.method_calls) == 1
+
+ assert result[0] is True
+ assert result[1] == power_state_resulting
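
The transition matrix above reduces to a small dispatch table keyed by the normalized desired state and the current XAPI state; the check-mode test consults the same table but skips the actual call. A condensed sketch of the normalization and the rows these tests exercise (illustrative; the real module also covers the async variants tested separately):

    def normalize_requested_state(requested):
        # 'Powered_on', 'powered-on' and 'Poweredon' all collapse to 'poweredon'.
        return requested.replace('-', '').replace('_', '').lower()

    # (normalized desired state, current XAPI state) -> (XAPI call, resulting state)
    POWER_STATE_TRANSITIONS = {
        ('poweredon', 'Halted'): ('VM.start', 'running'),
        ('poweredon', 'Suspended'): ('VM.resume', 'running'),
        ('poweredon', 'Paused'): ('VM.unpause', 'running'),
        ('poweredoff', 'Running'): ('VM.hard_shutdown', 'halted'),
        ('poweredoff', 'Suspended'): ('VM.hard_shutdown', 'halted'),
        ('poweredoff', 'Paused'): ('VM.hard_shutdown', 'halted'),
        ('restarted', 'Running'): ('VM.hard_reboot', 'running'),
        ('restarted', 'Paused'): ('VM.hard_reboot', 'running'),
        ('suspended', 'Running'): ('VM.suspend', 'suspended'),
        ('shutdownguest', 'Running'): ('VM.clean_shutdown', 'halted'),
        ('rebootguest', 'Running'): ('VM.clean_reboot', 'running'),
    }
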
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py
new file mode 100644
index 000000000..3f31f030e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_wait_for_functions.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref, testcase_bad_xenapi_refs
+
+
+testcase_wait_for_vm_ip_address_bad_power_states = {
+ "params": [
+ 'Halted',
+ 'Paused',
+ 'Suspended',
+ 'Other',
+ ],
+ "ids": [
+ 'state-halted',
+ 'state-paused',
+ 'state-suspended',
+ 'state-other',
+ ]
+}
+
+testcase_wait_for_vm_ip_address_bad_guest_metrics = {
+ "params": [
+ ('OpaqueRef:NULL', {"networks": {}}),
+ (fake_xenapi_ref('VM_guest_metrics'), {"networks": {}}),
+ ],
+ "ids": [
+ 'vm_guest_metrics_ref-null, no-ip',
+ 'vm_guest_metrics_ref-ok, no-ip',
+ ],
+}
+
+testcase_wait_for_task_all_statuses = {
+ "params": [
+ ('Success', ''),
+ ('Failure', 'failure'),
+ ('Cancelling', 'cancelling'),
+ ('Cancelled', 'cancelled'),
+ ('Other', 'other'),
+ ],
+ "ids": [
+ 'task-success',
+ 'task-failure',
+ 'task-cancelling',
+ 'task-cancelled',
+ 'task-other',
+ ]
+}
+
+
+@pytest.mark.parametrize('vm_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_wait_for_vm_ip_address_bad_vm_ref(fake_ansible_module, xenserver, vm_ref):
+ """Tests failure on bad vm_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, vm_ref)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot wait for VM IP address. Invalid VM reference supplied!"
+
+
+def test_wait_for_vm_ip_address_xenapi_failure(mock_xenapi_failure, xenserver, fake_ansible_module):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+@pytest.mark.parametrize('bad_power_state',
+ testcase_wait_for_vm_ip_address_bad_power_states['params'],
+ ids=testcase_wait_for_vm_ip_address_bad_power_states['ids'])
+def test_wait_for_vm_ip_address_bad_power_state(mocker, fake_ansible_module, XenAPI, xenserver, bad_power_state):
+ """Tests failure on bad power state."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": bad_power_state,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert exc_info.value.kwargs['msg'] == ("Cannot wait for VM IP address when VM is in state '%s'!" %
+ xenserver.xapi_to_module_vm_power_state(bad_power_state.lower()))
+
+
+@pytest.mark.parametrize('bad_guest_metrics_ref, bad_guest_metrics',
+ testcase_wait_for_vm_ip_address_bad_guest_metrics['params'],
+ ids=testcase_wait_for_vm_ip_address_bad_guest_metrics['ids'])
+def test_wait_for_vm_ip_address_timeout(mocker, fake_ansible_module, XenAPI, xenserver, bad_guest_metrics_ref, bad_guest_metrics):
+ """Tests timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "VM.get_guest_metrics.return_value": bad_guest_metrics_ref,
+ "VM_guest_metrics.get_record.return_value": bad_guest_metrics,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'), timeout=1)
+
+ assert exc_info.value.kwargs['msg'] == "Timed out waiting for VM IP address!"
+
+
+def test_wait_for_vm_ip_address(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests regular invocation."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+    # This mock simulates the regular VM IP acquisition lifecycle:
+    #
+    # 1) First, no guest metrics are available because the VM is not yet
+    #    fully booted and the guest agent is not yet started.
+    # 2) Next, the guest agent is started and guest metrics are available,
+    #    but an IP address is still not acquired.
+    # 3) Lastly, an IP address is acquired by the VM on its primary VIF.
+ mocked_returns = {
+ "VM.get_power_state.return_value": "Running",
+ "VM.get_guest_metrics.side_effect": [
+ 'OpaqueRef:NULL',
+ fake_xenapi_ref('VM_guest_metrics'),
+ fake_xenapi_ref('VM_guest_metrics'),
+ ],
+ "VM_guest_metrics.get_record.side_effect": [
+ {
+ "networks": {},
+ },
+ {
+ "networks": {
+ "0/ip": "192.168.0.1",
+ "1/ip": "10.0.0.1",
+ },
+ },
+ ],
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_guest_metrics = xenserver.wait_for_vm_ip_address(fake_ansible_module, fake_xenapi_ref('VM'))
+
+ assert fake_guest_metrics == mocked_returns['VM_guest_metrics.get_record.side_effect'][1]
+
+
+@pytest.mark.parametrize('task_ref', testcase_bad_xenapi_refs['params'], ids=testcase_bad_xenapi_refs['ids'])
+def test_wait_for_task_bad_task_ref(fake_ansible_module, xenserver, task_ref):
+ """Tests failure on bad task_ref."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_task(fake_ansible_module, task_ref)
+
+ assert exc_info.value.kwargs['msg'] == "Cannot wait for task. Invalid task reference supplied!"
+
+
+def test_wait_for_task_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'))
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_wait_for_task_timeout(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests timeout."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "task.get_status.return_value": "Pending",
+ "task.destroy.return_value": None,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'), timeout=1)
+
+ mocked_xenapi.task.destroy.assert_called_once()
+ assert fake_result == "timeout"
+
+
+@pytest.mark.parametrize('task_status, result',
+ testcase_wait_for_task_all_statuses['params'],
+ ids=testcase_wait_for_task_all_statuses['ids'])
+def test_wait_for_task(mocker, fake_ansible_module, XenAPI, xenserver, task_status, result):
+ """Tests regular invocation."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+    # The mock first returns the Pending status; on the second invocation
+    # it returns one of the possible final statuses.
+ mocked_returns = {
+ "task.get_status.side_effect": [
+ 'Pending',
+ task_status,
+ ],
+ "task.destroy.return_value": None,
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('time.sleep')
+
+ fake_result = xenserver.wait_for_task(fake_ansible_module, fake_xenapi_ref('task'))
+
+ mocked_xenapi.task.destroy.assert_called_once()
+ assert fake_result == result
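
test_wait_for_task_timeout() and the parameterized test above together describe a polling contract: poll task.get_status() until it leaves 'Pending', return '' on 'Success', the lowercased status on any other terminal state, 'timeout' on expiry, and always destroy the task exactly once. A minimal sketch of such a loop (names and the timeout=0-means-forever convention are assumptions consistent with how the tests invoke it):

    import time

    def wait_for_task_sketch(xenapi, task_ref, timeout=0, interval=2):
        remaining = timeout
        try:
            while True:
                status = xenapi.task.get_status(task_ref)
                if status == 'Success':
                    return ''
                if status != 'Pending':
                    return status.lower()
                if timeout and remaining <= 0:
                    return 'timeout'
                time.sleep(interval)
                remaining -= interval
        finally:
            # Matches the tests' task.destroy.assert_called_once() on both
            # the timeout and the completion paths.
            xenapi.task.destroy(task_ref)
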
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py
new file mode 100644
index 000000000..86965b4e5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xapi.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+import atexit
+
+from .FakeAnsibleModule import FailJsonException
+from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
+
+
+testcase_module_local_conn = {
+ "params": [
+ {
+ "hostname": "localhost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "local-conn",
+ ],
+}
+
+testcase_module_remote_conn = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "remote-conn",
+ ],
+}
+
+testcase_module_remote_conn_scheme = {
+ "params": [
+ {
+ "hostname": "http://somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ {
+ "hostname": "https://somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ },
+ ],
+ "ids": [
+ "remote-conn-http",
+ "remote-conn-https",
+ ],
+}
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True)
+def test_xapi_connect_local_session(mocker, fake_ansible_module, XenAPI, xenserver):
+    """Tests that connection to localhost uses the XenAPI.xapi_local() function."""
+ mocker.patch('XenAPI.xapi_local')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.xapi_local.assert_called_once()
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_local_conn['params'], ids=testcase_module_local_conn['ids'], indirect=True)
+def test_xapi_connect_local_login(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that connection to localhost uses empty username and password."""
+ mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.Session.login_with_password.assert_called_once_with('', '', ANSIBLE_VERSION, 'Ansible')
+
+
+def test_xapi_connect_login(mocker, fake_ansible_module, XenAPI, xenserver):
+ """
+    Tests that username and password are properly propagated to the
+    XenAPI.Session.login_with_password() function.
+ """
+ mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ username = fake_ansible_module.params['username']
+ password = fake_ansible_module.params['password']
+
+ XenAPI.Session.login_with_password.assert_called_once_with(username, password, ANSIBLE_VERSION, 'Ansible')
+
+
+def test_xapi_connect_login_failure(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that login failure is properly handled."""
+ fake_error_msg = "Fake XAPI login error!"
+
+ mocked_login = mocker.patch.object(XenAPI.Session, 'login_with_password', create=True)
+ mocked_login.side_effect = XenAPI.Failure(fake_error_msg)
+
+ hostname = fake_ansible_module.params['hostname']
+ username = fake_ansible_module.params['username']
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ assert exc_info.value.kwargs['msg'] == "Unable to log on to XenServer at http://%s as %s: %s" % (hostname, username, fake_error_msg)
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_remote_conn_scheme['params'], ids=testcase_module_remote_conn_scheme['ids'], indirect=True)
+def test_xapi_connect_remote_scheme(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that explicit scheme in hostname param is preserved."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_once_with(hostname, ignore_ssl=ignore_ssl)
+
+
+@pytest.mark.parametrize('fake_ansible_module', testcase_module_remote_conn['params'], ids=testcase_module_remote_conn['ids'], indirect=True)
+def test_xapi_connect_remote_no_scheme(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests that proper scheme is prepended to hostname without scheme."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_once_with("http://%s" % hostname, ignore_ssl=ignore_ssl)
+
+
+def test_xapi_connect_support_ignore_ssl(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests proper handling of ignore_ssl support."""
+ mocked_session = mocker.patch('XenAPI.Session')
+ mocked_session.side_effect = TypeError()
+
+ with pytest.raises(TypeError) as exc_info:
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module)
+
+ hostname = fake_ansible_module.params['hostname']
+ ignore_ssl = not fake_ansible_module.params['validate_certs']
+
+ XenAPI.Session.assert_called_with("http://%s" % hostname)
+
+
+def test_xapi_connect_no_disconnect_atexit(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests skipping registration of atexit disconnect handler."""
+ mocker.patch('atexit.register')
+
+ xapi_session = xenserver.XAPI.connect(fake_ansible_module, disconnect_atexit=False)
+
+ atexit.register.assert_not_called()
+
+
+def test_xapi_connect_singleton(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests if XAPI.connect() returns singleton."""
+ mocker.patch('XenAPI.Session')
+
+ xapi_session1 = xenserver.XAPI.connect(fake_ansible_module)
+ xapi_session2 = xenserver.XAPI.connect(fake_ansible_module)
+
+ XenAPI.Session.assert_called_once()
+ assert xapi_session1 == xapi_session2
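+
+
+# Illustrative note (not part of the original suite): XAPI.connect() caches the
+# session, so any further call with the same module would still leave
+# XenAPI.Session() constructed exactly once:
+#
+#   xapi_session3 = xenserver.XAPI.connect(fake_ansible_module)
+#   XenAPI.Session.assert_called_once()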
diff --git a/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py
new file mode 100644
index 000000000..2d758fd4e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/module_utils/xenserver/test_xenserverobject.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from .FakeAnsibleModule import FailJsonException
+from .common import fake_xenapi_ref
+
+
+def test_xenserverobject_xenapi_lib_detection(mocker, fake_ansible_module, xenserver):
+ """Tests XenAPI lib detection code."""
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.HAS_XENAPI', new=False)
+
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.XenServerObject(fake_ansible_module)
+
+ assert 'Failed to import the required Python library (XenAPI) on' in exc_info.value.kwargs['msg']
+
+
+def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
+ """Tests catching of XenAPI failures."""
+ with pytest.raises(FailJsonException) as exc_info:
+ xenserver.XenServerObject(fake_ansible_module)
+
+ assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
+
+
+def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver):
+ """Tests successful creation of XenServerObject."""
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ "session.get_this_host.return_value": fake_xenapi_ref('host'),
+ "host.get_software_version.return_value": {"product_version": "7.2.0"},
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ xso = xenserver.XenServerObject(fake_ansible_module)
+
+ assert xso.pool_ref == fake_xenapi_ref('pool')
+ assert xso.xenserver_version == [7, 2, 0]
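+
+
+# Illustrative note (an assumption about the helper under test): XenServerObject
+# splits the mocked "product_version" string on dots and converts the parts to
+# integers, which is why "7.2.0" is expected to parse to [7, 2, 0] above.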
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/FakeAnsibleModule.py b/ansible_collections/community/general/tests/unit/plugins/modules/FakeAnsibleModule.py
new file mode 100644
index 000000000..bdcc21793
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/FakeAnsibleModule.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class AnsibleModuleException(Exception):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class ExitJsonException(AnsibleModuleException):
+ pass
+
+
+class FailJsonException(AnsibleModuleException):
+ pass
+
+
+class FakeAnsibleModule:
+ def __init__(self, params=None, check_mode=False):
+ self.params = params
+ self.check_mode = check_mode
+
+ def exit_json(self, *args, **kwargs):
+ raise ExitJsonException(*args, **kwargs)
+
+ def fail_json(self, *args, **kwargs):
+ raise FailJsonException(*args, **kwargs)
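+
+
+# Usage sketch (illustrative only, not part of the suite): modules under test
+# call exit_json() or fail_json(), which these fakes turn into exceptions so a
+# test can assert on the keyword arguments that were passed, e.g.:
+#
+#   module = FakeAnsibleModule(params={'name': 'foo'})
+#   with pytest.raises(FailJsonException) as exc_info:
+#       module.fail_json(msg="boom")
+#   assert exc_info.value.kwargs['msg'] == "boom"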
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/FakeXenAPI.py b/ansible_collections/community/general/tests/unit/plugins/modules/FakeXenAPI.py
new file mode 100644
index 000000000..bc9d69c77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/FakeXenAPI.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+FAKE_API_VERSION = "1.1"
+
+
+class Failure(Exception):
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+class Session(object):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,
+ allow_none=1, ignore_ssl=False):
+
+ self.transport = transport
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def _get_api_version(self):
+ return FAKE_API_VERSION
+
+ def _login(self, method, params):
+ self._session = "OpaqueRef:fake-xenapi-session-ref"
+ self.last_login_method = method
+ self.last_login_params = params
+ self.API_version = self._get_api_version()
+
+ def _logout(self):
+ self._session = None
+ self.last_login_method = None
+ self.last_login_params = None
+ self.API_version = FAKE_API_VERSION
+
+ def xenapi_request(self, methodname, params):
+ if methodname.startswith('login'):
+ self._login(methodname, params)
+ return None
+ elif methodname == 'logout' or methodname == 'session.logout':
+ self._logout()
+ return None
+ else:
+ # Should be patched with mocker.patch().
+ return None
+
+ def __getattr__(self, name):
+ if name == 'handle':
+ return self._session
+ elif name == 'xenapi':
+ # Should be patched with mocker.patch().
+ return None
+ elif name.startswith('login') or name.startswith('slave_local'):
+ return lambda *params: self._login(name, params)
+ elif name == 'logout':
+ return self._logout
+
+
+def xapi_local():
+ return Session("http://_var_lib_xcp_xapi/")
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py b/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py
new file mode 100644
index 000000000..9504c2336
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/conftest.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+from ansible_collections.community.general.plugins.module_utils import deps
+
+
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
+
+
+@pytest.fixture(autouse=True)
+def deps_cleanup():
+ deps._deps.clear()
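+
+
+# Usage sketch (hypothetical module arguments): parametrize the fixture
+# indirectly so that AnsibleModule() inside the module under test receives
+# these arguments:
+#
+#   @pytest.mark.parametrize('patch_ansible_module',
+#                            [{'name': 'foo', 'state': 'present'}],
+#                            indirect=True)
+#   @pytest.mark.usefixtures('patch_ansible_module')
+#   def test_module_args():
+#       ...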
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/gitlab.py b/ansible_collections/community/general/tests/unit/plugins/modules/gitlab.py
new file mode 100644
index 000000000..c64d99fff
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/gitlab.py
@@ -0,0 +1,704 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+from httmock import response # noqa
+from httmock import urlmatch # noqa
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+import gitlab
+
+
+class FakeAnsibleModule(object):
+ def __init__(self, module_params=None):
+ self.check_mode = False
+ self.params = module_params if module_params else {}
+
+ def fail_json(self, **args):
+ pass
+
+ def exit_json(self, **args):
+ pass
+
+
+class GitlabModuleTestCase(unittest.TestCase):
+ def setUp(self):
+ unitest_python_version_check_requirement(self)
+
+ self.mock_module = FakeAnsibleModule()
+
+ self.gitlab_instance = gitlab.Gitlab("http://localhost", private_token="private_token", api_version=4)
+
+
+# Python 2.7+ is needed for python-gitlab
+GITLAB_MINIMUM_PYTHON_VERSION = (2, 7)
+
+
+# Check whether the current Python version is at least GITLAB_MINIMUM_PYTHON_VERSION
+def python_version_match_requirement():
+ return sys.version_info >= GITLAB_MINIMUM_PYTHON_VERSION
+
+
+def python_gitlab_module_version():
+ return gitlab.__version__
+
+
+def python_gitlab_version_match_requirement():
+    # Minimum python-gitlab version required by these tests.
+    return "2.3.0"
+
+
+# Skip the unittest test case if the Python version doesn't match the requirement
+def unitest_python_version_check_requirement(unittest_testcase):
+ if not python_version_match_requirement():
+ unittest_testcase.skipTest("Python %s+ is needed for python-gitlab" % ",".join(map(str, GITLAB_MINIMUM_PYTHON_VERSION)))
+
+
+'''
+USER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="get")
+def resp_find_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith"}, {"id": 2,'
+ '"username": "jack_smith", "name": "Jack Smith", "state": "blocked",'
+ '"avatar_url": "http://gravatar.com/../e32131cd8.jpeg",'
+ '"web_url": "http://localhost:3000/jack_smith"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
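+
+# Usage sketch (illustrative only): wrap python-gitlab calls in an HTTMock
+# context so that handlers like the one above serve the canned JSON:
+#
+#   from httmock import HTTMock
+#   with HTTMock(resp_find_user):
+#       users = gitlab_instance.users.list(search='john_smith')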
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users", method="post")
+def resp_create_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "john_smith", "name": "John Smith", "state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith","created_at": "2012-05-23T08:00:58Z",'
+ '"bio": null, "location": null, "public_email": "john@example.com", "skype": "",'
+ '"linkedin": "", "twitter": "", "website_url": "", "organization": ""}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
+def resp_get_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "john_smith", "name": "John Smith",'
+ '"state": "active",'
+ '"avatar_url": "http://localhost:3000/uploads/user/avatar/1/cd8.jpeg",'
+ '"web_url": "http://localhost:3000/john_smith",'
+ '"created_at": "2012-05-23T08:00:58Z", "bio": null, "location": null,'
+ '"public_email": "john@example.com", "skype": "", "linkedin": "",'
+ '"twitter": "", "website_url": "", "organization": "", "is_admin": false}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="get")
+def resp_get_missing_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_missing_user(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+'''
+USER SSHKEY API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="get")
+def resp_get_user_keys(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
+ 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQa'
+ 'SeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2014-08-01T14:47:39.080Z"},{"id": 3,'
+ '"title": "Another Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596'
+ 'k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaS'
+ 'eP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2014-08-01T14:47:39.080Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1/keys", method="post")
+def resp_create_user_keys(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "title": "Private key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcUdRuSuA5z'
+ 'szUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+xawxKWmI7hJ5S0tOv6MJ+Ixy'
+ 'Ta4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2jTiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH'
+ '2WOKBw6za0az6XoG75obUdFVdW3qcD0xc809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9'
+ 'M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",'
+ '"created_at": "2014-08-01T14:47:39.080Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+'''
+GROUP API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="get")
+def resp_find_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"file_template_project_id": 1, "parent_id": null, "projects": []}, {"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
+ '"web_url": "http://localhost:3000/groups/bar-foo", "request_access_enabled": false,'
+ '"full_name": "BarFoo Group", "full_path": "bar-foo",'
+ '"file_template_project_id": 1, "parent_id": null, "projects": []}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
+def resp_get_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",'
+ '"require_two_factor_authentication": true,'
+ '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/foo-bar", method="get")
+def resp_get_group_by_name(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"project_creation_level": "maintainer", "subgroup_creation_level": "maintainer",'
+ '"require_two_factor_authentication": true,'
+ '"file_template_project_id": 1, "parent_id": null, "projects": [{"id": 1,"description": null, "default_branch": "master",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1", method="get")
+def resp_get_missing_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
+def resp_create_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "Foobar Group", "path": "foo-bar",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/1/foo.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar", "request_access_enabled": false,'
+ '"full_name": "Foobar Group", "full_path": "foo-bar",'
+ '"file_template_project_id": 1, "parent_id": null,'
+ '"project_creation_level": "developer", "subgroup_creation_level": "maintainer",'
+ '"require_two_factor_authentication": true}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups", method="post")
+def resp_create_subgroup(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 2, "name": "BarFoo Group", "path": "bar-foor",'
+ '"description": "An interesting group", "visibility": "public",'
+ '"lfs_enabled": true, "avatar_url": "http://localhost:3000/uploads/group/avatar/2/bar.jpg",'
+ '"web_url": "http://localhost:3000/groups/foo-bar/bar-foo", "request_access_enabled": false,'
+ '"full_name": "BarFoo Group", "full_path": "foo-bar/bar-foo",'
+ '"file_template_project_id": 1, "parent_id": 1,'
+ '"project_creation_level": "noone",'
+ '"require_two_factor_authentication": true}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/users/1", method="delete")
+def resp_delete_group(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+GROUP MEMBER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="get")
+def resp_get_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="get")
+def resp_find_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1, "username": "raymond_smith", "name": "Raymond Smith", "state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z", "access_level": 30},{'
+ '"id": 2, "username": "john_doe", "name": "John Doe","state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root","expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 30}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members", method="post")
+def resp_add_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
+ '"state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 30}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/members/1", method="put")
+def resp_update_member(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "username": "raymond_smith", "name": "Raymond Smith",'
+ '"state": "active",'
+ '"avatar_url": "https://www.gravatar.com/avatar/c2525a7f58ae3776070e44c106c48e15?s=80&d=identicon",'
+ '"web_url": "http://192.168.1.8:3000/root", "expires_at": "2012-10-22T14:13:35Z",'
+ '"access_level": 10}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+'''
+DEPLOY KEY API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="get")
+def resp_find_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"},{"id": 3,"title": "Another Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T11:12:29Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="get")
+def resp_get_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys", method="post")
+def resp_create_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"title": "Public key",'
+ '"key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc'
+ 'KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",'
+ '"created_at": "2013-10-02T10:12:29Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/deploy_keys/1", method="delete")
+def resp_delete_project_deploy_key(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+PROJECT API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="get")
+def resp_find_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="get")
+def resp_get_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/foo-bar%2Fdiaspora-client", method="get")
+def resp_get_project_by_name(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects", method="get")
+def resp_find_group_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/groups/1/projects/1", method="get")
+def resp_get_group_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects", method="post")
+def resp_create_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"description": null, "default_branch": "master", "merge_method": "merge",'
+ '"ssh_url_to_repo": "git@example.com:diaspora/diaspora-client.git",'
+ '"http_url_to_repo": "http://example.com/diaspora/diaspora-client.git",'
+ '"web_url": "http://example.com/diaspora/diaspora-client",'
+ '"readme_url": "http://example.com/diaspora/diaspora-client/blob/master/README.md",'
+ '"tag_list": ["example","disapora client"],"name": "Diaspora Client",'
+ '"name_with_namespace": "Diaspora / Diaspora Client","path": "diaspora-client",'
+ '"path_with_namespace": "diaspora/diaspora-client","created_at": "2013-09-30T13:46:02Z",'
+ '"last_activity_at": "2013-09-30T13:46:02Z","forks_count": 0,'
+ '"avatar_url": "http://example.com/uploads/project/avatar/4/uploads/avatar.png",'
+ '"star_count": 0}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1", method="delete")
+def resp_delete_project(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+
+ return response(204, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get")
+def resp_get_protected_branch(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1, "name": "master", "push_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],'
+ '"merge_access_levels": [{"access_level": 40, "access_level_description": "Maintainers"}],'
+ '"allow_force_push":false, "code_owner_approval_required": false}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="get")
+def resp_get_protected_branch_not_exist(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('')
+ content = content.encode("utf-8")
+ return response(404, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/protected_branches/master", method="delete")
+def resp_delete_protected_branch(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+HOOK API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="get")
+def resp_find_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('[{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="get")
+def resp_get_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks", method="post")
+def resp_create_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"id": 1,"url": "http://example.com/hook","project_id": 3,'
+ '"push_events": true,"push_events_branch_filter": "","issues_events": true,'
+ '"confidential_issues_events": true,"merge_requests_events": true,'
+ '"tag_push_events": true,"note_events": true,"job_events": true,'
+ '"pipeline_events": true,"wiki_page_events": true,"enable_ssl_verification": true,'
+ '"created_at": "2012-10-12T17:04:47Z"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path="/api/v4/projects/1/hooks/1", method="delete")
+def resp_delete_project_hook(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
+
+
+'''
+RUNNER API
+'''
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/all$', method="get")
+def resp_find_runners_all(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20150125","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners$', method="get")
+def resp_find_runners_list(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-1-20201214","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/projects/1/runners$', method="get")
+def resp_find_project_runners(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-1-20220210","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20220210","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/groups/1/runners$', method="get")
+def resp_find_group_runners(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-3-20220210","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-4-20220210","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="put")
+def resp_update_runner(url, request):
+ headers = {'content-type': 'application/json',
+ "X-Page": 1,
+ "X-Next-Page": 2,
+ "X-Per-Page": 1,
+ "X-Total-Pages": 1,
+ "X-Total": 2}
+ content = ('[{"active": true,"description": "test-1-20201214","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"},{"active": true,'
+ '"description": "test-2-20201214","id": 2,"ip_address": "127.0.0.1",'
+ '"is_shared": false,"name": null,"online": false,"status": "offline"}]')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="get")
+def resp_get_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"}')
+ content = content.encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners$', method="post")
+def resp_create_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{"active": true,"description": "test-1-20150125","id": 1,'
+ '"is_shared": false,"ip_address": "127.0.0.1","name": null,'
+ '"online": true,"status": "online"}')
+ content = content.encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(scheme="http", netloc="localhost", path=r'/api/v4/runners/1$', method="delete")
+def resp_delete_runner(url, request):
+ headers = {'content-type': 'application/json'}
+ content = ('{}')
+ content = content.encode("utf-8")
+ return response(204, content, headers, None, 5, request)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/hpe_test_utils.py b/ansible_collections/community/general/tests/unit/plugins/modules/hpe_test_utils.py
new file mode 100644
index 000000000..ab16d8f22
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/hpe_test_utils.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import re
+import yaml
+
+from mock import Mock, patch
+from .oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
+from hpOneView.oneview_client import OneViewClient
+
+
+class OneViewBaseTest(object):
+ @pytest.fixture(autouse=True)
+ def setUp(self, mock_ansible_module, mock_ov_client, request):
+        marker = request.node.get_marker('resource')
+        # The 'resource' marker is expected to carry a single resource name.
+        self.resource = getattr(mock_ov_client, marker.args[0])
+ self.mock_ov_client = mock_ov_client
+ self.mock_ansible_module = mock_ansible_module
+
+ @pytest.fixture
+ def testing_module(self):
+ resource_name = type(self).__name__.replace('Test', '')
+ resource_module_path_name = resource_name.replace('Module', '')
+ resource_module_path_name = re.findall('[A-Z][^A-Z]*', resource_module_path_name)
+ resource_module_path_name = 'oneview_' + str.join('_', resource_module_path_name).lower()
+
+ ansible_collections = __import__('ansible_collections')
+ oneview_module = ansible_collections.community.general.plugins.modules
+ resource_module = getattr(oneview_module, resource_module_path_name)
+ self.testing_class = getattr(resource_module, resource_name)
+ testing_module = self.testing_class.__module__.split('.')[-1]
+ testing_module = getattr(oneview_module, testing_module)
+ try:
+            # Load scenarios from the module examples (also validates that EXAMPLES is valid YAML)
+ EXAMPLES = yaml.safe_load(testing_module.EXAMPLES)
+
+ except yaml.scanner.ScannerError:
+ message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
+ raise Exception(message)
+ return testing_module
+
+ def test_main_function_should_call_run_method(self, testing_module, mock_ansible_module):
+ mock_ansible_module.params = {'config': 'config.json'}
+
+ main_func = getattr(testing_module, 'main')
+
+ with patch.object(self.testing_class, "run") as mock_run:
+ main_func()
+ mock_run.assert_called_once()
+
+
+class FactsParamsTest(OneViewBaseTest):
+ def test_should_get_all_using_filters(self, testing_module):
+ self.resource.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None,
+ params={
+ 'start': 1,
+ 'count': 3,
+ 'sort': 'name:descending',
+ 'filter': 'purpose=General',
+ 'query': 'imported eq true'
+ })
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource.get_all.assert_called_once_with(start=1, count=3, sort='name:descending', filter='purpose=General', query='imported eq true')
+
+ def test_should_get_all_without_params(self, testing_module):
+ self.resource.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None
+ )
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource.get_all.assert_called_once_with()
+
+
+class OneViewBaseTestCase(object):
+ mock_ov_client_from_json_file = None
+ testing_class = None
+ mock_ansible_module = None
+ mock_ov_client = None
+ testing_module = None
+ EXAMPLES = None
+
+ def configure_mocks(self, test_case, testing_class):
+ """
+        Preload a mocked OneViewClient instance and a mocked AnsibleModule.
+        Args:
+            test_case (object): class instance (self) that inherits from OneViewBaseTestCase
+ testing_class (object): class being tested
+ """
+ self.testing_class = testing_class
+
+ # Define OneView Client Mock (FILE)
+ patcher_json_file = patch.object(OneViewClient, 'from_json_file')
+ test_case.addCleanup(patcher_json_file.stop)
+ self.mock_ov_client_from_json_file = patcher_json_file.start()
+
+ # Define OneView Client Mock
+ self.mock_ov_client = self.mock_ov_client_from_json_file.return_value
+
+ # Define Ansible Module Mock
+ patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
+ test_case.addCleanup(patcher_ansible.stop)
+ mock_ansible_module = patcher_ansible.start()
+ self.mock_ansible_module = Mock()
+ mock_ansible_module.return_value = self.mock_ansible_module
+
+ self.__set_module_examples()
+
+ def test_main_function_should_call_run_method(self):
+ self.mock_ansible_module.params = {'config': 'config.json'}
+
+ main_func = getattr(self.testing_module, 'main')
+
+ with patch.object(self.testing_class, "run") as mock_run:
+ main_func()
+ mock_run.assert_called_once()
+
+ def __set_module_examples(self):
+        # Load scenarios from the module examples (also validates that EXAMPLES is valid YAML)
+ ansible_collections = __import__('ansible_collections')
+ testing_module = self.testing_class.__module__.split('.')[-1]
+ self.testing_module = getattr(ansible_collections.community.general.plugins.modules, testing_module)
+
+ try:
+ self.EXAMPLES = yaml.safe_load(self.testing_module.EXAMPLES)
+
+ except yaml.scanner.ScannerError:
+ message = "Something went wrong while parsing yaml from {0}.EXAMPLES".format(self.testing_class.__module__)
+ raise Exception(message)
+
+
+class FactsParamsTestCase(OneViewBaseTestCase):
+ """
+    FactsParamsTestCase provides common tests for classes that support passing
+    additional parameters when retrieving all resources.
+ """
+
+    def configure_client_mock(self, resource_client):
+        """
+        Args:
+            resource_client: resource client that is being called
+        """
+        self.resource_client = resource_client
+
+ def __validations(self):
+ if not self.testing_class:
+ raise Exception("Mocks are not configured, you must call 'configure_mocks' before running this test.")
+
+ if not self.resource_client:
+ raise Exception(
+ "Mock for the client not configured, you must call 'configure_client_mock' before running this test.")
+
+ def test_should_get_all_using_filters(self):
+ self.__validations()
+ self.resource_client.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None,
+ params={
+ 'start': 1,
+ 'count': 3,
+ 'sort': 'name:descending',
+ 'filter': 'purpose=General',
+ 'query': 'imported eq true'
+ })
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource_client.get_all.assert_called_once_with(start=1, count=3, sort='name:descending',
+ filter='purpose=General',
+ query='imported eq true')
+
+ def test_should_get_all_without_params(self):
+ self.__validations()
+ self.resource_client.get_all.return_value = []
+
+ params_get_all_with_filters = dict(
+ config='config.json',
+ name=None
+ )
+ self.mock_ansible_module.params = params_get_all_with_filters
+
+ self.testing_class().run()
+
+ self.resource_client.get_all.assert_called_once_with()
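+
+
+# Usage sketch (hypothetical module and class names): a concrete test case
+# would wire these helpers together roughly like this:
+#
+#   class FcNetworkFactsSpec(unittest.TestCase, FactsParamsTestCase):
+#       def setUp(self):
+#           self.configure_mocks(self, FcNetworkFactsModule)
+#           self.configure_client_mock(self.mock_ov_client.fc_networks)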
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file-README.md b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file-README.md
new file mode 100644
index 000000000..aa9298f37
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file-README.md
@@ -0,0 +1,27 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# interfaces_file unit tests
+
+## Tests structure
+
+- the `input` directory contains interfaces configuration files
+- `test_interfaces_file.py` runs each hardcoded test against all configurations in the `input` directory and compares the results with the golden outputs in `golden_output`
+
+## Running unit tests with docker
+
+1. Clone the project to `ansible_collections/community/general`
+2. Change into the project directory: `cd ansible_collections/community/general`
+3. Run `ansible-test units --docker -v --python 3.10 tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py`
+
+## Adding tests
+
+1. New configurations should be added to the `input` directory
+2. New test cases should be defined in `test_interfaces_file.py`; the same applies to new test functions, if needed
+3. On the first test run for a new combination of a test case and an interface configuration, a new set of golden files is generated. With the docker-based approach this fails because the sources are mounted read-only. The workaround is to run the tests locally with Python 3 (3.10 in this example):
+    1. Install the required modules with `pip3.10 install pytest-xdist pytest-mock mock`
+    2. Run the tests with `ansible-test units --python 3.10 tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py`
+4. Carefully verify the newly created golden output files!
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family.test_no_changes.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt
new file mode 100644
index 000000000..bb6a333ab
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
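
Note: each "*.json" golden file is the parsed state of the interfaces file after the test case runs — a dict keyed by interface name, holding address_family, method, address, and the up/down/pre-up/post-up command lists. A sketch of the comparison such fixtures imply (an illustration only, not the collection's actual assertion helper):

    import json

    def assert_matches_golden(parsed_ifaces, golden_path):
        # parsed_ifaces: dict built by parsing the interfaces file, e.g.
        # {"eth0": {"address_family": "inet6", "method": "static", ...}}
        with open(golden_path) as f:
            expected = json.load(f)
        assert parsed_ifaces == expected, "mismatch against %s" % golden_path
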
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt
new file mode 100644
index 000000000..f1bdb5fd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_aggi_up_twice.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 000000000..53c9acd13
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_add_and_delete_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt
new file mode 100644
index 000000000..122f18652
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_aggi_remove_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4
new file mode 100644
index 000000000..9a2f5b059
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.42
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up
new file mode 100644
index 000000000..5077e3a68
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+ post-up XXXX_ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up
new file mode 100644
index 000000000..5c0f69736
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+ pre-up XXXX_ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv4_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6
new file mode 100644
index 000000000..afaaac962
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::42
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up
new file mode 100644
index 000000000..cb3e98b77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ post-up XXXX_ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up
new file mode 100644
index 000000000..149da568b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ pre-up XXXX_ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_ipv6_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt
new file mode 100644
index 000000000..6e0ba79f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_change_method.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_revert.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu
new file mode 100644
index 000000000..40331271a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu
@@ -0,0 +1,13 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
+ mtu 1350
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 000000000..8b9c5a14b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_and_eth0_mtu.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt
new file mode 100644
index 000000000..a6ce9ad69
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "slaves",
+ "state": "present",
+ "value": "int1 int3"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json
new file mode 100644
index 000000000..8903ee647
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "fc00::1",
+ "address_family": "inet6",
+ "down": [],
+ "method": "static",
+ "post-up": [
+ "echo configuring ipv6"
+ ],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/address_family_set_aggi_slaves.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp.test_no_changes.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt
new file mode 100644
index 000000000..bb6a333ab
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt
new file mode 100644
index 000000000..f1bdb5fd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_aggi_up_twice.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 000000000..53c9acd13
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_add_and_delete_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt
new file mode 100644
index 000000000..122f18652
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_aggi_remove_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4
new file mode 100644
index 000000000..696293741
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ address 192.168.0.42
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up
new file mode 100644
index 000000000..998f48446
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ post-up XXXX_ipv4
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up
new file mode 100644
index 000000000..5e6af40a2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ pre-up XXXX_ipv4
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv4_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt
new file mode 100644
index 000000000..015052275
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 000000000..2a73a2b77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 000000000..262ffe9f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_ipv6_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt
new file mode 100644
index 000000000..6e0ba79f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_change_method.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_revert.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu
new file mode 100644
index 000000000..7bbad22a5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu
@@ -0,0 +1,7 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
+ mtu 1350
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 000000000..8b9c5a14b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_and_eth0_mtu.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt
new file mode 100644
index 000000000..a6ce9ad69
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "slaves",
+ "state": "present",
+ "value": "int1 int3"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json
new file mode 100644
index 000000000..782b4d0fb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json
@@ -0,0 +1,18 @@
+{
+ "eth0": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/default_dhcp_set_aggi_slaves.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces.test_no_changes.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt
new file mode 100644
index 000000000..bb6a333ab
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt
new file mode 100644
index 000000000..f1bdb5fd1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_aggi_up_twice.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 000000000..53c9acd13
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_add_and_delete_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt
new file mode 100644
index 000000000..122f18652
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt
@@ -0,0 +1,17 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "absent",
+ "value": null
+}
+=====
+[1] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "up",
+ "state": "present",
+ "value": "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_aggi_remove_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4
new file mode 100644
index 000000000..ff999d718
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 192.168.0.42
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up
new file mode 100644
index 000000000..36c773be6
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up
@@ -0,0 +1,9 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
+post-up XXXX_ipv4
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up
new file mode 100644
index 000000000..89264c1d1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up
@@ -0,0 +1,9 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
+pre-up XXXX_ipv4
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv4_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt
new file mode 100644
index 000000000..015052275
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 000000000..2a73a2b77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 000000000..262ffe9f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_ipv6_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt
new file mode 100644
index 000000000..6e0ba79f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_change_method.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert
new file mode 100644
index 000000000..9da7b7259
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert
@@ -0,0 +1,7 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_revert.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu
new file mode 100644
index 000000000..4788c3ddf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1350
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 000000000..8b9c5a14b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_and_eth0_mtu.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt
new file mode 100644
index 000000000..a6ce9ad69
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt
@@ -0,0 +1,8 @@
+[0] fail_json message: Error: interface aggi not found
+options:
+{
+ "iface": "aggi",
+ "option": "slaves",
+ "state": "present",
+ "value": "int1 int3"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json
new file mode 100644
index 000000000..5fe55437a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json
@@ -0,0 +1,21 @@
+{
+ "eth0": {
+ "address": "10.0.0.1",
+ "address_family": "inet",
+ "down": [],
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.0",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/no_leading_spaces_set_aggi_slaves.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com.test_no_changes.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up
new file mode 100644
index 000000000..e86b25782
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up
@@ -0,0 +1,62 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice
new file mode 100644
index 000000000..e86b25782
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice
@@ -0,0 +1,62 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_aggi_up_twice.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_add_and_delete_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup
new file mode 100644
index 000000000..e86b25782
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup
@@ -0,0 +1,62 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_aggi_remove_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt
new file mode 100644
index 000000000..bab7cce06
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "192.168.0.42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 000000000..d1ce15975
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 000000000..8a439db4c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv4_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt
new file mode 100644
index 000000000..015052275
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 000000000..2a73a2b77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 000000000..262ffe9f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_ipv6_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method
new file mode 100644
index 000000000..065bf0f04
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet dhcp
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json
new file mode 100644
index 000000000..6df01a42f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_change_method.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt
new file mode 100644
index 000000000..57f3fe6f7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "absent",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_revert.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu
new file mode 100644
index 000000000..5218eed19
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1350
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 000000000..007dd4444
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[1] fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_and_eth0_mtu.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves
new file mode 100644
index 000000000..e2b78e93a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int3
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json
new file mode 100644
index 000000000..c85421197
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json
@@ -0,0 +1,109 @@
+{
+ "agge": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "down": [],
+ "hwaddress": "ether 22:44:77:88:D5:96",
+ "method": "static",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K aggi tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "int1 int2",
+ "up": []
+ },
+ "br0": {
+ "address": "188.44.133.76",
+ "address_family": "inet",
+ "bond_downdelay": "200",
+ "bond_lacp_rate": "slow",
+ "bond_miimon": "100",
+ "bond_mode": "4",
+ "bond_updelay": "200",
+ "bond_xmit_hash_policy": "layer3+4",
+ "bridge_ports": "agge",
+ "down": [],
+ "gateway": "188.44.133.75",
+ "hwaddress": "ether 22:44:77:88:D5:98",
+ "method": "static",
+ "netmask": "255.255.255.248",
+ "post-up": [
+ "/sbin/ethtool -K agge tx off tso off"
+ ],
+ "pre-up": [],
+ "slaves": "ext1 ext2",
+ "up": [
+ "route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi",
+ "route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi",
+ "route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi"
+ ]
+ },
+ "eth1": {
+ "address_family": "inet",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext1": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "ext2": {
+ "address_family": "inet",
+ "bond-master": "agge",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int1": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "int2": {
+ "address_family": "inet",
+ "bond-master": "aggi",
+ "down": [],
+ "method": "manual",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/servers.com_set_aggi_slaves.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup.test_no_changes.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_aggi_up_twice.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up
new file mode 100644
index 000000000..2b5ca7404
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up
@@ -0,0 +1,9 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_add_and_delete_aggi_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup
new file mode 100644
index 000000000..326291ef2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup
@@ -0,0 +1,10 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_aggi_remove_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt
new file mode 100644
index 000000000..bab7cce06
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "192.168.0.42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt
new file mode 100644
index 000000000..d1ce15975
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt
new file mode 100644
index 000000000..8a439db4c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv4"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv4_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6 b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt
new file mode 100644
index 000000000..015052275
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "address",
+ "state": "present",
+ "value": "fc00::42"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt
new file mode 100644
index 000000000..2a73a2b77
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "post-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_post_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt
new file mode 100644
index 000000000..262ffe9f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt
@@ -0,0 +1,9 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "address_family": "inet6",
+ "iface": "eth0",
+ "option": "pre-up",
+ "state": "present",
+ "value": "XXXX_ipv6"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_ipv6_pre_up.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt
new file mode 100644
index 000000000..6e0ba79f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth1 not found
+options:
+{
+ "iface": "eth1",
+ "option": "method",
+ "state": "present",
+ "value": "dhcp"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_change_method.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt
new file mode 100644
index 000000000..57f3fe6f7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt
@@ -0,0 +1,8 @@
+fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "absent",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_revert.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu
new file mode 100644
index 000000000..6bc202e0c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1350
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt
new file mode 100644
index 000000000..007dd4444
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt
@@ -0,0 +1,8 @@
+[1] fail_json message: Error: interface eth0 not found
+options:
+{
+ "iface": "eth0",
+ "option": "mtu",
+ "state": "present",
+ "value": "1350"
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.exceptions.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_and_eth0_mtu.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves
new file mode 100644
index 000000000..d044b9251
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves
@@ -0,0 +1,12 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ slaves int1 int3
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.exceptions.txt
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json
new file mode 100644
index 000000000..80b7c210c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json
@@ -0,0 +1,24 @@
+{
+ "aggi": {
+ "address": "10.44.15.196",
+ "address_family": "inet",
+ "down": [],
+ "method": "dhcp",
+ "mtu": "1500",
+ "netmask": "255.255.255.248",
+ "post-up": [],
+ "pre-up": [],
+ "up": [
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi",
+ "route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi"
+ ]
+ },
+ "lo": {
+ "address_family": "inet",
+ "down": [],
+ "method": "loopback",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+}
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/up_down_dup_set_aggi_slaves.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family
new file mode 100644
index 000000000..bc4ecea78
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family
@@ -0,0 +1,12 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet static
+ address 192.168.0.1
+ post-up echo configuring ipv4
+
+iface eth0 inet6 static
+ address fc00::1
+ post-up echo configuring ipv6
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/address_family.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp
new file mode 100644
index 000000000..bd4522ec0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp
@@ -0,0 +1,6 @@
+# The loopback network interface
+auto lo eth0
+iface lo inet loopback
+
+# The primary network interface
+iface eth0 inet dhcp
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/default_dhcp.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces
new file mode 100644
index 000000000..11f2d550c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces
@@ -0,0 +1,8 @@
+iface lo inet loopback
+auto lo
+
+auto eth0
+iface eth0 inet static
+address 10.0.0.1
+netmask 255.255.255.0
+mtu 1500
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/no_leading_spaces.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com
new file mode 100644
index 000000000..c826bbe73
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com
@@ -0,0 +1,61 @@
+ auto aggi
+ iface aggi inet static
+ hwaddress ether 22:44:77:88:D5:96
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ slaves int1 int2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K aggi tx off tso off
+
+ auto agge
+ iface agge inet manual
+
+ auto br0
+ iface br0 inet static
+ bridge_ports agge
+ hwaddress ether 22:44:77:88:D5:98
+ address 188.44.133.76
+ netmask 255.255.255.248
+ gateway 188.44.133.75
+ slaves ext1 ext2
+ bond_mode 4
+ bond_miimon 100
+ bond_downdelay 200
+ bond_updelay 200
+ bond_lacp_rate slow
+ bond_xmit_hash_policy layer3+4
+ post-up /sbin/ethtool -K agge tx off tso off
+
+ up route add -net 10.0.0.0/8 gw 10.44.15.117 dev aggi
+ up route add -net 192.168.0.0/16 gw 10.44.15.117 dev aggi
+ up route add -net 188.44.208.0/21 gw 10.44.15.117 dev aggi
+
+ auto int1
+ iface int1 inet manual
+ bond-master aggi
+
+ auto int2
+ iface int2 inet manual
+ bond-master aggi
+
+ auto ext1
+ iface ext1 inet manual
+ bond-master agge
+
+ auto ext2
+ iface ext2 inet manual
+ bond-master agge
+
+ auto eth1
+ iface eth1 inet manual
+
+ auto lo
+ iface lo inet loopback
+
+source /etc/network/interfaces.d/*.cfg
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/servers.com.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup
new file mode 100644
index 000000000..fdf434eb4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup
@@ -0,0 +1,11 @@
+# this file covers duplicates issue for up/down option, #3841
+auto lo
+iface lo inet loopback
+
+auto aggi
+iface aggi inet dhcp
+ address 10.44.15.196
+ netmask 255.255.255.248
+ mtu 1500
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
+ up route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup.license b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup.license
new file mode 100644
index 000000000..edff8c768
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/up_down_dup.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py
new file mode 100644
index 000000000..94e10b75f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/interfaces_file/test_interfaces_file.py
@@ -0,0 +1,557 @@
+# Copyright (c) 2017, Roman Belyakovsky <ihryamzik () gmail.com>
+#
+# This file is part of Ansible
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules import interfaces_file
+from shutil import copyfile, move
+import difflib
+import inspect
+import io
+import json
+import os
+import re
+import shutil
+import tempfile
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+class ModuleMocked:
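+    # Minimal stand-in for AnsibleModule: it provides only the methods the
+    # interfaces_file module actually calls (atomic_move, backup_local, fail_json).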
+ def atomic_move(self, src, dst):
+ move(src, dst)
+
+ def backup_local(self, path):
+ backupp = os.path.join("/tmp", os.path.basename(path) + ".bak")
+ copyfile(path, backupp)
+ return backupp
+
+ def fail_json(self, msg):
+ raise AnsibleFailJson(msg)
+
+
+module = ModuleMocked()
+fixture_path = os.path.join(os.path.dirname(__file__), 'interfaces_file_fixtures', 'input')
+golden_output_path = os.path.join(os.path.dirname(__file__), 'interfaces_file_fixtures', 'golden_output')
+
+
+class TestInterfacesFileModule(unittest.TestCase):
+ unittest.TestCase.maxDiff = None
+
+ def getTestFiles(self, include_filter=None, exclude_filter=None):
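+        # List the fixture input files, skipping the REUSE .license companions;
+        # the optional regular expressions include or exclude files by name.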
+ flist = next(os.walk(fixture_path))[2]
+ flist = [file for file in flist if not file.endswith('.license')]
+ if include_filter:
+ flist = filter(lambda x: re.match(include_filter, x), flist)
+ if exclude_filter:
+ flist = filter(lambda x: not re.match(exclude_filter, x), flist)
+ return flist
+
+ def compareFileToBackup(self, path, backup):
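+        # Assert that the file matches its backup, then restore the backup.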
+ with open(path) as f1:
+ with open(backup) as f2:
+ diffs = difflib.context_diff(f1.readlines(),
+ f2.readlines(),
+ fromfile=os.path.basename(path),
+ tofile=os.path.basename(backup))
+ # Restore backup
+ move(backup, path)
+ deltas = list(diffs)
+        self.assertEqual(deltas, [])
+
+ def compareInterfacesLinesToFile(self, interfaces_lines, path, testname=None):
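+        # Re-join the parsed line dicts and compare the text against the golden file.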
+ if not testname:
+ testname = "%s.%s" % (path, inspect.stack()[1][3])
+ self.compareStringWithFile("".join([d['line'] for d in interfaces_lines if 'line' in d]), testname)
+
+ def compareInterfacesToFile(self, ifaces, path, testname=None):
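+        # Golden-file check: the expected JSON is written on the first run and
+        # compared against on later runs.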
+ if not testname:
+ testname = "%s.%s.json" % (path, inspect.stack()[1][3])
+
+ testfilepath = os.path.join(golden_output_path, testname)
+ string = json.dumps(ifaces, sort_keys=True, indent=4, separators=(',', ': '))
+ if string and not string.endswith('\n'):
+ string += '\n'
+ goldenData = ifaces
+ if not os.path.isfile(testfilepath):
+ with io.open(testfilepath, 'wb') as f:
+ f.write(string.encode())
+ else:
+ with open(testfilepath, 'r') as goldenfile:
+ goldenData = json.load(goldenfile)
+ self.assertEqual(goldenData, ifaces)
+
+ def compareStringWithFile(self, string, path):
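+        # Same golden-file pattern for plain strings (rendered interfaces files
+        # and recorded fail_json messages).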
+ testfilepath = os.path.join(golden_output_path, path)
+ if string and not string.endswith('\n'):
+ string += '\n'
+ goldenstring = string
+ if not os.path.isfile(testfilepath):
+            with io.open(testfilepath, 'wb') as f:
+                f.write(string.encode())
+        else:
+            with open(testfilepath, 'r') as goldenfile:
+                goldenstring = goldenfile.read()
+ self.assertEqual(goldenstring, string)
+
+ def test_no_changes(self):
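+        # Parsing a fixture and re-serializing it must be lossless.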
+ for testfile in self.getTestFiles():
+ path = os.path.join(fixture_path, testfile)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ self.compareInterfacesLinesToFile(lines, testfile)
+ self.compareInterfacesToFile(ifaces, testfile)
+
+ def test_add_up_option_to_aggi(self):
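+        # Each scenario applies a sequence of option changes to every fixture;
+        # the expected outputs live in golden_output/<fixture>_<scenario>*.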
+ testcases = {
+ "add_aggi_up": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ }
+ ],
+ "add_and_delete_aggi_up": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ },
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': None,
+ 'state': 'absent',
+ },
+ ],
+ "add_aggi_up_twice": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ },
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ },
+ ],
+ "aggi_remove_dup": [
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': None,
+ 'state': 'absent',
+ },
+ {
+ 'iface': 'aggi',
+ 'option': 'up',
+ 'value': 'route add -net 224.0.0.0 netmask 240.0.0.0 dev aggi',
+ 'state': 'present',
+ },
+ ],
+ "set_aggi_slaves": [
+ {
+ 'iface': 'aggi',
+ 'option': 'slaves',
+ 'value': 'int1 int3',
+ 'state': 'present',
+ },
+ ],
+ "set_aggi_and_eth0_mtu": [
+ {
+ 'iface': 'aggi',
+ 'option': 'mtu',
+ 'value': '1350',
+ 'state': 'present',
+ },
+ {
+ 'iface': 'eth0',
+ 'option': 'mtu',
+ 'value': '1350',
+ 'state': 'present',
+ },
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ path = os.path.join(fixture_path, testfile)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ fail_json_iterations = []
+ for i, options in enumerate(options_list):
+ try:
+ dummy, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'],
+ options['value'], options['state'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("[%d] fail_json message: %s\noptions:\n%s" %
+ (i, str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+
+ def test_revert(self):
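+        # Setting an option to 'present' and then 'absent' must leave the file
+        # identical to its backup.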
+ testcases = {
+ "revert": [
+ {
+ 'iface': 'eth0',
+ 'option': 'mtu',
+ 'value': '1350',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ for state in ['present', 'absent']:
+ fail_json_iterations = []
+ options['state'] = state
+ try:
+ dummy, lines = interfaces_file.set_interface_option(module, lines,
+ options['iface'], options['option'], options['value'], options['state'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ if testfile not in ["no_leading_spaces"]:
+                        # skip fixtures where eth0 already sets an MTU value, since they do not revert cleanly
+ self.compareFileToBackup(path, backupp)
+
+ def test_change_method(self):
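+        # A successful change must be idempotent: repeating the same request
+        # may not report a change again.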
+ testcases = {
+ "change_method": [
+ {
+ 'iface': 'eth1',
+ 'option': 'method',
+ 'value': 'dhcp',
+ 'state': 'present',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ fail_json_iterations = []
+ try:
+ changed, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'],
+ options['value'], options['state'])
+                        # After a successful change, run the same request again to verify idempotency
+ if changed:
+ changed_again, lines = interfaces_file.set_interface_option(module, lines, options['iface'],
+ options['option'], options['value'], options['state'])
+ self.assertFalse(changed_again,
+ msg='Second request for change should return false for {0} running on {1}'.format(testname,
+ testfile))
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ # Restore backup
+ move(backupp, path)
+
+ def test_getValueFromLine(self):
+ testcases = [
+ {
+ "line": " address 1.2.3.5",
+ "value": "1.2.3.5",
+ }
+ ]
+ for testcase in testcases:
+ value = interfaces_file.getValueFromLine(testcase["line"])
+ self.assertEqual(testcase["value"], value)
+
+ def test_get_interface_options(self):
+ testcases = {
+ "basic": {
+ "iface_lines": [
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": "iface eno1 inet static",
+ "line_type": "iface",
+ "params": {
+ "address": "",
+ "address_family": "inet",
+ "down": [],
+ "gateway": "",
+ "method": "static",
+ "netmask": "",
+ "post-up": [],
+ "pre-up": [],
+ "up": []
+ }
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " address 1.2.3.5",
+ "line_type": "option",
+ "option": "address",
+ "value": "1.2.3.5"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " netmask 255.255.255.0",
+ "line_type": "option",
+ "option": "netmask",
+ "value": "255.255.255.0"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " gateway 1.2.3.1",
+ "line_type": "option",
+ "option": "gateway",
+ "value": "1.2.3.1"
+ }
+ ],
+ "iface_options": [
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " address 1.2.3.5",
+ "line_type": "option",
+ "option": "address",
+ "value": "1.2.3.5"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " netmask 255.255.255.0",
+ "line_type": "option",
+ "option": "netmask",
+ "value": "255.255.255.0"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " gateway 1.2.3.1",
+ "line_type": "option",
+ "option": "gateway",
+ "value": "1.2.3.1"
+ }
+ ]
+ },
+ }
+
+ for testname in testcases.keys():
+ iface_options = interfaces_file.get_interface_options(testcases[testname]["iface_lines"])
+ self.assertEqual(testcases[testname]["iface_options"], iface_options)
+
+    def test_get_target_options(self):
+ testcases = {
+ "select address": {
+ "iface_options": [
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " address 1.2.3.5",
+ "line_type": "option",
+ "option": "address",
+ "value": "1.2.3.5"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " netmask 255.255.255.0",
+ "line_type": "option",
+ "option": "netmask",
+ "value": "255.255.255.0"
+ },
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " gateway 1.2.3.1",
+ "line_type": "option",
+ "option": "gateway",
+ "value": "1.2.3.1"
+ }
+ ],
+ "target_options": [
+ {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " address 1.2.3.5",
+ "line_type": "option",
+ "option": "address",
+ "value": "1.2.3.5"
+ }
+ ],
+ "option": "address"
+ },
+ }
+
+ for testname in testcases.keys():
+ target_options = interfaces_file.get_target_options(testcases[testname]["iface_options"], testcases[testname]["option"])
+ self.assertEqual(testcases[testname]["target_options"], target_options)
+
+ def test_update_existing_option_line(self):
+ testcases = {
+ "update address": {
+ "target_option": {
+ "address_family": "inet",
+ "iface": "eno1",
+ "line": " address 1.2.3.5",
+ "line_type": "option",
+ "option": "address",
+ "value": "1.2.3.5"
+ },
+ "value": "1.2.3.4",
+ "result": " address 1.2.3.4",
+ },
+ }
+
+ for testname in testcases.keys():
+ updated = interfaces_file.update_existing_option_line(testcases[testname]["target_option"], testcases[testname]["value"])
+ self.assertEqual(testcases[testname]["result"], updated)
+
+ def test_predefined(self):
+ testcases = {
+ "idempotency": {
+ "source_lines": [
+ "iface eno1 inet static",
+ " address 1.2.3.5",
+ " netmask 255.255.255.0",
+ " gateway 1.2.3.1",
+ ],
+ "input": {
+ "iface": "eno1",
+ "option": "address",
+ "value": "1.2.3.5",
+ 'state': 'present',
+ },
+ "result_lines": [
+ "iface eno1 inet static",
+ " address 1.2.3.5",
+ " netmask 255.255.255.0",
+ " gateway 1.2.3.1",
+ ],
+ "changed": False,
+ },
+ }
+
+ for testname in testcases.keys():
+ lines, ifaces = interfaces_file.read_interfaces_lines(module, testcases[testname]["source_lines"])
+ changed, lines = interfaces_file.set_interface_option(module, lines, testcases[testname]["input"]['iface'], testcases[testname]["input"]['option'],
+ testcases[testname]["input"]['value'], testcases[testname]["input"]['state'])
+ self.assertEqual(testcases[testname]["result_lines"], [d['line'] for d in lines if 'line' in d])
+            self.assertEqual(testcases[testname]['changed'], changed)
+
+ def test_inet_inet6(self):
+ testcases = {
+ "change_ipv4": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'address',
+ 'value': '192.168.0.42',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'address',
+ 'value': 'fc00::42',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv4_pre_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'pre-up',
+ 'value': 'XXXX_ipv4',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6_pre_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'pre-up',
+ 'value': 'XXXX_ipv6',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv4_post_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet',
+ 'option': 'post-up',
+ 'value': 'XXXX_ipv4',
+ 'state': 'present',
+ }
+ ],
+ "change_ipv6_post_up": [
+ {
+ 'iface': 'eth0',
+ 'address_family': 'inet6',
+ 'option': 'post-up',
+ 'value': 'XXXX_ipv6',
+ 'state': 'present',
+ }
+ ],
+ }
+ for testname, options_list in testcases.items():
+ for testfile in self.getTestFiles():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ src_path = os.path.join(fixture_path, testfile)
+ path = temp_file.name
+ shutil.copy(src_path, path)
+ lines, ifaces = interfaces_file.read_interfaces_file(module, path)
+ backupp = module.backup_local(path)
+ options = options_list[0]
+ fail_json_iterations = []
+ try:
+ dummy, lines = interfaces_file.set_interface_option(module, lines, options['iface'], options['option'],
+ options['value'], options['state'], options['address_family'])
+ except AnsibleFailJson as e:
+ fail_json_iterations.append("fail_json message: %s\noptions:\n%s" %
+ (str(e), json.dumps(options, sort_keys=True, indent=4, separators=(',', ': '))))
+ interfaces_file.write_changes(module, [d['line'] for d in lines if 'line' in d], path)
+
+ self.compareStringWithFile("\n=====\n".join(fail_json_iterations), "%s_%s.exceptions.txt" % (testfile, testname))
+
+ self.compareInterfacesLinesToFile(lines, testfile, "%s_%s" % (testfile, testname))
+ self.compareInterfacesToFile(ifaces, testfile, "%s_%s.json" % (testfile, testname))
+ # Restore backup
+ move(backupp, path)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/linode_conftest.py b/ansible_collections/community/general/tests/unit/plugins/modules/linode_conftest.py
new file mode 100644
index 000000000..33a704d34
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/linode_conftest.py
@@ -0,0 +1,87 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+
+@pytest.fixture
+def api_key(monkeypatch):
+ monkeypatch.setenv('LINODE_API_KEY', 'foobar')
+
+
+@pytest.fixture
+def auth(monkeypatch):
+ def patched_test_echo(dummy):
+ return []
+ monkeypatch.setattr('linode.api.Api.test_echo', patched_test_echo)
+
+
+@pytest.fixture
+def access_token(monkeypatch):
+ monkeypatch.setenv('LINODE_ACCESS_TOKEN', 'barfoo')
+
+
+@pytest.fixture
+def no_access_token_in_env(monkeypatch):
+ try:
+ monkeypatch.delenv('LINODE_ACCESS_TOKEN')
+ except KeyError:
+ pass
+
+
+@pytest.fixture
+def default_args():
+ return {'state': 'present', 'label': 'foo'}
+
+
+@pytest.fixture
+def mock_linode():
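+    # Stand-in for a Linode API instance: delete() is a no-op and _raw_json
+    # returns a canned API payload.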
+ class Linode():
+ def delete(self, *args, **kwargs):
+ pass
+
+ @property
+ def _raw_json(self):
+ return {
+ "alerts": {
+ "cpu": 90,
+ "io": 10000,
+ "network_in": 10,
+ "network_out": 10,
+ "transfer_quota": 80
+ },
+ "backups": {
+ "enabled": False,
+ "schedule": {
+ "day": None,
+ "window": None,
+ }
+ },
+ "created": "2018-09-26T08:12:33",
+ "group": "Foobar Group",
+ "hypervisor": "kvm",
+ "id": 10480444,
+ "image": "linode/centos7",
+ "ipv4": [
+ "130.132.285.233"
+ ],
+ "ipv6": "2a82:7e00::h03c:46ff:fe04:5cd2/64",
+ "label": "lin-foo",
+ "region": "eu-west",
+ "specs": {
+ "disk": 25600,
+ "memory": 1024,
+ "transfer": 1000,
+ "vcpus": 1
+ },
+ "status": "running",
+ "tags": [],
+ "type": "g6-nanode-1",
+ "updated": "2018-09-26T10:10:14",
+ "watchdog_enabled": True
+ }
+ return Linode()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/oneview_conftest.py b/ansible_collections/community/general/tests/unit/plugins/modules/oneview_conftest.py
new file mode 100644
index 000000000..f86543d7c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/oneview_conftest.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from mock import Mock, patch
+from .oneview_module_loader import ONEVIEW_MODULE_UTILS_PATH
+from hpOneView.oneview_client import OneViewClient
+
+
+@pytest.fixture
+def mock_ov_client():
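+    # Patch OneViewClient.from_json_file so tests never contact a real appliance.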
+ patcher_json_file = patch.object(OneViewClient, 'from_json_file')
+ client = patcher_json_file.start()
+ return client.return_value
+
+
+@pytest.fixture
+def mock_ansible_module():
+ patcher_ansible = patch(ONEVIEW_MODULE_UTILS_PATH + '.AnsibleModule')
+ patcher_ansible = patcher_ansible.start()
+ ansible_module = Mock()
+ patcher_ansible.return_value = ansible_module
+ return ansible_module
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/oneview_module_loader.py b/ansible_collections/community/general/tests/unit/plugins/modules/oneview_module_loader.py
new file mode 100644
index 000000000..ae62d9ced
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/oneview_module_loader.py
@@ -0,0 +1,36 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from ansible_collections.community.general.tests.unit.compat.mock import Mock
+
+# FIXME: These should be done inside of a fixture so that they're only mocked during
+# these unittests
+if 'hpOneView' not in sys.modules:
+ sys.modules['hpOneView'] = Mock()
+ sys.modules['hpOneView.oneview_client'] = Mock()
+
+ONEVIEW_MODULE_UTILS_PATH = 'ansible_collections.community.general.plugins.module_utils.oneview'
+from ansible_collections.community.general.plugins.module_utils.oneview import ( # noqa: F401, pylint: disable=unused-import
+ OneViewModuleException,
+ OneViewModuleTaskError,
+ OneViewModuleResourceNotFound,
+ OneViewModuleBase,
+)
+
+from ansible_collections.community.general.plugins.modules.oneview_ethernet_network import EthernetNetworkModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_ethernet_network_info import ( # noqa: F401, pylint: disable=unused-import
+ EthernetNetworkInfoModule,
+)
+from ansible_collections.community.general.plugins.modules.oneview_fc_network import FcNetworkModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_fc_network_info import FcNetworkInfoModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_fcoe_network import FcoeNetworkModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_fcoe_network_info import FcoeNetworkInfoModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_network_set import NetworkSetModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_network_set_info import NetworkSetInfoModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_san_manager import SanManagerModule # noqa: F401, pylint: disable=unused-import
+from ansible_collections.community.general.plugins.modules.oneview_san_manager_info import SanManagerInfoModule # noqa: F401, pylint: disable=unused-import
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/rhn_conftest.py b/ansible_collections/community/general/tests/unit/plugins/modules/rhn_conftest.py
new file mode 100644
index 000000000..acc0e2f22
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/rhn_conftest.py
@@ -0,0 +1,35 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six.moves import xmlrpc_client
+
+import pytest
+
+
+def get_method_name(request_body):
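+ # xmlrpc_client.loads(data) returns (params, methodname); element 1 is the method name.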
+ return xmlrpc_client.loads(request_body)[1]
+
+
+@pytest.fixture
+def mock_request(request, mocker):
+ responses = request.getfixturevalue('testcase')['calls']
+ module_name = request.module.TESTED_MODULE
+
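+ # The testcase's 'calls' list holds (expected_method, response) pairs consumed in
+ # order; a response that is an Exception is raised instead of returned.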
+ def transport_request(host, handler, request_body, verbose=0):
+ """Fake request"""
+ method_name = get_method_name(request_body)
+ expected_name, response = responses.pop(0)
+ if method_name == expected_name:
+ if isinstance(response, Exception):
+ raise response
+ else:
+ return response
+ else:
+ raise Exception('Expected call: %r, called with: %r' % (expected_name, method_name))
+
+ target = '{0}.xmlrpc_client.Transport.request'.format(module_name)
+ mocker.patch(target, side_effect=transport_request)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_alerta_customer.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_alerta_customer.py
new file mode 100644
index 000000000..ccd0ced50
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_alerta_customer.py
@@ -0,0 +1,250 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules import alerta_customer
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
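+# Minimal stand-in for the response object returned by fetch_url; only read() is used.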
+class MockedResponse(object):
+ def __init__(self, data):
+ self.data = data
+
+ def read(self):
+ return self.data
+
+
+def customer_response_page1():
+ server_response = json.dumps({"customers": [
+ {
+ "customer": "admin",
+ "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616",
+ "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616",
+ "match": "admin@example.com"
+ },
+ {
+ "customer": "Developer",
+ "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c",
+ "id": "188ed093-84cc-4f46-bf80-4c9127180d9c",
+ "match": "dev@example.com"
+ }],
+ "more": True,
+ "page": 1,
+ "pageSize": 50,
+ "pages": 1,
+ "status": "ok",
+ "total": 2})
+ return (MockedResponse(server_response), {"status": 200})
+
+
+def customer_response_page2():
+ server_response = json.dumps({"customers": [
+ {
+ "customer": "admin",
+ "href": "http://localhost:8080/api/customer/d89664a7-9c87-4ab9-8be8-830e7e5f0616",
+ "id": "d89664a7-9c87-4ab9-8be8-830e7e5f0616",
+ "match": "admin@example.com"
+ },
+ {
+ "customer": "Developer",
+ "href": "http://localhost:8080/api/customer/188ed093-84cc-4f46-bf80-4c9127180d9c",
+ "id": "188ed093-84cc-4f46-bf80-4c9127180d9c",
+ "match": "dev@example.com"
+ }],
+ "more": True,
+ "page": 2,
+ "pageSize": 50,
+ "pages": 2,
+ "status": "ok",
+ "total": 52})
+ return (MockedResponse(server_response), {"status": 200})
+
+
+class TestAlertaCustomerModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestAlertaCustomerModule, self).setUp()
+ self.module = alerta_customer
+
+ def tearDown(self):
+ super(TestAlertaCustomerModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+ return mocker.patch('ansible_collections.community.general.plugins.modules.alerta_customer.fetch_url')
+
+ def test_without_parameters(self):
+ """Failure if no parameters set"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_without_content(self):
+ """Failure if customer and match are missing"""
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password"
+ })
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_successful_existing_customer_creation(self):
+ """Test the customer creation (already exists)."""
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page1()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+ self.assertEqual(fetch_url_mock.call_count, 1)
+
+ def test_successful_customer_creation(self):
+ """Test the customer creation."""
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev2@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page1()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['match'] == "dev2@example.com"
+ assert call_data['customer'] == "Developer"
+
+ def test_successful_customer_creation_key(self):
+ """Test the customer creation using api_key."""
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_key': "demo-key",
+ 'customer': 'Developer',
+ 'match': 'dev2@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page1()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['match'] == "dev2@example.com"
+ assert call_data['customer'] == "Developer"
+
+ def test_failed_not_found(self):
+ """Test failure with wrong URL."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080/s",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'Not found for request GET on http://localhost:8080/a/api/customers'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_failed_forbidden(self):
+ """Test failure with wrong user."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "dev@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 403, 'msg': 'Permission Denied for GET on http://localhost:8080/api/customers'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_failed_unauthorized(self):
+ """Test failure with wrong username or password."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password_wrong",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 401, 'msg': 'Unauthorized to request GET on http://localhost:8080/api/customers'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_successful_customer_deletion(self):
+ """Test the customer deletion."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com',
+ 'state': 'absent'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page1()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ def test_successful_customer_deletion_page2(self):
+ """Test the customer deletion on the second page."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Developer',
+ 'match': 'dev@example.com',
+ 'state': 'absent'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page2()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ def test_successful_nonexisting_customer_deletion(self):
+ """Test the customer deletion (non existing)."""
+
+ set_module_args({
+ 'alerta_url': "http://localhost:8080",
+ 'api_username': "admin@example.com",
+ 'api_password': "password",
+ 'customer': 'Billing',
+ 'match': 'dev@example.com',
+ 'state': 'absent'
+ })
+
+ with patch.object(alerta_customer, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = customer_response_page1()
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_apache2_module.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_apache2_module.py
new file mode 100644
index 000000000..3e44bdb58
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_apache2_module.py
@@ -0,0 +1,24 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.apache2_module import create_apache_identifier
+
+REPLACEMENTS = [
+ ('php7.1', 'php7_module'),
+ ('php5.6', 'php5_module'),
+ ('shib2', 'mod_shib'),
+ ('evasive', 'evasive20_module'),
+ ('thismoduledoesnotexist', 'thismoduledoesnotexist_module'), # the default
+]
+
+
+@pytest.mark.parametrize("replacement", REPLACEMENTS, ids=lambda x: x[0])
+def test_apache_identifier(replacement):
+ "test the correct replacement of an a2enmod name with an apache2ctl name"
+ assert create_apache_identifier(replacement[0]) == replacement[1]
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_apk.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_apk.py
new file mode 100644
index 000000000..c952456ef
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_apk.py
@@ -0,0 +1,38 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules import apk
+
+
+class TestApkQueryLatest(unittest.TestCase):
+
+ def setUp(self):
+ self.module_names = [
+ 'bash',
+ 'g++',
+ ]
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.apk.AnsibleModule')
+ def test_not_latest(self, mock_module):
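+ # Clear the apk binary path so the command handed to the mocked run_command is predictable.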
+ apk.APK_PATH = ""
+ for module_name in self.module_names:
+ command_output = module_name + '-2.0.0-r1 < 3.0.0-r2 '
+ mock_module.run_command.return_value = (0, command_output, None)
+ command_result = apk.query_latest(mock_module, module_name)
+ self.assertFalse(command_result)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.apk.AnsibleModule')
+ def test_latest(self, mock_module):
+ apk.APK_PATH = ""
+ for module_name in self.module_names:
+ command_output = module_name + '-2.0.0-r1 = 2.0.0-r1 '
+ mock_module.run_command.return_value = (0, command_output, None)
+ command_result = apk.query_latest(mock_module, module_name)
+ self.assertTrue(command_result)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_archive.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_archive.py
new file mode 100644
index 000000000..84a1360f1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_archive.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args
+from ansible_collections.community.general.plugins.modules.archive import get_archive, common_path
+
+
+class TestArchive(ModuleTestCase):
+ def setUp(self):
+ super(TestArchive, self).setUp()
+
+ self.mock_os_path_isdir = patch('os.path.isdir')
+ self.os_path_isdir = self.mock_os_path_isdir.start()
+
+ def tearDown(self):
+ self.mock_os_path_isdir.stop()
+ super(TestArchive, self).tearDown()
+
+ def test_archive_removal_safety(self):
+ set_module_args(
+ dict(
+ path=['/foo', '/bar', '/baz'],
+ dest='/foo/destination.tgz',
+ remove=True
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(type='list', elements='path', required=True),
+ format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
+ dest=dict(type='path'),
+ exclude_path=dict(type='list', elements='path', default=[]),
+ exclusion_patterns=dict(type='list', elements='path'),
+ force_archive=dict(type='bool', default=False),
+ remove=dict(type='bool', default=False),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True,
+ )
+
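+ # Successive os.path.isdir() results returned for the archive's path checks.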
+ self.os_path_isdir.side_effect = [True, False, False, True]
+
+ module.fail_json = Mock()
+
+ archive = get_archive(module)
+
+ module.fail_json.assert_called_once_with(
+ path=b', '.join(archive.paths),
+ msg='Error, created archive can not be contained in source paths when remove=true'
+ )
+
+
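+# Pairs of (input paths, expected common root); bytes and text paths are both covered.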
+PATHS = (
+ ([], ''),
+ (['/'], '/'),
+ ([b'/'], b'/'),
+ (['/foo', '/bar', '/baz', '/foobar', '/barbaz', '/foo/bar'], '/'),
+ ([b'/foo', b'/bar', b'/baz', b'/foobar', b'/barbaz', b'/foo/bar'], b'/'),
+ (['/foo/bar/baz', '/foo/bar'], '/foo/'),
+ (['/foo/bar/baz', '/foo/bar/'], '/foo/bar/'),
+)
+
+
+@pytest.mark.parametrize("paths,root", PATHS)
+def test_common_path(paths, root):
+ assert common_path(paths) == root
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_access_key.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_access_key.py
new file mode 100644
index 000000000..71e28f653
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_access_key.py
@@ -0,0 +1,343 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules import bitbucket_access_key
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketAccessKeyModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketAccessKeyModule, self).setUp()
+ self.module = bitbucket_access_key
+
+ def test_missing_key_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'label': 'key name',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_key'])
+
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_create_deploy_key(self, *args):
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'user': 'ABC',
+ 'password': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'key': 'public_key',
+ 'label': 'key name',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_create_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'key': 'public_key',
+ 'label': 'key name',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_update_deploy_key(self, *args):
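+ # A changed key is replaced by deleting the existing deploy key and creating a new one.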
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 1)
+ self.assertEqual(create_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "new public key",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_dont_update_same_value(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_update_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with patch.object(self.module, 'create_deploy_key') as create_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'key': 'new public key',
+ 'label': 'mykey',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(create_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_delete_deploy_key(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value=None)
+ def test_delete_absent_deploy_key(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_access_key, 'get_existing_deploy_key', return_value={
+ "id": 123,
+ "label": "mykey",
+ "created_on": "2019-03-23T10:15:21.517377+00:00",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAADA...AdkTg7HGqL3rlaDrEcWfL7Lu6TnhBdq5",
+ "type": "deploy_key",
+ "comment": "",
+ "last_used": None,
+ "repository": {
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test"
+ },
+ "html": {
+ "href": "https://bitbucket.org/mleu/test"
+ },
+ "avatar": {
+ "href": "..."
+ }
+ },
+ "type": "repository",
+ "name": "test",
+ "full_name": "mleu/test",
+ "uuid": "{85d08b4e-571d-44e9-a507-fa476535aa98}"
+ },
+ "links": {
+ "self": {
+ "href": "https://api.bitbucket.org/2.0/repositories/mleu/test/deploy-keys/123"
+ }
+ },
+ })
+ def test_delete_deploy_key_check_mode(self, *args):
+ with patch.object(self.module, 'delete_deploy_key') as delete_deploy_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'label': 'mykey',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_deploy_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py
new file mode 100644
index 000000000..a1f5478c2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_key_pair.py
@@ -0,0 +1,198 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_key_pair
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineKeyPairModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineKeyPairModule, self).setUp()
+ self.module = bitbucket_pipeline_key_pair
+
+ def test_missing_keys_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_keys'])
+
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_create_keys(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'user': 'ABC',
+ 'password': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_create_keys_check_mode(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'unknown',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_update_keys(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_dont_update_same_key(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'unknown',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_update_keys_check_mode(self, *args):
+ with patch.object(self.module, 'update_ssh_key_pair') as update_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'public_key': 'public',
+ 'private_key': 'PRIVATE',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_delete_keys(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value=None)
+ def test_delete_absent_keys(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_key_pair, 'get_existing_ssh_key_pair', return_value={
+ 'public_key': 'public',
+ 'type': 'pipeline_ssh_key_pair',
+ })
+ def test_delete_keys_check_mode(self, *args):
+ with patch.object(self.module, 'delete_ssh_key_pair') as delete_ssh_key_pair_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_ssh_key_pair_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py
new file mode 100644
index 000000000..07709f1a8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_known_host.py
@@ -0,0 +1,193 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_known_host
+from ansible_collections.community.general.plugins.modules.bitbucket_pipeline_known_host import HAS_PARAMIKO
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineKnownHostModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineKnownHostModule, self).setUp()
+ self.module = bitbucket_pipeline_known_host
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'request', return_value=(dict(status=201), dict()))
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host_with_key(self, *args):
+ with patch.object(self.module, 'get_host_key') as get_host_key_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'user': 'ABC',
+ 'password': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'key': 'ssh-rsa public',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(get_host_key_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_dont_create_same_value(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_create_known_host_check_mode(self, *args):
+ with patch.object(self.module, 'create_known_host') as create_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_delete_known_host(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value=None)
+ def test_delete_absent_known_host(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @pytest.mark.skipif(not HAS_PARAMIKO, reason='paramiko must be installed to test key creation')
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_known_host, 'get_existing_known_host', return_value={
+ 'type': 'pipeline_known_host',
+ 'uuid': '{21cc0590-bebe-4fae-8baf-03722704119a7}',
+ 'hostname': 'bitbucket.org',
+ 'public_key': {
+ 'type': 'pipeline_ssh_public_key',
+ 'md5_fingerprint': 'md5:97:8c:1b:f2:6f:14:6b:4b:3b:ec:aa:46:46:74:7c:40',
+ 'sha256_fingerprint': 'SHA256:zzXQOXSFBEiUtuE8AikoYKwbHaxvSc0ojez9YXaGp1A',
+ 'key_type': 'ssh-rsa',
+ 'key': 'AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kN...seeFVBoGqzHM9yXw=='
+ }
+ })
+ def test_delete_known_host_check_mode(self, *args):
+ with patch.object(self.module, 'delete_known_host') as delete_known_host_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'bitbucket.org',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_known_host_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py
new file mode 100644
index 000000000..6f710189c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_bitbucket_pipeline_variable.py
@@ -0,0 +1,311 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper
+from ansible_collections.community.general.plugins.modules import bitbucket_pipeline_variable
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class TestBucketPipelineVariableModule(ModuleTestCase):
+ def setUp(self):
+ super(TestBucketPipelineVariableModule, self).setUp()
+ self.module = bitbucket_pipeline_variable
+
+ def test_without_required_parameters(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['failed'], True)
+
+ def test_missing_value_with_present_state(self):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], self.module.error_messages['required_value'])
+
+ @patch.dict('os.environ', {
+ 'BITBUCKET_CLIENT_ID': 'ABC',
+ 'BITBUCKET_CLIENT_SECRET': 'XXX',
+ })
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_oauth_env_vars_params(self, *args):
+ with self.assertRaises(AnsibleExitJson):
+ set_module_args({
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ @patch.dict('os.environ', {
+ 'BITBUCKET_USERNAME': 'ABC',
+ 'BITBUCKET_PASSWORD': 'XXX',
+ })
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_basic_auth_env_vars_params(self, *args):
+ with self.assertRaises(AnsibleExitJson):
+ set_module_args({
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_create_variable(self, *args):
+ with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'user': 'ABC',
+ 'password': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(create_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_create_variable_check_mode(self, *args):
+ with patch.object(self.module, 'create_pipeline_variable') as create_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(create_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_update_variable(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'type': 'pipeline_variable',
+ 'secured': True,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_update_secured_variable(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'secured': True,
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_update_secured_state(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'secured': True,
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_dont_update_same_value(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_update_variable_check_mode(self, *args):
+ with patch.object(self.module, 'update_pipeline_variable') as update_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': '42',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(update_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_delete_variable(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 1)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value=None)
+ def test_delete_absent_variable(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], False)
+
+ @patch.object(BitbucketHelper, 'fetch_access_token', return_value='token')
+ @patch.object(bitbucket_pipeline_variable, 'get_existing_pipeline_variable', return_value={
+ 'name': 'PIPELINE_VAR_NAME',
+ 'value': 'Im alive',
+ 'type': 'pipeline_variable',
+ 'secured': False,
+ 'uuid': '{9ddb0507-439a-495a-99f3-564f15138127}'
+ })
+ def test_delete_variable_check_mode(self, *args):
+ with patch.object(self.module, 'delete_pipeline_variable') as delete_pipeline_variable_mock:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ set_module_args({
+ 'client_id': 'ABC',
+ 'client_secret': 'XXX',
+ 'workspace': 'name',
+ 'repository': 'repo',
+ 'name': 'PIPELINE_VAR_NAME',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ self.module.main()
+
+ self.assertEqual(delete_pipeline_variable_mock.call_count, 0)
+ self.assertEqual(exec_info.exception.args[0]['changed'], True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_campfire.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_campfire.py
new file mode 100644
index 000000000..ef0dca5ed
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_campfire.py
@@ -0,0 +1,96 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules import campfire
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestCampfireModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestCampfireModule, self).setUp()
+ self.module = campfire
+
+ def tearDown(self):
+ super(TestCampfireModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+ return mocker.patch('ansible_collections.community.general.plugins.modules.campfire.fetch_url')
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_successful_message(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
+ url = fetch_url_mock.call_args[0][1]
+ data = fetch_url_mock.call_args[1]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><body>test</body></message>'
+
+ def test_successful_message_with_notify(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test',
+ 'notify': 'bell'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 2
+ notify_call = fetch_url_mock.mock_calls[0]
+ url = notify_call[1][1]
+ data = notify_call[2]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><type>SoundMessage</type><body>bell</body></message>'
+
+ message_call = fetch_url_mock.mock_calls[1]
+ url = message_call[1][1]
+ data = message_call[2]['data']
+
+ assert url == 'https://test.campfirenow.com/room/test/speak.xml'
+ assert data == '<message><body>test</body></message>'
+
+ def test_failure_message(self):
+ """Test failure message"""
+ set_module_args({
+ 'subscription': 'test',
+ 'token': 'abc',
+ 'room': 'test',
+ 'msg': 'test'
+ })
+
+ with patch.object(campfire, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 403})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_circonus_annotation.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_circonus_annotation.py
new file mode 100644
index 000000000..7378e62a2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_circonus_annotation.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import json
+import re
+import uuid
+from urllib3.response import HTTPResponse
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible_collections.community.general.plugins.modules import circonus_annotation
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestCirconusAnnotation(ModuleTestCase):
+
+ def setUp(self):
+ super(TestCirconusAnnotation, self).setUp()
+ self.module = circonus_annotation
+
+ def tearDown(self):
+ super(TestCirconusAnnotation, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_add_annotation(self):
+ """Check that result is changed"""
+ set_module_args({
+ 'category': 'test category',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
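+        # Stub standing in for requests.adapters.HTTPAdapter.send: it builds
+        # a canned Circonus API response, so no real HTTP request is made.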
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': cid,
+ '_created': 1502146995,
+ '_last_modified': 1502146995,
+ '_last_modified_by': '/user/1000',
+ 'category': 'test category',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502145480,
+ 'stop': None,
+ 'title': 'test title',
+ }
+ raw = to_bytes(json.dumps(data))
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 200
+ resp.reason = 'OK'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
+ self.assertEqual(send.call_count, 1)
+
+ def test_add_annotation_unicode(self):
+ """Check that result is changed.
+        Note: there seems to be a bug that prevents creating an annotation
+        with a non-ASCII category if that category already exists; in such
+        a case an Internal Server Error (500) occurs."""
+ set_module_args({
+ 'category': 'new catégorÿ',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': '/annotation/100000',
+ '_created': 1502236928,
+ '_last_modified': 1502236928,
+ '_last_modified_by': '/user/1000',
+ # use res['annotation']['category'].encode('latin1').decode('utf8')
+ 'category': u'new cat\xc3\xa9gor\xc3\xbf',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502236927,
+ 'stop': 1502236927,
+ 'title': 'test title',
+ }
+
+ raw = to_bytes(json.dumps(data), encoding='latin1')
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 200
+ resp.reason = 'OK'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
+ self.assertEqual(send.call_count, 1)
+
+ def test_auth_failure(self):
+ """Check that an error is raised when authentication failed"""
+ set_module_args({
+ 'category': 'test category',
+ 'description': 'test description',
+ 'title': 'test title',
+ 'api_key': str(uuid.uuid4()),
+ })
+
+ cid = '/annotation/100000'
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ data = {
+ '_cid': cid,
+ '_created': 1502146995,
+ '_last_modified': 1502146995,
+ '_last_modified_by': '/user/1000',
+ 'category': 'test category',
+ 'description': 'test description',
+ 'rel_metrics': [],
+ 'start': 1502145480,
+ 'stop': None,
+ 'title': 'test title',
+ }
+ raw = to_bytes(json.dumps(data))
+ resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
+ resp.status = 403
+ resp.reason = 'Forbidden'
+ resp.headers = {'X-Circonus-API-Version': '2.00'}
+ return self.build_response(request, resp)
+
+ with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['failed'])
+ self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason']))
+ self.assertEqual(send.call_count, 1)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_cpanm.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_cpanm.py
new file mode 100644
index 000000000..5367a1fab
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_cpanm.py
@@ -0,0 +1,293 @@
+# -*- coding: utf-8 -*-
+# Author: Alexei Znamensky (russoz@gmail.com)
+# Largely adapted from test_redhat_subscription by
+# Jiri Hnidek (jhnidek@redhat.com)
+#
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# Copyright (c) Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import cpanm
+
+import pytest
+
+TESTED_MODULE = cpanm.__name__
+
+
+@pytest.fixture
+def patch_cpanm(mocker):
+ """
+    Fixture mocking the bin path lookup used by the cpanm module
+ """
+ mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path',
+ return_value='/testbin/cpanm')
+
+
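+# Each entry pairs the module arguments with the expected behaviour: a test id,
+# the run_command() invocations the module should make (positional arguments,
+# keyword arguments, and the mocked (rc, stdout, stderr) result), and the
+# expected 'changed'/'failed'/'msg' values in the module output.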
+TEST_CASES = [
+ [
+ {'name': 'Dancer'},
+ {
+ 'id': 'install_dancer_compatibility',
+ 'run_command.calls': [
+ (
+ ['/testbin/cpanm', '-le', 'use Dancer;'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ (2, '', 'error, not installed',), # output rc, out, err
+ ),
+ (
+ ['/testbin/cpanm', 'Dancer'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ ),
+ ],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer'},
+ {
+ 'id': 'install_dancer_already_installed_compatibility',
+ 'run_command.calls': [
+ (
+ ['/testbin/cpanm', '-le', 'use Dancer;'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ (0, '', '',), # output rc, out, err
+ ),
+ ],
+ 'changed': False,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'mode': 'new'},
+ {
+ 'id': 'install_dancer',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'Dancer'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz'},
+ {
+ 'id': 'install_distribution_file_compatibility',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz', 'mode': 'new'},
+ {
+ 'id': 'install_distribution_file',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'MIYAGAWA/Plack-0.99_05.tar.gz'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'locallib': '/srv/webapps/my_app/extlib', 'mode': 'new'},
+ {
+ 'id': 'install_into_locallib',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'from_path': '/srv/webapps/my_app/src/', 'mode': 'new'},
+ {
+ 'id': 'install_from_local_directory',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', '/srv/webapps/my_app/src/'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'locallib': '/srv/webapps/my_app/extlib', 'notest': True, 'mode': 'new'},
+ {
+ 'id': 'install_into_locallib_no_unit_testing',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', '--notest', '--local-lib', '/srv/webapps/my_app/extlib', 'Dancer'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'mirror': 'http://cpan.cpantesters.org/', 'mode': 'new'},
+ {
+ 'id': 'install_from_mirror',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', '--mirror', 'http://cpan.cpantesters.org/', 'Dancer'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'system_lib': True, 'mode': 'new'},
+ {
+ 'id': 'install_into_system_lib',
+ 'run_command.calls': [],
+ 'changed': False,
+ 'failed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'version': '1.0', 'mode': 'new'},
+ {
+ 'id': 'install_minversion_implicit',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'Dancer~1.0'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'version': '~1.5', 'mode': 'new'},
+ {
+ 'id': 'install_minversion_explicit',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'Dancer~1.5'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ }
+ ],
+ [
+ {'name': 'Dancer', 'version': '@1.7', 'mode': 'new'},
+ {
+ 'id': 'install_specific_version',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'Dancer@1.7'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ 'failed': False,
+ }
+ ],
+ [
+ {'name': 'MIYAGAWA/Plack-0.99_05.tar.gz', 'version': '@1.7', 'mode': 'new'},
+ {
+ 'id': 'install_specific_version_from_file_error',
+ 'run_command.calls': [],
+ 'changed': False,
+ 'failed': True,
+ 'msg': "parameter 'version' must not be used when installing from a file",
+ }
+ ],
+ [
+ {'from_path': '~/', 'version': '@1.7', 'mode': 'new'},
+ {
+ 'id': 'install_specific_version_from_directory_error',
+ 'run_command.calls': [],
+ 'changed': False,
+ 'failed': True,
+ 'msg': "parameter 'version' must not be used when installing from a directory",
+ }
+ ],
+ [
+ {'name': 'git://github.com/plack/Plack.git', 'version': '@1.7', 'mode': 'new'},
+ {
+ 'id': 'install_specific_version_from_git_url_explicit',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'git://github.com/plack/Plack.git@1.7'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ 'failed': False,
+ }
+ ],
+ [
+ {'name': 'git://github.com/plack/Plack.git', 'version': '2.5', 'mode': 'new'},
+ {
+ 'id': 'install_specific_version_from_git_url_implicit',
+ 'run_command.calls': [(
+ ['/testbin/cpanm', 'git://github.com/plack/Plack.git@2.5'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', '',), # output rc, out, err
+ )],
+ 'changed': True,
+ 'failed': False,
+ }
+ ],
+ [
+ {'name': 'git://github.com/plack/Plack.git', 'version': '~2.5', 'mode': 'new'},
+ {
+ 'id': 'install_version_operator_from_git_url_error',
+ 'run_command.calls': [],
+ 'changed': False,
+ 'failed': True,
+ 'msg': "operator '~' not allowed in version parameter when installing from git repository",
+ }
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_cpanm(mocker, capfd, patch_cpanm, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.module_helper.AnsibleModule.run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ cpanm.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("results =\n%s" % results)
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
+
+ assert results.get('changed', False) == testcase['changed']
+ if 'failed' in testcase:
+ assert results.get('failed', False) == testcase['failed']
+ if 'msg' in testcase:
+ assert results.get('msg', '') == testcase['msg']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_datadog_downtime.py.disabled b/ansible_collections/community/general/tests/unit/plugins/modules/test_datadog_downtime.py.disabled
new file mode 100644
index 000000000..52f27710c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_datadog_downtime.py.disabled
@@ -0,0 +1,226 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
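+# Note: the '.py.disabled' suffix keeps this test file out of test collection.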
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules.monitoring.datadog import datadog_downtime
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+)
+
+from pytest import importorskip
+
+# Skip these tests on Python 2, where datadog_api_client cannot be installed
+datadog_api_client = importorskip("datadog_api_client")
+Downtime = datadog_api_client.v1.model.downtime.Downtime
+DowntimeRecurrence = datadog_api_client.v1.model.downtime_recurrence.DowntimeRecurrence
+
+
+class TestDatadogDowntime(ModuleTestCase):
+
+ def setUp(self):
+ super(TestDatadogDowntime, self).setUp()
+ self.module = datadog_downtime
+
+ def tearDown(self):
+ super(TestDatadogDowntime, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi")
+ def test_create_downtime_when_no_id(self, downtimes_api_mock):
+ set_module_args({
+ "monitor_tags": ["foo:bar"],
+ "scope": ["*"],
+ "monitor_id": 12345,
+ "downtime_message": "Message",
+ "start": 1111,
+ "end": 2222,
+ "timezone": "UTC",
+ "rrule": "rrule",
+ "api_key": "an_api_key",
+ "app_key": "an_app_key",
+ })
+
+ downtime = Downtime()
+ downtime.monitor_tags = ["foo:bar"]
+ downtime.scope = ["*"]
+ downtime.monitor_id = 12345
+ downtime.message = "Message"
+ downtime.start = 1111
+ downtime.end = 2222
+ downtime.timezone = "UTC"
+ downtime.recurrence = DowntimeRecurrence(
+ rrule="rrule"
+ )
+
+ create_downtime_mock = MagicMock(return_value=Downtime(id=12345))
+ downtimes_api_mock.return_value = MagicMock(create_downtime=create_downtime_mock)
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['downtime']['id'], 12345)
+ create_downtime_mock.assert_called_once_with(downtime)
+
+ @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi")
+ def test_create_downtime_when_id_and_disabled(self, downtimes_api_mock):
+ set_module_args({
+ "id": 1212,
+ "monitor_tags": ["foo:bar"],
+ "scope": ["*"],
+ "monitor_id": 12345,
+ "downtime_message": "Message",
+ "start": 1111,
+ "end": 2222,
+ "timezone": "UTC",
+ "rrule": "rrule",
+ "api_key": "an_api_key",
+ "app_key": "an_app_key",
+ })
+
+ downtime = Downtime()
+ downtime.monitor_tags = ["foo:bar"]
+ downtime.scope = ["*"]
+ downtime.monitor_id = 12345
+ downtime.message = "Message"
+ downtime.start = 1111
+ downtime.end = 2222
+ downtime.timezone = "UTC"
+ downtime.recurrence = DowntimeRecurrence(
+ rrule="rrule"
+ )
+
+ create_downtime_mock = MagicMock(return_value=Downtime(id=12345))
+ get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=True))
+ downtimes_api_mock.return_value = MagicMock(
+ create_downtime=create_downtime_mock, get_downtime=get_downtime_mock
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['downtime']['id'], 12345)
+ create_downtime_mock.assert_called_once_with(downtime)
+ get_downtime_mock.assert_called_once_with(1212)
+
+ @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi")
+ def test_update_downtime_when_not_disabled(self, downtimes_api_mock):
+ set_module_args({
+ "id": 1212,
+ "monitor_tags": ["foo:bar"],
+ "scope": ["*"],
+ "monitor_id": 12345,
+ "downtime_message": "Message",
+ "start": 1111,
+ "end": 2222,
+ "timezone": "UTC",
+ "rrule": "rrule",
+ "api_key": "an_api_key",
+ "app_key": "an_app_key",
+ })
+
+ downtime = Downtime()
+ downtime.monitor_tags = ["foo:bar"]
+ downtime.scope = ["*"]
+ downtime.monitor_id = 12345
+ downtime.message = "Message"
+ downtime.start = 1111
+ downtime.end = 2222
+ downtime.timezone = "UTC"
+ downtime.recurrence = DowntimeRecurrence(
+ rrule="rrule"
+ )
+
+ update_downtime_mock = MagicMock(return_value=Downtime(id=1212))
+ get_downtime_mock = MagicMock(return_value=Downtime(id=1212, disabled=False))
+ downtimes_api_mock.return_value = MagicMock(
+ update_downtime=update_downtime_mock, get_downtime=get_downtime_mock
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['downtime']['id'], 1212)
+ update_downtime_mock.assert_called_once_with(1212, downtime)
+ get_downtime_mock.assert_called_once_with(1212)
+
+ @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi")
+ def test_update_downtime_no_change(self, downtimes_api_mock):
+ set_module_args({
+ "id": 1212,
+ "monitor_tags": ["foo:bar"],
+ "scope": ["*"],
+ "monitor_id": 12345,
+ "downtime_message": "Message",
+ "start": 1111,
+ "end": 2222,
+ "timezone": "UTC",
+ "rrule": "rrule",
+ "api_key": "an_api_key",
+ "app_key": "an_app_key",
+ })
+
+ downtime = Downtime()
+ downtime.monitor_tags = ["foo:bar"]
+ downtime.scope = ["*"]
+ downtime.monitor_id = 12345
+ downtime.message = "Message"
+ downtime.start = 1111
+ downtime.end = 2222
+ downtime.timezone = "UTC"
+ downtime.recurrence = DowntimeRecurrence(
+ rrule="rrule"
+ )
+
+ downtime_get = Downtime()
+ downtime_get.id = 1212
+ downtime_get.disabled = False
+ downtime_get.monitor_tags = ["foo:bar"]
+ downtime_get.scope = ["*"]
+ downtime_get.monitor_id = 12345
+ downtime_get.message = "Message"
+ downtime_get.start = 1111
+ downtime_get.end = 2222
+ downtime_get.timezone = "UTC"
+ downtime_get.recurrence = DowntimeRecurrence(
+ rrule="rrule"
+ )
+
+ update_downtime_mock = MagicMock(return_value=downtime_get)
+ get_downtime_mock = MagicMock(return_value=downtime_get)
+ downtimes_api_mock.return_value = MagicMock(
+ update_downtime=update_downtime_mock, get_downtime=get_downtime_mock
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertFalse(result.exception.args[0]['changed'])
+ self.assertEqual(result.exception.args[0]['downtime']['id'], 1212)
+ update_downtime_mock.assert_called_once_with(1212, downtime)
+ get_downtime_mock.assert_called_once_with(1212)
+
+ @patch("ansible_collections.community.general.plugins.modules.monitoring.datadog.datadog_downtime.DowntimesApi")
+ def test_delete_downtime(self, downtimes_api_mock):
+ set_module_args({
+ "id": 1212,
+ "state": "absent",
+ "api_key": "an_api_key",
+ "app_key": "an_app_key",
+ })
+
+ cancel_downtime_mock = MagicMock()
+ get_downtime_mock = MagicMock(return_value=Downtime(id=1212))
+ downtimes_api_mock.return_value = MagicMock(
+ get_downtime=get_downtime_mock,
+ cancel_downtime=cancel_downtime_mock
+ )
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ cancel_downtime_mock.assert_called_once_with(1212)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_dconf.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_dconf.py
new file mode 100644
index 000000000..e0ea8195a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_dconf.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or
+# https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules import dconf
+
+try:
+ from gi.repository.GLib import Variant
+except ImportError:
+ Variant = None
+
+DconfPreference = dconf.DconfPreference
+
+
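+# 'expected' is the comparison result when GLib is available and both values
+# parse as GVariants; 'fallback_expected' is the result of the plain string
+# comparison used when GLib is missing.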
+@pytest.mark.parametrize(
+ "v1,v2,expected,fallback_expected",
+ (("'foo'", "'foo'", True, True),
+ ('"foo"', "'foo'", True, False),
+ ("'foo'", '"foo"', True, False),
+ ("'foo'", '"bar"', False, False),
+ ("[1, 2, 3]", "[1, 2, 3]", True, True),
+ ("[1, 2, 3]", "[3, 2, 1]", False, False),
+ ('1234', '1234', True, True),
+ ('1234', '1235', False, False),
+ ('1.0', '1.0', True, True),
+ ('1.000', '1.0', True, False),
+ ('2.0', '4.0', False, False),
+ # GVariants with different types aren't equal!
+ ('1', '1.0', False, False),
+ # Explicit types
+ ('@as []', '[]', True, False),
+ ))
+def test_gvariant_equality(mocker, v1, v2, expected, fallback_expected):
+ assert DconfPreference.variants_are_equal(v1, v2) is \
+ (expected if Variant else fallback_expected)
+ mocker.patch.object(dconf, 'Variant', None)
+ mocker.patch.object(dconf, "GError", AttributeError)
+ assert DconfPreference.variants_are_equal(v1, v2) is fallback_expected
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_discord.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_discord.py
new file mode 100644
index 000000000..83069d279
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_discord.py
@@ -0,0 +1,105 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules import discord
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestDiscordModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestDiscordModule, self).setUp()
+ self.module = discord
+
+ def tearDown(self):
+ super(TestDiscordModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+        return mocker.patch('ansible_collections.community.general.plugins.modules.discord.fetch_url')
+
+ def test_without_parameters(self):
+ """Failure if no parameters set"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_without_content(self):
+ """Failure if content and embeds both are missing"""
+ set_module_args({
+ 'webhook_id': 'xxx',
+ 'webhook_token': 'xxx'
+ })
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_successful_message(self):
+ """Test a basic message successfully."""
+ set_module_args({
+ 'webhook_id': 'xxx',
+ 'webhook_token': 'xxx',
+ 'content': 'test'
+ })
+
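+        # Discord webhooks answer a successful post with HTTP 204 (No Content).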
+ with patch.object(discord, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['content'] == "test"
+
+ def test_message_with_username(self):
+ """Test a message with username set successfully."""
+ set_module_args({
+ 'webhook_id': 'xxx',
+ 'webhook_token': 'xxx',
+ 'content': 'test',
+ 'username': 'Ansible Bot'
+ })
+
+ with patch.object(discord, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 204, 'msg': 'OK (0 bytes)'})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible Bot"
+ assert call_data['content'] == "test"
+
+ def test_failed_message(self):
+ """Test failure because webhook id is wrong."""
+
+ set_module_args({
+ 'webhook_id': 'wrong',
+ 'webhook_token': 'xxx',
+ 'content': 'test'
+ })
+
+ with patch.object(discord, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found', 'body': '{"message": "Unknown Webhook", "code": 10015}'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_failed_message_without_body(self):
+ """Test failure with empty response body."""
+
+ set_module_args({
+ 'webhook_id': 'wrong',
+ 'webhook_token': 'xxx',
+ 'content': 'test'
+ })
+
+ with patch.object(discord, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'HTTP Error 404: Not Found'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple.py
new file mode 100644
index 000000000..95a78818d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import dnsimple as dnsimple_module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+import pytest
+import sys
+
+dnsimple = pytest.importorskip('dnsimple')
+mandatory_py_version = pytest.mark.skipif(
+ sys.version_info < (3, 6),
+ reason='The dnsimple dependency requires python3.6 or higher'
+)
+
+from dnsimple import DNSimpleException
+
+
+class TestDNSimple(ModuleTestCase):
+ """Main class for testing dnsimple module."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestDNSimple, self).setUp()
+ self.module = dnsimple_module
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestDNSimple, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ @patch('dnsimple.service.Identity.whoami')
+ def test_account_token(self, mock_whoami):
+ mock_whoami.return_value.data.account = 42
+ ds = self.module.DNSimpleV2('fake', 'fake', True, self.module)
+        self.assertEqual(ds.account, 42)
+
+ @patch('dnsimple.service.Accounts.list_accounts')
+ @patch('dnsimple.service.Identity.whoami')
+ def test_user_token_multiple_accounts(self, mock_whoami, mock_accounts):
+ mock_accounts.return_value.data = [1, 2, 3]
+ mock_whoami.return_value.data.account = None
+ with self.assertRaises(DNSimpleException):
+ self.module.DNSimpleV2('fake', 'fake', True, self.module)
+
+ @patch('dnsimple.service.Accounts.list_accounts')
+ @patch('dnsimple.service.Identity.whoami')
+ def test_user_token_single_account(self, mock_whoami, mock_accounts):
+ mock_accounts.return_value.data = [42]
+ mock_whoami.return_value.data.account = None
+ ds = self.module.DNSimpleV2('fake', 'fake', True, self.module)
+        self.assertEqual(ds.account, 42)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple_info.py
new file mode 100644
index 000000000..5806ec772
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_dnsimple_info.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import dnsimple_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args, AnsibleExitJson
+from httmock import response
+from httmock import with_httmock
+from httmock import urlmatch
+
+
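+# The urlmatch handlers below intercept HTTP requests matching the given
+# netloc/path patterns and serve canned JSON, so no real DNSimple API is hit.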
+@urlmatch(netloc='(.)*dnsimple.com(.)*',
+ path='/v2/[0-9]*/zones/')
+def zones_resp(url, request):
+ """return domains"""
+ headers = {'content-type': 'application/json'}
+ data_content = {"data":
+ [{"account_id": "1234", }, ],
+ "pagination": {"total_pages": 1}}
+ content = data_content
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc='(.)*dnsimple.com(.)*',
+ path='/v2/[0-9]*/zones/(.)*/records(.*)')
+def records_resp(url, request):
+ """return record(s)"""
+ headers = {'content-type': 'application/json'}
+ data_content = {"data":
+ [{"content": "example",
+ "name": "example.com"}],
+ "pagination": {"total_pages": 1}}
+ content = data_content
+ return response(200, content, headers, None, 5, request)
+
+
+class TestDNSimple_Info(ModuleTestCase):
+ """Main class for testing dnsimple module."""
+
+ def setUp(self):
+
+ """Setup."""
+ super(TestDNSimple_Info, self).setUp()
+ self.module = dnsimple_info
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestDNSimple_Info, self).tearDown()
+
+ def test_with_no_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ @with_httmock(zones_resp)
+ def test_only_key_and_account(self):
+ """key and account will pass, returns domains"""
+ account_id = "1234"
+ with self.assertRaises(AnsibleExitJson) as exc_info:
+ set_module_args({
+ "api_key": "abcd1324",
+ "account_id": account_id
+ })
+ self.module.main()
+ result = exc_info.exception.args[0]
+ # nothing should change
+ self.assertFalse(result['changed'])
+ # we should return at least one item with the matching account ID
+ assert result['dnsimple_domain_info'][0]["account_id"] == account_id
+
+ @with_httmock(records_resp)
+ def test_only_name_without_record(self):
+ """name and no record should not fail, returns the record"""
+ name = "example.com"
+ with self.assertRaises(AnsibleExitJson) as exc_info:
+ set_module_args({
+ "api_key": "abcd1324",
+ "name": "example.com",
+ "account_id": "1234"
+ })
+ self.module.main()
+ result = exc_info.exception.args[0]
+ # nothing should change
+ self.assertFalse(result['changed'])
+        # we should return at least one item with a matching domain
+ assert result['dnsimple_records_info'][0]['name'] == name
+
+ @with_httmock(records_resp)
+ def test_name_and_record(self):
+ """name and record should not fail, returns the record"""
+ record = "example"
+ with self.assertRaises(AnsibleExitJson) as exc_info:
+ set_module_args({
+ "api_key": "abcd1324",
+ "account_id": "1234",
+ "name": "example.com",
+ "record": "example"
+ })
+ self.module.main()
+ result = exc_info.exception.args[0]
+ # nothing should change
+ self.assertFalse(result['changed'])
+ # we should return at least one item and content should match
+ assert result['dnsimple_record_info'][0]['content'] == record
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2.py
new file mode 100644
index 000000000..f01f15ef8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import gconftool2
+
+import pytest
+
+TESTED_MODULE = gconftool2.__name__
+
+
+@pytest.fixture
+def patch_gconftool2(mocker):
+ """
+    Fixture mocking the bin path lookup used by the gconftool2 module
+ """
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path',
+ return_value='/testbin/gconftool-2')
+
+
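+# Each entry pairs the module arguments with a test id, the expected
+# run_command() invocations (with their mocked rc/stdout/stderr), and the
+# expected new value of the key.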
+TEST_CASES = [
+ [
+ {'state': 'get', 'key': '/desktop/gnome/background/picture_filename'},
+ {
+ 'id': 'test_simple_element_get',
+ 'run_command.calls': [
+ (
+ ['/testbin/gconftool-2', '--get', '/desktop/gnome/background/picture_filename'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '100\n', '',),
+ ),
+ ],
+ 'new_value': '100',
+ }
+ ],
+ [
+ {'state': 'get', 'key': '/desktop/gnome/background/picture_filename'},
+ {
+ 'id': 'test_simple_element_get_not_found',
+ 'run_command.calls': [
+ (
+ ['/testbin/gconftool-2', '--get', '/desktop/gnome/background/picture_filename'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '', "No value set for `/desktop/gnome/background/picture_filename'\n",),
+ ),
+ ],
+ 'new_value': None,
+ }
+ ],
+ [
+ {'state': 'present', 'key': '/desktop/gnome/background/picture_filename', 'value': '200', 'value_type': 'int'},
+ {
+ 'id': 'test_simple_element_set',
+ 'run_command.calls': [
+ (
+ ['/testbin/gconftool-2', '--get', '/desktop/gnome/background/picture_filename'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '100\n', '',),
+ ),
+ (
+ ['/testbin/gconftool-2', '--type', 'int', '--set', '/desktop/gnome/background/picture_filename', '200'],
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ (0, '200\n', '',),
+ ),
+ ],
+ 'new_value': '200',
+ }
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_gconftool2(mocker, capfd, patch_gconftool2, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ gconftool2.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % testcase)
+ print("results =\n%s" % results)
+
+ for conditional_test_result in ('value',):
+ if conditional_test_result in testcase:
+ assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results)
+ assert results[conditional_test_result] == testcase[conditional_test_result], \
+ "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result])
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2_info.py
new file mode 100644
index 000000000..352af6bb0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gconftool2_info.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import gconftool2_info
+
+import pytest
+
+TESTED_MODULE = gconftool2_info.__name__
+
+
+@pytest.fixture
+def patch_gconftool2_info(mocker):
+ """
+    Fixture mocking the bin path lookup used by the gconftool2_info module
+ """
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path',
+ return_value='/testbin/gconftool-2')
+
+
+TEST_CASES = [
+ [
+ {'key': '/desktop/gnome/background/picture_filename'},
+ {
+ 'id': 'test_simple_element_get',
+ 'run_command.calls': [
+ (
+                # The following command invocation is asserted
+                ['/testbin/gconftool-2', '--get', '/desktop/gnome/background/picture_filename'],
+                # 'check_rc' states whether the return code is checked
+                {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+                # Mocked return code, stdout and stderr
+                (0, '100\n', '',),
+ ),
+ ],
+ 'value': '100',
+ }
+ ],
+ [
+ {'key': '/desktop/gnome/background/picture_filename'},
+ {
+ 'id': 'test_simple_element_get_not_found',
+ 'run_command.calls': [
+ (
+                # The following command invocation is asserted
+                ['/testbin/gconftool-2', '--get', '/desktop/gnome/background/picture_filename'],
+                # 'check_rc' states whether the return code is checked
+                {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+                # Mocked return code, stdout and stderr
+                (0, '', "No value set for `/desktop/gnome/background/picture_filename'\n",),
+ ),
+ ],
+ 'value': None,
+ }
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_gconftool2_info(mocker, capfd, patch_gconftool2_info, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ gconftool2_info.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % testcase)
+ print("results =\n%s" % results)
+
+ for conditional_test_result in ('value',):
+ if conditional_test_result in testcase:
+ assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results)
+ assert results[conditional_test_result] == testcase[conditional_test_result], \
+ "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result])
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gem.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gem.py
new file mode 100644
index 000000000..92578e062
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gem.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2018 Antoine Catton
+# MIT License (see LICENSES/MIT.txt or https://opensource.org/licenses/MIT)
+# SPDX-License-Identifier: MIT
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import copy
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules import gem
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+def get_command(run_command):
+ """Generate the command line string from the patched run_command"""
+ args = run_command.call_args[0]
+ command = args[0]
+ return ' '.join(command)
+
+
+class TestGem(ModuleTestCase):
+ def setUp(self):
+ super(TestGem, self).setUp()
+ self.rubygems_path = ['/usr/bin/gem']
+ self.mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.gem.get_rubygems_path',
+ lambda module: copy.deepcopy(self.rubygems_path),
+ )
+
+ @pytest.fixture(autouse=True)
+ def _mocker(self, mocker):
+ self.mocker = mocker
+
+ def patch_installed_versions(self, versions):
+ """Mocks the versions of the installed package"""
+
+ target = 'ansible_collections.community.general.plugins.modules.gem.get_installed_versions'
+
+ def new(module, remote=False):
+ return versions
+
+ return self.mocker.patch(target, new)
+
+ def patch_rubygems_version(self, version=None):
+ target = 'ansible_collections.community.general.plugins.modules.gem.get_rubygems_version'
+
+ def new(module):
+ return version
+
+ return self.mocker.patch(target, new)
+
+ def patch_run_command(self):
+ target = 'ansible.module_utils.basic.AnsibleModule.run_command'
+ return self.mocker.patch(target)
+
+ def test_fails_when_user_install_and_install_dir_are_combined(self):
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': True,
+ 'install_dir': '/opt/dummy',
+ })
+
+ with pytest.raises(AnsibleFailJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['failed']
+ assert result['msg'] == "install_dir requires user_install=false"
+
+ def test_passes_install_dir_to_gem(self):
+        # XXX: This test is extremely fragile: it makes assumptions about the module code and
+        # how its functions are run.
+ # If you start modifying the code of the module, you might need to modify what this
+ # test mocks. The only thing that matters is the assertion that this 'gem install' is
+ # invoked with '--install-dir'.
+
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': False,
+ 'install_dir': '/opt/dummy',
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions([])
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['changed']
+ assert run_command.called
+
+ assert '--install-dir /opt/dummy' in get_command(run_command)
+
+ def test_passes_install_dir_and_gem_home_when_uninstall_gem(self):
+ # XXX: This test is also extremely fragile because of mocking.
+        # If this breaks, the only thing that matters is to check whether '--install-dir' is
+ # in the run command, and that GEM_HOME is passed to the command.
+ set_module_args({
+ 'name': 'dummy',
+ 'user_install': False,
+ 'install_dir': '/opt/dummy',
+ 'state': 'absent',
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions(['1.0.0'])
+
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+
+ assert result['changed']
+ assert run_command.called
+
+ assert '--install-dir /opt/dummy' in get_command(run_command)
+
+ update_environ = run_command.call_args[1].get('environ_update', {})
+ assert update_environ.get('GEM_HOME') == '/opt/dummy'
+
+ def test_passes_add_force_option(self):
+ set_module_args({
+ 'name': 'dummy',
+ 'force': True,
+ })
+
+ self.patch_rubygems_version()
+ self.patch_installed_versions([])
+ run_command = self.patch_run_command()
+
+ with pytest.raises(AnsibleExitJson) as exc:
+ gem.main()
+
+ result = exc.value.args[0]
+ assert result['changed']
+ assert run_command.called
+
+ assert '--force' in get_command(run_command)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_github_repo.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_github_repo.py
new file mode 100644
index 000000000..10227aadf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_github_repo.py
@@ -0,0 +1,330 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import json
+import sys
+from httmock import with_httmock, urlmatch, response
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules import github_repo
+
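+# setUp() skips these tests when the interpreter is older than this version,
+# since PyGithub requires at least Python 2.7.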
+GITHUB_MINIMUM_PYTHON_VERSION = (2, 7)
+
+
+@urlmatch(netloc=r'.*')
+def debug_mock(url, request):
+ print(request.original.__dict__)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*', method="get")
+def get_orgs_mock(url, request):
+ match = re.search(r"api\.github\.com(:[0-9]+)?/orgs/(?P<org>[^/]+)", request.url)
+ org = match.group("org")
+
+ # https://docs.github.com/en/rest/reference/orgs#get-an-organization
+ headers = {'content-type': 'application/json'}
+ content = {
+ "login": org,
+ "url": "https://api.github.com/orgs/{0}".format(org)
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user', method="get")
+def get_user_mock(url, request):
+ # https://docs.github.com/en/rest/reference/users#get-the-authenticated-user
+ headers = {'content-type': 'application/json'}
+ content = {
+ "login": "octocat",
+ "url": "https://api.github.com/users/octocat"
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get")
+def get_repo_notfound_mock(url, request):
+ return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get")
+def get_repo_mock(url, request):
+ match = re.search(
+ r"api\.github\.com(:[0-9]+)?/repos/(?P<org>[^/]+)/(?P<repo>[^/]+)", request.url)
+ org = match.group("org")
+ repo = match.group("repo")
+
+ # https://docs.github.com/en/rest/reference/repos#get-a-repository
+ headers = {'content-type': 'application/json'}
+ content = {
+ "name": repo,
+ "full_name": "{0}/{1}".format(org, repo),
+ "url": "https://api.github.com/repos/{0}/{1}".format(org, repo),
+ "private": False,
+ "description": "This your first repo!",
+ "default_branch": "master",
+ "allow_rebase_merge": True
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="get")
+def get_private_repo_mock(url, request):
+ match = re.search(
+ r"api\.github\.com(:[0-9]+)?/repos/(?P<org>[^/]+)/(?P<repo>[^/]+)", request.url)
+ org = match.group("org")
+ repo = match.group("repo")
+
+ # https://docs.github.com/en/rest/reference/repos#get-a-repository
+ headers = {'content-type': 'application/json'}
+ content = {
+ "name": repo,
+ "full_name": "{0}/{1}".format(org, repo),
+ "url": "https://api.github.com/repos/{0}/{1}".format(org, repo),
+ "private": True,
+ "description": "This your first repo!",
+ "default_branch": "master",
+ "allow_rebase_merge": True
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/orgs/.*/repos', method="post")
+def create_new_org_repo_mock(url, request):
+ match = re.search(
+ r"api\.github\.com(:[0-9]+)?/orgs/(?P<org>[^/]+)/repos", request.url)
+ org = match.group("org")
+ repo = json.loads(request.body)
+
+ headers = {'content-type': 'application/json'}
+ # https://docs.github.com/en/rest/reference/repos#create-an-organization-repository
+ content = {
+ "name": repo['name'],
+ "full_name": "{0}/{1}".format(org, repo['name']),
+ "private": repo.get('private', False),
+ "description": repo.get('description')
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/user/repos', method="post")
+def create_new_user_repo_mock(url, request):
+ repo = json.loads(request.body)
+
+ headers = {'content-type': 'application/json'}
+ # https://docs.github.com/en/rest/reference/repos#create-a-repository-for-the-authenticated-user
+ content = {
+ "name": repo['name'],
+ "full_name": "{0}/{1}".format("octocat", repo['name']),
+ "private": repo.get('private', False),
+ "description": repo.get('description')
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(201, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="patch")
+def patch_repo_mock(url, request):
+ match = re.search(
+ r"api\.github\.com(:[0-9]+)?/repos/(?P<org>[^/]+)/(?P<repo>[^/]+)", request.url)
+ org = match.group("org")
+ repo = match.group("repo")
+
+ body = json.loads(request.body)
+ headers = {'content-type': 'application/json'}
+ # https://docs.github.com/en/rest/reference/repos#update-a-repository
+ content = {
+ "name": repo,
+ "full_name": "{0}/{1}".format(org, repo),
+ "url": "https://api.github.com/repos/{0}/{1}".format(org, repo),
+ "private": body.get('private', False),
+ "description": body.get('description'),
+ "default_branch": "master",
+ "allow_rebase_merge": True
+ }
+ content = json.dumps(content).encode("utf-8")
+ return response(200, content, headers, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete")
+def delete_repo_mock(url, request):
+ # https://docs.github.com/en/rest/reference/repos#delete-a-repository
+ return response(204, None, None, None, 5, request)
+
+
+@urlmatch(netloc=r'api\.github\.com(:[0-9]+)?$', path=r'/repos/.*/.*', method="delete")
+def delete_repo_notfound_mock(url, request):
+ # https://docs.github.com/en/rest/reference/repos#delete-a-repository
+ return response(404, "{\"message\": \"Not Found\"}", "", "Not Found", 5, request)
+
+
+class TestGithubRepo(unittest.TestCase):
+ def setUp(self):
+ if sys.version_info < GITHUB_MINIMUM_PYTHON_VERSION:
+ self.skipTest("Python %s+ is needed for PyGithub" %
+ ",".join(map(str, GITHUB_MINIMUM_PYTHON_VERSION)))
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_repo_notfound_mock)
+ @with_httmock(create_new_org_repo_mock)
+ def test_create_new_org_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": False,
+ "state": "present",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+
+ self.assertEqual(result['changed'], True)
+ self.assertEqual(result['repo']['private'], False)
+ self.assertEqual(result['repo']['description'], 'Just for fun')
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_repo_notfound_mock)
+ @with_httmock(create_new_org_repo_mock)
+ def test_create_new_org_repo_incomplete(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": None,
+ "private": None,
+ "state": "present",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+
+ self.assertEqual(result['changed'], True)
+ self.assertEqual(result['repo']['private'], False)
+ self.assertEqual(result['repo']['description'], None)
+
+ @with_httmock(get_user_mock)
+ @with_httmock(get_repo_notfound_mock)
+ @with_httmock(create_new_user_repo_mock)
+ def test_create_new_user_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": None,
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": True,
+ "state": "present",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], True)
+ self.assertEqual(result['repo']['private'], True)
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_repo_mock)
+ @with_httmock(patch_repo_mock)
+ def test_patch_existing_org_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": True,
+ "state": "present",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], True)
+ self.assertEqual(result['repo']['private'], True)
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_private_repo_mock)
+ def test_idempotency_existing_org_private_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": None,
+ "private": None,
+ "state": "present",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], False)
+ self.assertEqual(result['repo']['private'], True)
+ self.assertEqual(result['repo']['description'], 'This your first repo!')
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_repo_mock)
+ @with_httmock(delete_repo_mock)
+ def test_delete_org_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": False,
+ "state": "absent",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], True)
+
+ @with_httmock(get_user_mock)
+ @with_httmock(get_repo_mock)
+ @with_httmock(delete_repo_mock)
+ def test_delete_user_repo(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": None,
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": False,
+ "state": "absent",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], True)
+
+ @with_httmock(get_orgs_mock)
+ @with_httmock(get_repo_notfound_mock)
+ @with_httmock(delete_repo_notfound_mock)
+ def test_delete_org_repo_notfound(self):
+ result = github_repo.run_module({
+ 'username': None,
+ 'password': None,
+ "access_token": "mytoken",
+ "organization": "MyOrganization",
+ "name": "myrepo",
+ "description": "Just for fun",
+ "private": True,
+ "state": "absent",
+ "api_url": "https://api.github.com",
+ "force_defaults": False,
+ })
+ self.assertEqual(result['changed'], False)
+
+
+if __name__ == "__main__":
+ unittest.main()
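
The *_mock handlers stacked on these tests use httmock's decorator API; the actual
handlers are defined earlier in the file (not shown here). As a minimal,
self-contained sketch of the pattern (the handler name, URL, and payload below are
illustrative, not the collection's actual mocks):

    from httmock import response, urlmatch, with_httmock

    @urlmatch(netloc=r"api\.github\.com", path=r"/orgs/MyOrganization")
    def get_org_mock_example(url, request):
        # httmock JSON-encodes dict bodies and returns a fake requests response.
        return response(200, {"login": "MyOrganization"},
                        {"content-type": "application/json"})

    @with_httmock(get_org_mock_example)
    def fetch_org():
        import requests
        return requests.get("https://api.github.com/orgs/MyOrganization").json()
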
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_deploy_key.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_deploy_key.py
new file mode 100644
index 000000000..3e4dc5856
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_deploy_key.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_deploy_key import GitLabDeployKey
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_project, resp_find_project_deploy_key,
+ resp_create_project_deploy_key, resp_delete_project_deploy_key)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import ProjectKey
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_project = _dummy
+ resp_find_project_deploy_key = _dummy
+ resp_create_project_deploy_key = _dummy
+ resp_delete_project_deploy_key = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabDeployKey(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabDeployKey, self).setUp()
+
+ self.moduleUtil = GitLabDeployKey(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ def test_deploy_key_exist(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ rvalue = self.moduleUtil.exists_deploy_key(project, "Public key")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.exists_deploy_key(project, "Private key")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_create_project_deploy_key)
+ def test_create_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ deploy_key = self.moduleUtil.create_deploy_key(project, {"title": "Public key",
+ "key": "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM"
+ "4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxc"
+ "KDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4soW6NUlfD"
+ "zpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0="})
+
+ self.assertEqual(type(deploy_key), ProjectKey)
+ self.assertEqual(deploy_key.title, "Public key")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ @with_httmock(resp_create_project_deploy_key)
+ def test_update_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+ deploy_key = self.moduleUtil.find_deploy_key(project, "Public key")
+
+ changed, newDeploy_key = self.moduleUtil.update_deploy_key(deploy_key, {"title": "Private key"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newDeploy_key), ProjectKey)
+ self.assertEqual(newDeploy_key.title, "Private key")
+
+ changed, newDeploy_key = self.moduleUtil.update_deploy_key(deploy_key, {"title": "Private key"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newDeploy_key.title, "Private key")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_deploy_key)
+ @with_httmock(resp_delete_project_deploy_key)
+ def test_delete_deploy_key(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ self.moduleUtil.exists_deploy_key(project, "Public key")
+
+ rvalue = self.moduleUtil.delete_deploy_key()
+
+ self.assertEqual(rvalue, None)
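
Each of the GitLab test modules below guards its optional imports with the
try/except + pytestmark pattern seen above: if an import fails, every test in the
module is skipped instead of erroring at collection time. A condensed sketch of
the idiom, assuming only pytest is installed:

    import pytest

    pytestmark = []
    try:
        import gitlab  # the optional hard dependency
    except ImportError:
        # Skip the whole module rather than fail during collection; the
        # placeholder keeps later module-level references parseable.
        pytestmark.append(pytest.mark.skip("python-gitlab is required for these tests"))
        gitlab = None
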
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_group.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_group.py
new file mode 100644
index 000000000..230c47030
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_group.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_group import GitLabGroup
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_group, resp_get_missing_group, resp_create_group,
+ resp_create_subgroup, resp_delete_group, resp_find_group_project)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Group
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_group = _dummy
+ resp_get_missing_group = _dummy
+ resp_create_group = _dummy
+ resp_create_subgroup = _dummy
+ resp_delete_group = _dummy
+ resp_find_group_project = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabGroup(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabGroup, self).setUp()
+
+ self.moduleUtil = GitLabGroup(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_group)
+ def test_exist_group(self):
+ rvalue = self.moduleUtil.exists_group(1)
+
+ self.assertEqual(rvalue, True)
+
+ @with_httmock(resp_get_missing_group)
+    def test_exist_group_missing(self):
+ rvalue = self.moduleUtil.exists_group(1)
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_create_group)
+ def test_create_group(self):
+ group = self.moduleUtil.create_group({'name': "Foobar Group",
+ 'path': "foo-bar",
+ 'description': "An interesting group",
+ 'project_creation_level': "developer",
+ 'subgroup_creation_level': "maintainer",
+ 'require_two_factor_authentication': True,
+ })
+
+ self.assertEqual(type(group), Group)
+ self.assertEqual(group.name, "Foobar Group")
+ self.assertEqual(group.path, "foo-bar")
+ self.assertEqual(group.description, "An interesting group")
+ self.assertEqual(group.project_creation_level, "developer")
+ self.assertEqual(group.subgroup_creation_level, "maintainer")
+ self.assertEqual(group.require_two_factor_authentication, True)
+ self.assertEqual(group.id, 1)
+
+ @with_httmock(resp_create_subgroup)
+ def test_create_subgroup(self):
+ group = self.moduleUtil.create_group({'name': "BarFoo Group",
+ 'path': "bar-foo",
+ 'parent_id': 1,
+ 'project_creation_level': "noone",
+ 'require_two_factor_authentication': True,
+ })
+
+ self.assertEqual(type(group), Group)
+ self.assertEqual(group.name, "BarFoo Group")
+ self.assertEqual(group.full_path, "foo-bar/bar-foo")
+ self.assertEqual(group.project_creation_level, "noone")
+ self.assertEqual(group.require_two_factor_authentication, True)
+ self.assertEqual(group.id, 2)
+ self.assertEqual(group.parent_id, 1)
+
+ @with_httmock(resp_get_group)
+ def test_update_group(self):
+ group = self.gitlab_instance.groups.get(1)
+ changed, newGroup = self.moduleUtil.update_group(group, {'name': "BarFoo Group",
+ 'visibility': "private",
+ 'project_creation_level': "maintainer",
+ 'require_two_factor_authentication': True,
+ })
+
+ self.assertEqual(changed, True)
+ self.assertEqual(newGroup.name, "BarFoo Group")
+ self.assertEqual(newGroup.visibility, "private")
+ self.assertEqual(newGroup.project_creation_level, "maintainer")
+ self.assertEqual(newGroup.require_two_factor_authentication, True)
+
+ changed, newGroup = self.moduleUtil.update_group(group, {'name': "BarFoo Group"})
+
+ self.assertEqual(changed, False)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_find_group_project)
+ @with_httmock(resp_delete_group)
+ def test_delete_group(self):
+ self.moduleUtil.exists_group(1)
+
+ print(self.moduleUtil.group_object.projects)
+
+ rvalue = self.moduleUtil.delete_group()
+
+ self.assertEqual(rvalue, None)
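
GitlabModuleTestCase comes from the shared .gitlab test helper (not shown here);
it evidently wires up a real python-gitlab client whose HTTP traffic httmock then
intercepts. A rough, self-contained sketch of that wiring, with a hypothetical
handler and payload:

    import unittest

    import gitlab
    from httmock import HTTMock, response, urlmatch

    @urlmatch(path=r".*/projects/1$")
    def resp_get_project_example(url, request):  # hypothetical handler
        return response(200, {"id": 1, "name": "Diaspora Client", "path": "diaspora-client"},
                        {"content-type": "application/json"})

    class ExampleGitlabCase(unittest.TestCase):
        def setUp(self):
            # A real client; httmock patches the underlying requests transport.
            self.gitlab_instance = gitlab.Gitlab("http://localhost", private_token="token", api_version="4")

        def test_get_project(self):
            with HTTMock(resp_get_project_example):
                project = self.gitlab_instance.projects.get(1)
            self.assertEqual(project.name, "Diaspora Client")
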
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_hook.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_hook.py
new file mode 100644
index 000000000..b9c72e19e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_hook.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_hook import GitLabHook
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_project, resp_find_project_hook,
+ resp_create_project_hook, resp_delete_project_hook)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import ProjectHook
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_project = _dummy
+ resp_find_project_hook = _dummy
+ resp_create_project_hook = _dummy
+ resp_delete_project_hook = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabHook(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabHook, self).setUp()
+
+ self.moduleUtil = GitLabHook(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ def test_hook_exist(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ rvalue = self.moduleUtil.exists_hook(project, "http://example.com/hook")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.exists_hook(project, "http://gitlab.com/hook")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_create_project_hook)
+ def test_create_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ hook = self.moduleUtil.create_hook(project, {"url": "http://example.com/hook"})
+
+ self.assertEqual(type(hook), ProjectHook)
+ self.assertEqual(hook.url, "http://example.com/hook")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ def test_update_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+ hook = self.moduleUtil.find_hook(project, "http://example.com/hook")
+
+ changed, newHook = self.moduleUtil.update_hook(hook, {"url": "http://gitlab.com/hook"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newHook), ProjectHook)
+ self.assertEqual(newHook.url, "http://gitlab.com/hook")
+
+ changed, newHook = self.moduleUtil.update_hook(hook, {"url": "http://gitlab.com/hook"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newHook.url, "http://gitlab.com/hook")
+
+ @with_httmock(resp_get_project)
+ @with_httmock(resp_find_project_hook)
+ @with_httmock(resp_delete_project_hook)
+ def test_delete_hook(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ self.moduleUtil.exists_hook(project, "http://example.com/hook")
+
+ rvalue = self.moduleUtil.delete_hook()
+
+ self.assertEqual(rvalue, None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_project.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_project.py
new file mode 100644
index 000000000..397f79bcb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_project.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_project import GitLabProject
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_get_group, resp_get_project_by_name, resp_create_project,
+ resp_get_project, resp_delete_project, resp_get_user)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Project
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_get_group = _dummy
+ resp_get_project_by_name = _dummy
+ resp_create_project = _dummy
+ resp_get_project = _dummy
+ resp_delete_project = _dummy
+ resp_get_user = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabProject(GitlabModuleTestCase):
+ @with_httmock(resp_get_user)
+ def setUp(self):
+ super(TestGitlabProject, self).setUp()
+
+ self.gitlab_instance.user = self.gitlab_instance.users.get(1)
+ self.moduleUtil = GitLabProject(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_project_by_name)
+ def test_project_exist(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ rvalue = self.moduleUtil.exists_project(group, "diaspora-client")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.exists_project(group, "missing-project")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_create_project)
+ def test_create_project(self):
+ group = self.gitlab_instance.groups.get(1)
+ project = self.moduleUtil.create_project(group, {"name": "Diaspora Client", "path": "diaspora-client", "namespace_id": group.id})
+
+ self.assertEqual(type(project), Project)
+ self.assertEqual(project.name, "Diaspora Client")
+
+ @with_httmock(resp_get_project)
+ def test_update_project(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newProject), Project)
+ self.assertEqual(newProject.name, "New Name")
+
+ changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newProject.name, "New Name")
+
+ @with_httmock(resp_get_project)
+ def test_update_project_merge_method(self):
+ project = self.gitlab_instance.projects.get(1)
+
+ # merge_method should be 'merge' by default
+ self.assertEqual(project.merge_method, "merge")
+
+ changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name", "merge_method": "rebase_merge"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newProject), Project)
+ self.assertEqual(newProject.name, "New Name")
+ self.assertEqual(newProject.merge_method, "rebase_merge")
+
+ changed, newProject = self.moduleUtil.update_project(project, {"name": "New Name", "merge_method": "rebase_merge"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newProject.name, "New Name")
+ self.assertEqual(newProject.merge_method, "rebase_merge")
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_project_by_name)
+ @with_httmock(resp_delete_project)
+ def test_delete_project(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ self.moduleUtil.exists_project(group, "diaspora-client")
+
+ rvalue = self.moduleUtil.delete_project()
+
+ self.assertEqual(rvalue, None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_protected_branch.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_protected_branch.py
new file mode 100644
index 000000000..1162d8a35
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_protected_branch.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.general.plugins.modules.gitlab_protected_branch import GitlabProtectedBranch
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement, python_gitlab_module_version,
+ python_gitlab_version_match_requirement,
+ resp_get_protected_branch, resp_get_project_by_name,
+ resp_get_protected_branch_not_exist,
+ resp_delete_protected_branch, resp_get_user)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Project # noqa: F401, pylint: disable=unused-import
+ gitlab_req_version = python_gitlab_version_match_requirement()
+ gitlab_module_version = python_gitlab_module_version()
+ if LooseVersion(gitlab_module_version) < LooseVersion(gitlab_req_version):
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing (Wrong version)"))
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabProtectedBranch(GitlabModuleTestCase):
+ @with_httmock(resp_get_project_by_name)
+ @with_httmock(resp_get_user)
+ def setUp(self):
+ super(TestGitlabProtectedBranch, self).setUp()
+
+ self.gitlab_instance.user = self.gitlab_instance.users.get(1)
+ self.moduleUtil = GitlabProtectedBranch(module=self.mock_module, project="foo-bar/diaspora-client", gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_get_protected_branch)
+ def test_protected_branch_exist(self):
+ rvalue = self.moduleUtil.protected_branch_exist(name="master")
+ self.assertEqual(rvalue.name, "master")
+
+ @with_httmock(resp_get_protected_branch_not_exist)
+ def test_protected_branch_exist_not_exist(self):
+ rvalue = self.moduleUtil.protected_branch_exist(name="master")
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_protected_branch)
+ def test_compare_protected_branch(self):
+ rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="maintainer", push_access_level="maintainer")
+ self.assertEqual(rvalue, True)
+
+ @with_httmock(resp_get_protected_branch)
+ def test_compare_protected_branch_different_settings(self):
+ rvalue = self.moduleUtil.compare_protected_branch(name="master", merge_access_levels="developer", push_access_level="maintainer")
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_get_protected_branch)
+ @with_httmock(resp_delete_protected_branch)
+ def test_delete_protected_branch(self):
+ rvalue = self.moduleUtil.delete_protected_branch(name="master")
+ self.assertEqual(rvalue, None)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_runner.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_runner.py
new file mode 100644
index 000000000..987659e9c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_runner.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import gitlab
+from ansible_collections.community.general.plugins.module_utils.version import LooseVersion
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_runner import GitLabRunner
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (FakeAnsibleModule,
+ GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_find_runners_all, resp_find_runners_list,
+ resp_find_project_runners, resp_find_group_runners,
+ resp_get_runner,
+ resp_create_runner, resp_delete_runner,
+ resp_get_project_by_name, resp_get_group_by_name)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import Runner
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+    GitlabModuleTestCase = object
+    FakeAnsibleModule = object
+    resp_find_runners_all = _dummy
+    resp_find_runners_list = _dummy
+    resp_find_project_runners = _dummy
+    resp_find_group_runners = _dummy
+    resp_get_runner = _dummy
+    resp_create_runner = _dummy
+    resp_delete_runner = _dummy
+    resp_get_project_by_name = _dummy
+    resp_get_group_by_name = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabRunner(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabRunner, self).setUp()
+
+ self.module_util_all = GitLabRunner(module=FakeAnsibleModule({"owned": False}), gitlab_instance=self.gitlab_instance)
+ self.module_util_owned = GitLabRunner(module=FakeAnsibleModule({"owned": True}), gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_find_runners_all)
+ @with_httmock(resp_get_runner)
+ def test_runner_exist_all(self):
+ rvalue = self.module_util_all.exists_runner("test-1-20150125")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.module_util_all.exists_runner("test-3-00000000")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_find_runners_list)
+ @with_httmock(resp_get_runner)
+ def test_runner_exist_owned(self):
+ rvalue = self.module_util_owned.exists_runner("test-1-20201214")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.module_util_owned.exists_runner("test-3-00000000")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_find_project_runners)
+ @with_httmock(resp_get_runner)
+ @with_httmock(resp_get_project_by_name)
+ def test_project_runner_exist(self):
+ gitlab_project = self.gitlab_instance.projects.get('foo-bar/diaspora-client')
+ module_util = GitLabRunner(module=FakeAnsibleModule(), gitlab_instance=self.gitlab_instance, project=gitlab_project)
+
+ rvalue = module_util.exists_runner("test-1-20220210")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = module_util.exists_runner("test-3-00000000")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_find_group_runners)
+ @with_httmock(resp_get_group_by_name)
+ @with_httmock(resp_get_runner)
+    @pytest.mark.skipif(LooseVersion(gitlab.__version__) < LooseVersion("2.3.0"), reason="requires python-gitlab >= 2.3.0")
+ def test_group_runner_exist(self):
+ gitlab_group = self.gitlab_instance.groups.get('foo-bar')
+ module_util = GitLabRunner(module=FakeAnsibleModule(), gitlab_instance=self.gitlab_instance, group=gitlab_group)
+
+ rvalue = module_util.exists_runner("test-3-20220210")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = module_util.exists_runner("test-3-00000000")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_create_runner)
+ def test_create_runner(self):
+ runner = self.module_util_all.create_runner({"token": "token", "description": "test-1-20150125"})
+
+ self.assertEqual(type(runner), Runner)
+ self.assertEqual(runner.description, "test-1-20150125")
+
+ @with_httmock(resp_find_runners_all)
+ @with_httmock(resp_get_runner)
+ def test_update_runner(self):
+ runner = self.module_util_all.find_runner("test-1-20150125")
+
+ changed, newRunner = self.module_util_all.update_runner(runner, {"description": "Runner description"})
+
+ self.assertEqual(changed, True)
+ self.assertEqual(type(newRunner), Runner)
+ self.assertEqual(newRunner.description, "Runner description")
+
+ changed, newRunner = self.module_util_all.update_runner(runner, {"description": "Runner description"})
+
+ self.assertEqual(changed, False)
+ self.assertEqual(newRunner.description, "Runner description")
+
+ @with_httmock(resp_find_runners_all)
+ @with_httmock(resp_get_runner)
+ @with_httmock(resp_delete_runner)
+ def test_delete_runner(self):
+ self.module_util_all.exists_runner("test-1-20150125")
+
+ rvalue = self.module_util_all.delete_runner()
+
+ self.assertEqual(rvalue, None)
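
The skipif gate on test_group_runner_exist compares versions with LooseVersion
rather than plain strings, since string comparison orders versions
lexicographically. A two-line illustration:

    from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

    assert ("10.0.0" < "2.3.0") is True                                 # lexicographic: wrong
    assert (LooseVersion("10.0.0") < LooseVersion("2.3.0")) is False    # numeric-aware: correct
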
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_user.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_user.py
new file mode 100644
index 000000000..6dd2fce1d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_gitlab_user.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.gitlab_user import GitLabUser
+
+
+def _dummy(x):
+ """Dummy function. Only used as a placeholder for toplevel definitions when the test is going
+ to be skipped anyway"""
+ return x
+
+
+pytestmark = []
+try:
+ from .gitlab import (GitlabModuleTestCase,
+ python_version_match_requirement,
+ resp_find_user, resp_get_user, resp_get_user_keys,
+ resp_create_user_keys, resp_create_user, resp_delete_user,
+ resp_get_member, resp_get_group, resp_add_member,
+                         resp_update_member)
+
+ # GitLab module requirements
+ if python_version_match_requirement():
+ from gitlab.v4.objects import User
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load gitlab module required for testing"))
+ # Need to set these to something so that we don't fail when parsing
+ GitlabModuleTestCase = object
+ resp_find_user = _dummy
+ resp_get_user = _dummy
+ resp_get_user_keys = _dummy
+ resp_create_user_keys = _dummy
+ resp_create_user = _dummy
+ resp_delete_user = _dummy
+ resp_get_member = _dummy
+ resp_get_group = _dummy
+ resp_add_member = _dummy
+ resp_update_member = _dummy
+
+# Unit tests requirements
+try:
+ from httmock import with_httmock # noqa
+except ImportError:
+ pytestmark.append(pytest.mark.skip("Could not load httmock module required for testing"))
+ with_httmock = _dummy
+
+
+class TestGitlabUser(GitlabModuleTestCase):
+ def setUp(self):
+ super(TestGitlabUser, self).setUp()
+
+ self.moduleUtil = GitLabUser(module=self.mock_module, gitlab_instance=self.gitlab_instance)
+
+ @with_httmock(resp_find_user)
+ def test_exist_user(self):
+ rvalue = self.moduleUtil.exists_user("john_smith")
+
+ self.assertEqual(rvalue, True)
+
+ rvalue = self.moduleUtil.exists_user("paul_smith")
+
+ self.assertEqual(rvalue, False)
+
+ @with_httmock(resp_find_user)
+ def test_find_user(self):
+ user = self.moduleUtil.find_user("john_smith")
+
+ self.assertEqual(type(user), User)
+ self.assertEqual(user.name, "John Smith")
+ self.assertEqual(user.id, 1)
+
+ @with_httmock(resp_create_user)
+ def test_create_user(self):
+ user = self.moduleUtil.create_user({'email': 'john@example.com', 'password': 's3cur3s3cr3T',
+ 'username': 'john_smith', 'name': 'John Smith'})
+ self.assertEqual(type(user), User)
+ self.assertEqual(user.name, "John Smith")
+ self.assertEqual(user.id, 1)
+
+ @with_httmock(resp_get_user)
+ def test_update_user(self):
+ user = self.gitlab_instance.users.get(1)
+
+ changed, newUser = self.moduleUtil.update_user(
+ user,
+ {'name': {'value': "Jack Smith"}, "is_admin": {'value': "true", 'setter': 'admin'}}, {}
+ )
+
+ self.assertEqual(changed, True)
+ self.assertEqual(newUser.name, "Jack Smith")
+ self.assertEqual(newUser.admin, "true")
+
+ changed, newUser = self.moduleUtil.update_user(user, {'name': {'value': "Jack Smith"}}, {})
+
+ self.assertEqual(changed, False)
+
+ changed, newUser = self.moduleUtil.update_user(
+ user,
+ {}, {
+ 'skip_reconfirmation': {'value': True},
+ 'password': {'value': 'super_secret-super_secret'},
+ }
+ )
+
+        # note: uncheckable parameters don't set the changed state
+ self.assertEqual(changed, False)
+ self.assertEqual(newUser.skip_reconfirmation, True)
+ self.assertEqual(newUser.password, 'super_secret-super_secret')
+
+ @with_httmock(resp_find_user)
+ @with_httmock(resp_delete_user)
+ def test_delete_user(self):
+ self.moduleUtil.exists_user("john_smith")
+ rvalue = self.moduleUtil.delete_user()
+
+ self.assertEqual(rvalue, None)
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_get_user_keys)
+ def test_sshkey_exist(self):
+ user = self.gitlab_instance.users.get(1)
+
+ exist = self.moduleUtil.ssh_key_exists(user, "Public key")
+ self.assertEqual(exist, True)
+
+ notExist = self.moduleUtil.ssh_key_exists(user, "Private key")
+ self.assertEqual(notExist, False)
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_create_user_keys)
+ @with_httmock(resp_get_user_keys)
+ def test_create_sshkey(self):
+ user = self.gitlab_instance.users.get(1)
+
+ rvalue = self.moduleUtil.add_ssh_key_to_user(user, {
+ 'name': "Public key",
+ 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJe"
+ "jgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9wa++Oi7Qkr8prgHc4"
+ "soW6NUlfDzpvZK2H5E7eQaSeP3SAwGmQKUFHCddNaP0L+hM7zhFNzjFvpaMgJw0=",
+ 'expires_at': ""})
+ self.assertEqual(rvalue, False)
+
+ rvalue = self.moduleUtil.add_ssh_key_to_user(user, {
+ 'name': "Private key",
+ 'file': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDA1YotVDm2mAyk2tPt4E7AHm01sS6JZmcU"
+ "dRuSuA5zszUJzYPPUSRAX3BCgTqLqYx//UuVncK7YqLVSbbwjKR2Ez5lISgCnVfLVEXzwhv+"
+ "xawxKWmI7hJ5S0tOv6MJ+IxyTa4xcKwJTwB86z22n9fVOQeJTR2dSOH1WJrf0PvRk+KVNY2j"
+ "TiGHTi9AIjLnyD/jWRpOgtdfkLRc8EzAWrWlgNmH2WOKBw6za0az6XoG75obUdFVdW3qcD0x"
+ "c809OHLi7FDf+E7U4wiZJCFuUizMeXyuK/SkaE1aee4Qp5R4dxTR4TP9M1XAYkf+kF0W9srZ+mhF069XD/zhUPJsvwEF",
+ 'expires_at': "2027-01-01"})
+ self.assertEqual(rvalue, True)
+
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_member)
+ def test_find_member(self):
+ group = self.gitlab_instance.groups.get(1)
+
+ user = self.moduleUtil.find_member(group, 1)
+ self.assertEqual(user.username, "raymond_smith")
+
+ @with_httmock(resp_get_user)
+ @with_httmock(resp_get_group)
+ @with_httmock(resp_get_member)
+ @with_httmock(resp_add_member)
+ @with_httmock(resp_update_member)
+ def test_assign_user_to_group(self):
+ group = self.gitlab_instance.groups.get(1)
+ user = self.gitlab_instance.users.get(1)
+
+ rvalue = self.moduleUtil.assign_user_to_group(user, group.id, "developer")
+ self.assertEqual(rvalue, False)
+
+ rvalue = self.moduleUtil.assign_user_to_group(user, group.id, "guest")
+ self.assertEqual(rvalue, True)
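
test_update_user above exercises a two-bucket update contract: "checkable"
arguments are compared against the user's current state and drive the changed
flag (an optional 'setter' maps the argument to the attribute name, e.g.
is_admin -> admin), while "uncheckable" arguments such as passwords cannot be
read back, so they are always written but never reported as a change on their
own. A rough sketch of that contract (hypothetical, not the module's
implementation):

    def update_user_sketch(user, checkable, uncheckable):
        changed = False
        for arg, spec in checkable.items():
            attr = spec.get('setter', arg)
            if getattr(user, attr, None) != spec['value']:
                setattr(user, attr, spec['value'])
                changed = True
        for arg, spec in uncheckable.items():
            setattr(user, arg, spec['value'])  # write-only: no comparison possible
        return changed, user
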
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_hana_query.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_hana_query.py
new file mode 100644
index 000000000..db06e4cef
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_hana_query.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber (@rainerleber) <rainerleber@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import hana_query
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+ set_module_args,
+)
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+
+
+def get_bin_path(*args, **kwargs):
+ """Function to return path of hdbsql"""
+ return "/usr/sap/HDB/HDB01/exe/hdbsql"
+
+
+class TestHanaQuery(ModuleTestCase):
+ """Main class for testing hana_query module."""
+
+ def setUp(self):
+ """Setup."""
+        super(TestHanaQuery, self).setUp()
+ self.module = hana_query
+ self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+ self.mock_get_bin_path.start()
+ self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
+
+ def tearDown(self):
+ """Teardown."""
+        super(TestHanaQuery, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing."""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_hana_query(self):
+ """Check that result is processed."""
+ set_module_args({
+ 'sid': "HDB",
+ 'instance': "01",
+ 'encrypted': False,
+ 'host': "localhost",
+ 'user': "SYSTEM",
+ 'password': "1234Qwer",
+ 'database': "HDB",
+ 'query': "SELECT * FROM users;"
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', ''
+ with self.assertRaises(AnsibleExitJson) as result:
+ hana_query.main()
+ self.assertEqual(result.exception.args[0]['query_result'], [[
+ {'username': 'testuser', 'name': 'test user'},
+ {'username': 'myuser', 'name': 'my user'},
+ ]])
+ self.assertEqual(run_command.call_count, 1)
+
+ def test_hana_userstore_query(self):
+ """Check that result is processed with userstore."""
+ set_module_args({
+ 'sid': "HDB",
+ 'instance': "01",
+ 'encrypted': False,
+ 'host': "localhost",
+ 'user': "SYSTEM",
+ 'userstore': True,
+ 'database': "HDB",
+ 'query': "SELECT * FROM users;"
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, 'username,name\n testuser,test user \n myuser, my user \n', ''
+ with self.assertRaises(AnsibleExitJson) as result:
+ hana_query.main()
+ self.assertEqual(result.exception.args[0]['query_result'], [[
+ {'username': 'testuser', 'name': 'test user'},
+ {'username': 'myuser', 'name': 'my user'},
+ ]])
+ self.assertEqual(run_command.call_count, 1)
+
+ def test_hana_failed_no_passwd(self):
+ """Check that result is failed with no password."""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'sid': "HDB",
+ 'instance': "01",
+ 'encrypted': False,
+ 'host': "localhost",
+ 'user': "SYSTEM",
+ 'database': "HDB",
+ 'query': "SELECT * FROM users;"
+ })
+ self.module.main()
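
The mocked run_command output above is hdbsql's CSV-style stdout, which the
module turns into the asserted list of row dicts with surrounding whitespace
stripped. A minimal sketch of that parsing step (not the module's actual
implementation):

    def parse_hdbsql_output(out):
        lines = [line.strip() for line in out.splitlines() if line.strip()]
        header = [h.strip() for h in lines[0].split(",")]
        return [dict(zip(header, (f.strip() for f in row.split(","))))
                for row in lines[1:]]

    rows = parse_hdbsql_output('username,name\n testuser,test user \n myuser, my user \n')
    assert rows == [{'username': 'testuser', 'name': 'test user'},
                    {'username': 'myuser', 'name': 'my user'}]
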
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew.py
new file mode 100644
index 000000000..f849b433d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew.py
@@ -0,0 +1,24 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.homebrew import Homebrew
+
+
+class TestHomebrewModule(unittest.TestCase):
+
+ def setUp(self):
+ self.brew_app_names = [
+ 'git-ssh',
+ 'awscli@1',
+ 'bash'
+ ]
+
+ def test_valid_package_names(self):
+ for name in self.brew_app_names:
+ self.assertTrue(Homebrew.valid_package(name))
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew_cask.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew_cask.py
new file mode 100644
index 000000000..6fcc06d97
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_homebrew_cask.py
@@ -0,0 +1,23 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.homebrew_cask import HomebrewCask
+
+
+class TestHomebrewCaskModule(unittest.TestCase):
+
+ def setUp(self):
+ self.brew_cask_names = [
+ 'visual-studio-code',
+ 'firefox'
+ ]
+
+ def test_valid_cask_names(self):
+ for name in self.brew_cask_names:
+ self.assertTrue(HomebrewCask.valid_cask(name))
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_icinga2_feature.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_icinga2_feature.py
new file mode 100644
index 000000000..23c94fad5
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_icinga2_feature.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import icinga2_feature
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+
+
+def get_bin_path(*args, **kwargs):
+ """Function to return path of icinga2 binary."""
+ return "/bin/icinga2"
+
+
+class TestIcinga2Feature(ModuleTestCase):
+ """Main class for testing icinga2_feature module."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestIcinga2Feature, self).setUp()
+ self.module = icinga2_feature
+ self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+ self.mock_get_bin_path.start()
+ self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestIcinga2Feature, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing."""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_enable_feature(self):
+ """Check that result is changed."""
+ set_module_args({
+ 'name': 'api',
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args[0][0][-1], 'api')
+
+ def test_enable_feature_with_check_mode(self):
+ """Check that result is changed in check mode."""
+ set_module_args({
+ 'name': 'api',
+ '_ansible_check_mode': True,
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
+
+ def test_disable_feature(self):
+ """Check that result is changed."""
+ set_module_args({
+ 'name': 'api',
+ 'state': 'absent'
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 2)
+ self.assertEqual(run_command.call_args[0][0][-1], 'api')
+
+ def test_disable_feature_with_check_mode(self):
+ """Check that result is changed in check mode."""
+ set_module_args({
+ 'name': 'api',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ icinga2_feature.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ self.assertEqual(run_command.call_count, 1)
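
The set_module_args/AnsibleExitJson helpers imported from utils in these module
tests follow the standard Ansible unit-test recipe: module arguments are
serialized into basic._ANSIBLE_ARGS, and exit_json/fail_json are patched to raise
exceptions the test can catch. Roughly:

    import json

    from ansible.module_utils import basic
    from ansible.module_utils.common.text.converters import to_bytes

    class AnsibleExitJson(Exception):
        """Raised in place of sys.exit() when a module calls exit_json()."""

    def set_module_args(args):
        # AnsibleModule reads its parameters from basic._ANSIBLE_ARGS.
        basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': args}))
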
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otpconfig.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otpconfig.py
new file mode 100644
index 000000000..718359a30
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otpconfig.py
@@ -0,0 +1,407 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import ipa_otpconfig
+
+
+@contextmanager
+def patch_ipa(**kwargs):
+ """Mock context manager for patching the methods in OTPConfigIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+ obj = ipa_otpconfig.OTPConfigIPAClient
+ with patch.object(obj, 'login') as mock_login:
+ with patch.object(obj, '_post_json', **kwargs) as mock_post:
+ yield mock_login, mock_post
+
+
+class TestIPAOTPConfig(ModuleTestCase):
+ def setUp(self):
+ super(TestIPAOTPConfig, self).setUp()
+ self.module = ipa_otpconfig
+
+ def _test_base(self, module_args, return_value, mock_calls, changed):
+ """Base function that's called by all the other test functions
+
+ module_args (dict):
+ Arguments passed to the module
+
+ return_value (dict):
+ Mocked return value of OTPConfigIPAClient.otpconfig_show, as returned by the IPA API.
+ This should be set to the current state. It will be changed to the desired state using the above arguments.
+ (Technically, this is the return value of _post_json, but it's only checked by otpconfig_show).
+
+ mock_calls (list/tuple of dicts):
+ List of calls made to OTPConfigIPAClient._post_json, in order.
+ _post_json is called by all of the otpconfig_* methods of the class.
+ Pass an empty list if no calls are expected.
+
+ changed (bool):
+ Whether or not the module is supposed to be marked as changed
+ """
+ set_module_args(module_args)
+
+ # Run the module
+ with patch_ipa(return_value=return_value) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify that the calls to _post_json match what is expected
+ expected_call_count = len(mock_calls)
+ if expected_call_count > 1:
+ # Convert the call dicts to unittest.mock.call instances because `assert_has_calls` only accepts them
+ converted_calls = []
+ for call_dict in mock_calls:
+ converted_calls.append(call(**call_dict))
+
+ mock_post.assert_has_calls(converted_calls)
+ self.assertEqual(len(mock_post.mock_calls), expected_call_count)
+ elif expected_call_count == 1:
+ mock_post.assert_called_once_with(**mock_calls[0])
+ else: # expected_call_count is 0
+ mock_post.assert_not_called()
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_set_all_no_adjustment(self):
+ """Set values requiring no adjustment"""
+ module_args = {
+ 'ipatokentotpauthwindow': 11,
+ 'ipatokentotpsyncwindow': 12,
+ 'ipatokenhotpauthwindow': 13,
+ 'ipatokenhotpsyncwindow': 14
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_all_aliases_no_adjustment(self):
+ """Set values requiring no adjustment on all using aliases values"""
+ module_args = {
+ 'totpauthwindow': 11,
+ 'totpsyncwindow': 12,
+ 'hotpauthwindow': 13,
+ 'hotpsyncwindow': 14
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_totp_auth_window_no_adjustment(self):
+ """Set values requiring no adjustment on totpauthwindow"""
+ module_args = {
+ 'totpauthwindow': 11
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_totp_sync_window_no_adjustment(self):
+ """Set values requiring no adjustment on totpsyncwindow"""
+ module_args = {
+ 'totpsyncwindow': 12
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_hotp_auth_window_no_adjustment(self):
+ """Set values requiring no adjustment on hotpauthwindow"""
+ module_args = {
+ 'hotpauthwindow': 13
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_hotp_sync_window_no_adjustment(self):
+ """Set values requiring no adjustment on hotpsyncwindow"""
+ module_args = {
+ 'hotpsyncwindow': 14
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_totp_auth_window(self):
+ """Set values requiring adjustment on totpauthwindow"""
+ module_args = {
+ 'totpauthwindow': 10
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_mod',
+ 'name': None,
+ 'item': {'ipatokentotpauthwindow': '10'}
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_totp_sync_window(self):
+ """Set values requiring adjustment on totpsyncwindow"""
+ module_args = {
+ 'totpsyncwindow': 10
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_mod',
+ 'name': None,
+ 'item': {'ipatokentotpsyncwindow': '10'}
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_hotp_auth_window(self):
+ """Set values requiring adjustment on hotpauthwindow"""
+ module_args = {
+ 'hotpauthwindow': 10
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_mod',
+ 'name': None,
+ 'item': {'ipatokenhotpauthwindow': '10'}
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_hotp_sync_window(self):
+ """Set values requiring adjustment on hotpsyncwindow"""
+ module_args = {
+ 'hotpsyncwindow': 10
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['11'],
+ 'ipatokentotpsyncwindow': ['12'],
+ 'ipatokenhotpauthwindow': ['13'],
+ 'ipatokenhotpsyncwindow': ['14']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_mod',
+ 'name': None,
+ 'item': {'ipatokenhotpsyncwindow': '10'}
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_set_all(self):
+ """Set values requiring adjustment on all"""
+ module_args = {
+ 'ipatokentotpauthwindow': 11,
+ 'ipatokentotpsyncwindow': 12,
+ 'ipatokenhotpauthwindow': 13,
+ 'ipatokenhotpsyncwindow': 14
+ }
+ return_value = {
+ 'ipatokentotpauthwindow': ['1'],
+ 'ipatokentotpsyncwindow': ['2'],
+ 'ipatokenhotpauthwindow': ['3'],
+ 'ipatokenhotpsyncwindow': ['4']}
+ mock_calls = (
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ },
+ {
+ 'method': 'otpconfig_mod',
+ 'name': None,
+ 'item': {'ipatokentotpauthwindow': '11',
+ 'ipatokentotpsyncwindow': '12',
+ 'ipatokenhotpauthwindow': '13',
+ 'ipatokenhotpsyncwindow': '14'}
+ },
+ {
+ 'method': 'otpconfig_show',
+ 'name': None
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_fail_post(self):
+ """Fail due to an exception raised from _post_json"""
+ set_module_args({
+ 'ipatokentotpauthwindow': 11,
+ 'ipatokentotpsyncwindow': 12,
+ 'ipatokenhotpauthwindow': 13,
+ 'ipatokenhotpsyncwindow': 14
+ })
+
+ with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE')
+
+
+if __name__ == '__main__':
+ unittest.main()
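
_test_base converts its mock_calls dicts into unittest.mock.call instances
because assert_has_calls only matches call objects. The same mechanics in
isolation:

    from unittest.mock import MagicMock, call

    m = MagicMock()
    m(method='otpconfig_show', name=None)
    m(method='otpconfig_mod', name=None, item={'ipatokentotpauthwindow': '10'})

    m.assert_has_calls([call(method='otpconfig_show', name=None),
                        call(method='otpconfig_mod', name=None,
                             item={'ipatokentotpauthwindow': '10'})])
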
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otptoken.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otptoken.py
new file mode 100644
index 000000000..c06e19c3b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_otptoken.py
@@ -0,0 +1,496 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import ipa_otptoken
+
+
+@contextmanager
+def patch_ipa(**kwargs):
+ """Mock context manager for patching the methods in OTPTokenIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+ obj = ipa_otptoken.OTPTokenIPAClient
+ with patch.object(obj, 'login') as mock_login:
+ with patch.object(obj, '_post_json', **kwargs) as mock_post:
+ yield mock_login, mock_post
+
+
+class TestIPAOTPToken(ModuleTestCase):
+ def setUp(self):
+ super(TestIPAOTPToken, self).setUp()
+ self.module = ipa_otptoken
+
+ def _test_base(self, module_args, return_value, mock_calls, changed):
+ """Base function that's called by all the other test functions
+
+ module_args (dict):
+ Arguments passed to the module
+
+ return_value (dict):
+ Mocked return value of OTPTokenIPAClient.otptoken_show, as returned by the IPA API.
+ This should be set to the current state. It will be changed to the desired state using the above arguments.
+ (Technically, this is the return value of _post_json, but it's only checked by otptoken_show).
+
+ mock_calls (list/tuple of dicts):
+ List of calls made to OTPTokenIPAClient._post_json, in order.
+ _post_json is called by all of the otptoken_* methods of the class.
+ Pass an empty list if no calls are expected.
+
+ changed (bool):
+ Whether or not the module is supposed to be marked as changed
+ """
+ set_module_args(module_args)
+
+ # Run the module
+ with patch_ipa(return_value=return_value) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify that the calls to _post_json match what is expected
+ expected_call_count = len(mock_calls)
+ if expected_call_count > 1:
+ # Convert the call dicts to unittest.mock.call instances because `assert_has_calls` only accepts them
+ converted_calls = []
+ for call_dict in mock_calls:
+ converted_calls.append(call(**call_dict))
+
+ mock_post.assert_has_calls(converted_calls)
+ self.assertEqual(len(mock_post.mock_calls), expected_call_count)
+ elif expected_call_count == 1:
+ mock_post.assert_called_once_with(**mock_calls[0])
+ else: # expected_call_count is 0
+ mock_post.assert_not_called()
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_add_new_all_default(self):
+ """Add a new OTP with all default values"""
+ module_args = {
+ 'uniqueid': 'NewToken1'
+ }
+ return_value = {}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_add',
+ 'name': 'NewToken1',
+ 'item': {'ipatokendisabled': 'FALSE',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_add_new_all_default_with_aliases(self):
+ """Add a new OTP with all default values using alias values"""
+ module_args = {
+ 'name': 'NewToken1'
+ }
+ return_value = {}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_add',
+ 'name': 'NewToken1',
+ 'item': {'ipatokendisabled': 'FALSE',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_add_new_all_specified(self):
+ """Add a new OTP with all default values"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'otptype': 'hotp',
+ 'secretkey': 'VGVzdFNlY3JldDE=',
+ 'description': 'Test description',
+ 'owner': 'pinky',
+ 'enabled': True,
+ 'notbefore': '20200101010101',
+ 'notafter': '20900101010101',
+ 'vendor': 'Acme',
+ 'model': 'ModelT',
+ 'serial': 'Number1',
+ 'state': 'present',
+ 'algorithm': 'sha256',
+ 'digits': 6,
+ 'offset': 10,
+ 'interval': 30,
+ 'counter': 30,
+ }
+ return_value = {}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_add',
+ 'name': 'NewToken1',
+ 'item': {'type': 'HOTP',
+ 'ipatokenotpkey': 'KRSXG5CTMVRXEZLUGE======',
+ 'description': 'Test description',
+ 'ipatokenowner': 'pinky',
+ 'ipatokendisabled': 'FALSE',
+ 'ipatokennotbefore': '20200101010101Z',
+ 'ipatokennotafter': '20900101010101Z',
+ 'ipatokenvendor': 'Acme',
+ 'ipatokenmodel': 'ModelT',
+ 'ipatokenserial': 'Number1',
+ 'ipatokenotpalgorithm': 'sha256',
+ 'ipatokenotpdigits': '6',
+ 'ipatokentotpclockoffset': '10',
+ 'ipatokentotptimestep': '30',
+ 'ipatokenhotpcounter': '30',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_already_existing_no_change_all_specified(self):
+ """Add a new OTP with all values specified but needing no change"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'otptype': 'hotp',
+ 'secretkey': 'VGVzdFNlY3JldDE=',
+ 'description': 'Test description',
+ 'owner': 'pinky',
+ 'enabled': True,
+ 'notbefore': '20200101010101',
+ 'notafter': '20900101010101',
+ 'vendor': 'Acme',
+ 'model': 'ModelT',
+ 'serial': 'Number1',
+ 'state': 'present',
+ 'algorithm': 'sha256',
+ 'digits': 6,
+ 'offset': 10,
+ 'interval': 30,
+ 'counter': 30,
+ }
+ return_value = {'ipatokenuniqueid': 'NewToken1',
+ 'type': 'HOTP',
+ 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}],
+ 'description': ['Test description'],
+ 'ipatokenowner': ['pinky'],
+ 'ipatokendisabled': ['FALSE'],
+ 'ipatokennotbefore': ['20200101010101Z'],
+ 'ipatokennotafter': ['20900101010101Z'],
+ 'ipatokenvendor': ['Acme'],
+ 'ipatokenmodel': ['ModelT'],
+ 'ipatokenserial': ['Number1'],
+ 'ipatokenotpalgorithm': ['sha256'],
+ 'ipatokenotpdigits': ['6'],
+ 'ipatokentotpclockoffset': ['10'],
+ 'ipatokentotptimestep': ['30'],
+ 'ipatokenhotpcounter': ['30']}
+ mock_calls = [
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ }
+ ]
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_already_existing_one_change_all_specified(self):
+ """Modify an existing OTP with one value specified needing change"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'otptype': 'hotp',
+ 'secretkey': 'VGVzdFNlY3JldDE=',
+ 'description': 'Test description',
+ 'owner': 'brain',
+ 'enabled': True,
+ 'notbefore': '20200101010101',
+ 'notafter': '20900101010101',
+ 'vendor': 'Acme',
+ 'model': 'ModelT',
+ 'serial': 'Number1',
+ 'state': 'present',
+ 'algorithm': 'sha256',
+ 'digits': 6,
+ 'offset': 10,
+ 'interval': 30,
+ 'counter': 30,
+ }
+ return_value = {'ipatokenuniqueid': 'NewToken1',
+ 'type': 'HOTP',
+ 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}],
+ 'description': ['Test description'],
+ 'ipatokenowner': ['pinky'],
+ 'ipatokendisabled': ['FALSE'],
+ 'ipatokennotbefore': ['20200101010101Z'],
+ 'ipatokennotafter': ['20900101010101Z'],
+ 'ipatokenvendor': ['Acme'],
+ 'ipatokenmodel': ['ModelT'],
+ 'ipatokenserial': ['Number1'],
+ 'ipatokenotpalgorithm': ['sha256'],
+ 'ipatokenotpdigits': ['6'],
+ 'ipatokentotpclockoffset': ['10'],
+ 'ipatokentotptimestep': ['30'],
+ 'ipatokenhotpcounter': ['30']}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_mod',
+ 'name': 'NewToken1',
+ 'item': {'description': 'Test description',
+ 'ipatokenowner': 'brain',
+ 'ipatokendisabled': 'FALSE',
+ 'ipatokennotbefore': '20200101010101Z',
+ 'ipatokennotafter': '20900101010101Z',
+ 'ipatokenvendor': 'Acme',
+ 'ipatokenmodel': 'ModelT',
+ 'ipatokenserial': 'Number1',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_already_existing_all_valid_change_all_specified(self):
+ """Modify an existing OTP with all valid values specified needing change"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'otptype': 'hotp',
+ 'secretkey': 'VGVzdFNlY3JldDE=',
+ 'description': 'New Test description',
+ 'owner': 'pinky',
+ 'enabled': False,
+ 'notbefore': '20200101010102',
+ 'notafter': '20900101010102',
+ 'vendor': 'NewAcme',
+ 'model': 'NewModelT',
+ 'serial': 'Number2',
+ 'state': 'present',
+ 'algorithm': 'sha256',
+ 'digits': 6,
+ 'offset': 10,
+ 'interval': 30,
+ 'counter': 30,
+ }
+ return_value = {'ipatokenuniqueid': 'NewToken1',
+ 'type': 'HOTP',
+ 'ipatokenotpkey': [{'__base64__': 'VGVzdFNlY3JldDE='}],
+ 'description': ['Test description'],
+ 'ipatokenowner': ['pinky'],
+ 'ipatokendisabled': ['FALSE'],
+ 'ipatokennotbefore': ['20200101010101Z'],
+ 'ipatokennotafter': ['20900101010101Z'],
+ 'ipatokenvendor': ['Acme'],
+ 'ipatokenmodel': ['ModelT'],
+ 'ipatokenserial': ['Number1'],
+ 'ipatokenotpalgorithm': ['sha256'],
+ 'ipatokenotpdigits': ['6'],
+ 'ipatokentotpclockoffset': ['10'],
+ 'ipatokentotptimestep': ['30'],
+ 'ipatokenhotpcounter': ['30']}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_mod',
+ 'name': 'NewToken1',
+ 'item': {'description': 'New Test description',
+ 'ipatokenowner': 'pinky',
+ 'ipatokendisabled': 'TRUE',
+ 'ipatokennotbefore': '20200101010102Z',
+ 'ipatokennotafter': '20900101010102Z',
+ 'ipatokenvendor': 'NewAcme',
+ 'ipatokenmodel': 'NewModelT',
+ 'ipatokenserial': 'Number2',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_delete_existing_token(self):
+ """Delete an existing OTP"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'state': 'absent'
+ }
+ return_value = {'ipatokenuniqueid': 'NewToken1',
+ 'type': 'HOTP',
+ 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}],
+ 'description': ['Test description'],
+ 'ipatokenowner': ['pinky'],
+ 'ipatokendisabled': ['FALSE'],
+ 'ipatokennotbefore': ['20200101010101Z'],
+ 'ipatokennotafter': ['20900101010101Z'],
+ 'ipatokenvendor': ['Acme'],
+ 'ipatokenmodel': ['ModelT'],
+ 'ipatokenserial': ['Number1'],
+ 'ipatokenotpalgorithm': ['sha256'],
+ 'ipatokenotpdigits': ['6'],
+ 'ipatokentotpclockoffset': ['10'],
+ 'ipatokentotptimestep': ['30'],
+ 'ipatokenhotpcounter': ['30']}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_del',
+ 'name': 'NewToken1'
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_disable_existing_token(self):
+ """Disable an existing OTP"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'otptype': 'hotp',
+ 'enabled': False
+ }
+ return_value = {'ipatokenuniqueid': 'NewToken1',
+ 'type': 'HOTP',
+ 'ipatokenotpkey': [{'__base64__': 'KRSXG5CTMVRXEZLUGE======'}],
+ 'description': ['Test description'],
+ 'ipatokenowner': ['pinky'],
+ 'ipatokendisabled': ['FALSE'],
+ 'ipatokennotbefore': ['20200101010101Z'],
+ 'ipatokennotafter': ['20900101010101Z'],
+ 'ipatokenvendor': ['Acme'],
+ 'ipatokenmodel': ['ModelT'],
+ 'ipatokenserial': ['Number1'],
+ 'ipatokenotpalgorithm': ['sha256'],
+ 'ipatokenotpdigits': ['6'],
+ 'ipatokentotpclockoffset': ['10'],
+ 'ipatokentotptimestep': ['30'],
+ 'ipatokenhotpcounter': ['30']}
+ mock_calls = (
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ },
+ {
+ 'method': 'otptoken_mod',
+ 'name': 'NewToken1',
+ 'item': {'ipatokendisabled': 'TRUE',
+ 'all': True}
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_delete_not_existing_token(self):
+ """Delete a OTP that does not exist"""
+ module_args = {
+ 'uniqueid': 'NewToken1',
+ 'state': 'absent'
+ }
+ return_value = {}
+
+ mock_calls = [
+ {
+ 'method': 'otptoken_find',
+ 'name': None,
+ 'item': {'all': True,
+ 'ipatokenuniqueid': 'NewToken1',
+ 'timelimit': '0',
+ 'sizelimit': '0'}
+ }
+ ]
+
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_fail_post(self):
+ """Fail due to an exception raised from _post_json"""
+ set_module_args({
+ 'uniqueid': 'NewToken1'
+ })
+
+ with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_pwpolicy.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_pwpolicy.py
new file mode 100644
index 000000000..b45c566fc
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ipa_pwpolicy.py
@@ -0,0 +1,614 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import ipa_pwpolicy
+
+
+@contextmanager
+def patch_ipa(**kwargs):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+ obj = ipa_pwpolicy.PwPolicyIPAClient
+ with patch.object(obj, 'login') as mock_login:
+ with patch.object(obj, '_post_json', **kwargs) as mock_post:
+ yield mock_login, mock_post
+
+
+class TestIPAPwPolicy(ModuleTestCase):
+ def setUp(self):
+ super(TestIPAPwPolicy, self).setUp()
+ self.module = ipa_pwpolicy
+
+ def _test_base(self, module_args, return_value, mock_calls, changed):
+ """Base function that's called by all the other test functions
+
+ module_args (dict):
+ Arguments passed to the module
+
+ return_value (dict):
+ Mocked return value of PwPolicyIPAClient.pwpolicy_find, as returned by the IPA API.
+ This should be set to the current state. It will be changed to the desired state using the above arguments.
+ (Technically, this is the return value of _post_json, but it's only checked by pwpolicy_find).
+ An empty dict means that the policy doesn't exist.
+
+ mock_calls (list/tuple of dicts):
+ List of calls made to PwPolicyIPAClient._post_json, in order.
+ _post_json is called by all of the pwpolicy_* methods of the class.
+ Pass an empty list if no calls are expected.
+
+ changed (bool):
+ Whether or not the module is supposed to be marked as changed
+ """
+ set_module_args(module_args)
+
+ # Run the module
+ with patch_ipa(return_value=return_value) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify that the calls to _post_json match what is expected
+ expected_call_count = len(mock_calls)
+ if expected_call_count > 1:
+ # Convert the call dicts to unittest.mock.call instances because `assert_has_calls` only accepts them
+ converted_calls = []
+ for call_dict in mock_calls:
+ converted_calls.append(call(**call_dict))
+
+ mock_post.assert_has_calls(converted_calls)
+ self.assertEqual(len(mock_post.mock_calls), expected_call_count)
+ elif expected_call_count == 1:
+ mock_post.assert_called_once_with(**mock_calls[0])
+ else: # expected_call_count is 0
+ mock_post.assert_not_called()
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_add(self):
+ """Add a new policy"""
+ module_args = {
+ 'group': 'admins',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {}
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'admins'
+ }
+ },
+ {
+ 'method': 'pwpolicy_add',
+ 'name': 'admins',
+ 'item': {
+ 'cospriority': '10',
+ 'krbmaxpwdlife': '90',
+ 'krbminpwdlife': '1',
+ 'krbpwdhistorylength': '8',
+ 'krbpwdmindiffchars': '3',
+ 'krbpwdminlength': '16',
+ 'krbpwdmaxfailure': '6',
+ 'krbpwdfailurecountinterval': '60',
+ 'krbpwdlockoutduration': '600'
+ }
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_aliases(self):
+ """Same as test_add, but uses the `name` alias for the `group` option"""
+ module_args = {
+ 'name': 'admins',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {}
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'admins'
+ }
+ },
+ {
+ 'method': 'pwpolicy_add',
+ 'name': 'admins',
+ 'item': {
+ 'cospriority': '10',
+ 'krbmaxpwdlife': '90',
+ 'krbminpwdlife': '1',
+ 'krbpwdhistorylength': '8',
+ 'krbpwdmindiffchars': '3',
+ 'krbpwdminlength': '16',
+ 'krbpwdmaxfailure': '6',
+ 'krbpwdfailurecountinterval': '60',
+ 'krbpwdlockoutduration': '600'
+ }
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_mod_different_args(self):
+ """Policy exists, but some of the args are different and need to be modified"""
+ module_args = {
+ 'group': 'sysops',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '60',
+ 'minpwdlife': '24',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '12',
+ 'maxfailcount': '8',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['sysops'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbminpwdlife': ['1'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdmindiffchars': ['3'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'krbpwdfailurecountinterval': ['60'],
+ 'krbpwdlockoutduration': ['600'],
+ 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ },
+ {
+ 'method': 'pwpolicy_mod',
+ 'name': 'sysops',
+ 'item': {
+ 'cospriority': '10',
+ 'krbmaxpwdlife': '60',
+ 'krbminpwdlife': '24',
+ 'krbpwdhistorylength': '8',
+ 'krbpwdmindiffchars': '3',
+ 'krbpwdminlength': '12',
+ 'krbpwdmaxfailure': '8',
+ 'krbpwdfailurecountinterval': '60',
+ 'krbpwdlockoutduration': '600'
+ }
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_mod_missing_args(self):
+ """Policy exists, but some of the args aren't set, so need to be added"""
+ module_args = {
+ 'group': 'sysops',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['sysops'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ },
+ {
+ 'method': 'pwpolicy_mod',
+ 'name': 'sysops',
+ 'item': {
+ 'cospriority': '10',
+ 'krbmaxpwdlife': '90',
+ 'krbminpwdlife': '1',
+ 'krbpwdhistorylength': '8',
+ 'krbpwdmindiffchars': '3',
+ 'krbpwdminlength': '16',
+ 'krbpwdmaxfailure': '6',
+ 'krbpwdfailurecountinterval': '60',
+ 'krbpwdlockoutduration': '600'
+ }
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_del(self):
+ """Policy exists, and state is absent. Needs to be deleted"""
+ module_args = {
+ 'group': 'sysops',
+ 'state': 'absent',
+ # other arguments are ignored when state is `absent`
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'historylength': '8',
+ 'minlength': '16',
+ 'maxfailcount': '6'
+ }
+ return_value = {
+ 'cn': ['sysops'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ },
+ {
+ 'method': 'pwpolicy_del',
+ 'name': 'sysops',
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_no_change(self):
+ """Policy already exists. No changes needed"""
+ module_args = {
+ 'group': 'admins',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['admins'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbminpwdlife': ['1'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdmindiffchars': ['3'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'krbpwdfailurecountinterval': ['60'],
+ 'krbpwdlockoutduration': ['600'],
+ 'dn': 'cn=admins,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'admins'
+ }
+ }
+ ]
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_del_no_change(self):
+ """Policy doesn't exist, and state is absent. No change needed"""
+ module_args = {
+ 'group': 'sysops',
+ 'state': 'absent',
+ # other arguments are ignored when state is `absent`
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'historylength': '8',
+ 'minlength': '16',
+ 'maxfailcount': '6'
+ }
+ return_value = {}
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ }
+ ]
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_global(self):
+ """Modify the global policy"""
+ module_args = {
+ 'maxpwdlife': '60',
+ 'minpwdlife': '24',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '12',
+ 'maxfailcount': '8',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['global_policy'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbminpwdlife': ['1'],
+ 'krbpwdmindiffchars': ['3'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'krbpwdfailurecountinterval': ['60'],
+ 'krbpwdlockoutduration': ['600'],
+ 'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = (
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'global_policy'
+ }
+ },
+ {
+ 'method': 'pwpolicy_mod',
+ 'name': None,
+ 'item': {
+ 'krbmaxpwdlife': '60',
+ 'krbminpwdlife': '24',
+ 'krbpwdhistorylength': '8',
+ 'krbpwdmindiffchars': '3',
+ 'krbpwdminlength': '12',
+ 'krbpwdmaxfailure': '8',
+ 'krbpwdfailurecountinterval': '60',
+ 'krbpwdlockoutduration': '600'
+ }
+ }
+ )
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_global_no_change(self):
+ """Global policy already matches the given arguments. No change needed"""
+ module_args = {
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['global_policy'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbminpwdlife': ['1'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdmindiffchars': ['3'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'krbpwdfailurecountinterval': ['60'],
+ 'krbpwdlockoutduration': ['600'],
+ 'dn': 'cn=global_policy,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'global_policy'
+ }
+ }
+ ]
+ changed = False
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_check_add(self):
+ """Add a new policy in check mode. pwpolicy_add shouldn't be called"""
+ module_args = {
+ '_ansible_check_mode': True,
+ 'group': 'admins',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '90',
+ 'minpwdlife': '1',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '16',
+ 'maxfailcount': '6',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {}
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'admins'
+ }
+ }
+ ]
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_check_mod(self):
+ """Modify a policy in check mode. pwpolicy_mod shouldn't be called"""
+ module_args = {
+ '_ansible_check_mode': True,
+ 'group': 'sysops',
+ 'state': 'present',
+ 'priority': '10',
+ 'maxpwdlife': '60',
+ 'minpwdlife': '24',
+ 'historylength': '8',
+ 'minclasses': '3',
+ 'minlength': '12',
+ 'maxfailcount': '8',
+ 'failinterval': '60',
+ 'lockouttime': '600'
+ }
+ return_value = {
+ 'cn': ['sysops'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbminpwdlife': ['1'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdmindiffchars': ['3'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'krbpwdfailurecountinterval': ['60'],
+ 'krbpwdlockoutduration': ['600'],
+ 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ }
+ ]
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_check_del(self):
+ """Delete a policy in check mode. pwpolicy_del shouldn't be called"""
+ module_args = {
+ '_ansible_check_mode': True,
+ 'group': 'sysops',
+ 'state': 'absent'
+ }
+ return_value = {
+ 'cn': ['sysops'],
+ 'cospriority': ['10'],
+ 'krbmaxpwdlife': ['90'],
+ 'krbpwdhistorylength': ['8'],
+ 'krbpwdminlength': ['16'],
+ 'krbpwdmaxfailure': ['6'],
+ 'dn': 'cn=sysops,cn=EXAMPLE.COM,cn=kerberos,dc=example,dc=com',
+ 'objectclass': ['top', 'nscontainer', 'krbpwdpolicy']
+ }
+ mock_calls = [
+ {
+ 'method': 'pwpolicy_find',
+ 'name': None,
+ 'item': {
+ 'all': True,
+ 'cn': 'sysops'
+ }
+ }
+ ]
+ changed = True
+
+ self._test_base(module_args, return_value, mock_calls, changed)
+
+ def test_fail_post(self):
+ """Fail due to an exception raised from _post_json"""
+ set_module_args({
+ 'group': 'admins',
+ 'state': 'absent'
+ })
+
+ with patch_ipa(side_effect=Exception('ERROR MESSAGE')) as (mock_login, mock_post):
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(exec_info.exception.args[0]['msg'], 'ERROR MESSAGE')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_java_keystore.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_java_keystore.py
new file mode 100644
index 000000000..b2e70404a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_java_keystore.py
@@ -0,0 +1,422 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018, Ansible Project
+# Copyright (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat.mock import Mock
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules.java_keystore import JavaKeystore
+
+
+module_argument_spec = dict(
+ name=dict(type='str', required=True),
+ dest=dict(type='path', required=True),
+ certificate=dict(type='str', no_log=True),
+ certificate_path=dict(type='path'),
+ private_key=dict(type='str', no_log=True),
+ private_key_path=dict(type='path', no_log=False),
+ private_key_passphrase=dict(type='str', no_log=True),
+ password=dict(type='str', required=True, no_log=True),
+ ssl_backend=dict(type='str', default='openssl', choices=['openssl', 'cryptography']),
+ keystore_type=dict(type='str', choices=['jks', 'pkcs12']),
+ force=dict(type='bool', default=False),
+)
+module_supports_check_mode = True
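+# Each pair below is passed as both `mutually_exclusive` and `required_one_of`,
+# so exactly one option of each pair must be supplied.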
+module_choose_between = (['certificate', 'certificate_path'],
+ ['private_key', 'private_key_path'])
+
+
+class TestCreateJavaKeystore(ModuleTestCase):
+ """Test the creation of a Java keystore."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestCreateJavaKeystore, self).setUp()
+
+ orig_exists = os.path.exists
+ self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_file')
+ self.mock_create_path = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_path')
+ self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type')
+ self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy')
+ self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move')
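+        # Report the keystore path as already existing while delegating every
+        # other path check to the real os.path.exists captured above.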
+ self.mock_os_path_exists = patch('os.path.exists',
+ side_effect=lambda path: True if path == '/path/to/keystore.jks' else orig_exists(path))
+ self.mock_selinux_context = patch('ansible.module_utils.basic.AnsibleModule.selinux_context',
+ side_effect=lambda path: ['unconfined_u', 'object_r', 'user_home_t', 's0'])
+ self.mock_is_special_selinux_path = patch('ansible.module_utils.basic.AnsibleModule.is_special_selinux_path',
+ side_effect=lambda path: (False, None))
+ self.run_command = self.mock_run_command.start()
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.preserved_copy = self.mock_preserved_copy.start()
+ self.atomic_move = self.mock_atomic_move.start()
+ self.create_file = self.mock_create_file.start()
+ self.create_path = self.mock_create_path.start()
+ self.current_type = self.mock_current_type.start()
+ self.selinux_context = self.mock_selinux_context.start()
+ self.is_special_selinux_path = self.mock_is_special_selinux_path.start()
+ self.os_path_exists = self.mock_os_path_exists.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestCreateJavaKeystore, self).tearDown()
+ self.mock_create_file.stop()
+ self.mock_create_path.stop()
+ self.mock_current_type.stop()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ self.mock_preserved_copy.stop()
+ self.mock_atomic_move.stop()
+ self.mock_selinux_context.stop()
+ self.mock_is_special_selinux_path.stop()
+ self.mock_os_path_exists.stop()
+
+ def test_create_jks_success(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='test',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ with patch('os.remove', return_value=True):
+ self.create_path.side_effect = ['/tmp/tmpgrzm2ah7']
+ self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', '']
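+            # Two successful commands in order: `openssl pkcs12 -export`, then
+            # the `keytool -importkeystore` invocation asserted below.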
+ self.run_command.side_effect = [(0, '', ''), (0, '', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ assert jks.create() == {
+ 'changed': True,
+ 'cmd': ["keytool", "-importkeystore",
+ "-destkeystore", "/path/to/keystore.jks",
+ "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test",
+ "-noprompt"],
+ 'msg': '',
+ 'rc': 0
+ }
+
+ def test_create_jks_keypass_fail_export_pkcs12(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ private_key_passphrase='passphrase-foo',
+ dest='/path/to/keystore.jks',
+ name='test',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ module.exit_json = Mock()
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.create_path.side_effect = ['/tmp/tmp1cyp12xa']
+ self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', '']
+ self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ jks.create()
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "pkcs12", "-export", "-name", "test",
+ "-in", "/tmp/tmpvalcrt32",
+ "-inkey", "/tmp/tmpwh4key0c",
+ "-out", "/tmp/tmp1cyp12xa",
+ "-passout", "stdin",
+ "-passin", "stdin"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
+
+ def test_create_jks_fail_export_pkcs12(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='test',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ module.exit_json = Mock()
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.create_path.side_effect = ['/tmp/tmp1cyp12xa']
+ self.create_file.side_effect = ['/tmp/tmpvalcrt32', '/tmp/tmpwh4key0c', '']
+ self.run_command.side_effect = [(1, '', 'Oops'), (0, '', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ jks.create()
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "pkcs12", "-export", "-name", "test",
+ "-in", "/tmp/tmpvalcrt32",
+ "-inkey", "/tmp/tmpwh4key0c",
+ "-out", "/tmp/tmp1cyp12xa",
+ "-passout", "stdin"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
+
+ def test_create_jks_fail_import_key(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='test',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ module.exit_json = Mock()
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.create_path.side_effect = ['/tmp/tmpgrzm2ah7']
+ self.create_file.side_effect = ['/tmp/etacifitrec', '/tmp/yek_etavirp', '']
+ self.run_command.side_effect = [(0, '', ''), (1, '', 'Oops')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ jks.create()
+ module.fail_json.assert_called_once_with(
+ cmd=["keytool", "-importkeystore",
+ "-destkeystore", "/path/to/keystore.jks",
+ "-srckeystore", "/tmp/tmpgrzm2ah7", "-srcstoretype", "pkcs12", "-alias", "test",
+ "-noprompt"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
+
+
+class TestCertChanged(ModuleTestCase):
+ """Test if the cert has changed."""
+
+ def setUp(self):
+ """Setup."""
+ super(TestCertChanged, self).setUp()
+ self.mock_create_file = patch('ansible_collections.community.general.plugins.modules.java_keystore.create_file')
+ self.mock_current_type = patch('ansible_collections.community.general.plugins.modules.java_keystore.JavaKeystore.current_type')
+ self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ self.mock_preserved_copy = patch('ansible.module_utils.basic.AnsibleModule.preserved_copy')
+ self.mock_atomic_move = patch('ansible.module_utils.basic.AnsibleModule.atomic_move')
+ self.run_command = self.mock_run_command.start()
+ self.create_file = self.mock_create_file.start()
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.current_type = self.mock_current_type.start()
+ self.preserved_copy = self.mock_preserved_copy.start()
+ self.atomic_move = self.mock_atomic_move.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestCertChanged, self).tearDown()
+ self.mock_create_file.stop()
+ self.mock_current_type.stop()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ self.mock_preserved_copy.stop()
+ self.mock_atomic_move.stop()
+
+ def test_cert_unchanged_same_fingerprint(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/placeholder', '']
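+            # The first tuple mimics the `openssl x509 -fingerprint` output for the
+            # certificate, the second the `keytool -list` output for the keystore.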
+ self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: abcd:1234:efgh', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ self.current_type.side_effect = ['jks']
+ jks = JavaKeystore(module)
+ result = jks.cert_changed()
+ self.assertFalse(result, 'Fingerprint is identical')
+
+ def test_cert_changed_fingerprint_mismatch(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/placeholder', '']
+ self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''), (0, 'SHA256: wxyz:9876:stuv', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ self.current_type.side_effect = ['jks']
+ jks = JavaKeystore(module)
+ result = jks.cert_changed()
+ self.assertTrue(result, 'Fingerprint mismatch')
+
+ def test_cert_changed_alias_does_not_exist(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/placeholder', '']
+ self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''),
+ (1, 'keytool error: java.lang.Exception: Alias <foo> does not exist', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ result = jks.cert_changed()
+ self.assertTrue(result, 'Alias mismatch detected')
+
+ def test_cert_changed_password_mismatch(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/placeholder', '']
+ self.run_command.side_effect = [(0, 'foo=abcd:1234:efgh', ''),
+ (1, 'keytool error: java.io.IOException: Keystore password was incorrect', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ result = jks.cert_changed()
+ self.assertTrue(result, 'Password mismatch detected')
+
+ def test_cert_changed_fail_read_cert(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ module.exit_json = Mock()
+ module.fail_json = Mock()
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/tmpdj6bvvme', '']
+ self.run_command.side_effect = [(1, '', 'Oops'), (0, 'SHA256: wxyz:9876:stuv', '')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ self.current_type.side_effect = ['jks']
+ jks = JavaKeystore(module)
+ jks.cert_changed()
+ module.fail_json.assert_called_once_with(
+ cmd=["openssl", "x509", "-noout", "-in", "/tmp/tmpdj6bvvme", "-fingerprint", "-sha256"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
+
+ def test_cert_changed_fail_read_keystore(self):
+ set_module_args(dict(
+ certificate='cert-foo',
+ private_key='private-foo',
+ dest='/path/to/keystore.jks',
+ name='foo',
+ password='changeit'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=module_argument_spec,
+ supports_check_mode=module_supports_check_mode,
+ mutually_exclusive=module_choose_between,
+ required_one_of=module_choose_between
+ )
+
+ module.exit_json = Mock()
+ module.fail_json = Mock(return_value=True)
+
+ with patch('os.remove', return_value=True):
+ self.create_file.side_effect = ['/tmp/placeholder', '']
+ self.run_command.side_effect = [(0, 'foo: wxyz:9876:stuv', ''), (1, '', 'Oops')]
+ self.get_bin_path.side_effect = ['keytool', 'openssl', '']
+ jks = JavaKeystore(module)
+ jks.cert_changed()
+ module.fail_json.assert_called_with(
+ cmd=["keytool", "-list", "-alias", "foo", "-keystore", "/path/to/keystore.jks", "-v"],
+ msg='',
+ err='Oops',
+ rc=1
+ )
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_build.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_build.py
new file mode 100644
index 000000000..44c6307ac
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_build.py
@@ -0,0 +1,224 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible_collections.community.general.plugins.modules import jenkins_build
+
+import json
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
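+# Minimal stand-in for the python-jenkins library's exception hierarchy, so the
+# tests do not require the real dependency to be installed.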
+class jenkins:
+ class JenkinsException(Exception):
+ pass
+
+ class NotFoundException(JenkinsException):
+ pass
+
+
+class JenkinsBuildMock():
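+    # Produces the value used to patch JenkinsBuild.get_build_status: a missing
+    # build (JenkinsException from the mock server) is reported as "ABSENT".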
+ def get_build_status(self):
+ try:
+ instance = JenkinsMock()
+ response = JenkinsMock.get_build_info(instance, 'host-delete', 1234)
+ return response
+ except jenkins.JenkinsException as e:
+ response = {}
+ response["result"] = "ABSENT"
+ return response
+ except Exception as e:
+ fail_json(msg='Unable to fetch build information, {0}'.format(e))
+
+
+class JenkinsMock():
+
+ def get_job_info(self, name):
+ return {
+ "nextBuildNumber": 1234
+ }
+
+ def get_build_info(self, name, build_number):
+ if name == "host-delete":
+ raise jenkins.JenkinsException("job {0} number {1} does not exist".format(name, build_number))
+ return {
+ "building": True,
+ "result": "SUCCESS"
+ }
+
+ def build_job(self, *args):
+ return None
+
+ def delete_build(self, name, build_number):
+ return None
+
+ def stop_build(self, name, build_number):
+ return None
+
+
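+# Simulates a server whose state already matches the request: builds are
+# already stopped/aborted and deleting again raises NotFoundException.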
+class JenkinsMockIdempotent():
+
+ def get_job_info(self, name):
+ return {
+ "nextBuildNumber": 1235
+ }
+
+ def get_build_info(self, name, build_number):
+ return {
+ "building": False,
+ "result": "ABORTED"
+ }
+
+ def build_job(self, *args):
+ return None
+
+ def delete_build(self, name, build_number):
+ raise jenkins.NotFoundException("job {0} number {1} does not exist".format(name, build_number))
+
+ def stop_build(self, name, build_number):
+ return None
+
+
+class TestJenkinsBuild(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ def test_module_fail_when_required_args_missing(self, test_deps):
+ test_deps.return_value = None
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ jenkins_build.main()
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ def test_module_fail_when_missing_build_number(self, test_deps):
+ test_deps.return_value = None
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ "name": "required-if",
+ "state": "stopped"
+ })
+ jenkins_build.main()
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection')
+ def test_module_create_build(self, jenkins_connection, test_deps):
+ test_deps.return_value = None
+ jenkins_connection.return_value = JenkinsMock()
+
+ with self.assertRaises(AnsibleExitJson):
+ set_module_args({
+ "name": "host-check",
+ "user": "abc",
+ "token": "xyz"
+ })
+ jenkins_build.main()
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection')
+ def test_module_stop_build(self, jenkins_connection, test_deps):
+ test_deps.return_value = None
+ jenkins_connection.return_value = JenkinsMock()
+
+ with self.assertRaises(AnsibleExitJson) as return_json:
+ set_module_args({
+ "name": "host-check",
+ "build_number": "1234",
+ "state": "stopped",
+ "user": "abc",
+ "token": "xyz"
+ })
+ jenkins_build.main()
+
+ self.assertTrue(return_json.exception.args[0]['changed'])
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection')
+ def test_module_stop_build_again(self, jenkins_connection, test_deps):
+ test_deps.return_value = None
+ jenkins_connection.return_value = JenkinsMockIdempotent()
+
+ with self.assertRaises(AnsibleExitJson) as return_json:
+ set_module_args({
+ "name": "host-check",
+ "build_number": "1234",
+ "state": "stopped",
+ "user": "abc",
+ "password": "xyz"
+ })
+ jenkins_build.main()
+
+ self.assertFalse(return_json.exception.args[0]['changed'])
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_build_status')
+ def test_module_delete_build(self, build_status, jenkins_connection, test_deps):
+ test_deps.return_value = None
+ jenkins_connection.return_value = JenkinsMock()
+ build_status.return_value = JenkinsBuildMock().get_build_status()
+
+ with self.assertRaises(AnsibleExitJson):
+ set_module_args({
+ "name": "host-delete",
+ "build_number": "1234",
+ "state": "absent",
+ "user": "abc",
+ "token": "xyz"
+ })
+ jenkins_build.main()
+
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.test_dependencies')
+ @patch('ansible_collections.community.general.plugins.modules.jenkins_build.JenkinsBuild.get_jenkins_connection')
+ def test_module_delete_build_again(self, jenkins_connection, test_deps):
+ test_deps.return_value = None
+ jenkins_connection.return_value = JenkinsMockIdempotent()
+
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ "name": "host-delete",
+ "build_number": "1234",
+ "state": "absent",
+ "user": "abc",
+ "token": "xyz"
+ })
+ jenkins_build.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_plugin.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_plugin.py
new file mode 100644
index 000000000..194cc2d72
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_jenkins_plugin.py
@@ -0,0 +1,192 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import BytesIO
+
+from ansible_collections.community.general.plugins.modules.jenkins_plugin import JenkinsPlugin
+from ansible.module_utils.common._collections_compat import Mapping
+
+
+def pass_function(*args, **kwargs):
+ pass
+
+
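+# Canned GitHub API response (repos/ansible/ansible) used as the mocked
+# _get_url_data payload so _get_json_data can be tested without network access.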
+GITHUB_DATA = {"url": u'https://api.github.com/repos/ansible/ansible',
+ "response": b"""
+{
+ "id": 3638964,
+ "name": "ansible",
+ "full_name": "ansible/ansible",
+ "owner": {
+ "login": "ansible",
+ "id": 1507452,
+ "avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/ansible",
+ "html_url": "https://github.com/ansible",
+ "followers_url": "https://api.github.com/users/ansible/followers",
+ "following_url": "https://api.github.com/users/ansible/following{/other_user}",
+ "gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
+ "organizations_url": "https://api.github.com/users/ansible/orgs",
+ "repos_url": "https://api.github.com/users/ansible/repos",
+ "events_url": "https://api.github.com/users/ansible/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/ansible/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/ansible/ansible",
+ "description": "Ansible is a radically simple IT automation platform that makes your applications and systems easier to deploy.",
+ "fork": false,
+ "url": "https://api.github.com/repos/ansible/ansible",
+ "forks_url": "https://api.github.com/repos/ansible/ansible/forks",
+ "keys_url": "https://api.github.com/repos/ansible/ansible/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/ansible/ansible/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/ansible/ansible/teams",
+ "hooks_url": "https://api.github.com/repos/ansible/ansible/hooks",
+ "issue_events_url": "https://api.github.com/repos/ansible/ansible/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/ansible/ansible/events",
+ "assignees_url": "https://api.github.com/repos/ansible/ansible/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/ansible/ansible/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/ansible/ansible/tags",
+ "blobs_url": "https://api.github.com/repos/ansible/ansible/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/ansible/ansible/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/ansible/ansible/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/ansible/ansible/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/ansible/ansible/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/ansible/ansible/languages",
+ "stargazers_url": "https://api.github.com/repos/ansible/ansible/stargazers",
+ "contributors_url": "https://api.github.com/repos/ansible/ansible/contributors",
+ "subscribers_url": "https://api.github.com/repos/ansible/ansible/subscribers",
+ "subscription_url": "https://api.github.com/repos/ansible/ansible/subscription",
+ "commits_url": "https://api.github.com/repos/ansible/ansible/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/ansible/ansible/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/ansible/ansible/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/ansible/ansible/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/ansible/ansible/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/ansible/ansible/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/ansible/ansible/merges",
+ "archive_url": "https://api.github.com/repos/ansible/ansible/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/ansible/ansible/downloads",
+ "issues_url": "https://api.github.com/repos/ansible/ansible/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/ansible/ansible/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/ansible/ansible/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/ansible/ansible/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/ansible/ansible/labels{/name}",
+ "releases_url": "https://api.github.com/repos/ansible/ansible/releases{/id}",
+ "deployments_url": "https://api.github.com/repos/ansible/ansible/deployments",
+ "created_at": "2012-03-06T14:58:02Z",
+ "updated_at": "2017-09-19T18:10:54Z",
+ "pushed_at": "2017-09-19T18:04:51Z",
+ "git_url": "git://github.com/ansible/ansible.git",
+ "ssh_url": "git@github.com:ansible/ansible.git",
+ "clone_url": "https://github.com/ansible/ansible.git",
+ "svn_url": "https://github.com/ansible/ansible",
+ "homepage": "https://www.ansible.com/",
+ "size": 91174,
+ "stargazers_count": 25552,
+ "watchers_count": 25552,
+ "language": "Python",
+ "has_issues": true,
+ "has_projects": true,
+ "has_downloads": true,
+ "has_wiki": false,
+ "has_pages": false,
+ "forks_count": 8893,
+ "mirror_url": null,
+ "open_issues_count": 4283,
+ "forks": 8893,
+ "open_issues": 4283,
+ "watchers": 25552,
+ "default_branch": "devel",
+ "organization": {
+ "login": "ansible",
+ "id": 1507452,
+ "avatar_url": "https://avatars2.githubusercontent.com/u/1507452?v=4",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/ansible",
+ "html_url": "https://github.com/ansible",
+ "followers_url": "https://api.github.com/users/ansible/followers",
+ "following_url": "https://api.github.com/users/ansible/following{/other_user}",
+ "gists_url": "https://api.github.com/users/ansible/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/ansible/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/ansible/subscriptions",
+ "organizations_url": "https://api.github.com/users/ansible/orgs",
+ "repos_url": "https://api.github.com/users/ansible/repos",
+ "events_url": "https://api.github.com/users/ansible/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/ansible/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "network_count": 8893,
+ "subscribers_count": 1733
+}
+"""
+ }
+
+
+def test__get_json_data(mocker):
+ "test the json conversion of _get_url_data"
+
+ timeout = 30
+ params = {
+ 'url': GITHUB_DATA['url'],
+ 'timeout': timeout
+ }
+ module = mocker.Mock()
+ module.params = params
+
+ JenkinsPlugin._csrf_enabled = pass_function
+ JenkinsPlugin._get_installed_plugins = pass_function
+ JenkinsPlugin._get_url_data = mocker.Mock()
+ JenkinsPlugin._get_url_data.return_value = BytesIO(GITHUB_DATA['response'])
+ jenkins_plugin = JenkinsPlugin(module)
+
+ json_data = jenkins_plugin._get_json_data(GITHUB_DATA['url'], 'CSRF')
+
+ assert isinstance(json_data, Mapping)
+
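+# A side note on the patching style above: assigning pass_function directly to
+# JenkinsPlugin attributes mutates the class for the remainder of the test run.
+# A sketch of an automatically-undone alternative, assuming the pytest-mock
+# `mocker` fixture these tests already receive:
+#
+#     mocker.patch.object(JenkinsPlugin, '_csrf_enabled', pass_function)
+#     mocker.patch.object(JenkinsPlugin, '_get_installed_plugins', pass_function)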
+
+def test__new_fallback_urls(mocker):
+ "test generation of new fallback URLs"
+
+ params = {
+ "url": "http://fake.jenkins.server",
+ "timeout": 30,
+ "name": "test-plugin",
+ "version": "1.2.3",
+ "updates_url": ["https://some.base.url"],
+ "latest_plugins_url_segments": ["test_latest"],
+ "versioned_plugins_url_segments": ["ansible", "versioned_plugins"],
+ "update_json_url_segment": ["unreachable", "updates/update-center.json"],
+ }
+ module = mocker.Mock()
+ module.params = params
+
+ JenkinsPlugin._csrf_enabled = pass_function
+ JenkinsPlugin._get_installed_plugins = pass_function
+
+ jenkins_plugin = JenkinsPlugin(module)
+
+ latest_urls = jenkins_plugin._get_latest_plugin_urls()
+ assert isInList(latest_urls, "https://some.base.url/test_latest/test-plugin.hpi")
+ versioned_urls = jenkins_plugin._get_versioned_plugin_urls()
+ assert isInList(versioned_urls, "https://some.base.url/versioned_plugins/test-plugin/1.2.3/test-plugin.hpi")
+ json_urls = jenkins_plugin._get_update_center_urls()
+ assert isInList(json_urls, "https://some.base.url/updates/update-center.json")
+
+
+def isInList(l, i):
+ """Return True if item i is present in list l, printing a diagnostic first."""
+ print("checking if %s in %s" % (i, l))
+ return i in l
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_authentication.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_authentication.py
new file mode 100644
index 000000000..aaa1fa9b1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_authentication.py
@@ -0,0 +1,623 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_authentication
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_authentication_flow_by_alias=None, copy_auth_flow=None, create_empty_auth_flow=None,
+ get_executions_representation=None, delete_authentication_flow_by_id=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_authentication.KeycloakAPI
+ with patch.object(obj, 'get_authentication_flow_by_alias', side_effect=get_authentication_flow_by_alias) \
+ as mock_get_authentication_flow_by_alias:
+ with patch.object(obj, 'copy_auth_flow', side_effect=copy_auth_flow) \
+ as mock_copy_auth_flow:
+ with patch.object(obj, 'create_empty_auth_flow', side_effect=create_empty_auth_flow) \
+ as mock_create_empty_auth_flow:
+ with patch.object(obj, 'get_executions_representation', return_value=get_executions_representation) \
+ as mock_get_executions_representation:
+ with patch.object(obj, 'delete_authentication_flow_by_id', side_effect=delete_authentication_flow_by_id) \
+ as mock_delete_authentication_flow_by_id:
+ yield mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow, \
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
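+# How the two helpers above cooperate (a minimal sketch; the URL and payloads
+# are hypothetical): build_mocked_request looks the URL up in response_dict and
+# get_response dispatches on the stored value -- a dict selects by HTTP method,
+# a list yields one element per call, and a callable is invoked:
+#
+#     responses = {'http://host/api': {'GET': ['first', 'second']}}
+#     mocked = build_mocked_request(count(), responses)
+#     mocked('http://host/api', method='GET')  # -> 'first'
+#     mocked('http://host/api', method='GET')  # -> 'second'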
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
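+# For example (hypothetical payload), each call to the returned factory builds
+# a fresh stream, so every consumer sees the full text:
+#
+#     factory = create_wrapper('{"access_token": "alongtoken"}')
+#     factory().read()  # -> '{"access_token": "alongtoken"}'
+#     factory().read()  # -> the same text again; one shared StringIO would be drained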
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
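+# The patch target above is the open_url name as imported inside the Keycloak
+# module_utils, so every admin-API request made by the module under test is
+# answered from token_response instead of reaching a real server.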
+
+class TestKeycloakAuthentication(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakAuthentication, self).setUp()
+ self.module = keycloak_authentication
+
+ def test_create_auth_flow_from_copy(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'copyFrom': 'first broker login',
+ 'authenticationExecutions': [
+ {
+ 'providerId': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ },
+ ],
+ 'state': 'present',
+ }
+ return_value_auth_flow_before = [{}]
+ return_value_copied = [{
+ 'id': '2ac059fc-c548-414f-9c9e-84d42bd4944e',
+ 'alias': 'first broker login',
+ 'description': 'browser based authentication',
+ 'providerId': 'basic-flow',
+ 'topLevel': True,
+ 'builtIn': False,
+ 'authenticationExecutions': [
+ {
+ 'authenticator': 'auth-cookie',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 10,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ ],
+ }]
+ return_value_executions_after = [
+ {
+ 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Identity Provider Redirector',
+ 'requirementChoices': ['REQUIRED', 'DISABLED'],
+ 'configurable': True,
+ 'providerId': 'identity-provider-redirector',
+ 'level': 0,
+ 'index': 0
+ },
+ {
+ 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Cookie',
+ 'requirementChoices': [
+ 'REQUIRED',
+ 'ALTERNATIVE',
+ 'DISABLED'
+ ],
+ 'configurable': False,
+ 'providerId': 'auth-cookie',
+ 'level': 0,
+ 'index': 1
+ },
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before, copy_auth_flow=return_value_copied,
+ get_executions_representation=return_value_executions_after) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 1)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 2)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_auth_flow_from_copy_idempotency(self):
+ """Add an already existing authentication flow from copy of an other flow to test idempotency"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'copyFrom': 'first broker login',
+ 'authenticationExecutions': [
+ {
+ 'providerId': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ },
+ ],
+ 'state': 'present',
+ }
+ return_value_auth_flow_before = [{
+ 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4',
+ 'alias': 'Test create authentication flow copy',
+ 'description': '',
+ 'providerId': 'basic-flow',
+ 'topLevel': True,
+ 'builtIn': False,
+ 'authenticationExecutions': [
+ {
+ 'authenticator': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 0,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ {
+ 'authenticator': 'auth-cookie',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 0,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ ],
+ }]
+ return_value_executions_after = [
+ {
+ 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Identity Provider Redirector',
+ 'requirementChoices': ['REQUIRED', 'DISABLED'],
+ 'configurable': True,
+ 'providerId': 'identity-provider-redirector',
+ 'level': 0,
+ 'index': 0
+ },
+ {
+ 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Cookie',
+ 'requirementChoices': [
+ 'REQUIRED',
+ 'ALTERNATIVE',
+ 'DISABLED'
+ ],
+ 'configurable': False,
+ 'providerId': 'auth-cookie',
+ 'level': 0,
+ 'index': 1
+ },
+ ]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before,
+ get_executions_representation=return_value_executions_after) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 2)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_auth_flow_without_copy(self):
+ """Add authentication without copy"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'authenticationExecutions': [
+ {
+ 'providerId': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ 'authenticationConfig': {
+ 'alias': 'name',
+ 'config': {
+ 'defaultProvider': 'value'
+ },
+ },
+ },
+ ],
+ 'state': 'present',
+ }
+ return_value_auth_flow_before = [{}]
+ return_value_created_empty_flow = [
+ {
+ "alias": "Test of the keycloak_auth module",
+ "authenticationExecutions": [],
+ "builtIn": False,
+ "description": "",
+ "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f",
+ "providerId": "basic-flow",
+ "topLevel": True
+ },
+ ]
+ return_value_executions_after = [
+ {
+ 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Identity Provider Redirector',
+ 'requirementChoices': ['REQUIRED', 'DISABLED'],
+ 'configurable': True,
+ 'providerId': 'identity-provider-redirector',
+ 'level': 0,
+ 'index': 0
+ },
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before,
+ get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 3)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_update_auth_flow_adding_exec(self):
+ """Update authentication flow by adding execution"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'authenticationExecutions': [
+ {
+ 'providerId': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ 'authenticationConfig': {
+ 'alias': 'name',
+ 'config': {
+ 'defaultProvider': 'value'
+ },
+ },
+ },
+ ],
+ 'state': 'present',
+ }
+ return_value_auth_flow_before = [{
+ 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4',
+ 'alias': 'Test create authentication flow copy',
+ 'description': '',
+ 'providerId': 'basic-flow',
+ 'topLevel': True,
+ 'builtIn': False,
+ 'authenticationExecutions': [
+ {
+ 'authenticator': 'auth-cookie',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 0,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ ],
+ }]
+ return_value_executions_after = [
+ {
+ 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591',
+ 'requirement': 'DISABLED',
+ 'displayName': 'Identity Provider Redirector',
+ 'requirementChoices': ['REQUIRED', 'DISABLED'],
+ 'configurable': True,
+ 'providerId': 'identity-provider-redirector',
+ 'level': 0,
+ 'index': 0
+ },
+ {
+ 'id': 'fdc208e9-c292-48b7-b7d1-1d98315ee893',
+ 'requirement': 'ALTERNATIVE',
+ 'displayName': 'Cookie',
+ 'requirementChoices': [
+ 'REQUIRED',
+ 'ALTERNATIVE',
+ 'DISABLED'
+ ],
+ 'configurable': False,
+ 'providerId': 'auth-cookie',
+ 'level': 0,
+ 'index': 1
+ },
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before,
+ get_executions_representation=return_value_executions_after) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 3)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_auth_flow(self):
+ """Delete authentication flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'state': 'absent',
+ }
+ return_value_auth_flow_before = [{
+ 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4',
+ 'alias': 'Test create authentication flow copy',
+ 'description': '',
+ 'providerId': 'basic-flow',
+ 'topLevel': True,
+ 'builtIn': False,
+ 'authenticationExecutions': [
+ {
+ 'authenticator': 'auth-cookie',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 0,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ ],
+ }]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 0)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_auth_flow_idempotency(self):
+ """Delete second time authentication flow to test idempotency"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'state': 'absent',
+ }
+ return_value_auth_flow_before = [{}]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 0)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_force_update_auth_flow(self):
+ """Delete authentication flow and create new one"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'alias': 'Test create authentication flow copy',
+ 'authenticationExecutions': [
+ {
+ 'providerId': 'identity-provider-redirector',
+ 'requirement': 'ALTERNATIVE',
+ 'authenticationConfig': {
+ 'alias': 'name',
+ 'config': {
+ 'defaultProvider': 'value'
+ },
+ },
+ },
+ ],
+ 'state': 'present',
+ 'force': 'yes',
+ }
+ return_value_auth_flow_before = [{
+ 'id': '71275d5e-e11f-4be4-b119-0abfa87987a4',
+ 'alias': 'Test create authentication flow copy',
+ 'description': '',
+ 'providerId': 'basic-flow',
+ 'topLevel': True,
+ 'builtIn': False,
+ 'authenticationExecutions': [
+ {
+ 'authenticator': 'auth-cookie',
+ 'requirement': 'ALTERNATIVE',
+ 'priority': 0,
+ 'userSetupAllowed': False,
+ 'autheticatorFlow': False
+ },
+ ],
+ }]
+ return_value_created_empty_flow = [
+ {
+ "alias": "Test of the keycloak_auth module",
+ "authenticationExecutions": [],
+ "builtIn": False,
+ "description": "",
+ "id": "513f5baa-cc42-47bf-b4b6-1d23ccc0a67f",
+ "providerId": "basic-flow",
+ "topLevel": True
+ },
+ ]
+ return_value_executions_after = [
+ {
+ 'id': 'b678e30c-8469-40a7-8c21-8d0cda76a591',
+ 'requirement': 'DISABLED',
+ 'displayName': 'Identity Provider Redirector',
+ 'requirementChoices': ['REQUIRED', 'DISABLED'],
+ 'configurable': True,
+ 'providerId': 'identity-provider-redirector',
+ 'level': 0,
+ 'index': 0
+ },
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_authentication_flow_by_alias=return_value_auth_flow_before,
+ get_executions_representation=return_value_executions_after, create_empty_auth_flow=return_value_created_empty_flow) \
+ as (mock_get_authentication_flow_by_alias, mock_copy_auth_flow, mock_create_empty_auth_flow,
+ mock_get_executions_representation, mock_delete_authentication_flow_by_id):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(len(mock_get_authentication_flow_by_alias.mock_calls), 1)
+ self.assertEqual(len(mock_copy_auth_flow.mock_calls), 0)
+ self.assertEqual(len(mock_create_empty_auth_flow.mock_calls), 1)
+ self.assertEqual(len(mock_get_executions_representation.mock_calls), 3)
+ self.assertEqual(len(mock_delete_authentication_flow_by_id.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client.py
new file mode 100644
index 000000000..b44013af1
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_client
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_client_by_clientid=None, get_client_by_id=None, update_client=None, create_client=None,
+ delete_client=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_client.KeycloakAPI
+ with patch.object(obj, 'get_client_by_clientid', side_effect=get_client_by_clientid) as mock_get_client_by_clientid:
+ with patch.object(obj, 'get_client_by_id', side_effect=get_client_by_id) as mock_get_client_by_id:
+ with patch.object(obj, 'create_client', side_effect=create_client) as mock_create_client:
+ with patch.object(obj, 'update_client', side_effect=update_client) as mock_update_client:
+ with patch.object(obj, 'delete_client', side_effect=delete_client) as mock_delete_client:
+ yield mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+
+ def _create_wrapper():
+ return StringIO(text_as_string)
+
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper(
+ '{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakClient(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakClient, self).setUp()
+ self.module = keycloak_client
+
+ def test_authentication_flow_binding_overrides_feature(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'https://auth.example.com/auth',
+ 'token': '{{ access_token }}',
+ 'state': 'present',
+ 'realm': 'master',
+ 'client_id': 'test',
+ 'authentication_flow_binding_overrides': {
+ 'browser': '4c90336b-bf1d-4b87-916d-3677ba4e5fbb'
+ }
+ }
+ return_value_get_client_by_clientid = [
+ None,
+ {
+ "authenticationFlowBindingOverrides": {
+ "browser": "f9502b6d-d76a-4efe-8331-2ddd853c9f9c"
+ },
+ "clientId": "onboardingid",
+ "enabled": "true",
+ "protocol": "openid-connect",
+ "redirectUris": [
+ "*"
+ ]
+ }
+ ]
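+ # Reading of the side_effect list above (an explanatory note, not upstream
+ # code): the first lookup returns None, so the module creates the client;
+ # the second call returns the resulting client representation.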
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_client_by_clientid=return_value_get_client_by_clientid) \
+ as (mock_get_client_by_clientid, mock_get_client_by_id, mock_create_client, mock_update_client, mock_delete_client):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_client_by_clientid.call_count, 2)
+ self.assertEqual(mock_get_client_by_id.call_count, 0)
+ self.assertEqual(mock_create_client.call_count, 1)
+ self.assertEqual(mock_update_client.call_count, 0)
+ self.assertEqual(mock_delete_client.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py
new file mode 100644
index 000000000..58c8b9548
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_client_rolemapping.py
@@ -0,0 +1,573 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_client_rolemapping
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_group_by_name=None, get_client_id=None, get_client_role_id_by_name=None,
+ get_client_group_rolemapping_by_id=None, get_client_group_available_rolemappings=None,
+ get_client_group_composite_rolemappings=None, add_group_rolemapping=None,
+ delete_group_rolemapping=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_client_rolemapping.KeycloakAPI
+ with patch.object(obj, 'get_group_by_name',
+ side_effect=get_group_by_name) as mock_get_group_by_name:
+ with patch.object(obj, 'get_client_id',
+ side_effect=get_client_id) as mock_get_client_id:
+ with patch.object(obj, 'get_client_role_id_by_name',
+ side_effect=get_client_role_id_by_name) as mock_get_client_role_id_by_name:
+ with patch.object(obj, 'get_client_group_rolemapping_by_id',
+ side_effect=get_client_group_rolemapping_by_id) as mock_get_client_group_rolemapping_by_id:
+ with patch.object(obj, 'get_client_group_available_rolemappings',
+ side_effect=get_client_group_available_rolemappings) as mock_get_client_group_available_rolemappings:
+ with patch.object(obj, 'get_client_group_composite_rolemappings',
+ side_effect=get_client_group_composite_rolemappings) as mock_get_client_group_composite_rolemappings:
+ with patch.object(obj, 'add_group_rolemapping',
+ side_effect=add_group_rolemapping) as mock_add_group_rolemapping:
+ with patch.object(obj, 'delete_group_rolemapping',
+ side_effect=delete_group_rolemapping) as mock_delete_group_rolemapping:
+ yield mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping, \
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, \
+ mock_get_client_group_composite_rolemappings, mock_delete_group_rolemapping
+
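+# The eight nested with-blocks above could equivalently be flattened with
+# contextlib.ExitStack; a sketch under that assumption (method_effects is a
+# hypothetical list of (name, side_effect) pairs, not upstream code):
+#
+#     with ExitStack() as stack:
+#         mocks = tuple(stack.enter_context(patch.object(obj, name, side_effect=eff))
+#                       for name, eff in method_effects)
+#         yield mocks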
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakClientRolemapping(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakClientRolemapping, self).setUp()
+ self.module = keycloak_client_rolemapping
+
+ def test_map_clientrole_to_group_with_name(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'client_id': 'test_client',
+ 'group_name': 'test_group',
+ 'roles': [
+ {
+ 'name': 'test_role1',
+ },
+ {
+ 'name': 'test_role2',
+ },
+ ],
+ }
+ return_value_get_group_by_name = [{
+ "access": {
+ "manage": "true",
+ "manageMembership": "true",
+ "view": "true"
+ },
+ "attributes": "{}",
+ "clientRoles": "{}",
+ "id": "92f2400e-0ecb-4185-8950-12dcef616c2b",
+ "name": "test_group",
+ "path": "/test_group",
+ "realmRoles": "[]",
+ "subGroups": "[]"
+ }]
+ return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
+ return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
+ return_value_get_client_group_available_rolemappings = [[
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]]
+ return_value_get_client_group_composite_rolemappings = [
+ None,
+ [
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]
+ ]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
+ get_client_role_id_by_name=return_value_get_client_role_id_by_name,
+ get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
+ get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
+ as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
+ mock_delete_group_rolemapping):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_group_by_name.call_count, 1)
+ self.assertEqual(mock_get_client_id.call_count, 1)
+ self.assertEqual(mock_add_group_rolemapping.call_count, 1)
+ self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
+ self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
+ self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
+ self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_map_clientrole_to_group_with_name_idempotency(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'client_id': 'test_client',
+ 'group_name': 'test_group',
+ 'roles': [
+ {
+ 'name': 'test_role1',
+ },
+ {
+ 'name': 'test_role2',
+ },
+ ],
+ }
+ return_value_get_group_by_name = [{
+ "access": {
+ "manage": "true",
+ "manageMembership": "true",
+ "view": "true"
+ },
+ "attributes": "{}",
+ "clientRoles": "{}",
+ "id": "92f2400e-0ecb-4185-8950-12dcef616c2b",
+ "name": "test_group",
+ "path": "/test_group",
+ "realmRoles": "[]",
+ "subGroups": "[]"
+ }]
+ return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
+ return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
+ return_value_get_client_group_available_rolemappings = [[]]
+ return_value_get_client_group_composite_rolemappings = [[
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]]
+
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
+ get_client_role_id_by_name=return_value_get_client_role_id_by_name,
+ get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
+ get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
+ as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
+ mock_delete_group_rolemapping):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_group_by_name.call_count, 1)
+ self.assertEqual(mock_get_client_id.call_count, 1)
+ self.assertEqual(mock_add_group_rolemapping.call_count, 0)
+ self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
+ self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
+ self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 1)
+ self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_map_clientrole_to_group_with_id(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'cid': 'c0f8490c-b224-4737-a567-20223e4c1727',
+ 'gid': '92f2400e-0ecb-4185-8950-12dcef616c2b',
+ 'roles': [
+ {
+ 'name': 'test_role1',
+ },
+ {
+ 'name': 'test_role2',
+ },
+ ],
+ }
+ return_value_get_group_by_name = [{
+ "access": {
+ "manage": "true",
+ "manageMembership": "true",
+ "view": "true"
+ },
+ "attributes": "{}",
+ "clientRoles": "{}",
+ "id": "92f2400e-0ecb-4185-8950-12dcef616c2b",
+ "name": "test_group",
+ "path": "/test_group",
+ "realmRoles": "[]",
+ "subGroups": "[]"
+ }]
+ return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
+ return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
+ return_value_get_client_group_available_rolemappings = [[
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]]
+ return_value_get_client_group_composite_rolemappings = [
+ None,
+ [
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]
+ ]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
+ get_client_role_id_by_name=return_value_get_client_role_id_by_name,
+ get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
+ get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
+ as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
+ mock_delete_group_rolemapping):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_group_by_name.call_count, 0)
+ self.assertEqual(mock_get_client_id.call_count, 0)
+ self.assertEqual(mock_add_group_rolemapping.call_count, 1)
+ self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
+ self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
+ self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
+ self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_remove_clientrole_from_group(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'realm': 'realm-name',
+ 'state': 'absent',
+ 'client_id': 'test_client',
+ 'group_name': 'test_group',
+ 'roles': [
+ {
+ 'name': 'test_role1',
+ },
+ {
+ 'name': 'test_role2',
+ },
+ ],
+ }
+ return_value_get_group_by_name = [{
+ "access": {
+ "manage": "true",
+ "manageMembership": "true",
+ "view": "true"
+ },
+ "attributes": "{}",
+ "clientRoles": "{}",
+ "id": "92f2400e-0ecb-4185-8950-12dcef616c2b",
+ "name": "test_group",
+ "path": "/test_group",
+ "realmRoles": "[]",
+ "subGroups": "[]"
+ }]
+ return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
+ return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
+ return_value_get_client_group_available_rolemappings = [[]]
+ return_value_get_client_group_composite_rolemappings = [
+ [
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ],
+ []
+ ]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
+ get_client_role_id_by_name=return_value_get_client_role_id_by_name,
+ get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
+ get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
+ as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
+ mock_delete_group_rolemapping):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_group_by_name.call_count, 1)
+ self.assertEqual(mock_get_client_id.call_count, 1)
+ self.assertEqual(mock_add_group_rolemapping.call_count, 0)
+ self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
+ self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
+ self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 2)
+ self.assertEqual(mock_delete_group_rolemapping.call_count, 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_remove_clientrole_from_group_idempotency(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'realm': 'realm-name',
+ 'state': 'absent',
+ 'client_id': 'test_client',
+ 'group_name': 'test_group',
+ 'roles': [
+ {
+ 'name': 'test_role1',
+ },
+ {
+ 'name': 'test_role2',
+ },
+ ],
+ }
+ return_value_get_group_by_name = [{
+ "access": {
+ "manage": "true",
+ "manageMembership": "true",
+ "view": "true"
+ },
+ "attributes": "{}",
+ "clientRoles": "{}",
+ "id": "92f2400e-0ecb-4185-8950-12dcef616c2b",
+ "name": "test_group",
+ "path": "/test_group",
+ "realmRoles": "[]",
+ "subGroups": "[]"
+ }]
+ return_value_get_client_id = "c0f8490c-b224-4737-a567-20223e4c1727"
+ return_value_get_client_role_id_by_name = "e91af074-cfd5-40ee-8ef5-ae0ae1ce69fe"
+ return_value_get_client_group_available_rolemappings = [
+ [
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "c2bf2edb-da94-4f2f-b9f2-196dfee3fe4d",
+ "name": "test_role2"
+ },
+ {
+ "clientRole": "true",
+ "composite": "false",
+ "containerId": "c0f8490c-b224-4737-a567-20223e4c1727",
+ "id": "00a2d9a9-924e-49fa-8cde-c539c010ef6e",
+ "name": "test_role1"
+ }
+ ]
+ ]
+ return_value_get_client_group_composite_rolemappings = [[]]
+
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_group_by_name=return_value_get_group_by_name, get_client_id=return_value_get_client_id,
+ get_client_role_id_by_name=return_value_get_client_role_id_by_name,
+ get_client_group_available_rolemappings=return_value_get_client_group_available_rolemappings,
+ get_client_group_composite_rolemappings=return_value_get_client_group_composite_rolemappings) \
+ as (mock_get_group_by_name, mock_get_client_id, mock_get_client_role_id_by_name, mock_add_group_rolemapping,
+ mock_get_client_group_rolemapping_by_id, mock_get_client_group_available_rolemappings, mock_get_client_group_composite_rolemappings,
+ mock_delete_group_rolemapping):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(mock_get_group_by_name.call_count, 1)
+ self.assertEqual(mock_get_client_id.call_count, 1)
+ self.assertEqual(mock_add_group_rolemapping.call_count, 0)
+ self.assertEqual(mock_get_client_group_rolemapping_by_id.call_count, 0)
+ self.assertEqual(mock_get_client_group_available_rolemappings.call_count, 1)
+ self.assertEqual(mock_get_client_group_composite_rolemappings.call_count, 1)
+ self.assertEqual(mock_delete_group_rolemapping.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_clientscope.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_clientscope.py
new file mode 100644
index 000000000..ea015b05b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_clientscope.py
@@ -0,0 +1,614 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_clientscope
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_clientscope_by_name=None, get_clientscope_by_clientscopeid=None, create_clientscope=None,
+ update_clientscope=None, get_clientscope_protocolmapper_by_name=None,
+ update_clientscope_protocolmappers=None, create_clientscope_protocolmapper=None,
+ delete_clientscope=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ """
+ get_clientscope_by_clientscopeid
+ delete_clientscope
+ """
+
+ obj = keycloak_clientscope.KeycloakAPI
+ with patch.object(obj, 'get_clientscope_by_name', side_effect=get_clientscope_by_name) \
+ as mock_get_clientscope_by_name:
+ with patch.object(obj, 'get_clientscope_by_clientscopeid', side_effect=get_clientscope_by_clientscopeid) \
+ as mock_get_clientscope_by_clientscopeid:
+ with patch.object(obj, 'create_clientscope', side_effect=create_clientscope) \
+ as mock_create_clientscope:
+ with patch.object(obj, 'update_clientscope', return_value=update_clientscope) \
+ as mock_update_clientscope:
+ with patch.object(obj, 'get_clientscope_protocolmapper_by_name',
+ side_effect=get_clientscope_protocolmapper_by_name) \
+ as mock_get_clientscope_protocolmapper_by_name:
+ with patch.object(obj, 'update_clientscope_protocolmappers',
+ side_effect=update_clientscope_protocolmappers) \
+ as mock_update_clientscope_protocolmappers:
+ with patch.object(obj, 'create_clientscope_protocolmapper',
+ side_effect=create_clientscope_protocolmapper) \
+ as mock_create_clientscope_protocolmapper:
+ with patch.object(obj, 'delete_clientscope', side_effect=delete_clientscope) \
+ as mock_delete_clientscope:
+ yield mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope, \
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name, mock_update_clientscope_protocolmappers, \
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+
+ def _create_wrapper():
+ return StringIO(text_as_string)
+
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper(
+ '{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakClientscope(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakClientscope, self).setUp()
+ self.module = keycloak_clientscope
+
+ def test_create_clientscope(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'name': 'my-new-kc-clientscope'
+ }
+ return_value_get_clientscope_by_name = [
+ None,
+ {
+ "attributes": {},
+ "id": "73fec1d2-f032-410c-8177-583104d01305",
+ "name": "my-new-kc-clientscope"
+ }]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 2)
+ self.assertEqual(mock_create_clientscope.call_count, 1)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0)
+ self.assertEqual(mock_update_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_clientscope_idempotency(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'name': 'my-new-kc-clientscope'
+ }
+ return_value_get_clientscope_by_name = [{
+ "attributes": {},
+ "id": "73fec1d2-f032-410c-8177-583104d01305",
+ "name": "my-new-kc-clientscope"
+ }]
+
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 1)
+ self.assertEqual(mock_create_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0)
+ self.assertEqual(mock_update_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_clientscope(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'absent',
+ 'name': 'my-new-kc-clientscope'
+ }
+ return_value_get_clientscope_by_name = [{
+ "attributes": {},
+ "id": "73fec1d2-f032-410c-8177-583104d01305",
+ "name": "my-new-kc-clientscope"
+ }]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+        # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 1)
+ self.assertEqual(mock_create_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0)
+ self.assertEqual(mock_update_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_clientscope_idempotency(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'absent',
+ 'name': 'my-new-kc-clientscope'
+ }
+ return_value_get_clientscope_by_name = [None]
+
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+        # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 1)
+ self.assertEqual(mock_create_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0)
+ self.assertEqual(mock_update_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_clientscope_with_protocolmappers(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'name': 'my-new-kc-clientscope',
+ 'protocolMappers': [
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'true',
+ 'id.token.claim': 'true',
+ 'access.token.claim': 'true',
+ 'userinfo.token.claim': 'true',
+ 'claim.name': 'protocol1',
+ },
+ 'name': 'protocol1',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'false',
+ 'id.token.claim': 'false',
+ 'access.token.claim': 'false',
+ 'userinfo.token.claim': 'false',
+ 'claim.name': 'protocol2',
+ },
+ 'name': 'protocol2',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'true',
+ 'id.token.claim': 'false',
+ 'access.token.claim': 'true',
+ 'userinfo.token.claim': 'false',
+ 'claim.name': 'protocol3',
+ },
+ 'name': 'protocol3',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ ]
+ }
+ return_value_get_clientscope_by_name = [
+ None,
+ {
+ "attributes": {},
+ "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182",
+ "name": "my-new-kc-clientscope",
+ "protocolMappers": [
+ {
+ "config": {
+ "access.token.claim": "false",
+ "claim.name": "protocol2",
+ "full.path": "false",
+ "id.token.claim": "false",
+ "userinfo.token.claim": "false"
+ },
+ "consentRequired": "false",
+ "id": "a7f19adb-cc58-41b1-94ce-782dc255139b",
+ "name": "protocol2",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "protocol3",
+ "full.path": "true",
+ "id.token.claim": "false",
+ "userinfo.token.claim": "false"
+ },
+ "consentRequired": "false",
+ "id": "2103a559-185a-40f4-84ae-9ab311d5b812",
+ "name": "protocol3",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "protocol1",
+ "full.path": "true",
+ "id.token.claim": "true",
+ "userinfo.token.claim": "true"
+ },
+ "consentRequired": "false",
+ "id": "bbf6390f-e95f-4c20-882b-9dad328363b9",
+ "name": "protocol1",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ }]
+ }]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+        # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 2)
+ self.assertEqual(mock_create_clientscope.call_count, 1)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 0)
+ self.assertEqual(mock_update_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 0)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 0)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_update_clientscope_with_protocolmappers(self):
+ """Add a new authentication flow from copy of an other flow"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'realm': 'realm-name',
+ 'state': 'present',
+ 'name': 'my-new-kc-clientscope',
+ 'protocolMappers': [
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'false',
+ 'id.token.claim': 'false',
+ 'access.token.claim': 'false',
+ 'userinfo.token.claim': 'false',
+ 'claim.name': 'protocol1_updated',
+ },
+ 'name': 'protocol1',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'true',
+ 'id.token.claim': 'false',
+ 'access.token.claim': 'false',
+ 'userinfo.token.claim': 'false',
+ 'claim.name': 'protocol2_updated',
+ },
+ 'name': 'protocol2',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ {
+ 'protocol': 'openid-connect',
+ 'config': {
+ 'full.path': 'true',
+ 'id.token.claim': 'true',
+ 'access.token.claim': 'true',
+ 'userinfo.token.claim': 'true',
+ 'claim.name': 'protocol3_updated',
+ },
+ 'name': 'protocol3',
+ 'protocolMapper': 'oidc-group-membership-mapper',
+ },
+ ]
+ }
+ return_value_get_clientscope_by_name = [{
+ "attributes": {},
+ "id": "890ec72e-fe1d-4308-9f27-485ef7eaa182",
+ "name": "my-new-kc-clientscope",
+ "protocolMappers": [
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "groups",
+ "full.path": "true",
+ "id.token.claim": "true",
+ "userinfo.token.claim": "true"
+ },
+ "consentRequired": "false",
+ "id": "e077007a-367a-444f-91ef-70277a1d868d",
+ "name": "groups",
+ "protocol": "saml",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "groups",
+ "full.path": "true",
+ "id.token.claim": "true",
+ "userinfo.token.claim": "true"
+ },
+ "consentRequired": "false",
+ "id": "06c518aa-c627-43cc-9a82-d8467b508d34",
+ "name": "groups",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "groups",
+ "full.path": "true",
+ "id.token.claim": "true",
+ "userinfo.token.claim": "true"
+ },
+ "consentRequired": "false",
+ "id": "1d03c557-d97e-40f4-ac35-6cecd74ea70d",
+ "name": "groups",
+ "protocol": "wsfed",
+ "protocolMapper": "oidc-group-membership-mapper"
+ }
+ ]
+ }]
+ return_value_get_clientscope_by_clientscopeid = [{
+ "attributes": {},
+ "id": "2286032f-451e-44d5-8be6-e45aac7983a1",
+ "name": "my-new-kc-clientscope",
+ "protocolMappers": [
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "protocol1_updated",
+ "full.path": "true",
+ "id.token.claim": "false",
+ "userinfo.token.claim": "false"
+ },
+ "consentRequired": "false",
+ "id": "a7f19adb-cc58-41b1-94ce-782dc255139b",
+ "name": "protocol2",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "true",
+ "claim.name": "protocol1_updated",
+ "full.path": "true",
+ "id.token.claim": "false",
+ "userinfo.token.claim": "false"
+ },
+ "consentRequired": "false",
+ "id": "2103a559-185a-40f4-84ae-9ab311d5b812",
+ "name": "protocol3",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ },
+ {
+ "config": {
+ "access.token.claim": "false",
+ "claim.name": "protocol1_updated",
+ "full.path": "false",
+ "id.token.claim": "false",
+ "userinfo.token.claim": "false"
+ },
+ "consentRequired": "false",
+ "id": "bbf6390f-e95f-4c20-882b-9dad328363b9",
+ "name": "protocol1",
+ "protocol": "openid-connect",
+ "protocolMapper": "oidc-group-membership-mapper"
+ }
+ ]
+ }]
+
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_clientscope_by_name=return_value_get_clientscope_by_name,
+ get_clientscope_by_clientscopeid=return_value_get_clientscope_by_clientscopeid) \
+ as (mock_get_clientscope_by_name, mock_get_clientscope_by_clientscopeid, mock_create_clientscope,
+ mock_update_clientscope, mock_get_clientscope_protocolmapper_by_name,
+ mock_update_clientscope_protocolmappers,
+ mock_create_clientscope_protocolmapper, mock_delete_clientscope):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+        # Verify the number of calls on each mock
+ self.assertEqual(mock_get_clientscope_by_name.call_count, 1)
+ self.assertEqual(mock_create_clientscope.call_count, 0)
+ self.assertEqual(mock_get_clientscope_by_clientscopeid.call_count, 1)
+ self.assertEqual(mock_update_clientscope.call_count, 1)
+ self.assertEqual(mock_get_clientscope_protocolmapper_by_name.call_count, 3)
+ self.assertEqual(mock_update_clientscope_protocolmappers.call_count, 3)
+ self.assertEqual(mock_create_clientscope_protocolmapper.call_count, 0)
+ self.assertEqual(mock_delete_clientscope.call_count, 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_identity_provider.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_identity_provider.py
new file mode 100644
index 000000000..6fd258b8a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_identity_provider.py
@@ -0,0 +1,588 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_identity_provider
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_identity_provider, create_identity_provider=None, update_identity_provider=None, delete_identity_provider=None,
+ get_identity_provider_mappers=None, create_identity_provider_mapper=None, update_identity_provider_mapper=None,
+ delete_identity_provider_mapper=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_identity_provider.KeycloakAPI
+ with patch.object(obj, 'get_identity_provider', side_effect=get_identity_provider) \
+ as mock_get_identity_provider:
+ with patch.object(obj, 'create_identity_provider', side_effect=create_identity_provider) \
+ as mock_create_identity_provider:
+ with patch.object(obj, 'update_identity_provider', side_effect=update_identity_provider) \
+ as mock_update_identity_provider:
+ with patch.object(obj, 'delete_identity_provider', side_effect=delete_identity_provider) \
+ as mock_delete_identity_provider:
+ with patch.object(obj, 'get_identity_provider_mappers', side_effect=get_identity_provider_mappers) \
+ as mock_get_identity_provider_mappers:
+ with patch.object(obj, 'create_identity_provider_mapper', side_effect=create_identity_provider_mapper) \
+ as mock_create_identity_provider_mapper:
+ with patch.object(obj, 'update_identity_provider_mapper', side_effect=update_identity_provider_mapper) \
+ as mock_update_identity_provider_mapper:
+ with patch.object(obj, 'delete_identity_provider_mapper', side_effect=delete_identity_provider_mapper) \
+ as mock_delete_identity_provider_mapper:
+ yield mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, \
+ mock_delete_identity_provider, mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, \
+ mock_update_identity_provider_mapper, mock_delete_identity_provider_mapper
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
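+
+
+# A minimal illustration (hypothetical values) of how get_response dispatches
+# scripted responses: a dict is keyed by HTTP method, a list is consumed one
+# element per call via the shared counter, and a callable is invoked to build
+# a fresh response object.
+#
+#   responses = {'GET': [create_wrapper('{"a": 1}'), create_wrapper('{"a": 2}')]}
+#   counter = count()
+#   get_response(responses, 'GET', counter).read()  # -> '{"a": 1}'
+#   get_response(responses, 'GET', counter).read()  # -> '{"a": 2}'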
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
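+
+
+# Usage sketch (hypothetical): every invocation of the wrapper yields a fresh,
+# unread StringIO, so repeated mocked requests each receive the full payload.
+#
+#   factory = create_wrapper('{"access_token": "alongtoken"}')
+#   factory().read()  # -> '{"access_token": "alongtoken"}'
+#   factory().read()  # -> the same payload again, from a new StringIO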
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
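+
+
+# Inside this context manager, every open_url() call the module makes against the
+# token endpoint above receives a fake '{"access_token": "alongtoken"}' body, so
+# module authentication succeeds without a real Keycloak server.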
+
+
+class TestKeycloakIdentityProvider(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakIdentityProvider, self).setUp()
+ self.module = keycloak_identity_provider
+
+ def test_create_when_absent(self):
+ """Add a new identity provider"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'alias': 'oidc-idp',
+ 'display_name': 'OpenID Connect IdP',
+ 'enabled': True,
+ 'provider_id': 'oidc',
+ 'config': {
+ 'issuer': 'https://idp.example.com',
+ 'authorizationUrl': 'https://idp.example.com/auth',
+ 'tokenUrl': 'https://idp.example.com/token',
+ 'userInfoUrl': 'https://idp.example.com/userinfo',
+ 'clientAuthMethod': 'client_secret_post',
+ 'clientId': 'my-client',
+ 'clientSecret': 'secret',
+ 'syncMode': "FORCE",
+ },
+ 'mappers': [{
+ 'name': "first_name",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'config': {
+ 'claim': "first_name",
+ 'user.attribute': "first_name",
+ 'syncMode': "INHERIT",
+ }
+ }, {
+ 'name': "last_name",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'config': {
+ 'claim': "last_name",
+ 'user.attribute': "last_name",
+ 'syncMode': "INHERIT",
+ }
+ }]
+ }
+ return_value_idp_get = [
+ None,
+ {
+ "addReadTokenRoleOnCreate": False,
+ "alias": "oidc-idp",
+ "authenticateByDefault": False,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "no_log",
+ "issuer": "https://idp.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": True,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6",
+ "linkOnly": False,
+ "providerId": "oidc",
+ "storeToken": False,
+ "trustEmail": False,
+ }
+ ]
+ return_value_mappers_get = [
+ [{
+ "config": {
+ "claim": "first_name",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }, {
+ "config": {
+ "claim": "last_name",
+ "syncMode": "INHERIT",
+ "user.attribute": "last_name"
+ },
+ "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "last_name"
+ }]
+ ]
+ return_value_idp_created = [None]
+ return_value_mapper_created = [None, None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
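+        # The test harness patches exit_json() to raise AnsibleExitJson, so a
+        # successful module run surfaces here as an exception whose first
+        # argument is the module's result dict (checked for 'changed' below).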
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get,
+ create_identity_provider=return_value_idp_created, create_identity_provider_mapper=return_value_mapper_created) \
+ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider,
+ mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper,
+ mock_delete_identity_provider_mapper):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_identity_provider.mock_calls), 2)
+ self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1)
+ self.assertEqual(len(mock_create_identity_provider.mock_calls), 1)
+ self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 2)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_update_when_present(self):
+ """Update existing identity provider"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'alias': 'oidc-idp',
+ 'display_name': 'OpenID Connect IdP',
+ 'enabled': True,
+ 'provider_id': 'oidc',
+ 'config': {
+ 'issuer': 'https://idp.example.com',
+ 'authorizationUrl': 'https://idp.example.com/auth',
+ 'tokenUrl': 'https://idp.example.com/token',
+ 'userInfoUrl': 'https://idp.example.com/userinfo',
+ 'clientAuthMethod': 'client_secret_post',
+ 'clientId': 'my-client',
+ 'clientSecret': 'secret',
+ 'syncMode': "FORCE"
+ },
+ 'mappers': [{
+ 'name': "username",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'config': {
+ 'claim': "username",
+ 'user.attribute': "username",
+ 'syncMode': "INHERIT",
+ }
+ }, {
+ 'name': "first_name",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'config': {
+ 'claim': "first_name",
+ 'user.attribute': "first_name",
+ 'syncMode': "INHERIT",
+ }
+ }, {
+ 'name': "last_name",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'config': {
+ 'claim': "last_name",
+ 'user.attribute': "last_name",
+ 'syncMode': "INHERIT",
+ }
+ }]
+ }
+ return_value_idp_get = [
+ {
+ "addReadTokenRoleOnCreate": False,
+ "alias": "oidc-idp",
+ "authenticateByDefault": False,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "no_log",
+ "issuer": "https://idp.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP changeme",
+ "enabled": True,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6",
+ "linkOnly": False,
+ "providerId": "oidc",
+ "storeToken": False,
+ "trustEmail": False,
+ },
+ {
+ "addReadTokenRoleOnCreate": False,
+ "alias": "oidc-idp",
+ "authenticateByDefault": False,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "no_log",
+ "issuer": "https://idp.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": True,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6",
+ "linkOnly": False,
+ "providerId": "oidc",
+ "storeToken": False,
+ "trustEmail": False,
+ }
+ ]
+ return_value_mappers_get = [
+ [{
+ 'config': {
+ 'claim': "username",
+ 'syncMode': "INHERIT",
+ 'user.attribute': "username"
+ },
+ "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'name': "username"
+ }, {
+ "config": {
+ "claim": "first_name_changeme",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }],
+ [{
+ 'config': {
+ 'claim': "username",
+ 'syncMode': "INHERIT",
+ 'user.attribute': "username"
+ },
+ "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'name': "username"
+ }, {
+ "config": {
+ "claim": "first_name_changeme",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }],
+ [{
+ 'config': {
+ 'claim': "username",
+ 'syncMode': "INHERIT",
+ 'user.attribute': "username"
+ },
+ "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'name': "username"
+ }, {
+ "config": {
+ "claim": "first_name_changeme",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }],
+ [{
+ 'config': {
+ 'claim': "username",
+ 'syncMode': "INHERIT",
+ 'user.attribute': "username"
+ },
+ "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'name': "username"
+ }, {
+ "config": {
+ "claim": "first_name_changeme",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }],
+ [{
+ 'config': {
+ 'claim': "username",
+ 'syncMode': "INHERIT",
+ 'user.attribute': "username"
+ },
+ "id": "616f11ba-b9ae-42ae-bd1b-bc618741c10b",
+ 'identityProviderAlias': "oidc-idp",
+ 'identityProviderMapper': "oidc-user-attribute-idp-mapper",
+ 'name': "username"
+ }, {
+ "config": {
+ "claim": "first_name",
+ "syncMode": "INHERIT",
+ "user.attribute": "first_name"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "first_name"
+ }, {
+ "config": {
+ "claim": "last_name",
+ "syncMode": "INHERIT",
+ "user.attribute": "last_name"
+ },
+ "id": "f00c61e0-34d9-4bed-82d1-7e45acfefc09",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "last_name"
+ }]
+ ]
+ return_value_idp_updated = [None]
+ return_value_mapper_updated = [None]
+ return_value_mapper_created = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get,
+ update_identity_provider=return_value_idp_updated, update_identity_provider_mapper=return_value_mapper_updated,
+ create_identity_provider_mapper=return_value_mapper_created) \
+ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider,
+ mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper,
+ mock_delete_identity_provider_mapper):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_identity_provider.mock_calls), 2)
+ self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 5)
+ self.assertEqual(len(mock_update_identity_provider.mock_calls), 1)
+ self.assertEqual(len(mock_update_identity_provider_mapper.mock_calls), 1)
+ self.assertEqual(len(mock_create_identity_provider_mapper.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_absent(self):
+ """Remove an absent identity provider"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'alias': 'oidc-idp',
+ 'state': 'absent',
+ }
+ return_value_idp_get = [None]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_identity_provider=return_value_idp_get) \
+ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider,
+ mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper,
+ mock_delete_identity_provider_mapper):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_identity_provider.mock_calls), 1)
+ self.assertEqual(len(mock_delete_identity_provider.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_present(self):
+ """Remove an existing identity provider"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'alias': 'oidc-idp',
+ 'state': 'absent',
+ }
+ return_value_idp_get = [
+ {
+ "addReadTokenRoleOnCreate": False,
+ "alias": "oidc-idp",
+ "authenticateByDefault": False,
+ "config": {
+ "authorizationUrl": "https://idp.example.com/auth",
+ "clientAuthMethod": "client_secret_post",
+ "clientId": "my-client",
+ "clientSecret": "no_log",
+ "issuer": "https://idp.example.com",
+ "syncMode": "FORCE",
+ "tokenUrl": "https://idp.example.com/token",
+ "userInfoUrl": "https://idp.example.com/userinfo"
+ },
+ "displayName": "OpenID Connect IdP",
+ "enabled": True,
+ "firstBrokerLoginFlowAlias": "first broker login",
+ "internalId": "7ab437d5-f2bb-4ecc-91a8-315349454da6",
+ "linkOnly": False,
+ "providerId": "oidc",
+ "storeToken": False,
+ "trustEmail": False,
+ },
+ None
+ ]
+ return_value_mappers_get = [
+ [{
+ "config": {
+ "claim": "email",
+ "syncMode": "INHERIT",
+ "user.attribute": "email"
+ },
+ "id": "5fde49bb-93bd-4f5d-97d6-c5d0c1d07aef",
+ "identityProviderAlias": "oidc-idp",
+ "identityProviderMapper": "oidc-user-attribute-idp-mapper",
+ "name": "email"
+ }]
+ ]
+ return_value_idp_deleted = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_identity_provider=return_value_idp_get, get_identity_provider_mappers=return_value_mappers_get,
+ delete_identity_provider=return_value_idp_deleted) \
+ as (mock_get_identity_provider, mock_create_identity_provider, mock_update_identity_provider, mock_delete_identity_provider,
+ mock_get_identity_provider_mappers, mock_create_identity_provider_mapper, mock_update_identity_provider_mapper,
+ mock_delete_identity_provider_mapper):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_identity_provider.mock_calls), 1)
+ self.assertEqual(len(mock_get_identity_provider_mappers.mock_calls), 1)
+ self.assertEqual(len(mock_delete_identity_provider.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm.py
new file mode 100644
index 000000000..72993cbdf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_realm
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_realm_by_id, create_realm=None, update_realm=None, delete_realm=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_realm.KeycloakAPI
+ with patch.object(obj, 'get_realm_by_id', side_effect=get_realm_by_id) as mock_get_realm_by_id:
+ with patch.object(obj, 'create_realm', side_effect=create_realm) as mock_create_realm:
+ with patch.object(obj, 'update_realm', side_effect=update_realm) as mock_update_realm:
+ with patch.object(obj, 'delete_realm', side_effect=delete_realm) as mock_delete_realm:
+ yield mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakRealm(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakRealm, self).setUp()
+ self.module = keycloak_realm
+
+ def test_create_when_absent(self):
+ """Add a new realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ }
+ return_value_absent = [None, {'id': 'realm-name', 'realm': 'realm-name', 'enabled': True}]
+ return_value_created = [{
+ 'code': 201,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ }]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_by_id=return_value_absent, create_realm=return_value_created) \
+ as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2)
+ self.assertEqual(len(mock_create_realm.mock_calls), 1)
+ self.assertEqual(len(mock_update_realm.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_when_present_with_change(self):
+ """Update with change a realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': False
+ }
+        return_value_present = [
+ {
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ },
+ {
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': False
+ }
+ ]
+ return_value_updated = [{
+ 'code': 201,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': False
+ }]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+            with patch_keycloak_api(get_realm_by_id=return_value_present, update_realm=return_value_updated) \
+ as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2)
+ self.assertEqual(len(mock_create_realm.mock_calls), 0)
+ self.assertEqual(len(mock_update_realm.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_when_present_no_change(self):
+ """Update without change a realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ }
+        return_value_present = [
+ {
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ },
+ {
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ }
+ ]
+ return_value_updated = [{
+ 'code': 201,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True
+ }]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+            with patch_keycloak_api(get_realm_by_id=return_value_present, update_realm=return_value_updated) \
+ as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_by_id.mock_calls), 2)
+ self.assertEqual(len(mock_create_realm.mock_calls), 0)
+ self.assertEqual(len(mock_update_realm.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_absent(self):
+ """Remove an absent realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True,
+ 'state': 'absent'
+ }
+ return_value_absent = [None]
+ return_value_deleted = [None]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_by_id=return_value_absent, delete_realm=return_value_deleted) \
+ as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1)
+ self.assertEqual(len(mock_delete_realm.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_present(self):
+ """Remove a present realm"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'id': 'realm-name',
+ 'realm': 'realm-name',
+ 'enabled': True,
+ 'state': 'absent'
+ }
+        return_value_present = [
+ {
+ 'id': 'realm-name',
+ 'realm': 'realm-name'
+ }]
+ return_value_deleted = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+            with patch_keycloak_api(get_realm_by_id=return_value_present, delete_realm=return_value_deleted) \
+ as (mock_get_realm_by_id, mock_create_realm, mock_update_realm, mock_delete_realm):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_by_id.mock_calls), 1)
+ self.assertEqual(len(mock_delete_realm.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm_info.py
new file mode 100644
index 000000000..41095a878
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_realm_info.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_realm_info
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_realm_info_by_id):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_realm_info.KeycloakAPI
+ with patch.object(obj, 'get_realm_info_by_id', side_effect=get_realm_info_by_id) as mock_get_realm_info_by_id:
+ yield mock_get_realm_info_by_id
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakRealmInfo(ModuleTestCase):
+    def setUp(self):
+        super(TestKeycloakRealmInfo, self).setUp()
+ self.module = keycloak_realm_info
+
+ def test_get_public_info(self):
+ """Get realm public info"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'realm': 'my-realm',
+ }
+ return_value = [
+ None,
+ {
+ "realm": "my-realm",
+ "public_key": "MIIBIjANBgkqhkiG9w0BAQEF...",
+ "token-service": "https://auth.mock.com/auth/realms/my-realm/protocol/openid-connect",
+ "account-service": "https://auth.mock.com/auth/realms/my-realm/account",
+ "tokens-not-before": 0,
+ }
+ ]
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_info_by_id=return_value) \
+                as mock_get_realm_info_by_id:
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_info_by_id.mock_calls), 1)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_role.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_role.py
new file mode 100644
index 000000000..c48c9771a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_role.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_role
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_realm_role, create_realm_role=None, update_realm_role=None, delete_realm_role=None):
+ """Mock context manager for patching the methods in PwPolicyIPAClient that contact the IPA server
+
+ Patches the `login` and `_post_json` methods
+
+ Keyword arguments are passed to the mock object that patches `_post_json`
+
+ No arguments are passed to the mock object that patches `login` because no tests require it
+
+ Example::
+
+ with patch_ipa(return_value={}) as (mock_login, mock_post):
+ ...
+ """
+
+ obj = keycloak_role.KeycloakAPI
+ with patch.object(obj, 'get_realm_role', side_effect=get_realm_role) as mock_get_realm_role:
+ with patch.object(obj, 'create_realm_role', side_effect=create_realm_role) as mock_create_realm_role:
+ with patch.object(obj, 'update_realm_role', side_effect=update_realm_role) as mock_update_realm_role:
+ with patch.object(obj, 'delete_realm_role', side_effect=delete_realm_role) as mock_delete_realm_role:
+ yield mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+def mock_good_connection():
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakRealmRole(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakRealmRole, self).setUp()
+ self.module = keycloak_role
+
+ def test_create_when_absent(self):
+ """Add a new realm role"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'name': 'role-name',
+ 'description': 'role-description',
+ }
+ return_value_absent = [
+ None,
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ }
+ ]
+ return_value_created = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_role=return_value_absent, create_realm_role=return_value_created) \
+ as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_role.mock_calls), 2)
+ self.assertEqual(len(mock_create_realm_role.mock_calls), 1)
+ self.assertEqual(len(mock_update_realm_role.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_when_present_with_change(self):
+ """Update with change a realm role"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'name': 'role-name',
+ 'description': 'new-role-description',
+ }
+ return_value_present = [
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ },
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "new-role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ }
+ ]
+ return_value_updated = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \
+ as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_role.mock_calls), 2)
+ self.assertEqual(len(mock_create_realm_role.mock_calls), 0)
+ self.assertEqual(len(mock_update_realm_role.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_when_present_no_change(self):
+ """Update without change a realm role"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'name': 'role-name',
+ 'description': 'role-description',
+ }
+ return_value_present = [
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ },
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ }
+ ]
+ return_value_updated = [None]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_role=return_value_present, update_realm_role=return_value_updated) \
+ as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_role.mock_calls), 1)
+ self.assertEqual(len(mock_create_realm_role.mock_calls), 0)
+ self.assertEqual(len(mock_update_realm_role.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_absent(self):
+ """Remove an absent realm role"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'name': 'role-name',
+ 'state': 'absent'
+ }
+ return_value_absent = [None]
+ return_value_deleted = [None]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_realm_role=return_value_absent, delete_realm_role=return_value_deleted) \
+ as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_role.mock_calls), 1)
+ self.assertEqual(len(mock_delete_realm_role.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_present(self):
+ """Remove a present realm role"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_password': 'admin',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_client_id': 'admin-cli',
+ 'validate_certs': True,
+ 'realm': 'realm-name',
+ 'name': 'role-name',
+ 'state': 'absent'
+ }
+        return_value_present = [
+ {
+ "attributes": {},
+ "clientRole": False,
+ "composite": False,
+ "containerId": "realm-name",
+ "description": "role-description",
+ "id": "90f1cdb6-be88-496e-89c6-da1fb6bc6966",
+ "name": "role-name",
+ }
+ ]
+ return_value_deleted = [None]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+            with patch_keycloak_api(get_realm_role=return_value_present, delete_realm_role=return_value_deleted) \
+ as (mock_get_realm_role, mock_create_realm_role, mock_update_realm_role, mock_delete_realm_role):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_realm_role.mock_calls), 1)
+ self.assertEqual(len(mock_delete_realm_role.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_user_federation.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_user_federation.py
new file mode 100644
index 000000000..8d3dcaa23
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_keycloak_user_federation.py
@@ -0,0 +1,582 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from contextlib import contextmanager
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+from ansible_collections.community.general.plugins.modules import keycloak_user_federation
+
+from itertools import count
+
+from ansible.module_utils.six import StringIO
+
+
+@contextmanager
+def patch_keycloak_api(get_components=None, get_component=None, create_component=None, update_component=None, delete_component=None):
+ """Mock context manager for patching the methods in KeycloakAPI
+ """
+
+ obj = keycloak_user_federation.KeycloakAPI
+ with patch.object(obj, 'get_components', side_effect=get_components) \
+ as mock_get_components:
+ with patch.object(obj, 'get_component', side_effect=get_component) \
+ as mock_get_component:
+ with patch.object(obj, 'create_component', side_effect=create_component) \
+ as mock_create_component:
+ with patch.object(obj, 'update_component', side_effect=update_component) \
+ as mock_update_component:
+ with patch.object(obj, 'delete_component', side_effect=delete_component) \
+ as mock_delete_component:
+ yield mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component
+
+
+def get_response(object_with_future_response, method, get_id_call_count):
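+    # Resolve the canned response: call it if it is callable, select by HTTP
+    # method for dicts, and step through lists with the shared call counter.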
+ if callable(object_with_future_response):
+ return object_with_future_response()
+ if isinstance(object_with_future_response, dict):
+ return get_response(
+ object_with_future_response[method], method, get_id_call_count)
+ if isinstance(object_with_future_response, list):
+ call_number = next(get_id_call_count)
+ return get_response(
+ object_with_future_response[call_number], method, get_id_call_count)
+ return object_with_future_response
+
+
+def build_mocked_request(get_id_user_count, response_dict):
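+    # Build a side_effect for the patched open_url: the canned response is
+    # looked up by request URL and resolved through get_response().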
+ def _mocked_requests(*args, **kwargs):
+ url = args[0]
+ method = kwargs['method']
+ future_response = response_dict.get(url, None)
+ return get_response(future_response, method, get_id_user_count)
+ return _mocked_requests
+
+
+def create_wrapper(text_as_string):
+ """Allow to mock many times a call to one address.
+ Without this function, the StringIO is empty for the second call.
+ """
+ def _create_wrapper():
+ return StringIO(text_as_string)
+ return _create_wrapper
+
+
+def mock_good_connection():
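+    # Patch open_url so authentication succeeds: every call to the token
+    # endpoint returns a fresh StringIO holding a fake access token.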
+ token_response = {
+ 'http://keycloak.url/auth/realms/master/protocol/openid-connect/token': create_wrapper('{"access_token": "alongtoken"}'), }
+ return patch(
+ 'ansible_collections.community.general.plugins.module_utils.identity.keycloak.keycloak.open_url',
+ side_effect=build_mocked_request(count(), token_response),
+ autospec=True
+ )
+
+
+class TestKeycloakUserFederation(ModuleTestCase):
+ def setUp(self):
+ super(TestKeycloakUserFederation, self).setUp()
+ self.module = keycloak_user_federation
+
+ def test_create_when_absent(self):
+ """Add a new user federation"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'realm': 'realm-name',
+ 'name': 'kerberos',
+ 'state': 'present',
+ 'provider_id': 'kerberos',
+ 'provider_type': 'org.keycloak.storage.UserStorageProvider',
+ 'config': {
+ 'priority': 0,
+ 'enabled': True,
+ 'cachePolicy': 'DEFAULT',
+ 'kerberosRealm': 'REALM',
+ 'serverPrincipal': 'princ',
+ 'keyTab': 'keytab',
+ 'allowPasswordAuthentication': False,
+ 'updateProfileFirstLogin': False,
+ },
+ }
+ return_value_component_create = [
+ {
+ "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9",
+ "name": "kerberos",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider",
+ "parentId": "kerberos",
+ "config": {
+ "serverPrincipal": [
+ "princ"
+ ],
+ "allowPasswordAuthentication": [
+ "false"
+ ],
+ "keyTab": [
+ "keytab"
+ ],
+ "cachePolicy": [
+ "DEFAULT"
+ ],
+ "updateProfileFirstLogin": [
+ "false"
+ ],
+ "kerberosRealm": [
+ "REALM"
+ ],
+ "priority": [
+ "0"
+ ],
+ "enabled": [
+ "true"
+ ]
+ }
+ }
+ ]
+ return_value_components_get = [
+ [], []
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \
+ as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_components.mock_calls), 1)
+ self.assertEqual(len(mock_get_component.mock_calls), 0)
+ self.assertEqual(len(mock_create_component.mock_calls), 1)
+ self.assertEqual(len(mock_update_component.mock_calls), 0)
+ self.assertEqual(len(mock_delete_component.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_when_present(self):
+ """Update existing user federation"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'realm': 'realm-name',
+ 'name': 'kerberos',
+ 'state': 'present',
+ 'provider_id': 'kerberos',
+ 'provider_type': 'org.keycloak.storage.UserStorageProvider',
+ 'config': {
+ 'priority': 0,
+ 'enabled': True,
+ 'cachePolicy': 'DEFAULT',
+ 'kerberosRealm': 'REALM',
+ 'serverPrincipal': 'princ',
+ 'keyTab': 'keytab',
+ 'allowPasswordAuthentication': False,
+ 'updateProfileFirstLogin': False,
+ },
+ }
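+        # The existing component reports enabled=false while the task requests
+        # enabled=true, so an update call and changed=True are expected.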
+ return_value_components_get = [
+ [
+ {
+ "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9",
+ "name": "kerberos",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider",
+ "parentId": "kerberos",
+ "config": {
+ "serverPrincipal": [
+ "princ"
+ ],
+ "allowPasswordAuthentication": [
+ "false"
+ ],
+ "keyTab": [
+ "keytab"
+ ],
+ "cachePolicy": [
+ "DEFAULT"
+ ],
+ "updateProfileFirstLogin": [
+ "false"
+ ],
+ "kerberosRealm": [
+ "REALM"
+ ],
+ "priority": [
+ "0"
+ ],
+ "enabled": [
+ "false"
+ ]
+ }
+ }
+ ],
+ []
+ ]
+ return_value_component_get = [
+ {
+ "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9",
+ "name": "kerberos",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider",
+ "parentId": "kerberos",
+ "config": {
+ "serverPrincipal": [
+ "princ"
+ ],
+ "allowPasswordAuthentication": [
+ "false"
+ ],
+ "keyTab": [
+ "keytab"
+ ],
+ "cachePolicy": [
+ "DEFAULT"
+ ],
+ "updateProfileFirstLogin": [
+ "false"
+ ],
+ "kerberosRealm": [
+ "REALM"
+ ],
+ "priority": [
+ "0"
+ ],
+ "enabled": [
+ "true"
+ ]
+ }
+ }
+ ]
+ return_value_component_update = [
+ None
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_components=return_value_components_get, get_component=return_value_component_get,
+ update_component=return_value_component_update) \
+ as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_components.mock_calls), 2)
+ self.assertEqual(len(mock_get_component.mock_calls), 1)
+ self.assertEqual(len(mock_create_component.mock_calls), 0)
+ self.assertEqual(len(mock_update_component.mock_calls), 1)
+ self.assertEqual(len(mock_delete_component.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_create_with_mappers(self):
+ """Add a new user federation with mappers"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'realm': 'realm-name',
+ 'name': 'ldap',
+ 'state': 'present',
+ 'provider_id': 'ldap',
+ 'provider_type': 'org.keycloak.storage.UserStorageProvider',
+ 'config': {
+ 'priority': 0,
+ 'enabled': True,
+ 'cachePolicy': 'DEFAULT',
+ 'batchSizeForSync': 1000,
+ 'editMode': 'READ_ONLY',
+ 'importEnabled': True,
+ 'syncRegistrations': False,
+ 'vendor': 'other',
+ 'usernameLDAPAttribute': 'uid',
+ 'rdnLDAPAttribute': 'uid',
+ 'uuidLDAPAttribute': 'entryUUID',
+ 'userObjectClasses': 'inetOrgPerson, organizationalPerson',
+ 'connectionUrl': 'ldaps://ldap.example.com:636',
+ 'usersDn': 'ou=Users,dc=example,dc=com',
+ 'authType': 'none',
+ 'searchScope': 1,
+ 'validatePasswordPolicy': False,
+ 'trustEmail': False,
+ 'useTruststoreSpi': 'ldapsOnly',
+ 'connectionPooling': True,
+ 'pagination': True,
+ 'allowKerberosAuthentication': False,
+ 'debug': False,
+ 'useKerberosForPasswordAuthentication': False,
+ },
+ 'mappers': [
+ {
+ 'name': 'full name',
+ 'providerId': 'full-name-ldap-mapper',
+ 'providerType': 'org.keycloak.storage.ldap.mappers.LDAPStorageMapper',
+ 'config': {
+ 'ldap.full.name.attribute': 'cn',
+ 'read.only': True,
+ 'write.only': False,
+ }
+ }
+ ]
+ }
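+        # create_component is expected twice: once for the federation itself
+        # and once for the 'full name' mapper.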
+ return_value_components_get = [
+ [], []
+ ]
+ return_value_component_create = [
+ {
+ "id": "eb691537-b73c-4cd8-b481-6031c26499d8",
+ "name": "ldap",
+ "providerId": "ldap",
+ "providerType": "org.keycloak.storage.UserStorageProvider",
+ "parentId": "ldap",
+ "config": {
+ "pagination": [
+ "true"
+ ],
+ "connectionPooling": [
+ "true"
+ ],
+ "usersDn": [
+ "ou=Users,dc=example,dc=com"
+ ],
+ "cachePolicy": [
+ "DEFAULT"
+ ],
+ "useKerberosForPasswordAuthentication": [
+ "false"
+ ],
+ "importEnabled": [
+ "true"
+ ],
+ "enabled": [
+ "true"
+ ],
+ "usernameLDAPAttribute": [
+ "uid"
+ ],
+ "vendor": [
+ "other"
+ ],
+ "uuidLDAPAttribute": [
+ "entryUUID"
+ ],
+ "connectionUrl": [
+ "ldaps://ldap.example.com:636"
+ ],
+ "allowKerberosAuthentication": [
+ "false"
+ ],
+ "syncRegistrations": [
+ "false"
+ ],
+ "authType": [
+ "none"
+ ],
+ "debug": [
+ "false"
+ ],
+ "searchScope": [
+ "1"
+ ],
+ "useTruststoreSpi": [
+ "ldapsOnly"
+ ],
+ "trustEmail": [
+ "false"
+ ],
+ "priority": [
+ "0"
+ ],
+ "userObjectClasses": [
+ "inetOrgPerson, organizationalPerson"
+ ],
+ "rdnLDAPAttribute": [
+ "uid"
+ ],
+ "editMode": [
+ "READ_ONLY"
+ ],
+ "validatePasswordPolicy": [
+ "false"
+ ],
+ "batchSizeForSync": [
+ "1000"
+ ]
+ }
+ },
+ {
+ "id": "2dfadafd-8b34-495f-a98b-153e71a22311",
+ "name": "full name",
+ "providerId": "full-name-ldap-mapper",
+ "providerType": "org.keycloak.storage.ldap.mappers.LDAPStorageMapper",
+ "parentId": "eb691537-b73c-4cd8-b481-6031c26499d8",
+ "config": {
+ "ldap.full.name.attribute": [
+ "cn"
+ ],
+ "read.only": [
+ "true"
+ ],
+ "write.only": [
+ "false"
+ ]
+ }
+ }
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_components=return_value_components_get, create_component=return_value_component_create) \
+ as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_components.mock_calls), 2)
+ self.assertEqual(len(mock_get_component.mock_calls), 0)
+ self.assertEqual(len(mock_create_component.mock_calls), 2)
+ self.assertEqual(len(mock_update_component.mock_calls), 0)
+ self.assertEqual(len(mock_delete_component.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_absent(self):
+ """Remove an absent user federation"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'realm': 'realm-name',
+ 'name': 'kerberos',
+ 'state': 'absent',
+ }
+ return_value_components_get = [
+ []
+ ]
+ changed = False
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_components=return_value_components_get) \
+ as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_components.mock_calls), 1)
+ self.assertEqual(len(mock_get_component.mock_calls), 0)
+ self.assertEqual(len(mock_create_component.mock_calls), 0)
+ self.assertEqual(len(mock_update_component.mock_calls), 0)
+ self.assertEqual(len(mock_delete_component.mock_calls), 0)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+ def test_delete_when_present(self):
+ """Remove an existing user federation"""
+
+ module_args = {
+ 'auth_keycloak_url': 'http://keycloak.url/auth',
+ 'auth_realm': 'master',
+ 'auth_username': 'admin',
+ 'auth_password': 'admin',
+ 'realm': 'realm-name',
+ 'name': 'kerberos',
+ 'state': 'absent',
+ }
+ return_value_components_get = [
+ [
+ {
+ "id": "ebb7d999-60cc-4dfe-ab79-48f7bbd9d4d9",
+ "name": "kerberos",
+ "providerId": "kerberos",
+ "providerType": "org.keycloak.storage.UserStorageProvider",
+ "parentId": "kerberos",
+ "config": {
+ "serverPrincipal": [
+ "princ"
+ ],
+ "allowPasswordAuthentication": [
+ "false"
+ ],
+ "keyTab": [
+ "keytab"
+ ],
+ "cachePolicy": [
+ "DEFAULT"
+ ],
+ "updateProfileFirstLogin": [
+ "false"
+ ],
+ "kerberosRealm": [
+ "REALM"
+ ],
+ "priority": [
+ "0"
+ ],
+ "enabled": [
+ "false"
+ ]
+ }
+ }
+ ],
+ []
+ ]
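+        # The empty second listing covers the module's follow-up
+        # get_components call once the federation has been removed.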
+ return_value_component_delete = [
+ None
+ ]
+ changed = True
+
+ set_module_args(module_args)
+
+ # Run the module
+
+ with mock_good_connection():
+ with patch_keycloak_api(get_components=return_value_components_get, delete_component=return_value_component_delete) \
+ as (mock_get_components, mock_get_component, mock_create_component, mock_update_component, mock_delete_component):
+ with self.assertRaises(AnsibleExitJson) as exec_info:
+ self.module.main()
+
+ self.assertEqual(len(mock_get_components.mock_calls), 2)
+ self.assertEqual(len(mock_get_component.mock_calls), 0)
+ self.assertEqual(len(mock_create_component.mock_calls), 0)
+ self.assertEqual(len(mock_update_component.mock_calls), 0)
+ self.assertEqual(len(mock_delete_component.mock_calls), 1)
+
+ # Verify that the module's changed status matches what is expected
+ self.assertIs(exec_info.exception.args[0]['changed'], changed)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_linode.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_linode.py
new file mode 100644
index 000000000..9e7b158d8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_linode.py
@@ -0,0 +1,22 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules import linode
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+from .linode_conftest import api_key, auth # noqa: F401, pylint: disable=unused-import
+
+if not linode.HAS_LINODE:
+ pytestmark = pytest.mark.skip('test_linode.py requires the `linode-python` module')
+
+
+def test_name_is_a_required_parameter(api_key, auth):
+ with pytest.raises(SystemExit):
+ set_module_args({})
+ linode.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_linode_v4.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_linode_v4.py
new file mode 100644
index 000000000..915a82f08
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_linode_v4.py
@@ -0,0 +1,379 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+import pytest
+
+linode_apiv4 = pytest.importorskip('linode_api4')
+mandatory_py_version = pytest.mark.skipif(
+ sys.version_info < (2, 7),
+ reason='The linode_api4 dependency requires python2.7 or higher'
+)
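+# importorskip skips this whole module when linode_api4 is not installed; the
+# marker above is available for tests that additionally need Python >= 2.7.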
+
+from linode_api4.errors import ApiError as LinodeApiError
+from linode_api4 import LinodeClient
+
+from ansible_collections.community.general.plugins.modules import linode_v4
+from ansible_collections.community.general.plugins.module_utils.linode import get_user_agent
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from ansible_collections.community.general.tests.unit.compat import mock
+
+from .linode_conftest import access_token, no_access_token_in_env, default_args, mock_linode # noqa: F401, pylint: disable=unused-import
+
+
+def test_mandatory_state_is_validated(capfd):
+ with pytest.raises(SystemExit):
+ set_module_args({'label': 'foo'})
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert all(txt in results['msg'] for txt in ('state', 'required'))
+ assert results['failed'] is True
+
+
+def test_mandatory_label_is_validated(capfd):
+ with pytest.raises(SystemExit):
+ set_module_args({'state': 'present'})
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert all(txt in results['msg'] for txt in ('label', 'required'))
+ assert results['failed'] is True
+
+
+def test_mandatory_access_token_is_validated(default_args,
+ no_access_token_in_env,
+ capfd):
+ with pytest.raises(SystemExit):
+ set_module_args(default_args)
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'missing',
+ 'required',
+ 'access_token',
+ ))
+
+
+def test_mandatory_access_token_passed_in_env(default_args,
+ access_token):
+ set_module_args(default_args)
+
+ try:
+ module = linode_v4.initialise_module()
+ except SystemExit:
+ pytest.fail("'access_token' is passed in environment")
+
+ now_set_token = module.params['access_token']
+ assert now_set_token == os.environ['LINODE_ACCESS_TOKEN']
+
+
+def test_mandatory_access_token_passed_in_as_parameter(default_args,
+ no_access_token_in_env):
+ default_args.update({'access_token': 'foo'})
+ set_module_args(default_args)
+
+ try:
+ module = linode_v4.initialise_module()
+ except SystemExit:
+ pytest.fail("'access_token' is passed in as parameter")
+
+ assert module.params['access_token'] == 'foo'
+
+
+def test_instance_by_label_cannot_authenticate(capfd, access_token,
+ default_args):
+ set_module_args(default_args)
+ module = linode_v4.initialise_module()
+ client = LinodeClient(module.params['access_token'])
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, side_effect=LinodeApiError('foo')):
+ with pytest.raises(SystemExit):
+ linode_v4.maybe_instance_from_label(module, client)
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert 'Unable to query the Linode API' in results['msg']
+
+
+def test_no_instances_found_with_label_gives_none(default_args,
+ access_token):
+ set_module_args(default_args)
+ module = linode_v4.initialise_module()
+ client = LinodeClient(module.params['access_token'])
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ result = linode_v4.maybe_instance_from_label(module, client)
+
+ assert result is None
+
+
+def test_optional_region_is_validated(default_args, capfd, access_token):
+ default_args.update({'type': 'foo', 'image': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'region'
+ ))
+
+
+def test_optional_type_is_validated(default_args, capfd, access_token):
+ default_args.update({'region': 'foo', 'image': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'type'
+ ))
+
+
+def test_optional_image_is_validated(default_args, capfd, access_token):
+ default_args.update({'type': 'foo', 'region': 'bar'})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert all(txt in results['msg'] for txt in (
+ 'required',
+ 'together',
+ 'image'
+ ))
+
+
+@pytest.mark.parametrize('value', [True, False])
+def test_private_ip_valid_values(default_args, access_token, value):
+ default_args.update({'private_ip': value})
+ set_module_args(default_args)
+
+ module = linode_v4.initialise_module()
+
+ assert module.params['private_ip'] is value
+
+
+@pytest.mark.parametrize('value', ['not-a-bool', 42])
+def test_private_ip_invalid_values(default_args, capfd, access_token, value):
+ default_args.update({'private_ip': value})
+ set_module_args(default_args)
+
+ with pytest.raises(SystemExit):
+ linode_v4.initialise_module()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['failed'] is True
+ assert 'not a valid boolean' in results['msg']
+
+
+def test_private_ip_default_value(default_args, access_token):
+ default_args.pop('private_ip', None)
+ set_module_args(default_args)
+
+ module = linode_v4.initialise_module()
+
+ assert module.params['private_ip'] is False
+
+
+def test_private_ip_is_forwarded_to_linode(default_args, mock_linode, access_token):
+ default_args.update({'private_ip': True})
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit):
+ target = 'linode_api4.linode_client.LinodeGroup.instance_create'
+ with mock.patch(target, return_value=(mock_linode, 'passw0rd')) as instance_create_mock:
+ linode_v4.main()
+
+ args, kwargs = instance_create_mock.call_args
+ assert kwargs['private_ip'] is True
+
+
+def test_instance_already_created(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz'
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[mock_linode]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is False
+ assert 'root_password' not in results['instance']
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+
+
+def test_instance_to_be_created_without_root_pass(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz'
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ target = 'linode_api4.linode_client.LinodeGroup.instance_create'
+ with mock.patch(target, return_value=(mock_linode, 'passw0rd')):
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+ assert results['instance']['root_pass'] == 'passw0rd'
+
+
+def test_instance_to_be_created_with_root_pass(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({
+ 'type': 'foo',
+ 'region': 'bar',
+ 'image': 'baz',
+ 'root_pass': 'passw0rd',
+ })
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ target = 'linode_api4.linode_client.LinodeGroup.instance_create'
+ with mock.patch(target, return_value=mock_linode):
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+ assert 'root_pass' not in results['instance']
+
+
+def test_instance_to_be_deleted(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({'state': 'absent'})
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[mock_linode]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is True
+ assert (
+ results['instance']['label'] ==
+ mock_linode._raw_json['label']
+ )
+
+
+def test_instance_already_deleted_no_change(default_args,
+ mock_linode,
+ capfd,
+ access_token):
+ default_args.update({'state': 'absent'})
+ set_module_args(default_args)
+
+ target = 'linode_api4.linode_client.LinodeGroup.instances'
+ with mock.patch(target, return_value=[]):
+ with pytest.raises(SystemExit) as sys_exit_exc:
+ linode_v4.main()
+
+ assert sys_exit_exc.value.code == 0
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results['changed'] is False
+ assert results['instance'] == {}
+
+
+def test_user_agent_created_properly():
+ try:
+ from ansible.module_utils.ansible_release import (
+ __version__ as ansible_version
+ )
+ except ImportError:
+ ansible_version = 'unknown'
+
+ expected_user_agent = 'Ansible-linode_v4_module/%s' % ansible_version
+ assert expected_user_agent == get_user_agent('linode_v4_module')
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_cmms.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_cmms.py
new file mode 100644
index 000000000..efbdad062
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_cmms.py
@@ -0,0 +1,101 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.plugins.modules import lxca_cmms
+
+
+@pytest.fixture(scope='module')
+@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
+def setup_module(close_conn):
+ close_conn.return_value = True
+
+
+class TestMyModule():
+ @pytest.mark.parametrize('patch_ansible_module',
+ [
+ {},
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_user": "USERID",
+ },
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_password": "Password",
+ },
+ {
+ "login_user": "USERID",
+ "login_password": "Password",
+ },
+ ],
+ indirect=['patch_ansible_module'])
+ @pytest.mark.usefixtures('patch_ansible_module')
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module', autospec=True)
+    def test_without_required_parameters(self, _execute_module, _setup_conn,
+ mocker, capfd, setup_module):
+ """Failure must occurs when all parameters are missing"""
+ with pytest.raises(SystemExit):
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = "Fake execution"
+ lxca_cmms.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.execute_module', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule', autospec=True)
+ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
+ expected_arguments_spec = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
+ 'cmms_by_chassis_uuid']),
+ auth_url=dict(required=True),
+ uuid=dict(default=None),
+ chassis=dict(default=None),
+ )
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = []
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "command_options": "cmms",
+ }
+ mod_obj.params = args
+ lxca_cmms.main()
+ assert mock.call(argument_spec=expected_arguments_spec,
+ supports_check_mode=False) == ansible_mod_cls.call_args
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms._cmms_by_uuid',
+ autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_cmms.AnsibleModule',
+ autospec=True)
+ def test__cmms_empty_list(self, ansible_mod_cls, _get_cmms, _setup_conn, setup_module):
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "uuid": "3C737AA5E31640CE949B10C129A8B01F",
+ "command_options": "cmms_by_uuid",
+ }
+ mod_obj.params = args
+ _setup_conn.return_value = "Fake connection"
+ empty_nodes_list = []
+ _get_cmms.return_value = empty_nodes_list
+ ret_cmms = _get_cmms(mod_obj, args)
+ assert mock.call(mod_obj, mod_obj.params) == _get_cmms.call_args
+ assert _get_cmms.return_value == ret_cmms
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_nodes.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_nodes.py
new file mode 100644
index 000000000..87effa0c0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_lxca_nodes.py
@@ -0,0 +1,103 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.plugins.modules import lxca_nodes
+
+
+@pytest.fixture(scope='module')
+@mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.close_conn', autospec=True)
+def setup_module(close_conn):
+ close_conn.return_value = True
+
+
+class TestMyModule():
+ @pytest.mark.parametrize('patch_ansible_module',
+ [
+ {},
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_user": "USERID",
+ },
+ {
+ "auth_url": "https://10.240.14.195",
+ "login_password": "Password",
+ },
+ {
+ "login_user": "USERID",
+ "login_password": "Password",
+ },
+ ],
+ indirect=['patch_ansible_module'])
+ @pytest.mark.usefixtures('patch_ansible_module')
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module', autospec=True)
+    def test_without_required_parameters(self, _execute_module, _setup_conn,
+ mocker, capfd, setup_module):
+ """Failure must occurs when all parameters are missing"""
+ with pytest.raises(SystemExit):
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = "Fake execution"
+ lxca_nodes.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.execute_module', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule', autospec=True)
+ def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
+ expected_arguments_spec = dict(
+ login_user=dict(required=True),
+ login_password=dict(required=True, no_log=True),
+ command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
+ 'nodes_by_chassis_uuid',
+ 'nodes_status_managed',
+ 'nodes_status_unmanaged']),
+ auth_url=dict(required=True),
+ uuid=dict(default=None),
+ chassis=dict(default=None),
+ )
+ _setup_conn.return_value = "Fake connection"
+ _execute_module.return_value = []
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "command_options": "nodes",
+ }
+ mod_obj.params = args
+ lxca_nodes.main()
+ assert mock.call(argument_spec=expected_arguments_spec,
+ supports_check_mode=False) == ansible_mod_cls.call_args
+
+ @mock.patch('ansible_collections.community.general.plugins.module_utils.remote_management.lxca.common.setup_conn', autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes._nodes_by_uuid',
+ autospec=True)
+ @mock.patch('ansible_collections.community.general.plugins.modules.lxca_nodes.AnsibleModule',
+ autospec=True)
+ def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
+ mod_obj = ansible_mod_cls.return_value
+ args = {
+ "auth_url": "https://10.243.30.195",
+ "login_user": "USERID",
+ "login_password": "password",
+ "uuid": "3C737AA5E31640CE949B10C129A8B01F",
+ "command_options": "nodes_by_uuid",
+ }
+ mod_obj.params = args
+ _setup_conn.return_value = "Fake connection"
+ empty_nodes_list = []
+ _get_nodes.return_value = empty_nodes_list
+ ret_nodes = _get_nodes(mod_obj, args)
+ assert mock.call(mod_obj, mod_obj.params) == _get_nodes.call_args
+ assert _get_nodes.return_value == ret_nodes
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_macports.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_macports.py
new file mode 100644
index 000000000..61de27654
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_macports.py
@@ -0,0 +1,35 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import macports
+
+import pytest
+
+TESTED_MODULE = macports.__name__
+
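+# Each case: raw `port installed` output, expected result for state="present",
+# expected result for state="active".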
+QUERY_PORT_TEST_CASES = [
+ pytest.param('', False, False, id='Not installed'),
+ pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28', True, False, id='Installed but not active'),
+ pytest.param(' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)', True, True, id='Installed and active'),
+ pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+ git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+''', True, False, id='2 versions installed, neither active'),
+ pytest.param(''' git @2.29.2_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28 (active)
+ git @2.28.1_0+credential_osxkeychain+diff_highlight+doc+pcre+perl5_28
+''', True, True, id='2 versions installed, one active'),
+]
+
+
+@pytest.mark.parametrize("run_cmd_return_val, present_expected, active_expected", QUERY_PORT_TEST_CASES)
+def test_macports_query_port(mocker, run_cmd_return_val, present_expected, active_expected):
+ module = mocker.Mock()
+ run_command = mocker.Mock()
+ run_command.return_value = (0, run_cmd_return_val, '')
+ module.run_command = run_command
+
+ assert macports.query_port(module, 'port', 'git', state="present") == present_expected
+ assert macports.query_port(module, 'port', 'git', state="active") == active_expected
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_maven_artifact.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_maven_artifact.py
new file mode 100644
index 000000000..7e2557449
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_maven_artifact.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules import maven_artifact
+from ansible.module_utils import basic
+
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+maven_metadata_example = b"""<?xml version="1.0" encoding="UTF-8"?>
+<metadata>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <versioning>
+ <latest>4.13-beta-2</latest>
+ <release>4.13-beta-2</release>
+ <versions>
+ <version>3.7</version>
+ <version>3.8</version>
+ <version>3.8.1</version>
+ <version>3.8.2</version>
+ <version>4.0</version>
+ <version>4.1</version>
+ <version>4.2</version>
+ <version>4.3</version>
+ <version>4.3.1</version>
+ <version>4.4</version>
+ <version>4.5</version>
+ <version>4.6</version>
+ <version>4.7</version>
+ <version>4.8</version>
+ <version>4.8.1</version>
+ <version>4.8.2</version>
+ <version>4.9</version>
+ <version>4.10</version>
+ <version>4.11-beta-1</version>
+ <version>4.11</version>
+ <version>4.12-beta-1</version>
+ <version>4.12-beta-2</version>
+ <version>4.12-beta-3</version>
+ <version>4.12</version>
+ <version>4.13-beta-1</version>
+ <version>4.13-beta-2</version>
+ </versions>
+ <lastUpdated>20190202141051</lastUpdated>
+ </versioning>
+</metadata>
+"""
+
+
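+# Version specs use Maven range syntax: square brackets are inclusive bounds,
+# parentheses are exclusive bounds, and an omitted bound leaves the range open.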
+@pytest.mark.parametrize('patch_ansible_module, version_by_spec, version_chosen', [
+ (None, "(,3.9]", "3.8.2"),
+ (None, "3.0", "3.8.2"),
+ (None, "[3.7]", "3.7"),
+ (None, "[4.10, 4.12]", "4.12"),
+ (None, "[4.10, 4.12)", "4.11"),
+ (None, "[2.0,)", "4.13-beta-2"),
+])
+def test_find_version_by_spec(mocker, version_by_spec, version_chosen):
+ _getContent = mocker.patch('ansible_collections.community.general.plugins.modules.maven_artifact.MavenDownloader._getContent')
+ _getContent.return_value = maven_metadata_example
+
+ artifact = maven_artifact.Artifact("junit", "junit", None, version_by_spec, "jar")
+ mvn_downloader = maven_artifact.MavenDownloader(basic.AnsibleModule, "https://repo1.maven.org/maven2")
+
+    assert mvn_downloader.find_version_by_spec(artifact) == version_chosen
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_modprobe.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_modprobe.py
new file mode 100644
index 000000000..18695695a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_modprobe.py
@@ -0,0 +1,485 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat.mock import Mock
+from ansible_collections.community.general.tests.unit.compat.mock import mock_open
+from ansible_collections.community.general.plugins.modules.modprobe import Modprobe, build_module
+
+
+class TestLoadModule(ModuleTestCase):
+ def setUp(self):
+ super(TestLoadModule, self).setUp()
+
+ self.mock_module_loaded = patch(
+ 'ansible_collections.community.general.plugins.modules.modprobe.Modprobe.module_loaded'
+ )
+ self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.module_loaded = self.mock_module_loaded.start()
+ self.run_command = self.mock_run_command.start()
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestLoadModule, self).tearDown()
+ self.mock_module_loaded.stop()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+
+ def test_load_module_success(self):
+ set_module_args(dict(
+ name='test',
+ state='present',
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+ self.module_loaded.side_effect = [True]
+ self.run_command.side_effect = [(0, '', '')]
+
+ modprobe = Modprobe(module)
+ modprobe.load_module()
+
+ assert modprobe.result == {
+ 'changed': True,
+ 'name': 'test',
+ 'params': '',
+ 'state': 'present',
+ }
+
+ def test_load_module_unchanged(self):
+ set_module_args(dict(
+ name='test',
+ state='present',
+ ))
+
+ module = build_module()
+
+ module.warn = Mock()
+
+ self.get_bin_path.side_effect = ['modprobe']
+ self.module_loaded.side_effect = [False]
+ self.run_command.side_effect = [(0, '', ''), (1, '', '')]
+
+ modprobe = Modprobe(module)
+ modprobe.load_module()
+
+ module.warn.assert_called_once_with('')
+
+
+class TestUnloadModule(ModuleTestCase):
+ def setUp(self):
+ super(TestUnloadModule, self).setUp()
+
+ self.mock_module_loaded = patch(
+ 'ansible_collections.community.general.plugins.modules.modprobe.Modprobe.module_loaded'
+ )
+ self.mock_run_command = patch('ansible.module_utils.basic.AnsibleModule.run_command')
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.module_loaded = self.mock_module_loaded.start()
+ self.run_command = self.mock_run_command.start()
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestUnloadModule, self).tearDown()
+ self.mock_module_loaded.stop()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+
+ def test_unload_module_success(self):
+ set_module_args(dict(
+ name='test',
+ state='absent',
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+ self.module_loaded.side_effect = [False]
+ self.run_command.side_effect = [(0, '', '')]
+
+ modprobe = Modprobe(module)
+ modprobe.unload_module()
+
+ assert modprobe.result == {
+ 'changed': True,
+ 'name': 'test',
+ 'params': '',
+ 'state': 'absent',
+ }
+
+ def test_unload_module_failure(self):
+ set_module_args(dict(
+ name='test',
+ state='absent',
+ ))
+
+ module = build_module()
+
+ module.fail_json = Mock()
+
+ self.get_bin_path.side_effect = ['modprobe']
+ self.module_loaded.side_effect = [True]
+ self.run_command.side_effect = [(1, '', '')]
+
+ modprobe = Modprobe(module)
+ modprobe.unload_module()
+
+ dummy_result = {
+ 'changed': False,
+ 'name': 'test',
+ 'state': 'absent',
+ 'params': '',
+ }
+
+ module.fail_json.assert_called_once_with(
+ msg='', rc=1, stdout='', stderr='', **dummy_result
+ )
+
+
+class TestModuleIsLoadedPersistently(ModuleTestCase):
+ def setUp(self):
+ if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7):
+            self.skipTest('mock_open does not support readline in earlier Python versions')
+
+ super(TestModuleIsLoadedPersistently, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestModuleIsLoadedPersistently, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_module_is_loaded(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'):
+ modprobe.modules_files = ['/etc/modules-load.d/dummy.conf']
+
+ assert modprobe.module_is_loaded_persistently
+
+ mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf')
+
+ def test_module_is_not_loaded_empty_file(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'):
+ modprobe.modules_files = ['/etc/modules-load.d/dummy.conf']
+
+ assert not modprobe.module_is_loaded_persistently
+
+ mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf')
+
+ def test_module_is_not_loaded_no_files(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'):
+ modprobe.modules_files = []
+
+ assert not modprobe.module_is_loaded_persistently
+
+
+class TestPermanentParams(ModuleTestCase):
+ def setUp(self):
+ if (sys.version_info[0] == 3 and sys.version_info[1] < 7) or (sys.version_info[0] == 2 and sys.version_info[1] < 7):
+            self.skipTest('mock_open does not support readline in earlier Python versions')
+ super(TestPermanentParams, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestPermanentParams, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_module_permanent_params_exist(self):
+
+ files_content = [
+ 'options dummy numdummies=4\noptions dummy dummy_parameter1=6',
+ 'options dummy dummy_parameter2=5 #Comment\noptions notdummy notdummy_param=5'
+ ]
+ mock_files_content = [mock_open(read_data=content).return_value for content in files_content]
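+        # Each mock_open(...).return_value is a distinct file handle; used as
+        # a side_effect, successive open() calls read successive files.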
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file:
+ mocked_file.side_effect = mock_files_content
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'):
+ modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf']
+
+ assert modprobe.permanent_params == set(['numdummies=4', 'dummy_parameter1=6', 'dummy_parameter2=5'])
+
+ def test_module_permanent_params_empty(self):
+
+ files_content = [
+ '',
+ ''
+ ]
+ mock_files_content = [mock_open(read_data=content).return_value for content in files_content]
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='')) as mocked_file:
+ mocked_file.side_effect = mock_files_content
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'):
+ modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf', '/etc/modprobe.d/dummy2.conf']
+
+ assert modprobe.permanent_params == set()
+
+
+class TestCreateModuleFile(ModuleTestCase):
+    def setUp(self):
+        super(TestCreateModuleFile, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+        super(TestCreateModuleFile, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_create_file(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file:
+ modprobe.create_module_file()
+ mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf', 'w')
+ mocked_file().write.assert_called_once_with('dummy\n')
+
+
+class TestCreateModuleOptionsFile(ModuleTestCase):
+    def setUp(self):
+        super(TestCreateModuleOptionsFile, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+        super(TestCreateModuleOptionsFile, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_create_file(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ params='numdummies=4',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open()) as mocked_file:
+ modprobe.create_module_options_file()
+ mocked_file.assert_called_once_with('/etc/modprobe.d/dummy.conf', 'w')
+ mocked_file().write.assert_called_once_with('options dummy numdummies=4\n')
+
+
+class TestDisableOldParams(ModuleTestCase):
+ def setUp(self):
+ super(TestDisableOldParams, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestDisableOldParams, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_disable_old_params_file_changed(self):
+ mock_data = 'options dummy numdummies=4'
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ params='numdummies=4',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'):
+ modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf']
+ modprobe.disable_old_params()
+ mocked_file.assert_called_with('/etc/modprobe.d/dummy1.conf', 'w')
+ mocked_file().write.assert_called_once_with('#options dummy numdummies=4')
+
+ def test_disable_old_params_file_unchanged(self):
+ mock_data = 'options notdummy numdummies=4'
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ params='numdummies=4',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data=mock_data)) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modprobe_files'):
+ modprobe.modprobe_files = ['/etc/modprobe.d/dummy1.conf']
+ modprobe.disable_old_params()
+ mocked_file.assert_called_once_with('/etc/modprobe.d/dummy1.conf')
+
+
+class TestDisableModulePermanent(ModuleTestCase):
+ def setUp(self):
+ super(TestDisableModulePermanent, self).setUp()
+
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ """Teardown."""
+ super(TestDisableModulePermanent, self).tearDown()
+
+ self.mock_get_bin_path.stop()
+
+ def test_disable_module_permanent_file_changed(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ params='numdummies=4',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='dummy')) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'):
+ modprobe.modules_files = ['/etc/modules-load.d/dummy.conf']
+ modprobe.disable_module_permanent()
+ mocked_file.assert_called_with('/etc/modules-load.d/dummy.conf', 'w')
+ mocked_file().write.assert_called_once_with('#dummy')
+
+ def test_disable_module_permanent_file_unchanged(self):
+
+ set_module_args(dict(
+ name='dummy',
+ state='present',
+ params='numdummies=4',
+ persistent='present'
+ ))
+
+ module = build_module()
+
+ self.get_bin_path.side_effect = ['modprobe']
+
+ modprobe = Modprobe(module)
+
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.open', mock_open(read_data='notdummy')) as mocked_file:
+ with patch('ansible_collections.community.general.plugins.modules.modprobe.Modprobe.modules_files'):
+ modprobe.modules_files = ['/etc/modules-load.d/dummy.conf']
+ modprobe.disable_module_permanent()
+ mocked_file.assert_called_once_with('/etc/modules-load.d/dummy.conf')
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_monit.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_monit.py
new file mode 100644
index 000000000..7f8f15dd9
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_monit.py
@@ -0,0 +1,159 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mock
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules import monit
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+
+
+TEST_OUTPUT = """
+%s '%s'
+ status %s
+ monitoring status Not monitored
+ monitoring mode active
+"""
+
+
+class MonitTest(unittest.TestCase):
+ def setUp(self):
+ self.module = mock.MagicMock()
+ self.module.exit_json.side_effect = AnsibleExitJson
+ self.module.fail_json.side_effect = AnsibleFailJson
+ self.monit = monit.Monit(self.module, 'monit', 'processX', 1)
+ self.monit._status_change_retry_count = 1
+ mock_sleep = mock.patch('time.sleep')
+ mock_sleep.start()
+ self.addCleanup(mock_sleep.stop)
+
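+    # Patch Monit.get_status so consecutive calls return the given statuses in order.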
+ def patch_status(self, side_effect):
+ if not isinstance(side_effect, list):
+ side_effect = [side_effect]
+ return mock.patch.object(self.monit, 'get_status', side_effect=side_effect)
+
+ def test_change_state_success(self):
+ with self.patch_status([monit.Status.OK, monit.Status.NOT_MONITORED]):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.stop()
+ self.module.fail_json.assert_not_called()
+ self.module.run_command.assert_called_with(['monit', 'stop', 'processX'], check_rc=True)
+
+ def test_change_state_fail(self):
+ with self.patch_status([monit.Status.OK] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.stop()
+
+ def test_reload_fail(self):
+ self.module.run_command.return_value = (1, 'stdout', 'stderr')
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.reload()
+
+ def test_reload(self):
+ self.module.run_command.return_value = (0, '', '')
+ with self.patch_status(monit.Status.OK):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.reload()
+
+ def test_wait_for_status_to_stop_pending(self):
+ status = [
+ monit.Status.MISSING,
+ monit.Status.DOES_NOT_EXIST,
+ monit.Status.INITIALIZING,
+ monit.Status.OK.pending(),
+ monit.Status.OK
+ ]
+ with self.patch_status(status) as get_status:
+ self.monit.wait_for_monit_to_stop_pending()
+ self.assertEqual(get_status.call_count, len(status))
+
+ def test_wait_for_status_change(self):
+ with self.patch_status([monit.Status.NOT_MONITORED, monit.Status.OK]) as get_status:
+ self.monit.wait_for_status_change(monit.Status.NOT_MONITORED)
+ self.assertEqual(get_status.call_count, 2)
+
+ def test_wait_for_status_change_fail(self):
+ with self.patch_status([monit.Status.OK] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.wait_for_status_change(monit.Status.OK)
+
+ def test_monitor(self):
+ with self.patch_status([monit.Status.NOT_MONITORED, monit.Status.OK.pending(), monit.Status.OK]):
+ with self.assertRaises(AnsibleExitJson):
+ self.monit.monitor()
+
+ def test_monitor_fail(self):
+ with self.patch_status([monit.Status.NOT_MONITORED] * 3):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.monitor()
+
+ def test_timeout(self):
+ self.monit.timeout = 0
+ with self.patch_status(monit.Status.NOT_MONITORED.pending()):
+ with self.assertRaises(AnsibleFailJson):
+ self.monit.wait_for_monit_to_stop_pending()
+
+
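+# StatusValue exposes one is_<name> property per entry in ALL_STATUS; only the
+# property matching the wrapped value should be true.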
+@pytest.mark.parametrize('status_name', monit.StatusValue.ALL_STATUS)
+def test_status_value(status_name):
+ value = getattr(monit.StatusValue, status_name.upper())
+ status = monit.StatusValue(value)
+ assert getattr(status, 'is_%s' % status_name)
+    # None of the other is_<name> flags may be set for this value.
+    assert not any(getattr(status, 'is_%s' % name) for name in monit.StatusValue.ALL_STATUS if name != status_name)
+
+
+BASIC_OUTPUT_CASES = [
+ (TEST_OUTPUT % ('Process', 'processX', name), getattr(monit.Status, name.upper()))
+ for name in monit.StatusValue.ALL_STATUS
+]
+
+
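+# _parse_status() maps raw `monit status` text for the watched service to a
+# Status; pending transitions resolve to the state the service is moving into,
+# and output for a different service name counts as MISSING.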
+@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
+ ('', monit.Status.MISSING),
+ (TEST_OUTPUT % ('Process', 'processY', 'OK'), monit.Status.MISSING),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - start pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Monitored - stop pending'), monit.Status.NOT_MONITORED),
+ (TEST_OUTPUT % ('Process', 'processX', 'Monitored - restart pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not Monitored - monitor pending'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Does not exist'), monit.Status.DOES_NOT_EXIST),
+ (TEST_OUTPUT % ('Process', 'processX', 'Not monitored'), monit.Status.NOT_MONITORED),
+ (TEST_OUTPUT % ('Process', 'processX', 'Running'), monit.Status.OK),
+ (TEST_OUTPUT % ('Process', 'processX', 'Execution failed | Does not exist'), monit.Status.EXECUTION_FAILED),
+])
+def test_parse_status(output, expected):
+ status = monit.Monit(None, '', 'processX', 0)._parse_status(output, '')
+ assert status == expected
+
+
+@pytest.mark.parametrize('output, expected', BASIC_OUTPUT_CASES + [
+ (TEST_OUTPUT % ('Process', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('File', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Fifo', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Filesystem', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Directory', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Remote host', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('System', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Program', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Network', 'processX', 'OK'), monit.Status.OK),
+ (TEST_OUTPUT % ('Unsupported', 'processX', 'OK'), monit.Status.MISSING),
+])
+def test_parse_status_supports_all_services(output, expected):
+ status = monit.Monit(None, '', 'processX', 0)._parse_status(output, '')
+ assert status == expected
+
+
+@pytest.mark.parametrize('output, expected', [
+ ('This is monit version 5.18.1', '5.18.1'),
+ ('This is monit version 12.18', '12.18'),
+ ('This is monit version 5.1.12', '5.1.12'),
+])
+def test_parse_version(output, expected):
+ module = mock.MagicMock()
+ module.run_command.return_value = (0, output, '')
+ raw_version, version_tuple = monit.Monit(module, '', 'processX', 0)._get_monit_version()
+ assert raw_version == expected
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_nmcli.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_nmcli.py
new file mode 100644
index 000000000..efd8284a3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_nmcli.py
@@ -0,0 +1,4261 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.common.text.converters import to_text
+from ansible_collections.community.general.plugins.modules import nmcli
+from ansible.module_utils.basic import AnsibleModule
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+TESTCASE_CONNECTION = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bond',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bond-slave',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'bridge',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'vlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'vxlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'gre',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'ipip',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'sit',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'dummy',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'gsm',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'wireguard',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'vpn',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'infiniband',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'type': 'macvlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'state': 'absent',
+ '_ansible_check_mode': True,
+ },
+]
+
+TESTCASE_GENERIC = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_GENERIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.route-metric: -1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_GENERIC_DIFF_CHECK = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.2',
+ 'route_metric4': -1,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_GENERIC_MODIFY_ROUTING_RULES = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'routing_rules4': ['priority 5 from 10.0.0.0/24 table 5000', 'priority 10 from 10.0.1.0/24 table 5001'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.routing-rules: priority 5 from 10.0.0.0/24 table 5000, priority 10 from 10.0.1.0/24 table 5001
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64',
+ 'next_hop': '2001:beef:cafe:10::2'}],
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv4.method: auto
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: manual
+ipv6.addresses: 2001:beef:cafe:10::1/64
+ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }
+ipv6.route-metric: -1
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'routes4': ['192.168.200.0/24 192.168.1.1'],
+ 'route_metric4': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'routes4_extended': [{'ip': '192.168.200.0/24', 'next_hop': '192.168.1.1'}],
+ 'route_metric4': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 192.168.1.10
+ipv4.routes: { ip = 192.168.200.0/24, nh = 192.168.1.1 }
+ipv4.route-metric: 10
+"""
+
+TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
+ 'route_metric6': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'}],
+ 'route_metric6': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv6.method: manual
+ipv6.addresses: 2001:beef:cafe:10::1/64
+ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }
+ipv6.route-metric: 10
+"""
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', 'fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5'],
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'},
+ {'ip': 'fd2e:8890:abcd:25::/64', 'next_hop': '2001:beef:cafe:10::5'}],
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv4.method: auto
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: manual
+ipv6.addresses: 2001:beef:cafe:10::1/64
+ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }; { ip = fd2e:8890:abcd:25::/64, nh = 2001:beef:cafe:10::5 }
+ipv6.route-metric: -1
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'method4': 'disabled',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2'],
+ 'route_metric6': 5,
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'method4': 'disabled',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'}],
+ 'route_metric6': 5,
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv4.method: auto
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: manual
+ipv6.addresses: 2001:beef:cafe:10::1/64
+ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }
+ipv6.route-metric: 5
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'method4': 'disabled',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6': ['fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2', 'fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5'],
+ 'route_metric6': 5,
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'method4': 'disabled',
+ 'ip6': '2001:beef:cafe:10::1/64',
+ 'routes6_extended': [{'ip': 'fd2e:446f:d85d:5::/64', 'next_hop': '2001:beef:cafe:10::2'},
+ {'ip': 'fd2e:8890:abcd:25::/64', 'next_hop': '2001:beef:cafe:10::5'}],
+ 'route_metric6': 5,
+ 'method6': 'manual',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+ipv4.method: auto
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: manual
+ipv6.addresses: 2001:beef:cafe:10::1/64
+ipv6.routes: { ip = fd2e:446f:d85d:5::/64, nh = 2001:beef:cafe:10::2 }; { ip = fd2e:8890:abcd:25::/64, nh = 2001:beef:cafe:10::5 }
+ipv6.route-metric: 5
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_GENERIC_DNS4_SEARCH = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ 'dns4_search': 'search.redhat.com',
+ 'dns6_search': 'search6.redhat.com',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.dns-search: search.redhat.com
+ipv4.may-fail: yes
+ipv6.dns-search: search6.redhat.com
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_GENERIC_ZONE = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ 'zone': 'external',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_GENERIC_ZONE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: generic_non_existant
+connection.autoconnect: yes
+connection.zone: external
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_GENERIC_ZONE_ONLY = [
+ {
+ 'type': 'generic',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'generic_non_existant',
+ 'state': 'present',
+ 'zone': 'public',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BOND = [
+ {
+ 'type': 'bond',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'bond_non_existant',
+ 'mode': 'active-backup',
+ 'xmit_hash_policy': 'layer3+4',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ 'primary': 'non_existent_primary',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BOND_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: bond_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+bond.options: mode=active-backup,primary=non_existent_primary,xmit_hash_policy=layer3+4
+"""
+
+TESTCASE_BRIDGE = [
+ {
+ 'type': 'bridge',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'br0_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'mac': '52:54:00:ab:cd:ef',
+ 'maxage': 100,
+ 'stp': True,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BRIDGE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: br0_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+bridge.mac-address: 52:54:00:AB:CD:EF
+bridge.stp: yes
+bridge.max-age: 100
+bridge.ageing-time: 300
+bridge.hello-time: 2
+bridge.priority: 128
+bridge.forward-delay: 15
+"""
+
+TESTCASE_BRIDGE_SLAVE = [
+ {
+ 'type': 'bridge-slave',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'br0_non_existant',
+ 'path_cost': 100,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: br0_non_existant
+connection.autoconnect: yes
+connection.slave-type: bridge
+ipv4.never-default: no
+bridge-port.path-cost: 100
+bridge-port.hairpin-mode: yes
+bridge-port.priority: 32
+"""
+
+TESTCASE_TEAM = [
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_TEAM_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: team0_non_existant
+connection.autoconnect: yes
+connection.type: team
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+team.runner: roundrobin
+team.runner-fast-rate: no
+"""
+
+TESTCASE_TEAM_HWADDR_POLICY_FAILS = [
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'runner_hwaddr_policy': 'by_active',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_TEAM_RUNNER_FAST_RATE = [
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'runner': 'lacp',
+ 'runner_fast_rate': True,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_TEAM_RUNNER_FAST_RATE_FAILS = [
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'runner_fast_rate': True,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'state': 'present',
+ 'runner_fast_rate': False,
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'state': 'present',
+ 'runner': 'activebackup',
+ 'runner_fast_rate': False,
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'team',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'team0_non_existant',
+ 'state': 'present',
+ 'runner': 'activebackup',
+ 'runner_fast_rate': True,
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_TEAM_RUNNER_FAST_RATE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: team0_non_existant
+connection.autoconnect: yes
+connection.type: team
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+team.runner: lacp
+team.runner-fast-rate: yes
+"""
+
+TESTCASE_TEAM_SLAVE = [
+ {
+ 'type': 'team-slave',
+ 'conn_name': 'non_existent_nw_slaved_device',
+ 'ifname': 'generic_slaved_non_existant',
+ 'master': 'team0_non_existant',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_TEAM_SLAVE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_slaved_device
+connection.interface-name: generic_slaved_non_existant
+connection.autoconnect: yes
+connection.master: team0_non_existant
+connection.slave-type: team
+802-3-ethernet.mtu: auto
+"""
+
+TESTCASE_VLAN = [
+ {
+ 'type': 'vlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'vlan_not_exists',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'vlanid': 10,
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_VLAN_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: vlan_not_exists
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+vlan.id: 10
+802-3-ethernet.mtu: auto
+"""
+
+TESTCASE_VXLAN = [
+ {
+ 'type': 'vxlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'vxlan-existent_nw_device',
+ 'vxlan_id': 11,
+ 'vxlan_local': '192.168.225.5',
+ 'vxlan_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_VXLAN_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: vxlan-existent_nw_device
+connection.autoconnect: yes
+vxlan.id: 11
+vxlan.local: 192.168.225.5
+vxlan.remote: 192.168.225.6
+"""
+
+TESTCASE_GRE = [
+ {
+ 'type': 'gre',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'gre-existent_nw_device',
+ 'ip_tunnel_dev': 'non_existent_gre_device',
+ 'ip_tunnel_local': '192.168.225.5',
+ 'ip_tunnel_remote': '192.168.225.6',
+ 'ip_tunnel_input_key': '1',
+ 'ip_tunnel_output_key': '2',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_GRE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: gre-existent_nw_device
+connection.autoconnect: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ip-tunnel.mode: gre
+ip-tunnel.parent: non_existent_gre_device
+ip-tunnel.local: 192.168.225.5
+ip-tunnel.remote: 192.168.225.6
+ip-tunnel.input-key: 1
+ip-tunnel.output-key: 2
+"""
+
+TESTCASE_IPIP = [
+ {
+ 'type': 'ipip',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ipip-existent_nw_device',
+ 'ip_tunnel_dev': 'non_existent_ipip_device',
+ 'ip_tunnel_local': '192.168.225.5',
+ 'ip_tunnel_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_IPIP_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ipip-existent_nw_device
+connection.autoconnect: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ip-tunnel.mode: ipip
+ip-tunnel.parent: non_existent_ipip_device
+ip-tunnel.local: 192.168.225.5
+ip-tunnel.remote: 192.168.225.6
+"""
+
+TESTCASE_SIT = [
+ {
+ 'type': 'sit',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'sit-existent_nw_device',
+ 'ip_tunnel_dev': 'non_existent_sit_device',
+ 'ip_tunnel_local': '192.168.225.5',
+ 'ip_tunnel_remote': '192.168.225.6',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_SIT_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: sit-existent_nw_device
+connection.autoconnect: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ip-tunnel.mode: sit
+ip-tunnel.parent: non_existent_sit_device
+ip-tunnel.local: 192.168.225.5
+ip-tunnel.remote: 192.168.225.6
+"""
+
+TESTCASE_ETHERNET_DHCP = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'dhcp_client_id': '00:11:22:AA:BB:CC:DD',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: auto
+ipv4.dhcp-client-id: 00:11:22:AA:BB:CC:DD
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_STATIC = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'dns4': ['1.1.1.1', '8.8.8.8'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip4': ['10.10.10.10/32', '10.10.20.10/32'],
+ 'gw4': '10.10.10.1',
+ 'dns4': ['1.1.1.1', '8.8.8.8'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip4': ['10.10.10.10', '10.10.20.10'],
+ 'gw4': '10.10.10.1',
+ 'dns4': ['1.1.1.1', '8.8.8.8'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': '2001:db8::cafe/128',
+ 'gw6': '2001:db8::cafa',
+ 'dns6': ['2001:4860:4860::8888'],
+ 'state': 'present',
+ 'ip_privacy6': 'prefer-public-addr',
+ 'addr_gen_mode6': 'eui64',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES = [
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': ['2001:db8::cafe/128', '2002:db8::cafe/128'],
+ 'gw6': '2001:db8::cafa',
+ 'dns6': ['2001:4860:4860::8888', '2001:4860:4860::8844'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+ {
+ 'type': 'ethernet',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'ethernet_non_existant',
+ 'ip6': ['2001:db8::cafe', '2002:db8::cafe'],
+ 'gw6': '2001:db8::cafa',
+ 'dns6': ['2001:4860:4860::8888', '2001:4860:4860::8844'],
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/32, 10.10.20.10/32
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+"""
+
+TESTCASE_ETHERNET_STATIC_IP6_ADDRESS_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv6.method: manual
+ipv6.addresses: 2001:db8::cafe/128
+ipv6.gateway: 2001:db8::cafa
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.never-default: no
+ipv6.may-fail: yes
+ipv6.dns: 2001:4860:4860::8888,2001:4860:4860::8844
+ipv4.method: disabled
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+"""
+
+
+TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv6.method: manual
+ipv6.addresses: 2001:db8::cafe/128, 2002:db8::cafe/128
+ipv6.gateway: 2001:db8::cafa
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.never-default: no
+ipv6.may-fail: yes
+ipv6.dns: 2001:4860:4860::8888,2001:4860:4860::8844
+ipv4.method: disabled
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+"""
+
+TESTCASE_WIRELESS = [
+ {
+ 'type': 'wifi',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'wireless_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'ssid': 'Brittany',
+ 'wifi': {
+ 'hidden': True,
+ 'mode': 'ap',
+ },
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_SECURE_WIRELESS = [
+ {
+ 'type': 'wifi',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'wireless_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'ssid': 'Brittany',
+ 'wifi_sec': {
+ 'key-mgmt': 'wpa-psk',
+ 'psk': 'VERY_SECURE_PASSWORD',
+ },
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT = """\
+802-11-wireless.ssid: --
+802-11-wireless.mode: infrastructure
+802-11-wireless.band: --
+802-11-wireless.channel: 0
+802-11-wireless.bssid: --
+802-11-wireless.rate: 0
+802-11-wireless.tx-power: 0
+802-11-wireless.mac-address: --
+802-11-wireless.cloned-mac-address: --
+802-11-wireless.generate-mac-address-mask:--
+802-11-wireless.mac-address-blacklist: --
+802-11-wireless.mac-address-randomization:default
+802-11-wireless.mtu: auto
+802-11-wireless.seen-bssids: --
+802-11-wireless.hidden: no
+802-11-wireless.powersave: 0 (default)
+802-11-wireless.wake-on-wlan: 0x1 (default)
+802-11-wireless.ap-isolation: -1 (default)
+"""
+
+TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT = \
+ TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT + """\
+802-11-wireless-security.key-mgmt: --
+802-11-wireless-security.wep-tx-keyidx: 0
+802-11-wireless-security.auth-alg: --
+802-11-wireless-security.proto: --
+802-11-wireless-security.pairwise: --
+802-11-wireless-security.group: --
+802-11-wireless-security.pmf: 0 (default)
+802-11-wireless-security.leap-username: --
+802-11-wireless-security.wep-key0: --
+802-11-wireless-security.wep-key1: --
+802-11-wireless-security.wep-key2: --
+802-11-wireless-security.wep-key3: --
+802-11-wireless-security.wep-key-flags: 0 (none)
+802-11-wireless-security.wep-key-type: unknown
+802-11-wireless-security.psk: testingtestingtesting
+802-11-wireless-security.psk-flags: 0 (none)
+802-11-wireless-security.leap-password: --
+802-11-wireless-security.leap-password-flags:0 (none)
+802-11-wireless-security.wps-method: 0x0 (default)
+802-11-wireless-security.fils: 0 (default)
+"""
+
+
+TESTCASE_DUMMY_STATIC = [
+ {
+ 'type': 'dummy',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'dummy_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'dns4': ['1.1.1.1', '8.8.8.8'],
+ 'ip6': '2001:db8::1/128',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_DUMMY_STATIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: dummy_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.method: manual
+ipv6.addresses: 2001:db8::1/128
+"""
+
+TESTCASE_DUMMY_STATIC_WITHOUT_MTU_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: dummy_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.method: manual
+ipv6.addresses: 2001:db8::1/128
+"""
+
+TESTCASE_DUMMY_STATIC_WITH_CUSTOM_MTU_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: dummy_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: 1500
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.dns: 1.1.1.1,8.8.8.8
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.method: manual
+ipv6.addresses: 2001:db8::1/128
+"""
+
+TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE_UNCHANGED_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: ethernet_non_existant
+connection.autoconnect: yes
+802-3-ethernet.mtu: auto
+ipv6.method: manual
+ipv6.addresses: 2001:db8::cafe/128
+ipv6.gateway: 2001:db8::cafa
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+ipv6.never-default: no
+ipv6.may-fail: yes
+ipv6.ip6-privacy: 1 (enabled, prefer public IP)
+ipv6.addr-gen-mode: eui64
+ipv6.dns: 2001:4860:4860::8888
+ipv4.method: disabled
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+"""
+
+TESTCASE_GSM = [
+ {
+ 'type': 'gsm',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'gsm_non_existant',
+ 'gsm': {
+ 'apn': 'internet.telekom',
+ 'username': 't-mobile',
+ 'password': 'tm',
+ 'pin': '1234',
+ },
+ 'method4': 'auto',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_GSM_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.type: gsm
+connection.interface-name: gsm_non_existant
+connection.autoconnect: yes
+ipv4.method: auto
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+gsm.auto-config: no
+gsm.number: --
+gsm.username: t-mobile
+gsm.password: tm
+gsm.password-flags: 0 (none)
+gsm.apn: "internet.telekom"
+gsm.network-id: --
+gsm.pin: 1234
+gsm.pin-flags: 0 (none)
+gsm.home-only: no
+gsm.device-id: --
+gsm.sim-id: --
+gsm.sim-operator-id: --
+gsm.mtu: auto
+"""
+
+TESTCASE_WIREGUARD = [
+ {
+ 'type': 'wireguard',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'wg_non_existant',
+ 'wireguard': {
+ 'listen-port': '51820',
+ 'private-key': '<hidden>',
+ },
+ 'method4': 'manual',
+ 'ip4': '10.10.10.10/24',
+ 'method6': 'manual',
+ 'ip6': '2001:db8::1/128',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_WIREGUARD_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.type: wireguard
+connection.interface-name: wg_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv6.method: manual
+ipv6.addresses: 2001:db8::1/128
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+wireguard.private-key: <hidden>
+wireguard.private-key-flags: 0 (none)
+wireguard.listen-port: 51820
+wireguard.fwmark: 0x0
+wireguard.peer-routes: yes
+wireguard.mtu: 0
+wireguard.ip4-auto-default-route: -1 (default)
+wireguard.ip6-auto-default-route: -1 (default)
+"""
+
+TESTCASE_VPN_L2TP = [
+ {
+ 'type': 'vpn',
+ 'conn_name': 'vpn_l2tp',
+ 'vpn': {
+ 'permissions': 'brittany',
+ 'service-type': 'org.freedesktop.NetworkManager.l2tp',
+ 'gateway': 'vpn.example.com',
+ 'password-flags': '2',
+ 'user': 'brittany',
+ 'ipsec-enabled': 'true',
+ 'ipsec-psk': 'QnJpdHRhbnkxMjM=',
+ },
+ 'gw4_ignore_auto': True,
+ 'routes4': ['192.168.200.0/24'],
+ 'autoconnect': 'false',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_VPN_L2TP_SHOW_OUTPUT = """\
+connection.id: vpn_l2tp
+connection.type: vpn
+connection.autoconnect: no
+connection.permissions: brittany
+ipv4.method: auto
+ipv4.routes: { ip = 192.168.200.0/24 }
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+vpn.service-type: org.freedesktop.NetworkManager.l2tp
+vpn.data: gateway = vpn.example.com, ipsec-enabled = true, ipsec-psk = QnJpdHRhbnkxMjM=, password-flags = 2, user = brittany
+vpn.secrets: ipsec-psk = QnJpdHRhbnkxMjM=
+vpn.persistent: no
+vpn.timeout: 0
+"""
+
+TESTCASE_VPN_PPTP = [
+ {
+ 'type': 'vpn',
+ 'conn_name': 'vpn_pptp',
+ 'vpn': {
+ 'permissions': 'brittany',
+ 'service-type': 'org.freedesktop.NetworkManager.pptp',
+ 'gateway': 'vpn.example.com',
+ 'password-flags': '2',
+ 'user': 'brittany',
+ },
+ 'autoconnect': 'false',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_VPN_PPTP_SHOW_OUTPUT = """\
+connection.id: vpn_pptp
+connection.type: vpn
+connection.autoconnect: no
+connection.permissions: brittany
+ipv4.method: auto
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+vpn.service-type: org.freedesktop.NetworkManager.pptp
+vpn.data: gateway=vpn.example.com, password-flags=2, user=brittany
+"""
+
+TESTCASE_INFINIBAND_STATIC = [
+ {
+ 'type': 'infiniband',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'infiniband_non_existant',
+ 'ip4': '10.10.10.10/24',
+ 'gw4': '10.10.10.1',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_INFINIBAND_STATIC_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.type: infiniband
+connection.interface-name: infiniband_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.gateway: 10.10.10.1
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv6.method: auto
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+infiniband.transport-mode: datagram
+"""
+
+TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE = [
+ {
+ 'type': 'infiniband',
+ 'conn_name': 'non_existent_nw_device',
+ 'transport_mode': 'connected',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ },
+]
+
+TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.interface-name: infiniband_non_existant
+infiniband.transport-mode: connected
+"""
+
+TESTCASE_MACVLAN = [
+ {
+ 'type': 'macvlan',
+ 'conn_name': 'non_existent_nw_device',
+ 'ifname': 'macvlan_non_existant',
+ 'macvlan': {
+ 'mode': '2',
+ 'parent': 'non_existent_parent',
+ },
+ 'method4': 'manual',
+ 'ip4': '10.10.10.10/24',
+ 'method6': 'manual',
+ 'ip6': '2001:db8::1/128',
+ 'state': 'present',
+ '_ansible_check_mode': False,
+ }
+]
+
+TESTCASE_MACVLAN_SHOW_OUTPUT = """\
+connection.id: non_existent_nw_device
+connection.type: macvlan
+connection.interface-name: macvlan_non_existant
+connection.autoconnect: yes
+ipv4.method: manual
+ipv4.addresses: 10.10.10.10/24
+ipv4.never-default: no
+ipv4.may-fail: yes
+ipv4.ignore-auto-dns: no
+ipv4.ignore-auto-routes: no
+ipv6.method: manual
+ipv6.addresses: 2001:db8::1/128
+ipv6.ignore-auto-dns: no
+ipv6.ignore-auto-routes: no
+macvlan.parent: non_existent_parent
+macvlan.mode: 2 (bridge)
+macvlan.promiscuous: yes
+macvlan.tap: no
+"""
+
+
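+# All fixtures below delegate to mocker_set(): nmcli resolves to /usr/bin/nmcli,
+# connection_exists and execute_command are stubbed, and execute_command either
+# returns one fixed (rc, stdout, stderr) tuple or replays a sequence of them.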
+def mocker_set(mocker,
+ connection_exists=False,
+ execute_return=(0, "", ""),
+ execute_side_effect=None,
+ changed_return=None):
+ """
+ Common mocker object
+ """
+ get_bin_path = mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ get_bin_path.return_value = '/usr/bin/nmcli'
+ connection = mocker.patch.object(nmcli.Nmcli, 'connection_exists')
+ connection.return_value = connection_exists
+ execute_command = mocker.patch.object(nmcli.Nmcli, 'execute_command')
+ if execute_return:
+ execute_command.return_value = execute_return
+ if execute_side_effect:
+ execute_command.side_effect = execute_side_effect
+ if changed_return:
+ is_connection_changed = mocker.patch.object(nmcli.Nmcli, 'is_connection_changed')
+ is_connection_changed.return_value = changed_return
+
+
+@pytest.fixture
+def mocked_generic_connection_create(mocker):
+ mocker_set(mocker)
+
+
+@pytest.fixture
+def mocked_connection_exists(mocker):
+ mocker_set(mocker, connection_exists=True)
+
+
+@pytest.fixture
+def mocked_generic_connection_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ changed_return=(True, dict()))
+
+
+@pytest.fixture
+def mocked_generic_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_generic_connection_modify_routing_rules_unchanged(mocker):
+    mocker_set(mocker,
+               connection_exists=True,
+               execute_return=(0, TESTCASE_GENERIC_MODIFY_ROUTING_RULES_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_generic_connection_dns_search_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_DNS4_SEARCH_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_generic_connection_zone_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_ZONE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bond_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BOND_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bridge_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BRIDGE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_bridge_slave_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_BRIDGE_SLAVE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_team_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_TEAM_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_team_runner_fast_rate_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_TEAM_RUNNER_FAST_RATE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_team_slave_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_TEAM_SLAVE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vlan_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VLAN_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vxlan_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VXLAN_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_gre_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GRE_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ipip_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_IPIP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_sit_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_SIT_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+               execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_dhcp_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_multiple_ip4_addresses_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchange(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE_UNCHANGED_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_multiple_ip6_addresses_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_static_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_STATIC_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_static_address_static_route_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_with_ipv6_address_static_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_STATIC_IP6_ADDRESS_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_ethernet_connection_dhcp_to_static(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_ETHERNET_DHCP_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_wireless_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DEFAULT_WIRELESS_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_secure_wireless_create(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_secure_wireless_create_failure(mocker):
+ mocker_set(mocker,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""),
+ (1, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_secure_wireless_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ (0, "", ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_secure_wireless_modify_failure(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DEFAULT_SECURE_WIRELESS_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ (1, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_dummy_connection_static_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_DUMMY_STATIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_dummy_connection_static_without_mtu_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_DUMMY_STATIC_WITHOUT_MTU_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_dummy_connection_static_with_custom_mtu_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_DUMMY_STATIC_WITH_CUSTOM_MTU_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_gsm_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GSM_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_wireguard_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_WIREGUARD_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vpn_l2tp_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VPN_L2TP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_vpn_pptp_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_VPN_PPTP_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_infiniband_connection_static_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_INFINIBAND_STATIC_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_infiniband_connection_static_transport_mode_connected_modify(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=None,
+ execute_side_effect=(
+ (0, TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE_SHOW_OUTPUT, ""),
+ (0, "", ""),
+ ))
+
+
+@pytest.fixture
+def mocked_macvlan_connection_unchanged(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_MACVLAN_SHOW_OUTPUT, ""))
+
+
+@pytest.fixture
+def mocked_generic_connection_diff_check(mocker):
+ mocker_set(mocker,
+ connection_exists=True,
+ execute_return=(0, TESTCASE_GENERIC_SHOW_OUTPUT, ""))
+
+
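+# Each test below parametrizes patch_ansible_module with a TESTCASE_* entry,
+# runs nmcli.main() until its SystemExit, and then inspects the recorded
+# execute_command arguments and the JSON result captured from stdout.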
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module'])
+def test_bond_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Bond connection created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bond'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['ipv4.gateway', 'primary', 'connection.autoconnect',
+ 'connection.interface-name', 'bond_non_existant',
+ 'mode', 'active-backup', 'ipv4.addresses',
+ '+bond.options', 'xmit_hash_policy=layer3+4']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BOND, indirect=['patch_ansible_module'])
+def test_bond_connection_unchanged(mocked_bond_connection_unchanged, capfd):
+ """
+ Test : Bond connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'generic'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['connection.autoconnect', 'ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_modify(mocked_generic_connection_modify, capfd):
+ """
+ Test : Generic connection modify
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ for param in ['ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC, indirect=['patch_ansible_module'])
+def test_generic_connection_unchanged(mocked_generic_connection_unchanged, capfd):
+ """
+ Test : Generic connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_MODIFY_ROUTING_RULES, indirect=['patch_ansible_module'])
+def test_generic_connection_modify_routing_rules4(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection modified with routing-rules4
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.routing-rules' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_create_dns_search(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection created with dns search
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dns-search' in args[0]
+ assert 'ipv6.dns-search' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_modify_dns_search(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection modified with dns search
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dns-search' in args[0]
+ assert 'ipv6.dns-search' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DNS4_SEARCH, indirect=['patch_ansible_module'])
+def test_generic_connection_dns_search_unchanged(mocked_generic_connection_dns_search_unchanged, capfd):
+ """
+ Test : Generic connection with dns search unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module'])
+def test_dns4_none(mocked_connection_exists, capfd):
+ """
+ Test if DNS4 param is None
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module'])
+def test_generic_connection_create_zone(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection created with zone
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'connection.zone' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module'])
+def test_generic_connection_modify_zone(mocked_generic_connection_create, capfd):
+ """
+ Test : Generic connection modified with zone
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'connection.zone' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE, indirect=['patch_ansible_module'])
+def test_generic_connection_zone_unchanged(mocked_generic_connection_zone_unchanged, capfd):
+ """
+ Test : Generic connection with zone unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_ZONE_ONLY, indirect=['patch_ansible_module'])
+def test_generic_connection_modify_zone_only(mocked_generic_connection_modify, capfd):
+ """
+ Test : Generic connection modified with zone only
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'connection.zone' in args[0]
+ assert 'ipv4.addresses' not in args[0]
+ assert 'ipv4.gateway' not in args[0]
+ assert 'ipv6.addresses' not in args[0]
+ assert 'ipv6.gateway' not in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_CONNECTION, indirect=['patch_ansible_module'])
+def test_zone_none(mocked_connection_exists, capfd):
+ """
+ Test if zone param is None
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_create_bridge(mocked_generic_connection_create, capfd):
+ """
+ Test if Bridge created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bridge'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
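+ # Some module params (for example bridge.max-age) are ints or booleans, so
+ # normalize every argv element with to_text before the membership checks.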
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_mod_bridge(mocked_generic_connection_modify, capfd):
+ """
+ Test if Bridge modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'bridge.max-age', '100', 'bridge.stp', 'yes']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE, indirect=['patch_ansible_module'])
+def test_bridge_connection_unchanged(mocked_bridge_connection_unchanged, capfd):
+ """
+ Test : Bridge connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_create_bridge_slave(mocked_generic_connection_create, capfd):
+ """
+ Test if Bridge_slave created
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'bridge-slave'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['bridge-port.path-cost', '100']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_mod_bridge_slave(mocked_generic_connection_modify, capfd):
+ """
+ Test if Bridge_slave modified
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['bridge-port.path-cost', '100']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_BRIDGE_SLAVE, indirect=['patch_ansible_module'])
+def test_bridge_slave_unchanged(mocked_bridge_slave_unchanged, capfd):
+ """
+ Test : Bridge-slave connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module'])
+def test_team_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Team connection created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'team'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM, indirect=['patch_ansible_module'])
+def test_team_connection_unchanged(mocked_team_connection_unchanged, capfd):
+ """
+ Test : Team connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_HWADDR_POLICY_FAILS, indirect=['patch_ansible_module'])
+def test_team_connection_create_hwaddr_policy_fails(mocked_generic_connection_create, capfd):
+ """
+ Test : Team connection creation fails when runner-hwaddr-policy is used without runner activebackup
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('failed')
+ assert results['msg'] == "Runner-hwaddr-policy is only allowed for runner activebackup"
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE, indirect=['patch_ansible_module'])
+def test_team_runner_fast_rate_connection_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Team connection created with runner_fast_rate parameter
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'team'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ for param in ['connection.autoconnect', 'connection.interface-name', 'team0_non_existant', 'team.runner', 'lacp', 'team.runner-fast-rate', 'yes']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE, indirect=['patch_ansible_module'])
+def test_team_runner_fast_rate_connection_unchanged(mocked_team_runner_fast_rate_connection_unchanged, capfd):
+ """
+ Test : Team connection unchanged with runner_fast_rate parameter
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_RUNNER_FAST_RATE_FAILS, indirect=['patch_ansible_module'])
+def test_team_connection_create_runner_fast_rate_fails(mocked_generic_connection_create, capfd):
+ """
+ Test : Team connection with runner_fast_rate enabled
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('failed')
+ assert results['msg'] == "runner-fast-rate is only allowed for runner lacp"
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module'])
+def test_create_team_slave(mocked_generic_connection_create, capfd):
+ """
+ Test if Team_slave created
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'team-slave'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_slaved_device'
+
+ for param in ['connection.autoconnect', 'connection.interface-name', 'connection.master', 'team0_non_existant', 'connection.slave-type']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_TEAM_SLAVE, indirect=['patch_ansible_module'])
+def test_team_slave_connection_unchanged(mocked_team_slave_connection_unchanged, capfd):
+ """
+ Test : Team slave connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_create_vlan_con(mocked_generic_connection_create, capfd):
+ """
+ Test if VLAN created
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'vlan'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_mod_vlan_conn(mocked_generic_connection_modify, capfd):
+ """
+ Test if VLAN modified
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ipv4.addresses', '10.10.10.10/24', 'ipv4.gateway', '10.10.10.1', 'vlan.id', '10']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VLAN, indirect=['patch_ansible_module'])
+def test_vlan_connection_unchanged(mocked_vlan_connection_unchanged, capfd):
+ """
+ Test : VLAN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_create_vxlan(mocked_generic_connection_create, capfd):
+ """
+ Test if vxlan created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'vxlan'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'vxlan-existent_nw_device',
+ 'vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_vxlan_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if vxlan modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['vxlan.local', '192.168.225.5', 'vxlan.remote', '192.168.225.6', 'vxlan.id', '11']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VXLAN, indirect=['patch_ansible_module'])
+def test_vxlan_connection_unchanged(mocked_vxlan_connection_unchanged, capfd):
+ """
+ Test : VxLAN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_create_ipip(mocked_generic_connection_create, capfd):
+ """
+ Test if ipip created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'ip-tunnel'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'ipip-existent_nw_device',
+ 'ip-tunnel.local', '192.168.225.5',
+ 'ip-tunnel.mode', 'ipip',
+ 'ip-tunnel.parent', 'non_existent_ipip_device',
+ 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_ipip_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if ipip modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_IPIP, indirect=['patch_ansible_module'])
+def test_ipip_connection_unchanged(mocked_ipip_connection_unchanged, capfd):
+ """
+ Test : IPIP connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_create_sit(mocked_generic_connection_create, capfd):
+ """
+ Test if sit created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'ip-tunnel'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'sit-existent_nw_device',
+ 'ip-tunnel.local', '192.168.225.5',
+ 'ip-tunnel.mode', 'sit',
+ 'ip-tunnel.parent', 'non_existent_sit_device',
+ 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_sit_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if sit modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SIT, indirect=['patch_ansible_module'])
+def test_sit_connection_unchanged(mocked_sit_connection_unchanged, capfd):
+ """
+ Test : SIT connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module'])
+def test_eth_dhcp_client_id_con_create(mocked_generic_connection_create, capfd):
+ """
+ Test : Ethernet connection created with DHCP_CLIENT_ID
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert 'ipv4.dhcp-client-id' in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module'])
+def test_create_gre(mocked_generic_connection_create, capfd):
+ """
+ Test if gre created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'ip-tunnel'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'gre-existent_nw_device',
+ 'ip-tunnel.local', '192.168.225.5',
+ 'ip-tunnel.mode', 'gre',
+ 'ip-tunnel.parent', 'non_existent_gre_device',
+ 'ip-tunnel.remote', '192.168.225.6',
+ 'ip-tunnel.input-key', '1',
+ 'ip-tunnel.output-key', '2']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module'])
+def test_gre_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if gre modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['ip-tunnel.local', '192.168.225.5', 'ip-tunnel.remote', '192.168.225.6']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GRE, indirect=['patch_ansible_module'])
+def test_gre_connection_unchanged(mocked_gre_connection_unchanged, capfd):
+ """
+ Test : GRE connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_DHCP, indirect=['patch_ansible_module'])
+def test_ethernet_connection_dhcp_unchanged(mocked_ethernet_connection_dhcp_unchanged, capfd):
+ """
+ Test : Ethernet connection with DHCP_CLIENT_ID unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_modify_ethernet_dhcp_to_static(mocked_ethernet_connection_dhcp_to_static, capfd):
+ """
+ Test : Modify ethernet connection from DHCP to static
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
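+ # Two invocations are expected: the fixture first plays back the current
+ # DHCP profile from `con show`, then records the `con modify` that switches
+ # it to a static configuration.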
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[1]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ for param in ['ipv4.method', 'ipv4.gateway', 'ipv4.addresses']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_create_ethernet_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create ethernet connection with static IP configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ 'ipv4.gateway', '10.10.10.1',
+ 'ipv4.dns', '1.1.1.1,8.8.8.8']:
+ assert param in add_args_text
+
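+ # After adding the profile the module activates it, hence the second call
+ # below is `con up`.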
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_unchanged(mocked_ethernet_connection_static_unchanged, capfd):
+ """
+ Test : Ethernet connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV4_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv4_address_static_route_with_metric_modify(
+ mocked_ethernet_connection_with_ipv4_static_address_static_route_metric_modify, capfd):
+ """
+ Test : Modify ethernet connection with static IPv4 address and a static route with metric
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[1]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'modify'
+ assert add_args[0][3] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['ipv4.routes', '192.168.200.0/24 192.168.1.1',
+ 'ipv4.route-metric', '10']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results.get('changed') is True
+ assert not results.get('failed')
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv6_address_static_route_create(mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd):
+ """
+ Test : Create ethernet connection with static IPv6 address and static route
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'con-name', 'non_existent_nw_device',
+ 'ipv6.addresses', '2001:beef:cafe:10::1/64',
+ 'ipv6.method', 'manual',
+ 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_MOD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv6_address_static_route_metric_modify(
+ mocked_ethernet_connection_with_ipv6_static_address_static_route_metric_modify, capfd):
+ """
+ Test : Modify ethernet connection with static IPv6 address and a static route with metric
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[1]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'modify'
+ assert add_args[0][3] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2',
+ 'ipv6.route-metric', '10']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results.get('changed') is True
+ assert not results.get('failed')
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_metric_create(
+ mocked_ethernet_connection_with_ipv6_static_address_multiple_static_routes_with_metric_create, capfd):
+ """
+ Test : Create ethernet connection with static IPv6 address and multiple static routes
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
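+ # Multiple static routes are passed as a single comma-separated ipv6.routes
+ # value rather than as repeated ipv6.routes arguments.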
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'con-name', 'non_existent_nw_device',
+ 'ipv6.addresses', '2001:beef:cafe:10::1/64',
+ 'ipv6.method', 'manual',
+ 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_ROUTE_AND_METRIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv6_address_static_route_with_metric_create(
+ mocked_ethernet_connection_with_ipv6_static_address_static_route_with_metric_create, capfd):
+ """
+ Test : Create ethernet connection with static IPv6 address and static route with metric
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'con-name', 'non_existent_nw_device',
+ 'ipv6.addresses', '2001:beef:cafe:10::1/64',
+ 'ipv6.method', 'manual',
+ 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2',
+ 'ipv6.route-metric', '5']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_ADD_IPV6_INT_WITH_MULTIPLE_ROUTES_AND_METRIC, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ipv6_address_multiple_static_routes_with_route_metric_create(
+ mocked_ethernet_connection_with_ipv6_static_address_static_route_create, capfd):
+ """
+ Test : Create ethernet connection with static IPv6 address and multiple static routes with metric
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'con-name', 'non_existent_nw_device',
+ 'ipv6.addresses', '2001:beef:cafe:10::1/64',
+ 'ipv6.method', 'manual',
+ 'ipv6.routes', 'fd2e:446f:d85d:5::/64 2001:beef:cafe:10::2,fd2e:8890:abcd:25::/64 2001:beef:cafe:10::5',
+ 'ipv6.route-metric', '5']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIRELESS, indirect=['patch_ansible_module'])
+def test_create_wireless(mocked_wireless_create, capfd):
+ """
+ Test : Create wireless connection
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+
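+ # The first call discovers which 802-11-wireless options are available via
+ # an interactive `con edit type wifi` session; its commands are fed through
+ # the mocked call's `data` keyword (i.e. stdin).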
+ get_available_options_args, get_available_options_kw = arg_list[0]
+ assert get_available_options_args[0][0] == '/usr/bin/nmcli'
+ assert get_available_options_args[0][1] == 'con'
+ assert get_available_options_args[0][2] == 'edit'
+ assert get_available_options_args[0][3] == 'type'
+ assert get_available_options_args[0][4] == 'wifi'
+
+ get_available_options_data = get_available_options_kw['data'].split()
+ for param in ['print', '802-11-wireless',
+ 'quit', 'yes']:
+ assert param in get_available_options_data
+
+ add_args, add_kw = arg_list[1]
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'wifi'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wireless_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ '802-11-wireless.ssid', 'Brittany',
+ '802-11-wireless.mode', 'ap',
+ '802-11-wireless.hidden', 'yes']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module'])
+def test_create_secure_wireless(mocked_secure_wireless_create, capfd):
+ """
+ Test : Create secure wireless connection
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 3
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+
+ get_available_options_args, get_available_options_kw = arg_list[0]
+ assert get_available_options_args[0][0] == '/usr/bin/nmcli'
+ assert get_available_options_args[0][1] == 'con'
+ assert get_available_options_args[0][2] == 'edit'
+ assert get_available_options_args[0][3] == 'type'
+ assert get_available_options_args[0][4] == 'wifi'
+
+ get_available_options_data = get_available_options_kw['data'].split()
+ for param in ['print', '802-11-wireless-security',
+ 'quit', 'yes']:
+ assert param in get_available_options_data
+
+ add_args, add_kw = arg_list[1]
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'wifi'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wireless_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ '802-11-wireless.ssid', 'Brittany',
+ '802-11-wireless-security.key-mgmt', 'wpa-psk']:
+ assert param in add_args_text
+
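+ # The PSK is applied afterwards through an interactive `con edit` session
+ # (sent via `data`), presumably so the secret never shows up in the argv.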
+ edit_args, edit_kw = arg_list[2]
+ assert edit_args[0][0] == '/usr/bin/nmcli'
+ assert edit_args[0][1] == 'con'
+ assert edit_args[0][2] == 'edit'
+ assert edit_args[0][3] == 'non_existent_nw_device'
+
+ edit_kw_data = edit_kw['data'].split()
+ for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD',
+ 'save',
+ 'quit']:
+ assert param in edit_kw_data
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module'])
+def test_create_secure_wireless_failure(mocked_secure_wireless_create_failure, capfd):
+ """
+ Test : Create secure wireless connection w/failure
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+
+ get_available_options_args, get_available_options_kw = arg_list[0]
+ assert get_available_options_args[0][0] == '/usr/bin/nmcli'
+ assert get_available_options_args[0][1] == 'con'
+ assert get_available_options_args[0][2] == 'edit'
+ assert get_available_options_args[0][3] == 'type'
+ assert get_available_options_args[0][4] == 'wifi'
+
+ get_available_options_data = get_available_options_kw['data'].split()
+ for param in ['print', '802-11-wireless-security',
+ 'quit', 'yes']:
+ assert param in get_available_options_data
+
+ add_args, add_kw = arg_list[1]
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'wifi'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wireless_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ '802-11-wireless.ssid', 'Brittany',
+ '802-11-wireless-security.key-mgmt', 'wpa-psk']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('failed')
+ assert 'changed' not in results
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module'])
+def test_modify_secure_wireless(mocked_secure_wireless_modify, capfd):
+ """
+ Test : Modify secure wireless connection
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+ assert nmcli.Nmcli.execute_command.call_count == 4
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+
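+ # Four calls in sequence: option discovery (`con edit type wifi`), a
+ # `--show-secrets con show` of the existing profile, the `con modify`
+ # itself, and a final `con edit` that saves the new PSK.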
+ get_available_options_args, get_available_options_kw = arg_list[0]
+ assert get_available_options_args[0][0] == '/usr/bin/nmcli'
+ assert get_available_options_args[0][1] == 'con'
+ assert get_available_options_args[0][2] == 'edit'
+ assert get_available_options_args[0][3] == 'type'
+ assert get_available_options_args[0][4] == 'wifi'
+
+ get_available_options_data = get_available_options_kw['data'].split()
+ for param in ['print', '802-11-wireless-security',
+ 'quit', 'yes']:
+ assert param in get_available_options_data
+
+ show_args, show_kw = arg_list[1]
+ assert show_args[0][0] == '/usr/bin/nmcli'
+ assert show_args[0][1] == '--show-secrets'
+ assert show_args[0][2] == 'con'
+ assert show_args[0][3] == 'show'
+ assert show_args[0][4] == 'non_existent_nw_device'
+
+ add_args, add_kw = arg_list[2]
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'modify'
+ assert add_args[0][3] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wireless_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ '802-11-wireless.ssid', 'Brittany',
+ '802-11-wireless-security.key-mgmt', 'wpa-psk']:
+ assert param in add_args_text
+
+ edit_args, edit_kw = arg_list[3]
+ assert edit_args[0][0] == '/usr/bin/nmcli'
+ assert edit_args[0][1] == 'con'
+ assert edit_args[0][2] == 'edit'
+ assert edit_args[0][3] == 'non_existent_nw_device'
+
+ edit_kw_data = edit_kw['data'].split()
+ for param in ['802-11-wireless-security.psk', 'VERY_SECURE_PASSWORD',
+ 'save',
+ 'quit']:
+ assert param in edit_kw_data
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_SECURE_WIRELESS, indirect=['patch_ansible_module'])
+def test_modify_secure_wireless_failure(mocked_secure_wireless_modify_failure, capfd):
+ """
+ Test : Modify secure wireless connection w/failure
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 3
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+
+ get_available_options_args, get_available_options_kw = arg_list[0]
+ assert get_available_options_args[0][0] == '/usr/bin/nmcli'
+ assert get_available_options_args[0][1] == 'con'
+ assert get_available_options_args[0][2] == 'edit'
+ assert get_available_options_args[0][3] == 'type'
+ assert get_available_options_args[0][4] == 'wifi'
+
+ get_available_options_data = get_available_options_kw['data'].split()
+ for param in ['print', '802-11-wireless-security',
+ 'quit', 'yes']:
+ assert param in get_available_options_data
+
+ show_args, show_kw = arg_list[1]
+ assert show_args[0][0] == '/usr/bin/nmcli'
+ assert show_args[0][1] == '--show-secrets'
+ assert show_args[0][2] == 'con'
+ assert show_args[0][3] == 'show'
+ assert show_args[0][4] == 'non_existent_nw_device'
+
+ add_args, add_kw = arg_list[2]
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'modify'
+ assert add_args[0][3] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wireless_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ '802-11-wireless.ssid', 'Brittany',
+ '802-11-wireless-security.key-mgmt', 'wpa-psk']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('failed')
+ assert 'changed' not in results
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module'])
+def test_create_dummy_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create dummy connection with static IP configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'dummy'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'dummy_non_existant',
+ 'ipv4.addresses', '10.10.10.10/24',
+ 'ipv4.gateway', '10.10.10.1',
+ 'ipv4.dns', '1.1.1.1,8.8.8.8',
+ 'ipv6.addresses', '2001:db8::1/128']:
+ assert param in add_args_text
+
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module'])
+def test_dummy_connection_static_unchanged(mocked_dummy_connection_static_unchanged, capfd):
+ """
+ Test : Dummy connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module'])
+def test_dummy_connection_static_without_mtu_unchanged(mocked_dummy_connection_static_without_mtu_unchanged, capfd):
+ """
+ Test : Dummy connection with static IP configuration and no MTU set unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_DUMMY_STATIC, indirect=['patch_ansible_module'])
+def test_dummy_connection_static_with_custom_mtu_modify(mocked_dummy_connection_static_with_custom_mtu_modify, capfd):
+ """
+ Test : Dummy connection with static IP configuration and a custom MTU set is modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[1]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
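+ # An MTU of 0 tells NetworkManager to fall back to the default MTU, i.e.
+ # the custom value is being cleared here.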
+ args_text = list(map(to_text, args[0]))
+ for param in ['802-3-ethernet.mtu', '0']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module'])
+def test_create_gsm(mocked_generic_connection_create, capfd):
+ """
+ Test if gsm created
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'add'
+ assert args[0][3] == 'type'
+ assert args[0][4] == 'gsm'
+ assert args[0][5] == 'con-name'
+ assert args[0][6] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['connection.interface-name', 'gsm_non_existant',
+ 'gsm.apn', 'internet.telekom',
+ 'gsm.username', 't-mobile',
+ 'gsm.password', 'tm',
+ 'gsm.pin', '1234']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module'])
+def test_gsm_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test if gsm modified
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['gsm.username', 't-mobile',
+ 'gsm.password', 'tm']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GSM, indirect=['patch_ansible_module'])
+def test_gsm_connection_unchanged(mocked_gsm_connection_unchanged, capfd):
+ """
+ Test if gsm connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module'])
+def test_create_ethernet_with_multiple_ip4_addresses_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create ethernet connection with multiple static IPv4 addresses
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'ipv4.addresses', '10.10.10.10/32,10.10.20.10/32',
+ 'ipv4.gateway', '10.10.10.1',
+ 'ipv4.dns', '1.1.1.1,8.8.8.8']:
+ assert param in add_args_text
+
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=['patch_ansible_module'])
+def test_create_ethernet_with_multiple_ip6_addresses_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create ethernet connection with multiple IPv6 addresses configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'ipv6.addresses', '2001:db8::cafe/128,2002:db8::cafe/128',
+ 'ipv6.gateway', '2001:db8::cafa',
+ 'ipv6.dns', '2001:4860:4860::8888,2001:4860:4860::8844']:
+ assert param in add_args_text
+
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_with_multiple_ip4_addresses_unchanged(mocked_ethernet_connection_static_multiple_ip4_addresses_unchanged, capfd):
+ """
+ Test : Ethernet connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP6_ADDRESSES, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_with_multiple_ip6_addresses_unchanged(mocked_ethernet_connection_static_multiple_ip6_addresses_unchanged, capfd):
+ """
+ Test : Ethernet connection with multiple IPv6 addresses configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_MULTIPLE_IP4_ADDRESSES, indirect=['patch_ansible_module'])
+def test_add_second_ip4_address_to_ethernet_connection(mocked_ethernet_connection_static_modify, capfd):
+ """
+ Test : Add a second IPv4 address to an existing ethernet connection
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[1]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ for param in ['ipv4.addresses', '10.10.10.10/32,10.10.20.10/32']:
+ assert param in args[0]
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=['patch_ansible_module'])
+def test_create_ethernet_addr_gen_mode_and_ip6_privacy_static(mocked_generic_connection_create, capfd):
+ """
+ Test : Create ethernet connection with IPv6 privacy and address generation mode
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 2
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'ethernet'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'ethernet_non_existant',
+ 'ipv6.addresses', '2001:db8::cafe/128',
+ 'ipv6.gateway', '2001:db8::cafa',
+ 'ipv6.dns', '2001:4860:4860::8888',
+ 'ipv6.ip6-privacy', 'prefer-public-addr',
+ 'ipv6.addr-gen-mode', 'eui64']:
+ assert param in add_args_text
+
+ up_args, up_kw = arg_list[1]
+ assert up_args[0][0] == '/usr/bin/nmcli'
+ assert up_args[0][1] == 'con'
+ assert up_args[0][2] == 'up'
+ assert up_args[0][3] == 'non_existent_nw_device'
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_ETHERNET_STATIC_IP6_PRIVACY_AND_ADDR_GEN_MODE, indirect=['patch_ansible_module'])
+def test_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchanged(mocked_ethernet_connection_static_ip6_privacy_and_addr_gen_mode_unchange, capfd):
+ """
+ Test : Ethernet connection with IPv6 privacy and address generation mode unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module'])
+def test_create_wireguard(mocked_generic_connection_create, capfd):
+ """
+ Test : Create wireguard connection with static IP configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'wireguard'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'wg_non_existant',
+ 'ipv4.method', 'manual',
+ 'ipv4.addresses', '10.10.10.10/24',
+ 'ipv6.method', 'manual',
+ 'ipv6.addresses', '2001:db8::1/128',
+ 'wireguard.listen-port', '51820',
+ 'wireguard.private-key', '<hidden>']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module'])
+def test_wireguard_connection_unchanged(mocked_wireguard_connection_unchanged, capfd):
+ """
+ Test : Wireguard connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_WIREGUARD, indirect=['patch_ansible_module'])
+def test_wireguard_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test : Modify wireguard connection
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['wireguard.listen-port', '51820']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_L2TP, indirect=['patch_ansible_module'])
+def test_vpn_l2tp_connection_unchanged(mocked_vpn_l2tp_connection_unchanged, capfd):
+ """
+ Test : L2TP VPN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_PPTP, indirect=['patch_ansible_module'])
+def test_vpn_pptp_connection_unchanged(mocked_vpn_pptp_connection_unchanged, capfd):
+ """
+ Test : PPTP VPN connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_L2TP, indirect=['patch_ansible_module'])
+def test_create_vpn_l2tp(mocked_generic_connection_create, capfd):
+ """
+ Test : Create L2TP VPN connection
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'vpn'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'vpn_l2tp'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['connection.autoconnect', 'no',
+ 'connection.permissions', 'brittany',
+ 'vpn.data', 'vpn.service-type', 'org.freedesktop.NetworkManager.l2tp',
+ ]:
+ assert param in add_args_text
+
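+ # nmcli receives vpn.data as a single comma-separated key=value string, so
+ # check the token that immediately follows the 'vpn.data' argument.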
+ vpn_data_index = add_args_text.index('vpn.data') + 1
+ args_vpn_data = add_args_text[vpn_data_index]
+ for vpn_data in ['gateway=vpn.example.com', 'password-flags=2', 'user=brittany', 'ipsec-enabled=true', 'ipsec-psk=QnJpdHRhbnkxMjM=']:
+ assert vpn_data in args_vpn_data
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_VPN_PPTP, indirect=['patch_ansible_module'])
+def test_create_vpn_pptp(mocked_generic_connection_create, capfd):
+ """
+ Test : Create PPTP VPN connection
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'vpn'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'vpn_pptp'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['connection.autoconnect', 'no',
+ 'connection.permissions', 'brittany',
+ 'vpn.data', 'vpn.service-type', 'org.freedesktop.NetworkManager.pptp',
+ ]:
+ assert param in add_args_text
+
+ vpn_data_index = add_args_text.index('vpn.data') + 1
+ args_vpn_data = add_args_text[vpn_data_index]
+ for vpn_data in ['password-flags=2', 'gateway=vpn.example.com', 'user=brittany']:
+ assert vpn_data in args_vpn_data
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_INFINIBAND_STATIC, indirect=['patch_ansible_module'])
+def test_infiniband_connection_static_unchanged(mocked_infiniband_connection_static_unchanged, capfd):
+ """
+ Test : Infiniband connection unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_INFINIBAND_STATIC_MODIFY_TRANSPORT_MODE, indirect=['patch_ansible_module'])
+def test_infiniband_connection_static_transport_mode_connected(
+ mocked_infiniband_connection_static_transport_mode_connected_modify, capfd):
+ """
+ Test : Modify Infiniband connection to use connected as transport_mode
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[1]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'modify'
+ assert add_args[0][3] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+
+ for param in ['infiniband.transport-mode', 'connected']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert results.get('changed') is True
+ assert not results.get('failed')
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_GENERIC_DIFF_CHECK, indirect=['patch_ansible_module'])
+def test_bond_connection_diff_check(mocked_generic_connection_diff_check, capfd):
+ """
+ Test : Bond connection diff check reports exactly one changed parameter
+ """
+
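+ # Build an AnsibleModule with the nmcli module's argument spec inline so we
+ # can construct Nmcli directly and exercise is_connection_changed().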
+ module = AnsibleModule(
+ argument_spec=dict(
+ ignore_unsupported_suboptions=dict(type='bool', default=False),
+ autoconnect=dict(type='bool', default=True),
+ state=dict(type='str', required=True, choices=['absent', 'present']),
+ conn_name=dict(type='str', required=True),
+ master=dict(type='str'),
+ ifname=dict(type='str'),
+ type=dict(type='str',
+ choices=[
+ 'bond',
+ 'bond-slave',
+ 'bridge',
+ 'bridge-slave',
+ 'dummy',
+ 'ethernet',
+ 'generic',
+ 'gre',
+ 'infiniband',
+ 'ipip',
+ 'sit',
+ 'team',
+ 'team-slave',
+ 'vlan',
+ 'vxlan',
+ 'wifi',
+ 'gsm',
+ 'macvlan',
+ 'wireguard',
+ 'vpn',
+ ]),
+ ip4=dict(type='list', elements='str'),
+ gw4=dict(type='str'),
+ gw4_ignore_auto=dict(type='bool', default=False),
+ routes4=dict(type='list', elements='str'),
+ routes4_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
+ tos=dict(type='int'),
+ cwnd=dict(type='int'),
+ mtu=dict(type='int'),
+ onlink=dict(type='bool')
+ )),
+ route_metric4=dict(type='int'),
+ routing_rules4=dict(type='list', elements='str'),
+ never_default4=dict(type='bool', default=False),
+ dns4=dict(type='list', elements='str'),
+ dns4_search=dict(type='list', elements='str'),
+ dns4_ignore_auto=dict(type='bool', default=False),
+ method4=dict(type='str', choices=['auto', 'link-local', 'manual', 'shared', 'disabled']),
+ may_fail4=dict(type='bool', default=True),
+ dhcp_client_id=dict(type='str'),
+ ip6=dict(type='list', elements='str'),
+ gw6=dict(type='str'),
+ gw6_ignore_auto=dict(type='bool', default=False),
+ dns6=dict(type='list', elements='str'),
+ dns6_search=dict(type='list', elements='str'),
+ dns6_ignore_auto=dict(type='bool', default=False),
+ routes6=dict(type='list', elements='str'),
+ routes6_extended=dict(type='list',
+ elements='dict',
+ options=dict(
+ ip=dict(type='str', required=True),
+ next_hop=dict(type='str'),
+ metric=dict(type='int'),
+ table=dict(type='int'),
+ cwnd=dict(type='int'),
+ mtu=dict(type='int'),
+ onlink=dict(type='bool')
+ )),
+ route_metric6=dict(type='int'),
+ method6=dict(type='str', choices=['ignore', 'auto', 'dhcp', 'link-local', 'manual', 'shared', 'disabled']),
+ ip_privacy6=dict(type='str', choices=['disabled', 'prefer-public-addr', 'prefer-temp-addr', 'unknown']),
+ addr_gen_mode6=dict(type='str', choices=['default', 'default-or-eui64', 'eui64', 'stable-privacy']),
+ # Bond Specific vars
+ mode=dict(type='str', default='balance-rr',
+ choices=['802.3ad', 'active-backup', 'balance-alb', 'balance-rr', 'balance-tlb', 'balance-xor', 'broadcast']),
+ miimon=dict(type='int'),
+ downdelay=dict(type='int'),
+ updelay=dict(type='int'),
+ xmit_hash_policy=dict(type='str'),
+ arp_interval=dict(type='int'),
+ arp_ip_target=dict(type='str'),
+ primary=dict(type='str'),
+ # general usage
+ mtu=dict(type='int'),
+ mac=dict(type='str'),
+ zone=dict(type='str'),
+ # bridge specific vars
+ stp=dict(type='bool', default=True),
+ priority=dict(type='int', default=128),
+ slavepriority=dict(type='int', default=32),
+ forwarddelay=dict(type='int', default=15),
+ hellotime=dict(type='int', default=2),
+ maxage=dict(type='int', default=20),
+ ageingtime=dict(type='int', default=300),
+ hairpin=dict(type='bool'),
+ path_cost=dict(type='int', default=100),
+ # team specific vars
+ runner=dict(type='str', default='roundrobin',
+ choices=['broadcast', 'roundrobin', 'activebackup', 'loadbalance', 'lacp']),
+ # team active-backup runner specific options
+ runner_hwaddr_policy=dict(type='str', choices=['same_all', 'by_active', 'only_active']),
+ # team lacp runner specific options
+ runner_fast_rate=dict(type='bool'),
+ # vlan specific vars
+ vlanid=dict(type='int'),
+ vlandev=dict(type='str'),
+ flags=dict(type='str'),
+ ingress=dict(type='str'),
+ egress=dict(type='str'),
+ # vxlan specific vars
+ vxlan_id=dict(type='int'),
+ vxlan_local=dict(type='str'),
+ vxlan_remote=dict(type='str'),
+ # ip-tunnel specific vars
+ ip_tunnel_dev=dict(type='str'),
+ ip_tunnel_local=dict(type='str'),
+ ip_tunnel_remote=dict(type='str'),
+ # ip-tunnel type gre specific vars
+ ip_tunnel_input_key=dict(type='str', no_log=True),
+ ip_tunnel_output_key=dict(type='str', no_log=True),
+ # 802-11-wireless* specific vars
+ ssid=dict(type='str'),
+ wifi=dict(type='dict'),
+ wifi_sec=dict(type='dict', no_log=True),
+ gsm=dict(type='dict'),
+ macvlan=dict(type='dict'),
+ wireguard=dict(type='dict'),
+ vpn=dict(type='dict'),
+ transport_mode=dict(type='str', choices=['datagram', 'connected']),
+ ),
+ mutually_exclusive=[['never_default4', 'gw4'],
+ ['routes4_extended', 'routes4'],
+ ['routes6_extended', 'routes6']],
+ required_if=[("type", "wifi", [("ssid")])],
+ supports_check_mode=True,
+ )
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
+ nmcli_module = nmcli.Nmcli(module)
+
+ changed, diff = nmcli_module.is_connection_changed()
+
+ assert changed
+
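+ # Exactly one parameter should differ between the 'before' and 'after'
+ # states returned in the diff.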
+ num_of_diff_params = 0
+ for parameter, value in diff.get('before').items():
+ if value != diff['after'][parameter]:
+ num_of_diff_params += 1
+
+ assert num_of_diff_params == 1
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module'])
+def test_create_macvlan(mocked_generic_connection_create, capfd):
+ """
+ Test : Create macvlan connection with static IP configuration
+ """
+
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ add_args, add_kw = arg_list[0]
+
+ assert add_args[0][0] == '/usr/bin/nmcli'
+ assert add_args[0][1] == 'con'
+ assert add_args[0][2] == 'add'
+ assert add_args[0][3] == 'type'
+ assert add_args[0][4] == 'macvlan'
+ assert add_args[0][5] == 'con-name'
+ assert add_args[0][6] == 'non_existent_nw_device'
+
+ add_args_text = list(map(to_text, add_args[0]))
+ for param in ['connection.interface-name', 'macvlan_non_existant',
+ 'ipv4.method', 'manual',
+ 'ipv4.addresses', '10.10.10.10/24',
+ 'ipv6.method', 'manual',
+ 'ipv6.addresses', '2001:db8::1/128',
+ 'macvlan.mode', '2',
+ 'macvlan.parent', 'non_existent_parent']:
+ assert param in add_args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module'])
+def test_macvlan_connection_unchanged(mocked_macvlan_connection_unchanged, capfd):
+ """
+ Test : Macvlan connection with static IP configuration unchanged
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert not results['changed']
+
+
+@pytest.mark.parametrize('patch_ansible_module', TESTCASE_MACVLAN, indirect=['patch_ansible_module'])
+def test_macvlan_mod(mocked_generic_connection_modify, capfd):
+ """
+ Test : Modify macvlan connection
+ """
+ with pytest.raises(SystemExit):
+ nmcli.main()
+
+ assert nmcli.Nmcli.execute_command.call_count == 1
+ arg_list = nmcli.Nmcli.execute_command.call_args_list
+ args, kwargs = arg_list[0]
+
+ assert args[0][0] == '/usr/bin/nmcli'
+ assert args[0][1] == 'con'
+ assert args[0][2] == 'modify'
+ assert args[0][3] == 'non_existent_nw_device'
+
+ args_text = list(map(to_text, args[0]))
+ for param in ['macvlan.mode', '2']:
+ assert param in args_text
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get('failed')
+ assert results['changed']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_npm.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_npm.py
new file mode 100644
index 000000000..f5d312775
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_npm.py
@@ -0,0 +1,262 @@
+#
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.plugins.modules import npm
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, ModuleTestCase, set_module_args
+
+
+class NPMModuleTestCase(ModuleTestCase):
+ module = npm
+
+ def setUp(self):
+ super(NPMModuleTestCase, self).setUp()
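+ # Patch run_command and get_bin_path on AnsibleModule so the tests never
+ # invoke a real npm binary; get_bin_path always resolves to /testbin/npm.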
+ ansible_module_path = "ansible_collections.community.general.plugins.modules.npm.AnsibleModule"
+ self.mock_run_command = patch('%s.run_command' % ansible_module_path)
+ self.module_main_command = self.mock_run_command.start()
+ self.mock_get_bin_path = patch('%s.get_bin_path' % ansible_module_path)
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.get_bin_path.return_value = '/testbin/npm'
+
+ def tearDown(self):
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ super(NPMModuleTestCase, self).tearDown()
+
+ def module_main(self, exit_exc):
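+ # exit_json()/fail_json() raise AnsibleExitJson/AnsibleFailJson; the first
+ # positional argument of the exception carries the module's result dict.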
+ with self.assertRaises(exit_exc) as exc:
+ self.module.main()
+ return exc.exception.args[0]
+
+ def test_present(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_missing(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present',
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"missing" : true}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'install', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_version(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present',
+ 'version': '2.5.1'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_version_update(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present',
+ 'version': '2.5.1'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'install', '--global', 'coffee-script@2.5.1'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_version_exists(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'present',
+ 'version': '2.5.1'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ ])
+
+ def test_absent(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'absent'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
+
+ def test_absent_version(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'absent',
+ 'version': '2.5.1'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"version" : "2.5.1"}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
+
+ def test_absent_version_different(self):
+ set_module_args({
+ 'name': 'coffee-script',
+ 'global': 'true',
+ 'state': 'absent',
+ 'version': '2.5.1'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{"dependencies": {"coffee-script": {"version" : "2.5.0"}}}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'list', '--json', '--long', '--global'], check_rc=False, cwd=None),
+ call(['/testbin/npm', 'uninstall', '--global', 'coffee-script'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_package_json(self):
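+ # Without 'name', the module installs whatever package.json specifies.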
+ set_module_args({
+ 'global': 'true',
+ 'state': 'present'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'install', '--global'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_package_json_production(self):
+ set_module_args({
+ 'production': 'true',
+ 'global': 'true',
+ 'state': 'present',
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'install', '--global', '--production'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_package_json_ci(self):
+ set_module_args({
+ 'ci': 'true',
+ 'global': 'true',
+ 'state': 'present'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'ci', '--global'], check_rc=True, cwd=None),
+ ])
+
+ def test_present_package_json_ci_production(self):
+ set_module_args({
+ 'ci': 'true',
+ 'production': 'true',
+ 'global': 'true',
+ 'state': 'present'
+ })
+ self.module_main_command.side_effect = [
+ (0, '{}', ''),
+ (0, '{}', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/npm', 'ci', '--global', '--production'], check_rc=True, cwd=None),
+ ])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_command.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_command.py
new file mode 100644
index 000000000..3ce267c4e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_command.py
@@ -0,0 +1,639 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import shutil
+import tempfile
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils import basic
+import ansible_collections.community.general.plugins.modules.ocapi_command as module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+from ansible.module_utils.six.moves.urllib.parse import urljoin
+
+
+MOCK_BASE_URI = "mockBaseUri/"
+OPERATING_SYSTEM_URI = "OperatingSystem"
+MOCK_JOB_NAME = "MockJob"
+
+ACTION_WAS_SUCCESSFUL = "Action was successful."
+UPDATE_NOT_PERFORMED_IN_CHECK_MODE = "Update not performed in check mode."
+NO_ACTION_PERFORMED_IN_CHECK_MODE = "No action performed in check mode."
+
+MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG = {
+ "ret": True,
+ "data": {
+ "IndicatorLED": {
+ "ID": 4,
+ "Name": "Off"
+ },
+ "PowerState": {
+ "ID": 2,
+ "Name": "On"
+ }
+ },
+ "headers": {"etag": "MockETag"}
+}
+
+MOCK_SUCCESSFUL_HTTP_RESPONSE = {
+ "ret": True,
+ "data": {}
+}
+
+MOCK_404_RESPONSE = {
+ "ret": False,
+ "status": 404
+}
+
+MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER = {
+ "ret": True,
+ "data": {},
+ "headers": {"location": "mock_location"}
+}
+
+MOCK_HTTP_RESPONSE_CONFLICT = {
+ "ret": False,
+ "msg": "Conflict",
+ "status": 409
+}
+
+MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = {
+ "ret": True,
+ "data": {
+ "PercentComplete": 99
+ },
+ "headers": {
+ "etag": "12345"
+ }
+}
+
+MOCK_HTTP_RESPONSE_JOB_COMPLETE = {
+ "ret": True,
+ "data": {
+ "PercentComplete": 100
+ },
+ "headers": {
+ "etag": "12345"
+ }
+}
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+def get_exception_message(ansible_exit_json):
+ """From an AnsibleExitJson exception, get the message string."""
+ return ansible_exit_json.exception.args[0]["msg"]
+
+
+def is_changed(ansible_exit_json):
+ """From an AnsibleExitJson exception, return the value of the changed flag"""
+ return ansible_exit_json.exception.args[0]["changed"]
+
+
+def mock_get_request(*args, **kwargs):
+ """Mock for get_request."""
+ url = args[1]
+ if url == 'https://' + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
+ elif url == "mock_location":
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_get_request_job_does_not_exist(*args, **kwargs):
+ """Mock for get_request."""
+ url = args[1]
+ if url == 'https://' + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
+ elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
+ return MOCK_404_RESPONSE
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_get_request_job_in_progress(*args, **kwargs):
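+ """Mock for get_request while the job is still in progress."""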
+ url = args[1]
+ if url == 'https://' + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
+ elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
+ return MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_get_request_job_complete(*args, **kwargs):
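+ """Mock for get_request after the job has completed."""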
+ url = args[1]
+ if url == 'https://' + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE_LED_INDICATOR_OFF_WITH_ETAG
+ elif url == urljoin('https://' + MOCK_BASE_URI, "Jobs/" + MOCK_JOB_NAME):
+ return MOCK_HTTP_RESPONSE_JOB_COMPLETE
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_put_request(*args, **kwargs):
+ """Mock put_request."""
+ url = args[1]
+ if url == 'https://' + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE_WITH_LOCATION_HEADER
+ raise RuntimeError("Illegal PUT call to: " + args[1])
+
+
+def mock_delete_request(*args, **kwargs):
+ """Mock delete request."""
+ url = args[1]
+ if url == urljoin('https://' + MOCK_BASE_URI, 'Jobs/' + MOCK_JOB_NAME):
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE
+ raise RuntimeError("Illegal DELETE call to: " + args[1])
+
+
+def mock_post_request(*args, **kwargs):
+ """Mock post_request."""
+ url = args[1]
+ if url == urljoin('https://' + MOCK_BASE_URI, OPERATING_SYSTEM_URI):
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE
+ raise RuntimeError("Illegal POST call to: " + args[1])
+
+
+def mock_http_request_conflict(*args, **kwargs):
+ """Mock to make an HTTP request return 409 Conflict"""
+ return MOCK_HTTP_RESPONSE_CONFLICT
+
+
+def mock_invalid_http_request(*args, **kwargs):
+ """Mock to make an HTTP request invalid. Raises an exception."""
+ raise RuntimeError("Illegal HTTP call to " + args[1])
+
+
+class TestOcapiCommand(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.tempdir = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({})
+ module.main()
+ self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json))
+
+ def test_module_fail_when_unknown_category(self):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'unknown',
+ 'command': 'IndicatorLedOn',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'baseuri': MOCK_BASE_URI
+ })
+ module.main()
+ self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json))
+
+ def test_set_power_mode(self):
+ """Test that we can set chassis power mode"""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'PowerModeLow',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_set_chassis_led_indicator(self):
+ """Test that we can set chassis LED indicator."""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOn',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_set_power_mode_already_set(self):
+ """Test that if we set Power Mode to normal when it's already normal, we get changed=False."""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'PowerModeNormal',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_set_power_mode_check_mode(self):
+ """Test check mode when setting chassis Power Mode."""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'PowerModeLow',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ '_ansible_check_mode': True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_set_chassis_led_indicator_check_mode(self):
+ """Test check mode when setting chassis LED indicator"""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOn',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ '_ansible_check_mode': True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_set_chassis_led_indicator_already_set(self):
+ """Test that if we set LED Indicator to off when it's already off, we get changed=False."""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOff',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_set_chassis_led_indicator_already_set_check_mode(self):
+ """Test that if we set LED Indicator to off when it's already off, we get changed=False even in check mode."""
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOff',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ "_ansible_check_mode": True
+ })
+ module.main()
+ self.assertEqual(NO_ACTION_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_set_chassis_invalid_indicator_command(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedBright',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertIn("Invalid Command", get_exception_message(ansible_fail_json))
+
+ def test_reset_enclosure(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Systems',
+ 'command': 'PowerGracefulRestart',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_reset_enclosure_check_mode(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Systems',
+ 'command': 'PowerGracefulRestart',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ "_ansible_check_mode": True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_upload_missing_update_image_path(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpload',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual("Missing update_image_path.", get_exception_message(ansible_fail_json))
+
+ def test_firmware_upload_file_not_found(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpload',
+ 'update_image_path': 'nonexistentfile.bin',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual("File does not exist.", get_exception_message(ansible_fail_json))
+
+ def test_firmware_upload(self):
+ filename = "fake_firmware.bin"
+ filepath = os.path.join(self.tempdir, filename)
+ file_contents = b'\x00\x01\x02\x03\x04'
+ with open(filepath, 'wb+') as f:
+ f.write(file_contents)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpload',
+ 'update_image_path': filepath,
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_upload_check_mode(self):
+ filename = "fake_firmware.bin"
+ filepath = os.path.join(self.tempdir, filename)
+ file_contents = b'\x00\x01\x02\x03\x04'
+ with open(filepath, 'wb+') as f:
+ f.write(file_contents)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpload',
+ 'update_image_path': filepath,
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ "_ansible_check_mode": True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_update(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpdate',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_update_check_mode(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWUpdate',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ "_ansible_check_mode": True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_activate(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWActivate',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_firmware_activate_check_mode(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWActivate',
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ "_ansible_check_mode": True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_delete_job(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_complete,
+ delete_request=mock_delete_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_delete_job_in_progress(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_in_progress,
+ delete_request=mock_invalid_http_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
+
+ def test_delete_job_in_progress_only_on_delete(self):
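+ # GET reports the job as complete, but the DELETE itself returns 409
+ # Conflict; the module should surface that as "job in progress".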
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_complete,
+ delete_request=mock_http_request_conflict,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
+
+ def test_delete_job_check_mode(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_complete,
+ delete_request=mock_delete_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ '_ansible_check_mode': True
+ })
+ module.main()
+ self.assertEqual(UPDATE_NOT_PERFORMED_IN_CHECK_MODE, get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_delete_job_check_mode_job_not_found(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_does_not_exist,
+ delete_request=mock_delete_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ '_ansible_check_mode': True
+ })
+ module.main()
+ self.assertEqual("Job already deleted.", get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_delete_job_check_mode_job_in_progress(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request_job_in_progress,
+ delete_request=mock_delete_request,
+ put_request=mock_invalid_http_request,
+ post_request=mock_invalid_http_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'DeleteJob',
+ 'baseuri': MOCK_BASE_URI,
+ 'job_name': MOCK_JOB_NAME,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21',
+ '_ansible_check_mode': True
+ })
+ module.main()
+ self.assertEqual("Cannot delete job because it is in progress.", get_exception_message(ansible_fail_json))
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_info.py
new file mode 100644
index 000000000..5010b328f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ocapi_info.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils import basic
+import ansible_collections.community.general.plugins.modules.ocapi_info as module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+
+MOCK_BASE_URI = "mockBaseUri"
+MOCK_JOB_NAME_IN_PROGRESS = "MockJobInProgress"
+MOCK_JOB_NAME_COMPLETE = "MockJobComplete"
+MOCK_JOB_NAME_DOES_NOT_EXIST = "MockJobDoesNotExist"
+
+ACTION_WAS_SUCCESSFUL = "Action was successful."
+
+MOCK_SUCCESSFUL_HTTP_RESPONSE = {
+ "ret": True,
+ "data": {}
+}
+
+MOCK_404_RESPONSE = {
+ "ret": False,
+ "status": 404
+}
+
+MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS = {
+ "ret": True,
+ "data": {
+ "Self": "https://openflex-data24-usalp02120qo0012-iomb:443/Storage/Devices/openflex-data24-usalp02120qo0012/Jobs/FirmwareUpdate/",
+ "ID": MOCK_JOB_NAME_IN_PROGRESS,
+ "PercentComplete": 10,
+ "Status": {
+ "State": {
+ "ID": 16,
+ "Name": "In service"
+ },
+ "Health": [
+ {
+ "ID": 5,
+ "Name": "OK"
+ }
+ ]
+ }
+ }
+}
+
+MOCK_HTTP_RESPONSE_JOB_COMPLETE = {
+ "ret": True,
+ "data": {
+ "Self": "https://openflex-data24-usalp02120qo0012-iomb:443/Storage/Devices/openflex-data24-usalp02120qo0012/Jobs/FirmwareUpdate/",
+ "ID": MOCK_JOB_NAME_COMPLETE,
+ "PercentComplete": 100,
+ "Status": {
+ "State": {
+ "ID": 65540,
+ "Name": "Activate needed"
+ },
+ "Health": [
+ {
+ "ID": 5,
+ "Name": "OK"
+ }
+ ],
+ "Details": [
+ "Completed."
+ ]
+ }
+ }
+}
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+def get_exception_message(ansible_exit_json):
+ """From an AnsibleExitJson exception, get the message string."""
+ return ansible_exit_json.exception.args[0]["msg"]
+
+
+def mock_get_request(*args, **kwargs):
+ """Mock for get_request."""
+ url = args[1]
+ if url == "https://" + MOCK_BASE_URI:
+ return MOCK_SUCCESSFUL_HTTP_RESPONSE
+ elif url == "https://" + MOCK_BASE_URI + '/Jobs/' + MOCK_JOB_NAME_IN_PROGRESS:
+ return MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS
+ elif url == "https://" + MOCK_BASE_URI + '/Jobs/' + MOCK_JOB_NAME_COMPLETE:
+ return MOCK_HTTP_RESPONSE_JOB_COMPLETE
+ elif url == "https://" + MOCK_BASE_URI + '/Jobs/' + MOCK_JOB_NAME_DOES_NOT_EXIST:
+ return MOCK_404_RESPONSE
+ else:
+ raise RuntimeError("Illegal GET call to: " + args[1])
+
+
+def mock_put_request(*args, **kwargs):
+ """Mock put_request. PUT should never happen so it will raise an error."""
+ raise RuntimeError("Illegal PUT call to: " + args[1])
+
+
+def mock_delete_request(*args, **kwargs):
+ """Mock delete request. DELETE should never happen so it will raise an error."""
+ raise RuntimeError("Illegal DELETE call to: " + args[1])
+
+
+def mock_post_request(*args, **kwargs):
+ """Mock post_request. POST should never happen so it will raise an error."""
+ raise RuntimeError("Illegal POST call to: " + args[1])
+
+
+class TestOcapiInfo(unittest.TestCase):
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({})
+ module.main()
+ self.assertIn("missing required arguments:", get_exception_message(ansible_fail_json))
+
+ def test_module_fail_when_unknown_category(self):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'unknown',
+ 'command': 'JobStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'baseuri': MOCK_BASE_URI
+ })
+ module.main()
+ self.assertIn("Invalid Category 'unknown", get_exception_message(ansible_fail_json))
+
+ def test_module_fail_when_unknown_command(self):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'unknown',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'baseuri': MOCK_BASE_URI
+ })
+ module.main()
+ self.assertIn("Invalid Command 'unknown", get_exception_message(ansible_fail_json))
+
+ def test_job_status_in_progress(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ delete_request=mock_delete_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'JobStatus',
+ 'job_name': MOCK_JOB_NAME_IN_PROGRESS,
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ response_data = ansible_exit_json.exception.args[0]
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["PercentComplete"], response_data["percentComplete"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["ID"], response_data["operationStatusId"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["State"]["Name"], response_data["operationStatus"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["Name"], response_data["operationHealth"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_IN_PROGRESS["data"]["Status"]["Health"][0]["ID"], response_data["operationHealthId"])
+ self.assertTrue(response_data["jobExists"])
+ self.assertFalse(response_data["changed"])
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
+ self.assertIsNone(response_data["details"])
+
+ def test_job_status_complete(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ delete_request=mock_delete_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'JobStatus',
+ 'job_name': MOCK_JOB_NAME_COMPLETE,
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ response_data = ansible_exit_json.exception.args[0]
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["PercentComplete"], response_data["percentComplete"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["ID"], response_data["operationStatusId"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["State"]["Name"], response_data["operationStatus"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["Name"], response_data["operationHealth"])
+ self.assertEqual(MOCK_HTTP_RESPONSE_JOB_COMPLETE["data"]["Status"]["Health"][0]["ID"], response_data["operationHealthId"])
+ self.assertTrue(response_data["jobExists"])
+ self.assertFalse(response_data["changed"])
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
+ self.assertEqual(["Completed."], response_data["details"])
+
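+    # A job that does not exist is still a successful module run: the module is
+    # expected to report jobExists=False with placeholder status fields rather
+    # than fail.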
+ def test_job_status_not_found(self):
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.ocapi_utils.OcapiUtils",
+ get_request=mock_get_request,
+ put_request=mock_put_request,
+ delete_request=mock_delete_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ set_module_args({
+ 'category': 'Jobs',
+ 'command': 'JobStatus',
+ 'job_name': MOCK_JOB_NAME_DOES_NOT_EXIST,
+ 'baseuri': MOCK_BASE_URI,
+ 'username': 'USERID',
+ 'password': 'PASSWORD=21'
+ })
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, get_exception_message(ansible_exit_json))
+ response_data = ansible_exit_json.exception.args[0]
+ self.assertFalse(response_data["jobExists"])
+ self.assertEqual(0, response_data["percentComplete"])
+ self.assertEqual(1, response_data["operationStatusId"])
+ self.assertEqual("Not Available", response_data["operationStatus"])
+ self.assertIsNone(response_data["operationHealth"])
+ self.assertIsNone(response_data["operationHealthId"])
+ self.assertFalse(response_data["changed"])
+ self.assertEqual(ACTION_WAS_SUCCESSFUL, response_data["msg"])
+ self.assertEqual("Job does not exist.", response_data["details"])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_one_vm.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_one_vm.py
new file mode 100644
index 000000000..fcfa685af
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_one_vm.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2023, Michal Opala <mopala@opennebula.io>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules.one_vm import parse_updateconf
+
+
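+# Each case pairs an input VM template with the expected parse_updateconf result:
+# only attributes valid for OpenNebula's updateconf call (for example OS/ARCH and
+# the CONTEXT entries) should survive, and everything else should be dropped.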
+PARSE_UPDATECONF_VALID = [
+ (
+ {
+ "CPU": 1,
+ "OS": {"ARCH": 2},
+ },
+ {
+ "OS": {"ARCH": 2},
+ }
+ ),
+ (
+ {
+            "OS": {"ARCH": 1, "ASD": 2},  # "ASD" is not a valid attribute, so it is ignored
+ },
+ {
+ "OS": {"ARCH": 1},
+ }
+ ),
+ (
+ {
+            "OS": {"ASD": 1},  # "ASD" is not a valid attribute, so it is ignored
+ },
+ {
+ }
+ ),
+ (
+ {
+ "MEMORY": 1,
+ "CONTEXT": {
+ "PASSWORD": 2,
+ "SSH_PUBLIC_KEY": 3,
+ },
+ },
+ {
+ "CONTEXT": {
+ "PASSWORD": 2,
+ "SSH_PUBLIC_KEY": 3,
+ },
+ }
+ ),
+]
+
+
+@pytest.mark.parametrize('vm_template,expected_result', PARSE_UPDATECONF_VALID)
+def test_parse_updateconf(vm_template, expected_result):
+ result = parse_updateconf(vm_template)
+ assert result == expected_result, repr(result)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_datacenter_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_datacenter_info.py
new file mode 100644
index 000000000..eb5e05d22
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_datacenter_info.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from .hpe_test_utils import FactsParamsTest
+from .oneview_conftest import mock_ov_client, mock_ansible_module # noqa: F401, pylint: disable=unused-import
+
+from ansible_collections.community.general.plugins.modules.oneview_datacenter_info import DatacenterInfoModule
+
+PARAMS_GET_CONNECTED = dict(
+ config='config.json',
+ name="MyDatacenter",
+ options=['visualContent']
+)
+
+
+class TestDatacenterInfoModule(FactsParamsTest):
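+    # The autouse fixture below runs before every test, wiring in the pytest-style
+    # mock_ov_client/mock_ansible_module fixtures imported at the top of the file.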
+ @pytest.fixture(autouse=True)
+ def setUp(self, mock_ansible_module, mock_ov_client):
+ self.resource = mock_ov_client.datacenters
+ self.mock_ansible_module = mock_ansible_module
+ self.mock_ov_client = mock_ov_client
+
+ def test_should_get_all_datacenters(self):
+ self.resource.get_all.return_value = {"name": "Data Center Name"}
+
+ self.mock_ansible_module.params = dict(config='config.json',)
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            datacenters={"name": "Data Center Name"}
+ )
+
+ def test_should_get_datacenter_by_name(self):
+ self.resource.get_by.return_value = [{"name": "Data Center Name"}]
+
+ self.mock_ansible_module.params = dict(config='config.json', name="MyDatacenter")
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            datacenters=[{"name": "Data Center Name"}]
+ )
+
+ def test_should_get_datacenter_visual_content(self):
+ self.resource.get_by.return_value = [{"name": "Data Center Name", "uri": "/rest/datacenter/id"}]
+
+ self.resource.get_visual_content.return_value = {
+ "name": "Visual Content"}
+
+ self.mock_ansible_module.params = PARAMS_GET_CONNECTED
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenter_visual_content={'name': 'Visual Content'},
+ datacenters=[{'name': 'Data Center Name', 'uri': '/rest/datacenter/id'}]
+ )
+
+ def test_should_get_none_datacenter_visual_content(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_GET_CONNECTED
+
+ DatacenterInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ datacenter_visual_content=None,
+ datacenters=[]
+ )
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_enclosure_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_enclosure_info.py
new file mode 100644
index 000000000..e8ef3449f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_enclosure_info.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .hpe_test_utils import FactsParamsTestCase
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules.oneview_enclosure_info import EnclosureInfoModule
+
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=[]
+)
+
+PARAMS_GET_BY_NAME_WITH_OPTIONS = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=['utilization', 'environmentalConfiguration', 'script']
+)
+
+PARAMS_GET_UTILIZATION_WITH_PARAMS = dict(
+ config='config.json',
+ name="Test-Enclosure",
+ options=[dict(utilization=dict(fields='AveragePower',
+ filter=['startDate=2016-06-30T03:29:42.000Z',
+ 'endDate=2016-07-01T03:29:42.000Z'],
+ view='day',
+ refresh=True))]
+)
+
+PRESENT_ENCLOSURES = [{
+ "name": "Test-Enclosure",
+ "uri": "/rest/enclosures/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+ENCLOSURE_SCRIPT = '# script content'
+
+ENCLOSURE_UTILIZATION = {
+ "isFresh": "True"
+}
+
+ENCLOSURE_ENVIRONMENTAL_CONFIG = {
+ "calibratedMaxPower": "2500"
+}
+
+
+class EnclosureInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, EnclosureInfoModule)
+ self.enclosures = self.mock_ov_client.enclosures
+ FactsParamsTestCase.configure_client_mock(self, self.enclosures)
+
+ def test_should_get_all_enclosures(self):
+ self.enclosures.get_all.return_value = PRESENT_ENCLOSURES
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            enclosures=PRESENT_ENCLOSURES
+ )
+
+ def test_should_get_enclosure_by_name(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            enclosures=PRESENT_ENCLOSURES
+        )
+
+ def test_should_get_enclosure_by_name_with_options(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EnclosureInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ enclosures=PRESENT_ENCLOSURES,
+ enclosure_script=ENCLOSURE_SCRIPT,
+ enclosure_environmental_configuration=ENCLOSURE_ENVIRONMENTAL_CONFIG,
+ enclosure_utilization=ENCLOSURE_UTILIZATION
+ )
+
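+    # With the plain 'utilization' option string, the module passes empty-string
+    # defaults for fields/filter/view/refresh; the parameterized test below
+    # overrides them through the option's nested dict.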
+ def test_should_get_all_utilization_data(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EnclosureInfoModule().run()
+
+ self.enclosures.get_utilization.assert_called_once_with(PRESENT_ENCLOSURES[0]['uri'], fields='', filter='',
+ view='', refresh='')
+
+ def test_should_get_utilization_with_parameters(self):
+ self.enclosures.get_by.return_value = PRESENT_ENCLOSURES
+ self.enclosures.get_script.return_value = ENCLOSURE_SCRIPT
+ self.enclosures.get_utilization.return_value = ENCLOSURE_UTILIZATION
+ self.enclosures.get_environmental_configuration.return_value = ENCLOSURE_ENVIRONMENTAL_CONFIG
+
+ self.mock_ansible_module.params = PARAMS_GET_UTILIZATION_WITH_PARAMS
+
+ EnclosureInfoModule().run()
+
+ date_filter = ["startDate=2016-06-30T03:29:42.000Z", "endDate=2016-07-01T03:29:42.000Z"]
+
+ self.enclosures.get_utilization.assert_called_once_with(
+ PRESENT_ENCLOSURES[0]['uri'], fields='AveragePower', filter=date_filter, view='day', refresh=True)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network.py
new file mode 100644
index 000000000..f1398740e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network.py
@@ -0,0 +1,392 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import yaml
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .oneview_module_loader import EthernetNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+DEFAULT_ETHERNET_NAME = 'Test Ethernet Network'
+RENAMED_ETHERNET = 'Renamed Ethernet Network'
+
+DEFAULT_ENET_TEMPLATE = dict(
+ name=DEFAULT_ETHERNET_NAME,
+ vlanId=200,
+ ethernetNetworkType="Tagged",
+ purpose="General",
+ smartLink=False,
+ privateNetwork=False,
+ connectionTemplateUri=None
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_TO_RENAME = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME,
+ newName=RENAMED_ETHERNET)
+)
+
+YAML_PARAMS_WITH_CHANGES = """
+ config: "config.json"
+ state: present
+ data:
+ name: 'Test Ethernet Network'
+ purpose: Management
+ connectionTemplateUri: ~
+ bandwidth:
+ maximumBandwidth: 3000
+ typicalBandwidth: 2000
+"""
+
+YAML_RESET_CONNECTION_TEMPLATE = """
+ config: "{{ config }}"
+ state: default_bandwidth_reset
+ data:
+ name: 'network name'
+"""
+
+PARAMS_FOR_SCOPES_SET = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_ETHERNET_NAME)
+)
+
+PARAMS_FOR_BULK_CREATED = dict(
+ config='config.json',
+ state='present',
+ data=dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10")
+)
+
+DEFAULT_BULK_ENET_TEMPLATE = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ {'name': 'TestNetwork_5', 'vlanId': 5},
+ {'name': 'TestNetwork_9', 'vlanId': 9},
+ {'name': 'TestNetwork_10', 'vlanId': 10},
+]
+
+DICT_PARAMS_WITH_CHANGES = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)["data"]
+
+
+class EthernetNetworkModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, EthernetNetworkModule)
+ self.resource = self.mock_ov_client.ethernet_networks
+
+ def test_should_create_new_ethernet_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_CREATED,
+ ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=EthernetNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(ethernet_network=DEFAULT_ENET_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['purpose'] = 'Management'
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+ self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_update_when_only_bandwidth_has_modified_attributes(self):
+ self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
+ self.mock_ov_client.connection_templates.get.return_value = {"uri": "uri"}
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=DICT_PARAMS_WITH_CHANGES)
+ )
+
+ def test_update_when_data_has_modified_attributes_but_bandwidth_is_equal(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['purpose'] = 'Management'
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+ self.mock_ov_client.connection_templates.get.return_value = {
+ "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_update_successfully_even_when_connection_template_uri_not_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ del data_merged['connectionTemplateUri']
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_PARAMS_WITH_CHANGES)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(ethernet_network=data_merged)
+ )
+
+ def test_rename_when_resource_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_ETHERNET
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = params_to_rename
+
+ EthernetNetworkModule().run()
+
+ self.resource.update.assert_called_once_with(data_merged)
+
+ def test_create_with_new_name_when_resource_not_exists(self):
+ data_merged = DEFAULT_ENET_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_ETHERNET
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = params_to_rename
+
+ EthernetNetworkModule().run()
+
+ self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
+
+ def test_should_remove_ethernet_network(self):
+ self.resource.get_by.return_value = [DEFAULT_ENET_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_ethernet_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=EthernetNetworkModule.MSG_ALREADY_ABSENT
+ )
+
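+    # The bulk flow consults get_range twice (existing networks in the VLAN range
+    # first, the final bulk result second), hence the two-element side_effect lists.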
+ def test_should_create_all_ethernet_networks(self):
+ self.resource.get_range.side_effect = [[], DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.create_bulk.return_value = DEFAULT_BULK_ENET_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with(
+ dict(namePrefix="TestNetwork", vlanIdRange="1-2,5,9-10"))
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_create_missing_ethernet_networks(self):
+ enet_get_range_return = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ ]
+
+ self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with(
+ dict(namePrefix="TestNetwork", vlanIdRange="5,9,10"))
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True, msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_create_missing_ethernet_networks_with_just_one_difference(self):
+ enet_get_range_return = [
+ {'name': 'TestNetwork_1', 'vlanId': 1},
+ {'name': 'TestNetwork_2', 'vlanId': 2},
+ ]
+
+ self.resource.get_range.side_effect = [enet_get_range_return, DEFAULT_BULK_ENET_TEMPLATE]
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.resource.create_bulk.assert_called_once_with({'vlanIdRange': '5-5', 'namePrefix': 'TestNetwork'})
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=EthernetNetworkModule.MSG_MISSING_BULK_CREATED,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_should_do_nothing_when_ethernet_networks_already_exist(self):
+ self.resource.get_range.return_value = DEFAULT_BULK_ENET_TEMPLATE
+ self.resource.dissociate_values_or_ranges.return_value = [1, 2, 5, 9, 10]
+
+ self.mock_ansible_module.params = PARAMS_FOR_BULK_CREATED
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False, msg=EthernetNetworkModule.MSG_BULK_ALREADY_EXIST,
+ ansible_facts=dict(ethernet_network_bulk=DEFAULT_BULK_ENET_TEMPLATE))
+
+ def test_reset_successfully(self):
+ self.resource.get_by.return_value = [DICT_PARAMS_WITH_CHANGES]
+ self.mock_ov_client.connection_templates.update.return_value = {'result': 'success'}
+ self.mock_ov_client.connection_templates.get.return_value = {
+ "bandwidth": DICT_PARAMS_WITH_CHANGES['bandwidth']}
+
+ self.mock_ov_client.connection_templates.get_default.return_value = {"bandwidth": {
+ "max": 1
+ }}
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_RESET_CONNECTION_TEMPLATE)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True, msg=EthernetNetworkModule.MSG_CONNECTION_TEMPLATE_RESET,
+ ansible_facts=dict(ethernet_network_connection_template={'result': 'success'}))
+
+ def test_should_fail_when_reset_not_existing_ethernet_network(self):
+ self.resource.get_by.return_value = [None]
+
+ self.mock_ansible_module.params = yaml.safe_load(YAML_RESET_CONNECTION_TEMPLATE)
+
+ EthernetNetworkModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=EthernetNetworkModule.MSG_ETHERNET_NETWORK_NOT_FOUND
+ )
+
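+    # Scope changes go through a JSON Patch-style call (patch(uri, operation='replace',
+    # path='/scopeUris', value=[...])) rather than a full resource update. Note that
+    # dict.copy() is shallow, so assigning into params_to_scope['data'] also mutates
+    # the shared PARAMS_FOR_PRESENT['data'].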
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_ENET_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/ethernet/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ EthernetNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/ethernet/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(ethernet_network=patch_return),
+ msg=EthernetNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_ENET_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ EthernetNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(ethernet_network=resource_data),
+ msg=EthernetNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py
new file mode 100644
index 000000000..4a2813e2f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_ethernet_network_info.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from .oneview_module_loader import EthernetNetworkInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test Ethernet Network",
+ options=[]
+)
+
+PARAMS_GET_BY_NAME_WITH_OPTIONS = dict(
+ config='config.json',
+ name="Test Ethernet Network",
+ options=['associatedProfiles', 'associatedUplinkGroups']
+)
+
+PRESENT_ENETS = [{
+ "name": "Test Ethernet Network",
+ "uri": "/rest/ethernet-networks/d34dcf5e-0d8e-441c-b00d-e1dd6a067188"
+}]
+
+ENET_ASSOCIATED_UPLINK_GROUP_URIS = [
+ "/rest/uplink-sets/c6bf9af9-48e7-4236-b08a-77684dc258a5",
+ "/rest/uplink-sets/e2f0031b-52bd-4223-9ac1-d91cb519d548"
+]
+
+ENET_ASSOCIATED_PROFILE_URIS = [
+ "/rest/server-profiles/83e2e117-59dc-4e33-9f24-462af951cbbe",
+ "/rest/server-profiles/57d3af2a-b6d2-4446-8645-f38dd808ea4d"
+]
+
+ENET_ASSOCIATED_UPLINK_GROUPS = [dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[0], name='Uplink Set 1'),
+ dict(uri=ENET_ASSOCIATED_UPLINK_GROUP_URIS[1], name='Uplink Set 2')]
+
+ENET_ASSOCIATED_PROFILES = [dict(uri=ENET_ASSOCIATED_PROFILE_URIS[0], name='Server Profile 1'),
+ dict(uri=ENET_ASSOCIATED_PROFILE_URIS[1], name='Server Profile 2')]
+
+
+class EthernetNetworkInfoSpec(unittest.TestCase,
+                              FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, EthernetNetworkInfoModule)
+ self.ethernet_networks = self.mock_ov_client.ethernet_networks
+ FactsParamsTestCase.configure_client_mock(self, self.ethernet_networks)
+
+ def test_should_get_all_enets(self):
+ self.ethernet_networks.get_all.return_value = PRESENT_ENETS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            ethernet_networks=PRESENT_ENETS
+ )
+
+ def test_should_get_enet_by_name(self):
+ self.ethernet_networks.get_by.return_value = PRESENT_ENETS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            ethernet_networks=PRESENT_ENETS
+ )
+
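+    # The associated* options return bare URIs that the module resolves into full
+    # resources via server_profiles.get / uplink_sets.get, which is why those
+    # client calls are given side_effect lists here.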
+ def test_should_get_enet_by_name_with_options(self):
+ self.ethernet_networks.get_by.return_value = PRESENT_ENETS
+ self.ethernet_networks.get_associated_profiles.return_value = ENET_ASSOCIATED_PROFILE_URIS
+ self.ethernet_networks.get_associated_uplink_groups.return_value = ENET_ASSOCIATED_UPLINK_GROUP_URIS
+ self.mock_ov_client.server_profiles.get.side_effect = ENET_ASSOCIATED_PROFILES
+ self.mock_ov_client.uplink_sets.get.side_effect = ENET_ASSOCIATED_UPLINK_GROUPS
+
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITH_OPTIONS
+
+ EthernetNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ethernet_networks=PRESENT_ENETS,
+ enet_associated_profiles=ENET_ASSOCIATED_PROFILES,
+ enet_associated_uplink_groups=ENET_ASSOCIATED_UPLINK_GROUPS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network.py
new file mode 100644
index 000000000..6def80fc4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_FC_NETWORK_TEMPLATE = dict(
+ name='New FC Network 2',
+ autoLoginRedistribution=True,
+ fabricType='FabricAttach'
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'],
+ newName="New Name",
+ fabricType='DirectAttach')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
+)
+
+
+class FcNetworkModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, FcNetworkModule)
+ self.resource = self.mock_ov_client.fc_networks
+
+ def test_should_create_new_fc_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_FC_NETWORK_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_CREATED,
+ ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(fc_network=data_merged)
+ )
+
+ def test_should_remove_fc_network(self):
+ self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_fc_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcNetworkModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/fc/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ FcNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/fc/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(fc_network=patch_return),
+ msg=FcNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ FcNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(fc_network=resource_data),
+ msg=FcNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network_info.py
new file mode 100644
index 000000000..236ce136a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fc_network_info.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcNetworkInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test FC Network"
+)
+
+PRESENT_NETWORKS = [{
+ "name": "Test FC Network",
+ "uri": "/rest/fc-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+
+class FcNetworkInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, FcNetworkInfoModule)
+ self.fc_networks = self.mock_ov_client.fc_networks
+ FactsParamsTestCase.configure_client_mock(self, self.fc_networks)
+
+ def test_should_get_all_fc_networks(self):
+ self.fc_networks.get_all.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ FcNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fc_networks=PRESENT_NETWORKS
+ )
+
+ def test_should_get_fc_network_by_name(self):
+ self.fc_networks.get_by.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ FcNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fc_networks=PRESENT_NETWORKS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network.py
new file mode 100644
index 000000000..224e5471e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import FcoeNetworkModule
+from .hpe_test_utils import OneViewBaseTestCase
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_FCOE_NETWORK_TEMPLATE = dict(
+ name='New FCoE Network 2',
+ vlanId="201",
+ connectionTemplateUri=None
+)
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'],
+ fabricType='DirectAttach',
+ newName='New Name')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_FCOE_NETWORK_TEMPLATE['name'])
+)
+
+
+class FcoeNetworkSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+ OneViewBaseTestCase provides the mocks used in this test case
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, FcoeNetworkModule)
+ self.resource = self.mock_ov_client.fcoe_networks
+
+ def test_should_create_new_fcoe_network(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_FCOE_NETWORK_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_CREATED,
+ ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT.copy()
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcoeNetworkModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(fcoe_network=DEFAULT_FCOE_NETWORK_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_UPDATED,
+ ansible_facts=dict(fcoe_network=data_merged)
+ )
+
+ def test_should_remove_fcoe_network(self):
+ self.resource.get_by.return_value = [DEFAULT_FCOE_NETWORK_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=FcoeNetworkModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_fcoe_network_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ FcoeNetworkModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=FcoeNetworkModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/fcoe/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ FcoeNetworkModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/fcoe/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(fcoe_network=patch_return),
+ msg=FcoeNetworkModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_FCOE_NETWORK_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ FcoeNetworkModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(fcoe_network=resource_data),
+ msg=FcoeNetworkModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py
new file mode 100644
index 000000000..387c1da3c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_fcoe_network_info.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from .oneview_module_loader import FcoeNetworkInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test FCoE Networks"
+)
+
+PRESENT_NETWORKS = [{
+ "name": "Test FCoE Networks",
+ "uri": "/rest/fcoe-networks/c6bf9af9-48e7-4236-b08a-77684dc258a5"
+}]
+
+
+class FcoeNetworkInfoSpec(unittest.TestCase,
+                          FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, FcoeNetworkInfoModule)
+ self.fcoe_networks = self.mock_ov_client.fcoe_networks
+ FactsParamsTestCase.configure_client_mock(self, self.fcoe_networks)
+
+ def test_should_get_all_fcoe_network(self):
+ self.fcoe_networks.get_all.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ FcoeNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fcoe_networks=PRESENT_NETWORKS
+ )
+
+ def test_should_get_fcoe_network_by_name(self):
+ self.fcoe_networks.get_by.return_value = PRESENT_NETWORKS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ FcoeNetworkInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ fcoe_networks=PRESENT_NETWORKS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py
new file mode 100644
index 000000000..1f941fb50
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group.py
@@ -0,0 +1,261 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .hpe_test_utils import OneViewBaseTestCase
+from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group import LogicalInterconnectGroupModule
+
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_LIG_NAME = 'Test Logical Interconnect Group'
+RENAMED_LIG = 'Renamed Logical Interconnect Group'
+
+DEFAULT_LIG_TEMPLATE = dict(
+ name=DEFAULT_LIG_NAME,
+ uplinkSets=[],
+ enclosureType='C7000',
+ interconnectMapTemplate=dict(
+ interconnectMapEntryTemplates=[]
+ )
+)
+
+PARAMS_LIG_TEMPLATE_WITH_MAP = dict(
+ config='config.json',
+ state='present',
+ data=dict(
+ name=DEFAULT_LIG_NAME,
+ uplinkSets=[],
+ enclosureType='C7000',
+ interconnectMapTemplate=dict(
+ interconnectMapEntryTemplates=[
+ {
+ "logicalDownlinkUri": None,
+ "logicalLocation": {
+ "locationEntries": [
+ {
+ "relativeValue": "1",
+ "type": "Bay"
+ },
+ {
+ "relativeValue": 1,
+ "type": "Enclosure"
+ }
+ ]
+ },
+ "permittedInterconnectTypeName": "HP VC Flex-10/10D Module"
+ }]
+ )
+ ))
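+# 'permittedInterconnectTypeName' is a by-name convenience: the module resolves it
+# through interconnect_types.get_by and fails with MSG_INTERCONNECT_TYPE_NOT_FOUND
+# when the lookup returns nothing (both paths are exercised below).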
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME)
+)
+
+PARAMS_TO_RENAME = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME,
+ newName=RENAMED_LIG)
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_LIG_NAME,
+ description='It is an example')
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_LIG_NAME)
+)
+
+
+class LogicalInterconnectGroupGeneralSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ def setUp(self):
+ self.configure_mocks(self, LogicalInterconnectGroupModule)
+ self.resource = self.mock_ov_client.logical_interconnect_groups
+
+ def test_should_create_new_lig(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_LIG_TEMPLATE
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_CREATED,
+ ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE)
+ )
+
+ def test_should_create_new_with_named_permitted_interconnect_type(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = PARAMS_FOR_PRESENT
+
+ self.mock_ansible_module.params = deepcopy(PARAMS_LIG_TEMPLATE_WITH_MAP)
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_CREATED,
+ ansible_facts=dict(logical_interconnect_group=PARAMS_FOR_PRESENT.copy())
+ )
+
+ def test_should_fail_when_permitted_interconnect_type_name_not_exists(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = PARAMS_FOR_PRESENT
+ self.mock_ov_client.interconnect_types.get_by.return_value = []
+
+ self.mock_ansible_module.params = deepcopy(PARAMS_LIG_TEMPLATE_WITH_MAP)
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=LogicalInterconnectGroupModule.MSG_INTERCONNECT_TYPE_NOT_FOUND)
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(logical_interconnect_group=DEFAULT_LIG_TEMPLATE)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['description'] = 'New description'
+
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_UPDATED,
+ ansible_facts=dict(logical_interconnect_group=data_merged)
+ )
+
+ def test_rename_when_resource_exists(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_LIG
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+ self.resource.update.return_value = data_merged
+
+ self.mock_ansible_module.params = params_to_rename
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.update.assert_called_once_with(data_merged)
+
+ def test_create_with_newName_when_resource_not_exists(self):
+ data_merged = DEFAULT_LIG_TEMPLATE.copy()
+ data_merged['name'] = RENAMED_LIG
+ params_to_rename = PARAMS_TO_RENAME.copy()
+
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = DEFAULT_LIG_TEMPLATE
+
+ self.mock_ansible_module.params = params_to_rename
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.create.assert_called_once_with(PARAMS_TO_RENAME['data'])
+
+ def test_should_remove_lig(self):
+ self.resource.get_by.return_value = [DEFAULT_LIG_TEMPLATE]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=LogicalInterconnectGroupModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_lig_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ LogicalInterconnectGroupModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_LIG_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/lig/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ LogicalInterconnectGroupModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/lig/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(logical_interconnect_group=patch_return),
+ msg=LogicalInterconnectGroupModule.MSG_UPDATED
+ )
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+ params_to_scope = PARAMS_FOR_PRESENT.copy()
+ params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = DEFAULT_LIG_TEMPLATE.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ LogicalInterconnectGroupModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(logical_interconnect_group=resource_data),
+ msg=LogicalInterconnectGroupModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
new file mode 100644
index 000000000..9fa602a8c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_logical_interconnect_group_info.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .hpe_test_utils import FactsParamsTestCase
+from ansible_collections.community.general.plugins.modules.oneview_logical_interconnect_group_info import (
+ LogicalInterconnectGroupInfoModule
+)
+
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name="Test Logical Interconnect Group"
+)
+
+PRESENT_LIGS = [{
+ "name": "Test Logical Interconnect Group",
+ "uri": "/rest/logical-interconnect-groups/ebb4ada8-08df-400e-8fac-9ff987ac5140"
+}]
+
+
+class LogicalInterconnectGroupInfoSpec(unittest.TestCase, FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, LogicalInterconnectGroupInfoModule)
+ self.logical_interconnect_groups = self.mock_ov_client.logical_interconnect_groups
+ FactsParamsTestCase.configure_client_mock(self, self.logical_interconnect_groups)
+
+ def test_should_get_all_ligs(self):
+ self.logical_interconnect_groups.get_all.return_value = PRESENT_LIGS
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ LogicalInterconnectGroupInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            logical_interconnect_groups=PRESENT_LIGS
+ )
+
+ def test_should_get_lig_by_name(self):
+ self.logical_interconnect_groups.get_by.return_value = PRESENT_LIGS
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ LogicalInterconnectGroupInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+            logical_interconnect_groups=PRESENT_LIGS
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set.py
new file mode 100644
index 000000000..f801cd102
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set.py
@@ -0,0 +1,187 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .hpe_test_utils import OneViewBaseTestCase
+from .oneview_module_loader import NetworkSetModule
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+NETWORK_SET = dict(
+ name='OneViewSDK Test Network Set',
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
+)
+
+NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
+
+PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=NETWORK_SET['name'],
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
+)
+
+PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=NETWORK_SET['name'],
+ newName=NETWORK_SET['name'] + " - Renamed",
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
+)
+
+PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=NETWORK_SET['name'])
+)
+
+
+class NetworkSetModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ """
+    OneViewBaseTestCase provides common tests for the class constructor and main
+    function, and supplies the mocks used in this test case.
+ """
+
+ def setUp(self):
+ self.configure_mocks(self, NetworkSetModule)
+ self.resource = self.mock_ov_client.network_sets
+ self.ethernet_network_client = self.mock_ov_client.ethernet_networks
+
+ def test_should_create_new_network_set(self):
+ self.resource.get_by.return_value = []
+ self.resource.create.return_value = NETWORK_SET
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_CREATED,
+ ansible_facts=dict(network_set=NETWORK_SET)
+ )
+
+ def test_should_not_update_when_data_is_equals(self):
+ self.resource.get_by.return_value = [NETWORK_SET]
+
+ self.mock_ansible_module.params = PARAMS_FOR_PRESENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=NetworkSetModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(network_set=NETWORK_SET)
+ )
+
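+    # get_by is configured with side_effect so that consecutive lookups return
+    # different results: the existing network set first, then an empty list for
+    # the follow-up lookup.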
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = dict(name=NETWORK_SET['name'] + " - Renamed",
+ networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
+ '/rest/ethernet-networks/ddd-eee-fff']
+ )
+
+ self.resource.get_by.side_effect = [NETWORK_SET], []
+ self.resource.update.return_value = data_merged
+ self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_UPDATED,
+ ansible_facts=dict(network_set=data_merged)
+ )
+
+ def test_should_raise_exception_when_ethernet_network_not_found(self):
+        self.resource.get_by.side_effect = [[NETWORK_SET], []]
+ self.ethernet_network_client.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_WITH_CHANGES
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg=NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND + "Name of a Network"
+ )
+
+ def test_should_remove_network(self):
+ self.resource.get_by.return_value = [NETWORK_SET]
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=NetworkSetModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_network_set_not_exist(self):
+ self.resource.get_by.return_value = []
+
+ self.mock_ansible_module.params = PARAMS_FOR_ABSENT
+
+ NetworkSetModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=NetworkSetModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_update_scopes_when_different(self):
+        params_to_scope = PARAMS_FOR_PRESENT.copy()
+        params_to_scope['data'] = PARAMS_FOR_PRESENT['data'].copy()
+        params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = NETWORK_SET.copy()
+ resource_data['scopeUris'] = ['fake']
+ resource_data['uri'] = 'rest/network-sets/fake'
+ self.resource.get_by.return_value = [resource_data]
+
+ patch_return = resource_data.copy()
+ patch_return['scopeUris'] = ['test']
+ self.resource.patch.return_value = patch_return
+
+ NetworkSetModule().run()
+
+ self.resource.patch.assert_called_once_with('rest/network-sets/fake',
+ operation='replace',
+ path='/scopeUris',
+ value=['test'])
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ ansible_facts=dict(network_set=patch_return),
+ msg=NetworkSetModule.MSG_UPDATED
+ )
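+
+        # The patch call above mirrors a JSON Patch style "replace" operation;
+        # as an illustration (assumed, not taken from the module), the
+        # equivalent REST payload would be:
+        #
+        #     [{"op": "replace", "path": "/scopeUris", "value": ["test"]}]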
+
+ def test_should_do_nothing_when_scopes_are_the_same(self):
+        params_to_scope = PARAMS_FOR_PRESENT.copy()
+        params_to_scope['data'] = PARAMS_FOR_PRESENT['data'].copy()
+        params_to_scope['data']['scopeUris'] = ['test']
+ self.mock_ansible_module.params = params_to_scope
+
+ resource_data = NETWORK_SET.copy()
+ resource_data['scopeUris'] = ['test']
+ self.resource.get_by.return_value = [resource_data]
+
+ NetworkSetModule().run()
+
+        self.resource.patch.assert_not_called()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ ansible_facts=dict(network_set=resource_data),
+ msg=NetworkSetModule.MSG_ALREADY_PRESENT
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set_info.py
new file mode 100644
index 000000000..13cd0400a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_network_set_info.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import NetworkSetInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+ERROR_MSG = 'Fake message error'
+
+PARAMS_GET_ALL = dict(
+ config='config.json',
+ name=None
+)
+
+PARAMS_GET_ALL_WITHOUT_ETHERNET = dict(
+ config='config.json',
+ name=None,
+ options=['withoutEthernet']
+)
+
+PARAMS_GET_BY_NAME = dict(
+ config='config.json',
+ name='Network Set 1'
+)
+
+PARAMS_GET_BY_NAME_WITHOUT_ETHERNET = dict(
+ config='config.json',
+ name='Network Set 1',
+ options=['withoutEthernet']
+)
+
+
+class NetworkSetInfoSpec(unittest.TestCase,
+ FactsParamsTestCase):
+ def setUp(self):
+ self.configure_mocks(self, NetworkSetInfoModule)
+ self.network_sets = self.mock_ov_client.network_sets
+ FactsParamsTestCase.configure_client_mock(self, self.network_sets)
+
+ def test_should_get_all_network_sets(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
+ }, {
+ "name": "Network Set 2",
+ "networkUris": ['/rest/ethernet-networks/ddd-eee-fff', '/rest/ethernet-networks/ggg-hhh-fff']
+ }]
+
+ self.network_sets.get_all.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_ALL
+
+ NetworkSetInfoModule().run()
+
+ self.network_sets.get_all.assert_called_once_with()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_all_network_sets_without_ethernet(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": []
+ }, {
+ "name": "Network Set 2",
+ "networkUris": []
+ }]
+
+        self.network_sets.get_all_without_ethernet.return_value = network_sets
+        self.mock_ansible_module.params = PARAMS_GET_ALL_WITHOUT_ETHERNET
+
+        NetworkSetInfoModule().run()
+
+        self.network_sets.get_all_without_ethernet.assert_called_once_with()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_network_set_by_name(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": ['/rest/ethernet-networks/aaa-bbb-ccc']
+ }]
+
+ self.network_sets.get_by.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME
+
+ NetworkSetInfoModule().run()
+
+ self.network_sets.get_by.assert_called_once_with('name', 'Network Set 1')
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+ def test_should_get_network_set_by_name_without_ethernet(self):
+ network_sets = [{
+ "name": "Network Set 1",
+ "networkUris": []
+ }]
+
+ self.network_sets.get_all_without_ethernet.return_value = network_sets
+ self.mock_ansible_module.params = PARAMS_GET_BY_NAME_WITHOUT_ETHERNET
+
+ NetworkSetInfoModule().run()
+
+ expected_filter = "\"'name'='Network Set 1'\""
+ self.network_sets.get_all_without_ethernet.assert_called_once_with(filter=expected_filter)
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ network_sets=network_sets)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager.py
new file mode 100644
index 000000000..d675c3b35
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest, mock
+from .oneview_module_loader import SanManagerModule
+from .hpe_test_utils import OneViewBaseTestCase
+from copy import deepcopy
+
+FAKE_MSG_ERROR = 'Fake message error'
+
+DEFAULT_SAN_MANAGER_TEMPLATE = dict(
+ name='172.18.15.1',
+ providerDisplayName='Brocade Network Advisor',
+ uri='/rest/fc-sans/device-managers/UUU-AAA-BBB',
+ refreshState='OK',
+ connectionInfo=[
+ {
+ 'valueFormat': 'IPAddressOrHostname',
+ 'displayName': 'Host',
+ 'name': 'Host',
+ 'valueType': 'String',
+ 'required': False,
+ 'value': '172.18.15.1'
+ }]
+)
+
+
+class SanManagerModuleSpec(unittest.TestCase,
+ OneViewBaseTestCase):
+ PARAMS_FOR_PRESENT = dict(
+ config='config.json',
+ state='present',
+ data=DEFAULT_SAN_MANAGER_TEMPLATE
+ )
+
+ PARAMS_FOR_CONNECTION_INFORMATION_SET = dict(
+ config='config.json',
+ state='connection_information_set',
+ data=DEFAULT_SAN_MANAGER_TEMPLATE.copy()
+ )
+
+ PARAMS_WITH_CHANGES = dict(
+ config='config.json',
+ state='present',
+ data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'],
+ refreshState='RefreshPending')
+ )
+
+ PARAMS_FOR_ABSENT = dict(
+ config='config.json',
+ state='absent',
+ data=dict(name=DEFAULT_SAN_MANAGER_TEMPLATE['name'])
+ )
+
+ def setUp(self):
+ self.configure_mocks(self, SanManagerModule)
+ self.resource = self.mock_ov_client.san_managers
+
+ def test_should_add_new_san_manager(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_CREATED,
+ ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+ )
+
+ def test_should_find_provider_uri_to_add(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ provider_display_name = DEFAULT_SAN_MANAGER_TEMPLATE['providerDisplayName']
+ self.resource.get_provider_uri.assert_called_once_with(provider_display_name)
+
+ def test_should_not_update_when_data_is_equals(self):
+ output_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ output_data.pop('connectionInfo')
+ self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=SanManagerModule.MSG_ALREADY_PRESENT,
+ ansible_facts=dict(san_manager=output_data)
+ )
+
+ def test_update_when_data_has_modified_attributes(self):
+ data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = data_merged
+ self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_UPDATED,
+ ansible_facts=dict(san_manager=data_merged)
+ )
+
+ def test_update_should_not_send_connection_info_when_not_informed_on_data(self):
+ merged_data = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ merged_data['refreshState'] = 'RefreshPending'
+ output_data = deepcopy(merged_data)
+ output_data.pop('connectionInfo')
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = merged_data
+ self.mock_ansible_module.params = self.PARAMS_WITH_CHANGES
+
+ SanManagerModule().run()
+
+ self.resource.update.assert_called_once_with(resource=output_data, id_or_uri=output_data['uri'])
+
+ def test_should_remove_san_manager(self):
+ self.resource.get_by_name.return_value = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_ABSENT.copy()
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_DELETED
+ )
+
+ def test_should_do_nothing_when_san_manager_not_exist(self):
+ self.resource.get_by_name.return_value = []
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_ABSENT.copy()
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ msg=SanManagerModule.MSG_ALREADY_ABSENT
+ )
+
+ def test_should_fail_when_name_not_found(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = None
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_PRESENT
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(
+ exception=mock.ANY,
+ msg="The provider 'Brocade Network Advisor' was not found."
+ )
+
+ def test_should_fail_when_name_and_hosts_in_connectionInfo_missing(self):
+ bad_params = deepcopy(self.PARAMS_FOR_PRESENT)
+ bad_params['data'].pop('name')
+ bad_params['data'].pop('connectionInfo')
+
+ self.mock_ansible_module.params = bad_params
+
+ SanManagerModule().run()
+
+ msg = 'A "name" or "connectionInfo" must be provided inside the "data" field for this operation. '
+ msg += 'If a "connectionInfo" is provided, the "Host" name is considered as the "name" for the resource.'
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=msg)
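+
+    # A minimal sketch (not the module's actual implementation) of the name
+    # fallback described by the message above: when "name" is absent, the
+    # value of the "Host" entry inside "connectionInfo" supplies the resource
+    # name. The helper below is illustrative only; its name is made up.
+    @staticmethod
+    def _resolve_name_sketch(data):
+        if data.get('name'):
+            return data['name']
+        hosts = [ci.get('value') for ci in data.get('connectionInfo', [])
+                 if ci.get('name') == 'Host']
+        return hosts[0] if hosts else None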
+
+ def test_connection_information_set_should_set_the_connection_information(self):
+ data_merged = deepcopy(DEFAULT_SAN_MANAGER_TEMPLATE)
+ data_merged['fabricType'] = 'DirectAttach'
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.resource.update.return_value = data_merged
+ self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_UPDATED,
+ ansible_facts=dict(san_manager=data_merged)
+ )
+
+ def test_should_add_new_san_manager_when_connection_information_set_called_without_resource(self):
+ self.resource.get_by_name.return_value = []
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+ self.resource.add.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+
+ self.mock_ansible_module.params = self.PARAMS_FOR_CONNECTION_INFORMATION_SET
+
+ SanManagerModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=True,
+ msg=SanManagerModule.MSG_CREATED,
+ ansible_facts=dict(san_manager=DEFAULT_SAN_MANAGER_TEMPLATE)
+ )
+
+ def test_should_fail_when_required_attribute_missing(self):
+ bad_params = deepcopy(self.PARAMS_FOR_CONNECTION_INFORMATION_SET)
+ bad_params['data'] = self.PARAMS_FOR_CONNECTION_INFORMATION_SET['data'].copy()
+ bad_params['data'].pop('connectionInfo')
+
+ self.resource.get_by_name.return_value = DEFAULT_SAN_MANAGER_TEMPLATE
+ self.resource.get_provider_uri.return_value = '/rest/fc-sans/providers/123/device-managers'
+
+ self.mock_ansible_module.params = bad_params
+
+ SanManagerModule().run()
+
+ msg = 'A connectionInfo field is required for this operation.'
+
+ self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=msg)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager_info.py
new file mode 100644
index 000000000..be1f24316
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_oneview_san_manager_info.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from .oneview_module_loader import SanManagerInfoModule
+from .hpe_test_utils import FactsParamsTestCase
+
+
+class SanManagerInfoSpec(unittest.TestCase, FactsParamsTestCase):
+ ERROR_MSG = 'Fake message error'
+
+ PARAMS_GET_ALL = dict(
+ config='config.json',
+ provider_display_name=None
+ )
+
+ PARAMS_GET_BY_PROVIDER_DISPLAY_NAME = dict(
+ config='config.json',
+ provider_display_name="Brocade Network Advisor"
+ )
+
+ PRESENT_SAN_MANAGERS = [{
+ "providerDisplayName": "Brocade Network Advisor",
+ "uri": "/rest/fc-sans/device-managers//d60efc8a-15b8-470c-8470-738d16d6b319"
+ }]
+
+ def setUp(self):
+ self.configure_mocks(self, SanManagerInfoModule)
+ self.san_managers = self.mock_ov_client.san_managers
+
+ FactsParamsTestCase.configure_client_mock(self, self.san_managers)
+
+ def test_should_get_all(self):
+ self.san_managers.get_all.return_value = self.PRESENT_SAN_MANAGERS
+ self.mock_ansible_module.params = self.PARAMS_GET_ALL
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=self.PRESENT_SAN_MANAGERS
+ )
+
+ def test_should_get_by_display_name(self):
+ self.san_managers.get_by_provider_display_name.return_value = self.PRESENT_SAN_MANAGERS[0]
+ self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=self.PRESENT_SAN_MANAGERS
+ )
+
+ def test_should_return_empty_list_when_get_by_display_name_is_null(self):
+ self.san_managers.get_by_provider_display_name.return_value = None
+ self.mock_ansible_module.params = self.PARAMS_GET_BY_PROVIDER_DISPLAY_NAME
+
+ SanManagerInfoModule().run()
+
+ self.mock_ansible_module.exit_json.assert_called_once_with(
+ changed=False,
+ san_managers=[]
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_opkg.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_opkg.py
new file mode 100644
index 000000000..8e52368ff
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_opkg.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from collections import namedtuple
+from ansible_collections.community.general.plugins.modules import opkg
+
+import pytest
+
+TESTED_MODULE = opkg.__name__
+
+
+ModuleTestCase = namedtuple("ModuleTestCase", ["id", "input", "output", "run_command_calls"])
+RunCmdCall = namedtuple("RunCmdCall", ["command", "environ", "rc", "out", "err"])
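+
+# Each ModuleTestCase couples the module input with the expected output and
+# the exact sequence of run_command invocations (argv, environment kwargs,
+# and the mocked rc/stdout/stderr) that the module is expected to make.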
+
+
+@pytest.fixture
+def patch_opkg(mocker):
+ mocker.patch('ansible.module_utils.basic.AnsibleModule.get_bin_path', return_value='/testbin/opkg')
+
+
+TEST_CASES = [
+ ModuleTestCase(
+ id="install_zlibdev",
+ input={"name": "zlib-dev", "state": "present"},
+ output={
+ "msg": "installed 1 package(s)"
+ },
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "install", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out=(
+                    "Installing zlib-dev (1.2.11-6) to root...\n"
+                    "Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk\n"
+                    "Installing zlib (1.2.11-6) to root...\n"
+                    "Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk\n"
+                    "Configuring zlib.\n"
+                    "Configuring zlib-dev.\n"
+ ),
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="zlib-dev - 1.2.11-6\n",
+ err="",
+ ),
+ ],
+ ),
+ ModuleTestCase(
+ id="install_zlibdev_present",
+ input={"name": "zlib-dev", "state": "present"},
+ output={
+ "msg": "package(s) already present"
+ },
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="zlib-dev - 1.2.11-6\n",
+ err="",
+ ),
+ ],
+ ),
+ ModuleTestCase(
+ id="install_zlibdev_force_reinstall",
+ input={"name": "zlib-dev", "state": "present", "force": "reinstall"},
+ output={
+ "msg": "installed 1 package(s)"
+ },
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="zlib-dev - 1.2.11-6\n",
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "install", "--force-reinstall", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out=(
+ "Installing zlib-dev (1.2.11-6) to root...\n"
+ "Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk\n"
+ "Configuring zlib-dev.\n"
+ ),
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="zlib-dev - 1.2.11-6\n",
+ err="",
+ ),
+ ],
+ ),
+ ModuleTestCase(
+ id="install_zlibdev_with_version",
+ input={"name": "zlib-dev=1.2.11-6", "state": "present"},
+ output={
+ "msg": "installed 1 package(s)"
+ },
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "install", "zlib-dev=1.2.11-6"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out=(
+                    "Installing zlib-dev (1.2.11-6) to root...\n"
+                    "Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk\n"
+                    "Installing zlib (1.2.11-6) to root...\n"
+                    "Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk\n"
+                    "Configuring zlib.\n"
+                    "Configuring zlib-dev.\n"
+ ),
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "zlib-dev"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+                out="zlib-dev - 1.2.11-6 \n",  # Trailing space matches the behaviour of Yocto/OpenEmbedded's opkg; see the parsing sketch after TEST_CASES
+ err="",
+ ),
+ ],
+ ),
+ ModuleTestCase(
+ id="install_vim_updatecache",
+ input={"name": "vim-fuller", "state": "present", "update_cache": True},
+ output={
+ "msg": "installed 1 package(s)"
+ },
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/opkg", "update"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "vim-fuller"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "install", "vim-fuller"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out=(
+ "Multiple packages (libgcc1 and libgcc1) providing same name marked HOLD or PREFER. Using latest.\n"
+ "Installing vim-fuller (9.0-1) to root...\n"
+ "Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/packages/vim-fuller_9.0-1_x86_64.ipk\n"
+ "Installing terminfo (6.4-2) to root...\n"
+ "Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/terminfo_6.4-2_x86_64.ipk\n"
+ "Installing libncurses6 (6.4-2) to root...\n"
+ "Downloading https://downloads.openwrt.org/snapshots/packages/x86_64/base/libncurses6_6.4-2_x86_64.ipk\n"
+ "Configuring terminfo.\n"
+ "Configuring libncurses6.\n"
+ "Configuring vim-fuller.\n"
+ ),
+ err="",
+ ),
+ RunCmdCall(
+ command=["/testbin/opkg", "list-installed", "vim-fuller"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+                out="vim-fuller - 9.0-1 \n",  # Trailing space matches the behaviour of Yocto/OpenEmbedded's opkg; see the parsing sketch after TEST_CASES
+ err="",
+ ),
+ ],
+ ),
+]
+TEST_CASES_IDS = [item.id for item in TEST_CASES]
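+
+
+# A minimal sketch, not the module's actual parser: it shows why the trailing
+# space in the "list-installed" fixtures above is harmless once each line is
+# split on " - " and stripped. The helper name is made up for illustration.
+def parse_list_installed(stdout):
+    packages = {}
+    for line in stdout.splitlines():
+        if " - " in line:
+            name, version = [part.strip() for part in line.split(" - ", 1)]
+            packages[name] = version
+    return packages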
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ [[x.input, x] for x in TEST_CASES],
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_opkg(mocker, capfd, patch_opkg, testcase):
+ """
+    Run unit tests for the test cases listed in TEST_CASES
+ """
+
+ run_cmd_calls = testcase.run_command_calls
+
+    # First, mock the function used for running commands
+ call_results = [(x.rc, x.out, x.err) for x in run_cmd_calls]
+ mock_run_command = mocker.patch('ansible.module_utils.basic.AnsibleModule.run_command', side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ opkg.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % str(testcase))
+ print("results =\n%s" % results)
+
+ for test_result in testcase.output:
+ assert results[test_result] == testcase.output[test_result], \
+ "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], testcase.output[test_result])
+
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item.command, item.environ) for item in run_cmd_calls]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+
+ assert mock_run_command.call_count == len(run_cmd_calls)
+ if mock_run_command.call_count:
+ assert call_args_list == expected_call_args_list
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman.py
new file mode 100644
index 000000000..04ff5bb3e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman.py
@@ -0,0 +1,1099 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ set_module_args,
+ exit_json,
+ fail_json,
+)
+
+from ansible_collections.community.general.plugins.modules import pacman
+from ansible_collections.community.general.plugins.modules.pacman import (
+ Package,
+ VersionTuple,
+)
+
+import pytest
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+# This inventory data is tightly coupled with the inventory test and the
+# mock_valid_inventory fixture; a parsing sketch follows empty_inventory below.
+valid_inventory = {
+ "installed_pkgs": {
+ "file": "5.41-1",
+ "filesystem": "2021.11.11-1",
+ "findutils": "4.8.0-1",
+ "gawk": "5.1.1-1",
+ "gettext": "0.21-1",
+ "grep": "3.7-1",
+ "gzip": "1.11-1",
+ "pacman": "6.0.1-2",
+ "pacman-mirrorlist": "20211114-1",
+ "sed": "4.8-1",
+ "sqlite": "3.36.0-1",
+ },
+ "installed_groups": {
+ "base-devel": set(["gawk", "grep", "file", "findutils", "pacman", "sed", "gzip", "gettext"])
+ },
+ "available_pkgs": {
+ "acl": "2.3.1-1",
+ "amd-ucode": "20211027.1d00989-1",
+ "archlinux-keyring": "20211028-1",
+ "argon2": "20190702-3",
+ "attr": "2.5.1-1",
+ "audit": "3.0.6-5",
+ "autoconf": "2.71-1",
+ "automake": "1.16.5-1",
+ "b43-fwcutter": "019-3",
+ "gawk": "5.1.1-1",
+ "grep": "3.7-1",
+ "sqlite": "3.37.0-1",
+ "sudo": "1.9.8.p2-3",
+ },
+ "available_groups": {
+ "base-devel": set(
+ [
+ "libtool",
+ "gawk",
+ "which",
+ "texinfo",
+ "fakeroot",
+ "grep",
+ "findutils",
+ "autoconf",
+ "gzip",
+ "pkgconf",
+ "flex",
+ "patch",
+ "groff",
+ "m4",
+ "bison",
+ "gcc",
+ "gettext",
+ "make",
+ "file",
+ "pacman",
+ "sed",
+ "automake",
+ "sudo",
+ "binutils",
+ ]
+ ),
+ "some-group": set(["libtool", "sudo", "binutils"]),
+ },
+ "upgradable_pkgs": {
+ "sqlite": VersionTuple(current="3.36.0-1", latest="3.37.0-1"),
+ },
+ "pkg_reasons": {
+ "file": "explicit",
+ "filesystem": "explicit",
+ "findutils": "explicit",
+ "gawk": "explicit",
+ "gettext": "explicit",
+ "grep": "explicit",
+ "gzip": "explicit",
+ "pacman": "explicit",
+ "pacman-mirrorlist": "dependency",
+ "sed": "explicit",
+ "sqlite": "explicit",
+ },
+}
+
+empty_inventory = {
+ "installed_pkgs": {},
+ "available_pkgs": {},
+ "installed_groups": {},
+ "available_groups": {},
+ "upgradable_pkgs": {},
+ "pkg_reasons": {},
+}
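+
+
+# A minimal sketch, assuming the "installed_pkgs" entries above mirror plain
+# "pacman --query" output ("name version" per line). The module's real
+# _build_inventory also covers groups, upgrades and install reasons; this
+# helper is made up for illustration.
+def parse_query_output(stdout):
+    installed = {}
+    for line in stdout.splitlines():
+        fields = line.split()
+        if len(fields) == 2:
+            installed[fields[0]] = fields[1]
+    return installed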
+
+
+class TestPacman:
+ @pytest.fixture(autouse=True)
+ def run_command(self, mocker):
+ self.mock_run_command = mocker.patch.object(basic.AnsibleModule, "run_command", autospec=True)
+
+ @pytest.fixture
+ def mock_package_list(self, mocker):
+ return mocker.patch.object(pacman.Pacman, "package_list", autospec=True)
+
+ @pytest.fixture(autouse=True)
+ def common(self, mocker):
+ self.mock_module = mocker.patch.multiple(
+ basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path,
+ )
+
+ @pytest.fixture
+ def mock_empty_inventory(self, mocker):
+ inv = empty_inventory
+ return mocker.patch.object(pacman.Pacman, "_build_inventory", return_value=inv)
+
+ @pytest.fixture
+ def mock_valid_inventory(self, mocker):
+ return mocker.patch.object(pacman.Pacman, "_build_inventory", return_value=valid_inventory)
+
+ def test_fail_without_required_args(self):
+ with pytest.raises(AnsibleFailJson) as e:
+ set_module_args({})
+ pacman.main()
+ assert e.match(r"one of the following is required")
+
+ def test_success(self, mock_empty_inventory):
+ set_module_args({"update_cache": True}) # Simplest args to let init go through
+ P = pacman.Pacman(pacman.setup_module())
+ with pytest.raises(AnsibleExitJson) as e:
+ P.success()
+
+ def test_fail(self, mock_empty_inventory):
+ set_module_args({"update_cache": True})
+ P = pacman.Pacman(pacman.setup_module())
+
+ args = dict(
+ msg="msg", stdout="something", stderr="somethingelse", cmd=["command", "with", "args"], rc=1
+ )
+ with pytest.raises(AnsibleFailJson) as e:
+ P.fail(**args)
+
+ assert all(item in e.value.args[0] for item in args)
+
+ @pytest.mark.parametrize(
+ "expected, run_command_side_effect, raises",
+ [
+ (
+ # Regular run
+ valid_inventory,
+ [
+ [ # pacman --query
+ 0,
+ """file 5.41-1
+ filesystem 2021.11.11-1
+ findutils 4.8.0-1
+ gawk 5.1.1-1
+ gettext 0.21-1
+ grep 3.7-1
+ gzip 1.11-1
+ pacman 6.0.1-2
+ pacman-mirrorlist 20211114-1
+ sed 4.8-1
+ sqlite 3.36.0-1
+ """,
+ "",
+ ],
+ ( # pacman --query --group
+ 0,
+ """base-devel file
+ base-devel findutils
+ base-devel gawk
+ base-devel gettext
+ base-devel grep
+ base-devel gzip
+ base-devel pacman
+ base-devel sed
+ """,
+ "",
+ ),
+ ( # pacman --sync --list
+ 0,
+ """core acl 2.3.1-1 [installed]
+ core amd-ucode 20211027.1d00989-1
+ core archlinux-keyring 20211028-1 [installed]
+ core argon2 20190702-3 [installed]
+ core attr 2.5.1-1 [installed]
+ core audit 3.0.6-5 [installed: 3.0.6-2]
+ core autoconf 2.71-1
+ core automake 1.16.5-1
+ core b43-fwcutter 019-3
+ core gawk 5.1.1-1 [installed]
+ core grep 3.7-1 [installed]
+ core sqlite 3.37.0-1 [installed: 3.36.0-1]
+    core sudo 1.9.8.p2-3
+ """,
+ "",
+ ),
+ ( # pacman --sync --group --group
+ 0,
+ """base-devel autoconf
+ base-devel automake
+ base-devel binutils
+ base-devel bison
+ base-devel fakeroot
+ base-devel file
+ base-devel findutils
+ base-devel flex
+ base-devel gawk
+ base-devel gcc
+ base-devel gettext
+ base-devel grep
+ base-devel groff
+ base-devel gzip
+ base-devel libtool
+ base-devel m4
+ base-devel make
+ base-devel pacman
+ base-devel patch
+ base-devel pkgconf
+ base-devel sed
+ base-devel sudo
+ base-devel texinfo
+ base-devel which
+ some-group libtool
+ some-group sudo
+ some-group binutils
+ """,
+ "",
+ ),
+ ( # pacman --query --upgrades
+ 0,
+ """sqlite 3.36.0-1 -> 3.37.0-1
+ systemd 249.6-3 -> 249.7-2 [ignored]
+ """,
+ "",
+ ),
+ ( # pacman --query --explicit
+ 0,
+ """file 5.41-1
+ filesystem 2021.11.11-1
+ findutils 4.8.0-1
+ gawk 5.1.1-1
+ gettext 0.21-1
+ grep 3.7-1
+ gzip 1.11-1
+ pacman 6.0.1-2
+ sed 4.8-1
+ sqlite 3.36.0-1
+ """,
+ "",
+ ),
+ ( # pacman --query --deps
+ 0,
+ """pacman-mirrorlist 20211114-1
+ """,
+ "",
+ ),
+ ],
+ None,
+ ),
+ (
+            # All good, but the call to --query --upgrades returns 1 (i.e. nothing
+            # to upgrade), with a pacman warning
+ empty_inventory,
+ [
+ (0, "", ""),
+ (0, "", ""),
+ (0, "", ""),
+ (0, "", ""),
+ (
+ 1,
+ "",
+ "warning: config file /etc/pacman.conf, line 34: directive 'TotalDownload' in section 'options' not recognized.",
+ ),
+ (0, "", ""),
+ (0, "", ""),
+ ],
+ None,
+ ),
+ (
+ # failure
+ empty_inventory,
+ [
+ (0, "", ""),
+ (0, "", ""),
+ (0, "", ""),
+ (0, "", ""),
+ (
+ 1,
+ "partial\npkg\\nlist",
+ "some warning",
+ ),
+ (0, "", ""),
+ (0, "", ""),
+ ],
+ AnsibleFailJson,
+ ),
+ ],
+ )
+ def test_build_inventory(self, expected, run_command_side_effect, raises):
+ self.mock_run_command.side_effect = run_command_side_effect
+
+ set_module_args({"update_cache": True})
+ if raises:
+ with pytest.raises(raises):
+ P = pacman.Pacman(pacman.setup_module())
+ P._build_inventory()
+ else:
+ P = pacman.Pacman(pacman.setup_module())
+ assert P._build_inventory() == expected
+
+ @pytest.mark.parametrize("check_mode_value", [True, False])
+ def test_upgrade_check_empty_inventory(self, mock_empty_inventory, check_mode_value):
+ set_module_args({"upgrade": True, "_ansible_check_mode": check_mode_value})
+ P = pacman.Pacman(pacman.setup_module())
+ with pytest.raises(AnsibleExitJson) as e:
+ P.run()
+        assert self.mock_run_command.call_count == 0
+ out = e.value.args[0]
+ assert "packages" not in out
+ assert not out["changed"]
+ assert "diff" not in out
+
+ def test_update_db_check(self, mock_empty_inventory):
+ set_module_args({"update_cache": True, "_ansible_check_mode": True})
+ P = pacman.Pacman(pacman.setup_module())
+
+ with pytest.raises(AnsibleExitJson) as e:
+ P.run()
+        assert self.mock_run_command.call_count == 0
+ out = e.value.args[0]
+ assert "packages" not in out
+ assert out["changed"]
+
+ @pytest.mark.parametrize(
+ "module_args,expected_calls,changed",
+ [
+ (
+ {},
+ [
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
+ (["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'b\na\nc', ''),
+ ],
+ False,
+ ),
+ (
+ {"force": True},
+ [
+ (["pacman", "--sync", "--refresh", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+ ],
+ True,
+ ),
+ (
+ {"update_cache_extra_args": "--some-extra args"}, # shlex test
+ [
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a\nb\nc', ''),
+ (["pacman", "--sync", "--refresh", "--some-extra", "args"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'a changed\nb\nc', ''),
+ ],
+ True,
+ ),
+ (
+ {"force": True, "update_cache_extra_args": "--some-extra args"},
+ [
+ (["pacman", "--sync", "--refresh", "--some-extra", "args", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+ ],
+ True,
+ ),
+ (
+            # Test that pacman --sync --list is not called more than twice
+ {"upgrade": True},
+ [
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
+ (["pacman", "--sync", "--refresh"], {'check_rc': False}, 0, 'stdout', 'stderr'),
+ (["pacman", "--sync", "--list"], {'check_rc': True}, 0, 'core foo 1.0.0-1 [installed]', ''),
+ # The following is _build_inventory:
+ (["pacman", "--query"], {'check_rc': True}, 0, 'foo 1.0.0-1', ''),
+ (["pacman", "--query", "--groups"], {'check_rc': True}, 0, '', ''),
+ (["pacman", "--sync", "--groups", "--groups"], {'check_rc': True}, 0, '', ''),
+ (["pacman", "--query", "--upgrades"], {'check_rc': False}, 0, '', ''),
+ (["pacman", "--query", "--explicit"], {'check_rc': True}, 0, 'foo 1.0.0-1', ''),
+ (["pacman", "--query", "--deps"], {'check_rc': True}, 0, '', ''),
+ ],
+ False,
+ ),
+ ],
+ )
+ def test_update_db(self, module_args, expected_calls, changed):
+ args = {"update_cache": True}
+ args.update(module_args)
+ set_module_args(args)
+
+ self.mock_run_command.side_effect = [
+ (rc, stdout, stderr) for expected_call, kwargs, rc, stdout, stderr in expected_calls
+ ]
+ with pytest.raises(AnsibleExitJson) as e:
+ P = pacman.Pacman(pacman.setup_module())
+ P.run()
+
+ self.mock_run_command.assert_has_calls([
+ mock.call(mock.ANY, expected_call, **kwargs) for expected_call, kwargs, rc, stdout, stderr in expected_calls
+ ])
+ out = e.value.args[0]
+ assert out["cache_updated"] == changed
+ assert out["changed"] == changed
+
+ @pytest.mark.parametrize(
+ "check_mode_value, run_command_data, upgrade_extra_args",
+ [
+ # just check
+ (True, None, None),
+ (
+ # for real
+ False,
+ {
+ "args": ["pacman", "--sync", "--sysupgrade", "--quiet", "--noconfirm"],
+ "return_value": [0, "stdout", "stderr"],
+ },
+ None,
+ ),
+ (
+ # with extra args
+ False,
+ {
+ "args": [
+ "pacman",
+ "--sync",
+ "--sysupgrade",
+ "--quiet",
+ "--noconfirm",
+ "--some",
+ "value",
+ ],
+ "return_value": [0, "stdout", "stderr"],
+ },
+ "--some value",
+ ),
+ ],
+ )
+ def test_upgrade(self, mock_valid_inventory, check_mode_value, run_command_data, upgrade_extra_args):
+ args = {"upgrade": True, "_ansible_check_mode": check_mode_value}
+ if upgrade_extra_args:
+ args["upgrade_extra_args"] = upgrade_extra_args
+ set_module_args(args)
+
+ if run_command_data and "return_value" in run_command_data:
+ self.mock_run_command.return_value = run_command_data["return_value"]
+
+ P = pacman.Pacman(pacman.setup_module())
+
+ with pytest.raises(AnsibleExitJson) as e:
+ P.run()
+ out = e.value.args[0]
+
+ if check_mode_value:
+            assert self.mock_run_command.call_count == 0
+
+ if run_command_data and "args" in run_command_data:
+ self.mock_run_command.assert_called_with(mock.ANY, run_command_data["args"], check_rc=False)
+ assert out["stdout"] == "stdout"
+ assert out["stderr"] == "stderr"
+
+ assert len(out["packages"]) == 1 and "sqlite" in out["packages"]
+ assert out["changed"]
+ assert out["diff"]["before"] and out["diff"]["after"]
+
+ def test_upgrade_fail(self, mock_valid_inventory):
+ set_module_args({"upgrade": True})
+ self.mock_run_command.return_value = [1, "stdout", "stderr"]
+ P = pacman.Pacman(pacman.setup_module())
+
+ with pytest.raises(AnsibleFailJson) as e:
+ P.run()
+        assert self.mock_run_command.call_count == 1
+ out = e.value.args[0]
+ assert out["failed"]
+ assert out["stdout"] == "stdout"
+ assert out["stderr"] == "stderr"
+
+ @pytest.mark.parametrize(
+ "state, pkg_names, expected, run_command_data, raises",
+ [
+ # regular packages, no resolving required
+ (
+ "present",
+ ["acl", "attr"],
+ [Package(name="acl", source="acl"), Package(name="attr", source="attr")],
+ None,
+ None,
+ ),
+ (
+ # group expansion
+ "present",
+ ["acl", "some-group", "attr"],
+ [
+ Package(name="acl", source="acl"),
+ Package(name="binutils", source="binutils"),
+ Package(name="libtool", source="libtool"),
+ Package(name="sudo", source="sudo"),
+ Package(name="attr", source="attr"),
+ ],
+ None,
+ None,
+ ),
+ (
+ # <repo>/<pkgname> format -> call to pacman to resolve
+ "present",
+ ["community/elixir"],
+ [Package(name="elixir", source="community/elixir")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ ["pacman", "--sync", "--print-format", "%n", "community/elixir"],
+ check_rc=False,
+ )
+ ],
+ "side_effect": [(0, "elixir", "")],
+ },
+ None,
+ ),
+ (
+ # catch all -> call to pacman to resolve (--sync and --upgrade)
+ "present",
+ ["somepackage-12.3-x86_64.pkg.tar.zst"],
+ [
+ Package(
+ name="somepackage",
+ source="somepackage-12.3-x86_64.pkg.tar.zst",
+ source_is_URL=True,
+ )
+ ],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--sync",
+ "--print-format",
+ "%n",
+ "somepackage-12.3-x86_64.pkg.tar.zst",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--upgrade",
+ "--print-format",
+ "%n",
+ "somepackage-12.3-x86_64.pkg.tar.zst",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(1, "", "nope"), (0, "somepackage", "")],
+ },
+ None,
+ ),
+ (
+ # install a package that doesn't exist. call pacman twice and give up
+ "present",
+ ["unknown-package"],
+ [],
+ {
+ # no call validation, since it will fail
+ "side_effect": [(1, "", "nope"), (1, "", "stillnope")],
+ },
+ AnsibleFailJson,
+ ),
+ (
+            # Edge case: resolve a pkg that doesn't exist when trying to remove it (state == absent).
+            # It will fall back to the file + URL format but not complain, since the package
+            # is already absent. This can happen if a pkg is removed from the repos (or if a
+            # repo is disabled/removed).
+ "absent",
+ ["unknown-package-to-remove"],
+ [],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ ["pacman", "--sync", "--print-format", "%n", "unknown-package-to-remove"],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ ["pacman", "--upgrade", "--print-format", "%n", "unknown-package-to-remove"],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(1, "", "nope"), (1, "", "stillnope")],
+ },
+ None, # Doesn't fail
+ ),
+ ],
+ )
+ def test_package_list(
+ self, mock_valid_inventory, state, pkg_names, expected, run_command_data, raises
+ ):
+ set_module_args({"name": pkg_names, "state": state})
+ P = pacman.Pacman(pacman.setup_module())
+ P.inventory = P._build_inventory()
+ if run_command_data:
+ self.mock_run_command.side_effect = run_command_data["side_effect"]
+
+ if raises:
+ with pytest.raises(raises):
+ P.package_list()
+ else:
+ assert sorted(P.package_list()) == sorted(expected)
+ if run_command_data:
+ assert self.mock_run_command.mock_calls == run_command_data["calls"]
+
+ @pytest.mark.parametrize("check_mode_value", [True, False])
+ @pytest.mark.parametrize(
+ "name, state, package_list",
+ [
+ (["already-absent"], "absent", [Package("already-absent", "already-absent")]),
+ (["grep"], "present", [Package("grep", "grep")]),
+ ],
+ )
+ def test_op_packages_nothing_to_do(
+ self, mock_valid_inventory, mock_package_list, check_mode_value, name, state, package_list
+ ):
+ set_module_args({"name": name, "state": state, "_ansible_check_mode": check_mode_value})
+ mock_package_list.return_value = package_list
+ P = pacman.Pacman(pacman.setup_module())
+ with pytest.raises(AnsibleExitJson) as e:
+ P.run()
+ out = e.value.args[0]
+ assert not out["changed"]
+ assert "packages" in out
+ assert "diff" not in out
+        assert self.mock_run_command.call_count == 0
+
+ @pytest.mark.parametrize(
+ "module_args, expected_packages, package_list_out, run_command_data, raises",
+ [
+ (
+ # remove pkg: Check mode -- call to print format but that's it
+ {"_ansible_check_mode": True, "name": ["grep"], "state": "absent"},
+ ["grep-version"],
+ [Package("grep", "grep")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--print-format",
+ "%n-%v",
+ "grep",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(0, "grep-version", "")],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # remove pkg for real now -- with 2 packages
+ {"name": ["grep", "gawk"], "state": "absent"},
+ ["grep-version", "gawk-anotherversion"],
+ [Package("grep", "grep"), Package("gawk", "gawk")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--print-format",
+ "%n-%v",
+ "grep",
+ "gawk",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ ["pacman", "--remove", "--noconfirm", "--noprogressbar", "grep", "gawk"],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [
+ (0, "grep-version\ngawk-anotherversion", ""),
+ (0, "stdout", "stderr"),
+ ],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # remove pkg force + extra_args
+ {
+ "name": ["grep"],
+ "state": "absent",
+ "force": True,
+ "extra_args": "--some --extra arg",
+ },
+ ["grep-version"],
+ [Package("grep", "grep")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--some",
+ "--extra",
+ "arg",
+ "--nodeps",
+ "--nodeps",
+ "--print-format",
+ "%n-%v",
+ "grep",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--some",
+ "--extra",
+ "arg",
+ "--nodeps",
+ "--nodeps",
+ "grep",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [
+ (0, "grep-version", ""),
+ (0, "stdout", "stderr"),
+ ],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # remove pkg -- Failure to list
+ {"name": ["grep"], "state": "absent"},
+ ["grep-3.7-1"],
+ [Package("grep", "grep")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--print-format",
+ "%n-%v",
+ "grep",
+ ],
+ check_rc=False,
+ )
+ ],
+ "side_effect": [
+ (1, "stdout", "stderr"),
+ ],
+ },
+ AnsibleFailJson,
+ ),
+ (
+ # remove pkg -- Failure to remove
+ {"name": ["grep"], "state": "absent"},
+ ["grep-3.7-1"],
+ [Package("grep", "grep")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--remove",
+ "--noconfirm",
+ "--noprogressbar",
+ "--print-format",
+ "%n-%v",
+ "grep",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ ["pacman", "--remove", "--noconfirm", "--noprogressbar", "grep"],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [
+ (0, "grep", ""),
+ (1, "stdout", "stderr"),
+ ],
+ },
+ AnsibleFailJson,
+ ),
+ (
+ # install pkg: Check mode
+ {"_ansible_check_mode": True, "name": ["sudo"], "state": "present"},
+ ["sudo"],
+ [Package("sudo", "sudo")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "--print-format",
+ "%n %v",
+ "sudo",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(0, "sudo version", "")],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # Install pkgs: one regular, one already installed, one file URL and one https URL
+ {
+ "name": [
+ "sudo",
+ "grep",
+ "./somepackage-12.3-x86_64.pkg.tar.zst",
+ "http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
+ ],
+ "state": "present",
+ },
+ ["otherpkg", "somepackage", "sudo"],
+ [
+ Package("sudo", "sudo"),
+ Package("grep", "grep"),
+ Package("somepackage", "./somepackage-12.3-x86_64.pkg.tar.zst", source_is_URL=True),
+ Package(
+ "otherpkg",
+ "http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
+ source_is_URL=True,
+ ),
+ ],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "--print-format",
+ "%n %v",
+ "sudo",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--upgrade",
+ "--print-format",
+ "%n %v",
+ "./somepackage-12.3-x86_64.pkg.tar.zst",
+ "http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "sudo",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--upgrade",
+ "./somepackage-12.3-x86_64.pkg.tar.zst",
+ "http://example.com/otherpkg-1.2-x86_64.pkg.tar.zst",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [
+ (0, "sudo version", ""),
+ (0, "somepackage 12.3\notherpkg 1.2", ""),
+ (0, "", ""),
+ (0, "", ""),
+ ],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # install pkg, extra_args
+ {"name": ["sudo"], "state": "present", "extra_args": "--some --thing else"},
+ ["sudo"],
+ [Package("sudo", "sudo")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--some",
+ "--thing",
+ "else",
+ "--sync",
+ "--print-format",
+ "%n %v",
+ "sudo",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--some",
+ "--thing",
+ "else",
+ "--sync",
+ "sudo",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(0, "sudo version", ""), (0, "", "")],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # latest pkg: Check mode
+ {"_ansible_check_mode": True, "name": ["sqlite"], "state": "latest"},
+ ["sqlite"],
+ [Package("sqlite", "sqlite")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "--print-format",
+ "%n %v",
+ "sqlite",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(0, "sqlite new-version", "")],
+ },
+ AnsibleExitJson,
+ ),
+ (
+ # latest pkg -- one already latest
+ {"name": ["sqlite", "grep"], "state": "latest"},
+ ["sqlite"],
+ [Package("sqlite", "sqlite")],
+ {
+ "calls": [
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "--print-format",
+ "%n %v",
+ "sqlite",
+ ],
+ check_rc=False,
+ ),
+ mock.call(
+ mock.ANY,
+ [
+ "pacman",
+ "--noconfirm",
+ "--noprogressbar",
+ "--needed",
+ "--sync",
+ "sqlite",
+ ],
+ check_rc=False,
+ ),
+ ],
+ "side_effect": [(0, "sqlite new-version", ""), (0, "", "")],
+ },
+ AnsibleExitJson,
+ ),
+ ],
+ )
+ def test_op_packages(
+ self,
+ mock_valid_inventory,
+ mock_package_list,
+ module_args,
+ expected_packages,
+ package_list_out,
+ run_command_data,
+ raises,
+ ):
+ set_module_args(module_args)
+ self.mock_run_command.side_effect = run_command_data["side_effect"]
+ mock_package_list.return_value = package_list_out
+
+ P = pacman.Pacman(pacman.setup_module())
+ with pytest.raises(raises) as e:
+ P.run()
+ out = e.value.args[0]
+
+ assert self.mock_run_command.mock_calls == run_command_data["calls"]
+ if raises == AnsibleExitJson:
+ assert out["packages"] == expected_packages
+ assert out["changed"]
+ assert "packages" in out
+ assert "diff" in out
+ else:
+ assert out["stdout"] == "stdout"
+ assert out["stderr"] == "stderr"
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman_key.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman_key.py
new file mode 100644
index 000000000..ac8570898
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pacman_key.py
@@ -0,0 +1,577 @@
+# Copyright (c) 2019, George Rawlinson <george@rawlinson.net.nz>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules import pacman_key
+import pytest
+import json
+
+# path used for mocking get_bin_path()
+MOCK_BIN_PATH = '/mocked/path'
+
+# Key ID used for tests
+TESTING_KEYID = '14F26682D0916CDD81E37B6D61B7B526D98F0353'
+TESTING_KEYFILE_PATH = '/tmp/pubkey.asc'
+
+# gpg --{show,list}-key output (key present)
+GPG_SHOWKEY_OUTPUT = '''tru::1:1616373715:0:3:1:5
+pub:-:4096:1:61B7B526D98F0353:1437155332:::-:::scSC::::::23::0:
+fpr:::::::::14F26682D0916CDD81E37B6D61B7B526D98F0353:
+uid:-::::1437155332::E57D1F9BFF3B404F9F30333629369B08DF5E2161::Mozilla Software Releases <release@mozilla.com>::::::::::0:
+sub:e:4096:1:1C69C4E55E9905DB:1437155572:1500227572:::::s::::::23:
+fpr:::::::::F2EF4E6E6AE75B95F11F1EB51C69C4E55E9905DB:
+sub:e:4096:1:BBBEBDBB24C6F355:1498143157:1561215157:::::s::::::23:
+fpr:::::::::DCEAC5D96135B91C4EA672ABBBBEBDBB24C6F355:
+sub:e:4096:1:F1A6668FBB7D572E:1559247338:1622319338:::::s::::::23:
+fpr:::::::::097B313077AE62A02F84DA4DF1A6668FBB7D572E:'''
+
+# gpg --{show,list}-key output (key absent)
+GPG_NOKEY_OUTPUT = '''gpg: error reading key: No public key
+tru::1:1616373715:0:3:1:5'''
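+
+
+# A minimal sketch of pulling fingerprints out of the colon-delimited records
+# above: in gpg's "--with-colons" output the fingerprint of an "fpr" record is
+# its tenth field. Illustrative only; this is not the module's actual parser.
+def extract_fingerprints(gpg_output):
+    return [
+        line.split(':')[9]
+        for line in gpg_output.splitlines()
+        if line.startswith('fpr:')
+    ]
+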
+
+# pacman-key output (successful invocation)
+PACMAN_KEY_SUCCESS = '''==> Updating trust database...
+gpg: next trustdb check due at 2021-08-02'''
+
+# expected command for gpg --list-keys KEYID
+RUN_CMD_LISTKEYS = [
+ MOCK_BIN_PATH,
+ '--with-colons',
+ '--batch',
+ '--no-tty',
+ '--no-default-keyring',
+ '--keyring=/etc/pacman.d/gnupg/pubring.gpg',
+ '--list-keys',
+ TESTING_KEYID,
+]
+
+# expected command for gpg --show-keys KEYFILE
+RUN_CMD_SHOW_KEYFILE = [
+ MOCK_BIN_PATH,
+ '--with-colons',
+ '--with-fingerprint',
+ '--batch',
+ '--no-tty',
+ '--show-keys',
+ TESTING_KEYFILE_PATH,
+]
+
+# expected command for pacman-key --lsign-key KEYID
+RUN_CMD_LSIGN_KEY = [
+ MOCK_BIN_PATH,
+ '--gpgdir',
+ '/etc/pacman.d/gnupg',
+ '--lsign-key',
+ TESTING_KEYID,
+]
+
+
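+# Each entry in the "run_command.calls" lists below is a (command, kwargs,
+# result) triple: the argv the module should pass to run_command, the keyword
+# arguments it should use, and the mocked (rc, stdout, stderr) return value.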
+TESTCASES = [
+ #
+ # invalid user input
+ #
+ # state: present, id: absent
+ [
+ {
+ 'state': 'present',
+ },
+ {
+ 'id': 'param_missing_id',
+ 'msg': 'missing required arguments: id',
+ 'failed': True,
+ },
+ ],
+ # state: present, required parameters: missing
+ [
+ {
+ 'state': 'present',
+ 'id': '0xDOESNTMATTER',
+ },
+ {
+ 'id': 'param_missing_method',
+ 'msg': 'state is present but any of the following are missing: data, file, url, keyserver',
+ 'failed': True,
+ },
+ ],
+ # state: present, id: invalid (not full-length)
+ [
+ {
+ 'id': '0xDOESNTMATTER',
+ 'data': 'FAKEDATA',
+ },
+ {
+ 'id': 'param_id_not_full',
+ 'msg': 'key ID is not full-length: DOESNTMATTER',
+ 'failed': True,
+ },
+ ],
+ # state: present, id: invalid (not hexadecimal)
+ [
+ {
+ 'state': 'present',
+ 'id': '01234567890ABCDE01234567890ABCDE1234567M',
+ 'data': 'FAKEDATA',
+ },
+ {
+ 'id': 'param_id_not_hex',
+ 'msg': 'key ID is not hexadecimal: 01234567890ABCDE01234567890ABCDE1234567M',
+ 'failed': True,
+ },
+ ],
+ # state: absent, id: absent
+ [
+ {
+ 'state': 'absent',
+ },
+ {
+ 'id': 'param_absent_state_missing_id',
+ 'msg': 'missing required arguments: id',
+ 'failed': True,
+ },
+ ],
+ #
+ # check mode
+ #
+ # state & key present
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'data': 'FAKEDATA',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'checkmode_state_and_key_present',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ ],
+ 'changed': False,
+ },
+ ],
+ # state present, key absent
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'data': 'FAKEDATA',
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'checkmode_state_present_key_absent',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ # state & key absent
+ [
+ {
+ 'state': 'absent',
+ 'id': TESTING_KEYID,
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'checkmode_state_and_key_absent',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ ],
+ 'changed': False,
+ },
+ ],
+ # state absent, key present
+ [
+ {
+ 'state': 'absent',
+ 'id': TESTING_KEYID,
+ '_ansible_check_mode': True,
+ },
+ {
+ 'id': 'check_mode_state_absent_key_present',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ #
+ # normal operation
+ #
+ # state & key present
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'data': 'FAKEDATA',
+ },
+ {
+ 'id': 'state_and_key_present',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ ],
+ 'changed': False,
+ },
+ ],
+ # state absent, key present
+ [
+ {
+ 'state': 'absent',
+ 'id': TESTING_KEYID,
+ },
+ {
+ 'id': 'state_absent_key_present',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ (
+ [
+ MOCK_BIN_PATH,
+ '--gpgdir',
+ '/etc/pacman.d/gnupg',
+ '--delete',
+ TESTING_KEYID,
+ ],
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ # state & key absent
+ [
+ {
+ 'state': 'absent',
+ 'id': TESTING_KEYID,
+ },
+ {
+ 'id': 'state_and_key_absent',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ ],
+ 'changed': False,
+ },
+ ],
+ # state: present, key: absent, method: file
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'file': TESTING_KEYFILE_PATH,
+ },
+ {
+ 'id': 'state_present_key_absent_method_file',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ (
+ RUN_CMD_SHOW_KEYFILE,
+ {'check_rc': True},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ (
+ [
+ MOCK_BIN_PATH,
+ '--gpgdir',
+ '/etc/pacman.d/gnupg',
+ '--add',
+ '/tmp/pubkey.asc',
+ ],
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ (
+ RUN_CMD_LSIGN_KEY,
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ # state: present, key: absent, method: file
+ # failure: keyid & keyfile don't match
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'file': TESTING_KEYFILE_PATH,
+ },
+ {
+ 'id': 'state_present_key_absent_verify_failed',
+ 'msg': 'key ID does not match. expected 14F26682D0916CDD81E37B6D61B7B526D98F0353, got 14F26682D0916CDD81E37B6D61B7B526D98F0354',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ (
+ RUN_CMD_SHOW_KEYFILE,
+ {'check_rc': True},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT.replace('61B7B526D98F0353', '61B7B526D98F0354'),
+ '',
+ ),
+ ),
+ ],
+ 'failed': True,
+ },
+ ],
+ # state: present, key: absent, method: keyserver
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'keyserver': 'pgp.mit.edu',
+ },
+ {
+ 'id': 'state_present_key_absent_method_keyserver',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ (
+ [
+ MOCK_BIN_PATH,
+ '--gpgdir',
+ '/etc/pacman.d/gnupg',
+ '--keyserver',
+ 'pgp.mit.edu',
+ '--recv-keys',
+ TESTING_KEYID,
+ ],
+ {'check_rc': True},
+ (
+ 0,
+ '''
+gpg: key 0x61B7B526D98F0353: 32 signatures not checked due to missing keys
+gpg: key 0x61B7B526D98F0353: public key "Mozilla Software Releases <release@mozilla.com>" imported
+gpg: marginals needed: 3 completes needed: 1 trust model: pgp
+gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
+gpg: Total number processed: 1
+gpg: imported: 1
+''',
+ '',
+ ),
+ ),
+ (
+ RUN_CMD_LSIGN_KEY,
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ ],
+ 'changed': True,
+ },
+ ],
+ # state: present, key: absent, method: data
+ [
+ {
+ 'state': 'present',
+ 'id': TESTING_KEYID,
+ 'data': 'PGP_DATA',
+ },
+ {
+ 'id': 'state_present_key_absent_method_data',
+ 'run_command.calls': [
+ (
+ RUN_CMD_LISTKEYS,
+ {'check_rc': False},
+ (
+ 2,
+ '',
+ GPG_NOKEY_OUTPUT,
+ ),
+ ),
+ (
+ RUN_CMD_SHOW_KEYFILE,
+ {'check_rc': True},
+ (
+ 0,
+ GPG_SHOWKEY_OUTPUT,
+ '',
+ ),
+ ),
+ (
+ [
+ MOCK_BIN_PATH,
+ '--gpgdir',
+ '/etc/pacman.d/gnupg',
+ '--add',
+ '/tmp/pubkey.asc',
+ ],
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ (
+ RUN_CMD_LSIGN_KEY,
+ {'check_rc': True},
+ (
+ 0,
+ PACMAN_KEY_SUCCESS,
+ '',
+ ),
+ ),
+ ],
+ 'save_key_output': TESTING_KEYFILE_PATH,
+ 'changed': True,
+ },
+ ],
+]
+
+
+@pytest.fixture
+def patch_get_bin_path(mocker):
+    return mocker.patch.object(
+ AnsibleModule,
+ 'get_bin_path',
+ return_value=MOCK_BIN_PATH,
+ )
+
+
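+# The parametrization below uses indirect=['patch_ansible_module'] to route
+# each TESTCASES input dict through the shared patch_ansible_module fixture
+# (presumably provided by the tests' conftest), which hands it to the module
+# under test as its parameters before main() runs.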
+@pytest.mark.parametrize(
+ 'patch_ansible_module, expected',
+ TESTCASES,
+ ids=[item[1]['id'] for item in TESTCASES],
+ indirect=['patch_ansible_module']
+)
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_operation(mocker, capfd, patch_get_bin_path, expected):
+ # patch run_command invocations with mock data
+ if 'run_command.calls' in expected:
+ mock_run_command = mocker.patch.object(
+ AnsibleModule,
+ 'run_command',
+ side_effect=[item[2] for item in expected['run_command.calls']],
+ )
+
+ # patch save_key invocations with mock data
+ if 'save_key_output' in expected:
+ mock_save_key = mocker.patch.object(
+ pacman_key.PacmanKey,
+ 'save_key',
+ return_value=expected['save_key_output'],
+ )
+
+ # invoke module
+ with pytest.raises(SystemExit):
+ pacman_key.main()
+
+ # capture std{out,err}
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ # assertion time!
+ if 'msg' in expected:
+ assert results['msg'] == expected['msg']
+ if 'changed' in expected:
+ assert results['changed'] == expected['changed']
+ if 'failed' in expected:
+ assert results['failed'] == expected['failed']
+
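+    # each expected entry is an (argv, kwargs, mocked_return) triple; only
+    # the (argv, kwargs) pairs are compared against the recorded calls below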
+ if 'run_command.calls' in expected:
+ assert AnsibleModule.run_command.call_count == len(expected['run_command.calls'])
+ call_args_list = [(item[0][0], item[1]) for item in AnsibleModule.run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in expected['run_command.calls']]
+ assert call_args_list == expected_call_args_list
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty.py
new file mode 100644
index 000000000..d363804bc
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty.py
@@ -0,0 +1,130 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules import pagerduty
+
+import json
+
+
+class PagerDutyTest(unittest.TestCase):
+ def setUp(self):
+ self.pd = pagerduty.PagerDutyRequest(module=pagerduty, name='name', user='user', token='token')
+
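+    # Each _assert_* helper below stands in for the http_call callable: it
+    # asserts on the request it receives and returns a (response, info) pair
+    # shaped like fetch_url's return value.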
+ def _assert_ongoing_maintenance_windows(self, module, url, headers):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows?filter=ongoing', url)
+ return object(), {'status': 200}
+
+ def _assert_ongoing_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 200}
+
+ def _assert_create_a_maintenance_window_url(self, module, url, headers, data=None, method=None):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows', url)
+ return object(), {'status': 201}
+
+ def _assert_create_a_maintenance_window_http_method(self, module, url, headers, data=None, method=None):
+ self.assertEqual('POST', method)
+ return object(), {'status': 201}
+
+ def _assert_create_a_maintenance_window_from_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'From': 'requester_id'},
+ headers,
+ 'From:requester_id HTTP header not found'
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_with_v1_compatible_header(self, module, url, headers, data=None, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_payload(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+        self.assertTrue('start_time' in window_data, '"start_time" is a required attribute')
+        self.assertTrue('end_time' in window_data, '"end_time" is a required attribute')
+        self.assertTrue('services' in window_data, '"services" is a required attribute')
+ return object(), {'status': 201}
+
+ def _assert_create_window_single_service(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+ services = window_data['services']
+ self.assertEqual(
+ [{'id': 'service_id', 'type': 'service_reference'}],
+ services
+ )
+ return object(), {'status': 201}
+
+ def _assert_create_window_multiple_service(self, module, url, headers, data=None, method=None):
+ payload = json.loads(data)
+ window_data = payload['maintenance_window']
+ services = window_data['services']
+ self.assertEqual(
+ [
+ {'id': 'service_id_1', 'type': 'service_reference'},
+ {'id': 'service_id_2', 'type': 'service_reference'},
+ {'id': 'service_id_3', 'type': 'service_reference'},
+ ],
+ services
+ )
+ return object(), {'status': 201}
+
+ def _assert_absent_maintenance_window_url(self, module, url, headers, method=None):
+ self.assertEqual('https://api.pagerduty.com/maintenance_windows/window_id', url)
+ return object(), {'status': 204}
+
+ def _assert_absent_window_with_v1_compatible_header(self, module, url, headers, method=None):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return object(), {'status': 204}
+
+    def test_ongoing_maintenance_windows_url(self):
+ self.pd.ongoing(http_call=self._assert_ongoing_maintenance_windows)
+
+    def test_ongoing_maintenance_windows_compatibility_header(self):
+ self.pd.ongoing(http_call=self._assert_ongoing_window_with_v1_compatible_header)
+
+ def test_create_maintenance_window_url(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_url)
+
+ def test_create_maintenance_window_http_method(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_http_method)
+
+ def test_create_maintenance_from_header(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_a_maintenance_window_from_header)
+
+ def test_create_maintenance_compatibility_header(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_with_v1_compatible_header)
+
+ def test_create_maintenance_request_payload(self):
+ self.pd.create('requester_id', 'service', 1, 0, 'desc', http_call=self._assert_create_window_payload)
+
+ def test_create_maintenance_for_single_service(self):
+ self.pd.create('requester_id', 'service_id', 1, 0, 'desc', http_call=self._assert_create_window_single_service)
+
+ def test_create_maintenance_for_multiple_services(self):
+ self.pd.create('requester_id', ['service_id_1', 'service_id_2', 'service_id_3'], 1, 0, 'desc', http_call=self._assert_create_window_multiple_service)
+
+ def test_absent_maintenance_window_url(self):
+ self.pd.absent('window_id', http_call=self._assert_absent_maintenance_window_url)
+
+ def test_absent_maintenance_compatibility_header(self):
+ self.pd.absent('window_id', http_call=self._assert_absent_window_with_v1_compatible_header)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_alert.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_alert.py
new file mode 100644
index 000000000..3df992b42
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_alert.py
@@ -0,0 +1,46 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.plugins.modules import pagerduty_alert
+
+
+class PagerDutyAlertsTest(unittest.TestCase):
+ def _assert_incident_api(self, module, url, method, headers):
+ self.assertTrue('https://api.pagerduty.com/incidents' in url, 'url must contain REST API v2 network path')
+ self.assertTrue('service_ids%5B%5D=service_id' in url, 'url must contain service id to filter incidents')
+ self.assertTrue('sort_by=incident_number%3Adesc' in url, 'url should contain sorting parameter')
+ self.assertTrue('time_zone=UTC' in url, 'url should contain time zone parameter')
+ return Response(), {'status': 200}
+
+ def _assert_compatibility_header(self, module, url, method, headers):
+ self.assertDictContainsSubset(
+ {'Accept': 'application/vnd.pagerduty+json;version=2'},
+ headers,
+ 'Accept:application/vnd.pagerduty+json;version=2 HTTP header not found'
+ )
+ return Response(), {'status': 200}
+
+ def _assert_incident_key(self, module, url, method, headers):
+ self.assertTrue('incident_key=incident_key_value' in url, 'url must contain incident key')
+ return Response(), {'status': 200}
+
+ def test_incident_url(self):
+ pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_incident_api)
+
+ def test_compatibility_header(self):
+ pagerduty_alert.check(None, 'name', 'state', 'service_id', 'integration_key', 'api_key', http_call=self._assert_compatibility_header)
+
+ def test_incident_key_in_url_when_it_is_given(self):
+ pagerduty_alert.check(
+ None, 'name', 'state', 'service_id', 'integration_key', 'api_key', incident_key='incident_key_value', http_call=self._assert_incident_key
+ )
+
+
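+# Minimal stand-in for an HTTP response object: the code under test only
+# needs its read() method.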
+class Response(object):
+ def read(self):
+ return '{"incidents":[{"id": "incident_id", "status": "triggered"}]}'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_change.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_change.py
new file mode 100644
index 000000000..d596d6ab8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pagerduty_change.py
@@ -0,0 +1,85 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.plugins.modules import pagerduty_change
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestPagerDutyChangeModule(ModuleTestCase):
+ def setUp(self):
+ super(TestPagerDutyChangeModule, self).setUp()
+ self.module = pagerduty_change
+
+ def tearDown(self):
+ super(TestPagerDutyChangeModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+ return mocker.patch('ansible.module_utils.monitoring.pagerduty_change.fetch_url')
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_ensure_change_event_created_with_minimal_data(self):
+ set_module_args({
+ 'integration_key': 'test',
+ 'summary': 'Testing'
+ })
+
+ with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 202})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
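+            # fetch_url(module, url, ...) is called positionally, so index 1
+            # of the positional args is the URL; the JSON body travels in the
+            # 'data' keyword argument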
+ url = fetch_url_mock.call_args[0][1]
+ json_data = fetch_url_mock.call_args[1]['data']
+ data = json.loads(json_data)
+
+ assert url == 'https://events.pagerduty.com/v2/change/enqueue'
+ assert data['routing_key'] == 'test'
+ assert data['payload']['summary'] == 'Testing'
+ assert data['payload']['source'] == 'Ansible'
+
+ def test_ensure_change_event_created_with_full_data(self):
+ set_module_args({
+ 'integration_key': 'test',
+ 'summary': 'Testing',
+ 'source': 'My Ansible Script',
+ 'user': 'ansible',
+ 'repo': 'github.com/ansible/ansible',
+ 'revision': '8c67432',
+ 'environment': 'production',
+ 'link_url': 'https://pagerduty.com',
+ 'link_text': 'PagerDuty'
+ })
+
+ with patch.object(pagerduty_change, 'fetch_url') as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 202})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+ assert fetch_url_mock.call_count == 1
+ url = fetch_url_mock.call_args[0][1]
+ json_data = fetch_url_mock.call_args[1]['data']
+ data = json.loads(json_data)
+
+ assert url == 'https://events.pagerduty.com/v2/change/enqueue'
+ assert data['routing_key'] == 'test'
+ assert data['payload']['summary'] == 'Testing'
+ assert data['payload']['source'] == 'My Ansible Script'
+ assert data['payload']['custom_details']['user'] == 'ansible'
+ assert data['payload']['custom_details']['repo'] == 'github.com/ansible/ansible'
+ assert data['payload']['custom_details']['revision'] == '8c67432'
+ assert data['payload']['custom_details']['environment'] == 'production'
+ assert data['links'][0]['href'] == 'https://pagerduty.com'
+ assert data['links'][0]['text'] == 'PagerDuty'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pamd.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pamd.py
new file mode 100644
index 000000000..4c49cebed
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pamd.py
@@ -0,0 +1,386 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules.pamd import PamdRule
+from ansible_collections.community.general.plugins.modules.pamd import PamdLine
+from ansible_collections.community.general.plugins.modules.pamd import PamdComment
+from ansible_collections.community.general.plugins.modules.pamd import PamdInclude
+from ansible_collections.community.general.plugins.modules.pamd import PamdService
+
+
+class PamdLineTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.pamd_line = PamdLine("This is a test")
+
+ def test_line(self):
+ self.assertEqual("This is a test", str(self.pamd_line))
+
+ def test_matches(self):
+ self.assertFalse(self.pamd_line.matches("test", "matches", "foo", "bar"))
+
+
+class PamdIncludeTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.good_include = PamdInclude("@include foobar")
+ self.bad_include = PamdInclude("include foobar")
+
+ def test_line(self):
+ self.assertEqual("@include foobar", str(self.good_include))
+
+ def test_matches(self):
+ self.assertFalse(self.good_include.matches("something", "something", "dark", "side"))
+
+ def test_valid(self):
+ self.assertTrue(self.good_include.is_valid)
+ self.assertFalse(self.bad_include.is_valid)
+
+
+class PamdCommentTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.good_comment = PamdComment("# This is a test comment")
+ self.bad_comment = PamdComment("This is a bad test comment")
+
+ def test_line(self):
+ self.assertEqual("# This is a test comment", str(self.good_comment))
+
+ def test_matches(self):
+ self.assertFalse(self.good_comment.matches("test", "matches", "foo", "bar"))
+
+ def test_valid(self):
+ self.assertTrue(self.good_comment.is_valid)
+ self.assertFalse(self.bad_comment.is_valid)
+
+
+class PamdRuleTestCase(unittest.TestCase):
+ def setUp(self):
+ self.rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke')
+
+ def test_type(self):
+ self.assertEqual(self.rule.rule_type, 'account')
+
+ def test_control(self):
+ self.assertEqual(self.rule.rule_control, 'optional')
+ self.assertEqual(self.rule._control, 'optional')
+
+ def test_path(self):
+ self.assertEqual(self.rule.rule_path, 'pam_keyinit.so')
+
+ def test_args(self):
+ self.assertEqual(self.rule.rule_args, ['revoke'])
+
+ def test_valid(self):
+ self.assertTrue(self.rule.validate()[0])
+
+
+class PamdRuleBadValidationTestCase(unittest.TestCase):
+ def setUp(self):
+ self.bad_type = PamdRule('foobar', 'optional', 'pam_keyinit.so', 'revoke')
+ self.bad_control_simple = PamdRule('account', 'foobar', 'pam_keyinit.so', 'revoke')
+ self.bad_control_value = PamdRule('account', '[foobar=1 default=ignore]', 'pam_keyinit.so', 'revoke')
+ self.bad_control_action = PamdRule('account', '[success=1 default=foobar]', 'pam_keyinit.so', 'revoke')
+
+ def test_validate_bad_type(self):
+ self.assertFalse(self.bad_type.validate()[0])
+
+ def test_validate_bad_control_simple(self):
+ self.assertFalse(self.bad_control_simple.validate()[0])
+
+ def test_validate_bad_control_value(self):
+ self.assertFalse(self.bad_control_value.validate()[0])
+
+ def test_validate_bad_control_action(self):
+ self.assertFalse(self.bad_control_action.validate()[0])
+
+
+class PamdServiceTestCase(unittest.TestCase):
+ def setUp(self):
+ self.system_auth_string = """#%PAM-1.0
+# This file is auto-generated.
+# User changes will be destroyed the next time authconfig is run.
+@include common-auth
+@include common-account
+@include common-session
+auth required pam_env.so
+auth sufficient pam_unix.so nullok try_first_pass
+auth requisite pam_succeed_if.so uid
+auth required pam_deny.so
+# Test comment
+auth sufficient pam_rootok.so
+
+account required pam_unix.so
+account sufficient pam_localuser.so
+account sufficient pam_succeed_if.so uid
+account [success=1 default=ignore] \
+ pam_succeed_if.so user = vagrant use_uid quiet
+account required pam_permit.so
+account required pam_access.so listsep=,
+session include system-auth
+
+password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type=
+password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok
+password required pam_deny.so
+
+session optional pam_keyinit.so revoke
+session required pam_limits.so
+-session optional pam_systemd.so
+session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid
+session [success=1 test=me default=ignore] pam_succeed_if.so service in crond quiet use_uid
+session required pam_unix.so"""
+
+ self.simple_system_auth_string = """#%PAM-1.0
+ auth required pam_env.so
+"""
+
+ self.no_header_system_auth_string = """auth required pam_env.so
+auth sufficient pam_unix.so nullok try_first_pass
+auth requisite pam_succeed_if.so uid
+auth required pam_deny.so
+"""
+
+ self.pamd = PamdService(self.system_auth_string)
+
+ def test_properly_parsed(self):
+ num_lines = len(self.system_auth_string.splitlines()) + 1
+ num_lines_processed = len(str(self.pamd).splitlines())
+ self.assertEqual(num_lines, num_lines_processed)
+
+ def test_has_rule(self):
+ self.assertTrue(self.pamd.has_rule('account', 'required', 'pam_permit.so'))
+ self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so'))
+
+ def test_doesnt_have_rule(self):
+ self.assertFalse(self.pamd.has_rule('account', 'requisite', 'pam_permit.so'))
+
+ # Test Update
+ def test_update_rule_type(self):
+ self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_type='account'))
+ self.assertTrue(self.pamd.has_rule('account', 'optional', 'pam_keyinit.so'))
+ test_rule = PamdRule('account', 'optional', 'pam_keyinit.so', 'revoke')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_that_doesnt_exist(self):
+ self.assertFalse(self.pamd.update_rule('blah', 'blah', 'blah', new_type='account'))
+ self.assertFalse(self.pamd.has_rule('blah', 'blah', 'blah'))
+ test_rule = PamdRule('blah', 'blah', 'blah', 'account')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_type_two(self):
+ self.assertTrue(self.pamd.update_rule('session', '[success=1 default=ignore]', 'pam_succeed_if.so', new_type='account'))
+ self.assertTrue(self.pamd.has_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('account', '[success=1 default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_simple(self):
+ self.assertTrue(self.pamd.update_rule('session', 'optional', 'pam_keyinit.so', new_control='required'))
+ self.assertTrue(self.pamd.has_rule('session', 'required', 'pam_keyinit.so'))
+ test_rule = PamdRule('session', 'required', 'pam_keyinit.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_complex(self):
+ self.assertTrue(self.pamd.update_rule('session',
+ '[success=1 default=ignore]',
+ 'pam_succeed_if.so',
+ new_control='[success=2 test=me default=ignore]'))
+ self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_control_more_complex(self):
+
+ self.assertTrue(self.pamd.update_rule('session',
+ '[success=1 test=me default=ignore]',
+ 'pam_succeed_if.so',
+ new_control='[success=2 test=me default=ignore]'))
+ self.assertTrue(self.pamd.has_rule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so'))
+ test_rule = PamdRule('session', '[success=2 test=me default=ignore]', 'pam_succeed_if.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_module_path(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_path='pam_limits.so'))
+ self.assertTrue(self.pamd.has_rule('auth', 'required', 'pam_limits.so'))
+
+ def test_update_rule_module_path_slash(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so', new_path='/lib64/security/pam_duo.so'))
+ self.assertTrue(self.pamd.has_rule('auth', 'required', '/lib64/security/pam_duo.so'))
+
+ def test_update_rule_module_args(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args='uid uid'))
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'uid uid')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_remove_module_args(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so', new_args=''))
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', '')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_first_three(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'required', 'pam_env.so',
+ new_type='one', new_control='two', new_path='three'))
+ self.assertTrue(self.pamd.has_rule('one', 'two', 'three'))
+
+ def test_update_first_three_with_module_args(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so',
+ new_type='one', new_control='two', new_path='three'))
+ self.assertTrue(self.pamd.has_rule('one', 'two', 'three'))
+ test_rule = PamdRule('one', 'two', 'three')
+ self.assertIn(str(test_rule), str(self.pamd))
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ def test_update_all_four(self):
+ self.assertTrue(self.pamd.update_rule('auth', 'sufficient', 'pam_unix.so',
+ new_type='one', new_control='two', new_path='three',
+ new_args='four five'))
+ test_rule = PamdRule('one', 'two', 'three', 'four five')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ test_rule = PamdRule('auth', 'sufficient', 'pam_unix.so', 'nullok try_first_pass')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_update_rule_with_slash(self):
+ self.assertTrue(self.pamd.update_rule('account', '[success=1 default=ignore]', 'pam_succeed_if.so',
+ new_type='session', new_path='pam_access.so'))
+ test_rule = PamdRule('session', '[success=1 default=ignore]', 'pam_access.so')
+ self.assertIn(str(test_rule), str(self.pamd))
+
+ # Insert Before
+ def test_insert_before_rule(self):
+
+ count = self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+ self.assertEqual(count, 1)
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so"))
+
+ def test_insert_before_rule_where_rule_doesnt_exist(self):
+
+ count = self.pamd.insert_before('account', 'sufficient', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+ self.assertFalse(count)
+
+ def test_insert_before_rule_with_args(self):
+ self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so',
+ new_args='uid'))
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.prev.matches("account", "required", "pam_limits.so", 'uid'))
+
+ def test_insert_before_rule_test_duplicates(self):
+ self.assertTrue(self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ self.pamd.insert_before('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_limits.so')
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ previous_rule = current_rule.prev
+ self.assertTrue(previous_rule.matches("account", "required", "pam_limits.so"))
+ self.assertFalse(previous_rule.prev.matches("account", "required", "pam_limits.so"))
+
+ def test_insert_before_first_rule(self):
+ self.assertTrue(self.pamd.insert_before('auth', 'required', 'pam_env.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ def test_insert_before_first_rule_simple(self):
+ simple_service = PamdService(self.simple_system_auth_string)
+ self.assertTrue(simple_service.insert_before('auth', 'required', 'pam_env.so',
+ new_type='account', new_control='required', new_path='pam_limits.so'))
+
+ # Insert After
+ def test_insert_after_rule(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_unix.so',
+ new_type='account', new_control='required', new_path='pam_permit.so'))
+ rules = self.pamd.get("account", "required", "pam_unix.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so"))
+
+ def test_insert_after_rule_with_args(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ def test_insert_after_test_duplicates(self):
+ self.assertTrue(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ self.assertFalse(self.pamd.insert_after('account', 'required', 'pam_access.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+
+ rules = self.pamd.get("account", "required", "pam_access.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+ self.assertFalse(current_rule.next.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ def test_insert_after_rule_last_rule(self):
+ self.assertTrue(self.pamd.insert_after('session', 'required', 'pam_unix.so',
+ new_type='account', new_control='required', new_path='pam_permit.so',
+ new_args='uid'))
+ rules = self.pamd.get("session", "required", "pam_unix.so")
+ for current_rule in rules:
+ self.assertTrue(current_rule.next.matches("account", "required", "pam_permit.so", "uid"))
+
+ # Remove Module Arguments
+ def test_remove_module_arguments_one(self):
+ self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', 'nullok'))
+
+ def test_remove_module_arguments_one_list(self):
+ self.assertTrue(self.pamd.remove_module_arguments('auth', 'sufficient', 'pam_unix.so', ['nullok']))
+
+ def test_remove_module_arguments_two(self):
+ self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', 'service crond'))
+
+ def test_remove_module_arguments_two_list(self):
+ self.assertTrue(self.pamd.remove_module_arguments('session', '[success=1 default=ignore]', 'pam_succeed_if.so', ['service', 'crond']))
+
+ def test_remove_module_arguments_where_none_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('session', 'required', 'pam_limits.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_add_module_arguments_where_none_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_add_module_arguments_where_none_existed_list(self):
+ self.assertTrue(self.pamd.add_module_arguments('account', 'required', 'pam_unix.so', ['arg1', 'arg2=', 'arg3=arg3']))
+
+ def test_add_module_arguments_where_some_existed(self):
+ self.assertTrue(self.pamd.add_module_arguments('auth', 'sufficient', 'pam_unix.so', 'arg1 arg2= arg3=arg3'))
+
+ def test_remove_rule(self):
+ self.assertTrue(self.pamd.remove('account', 'required', 'pam_unix.so'))
+ # Second run should not change anything
+ self.assertFalse(self.pamd.remove('account', 'required', 'pam_unix.so'))
+ test_rule = PamdRule('account', 'required', 'pam_unix.so')
+ self.assertNotIn(str(test_rule), str(self.pamd))
+
+ def test_remove_first_rule(self):
+ no_header_service = PamdService(self.no_header_system_auth_string)
+ self.assertTrue(no_header_service.remove('auth', 'required', 'pam_env.so'))
+ test_rule = PamdRule('auth', 'required', 'pam_env.so')
+ self.assertNotIn(str(test_rule), str(no_header_service))
+
+ def test_remove_last_rule(self):
+ self.assertTrue(self.pamd.remove('session', 'required', 'pam_unix.so'))
+ test_rule = PamdRule('session', 'required', 'pam_unix.so')
+ self.assertNotIn(str(test_rule), str(self.pamd))
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_parted.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_parted.py
new file mode 100644
index 000000000..1e010343b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_parted.py
@@ -0,0 +1,346 @@
+# (c) 2017 Red Hat Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch, call
+from ansible_collections.community.general.plugins.modules import parted as parted_module
+from ansible_collections.community.general.plugins.modules.parted import parse_parted_version
+from ansible_collections.community.general.plugins.modules.parted import parse_partition_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+# Example of output : parted -s -m /dev/sdb -- unit 'MB' print
+parted_output1 = """
+BYT;
+/dev/sdb:286061MB:scsi:512:512:msdos:ATA TOSHIBA THNSFJ25:;
+1:1.05MB:106MB:105MB:fat32::esp;
+2:106MB:368MB:262MB:ext2::;
+3:368MB:256061MB:255692MB:::;"""
+
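+# Map raw `parted --version` banners to the (major, minor, patch) tuples that
+# parse_parted_version is expected to extract from them.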
+parted_version_info = {"""
+ parted (GNU parted) 3.3
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 3, 0), """
+ parted (GNU parted) 3.4.5
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 4, 5), """
+ parted (GNU parted) 3.3.14-dfc61
+ Copyright (C) 2019 Free Software Foundation, Inc.
+ License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>.
+ This is free software: you are free to change and redistribute it.
+ There is NO WARRANTY, to the extent permitted by law.
+
+ Written by <http://git.debian.org/?p=parted/parted.git;a=blob_plain;f=AUTHORS>.
+ """: (3, 3, 14)}
+
+# corresponding dictionary after parsing by parse_partition_info
+parted_dict1 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": [{
+ "num": 1,
+ "begin": 1.05,
+ "end": 106.0,
+ "size": 105.0,
+ "fstype": "fat32",
+ "name": '',
+ "flags": ["esp"],
+ "unit": "mb"
+ }, {
+ "num": 2,
+ "begin": 106.0,
+ "end": 368.0,
+ "size": 262.0,
+ "fstype": "ext2",
+ "name": '',
+ "flags": [],
+ "unit": "mb"
+ }, {
+ "num": 3,
+ "begin": 368.0,
+ "end": 256061.0,
+ "size": 255692.0,
+ "fstype": "",
+ "name": '',
+ "flags": [],
+ "unit": "mb"
+ }]
+}
+
+parted_output2 = """
+BYT;
+/dev/sdb:286061MB:scsi:512:512:msdos:ATA TOSHIBA THNSFJ25:;"""
+
+# corresponding dictionary after parsing by parse_partition_info
+parted_dict2 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": []
+}
+
+# fake some_flag exists
+parted_dict3 = {
+ "generic": {
+ "dev": "/dev/sdb",
+ "size": 286061.0,
+ "unit": "mb",
+ "table": "msdos",
+ "model": "ATA TOSHIBA THNSFJ25",
+ "logical_block": 512,
+ "physical_block": 512
+ },
+ "partitions": [{
+ "num": 1,
+ "begin": 1.05,
+ "end": 106.0,
+ "size": 105.0,
+ "fstype": "fat32",
+ "name": '',
+ "flags": ["some_flag"],
+ "unit": "mb"
+ }]
+}
+
+
+class TestParted(ModuleTestCase):
+ def setUp(self):
+ super(TestParted, self).setUp()
+
+ self.module = parted_module
+ self.mock_check_parted_label = (patch('ansible_collections.community.general.plugins.modules.parted.check_parted_label', return_value=False))
+ self.check_parted_label = self.mock_check_parted_label.start()
+
+ self.mock_parted = (patch('ansible_collections.community.general.plugins.modules.parted.parted'))
+ self.parted = self.mock_parted.start()
+
+ self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command'))
+ self.run_command = self.mock_run_command.start()
+
+ self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ super(TestParted, self).tearDown()
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ self.mock_parted.stop()
+ self.mock_check_parted_label.stop()
+
+ def execute_module(self, failed=False, changed=False, script=None):
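+        # drive main() once and assert on the failed/changed result;
+        # optionally also check the parted script recorded in the result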
+ if failed:
+ result = self.failed()
+ self.assertTrue(result['failed'], result)
+ else:
+ result = self.changed(changed)
+ self.assertEqual(result['changed'], changed, result)
+
+ if script:
+ self.assertEqual(script, result['script'], result['script'])
+
+ return result
+
+ def failed(self):
+ with self.assertRaises(AnsibleFailJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertTrue(result['failed'], result)
+ return result
+
+ def changed(self, changed=False):
+ with self.assertRaises(AnsibleExitJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertEqual(result['changed'], changed, result)
+ return result
+
+ def test_parse_partition_info(self):
+ """Test that the parse_partition_info returns the expected dictionary"""
+ self.assertEqual(parse_partition_info(parted_output1, 'MB'), parted_dict1)
+ self.assertEqual(parse_partition_info(parted_output2, 'MB'), parted_dict2)
+
+ def test_partition_already_exists(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=False)
+
+ def test_create_new_partition(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary 0% 100%')
+
+ def test_create_new_partition_1G(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ 'part_end': '1GiB',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary 0% 1GiB')
+
+ def test_create_new_partition_minus_1G(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'state': 'present',
+ 'fs_type': 'ext2',
+ 'part_start': '-1GiB',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary ext2 -1GiB 100%')
+
+ def test_remove_partition_number_1(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'absent',
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='rm 1')
+
+ def test_resize_partition(self):
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 3,
+ 'state': 'present',
+ 'part_end': '100%',
+ 'resize': True
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='resizepart 3 100%')
+
+ def test_change_flag(self):
+        # Flags are set in a second run of parted().
+        # Between the two runs, the partition dict is updated.
+        # Using check mode here allows us to continue even if the dictionary
+        # is not updated.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 3,
+ 'state': 'present',
+ 'flags': ['lvm', 'boot'],
+ '_ansible_check_mode': True,
+ })
+
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.parted.reset_mock()
+ self.execute_module(changed=True)
+            # When using multiple flags, the order of execution is
+            # non-deterministic, because the current implementation uses
+            # set() operations.
+ expected_calls_order1 = [call('unit KiB set 3 lvm on set 3 boot on ',
+ '/dev/sdb', 'optimal')]
+ expected_calls_order2 = [call('unit KiB set 3 boot on set 3 lvm on ',
+ '/dev/sdb', 'optimal')]
+ self.assertTrue(self.parted.mock_calls == expected_calls_order1 or
+ self.parted.mock_calls == expected_calls_order2)
+
+ def test_create_new_primary_lvm_partition(self):
+ # use check_mode, see previous test comment
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 4,
+ 'flags': ["boot"],
+ 'state': 'present',
+ 'part_start': '257GiB',
+ 'fs_type': 'ext3',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mkpart primary ext3 257GiB 100% unit KiB set 4 boot on')
+
+ def test_create_label_gpt(self):
+        # Like the previous test, the current implementation uses parted to
+        # create the partition and then retrieves and updates the dictionary.
+        # Use check mode to force execution to continue even if the
+        # dictionary is not updated.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'flags': ["lvm"],
+ 'label': 'gpt',
+ 'name': 'lvmpartition',
+ 'state': 'present',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict2):
+ self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100% unit KiB name 1 \'"lvmpartition"\' set 1 lvm on')
+
+ def test_change_label_gpt(self):
+        # When partitions already exist and the label is changed, mkpart
+        # should be called even though the partition already exists, because
+        # a new empty label will be created anyway.
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'label': 'gpt',
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict1):
+ self.execute_module(changed=True, script='unit KiB mklabel gpt mkpart primary 0% 100%')
+
+ def test_check_mode_unchanged(self):
+ # Test that get_device_info result is checked in check mode too
+ # No change on partition 1
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'flags': ['some_flag'],
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3):
+ self.execute_module(changed=False)
+
+ def test_check_mode_changed(self):
+ # Test that get_device_info result is checked in check mode too
+ # Flag change on partition 1
+ set_module_args({
+ 'device': '/dev/sdb',
+ 'number': 1,
+ 'state': 'present',
+ 'flags': ['other_flag'],
+ '_ansible_check_mode': True,
+ })
+ with patch('ansible_collections.community.general.plugins.modules.parted.get_device_info', return_value=parted_dict3):
+ self.execute_module(changed=True)
+
+ def test_version_info(self):
+ """Test that the parse_parted_version returns the expected tuple"""
+ for key, value in parted_version_info.items():
+ self.assertEqual(parse_parted_version(key), value)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pkgin.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pkgin.py
new file mode 100644
index 000000000..d73911e0c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pkgin.py
@@ -0,0 +1,145 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import mock
+from ansible_collections.community.general.tests.unit.compat import unittest
+
+from ansible_collections.community.general.plugins.modules import pkgin
+
+
+class TestPkginQueryPackage(unittest.TestCase):
+
+ def setUp(self):
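+        # clear the module-level binary path so query_package builds its
+        # command line without resolving a real pkgin executable (run_command
+        # is mocked in every test)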
+ pkgin.PKGIN_PATH = ""
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_without_version_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0 = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_with_version_is_present(self, mock_module):
+ # given
+ package = 'py37-conan-1.21.0'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_found_but_not_installed(self, mock_module):
+ # given
+ package = 'cmake'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 = Cross platform make\ncmake314-3.14.6nb1 = Cross platform make\ncmake-3.14.0 Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.NOT_INSTALLED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_found_outdated(self, mock_module):
+ # given
+ package = 'cmake316'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 < Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.OUTDATED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_with_version_found_outdated(self, mock_module):
+ # given
+ package = 'cmake316-3.16.0nb1'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "cmake316-3.16.0nb1 < Cross platform make", None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.OUTDATED)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_package_not_found(self, mock_module):
+ # given
+ package = 'cmake320-3.20.0nb1'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (1, None, "No results found for %s" % package),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.NOT_FOUND)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_with_parseable_flag_supported_package_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_supported = 0
+ mock_module.run_command.side_effect = [
+ (parseable_flag_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0;=;C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.PRESENT)
+
+ @mock.patch('ansible_collections.community.general.plugins.modules.pkgin.AnsibleModule')
+ def test_with_parseable_flag_not_supported_package_is_present(self, mock_module):
+ # given
+ package = 'py37-conan'
+ parseable_flag_not_supported = 1
+ mock_module.run_command.side_effect = [
+ (parseable_flag_not_supported, "pkgin 0.11.7 for Darwin-18.6.0 x86_64 (using SQLite 3.27.2)", None),
+ (0, "%s-1.21.0 = C/C++ package manager" % package, None),
+ ]
+
+ # when
+ command_result = pkgin.query_package(mock_module, package)
+
+ # then
+        self.assertEqual(command_result, pkgin.PackageState.PRESENT)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pmem.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pmem.py
new file mode 100644
index 000000000..cea673da0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pmem.py
@@ -0,0 +1,707 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2022, Masayoshi Mizuma <msys.mizuma@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import json
+
+pytest.importorskip('xmltodict')
+
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import ModuleTestCase, set_module_args, AnsibleFailJson, AnsibleExitJson
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+from ansible_collections.community.general.plugins.modules import pmem as pmem_module
+
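+# The pmem module parses ipmctl's nvmxml output (hence the xmltodict
+# importorskip above); each mock below reproduces the raw output that
+# pmem_run_command would return for the command named in its comment.
+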
+# goal_plain: the mock return value of pmem_run_command with:
+# ipmctl create -goal MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal_plain = """The following configuration will be applied:
+SocketID | DimmID | MemorySize | AppDirect1Size | AppDirect2Size
+==================================================================
+0x0000 | 0x0001 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+0x0000 | 0x0011 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+0x0001 | 0x1001 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+0x0001 | 0x1011 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+Do you want to continue? [y/n] Error: Invalid data input."""
+
+# goal_plain_sk0: the mock return value of pmem_run_command with:
+# ipmctl create -goal -socket 0 MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal_plain_sk0 = """The following configuration will be applied:
+SocketID | DimmID | MemorySize | AppDirect1Size | AppDirect2Size
+==================================================================
+0x0000 | 0x0001 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+0x0000 | 0x0011 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+Do you want to continue? [y/n] Error: Invalid data input."""
+
+# goal_plain_sk1: the mock return value of pmem_run_command with:
+# ipmctl create -goal -socket 1 MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal_plain_sk1 = """The following configuration will be applied:
+SocketID | DimmID | MemorySize | AppDirect1Size | AppDirect2Size
+==================================================================
+0x0001 | 0x1001 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+0x0001 | 0x1011 | 88.000 GiB | 12.000 GiB | 0.000 GiB
+Do you want to continue? [y/n] Error: Invalid data input."""
+
+# goal: the mock return value of pmem_run_command with:
+# ipmctl create -u B -o nvmxml -force -goal -socket 0 MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal = """<?xml version="1.0"?>
+<ConfigGoalList>
+ <ConfigGoal>
+ <SocketID>0x0000</SocketID>
+ <DimmID>0x0001</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+ <ConfigGoal>
+ <SocketID>0x0000</SocketID>
+ <DimmID>0x0011</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+</ConfigGoalList>"""
+
+# goal_sk0: the mock return value of pmem_run_command with:
+# ipmctl create -u B -o nvmxml -force -goal -socket 0 MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal_sk0 = """<?xml version="1.0"?>
+<ConfigGoalList>
+ <ConfigGoal>
+ <SocketID>0x0000</SocketID>
+ <DimmID>0x0001</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+ <ConfigGoal>
+ <SocketID>0x0001</SocketID>
+ <DimmID>0x0011</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+</ConfigGoalList>"""
+
+# goal_sk1: the mock return value of pmem_run_command with:
+# ipmctl create -u B -o nvmxml -force -goal -socket 1 MemoryMode=70 Reserved=20 PersistentMemoryType=AppDirect
+goal_sk1 = """<?xml version="1.0"?>
+<ConfigGoalList>
+ <ConfigGoal>
+ <SocketID>0x0001</SocketID>
+ <DimmID>0x1001</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+ <ConfigGoal>
+ <SocketID>0x0001</SocketID>
+ <DimmID>0x1011</DimmID>
+ <MemorySize>94489280512 B</MemorySize>
+ <AppDirect1Size>12884901888 B</AppDirect1Size>
+ <AppDirect2Size>0 B</AppDirect2Size>
+ </ConfigGoal>
+</ConfigGoalList>"""
+
+# dimmlist: the mock return value of pmem_run_command with:
+# ipmctl show -d Capacity -u B -o nvmxml -dimm
+dimmlist = """<?xml version="1.0"?>
+<DimmList>
+ <Dimm>
+ <DimmID>0x0001</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+ <Dimm> <DimmID>0x0011</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+ <Dimm>
+ <DimmID>0x1001</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+ <Dimm> <DimmID>0x1011</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+</DimmList>"""
+
+# dimmlist_sk0: the mock return value of pmem_run_command with:
+# ipmctl show -d Capacity -u B -o nvmxml -dimm -socket 0
+dimmlist_sk0 = """<?xml version="1.0"?>
+<DimmList>
+ <Dimm>
+ <DimmID>0x0001</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+ <Dimm>
+  <DimmID>0x0011</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+</DimmList>"""
+
+# dimmlist_sk1: the mock return value of pmem_run_command with:
+# ipmctl show -d Capacity -u B -o nvmxml -dimm -socket 1
+dimmlist_sk1 = """<?xml version="1.0"?>
+<DimmList>
+ <Dimm>
+ <DimmID>0x1001</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+ <Dimm>
+  <DimmID>0x1011</DimmID>
+ <Capacity>135744782336 B</Capacity>
+ </Dimm>
+</DimmList>"""
+
+# show_skt: the mock return value of pmem_run_command with:
+# ipmctl show -o nvmxml -socket
+show_skt = """<?xml version="1.0"?>
+<SocketList>
+ <Socket>
+ <SocketID>0x0000</SocketID>
+ <MappedMemoryLimit>1024.000 GiB</MappedMemoryLimit>
+ <TotalMappedMemory>400.000 GiB</TotalMappedMemory>
+ </Socket>
+ <Socket>
+ <SocketID>0x0001</SocketID>
+ <MappedMemoryLimit>1024.000 GiB</MappedMemoryLimit>
+ <TotalMappedMemory>400.000 GiB</TotalMappedMemory>
+ </Socket>
+</SocketList>"""
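+
+# show_skt is consumed first in the socket-specific tests' side_effect lists,
+# before the per-socket goal_plain/goal/dimmlist fixtures.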
+
+# ndctl_region: the mock return value of pmem_run_command with:
+# ndctl list -R
+ndctl_region = """[
+ {
+ "dev":"region1",
+ "size":51539607552,
+ "align":16777216,
+ "available_size":50465865728,
+ "max_available_extent":50465865728,
+ "type":"pmem",
+ "iset_id":4694373484956518536,
+ "persistence_domain":"memory_controller"
+ },
+ {
+ "dev":"region0",
+ "size":51539607552,
+ "align":16777216,
+ "available_size":51539607552,
+ "max_available_extent":51539607552,
+ "type":"pmem",
+ "iset_id":4588538894081362056,
+ "persistence_domain":"memory_controller"
+ }
+]"""
+
+# ndctl_region_empty: the mock return value of pmem_run_command with:
+# ndctl list -R
+ndctl_region_empty = ""
+
+# ndctl_create_without_size: the mock return value of pmem_run_command with:
+# ndctl create-namespace -t pmem -m sector
+ndctl_create_without_size = """{
+ "dev":"namespace1.0",
+ "mode":"sector",
+ "size":"47.95 GiB (51.49 GB)",
+ "uuid":"1aca23a5-941c-4f4a-9d88-e531f0b5a27e",
+ "sector_size":4096,
+ "blockdev":"pmem1s"
+}"""
+
+# ndctl_list_N: the mock return value of pmem_run_command with:
+# ndctl list -N
+ndctl_list_N = """[
+ {
+ "dev":"namespace1.0",
+ "mode":"sector",
+ "size":51488243712,
+ "uuid":"1aca23a5-941c-4f4a-9d88-e531f0b5a27e",
+ "sector_size":4096,
+ "blockdev":"pmem1s"
+ }
+]"""
+
+# ndctl_create_1G: the mock return value of pmem_run_command with:
+# ndctl create-namespace -t pmem -m sector -s 1073741824
+ndctl_create_1G = """{
+ "dev":"namespace0.0",
+ "mode":"sector",
+ "size":"1021.97 MiB (1071.62 MB)",
+ "uuid":"5ba4e51b-3028-4b06-8495-b6834867a9af",
+ "sector_size":4096,
+ "blockdev":"pmem0s"
+}"""
+
+# ndctl_create_640M: the mock return value of pmem_run_command with:
+# ndctl create-namespace -t pmem -m raw -s 671088640
+ndctl_create_640M = """{
+ "dev":"namespace1.0",
+ "mode":"raw",
+ "size":"640.00 MiB (671.09 MB)",
+ "uuid":"5ac1f81d-86e6-4f07-9460-8c4d37027f7a",
+ "sector_size":512,
+ "blockdev":"pmem1"
+}"""
+
+# ndctl_list_N_two_namespaces: the mock return value of pmem_run_command with:
+# ndctl list -N
+ndctl_list_N_two_namespaces = """[
+ {
+ "dev":"namespace1.0",
+ "mode":"sector",
+ "size":1071616000,
+ "uuid":"afcf050d-3a8b-4f48-88a5-16d7c40ab2d8",
+ "sector_size":4096,
+ "blockdev":"pmem1s"
+ },
+ {
+ "dev":"namespace1.1",
+ "mode":"raw",
+ "size":671088640,
+ "uuid":"fb704339-729b-4cc7-b260-079f2633d84f",
+ "sector_size":512,
+ "blockdev":"pmem1.1"
+ }
+]"""
+
+
+class TestPmem(ModuleTestCase):
+ def setUp(self):
+ super(TestPmem, self).setUp()
+ self.module = pmem_module
+
+ self.mock_run_command = (patch('ansible.module_utils.basic.AnsibleModule.run_command'))
+ self.mock_get_bin_path = (patch('ansible.module_utils.basic.AnsibleModule.get_bin_path'))
+
+ self.run_command = self.mock_run_command.start()
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ self.mock_pmem_is_dcpmm_installed = (patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_is_dcpmm_installed', return_value=""))
+ self.mock_pmem_init_env = (patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_init_env', return_value=""))
+
+ self.pmem_is_dcpmm_installed = self.mock_pmem_is_dcpmm_installed.start()
+ self.pmem_init_env = self.mock_pmem_init_env.start()
+
+ def tearDown(self):
+ super(TestPmem, self).tearDown()
+ self.mock_get_bin_path.stop()
+ self.mock_run_command.stop()
+ self.mock_pmem_is_dcpmm_installed.stop()
+ self.mock_pmem_init_env.stop()
+
+ def result_check(self, result, socket, appdirect, memmode, reserved):
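+        """Assert changed/reboot_required and the returned appdirect,
+        memorymode and reserved sizes (in bytes), per socket when used."""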
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertTrue(result.exception.args[0]['reboot_required'])
+
+ test_result = result.exception.args[0]['result']
+
+ if socket:
+ maxIndex = 1
+ else:
+ maxIndex = 0
+
+        for i in range(0, maxIndex + 1):
+ self.assertAlmostEqual(test_result[i]['appdirect'], appdirect[i])
+ self.assertAlmostEqual(test_result[i]['memorymode'], memmode[i])
+ self.assertAlmostEqual(test_result[i]['reserved'], reserved[i])
+ if socket:
+ self.assertAlmostEqual(test_result[i]['socket'], i)
+
+ def result_check_ns(self, result, namespace):
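+        """Assert changed without reboot_required, and that each resulting
+        namespace dev/size matches the parsed ndctl fixture."""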
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertFalse(result.exception.args[0]['reboot_required'])
+
+ test_result = result.exception.args[0]['result']
+ expected = json.loads(namespace)
+
+        for i, ns in enumerate(test_result):
+            self.assertEqual(ns['dev'], expected[i]['dev'])
+            self.assertEqual(ns['size'], expected[i]['size'])
+
+ def test_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ pmem_module.main()
+
+ def test_fail_when_appdirect_only(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'appdirect': 10,
+ })
+ pmem_module.main()
+
+    def test_fail_when_memorymode_only(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'memorymode': 70,
+ })
+ pmem_module.main()
+
+ def test_fail_when_reserved_only(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'reserved': 10,
+ })
+ pmem_module.main()
+
+ def test_fail_when_appdirect_memorymode_reserved_total_not_100(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'appdirect': 10,
+ 'memorymode': 70,
+ 'reserved': 10,
+ })
+ pmem_module.main()
+
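+    # The expected values passed to result_check below are in bytes:
+    # 88.000 GiB per DIMM = 88 * 1024**3 = 94489280512 and 12.000 GiB =
+    # 12884901888 (see the goal fixtures), so appdirect totals 25769803776
+    # and memorymode totals 188978561024; the expected reserved value is the
+    # total DIMM capacity minus both, e.g.
+    # 4 * 135744782336 - 25769803776 - 188978561024 = 328230764544.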
+ def test_when_appdirect_memorymode(self):
+ set_module_args({
+ 'appdirect': 10,
+ 'memorymode': 70,
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[goal_plain, goal, dimmlist]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(result, False, [25769803776], [188978561024], [328230764544])
+
+ def test_when_appdirect_memorymode_reserved(self):
+ set_module_args({
+ 'appdirect': 10,
+ 'memorymode': 70,
+ 'reserved': 20,
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[goal_plain, goal, dimmlist]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(result, False, [25769803776], [188978561024], [328230764544])
+
+ def test_when_appdirect_notinterleaved_memorymode_reserved(self):
+ set_module_args({
+ 'appdirect': 10,
+ 'appdirect_interleaved': False,
+ 'memorymode': 70,
+ 'reserved': 20,
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[goal_plain, goal, dimmlist]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(result, False, [25769803776], [188978561024], [328230764544])
+
+ def test_fail_when_socket_id_appdirect(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'socket': [
+ {
+ 'id': 0,
+ 'appdirect': 10,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ },
+ ],
+ })
+ pmem_module.main()
+
+ def test_fail_when_socket0_id_memorymode_socket1_id_appdirect(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'socket': [
+ {
+ 'id': 0,
+                        'memorymode': 70,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ },
+ ],
+ })
+ pmem_module.main()
+
+ def test_fail_when_socket0_without_id(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'socket': [
+ {
+ 'appdirect': 10,
+ 'memorymode': 70,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ },
+ ],
+ })
+ pmem_module.main()
+
+ def test_when_socket0_and_1_appdirect_memorymode(self):
+ set_module_args({
+ 'socket': [
+ {
+ 'id': 0,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[
+ show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(
+ result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272])
+
+ def test_when_socket0_and_1_appdirect_memorymode_reserved(self):
+ set_module_args({
+ 'socket': [
+ {
+ 'id': 0,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ 'reserved': 20,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ 'reserved': 20,
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[
+ show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(
+ result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272])
+
+ def test_when_socket0_appdirect_notinterleaved_memorymode_reserved_socket1_appdirect_memorymode_reserved(self):
+ set_module_args({
+ 'socket': [
+ {
+ 'id': 0,
+ 'appdirect': 10,
+ 'appdirect_interleaved': False,
+ 'memorymode': 70,
+ 'reserved': 20,
+ },
+ {
+ 'id': 1,
+ 'appdirect': 10,
+ 'memorymode': 70,
+ 'reserved': 20,
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[
+ show_skt, goal_plain_sk0, goal_sk0, dimmlist_sk0, goal_plain_sk1, goal_sk1, dimmlist_sk1]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check(
+ result, True, [12884901888, 12884901888], [94489280512, 94489280512], [164115382272, 164115382272])
+
+ def test_fail_when_namespace_without_mode(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '1GB',
+ 'type': 'pmem',
+ },
+ {
+ 'size': '2GB',
+ 'type': 'blk',
+ },
+ ],
+ })
+ pmem_module.main()
+
+ def test_fail_when_region_is_empty(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '1GB',
+ 'type': 'pmem',
+ 'mode': 'sector',
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region_empty]):
+ pmem_module.main()
+
+ def test_fail_when_namespace_invalid_size(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '1XXX',
+ 'type': 'pmem',
+ 'mode': 'sector',
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region]):
+ pmem_module.main()
+
+ def test_fail_when_size_is_invalid_alignment(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '400MB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ {
+ 'size': '500MB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region]):
+ pmem_module.main()
+
+ def test_fail_when_blk_is_unsupported_type(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '4GB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ {
+ 'size': '5GB',
+ 'type': 'blk',
+ 'mode': 'sector'
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region]):
+ pmem_module.main()
+
+    def test_fail_when_size_is_not_set_to_multiple_namespaces(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ {
+ 'size': '500GB',
+ 'type': 'blk',
+ 'mode': 'sector'
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region]):
+ pmem_module.main()
+
+ def test_fail_when_size_of_namespace_over_available(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '400GB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ {
+ 'size': '500GB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region]):
+ pmem_module.main()
+
+ def test_when_namespace0_without_size(self):
+ set_module_args({
+ 'namespace': [
+ {
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region, ndctl_create_without_size, ndctl_list_N]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check_ns(result, ndctl_list_N)
+
+ def test_when_namespace0_with_namespace_append(self):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '640MB',
+ 'type': 'pmem',
+ 'mode': 'raw'
+ },
+ ],
+ 'namespace_append': True,
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region, ndctl_create_640M, ndctl_list_N_two_namespaces]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check_ns(result, ndctl_list_N_two_namespaces)
+
+ def test_when_namespace0_1GiB_pmem_sector_namespace1_640MiB_pmem_raw(self):
+ set_module_args({
+ 'namespace': [
+ {
+ 'size': '1GB',
+ 'type': 'pmem',
+ 'mode': 'sector'
+ },
+ {
+ 'size': '640MB',
+ 'type': 'pmem',
+ 'mode': 'raw',
+ },
+ ],
+ })
+ with patch(
+ 'ansible_collections.community.general.plugins.modules.pmem.PersistentMemory.pmem_run_command',
+ side_effect=[ndctl_region, ndctl_create_1G, ndctl_create_640M, ndctl_list_N_two_namespaces]):
+ with self.assertRaises(AnsibleExitJson) as result:
+ pmem_module.main()
+ self.result_check_ns(result, ndctl_list_N_two_namespaces)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org.py
new file mode 100644
index 000000000..94809784b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.modules import (
+ pritunl_org,
+)
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import (
+ PritunlDeleteOrganizationMock,
+ PritunlListOrganizationMock,
+ PritunlListOrganizationAfterPostMock,
+ PritunlPostOrganizationMock,
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+ set_module_args,
+)
+
+__metaclass__ = type
+
+
+class TestPritunlOrg(ModuleTestCase):
+ def setUp(self):
+ super(TestPritunlOrg, self).setUp()
+ self.module = pritunl_org
+
+ # Add backward compatibility
+ if sys.version_info < (3, 2):
+ self.assertRegex = self.assertRegexpMatches
+
+ def tearDown(self):
+ super(TestPritunlOrg, self).tearDown()
+
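+    # The helpers below patch the private Pritunl API functions with
+    # autospec=True, so calls made through the mocks are validated against
+    # the real functions' signatures.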
+ def patch_add_pritunl_organization(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_organization",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_delete_pritunl_organization(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_organization",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_get_pritunl_organizations(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations",
+ autospec=True,
+ **kwds
+ )
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ set_module_args({})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_present(self):
+ """Test Pritunl organization creation."""
+ org_params = {"name": "NewOrg"}
+ set_module_args(
+ dict_merge(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ },
+ org_params,
+ )
+ )
+ # Test creation
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as mock_get:
+ with self.patch_add_pritunl_organization(
+ side_effect=PritunlPostOrganizationMock
+ ) as mock_add:
+ with self.assertRaises(AnsibleExitJson) as create_result:
+ self.module.main()
+
+ create_exc = create_result.exception.args[0]
+
+ self.assertTrue(create_exc["changed"])
+ self.assertEqual(create_exc["response"]["name"], org_params["name"])
+ self.assertEqual(create_exc["response"]["user_count"], 0)
+
+ # Test module idempotency
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationAfterPostMock
+ ) as mock_get:
+ with self.patch_add_pritunl_organization(
+ side_effect=PritunlPostOrganizationMock
+ ) as mock_add:
+ with self.assertRaises(AnsibleExitJson) as idempotent_result:
+ self.module.main()
+
+ idempotent_exc = idempotent_result.exception.args[0]
+
+ # Ensure both calls resulted in the same returned value
+ # except for changed which should be false the second time
+ for k, v in iteritems(idempotent_exc):
+ if k == "changed":
+ self.assertFalse(idempotent_exc[k])
+ else:
+ self.assertEqual(create_exc[k], idempotent_exc[k])
+
+ def test_absent(self):
+ """Test organization removal from Pritunl."""
+ org_params = {"name": "NewOrg"}
+ set_module_args(
+ dict_merge(
+ {
+ "state": "absent",
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ },
+ org_params,
+ )
+ )
+ # Test deletion
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationAfterPostMock
+ ) as mock_get:
+ with self.patch_delete_pritunl_organization(
+ side_effect=PritunlDeleteOrganizationMock
+ ) as mock_delete:
+ with self.assertRaises(AnsibleExitJson) as delete_result:
+ self.module.main()
+
+ delete_exc = delete_result.exception.args[0]
+
+ self.assertTrue(delete_exc["changed"])
+ self.assertEqual(delete_exc["response"], {})
+
+ # Test module idempotency
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as mock_get:
+ with self.patch_delete_pritunl_organization(
+ side_effect=PritunlDeleteOrganizationMock
+ ) as mock_add:
+ with self.assertRaises(AnsibleExitJson) as idempotent_result:
+ self.module.main()
+
+ idempotent_exc = idempotent_result.exception.args[0]
+
+ # Ensure both calls resulted in the same returned value
+ # except for changed which should be false the second time
+ self.assertFalse(idempotent_exc["changed"])
+ self.assertEqual(idempotent_exc["response"], delete_exc["response"])
+
+ def test_absent_with_existing_users(self):
+ """Test organization removal with attached users should fail except if force is true."""
+ module_args = {
+ "state": "absent",
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "name": "GumGum",
+ }
+ set_module_args(module_args)
+
+ # Test deletion
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as mock_get:
+ with self.patch_delete_pritunl_organization(
+ side_effect=PritunlDeleteOrganizationMock
+ ) as mock_delete:
+ with self.assertRaises(AnsibleFailJson) as failure_result:
+ self.module.main()
+
+ failure_exc = failure_result.exception.args[0]
+
+ self.assertRegex(failure_exc["msg"], "Can not remove organization")
+
+ # Switch force=True which should run successfully
+ set_module_args(dict_merge(module_args, {"force": True}))
+
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as mock_get:
+ with self.patch_delete_pritunl_organization(
+ side_effect=PritunlDeleteOrganizationMock
+ ) as mock_delete:
+ with self.assertRaises(AnsibleExitJson) as delete_result:
+ self.module.main()
+
+ delete_exc = delete_result.exception.args[0]
+
+ self.assertTrue(delete_exc["changed"])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org_info.py
new file mode 100644
index 000000000..dc33c3d8c
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_org_info.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ansible_collections.community.general.plugins.modules import (
+ pritunl_org_info,
+)
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import (
+ PritunlListOrganizationMock,
+ PritunlEmptyOrganizationMock,
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+ set_module_args,
+)
+
+__metaclass__ = type
+
+
+class TestPritunlOrgInfo(ModuleTestCase):
+ def setUp(self):
+ super(TestPritunlOrgInfo, self).setUp()
+ self.module = pritunl_org_info
+
+ # Add backward compatibility
+ if sys.version_info < (3, 2):
+ self.assertRegex = self.assertRegexpMatches
+
+ def tearDown(self):
+ super(TestPritunlOrgInfo, self).tearDown()
+
+ def patch_get_pritunl_organizations(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations",
+ autospec=True,
+ **kwds
+ )
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ set_module_args({})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 0)
+
+ def test_list_empty_organizations(self):
+ """Listing all organizations even when no org exists should be valid."""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlEmptyOrganizationMock
+ ) as org_mock:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ self.assertEqual(len(exc["organizations"]), 0)
+
+ def test_list_specific_organization(self):
+ """Listing a specific organization should be valid."""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "org": "GumGum",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ self.assertEqual(len(exc["organizations"]), 1)
+
+ def test_list_unknown_organization(self):
+ """Listing an unknown organization should result in a failure."""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "org": "Unknown",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ self.assertRegex(exc["msg"], "does not exist")
+
+ def test_list_all_organizations(self):
+ """Listing all organizations should be valid."""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ self.assertEqual(len(exc["organizations"]), 3)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user.py
new file mode 100644
index 000000000..112083918
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021 Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ansible.module_utils.common.dict_transformations import dict_merge
+from ansible.module_utils.six import iteritems
+from ansible_collections.community.general.plugins.modules import (
+ pritunl_user,
+)
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import (
+ PritunlDeleteUserMock,
+ PritunlListOrganizationMock,
+ PritunlListUserMock,
+ PritunlPostUserMock,
+ PritunlPutUserMock,
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+ set_module_args,
+)
+
+__metaclass__ = type
+
+
+def mock_pritunl_api(func, **kwargs):
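+    """Stack the Pritunl API mocks around a test method, sparing each test
+    from repeating the nested with-blocks."""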
+ def wrapped(self=None):
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ):
+ with self.patch_get_pritunl_users(side_effect=PritunlListUserMock):
+ with self.patch_add_pritunl_users(side_effect=PritunlPostUserMock):
+ with self.patch_delete_pritunl_users(
+ side_effect=PritunlDeleteUserMock
+ ):
+ func(self, **kwargs)
+
+ return wrapped
+
+
+class TestPritunlUser(ModuleTestCase):
+ def setUp(self):
+ super(TestPritunlUser, self).setUp()
+ self.module = pritunl_user
+
+ # Add backward compatibility
+ if sys.version_info < (3, 2):
+ self.assertRegex = self.assertRegexpMatches
+
+ def tearDown(self):
+ super(TestPritunlUser, self).tearDown()
+
+ def patch_get_pritunl_users(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_add_pritunl_users(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._post_pritunl_user",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_update_pritunl_users(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._put_pritunl_user",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_delete_pritunl_users(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._delete_pritunl_user",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_get_pritunl_organizations(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations",
+ autospec=True,
+ **kwds
+ )
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ set_module_args({})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ @mock_pritunl_api
+ def test_present(self):
+ """Test Pritunl user creation and update."""
+ user_params = {
+ "user_name": "alice",
+ "user_email": "alice@company.com",
+ }
+ set_module_args(
+ dict_merge(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "GumGum",
+ },
+ user_params,
+ )
+ )
+
+ with self.patch_update_pritunl_users(
+ side_effect=PritunlPostUserMock
+ ) as post_mock:
+ with self.assertRaises(AnsibleExitJson) as create_result:
+ self.module.main()
+
+ create_exc = create_result.exception.args[0]
+
+ self.assertTrue(create_exc["changed"])
+ self.assertEqual(create_exc["response"]["name"], user_params["user_name"])
+ self.assertEqual(create_exc["response"]["email"], user_params["user_email"])
+ self.assertFalse(create_exc["response"]["disabled"])
+
+ # Changing user from alice to bob should update certain fields only
+
+ new_user_params = {
+ "user_name": "bob",
+ "user_email": "bob@company.com",
+ "user_disabled": True,
+ }
+ set_module_args(
+ dict_merge(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "GumGum",
+ },
+ new_user_params,
+ )
+ )
+
+ with self.patch_update_pritunl_users(
+ side_effect=PritunlPutUserMock
+ ) as put_mock:
+
+ with self.assertRaises(AnsibleExitJson) as update_result:
+ self.module.main()
+
+ update_exc = update_result.exception.args[0]
+
+ # Ensure only certain settings changed and the rest remained untouched.
+ for k, v in iteritems(update_exc):
+ if k in new_user_params:
+ assert update_exc[k] == v
+ else:
+ assert update_exc[k] == create_exc[k]
+
+ @mock_pritunl_api
+ def test_absent(self):
+ """Test user removal from Pritunl."""
+ set_module_args(
+ {
+ "state": "absent",
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "GumGum",
+ "user_name": "florian",
+ }
+ )
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+
+ exc = result.exception.args[0]
+
+ self.assertTrue(exc["changed"])
+ self.assertEqual(exc["response"], {})
+
+ @mock_pritunl_api
+ def test_absent_failure(self):
+ """Test user removal from a non existing organization."""
+ set_module_args(
+ {
+ "state": "absent",
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "Unknown",
+ "user_name": "floria@company.com",
+ }
+ )
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+
+ exc = result.exception.args[0]
+
+ self.assertRegex(exc["msg"], "Can not remove user")
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user_info.py
new file mode 100644
index 000000000..5aae15d96
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_pritunl_user_info.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2021, Florian Dambrine <android.florian@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ansible_collections.community.general.plugins.modules import (
+ pritunl_user_info,
+)
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.module_utils.net_tools.pritunl.test_api import (
+ PritunlListOrganizationMock,
+ PritunlListUserMock,
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+ set_module_args,
+)
+
+__metaclass__ = type
+
+
+class TestPritunlUserInfo(ModuleTestCase):
+ def setUp(self):
+ super(TestPritunlUserInfo, self).setUp()
+ self.module = pritunl_user_info
+
+ # Add backward compatibility
+ if sys.version_info < (3, 2):
+ self.assertRegex = self.assertRegexpMatches
+
+ def tearDown(self):
+ super(TestPritunlUserInfo, self).tearDown()
+
+ def patch_get_pritunl_users(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_users",
+ autospec=True,
+ **kwds
+ )
+
+ def patch_get_pritunl_organizations(self, **kwds):
+ return patch(
+ "ansible_collections.community.general.plugins.module_utils.net_tools.pritunl.api._get_pritunl_organizations",
+ autospec=True,
+ **kwds
+ )
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.patch_get_pritunl_users(
+ side_effect=PritunlListUserMock
+ ) as user_mock:
+ set_module_args({})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 0)
+ self.assertEqual(user_mock.call_count, 0)
+
+ def test_missing_organization(self):
+ """Failure must occur when the requested organization is not found."""
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.patch_get_pritunl_users(
+ side_effect=PritunlListUserMock
+ ) as user_mock:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "Unknown",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+ self.assertEqual(user_mock.call_count, 0)
+
+ exc = result.exception.args[0]
+ self.assertRegex(exc["msg"], "Can not list users from the organization")
+
+ def test_get_all_client_users_from_organization(self):
+ """
+        The list of all Pritunl client users from the organization must be returned when no user is specified.
+ """
+ expected_user_type = "client"
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.patch_get_pritunl_users(
+ side_effect=PritunlListUserMock
+ ) as user_mock:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "GumGum",
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+ self.assertEqual(user_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ # module should not report changes
+ self.assertFalse(exc["changed"])
+        # user_type defaults to "client", so only client-type users should be returned
+ self.assertEqual(len(exc["users"]), 1)
+ for user in exc["users"]:
+ self.assertEqual(user["type"], expected_user_type)
+
+ def test_get_specific_server_user_from_organization(self):
+ """
+ Retrieving a specific user from the organization must return a single record.
+ """
+ expected_user_type = "server"
+ expected_user_name = "ops"
+ with self.patch_get_pritunl_organizations(
+ side_effect=PritunlListOrganizationMock
+ ) as org_mock:
+ with self.patch_get_pritunl_users(
+ side_effect=PritunlListUserMock
+ ) as user_mock:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args(
+ {
+ "pritunl_api_token": "token",
+ "pritunl_api_secret": "secret",
+ "pritunl_url": "https://pritunl.domain.com",
+ "organization": "GumGum",
+ "user_name": expected_user_name,
+ "user_type": expected_user_type,
+ }
+ )
+ self.module.main()
+
+ self.assertEqual(org_mock.call_count, 1)
+ self.assertEqual(user_mock.call_count, 1)
+
+ exc = result.exception.args[0]
+ # module should not report changes
+ self.assertFalse(exc["changed"])
+ self.assertEqual(len(exc["users"]), 1)
+ for user in exc["users"]:
+ self.assertEqual(user["type"], expected_user_type)
+ self.assertEqual(user["name"], expected_user_name)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_kvm.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_kvm.py
new file mode 100644
index 000000000..531185102
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_kvm.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules.proxmox_kvm import parse_dev, parse_mac
+
+
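+# parse_mac extracts the MAC address from a Proxmox net device definition,
+# and parse_dev extracts the storage volume (the part before the first
+# comma-separated option) from a disk definition.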
+def test_parse_mac():
+ assert parse_mac('virtio=00:11:22:AA:BB:CC,bridge=vmbr0,firewall=1') == '00:11:22:AA:BB:CC'
+
+
+def test_parse_dev():
+ assert parse_dev('local-lvm:vm-1000-disk-0,format=qcow2') == 'local-lvm:vm-1000-disk-0'
+ assert parse_dev('local-lvm:vm-101-disk-1,size=8G') == 'local-lvm:vm-101-disk-1'
+ assert parse_dev('local-zfs:vm-1001-disk-0') == 'local-zfs:vm-1001-disk-0'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_snap.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_snap.py
new file mode 100644
index 000000000..4bdcaa8b7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_snap.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from ansible_collections.community.general.tests.unit.compat.mock import MagicMock, patch
+from ansible_collections.community.general.plugins.modules import proxmox_snap
+import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+
+def get_resources(type):
+ return [{"diskwrite": 0,
+ "vmid": 100,
+ "node": "localhost",
+ "id": "lxc/100",
+ "maxdisk": 10000,
+ "template": 0,
+ "disk": 10000,
+ "uptime": 10000,
+ "maxmem": 10000,
+ "maxcpu": 1,
+ "netin": 10000,
+ "type": "lxc",
+ "netout": 10000,
+ "mem": 10000,
+ "diskread": 10000,
+ "cpu": 0.01,
+ "name": "test-lxc",
+ "status": "running"}]
+
+
+def fake_api(mocker):
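+    """Return a MagicMock standing in for the Proxmox API client; its
+    cluster.resources.get serves the LXC fixture above."""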
+ r = mocker.MagicMock()
+ r.cluster.resources.get = MagicMock(side_effect=get_resources)
+ return r
+
+
+def test_proxmox_snap_without_argument(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ proxmox_snap.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_create_snapshot_check_mode(connect_mock, capfd, mocker):
+ set_module_args({"hostname": "test-lxc",
+ "api_user": "root@pam",
+ "api_password": "secret",
+ "api_host": "127.0.0.1",
+ "state": "present",
+ "snapname": "test",
+ "timeout": "1",
+ "force": True,
+ "_ansible_check_mode": True})
+ proxmox_utils.HAS_PROXMOXER = True
+ connect_mock.side_effect = lambda: fake_api(mocker)
+ with pytest.raises(SystemExit) as results:
+ proxmox_snap.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ assert not json.loads(out)['changed']
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_remove_snapshot_check_mode(connect_mock, capfd, mocker):
+ set_module_args({"hostname": "test-lxc",
+ "api_user": "root@pam",
+ "api_password": "secret",
+ "api_host": "127.0.0.1",
+ "state": "absent",
+ "snapname": "test",
+ "timeout": "1",
+ "force": True,
+ "_ansible_check_mode": True})
+ proxmox_utils.HAS_PROXMOXER = True
+ connect_mock.side_effect = lambda: fake_api(mocker)
+ with pytest.raises(SystemExit) as results:
+ proxmox_snap.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ assert not json.loads(out)['changed']
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_rollback_snapshot_check_mode(connect_mock, capfd, mocker):
+ set_module_args({"hostname": "test-lxc",
+ "api_user": "root@pam",
+ "api_password": "secret",
+ "api_host": "127.0.0.1",
+ "state": "rollback",
+ "snapname": "test",
+ "timeout": "1",
+ "force": True,
+ "_ansible_check_mode": True})
+ proxmox_utils.HAS_PROXMOXER = True
+ connect_mock.side_effect = lambda: fake_api(mocker)
+ with pytest.raises(SystemExit) as results:
+ proxmox_snap.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ output = json.loads(out)
+ assert not output['changed']
+ assert output['msg'] == "Snapshot test does not exist"
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_tasks_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_tasks_info.py
new file mode 100644
index 000000000..0d1b5a7bf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_proxmox_tasks_info.py
@@ -0,0 +1,195 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Andreas Botzner (@paginabianca) <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Proxmox Tasks module unit tests.
+# The API responses used in these tests were recorded from PVE version 6.4-8
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import json
+
+from ansible_collections.community.general.plugins.modules import proxmox_tasks_info
+import ansible_collections.community.general.plugins.module_utils.proxmox as proxmox_utils
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+NODE = 'node01'
+TASK_UPID = 'UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:'
+TASKS = [
+ {
+ "endtime": 1629092710,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 3539,
+ "pstart": 474062216,
+ "starttime": 1629092709,
+ "status": "OK",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
+ "user": "root@pam"
+ },
+ {
+ "endtime": 1627975785,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 10717,
+ "pstart": 362369675,
+ "starttime": 1627975784,
+ "status": "command 'ifreload -a' failed: exit code 1",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
+ "user": "root@pam"
+ },
+ {
+ "endtime": 1627975503,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 6778,
+ "pstart": 362341540,
+ "starttime": 1627975503,
+ "status": "OK",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
+ "user": "root@pam"
+ }
+]
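+
+# EXPECTED_TASKS mirrors TASKS with a derived 'failed' flag, set to True
+# whenever the recorded task status is not 'OK'.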
+EXPECTED_TASKS = [
+ {
+ "endtime": 1629092710,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 3539,
+ "pstart": 474062216,
+ "starttime": 1629092709,
+ "status": "OK",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:00000DD3:1C419D88:6119FB65:srvreload:networking:root@pam:",
+ "user": "root@pam",
+ "failed": False
+ },
+ {
+ "endtime": 1627975785,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 10717,
+ "pstart": 362369675,
+ "starttime": 1627975784,
+ "status": "command 'ifreload -a' failed: exit code 1",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
+ "user": "root@pam",
+ "failed": True
+ },
+ {
+ "endtime": 1627975503,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 6778,
+ "pstart": 362341540,
+ "starttime": 1627975503,
+ "status": "OK",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:00001A7A:1598E4A4:6108EF4F:srvreload:networking:root@pam:",
+ "user": "root@pam",
+ "failed": False
+ }
+]
+
+EXPECTED_SINGLE_TASK = [
+ {
+ "endtime": 1627975785,
+ "id": "networking",
+ "node": "iaclab-01-01",
+ "pid": 10717,
+ "pstart": 362369675,
+ "starttime": 1627975784,
+ "status": "command 'ifreload -a' failed: exit code 1",
+ "type": "srvreload",
+ "upid": "UPID:iaclab-01-01:000029DD:1599528B:6108F068:srvreload:networking:root@pam:",
+ "user": "root@pam",
+ "failed": True
+ },
+]
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_without_required_parameters(connect_mock, capfd, mocker):
+ set_module_args({})
+ with pytest.raises(SystemExit):
+ proxmox_tasks_info.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+
+
+def mock_api_tasks_response(mocker):
+ m = mocker.MagicMock()
+ g = mocker.MagicMock()
+ m.nodes = mocker.MagicMock(return_value=g)
+ g.tasks.get = mocker.MagicMock(return_value=TASKS)
+ return m
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_get_tasks(connect_mock, capfd, mocker):
+ set_module_args({'api_host': 'proxmoxhost',
+ 'api_user': 'root@pam',
+ 'api_password': 'supersecret',
+ 'node': NODE})
+
+ connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
+ proxmox_utils.HAS_PROXMOXER = True
+
+ with pytest.raises(SystemExit):
+ proxmox_tasks_info.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert len(json.loads(out)['proxmox_tasks']) != 0
+ assert not json.loads(out)['changed']
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_get_single_task(connect_mock, capfd, mocker):
+ set_module_args({'api_host': 'proxmoxhost',
+ 'api_user': 'root@pam',
+ 'api_password': 'supersecret',
+ 'node': NODE,
+ 'task': TASK_UPID})
+
+ connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
+ proxmox_utils.HAS_PROXMOXER = True
+
+ with pytest.raises(SystemExit):
+ proxmox_tasks_info.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert len(json.loads(out)['proxmox_tasks']) == 1
+ assert json.loads(out)
+ assert not json.loads(out)['changed']
+
+
+@patch('ansible_collections.community.general.plugins.module_utils.proxmox.ProxmoxAnsible._connect')
+def test_get_non_existent_task(connect_mock, capfd, mocker):
+ set_module_args({'api_host': 'proxmoxhost',
+ 'api_user': 'root@pam',
+ 'api_password': 'supersecret',
+ 'node': NODE,
+ 'task': 'UPID:nonexistent'})
+
+ connect_mock.side_effect = lambda: mock_api_tasks_response(mocker)
+ proxmox_utils.HAS_PROXMOXER = True
+
+ with pytest.raises(SystemExit):
+ proxmox_tasks_info.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+ assert 'proxmox_tasks' not in json.loads(out)
+ assert not json.loads(out)['changed']
+ assert json.loads(
+ out)['msg'] == 'Task: UPID:nonexistent does not exist on node: node01.'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_puppet.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_puppet.py
new file mode 100644
index 000000000..f62523e7f
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_puppet.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+# Author: Alexei Znamensky (russoz@gmail.com)
+# Largely adapted from test_redhat_subscription by
+# Jiri Hnidek (jhnidek@redhat.com)
+#
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# Copyright (c) Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from collections import namedtuple
+from ansible_collections.community.general.plugins.modules import puppet
+
+import pytest
+
+TESTED_MODULE = puppet.__name__
+
+
+ModuleTestCase = namedtuple("ModuleTestCase", ["id", "input", "output", "run_command_calls"])
+RunCmdCall = namedtuple("RunCmdCall", ["command", "environ", "rc", "out", "err"])
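+
+# Each ModuleTestCase pairs module input with the exact run_command
+# invocations the module is expected to make (command and environ) and the
+# mocked results to return; test_puppet replays them in order via side_effect.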
+
+
+@pytest.fixture
+def patch_get_bin_path(mocker):
+ """
+ Function used for mocking AnsibleModule.get_bin_path
+ """
+ def mockie(self, path, *args, **kwargs):
+ return "/testbin/{0}".format(path)
+ mocker.patch("ansible.module_utils.basic.AnsibleModule.get_bin_path", mockie)
+
+
+TEST_CASES = [
+ ModuleTestCase(
+ id="puppet_agent_plain",
+ input={},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+ ModuleTestCase(
+ id="puppet_agent_certname",
+ input={"certname": "potatobox"},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", "--certname=potatobox"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+ ModuleTestCase(
+ id="puppet_agent_tags_abc",
+ input={"tags": ["a", "b", "c"]},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", "--tags", "a,b,c"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+ ModuleTestCase(
+ id="puppet_agent_skip_tags_def",
+ input={"skip_tags": ["d", "e", "f"]},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", "--skip_tags", "d,e,f"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+ ModuleTestCase(
+ id="puppet_agent_noop_false",
+ input={"noop": False},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", "--no-noop"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+ ModuleTestCase(
+ id="puppet_agent_noop_true",
+ input={"noop": True},
+ output=dict(changed=False),
+ run_command_calls=[
+ RunCmdCall(
+ command=["/testbin/puppet", "config", "print", "agent_disabled_lockfile"],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="blah, anything",
+ err="",
+ ),
+ RunCmdCall(
+ command=[
+ "/testbin/timeout", "-s", "9", "30m", "/testbin/puppet", "agent", "--onetime", "--no-daemonize",
+ "--no-usecacheonfailure", "--no-splay", "--detailed-exitcodes", "--verbose", "--color", "0", "--noop"
+ ],
+ environ={'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ rc=0,
+ out="",
+ err="",
+ ),
+ ]
+ ),
+]
+TEST_CASES_IDS = [item.id for item in TEST_CASES]
+
+
+@pytest.mark.parametrize("patch_ansible_module, testcase",
+ [[x.input, x] for x in TEST_CASES],
+ ids=TEST_CASES_IDS,
+ indirect=["patch_ansible_module"])
+@pytest.mark.usefixtures("patch_ansible_module")
+def test_puppet(mocker, capfd, patch_get_bin_path, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+ run_cmd_calls = testcase.run_command_calls
+
+ # Mock function used for running commands first
+ call_results = [(x.rc, x.out, x.err) for x in run_cmd_calls]
+ mock_run_command = mocker.patch(
+ "ansible.module_utils.basic.AnsibleModule.run_command",
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ puppet.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % str(testcase))
+ print("results =\n%s" % results)
+
+ assert mock_run_command.call_count == len(run_cmd_calls)
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item.command, item.environ) for item in run_cmd_calls]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
+
+ assert results.get("changed", False) == testcase.output["changed"]
+ if "failed" in testcase:
+ assert results.get("failed", False) == testcase.output["failed"]
+ if "msg" in testcase:
+ assert results.get("msg", "") == testcase.output["msg"]
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_redhat_subscription.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_redhat_subscription.py
new file mode 100644
index 000000000..4bf272916
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_redhat_subscription.py
@@ -0,0 +1,1337 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.plugins.modules import redhat_subscription
+
+import pytest
+
+TESTED_MODULE = redhat_subscription.__name__
+
+
+@pytest.fixture
+def patch_redhat_subscription(mocker):
+ """
+    Mock the filesystem, D-Bus, and privilege checks of the redhat_subscription
+    module so the tests exercise only its run_command interactions
+ """
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.RegistrationBase.REDHAT_REPO')
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.isfile', return_value=False)
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.unlink', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.AnsibleModule.get_bin_path',
+ return_value='/testbin/subscription-manager')
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.Rhsm._can_connect_to_dbus',
+ return_value=False)
+ mocker.patch('ansible_collections.community.general.plugins.modules.redhat_subscription.getuid',
+ return_value=0)
+
+
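+# Illustrative sketch (not an actual test) of how this fixture is consumed;
+# the tests below all follow this shape, differing only in the mocked
+# run_command results and the assertions:
+#
+#   def test_example(mocker, capfd, patch_redhat_subscription):
+#       mocker.patch.object(basic.AnsibleModule, 'run_command',
+#                           return_value=(0, '', ''))
+#       with pytest.raises(SystemExit):
+#           redhat_subscription.main()
+#       results = json.loads(capfd.readouterr()[0])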
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters_unregistered(mocker, capfd, patch_redhat_subscription):
+ """
+    Failure must occur when all required parameters are missing
+ """
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ return_value=(1, 'This system is not yet registered.', ''))
+
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'state is present but any of the following are missing' in results['msg']
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters_registered(mocker, capfd, patch_redhat_subscription):
+ """
+ System already registered, no parameters required (state=present is the
+ default)
+ """
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ return_value=(0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', ''))
+
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert 'changed' in results
+ if 'msg' in results:
+ assert results['msg'] == 'System already registered.'
+
+
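+# Each TEST_CASES entry is a pair: the module parameters fed to
+# patch_ansible_module, followed by the expectations (pytest id, the exact
+# run_command invocations with their keyword arguments and the mocked
+# (rc, stdout, stderr) results, plus the expected 'changed'/'msg' values).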
+TEST_CASES = [
+    # Test the case when the system is already registered
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin'
+ },
+ {
+ 'id': 'test_already_registered_system',
+ 'run_command.calls': [
+ (
+                    # Invocation of the following command will be asserted
+ ['/testbin/subscription-manager', 'identity'],
+                    # Keyword arguments passed to run_command
+ {'check_rc': False},
+                    # Mocked return code, stdout and stderr
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ )
+ ],
+ 'changed': False,
+ 'msg': 'System already registered.'
+ }
+ ],
+ # Already registered system without credentials specified
+ [
+ {
+ 'state': 'present',
+ },
+ {
+            'id': 'test_already_registered_system_no_credentials',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ )
+ ],
+ 'changed': False,
+ 'msg': 'System already registered.'
+ }
+ ],
+ # Test simple registration using username and password
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'satellite.company.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+            'id': 'test_registration_username_password',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'register',
+ '--username', 'admin',
+ '--password', 'admin'],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'satellite.company.com'."
+ }
+ ],
+ # Test simple registration using token
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'satellite.company.com',
+ 'token': 'fake_token',
+ },
+ {
+            'id': 'test_registration_token',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'register',
+ '--token', 'fake_token'],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'satellite.company.com'."
+ }
+ ],
+    # Test unregistration when the system is registered
+ [
+ {
+ 'state': 'absent',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+            'id': 'test_unregistration',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'remove', '--all'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'unregister'],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully unregistered from subscription.rhsm.redhat.com."
+ }
+ ],
+ # Test unregistration of already unregistered system
+ [
+ {
+ 'state': 'absent',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ },
+ {
+            'id': 'test_unregistration_of_unregistered_system',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ )
+ ],
+ 'changed': False,
+ 'msg': "System already unregistered."
+ }
+ ],
+ # Test registration using activation key
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'satellite.company.com',
+ 'activationkey': 'some-activation-key',
+ 'org_id': 'admin'
+ },
+ {
+            'id': 'test_registration_activation_key',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config', '--server.hostname=satellite.company.com'],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--activationkey', 'some-activation-key'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'satellite.company.com'."
+ }
+ ],
+ # Test of registration using username and password with auto-attach option
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true'
+ },
+ {
+            'id': 'test_registration_username_password_auto_attach',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+    # Test forced registration even though the system is already registered
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'force_register': 'true'
+ },
+ {
+            'id': 'test_force_registration_username_password',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'This system already registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--force',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+    # Test registration with arguments that are not register options but need to be configured
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'force_register': 'true',
+ 'server_prefix': '/rhsm',
+ 'server_port': '443'
+ },
+ {
+ 'id': 'test_arguments_not_in_register_options',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'This system already registered.', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'config',
+ '--server.port=443',
+ '--server.prefix=/rhsm'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'register',
+ '--force',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username, password and proxy options
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'force_register': 'true',
+ 'server_proxy_hostname': 'proxy.company.com',
+ 'server_proxy_scheme': 'https',
+ 'server_proxy_port': '12345',
+ 'server_proxy_user': 'proxy_user',
+ 'server_proxy_password': 'secret_proxy_password'
+ },
+ {
+            'id': 'test_registration_username_password_proxy_options',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'This system already registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'config',
+ '--server.proxy_hostname=proxy.company.com',
+ '--server.proxy_password=secret_proxy_password',
+ '--server.proxy_port=12345',
+ '--server.proxy_scheme=https',
+ '--server.proxy_user=proxy_user'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--force',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool': 'ff8080816b8e967f016b8e99632804a6'
+ },
+ {
+            'id': 'test_registration_username_password_pool',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', ''),
+ ),
+ (
+ 'subscription-manager attach --pool ff8080816b8e967f016b8e99632804a6',
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID and quantities
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}]
+ },
+ {
+            'id': 'test_registration_username_password_pool_ids_quantities',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ '--quantity', '2'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9',
+ '--quantity', '4'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID without quantities
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': ['ff8080816b8e967f016b8e99632804a6', 'ff8080816b8e967f016b8e99747107e9']
+ },
+ {
+            'id': 'test_registration_username_password_pool_ids',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password and attach to pool ID (one pool)
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'pool_ids': ['ff8080816b8e967f016b8e99632804a6']
+ },
+ {
+            'id': 'test_registration_username_password_one_pool_id',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+    # Test attaching a different set of pool IDs
+ [
+ {
+ 'state': 'present',
+ 'pool_ids': [{'ff8080816b8e967f016b8e99632804a6': 2}, {'ff8080816b8e967f016b8e99747107e9': 4}]
+ },
+ {
+ 'id': 'test_attaching_different_pool_ids',
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', ''),
+ ),
+ (
+ 'subscription-manager list --consumed',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0, '''
++-------------------------------------------+
+ Consumed Subscriptions
++-------------------------------------------+
+Subscription Name: Multi-Attribute Stackable (4 cores, no content)
+Provides: Multi-Attribute Limited Product (no content)
+SKU: cores4-multiattr
+Contract: 1
+Account: 12331131231
+Serial: 7807912223970164816
+Pool ID: ff8080816b8e967f016b8e995f5103b5
+Provides Management: No
+Active: True
+Quantity Used: 1
+Service Type: Level 3
+Roles:
+Service Level: Premium
+Usage:
+Add-ons:
+Status Details: Subscription is current
+Subscription Type: Stackable
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'remove',
+ '--serial=7807912223970164816',
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ 'subscription-manager list --available',
+ {'check_rc': True, 'environ_update': {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'}},
+ (0,
+ '''
++-------------------------------------------+
+ Available Subscriptions
++-------------------------------------------+
+Subscription Name: SP Smart Management (A: ADDON1)
+Provides: SP Addon 1 bits
+SKU: sp-with-addon-1
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e99747107e9
+Provides Management: Yes
+Available: 10
+Suggested: 1
+Service Type:
+Roles:
+Service Level:
+Usage:
+Add-ons: ADDON1
+Subscription Type: Standard
+Starts: 25.6.2019
+Ends: 24.6.2020
+Entitlement Type: Physical
+
+Subscription Name: SP Server Premium (S: Premium, U: Production, R: SP Server)
+Provides: SP Server Bits
+SKU: sp-server-prem-prod
+Contract: 0
+Pool ID: ff8080816b8e967f016b8e99632804a6
+Provides Management: Yes
+Available: 5
+Suggested: 1
+Service Type: L1-L3
+Roles: SP Server
+Service Level: Premium
+Usage: Production
+Add-ons:
+Subscription Type: Standard
+Starts: 06/25/19
+Ends: 06/24/20
+Entitlement Type: Physical
+
+Subscription Name: Multi-Attribute Stackable (4 cores, no content)
+Provides: Multi-Attribute Limited Product (no content)
+SKU: cores4-multiattr
+Contract: 1
+Pool ID: ff8080816b8e967f016b8e995f5103b5
+Provides Management: No
+Available: 10
+Suggested: 1
+Service Type: Level 3
+Roles:
+Service Level: Premium
+Usage:
+Add-ons:
+Subscription Type: Stackable
+Starts: 11.7.2019
+Ends: 10.7.2020
+Entitlement Type: Physical
+''', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99632804a6',
+ '--quantity', '2'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'attach',
+ '--pool', 'ff8080816b8e967f016b8e99747107e9',
+ '--quantity', '4'
+ ],
+ {'check_rc': True},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ }
+ ]
+]
+
+
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, ids=TEST_CASES_IDS, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_redhat_subscription(mocker, capfd, patch_redhat_subscription, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+ if 'msg' in results:
+ assert results['msg'] == testcase['msg']
+
+ assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls'])
+ if basic.AnsibleModule.run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ assert call_args_list == expected_call_args_list
+
+
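+# SYSPURPOSE_TEST_CASES entries use the same [parameters, expectations] shape
+# as TEST_CASES above, extended with 'existing_syspurpose' (content seeded
+# into the mocked syspurpose.json before the run; None means an empty file)
+# and 'expected_syspurpose' (what the module should leave in that file).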
+SYSPURPOSE_TEST_CASES = [
+    # Test setting syspurpose attributes (system is already registered)
+    # and synchronizing with the candlepin server
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_attributes',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+    # Test setting unsupported attributes
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'foo': 'Bar',
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_wrong_attributes',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {},
+ 'run_command.calls': [],
+ 'failed': True
+ }
+ ],
+    # Test setting addons as a scalar instead of a list
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': 'ADDON1',
+ 'sync': True
+ }
+ },
+ {
+ 'id': 'test_setting_syspurpose_addons_not_list',
+ 'existing_syspurpose': {},
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1']
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+    # Test setting syspurpose attributes (system is already registered)
+    # without synchronizing with the candlepin server. Some syspurpose
+    # attributes were set in the past
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': False
+ }
+ },
+ {
+ 'id': 'test_changing_syspurpose_attributes',
+ 'existing_syspurpose': {
+ 'role': 'CoolOS',
+ 'usage': 'Production',
+ 'service_level_agreement': 'Super',
+ 'addons': [],
+ 'foo': 'bar'
+ },
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'foo': 'bar'
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ ],
+ 'changed': True,
+ 'msg': 'Syspurpose attributes changed.'
+ }
+ ],
+    # Test trying to set syspurpose attributes (system is already registered)
+    # without synchronizing with the candlepin server. Some syspurpose attributes
+    # were set in the past; the requested attributes are the same as before
+ [
+ {
+ 'state': 'present',
+ 'server_hostname': 'subscription.rhsm.redhat.com',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ 'sync': False
+ }
+ },
+ {
+ 'id': 'test_not_changing_syspurpose_attributes',
+ 'existing_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'service_level_agreement': 'Premium',
+ 'addons': ['ADDON1', 'ADDON2'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (0, 'system identity: b26df632-25ed-4452-8f89-0308bfd167cb', '')
+ ),
+ ],
+ 'changed': False,
+ 'msg': 'System already registered.'
+ }
+ ],
+ # Test of registration using username and password with auto-attach option, when
+ # syspurpose attributes are set
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ 'sync': False
+ },
+ },
+ {
+        'id': 'test_registration_username_password_auto_attach_syspurpose',
+ 'existing_syspurpose': None,
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+ # Test of registration using username and password with auto-attach option, when
+ # syspurpose attributes are set. Syspurpose attributes are also synchronized
+ # in this case
+ [
+ {
+ 'state': 'present',
+ 'username': 'admin',
+ 'password': 'admin',
+ 'org_id': 'admin',
+ 'auto_attach': 'true',
+ 'syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ 'sync': True
+ },
+ },
+ {
+        'id': 'test_registration_username_password_auto_attach_syspurpose_sync',
+ 'existing_syspurpose': None,
+ 'expected_syspurpose': {
+ 'role': 'AwesomeOS',
+ 'usage': 'Testing',
+ 'service_level_agreement': 'Super',
+ 'addons': ['ADDON1'],
+ },
+ 'run_command.calls': [
+ (
+ ['/testbin/subscription-manager', 'identity'],
+ {'check_rc': False},
+ (1, 'This system is not yet registered.', '')
+ ),
+ (
+ [
+ '/testbin/subscription-manager',
+ 'register',
+ '--org', 'admin',
+ '--auto-attach',
+ '--username', 'admin',
+ '--password', 'admin'
+ ],
+ {'check_rc': True, 'expand_user_and_vars': False},
+ (0, '', '')
+ ),
+ (
+ ['/testbin/subscription-manager', 'status'],
+ {'check_rc': False},
+ (0, '''
++-------------------------------------------+
+ System Status Details
++-------------------------------------------+
+Overall Status: Current
+
+System Purpose Status: Matched
+''', '')
+ )
+ ],
+ 'changed': True,
+ 'msg': "System successfully registered to 'None'."
+ }
+ ],
+]
+
+
+SYSPURPOSE_TEST_CASES_IDS = [item[1]['id'] for item in SYSPURPOSE_TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', SYSPURPOSE_TEST_CASES, ids=SYSPURPOSE_TEST_CASES_IDS, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_redhat_subscription_syspurpose(mocker, capfd, patch_redhat_subscription, patch_ansible_module, testcase, tmpdir):
+ """
+    Run unit tests for test cases listed in SYSPURPOSE_TEST_CASES (syspurpose specific cases)
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch.object(
+ basic.AnsibleModule,
+ 'run_command',
+ side_effect=call_results)
+
+ mock_syspurpose_file = tmpdir.mkdir("syspurpose").join("syspurpose.json")
+    # When existing syspurpose attributes are specified, write them to the
+    # file first
+ if testcase['existing_syspurpose'] is not None:
+ mock_syspurpose_file.write(json.dumps(testcase['existing_syspurpose']))
+ else:
+ mock_syspurpose_file.write("{}")
+
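+    # Redirect the module's syspurpose file into the pytest-managed tmpdir so
+    # the test can both seed and inspect it without touching the real path.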
+ redhat_subscription.SysPurpose.SYSPURPOSE_FILE_PATH = str(mock_syspurpose_file)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ redhat_subscription.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+
+ if 'failed' in testcase:
+ assert results['failed'] == testcase['failed']
+ else:
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+ if 'msg' in results:
+ assert results['msg'] == testcase['msg']
+
+ mock_file_content = mock_syspurpose_file.read_text("utf-8")
+ current_syspurpose = json.loads(mock_file_content)
+ assert current_syspurpose == testcase['expected_syspurpose']
+
+ assert basic.AnsibleModule.run_command.call_count == len(testcase['run_command.calls'])
+ if basic.AnsibleModule.run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in basic.AnsibleModule.run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ assert call_args_list == expected_call_args_list
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data.py
new file mode 100644
index 000000000..da195f70a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data.py
@@ -0,0 +1,278 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+import json
+from redis import __version__
+
+from ansible_collections.community.general.plugins.modules import redis_data
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+HAS_REDIS_USERNAME_OPTION = True
+if tuple(map(int, __version__.split('.'))) < (3, 4, 0):
+ HAS_REDIS_USERNAME_OPTION = False
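+# Note: this version parse assumes purely numeric components; a pre-release
+# string such as '4.0.0rc1' would raise ValueError here.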
+
+
+def test_redis_data_without_arguments(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ redis_data.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'value': 'baz',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ mocker.patch('redis.Redis.set', return_value=True)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['old_value'] == 'bar'
+ assert json.loads(out)['value'] == 'baz'
+ assert json.loads(out)['msg'] == 'Set key: foo'
+ assert json.loads(out)['changed'] is True
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_existing_key_nx(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'value': 'baz',
+ 'non_existing': True,
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ mocker.patch('redis.Redis.set', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['old_value'] == 'bar'
+ assert 'value' not in json.loads(out)
+ assert json.loads(
+ out)['msg'] == 'Could not set key: foo. Key already present.'
+ assert json.loads(out)['changed'] is False
+ assert json.loads(out)['failed'] is True
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_non_existing_key_xx(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'value': 'baz',
+ 'existing': True,
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value=None)
+ mocker.patch('redis.Redis.set', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['old_value'] is None
+ assert 'value' not in json.loads(out)
+ assert json.loads(
+ out)['msg'] == 'Could not set key: foo. Key not present.'
+ assert json.loads(out)['changed'] is False
+ assert json.loads(out)['failed'] is True
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_delete_present_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'absent'})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ mocker.patch('redis.Redis.delete', return_value=1)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Deleted key: foo'
+ assert json.loads(out)['changed'] is True
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_delete_absent_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'absent'})
+ mocker.patch('redis.Redis.delete', return_value=0)
+ mocker.patch('redis.Redis.get', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Key: foo not present'
+ assert json.loads(out)['changed'] is False
+
+
+@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version >= 3.4.0")
+def test_redis_data_fail_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'value': 'baz',
+ '_ansible_check_mode': False})
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+ assert json.loads(
+ out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.'
+
+
+def test_redis_data_key_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'value': 'baz',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ mocker.patch('redis.Redis.set', return_value=True)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['old_value'] == 'bar'
+ assert json.loads(out)['value'] == 'baz'
+ assert json.loads(out)['msg'] == 'Set key: foo'
+ assert json.loads(out)['changed'] is True
+
+
+def test_redis_delete_key_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'absent',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ mocker.patch('redis.Redis.delete', return_value=1)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Deleted key: foo'
+ assert json.loads(out)['changed'] is True
+
+
+def test_redis_delete_key_non_existent_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'absent',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value=None)
+ mocker.patch('redis.Redis.delete', return_value=0)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Key: foo not present'
+ assert json.loads(out)['changed'] is False
+
+
+def test_redis_set_key_check_mode_nochange(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'present',
+ 'value': 'bar',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Key foo already has desired value'
+ assert json.loads(out)['value'] == 'bar'
+ assert not json.loads(out)['changed']
+ assert json.loads(out)['old_value'] == 'bar'
+
+
+def test_redis_set_key_check_mode_delete_nx(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'present',
+ 'value': 'baz',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Set key: foo'
+ assert json.loads(out)['value'] == 'baz'
+ assert json.loads(out)['old_value'] is None
+
+
+def test_redis_set_key_check_mode_delete(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'present',
+ 'value': 'baz',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Set key: foo'
+ assert json.loads(out)['value'] == 'baz'
+ assert json.loads(out)['old_value'] == 'bar'
+
+
+def test_redis_set_key_check_mode(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'state': 'present',
+ 'value': 'baz',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['msg'] == 'Set key: foo'
+ assert json.loads(out)['value'] == 'baz'
+ assert json.loads(out)['old_value'] == 'bar'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_incr.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_incr.py
new file mode 100644
index 000000000..d819b2f7e
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_incr.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+import json
+import redis
+from redis import __version__
+
+from ansible_collections.community.general.plugins.modules import redis_data_incr
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+
+HAS_REDIS_USERNAME_OPTION = True
+if tuple(map(int, __version__.split('.'))) < (3, 4, 0):
+ HAS_REDIS_USERNAME_OPTION = False
+if HAS_REDIS_USERNAME_OPTION:
+ from redis.exceptions import NoPermissionError
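+# NoPermissionError is only available in newer redis-py releases, hence the
+# conditional import above.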
+
+
+def test_redis_data_incr_without_arguments(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_incr(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo', })
+ mocker.patch('redis.Redis.incr', return_value=57)
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 57.0
+ assert json.loads(
+ out)['msg'] == 'Incremented key: foo to 57'
+ assert json.loads(out)['changed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_incr_int(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'increment_int': 10})
+ mocker.patch('redis.Redis.incrby', return_value=57)
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 57.0
+ assert json.loads(
+ out)['msg'] == 'Incremented key: foo by 10 to 57'
+ assert json.loads(out)['changed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_inc_float(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'increment_float': '5.5'})
+ mocker.patch('redis.Redis.incrbyfloat', return_value=57.45)
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 57.45
+ assert json.loads(
+ out)['msg'] == 'Incremented key: foo by 5.5 to 57.45'
+ assert json.loads(out)['changed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_incr_float_wrong_value(capfd):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'increment_float': 'not_a_number'})
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+
+
+@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version >= 3.4.0")
+def test_redis_data_incr_fail_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+ assert json.loads(
+ out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.'
+
+
+def test_redis_data_incr_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo', })
+ mocker.patch('redis.Redis.incr', return_value=57)
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 57.0
+ assert json.loads(
+ out)['msg'] == 'Incremented key: foo to 57'
+ assert json.loads(out)['changed']
+
+
+def test_redis_data_incr_float_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ 'increment_float': '5.5'})
+ mocker.patch('redis.Redis.incrbyfloat', return_value=57.45)
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 57.45
+ assert json.loads(
+ out)['msg'] == 'Incremented key: foo by 5.5 to 57.45'
+ assert json.loads(out)['changed']
+
+
+def test_redis_data_incr_check_mode(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value=10)
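+    # In check mode the module only reads the current value and reports the
+    # would-be result (10 plus the default increment of 1) without calling
+    # incr, so 'changed' stays False.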
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['value'] == 11.0
+ assert json.loads(out)['msg'] == 'Incremented key: foo by 1 to 11.0'
+ assert not json.loads(out)['changed']
+
+
+def test_redis_data_incr_check_mode_not_incrementable(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': True})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+ assert json.loads(out)[
+ 'msg'] == "Value: bar of key: foo is not incrementable(int or float)"
+ assert 'value' not in json.loads(out)
+ assert not json.loads(out)['changed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_incr_check_mode_permissions(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': True})
+ redis.Redis.get = mocker.Mock(side_effect=NoPermissionError(
+ "this user has no permissions to run the 'get' command or its subcommand"))
+ with pytest.raises(SystemExit):
+ redis_data_incr.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+ assert json.loads(out)['msg'].startswith(
+ 'Failed to get value of key: foo with exception:')
+ assert 'value' not in json.loads(out)
+ assert not json.loads(out)['changed']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_info.py
new file mode 100644
index 000000000..302e003bf
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_data_info.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2021, Andreas Botzner <andreas at botzner dot com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+import json
+from redis import __version__
+
+from ansible_collections.community.general.plugins.modules import redis_data_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+
+HAS_REDIS_USERNAME_OPTION = True
+if tuple(map(int, __version__.split('.'))) < (3, 4, 0):
+ HAS_REDIS_USERNAME_OPTION = False
+
+
+def test_redis_data_info_without_arguments(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ assert not err
+ assert json.loads(out)['failed']
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_info_existing_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['exists']
+ assert json.loads(out)['value'] == 'bar'
+
+
+@pytest.mark.skipif(not HAS_REDIS_USERNAME_OPTION, reason="Redis version < 3.4.0")
+def test_redis_data_info_absent_key(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert not json.loads(out)['exists']
+ assert 'value' not in json.loads(out)
+
+
+@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version >= 3.4.0")
+def test_redis_data_fail_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_user': 'root',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['failed']
+ assert json.loads(
+ out)['msg'] == 'The option `username` in only supported with redis >= 3.4.0.'
+
+
+@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version >= 3.4.0")
+def test_redis_data_info_absent_key_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value=None)
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert not json.loads(out)['exists']
+ assert 'value' not in json.loads(out)
+
+
+@pytest.mark.skipif(HAS_REDIS_USERNAME_OPTION, reason="Redis version >= 3.4.0")
+def test_redis_data_info_existing_key_no_username(capfd, mocker):
+ set_module_args({'login_host': 'localhost',
+ 'login_password': 'secret',
+ 'key': 'foo',
+ '_ansible_check_mode': False})
+ mocker.patch('redis.Redis.get', return_value='bar')
+ with pytest.raises(SystemExit):
+ redis_data_info.main()
+ out, err = capfd.readouterr()
+ print(out)
+ assert not err
+ assert json.loads(out)['exists']
+ assert json.loads(out)['value'] == 'bar'
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_info.py
new file mode 100644
index 000000000..8b30a2316
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_redis_info.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020, Pavlo Bashynskyi (@levonet) <levonet@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.general.plugins.modules import redis_info
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class FakeRedisClient(MagicMock):
+
+ def ping(self):
+ pass
+
+ def info(self):
+ return {'redis_version': '999.999.999'}
+
+
+class FakeRedisClientFail(MagicMock):
+
+ def ping(self):
+ raise Exception('Test Error')
+
+ def info(self):
+ pass
+
+
+class TestRedisInfoModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestRedisInfoModule, self).setUp()
+ redis_info.HAS_REDIS_PACKAGE = True
+ self.module = redis_info
+
+ def tearDown(self):
+ super(TestRedisInfoModule, self).tearDown()
+
+ def patch_redis_client(self, **kwds):
+ return patch('ansible_collections.community.general.plugins.modules.redis_info.redis_client', autospec=True, **kwds)
+
+ def test_without_parameters(self):
+ """Test without parameters"""
+ with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({})
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(redis_client.call_args, ({'host': 'localhost', 'port': 6379, 'password': None},))
+ self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999')
+
+ def test_with_parameters(self):
+ """Test with all parameters"""
+ with self.patch_redis_client(side_effect=FakeRedisClient) as redis_client:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'login_host': 'test',
+ 'login_port': 1234,
+ 'login_password': 'PASS'
+ })
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(redis_client.call_args, ({'host': 'test', 'port': 1234, 'password': 'PASS'},))
+ self.assertEqual(result.exception.args[0]['info']['redis_version'], '999.999.999')
+
+ def test_with_fail_client(self):
+ """Test failure message"""
+ with self.patch_redis_client(side_effect=FakeRedisClientFail) as redis_client:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args({})
+ self.module.main()
+ self.assertEqual(redis_client.call_count, 1)
+ self.assertEqual(result.exception.args[0]['msg'], 'unable to connect to database: Test Error')
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_channel.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_channel.py
new file mode 100644
index 000000000..fd3bdc5fe
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_channel.py
@@ -0,0 +1,147 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis@libregerbil.fr>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import rhn_channel
+
+from .rhn_conftest import mock_request # noqa: F401, pylint: disable=unused-import
+
+import pytest
+
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+def test_without_required_parameters(capfd):
+ with pytest.raises(SystemExit):
+ rhn_channel.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+
+TESTED_MODULE = rhn_channel.__name__
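+# Each TEST_CASES entry pairs the module arguments with the expected results;
+# the 'calls' list holds (XML-RPC method, canned reply) pairs that the
+# mock_request fixture consumes in order.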
+TEST_CASES = [
+ [
+        # add a channel that is already added, check that result isn't changed
+ {
+ 'name': 'rhel-x86_64-server-6',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': False,
+ 'msg': 'Channel rhel-x86_64-server-6 already exists',
+ }
+ ],
+ [
+ # add channel, check that result is changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('system.setChildChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': True,
+ 'msg': 'Channel rhel-x86_64-server-6-debuginfo added',
+ }
+ ],
+ [
+        # remove a nonexistent channel, check that result isn't changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'state': 'absent',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': False,
+ 'msg': 'Not subscribed to channel rhel-x86_64-server-6-debuginfo.',
+ }
+ ],
+ [
+ # remove channel, check that result is changed
+ {
+ 'name': 'rhel-x86_64-server-6-debuginfo',
+ 'state': 'absent',
+ 'sysname': 'server01',
+ 'url': 'https://rhn.redhat.com/rpc/api',
+ 'user': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.listUserSystems',
+ [[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
+ ('channel.software.listSystemChannels', [[
+ {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
+ {'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
+ ]]),
+ ('channel.software.listSystemChannels', [[
+ {'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
+ {'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
+ ]]),
+ ('system.setChildChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'changed': True,
+ 'msg': 'Channel rhel-x86_64-server-6-debuginfo removed'
+ }
+ ]
+]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
+def test_rhn_channel(capfd, mocker, testcase, mock_request):
+ """Check 'msg' and 'changed' results"""
+
+ with pytest.raises(SystemExit):
+ rhn_channel.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['changed'] == testcase['changed']
+ assert results['msg'] == testcase['msg']
+ assert not testcase['calls'] # all calls should have been consumed
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_register.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_register.py
new file mode 100644
index 000000000..1394c07b6
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhn_register.py
@@ -0,0 +1,293 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from ansible_collections.community.general.tests.unit.compat.mock import mock_open
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_native
+import ansible.module_utils.six
+from ansible.module_utils.six.moves import xmlrpc_client
+from ansible_collections.community.general.plugins.modules import rhn_register
+
+from .rhn_conftest import mock_request # noqa: F401, pylint: disable=unused-import
+
+import pytest
+
+
+SYSTEMID = """<?xml version="1.0"?>
+<params>
+<param>
+<value><struct>
+<member>
+<name>system_id</name>
+<value><string>ID-123456789</string></value>
+</member>
+</struct></value>
+</param>
+</params>
+"""
+
+
+def skipWhenAllModulesMissing(modules):
+ """Skip the decorated test unless one of modules is available."""
+ for module in modules:
+ try:
+ __import__(module)
+ return False
+ except ImportError:
+ continue
+
+ return True
+
+
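+# Keep a reference to the real __import__ so the mocked import below can
+# delegate to it for everything except the libxml modules.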
+orig_import = __import__
+
+
+@pytest.fixture
+def import_libxml(mocker):
+ def mock_import(name, *args, **kwargs):
+ if name in ['libxml2', 'libxml']:
+ raise ImportError()
+ else:
+ return orig_import(name, *args, **kwargs)
+
+ if ansible.module_utils.six.PY3:
+ mocker.patch('builtins.__import__', side_effect=mock_import)
+ else:
+ mocker.patch('__builtin__.__import__', side_effect=mock_import)
+
+
+@pytest.fixture
+def patch_rhn(mocker):
+ load_config_return = {
+ 'serverURL': 'https://xmlrpc.rhn.redhat.com/XMLRPC',
+ 'systemIdPath': '/etc/sysconfig/rhn/systemid'
+ }
+
+ mocker.patch.object(rhn_register.Rhn, 'load_config', return_value=load_config_return)
+ mocker.patch.object(rhn_register, 'HAS_UP2DATE_CLIENT', mocker.PropertyMock(return_value=True))
+
+
+@pytest.mark.skipif(skipWhenAllModulesMissing(['libxml2', 'libxml']), reason='none are available: libxml2, libxml')
+def test_systemid_with_requirements(capfd, mocker, patch_rhn):
+ """Check 'msg' and 'changed' results"""
+
+ mocker.patch.object(rhn_register.Rhn, 'enable')
+ mock_isfile = mocker.patch('os.path.isfile', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
+ rhn = rhn_register.Rhn()
+ assert '123456789' == to_native(rhn.systemid)
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_systemid_requirements_missing(capfd, mocker, patch_rhn, import_libxml):
+ """Check that missing dependencies are detected"""
+
+ mocker.patch('os.path.isfile', return_value=True)
+ mocker.patch('ansible_collections.community.general.plugins.modules.rhn_register.open', mock_open(read_data=SYSTEMID), create=True)
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'Missing arguments' in results['msg']
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters(capfd, patch_rhn):
+ """Failure must occurs when all parameters are missing"""
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'Missing arguments' in results['msg']
+
+
+TESTED_MODULE = rhn_register.__name__
+TEST_CASES = [
+ [
+ # Registering an unregistered host with channels
+ {
+ 'channels': 'rhel-x86_64-server-6',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('channel.software.listSystemChannels',
+ [[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
+ ('channel.software.setSystemChannels', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 1,
+ 'systemid.call_count': 2,
+ 'changed': True,
+ 'msg': "System successfully registered to 'rhn.redhat.com'.",
+ 'run_command.call_count': 1,
+ 'run_command.call_args': '/usr/sbin/rhnreg_ks',
+ 'request_called': True,
+ 'unlink.call_count': 0,
+ }
+ ],
+ [
+ # Registering an unregistered host without channels
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 1,
+ 'systemid.call_count': 0,
+ 'changed': True,
+ 'msg': "System successfully registered to 'rhn.redhat.com'.",
+ 'run_command.call_count': 1,
+ 'run_command.call_args': '/usr/sbin/rhnreg_ks',
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ }
+ ],
+ [
+        # Register a host that is already registered, check that result is unchanged
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ },
+ {
+ 'calls': [
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 0,
+ 'changed': False,
+ 'msg': 'System already registered.',
+ 'run_command.call_count': 0,
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ },
+ ],
+ [
+        # Unregister a host, check that result is changed
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.deleteSystems', [1]),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 1,
+ 'changed': True,
+ 'msg': 'System successfully unregistered from rhn.redhat.com.',
+ 'run_command.call_count': 0,
+ 'request_called': True,
+ 'unlink.call_count': 1,
+ }
+ ],
+ [
+        # Unregister an already unregistered host (systemid missing locally), check that result is unchanged
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [],
+ 'is_registered': False,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 0,
+ 'changed': False,
+ 'msg': 'System already unregistered.',
+ 'run_command.call_count': 0,
+ 'request_called': False,
+ 'unlink.call_count': 0,
+ }
+
+ ],
+ [
+        # Unregister an unknown host (a host with a systemid available locally); check that result contains 'failed'
+ {
+ 'activationkey': 'key',
+ 'username': 'user',
+ 'password': 'pass',
+ 'state': 'absent',
+ },
+ {
+ 'calls': [
+ ('auth.login', ['X' * 43]),
+ ('system.deleteSystems', xmlrpc_client.Fault(1003, 'The following systems were NOT deleted: 123456789')),
+ ('auth.logout', [1]),
+ ],
+ 'is_registered': True,
+ 'is_registered.call_count': 1,
+ 'enable.call_count': 0,
+ 'systemid.call_count': 1,
+ 'failed': True,
+ 'msg': "Failed to unregister: <Fault 1003: 'The following systems were NOT deleted: 123456789'>",
+ 'run_command.call_count': 0,
+ 'request_called': True,
+ 'unlink.call_count': 0,
+ }
+ ],
+]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_register_parameters(mocker, capfd, mock_request, patch_rhn, testcase):
+ # successful execution, no output
+ mocker.patch.object(basic.AnsibleModule, 'run_command', return_value=(0, '', ''))
+ mock_is_registered = mocker.patch.object(rhn_register.Rhn, 'is_registered', mocker.PropertyMock(return_value=testcase['is_registered']))
+ mocker.patch.object(rhn_register.Rhn, 'enable')
+ mock_systemid = mocker.patch.object(rhn_register.Rhn, 'systemid', mocker.PropertyMock(return_value=12345))
+ mocker.patch('os.unlink', return_value=True)
+
+ with pytest.raises(SystemExit):
+ rhn_register.main()
+
+ assert basic.AnsibleModule.run_command.call_count == testcase['run_command.call_count']
+ if basic.AnsibleModule.run_command.call_count:
+ assert basic.AnsibleModule.run_command.call_args[0][0][0] == testcase['run_command.call_args']
+
+ assert mock_is_registered.call_count == testcase['is_registered.call_count']
+ assert rhn_register.Rhn.enable.call_count == testcase['enable.call_count']
+ assert mock_systemid.call_count == testcase['systemid.call_count']
+ assert xmlrpc_client.Transport.request.called == testcase['request_called']
+ assert os.unlink.call_count == testcase['unlink.call_count']
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get('changed') == testcase.get('changed')
+ assert results.get('failed') == testcase.get('failed')
+ assert results['msg'] == testcase['msg']
+ assert not testcase['calls'] # all calls should have been consumed
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_rhsm_release.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhsm_release.py
new file mode 100644
index 000000000..c5696962b
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_rhsm_release.py
@@ -0,0 +1,148 @@
+# Copyright (c) 2018, Sean Myers <sean.myers@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.plugins.modules import rhsm_release
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args)
+
+
+class RhsmRepositoryReleaseModuleTestCase(ModuleTestCase):
+ module = rhsm_release
+
+ def setUp(self):
+ super(RhsmRepositoryReleaseModuleTestCase, self).setUp()
+
+ # Mainly interested that the subscription-manager calls are right
+ # based on the module args, so patch out run_command in the module.
+ # returns (rc, out, err) structure
+ self.mock_run_command = patch('ansible_collections.community.general.plugins.modules.rhsm_release.'
+ 'AnsibleModule.run_command')
+ self.module_main_command = self.mock_run_command.start()
+
+ # Module does a get_bin_path check before every run_command call
+ self.mock_get_bin_path = patch('ansible_collections.community.general.plugins.modules.rhsm_release.'
+ 'AnsibleModule.get_bin_path')
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.get_bin_path.return_value = '/testbin/subscription-manager'
+
+ # subscription-manager needs to be run as root
+ self.mock_os_getuid = patch('ansible_collections.community.general.plugins.modules.rhsm_release.'
+ 'os.getuid')
+ self.os_getuid = self.mock_os_getuid.start()
+ self.os_getuid.return_value = 0
+
+ def tearDown(self):
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ self.mock_os_getuid.stop()
+ super(RhsmRepositoryReleaseModuleTestCase, self).tearDown()
+
+ def module_main(self, exit_exc):
+ with self.assertRaises(exit_exc) as exc:
+ self.module.main()
+ return exc.exception.args[0]
+
+ def test_release_set(self):
+ # test that the module attempts to change the release when the current
+        # release is not the same as the user-specified target release
+ set_module_args({'release': '7.5'})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns different version so set_release is called
+ (0, '7.4', ''),
+ # second call, set_release: just needs to exit with 0 rc
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertEqual('7.5', result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ call('/testbin/subscription-manager release --set 7.5', check_rc=True),
+ ])
+
+ def test_release_set_idempotent(self):
+ # test that the module does not attempt to change the release when
+ # the current release matches the user-specified target release
+ set_module_args({'release': '7.5'})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns same version, set_release is not called
+ (0, '7.5', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.assertEqual('7.5', result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ ])
+
+ def test_release_unset(self):
+        # test that the module attempts to unset the release when a release
+        # is currently set
+ set_module_args({'release': None})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns version so set_release is called
+ (0, '7.5', ''),
+ # second call, set_release: just needs to exit with 0 rc
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertIsNone(result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ call('/testbin/subscription-manager release --unset', check_rc=True),
+ ])
+
+ def test_release_unset_idempotent(self):
+        # test that the module does not attempt to unset the release when
+        # no release is currently set
+ set_module_args({'release': None})
+ self.module_main_command.side_effect = [
+ # first call, get_release: returns no version, set_release is not called
+ (0, 'Release not set', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.assertIsNone(result['current_release'])
+ self.module_main_command.assert_has_calls([
+ call('/testbin/subscription-manager release --show', check_rc=True),
+ ])
+
+ def test_release_insane(self):
+ # test that insane values for release trigger fail_json
+ insane_value = 'this is an insane release value'
+ set_module_args({'release': insane_value})
+
+ result = self.module_main(AnsibleFailJson)
+
+ # also ensure that the fail msg includes the insane value
+ self.assertIn(insane_value, result['msg'])
+
+ def test_release_matcher(self):
+ # throw a few values at the release matcher -- only sane_values should match
+ sane_values = ['1Server', '1Client', '10Server', '1.10', '10.0', '9']
+ insane_values = [
+ '6server', # lowercase 's'
+ '100Server', # excessively long 'x' component
+ '100.100', # excessively long 'x' and 'y' components
+ '+.-', # illegal characters
+ ]
+
+ matches = self.module.release_matcher.findall(' '.join(sane_values + insane_values))
+
+ # matches should be returned in the same order they were parsed,
+ # so sorting shouldn't be necessary here
+ self.assertEqual(matches, sane_values)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_rpm_ostree_pkg.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_rpm_ostree_pkg.py
new file mode 100644
index 000000000..4888b6402
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_rpm_ostree_pkg.py
@@ -0,0 +1,108 @@
+#
+# Copyright (c) 2021, Abhijeet Kasurde <akasurde@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import call, patch
+from ansible_collections.community.general.plugins.modules import rpm_ostree_pkg
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args)
+
+
+class RpmOSTreeModuleTestCase(ModuleTestCase):
+ module = rpm_ostree_pkg
+
+ def setUp(self):
+ super(RpmOSTreeModuleTestCase, self).setUp()
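+        # Patch run_command and get_bin_path so the tests never invoke a
+        # real rpm-ostree binary.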
+ ansible_module_path = "ansible_collections.community.general.plugins.modules.rpm_ostree_pkg.AnsibleModule"
+ self.mock_run_command = patch('%s.run_command' % ansible_module_path)
+ self.module_main_command = self.mock_run_command.start()
+ self.mock_get_bin_path = patch('%s.get_bin_path' % ansible_module_path)
+ self.get_bin_path = self.mock_get_bin_path.start()
+ self.get_bin_path.return_value = '/testbin/rpm-ostree'
+
+ def tearDown(self):
+ self.mock_run_command.stop()
+ self.mock_get_bin_path.stop()
+ super(RpmOSTreeModuleTestCase, self).tearDown()
+
+ def module_main(self, exit_exc):
+ with self.assertRaises(exit_exc) as exc:
+ self.module.main()
+ return exc.exception.args[0]
+
+ def test_present(self):
+ set_module_args({'name': 'nfs-utils', 'state': 'present'})
+ self.module_main_command.side_effect = [
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertEqual(['nfs-utils'], result['packages'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']),
+ ])
+
+ def test_present_unchanged(self):
+ set_module_args({'name': 'nfs-utils', 'state': 'present'})
+ self.module_main_command.side_effect = [
+ (77, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertFalse(result['changed'])
+ self.assertEqual(0, result['rc'])
+ self.assertEqual(['nfs-utils'], result['packages'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']),
+ ])
+
+ def test_present_failed(self):
+ set_module_args({'name': 'nfs-utils', 'state': 'present'})
+ self.module_main_command.side_effect = [
+ (1, '', ''),
+ ]
+
+ result = self.module_main(AnsibleFailJson)
+
+ self.assertFalse(result['changed'])
+ self.assertEqual(1, result['rc'])
+ self.assertEqual(['nfs-utils'], result['packages'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/rpm-ostree', 'install', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']),
+ ])
+
+ def test_absent(self):
+ set_module_args({'name': 'nfs-utils', 'state': 'absent'})
+ self.module_main_command.side_effect = [
+ (0, '', ''),
+ ]
+
+ result = self.module_main(AnsibleExitJson)
+
+ self.assertTrue(result['changed'])
+ self.assertEqual(['nfs-utils'], result['packages'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/rpm-ostree', 'uninstall', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']),
+ ])
+
+ def test_absent_failed(self):
+ set_module_args({'name': 'nfs-utils', 'state': 'absent'})
+ self.module_main_command.side_effect = [
+ (1, '', ''),
+ ]
+
+ result = self.module_main(AnsibleFailJson)
+
+ self.assertFalse(result['changed'])
+ self.assertEqual(1, result['rc'])
+ self.assertEqual(['nfs-utils'], result['packages'])
+ self.module_main_command.assert_has_calls([
+ call(['/testbin/rpm-ostree', 'uninstall', '--allow-inactive', '--idempotent', '--unchanged-exit-77', 'nfs-utils']),
+ ])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_sap_task_list_execute.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_sap_task_list_execute.py
new file mode 100644
index 000000000..34c97c4a8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_sap_task_list_execute.py
@@ -0,0 +1,91 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
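+# Stub the optional SAP dependencies in sys.modules so that importing the
+# module below succeeds even when pyrfc and xmltodict are not installed.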
+sys.modules['pyrfc'] = MagicMock()
+sys.modules['pyrfc.Connection'] = MagicMock()
+sys.modules['xmltodict'] = MagicMock()
+sys.modules['xmltodict.parse'] = MagicMock()
+
+from ansible_collections.community.general.plugins.modules import sap_task_list_execute
+
+
+class TestSAPRfcModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestSAPRfcModule, self).setUp()
+ self.module = sap_task_list_execute
+
+ def tearDown(self):
+ super(TestSAPRfcModule, self).tearDown()
+
+ def define_rfc_connect(self, mocker):
+        return mocker.patch.object(self.module, 'call_rfc_method')
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_error_no_task_list(self):
+ """tests fail to exec task list"""
+
+ set_module_args({
+ "conn_username": "DDIC",
+ "conn_password": "Test1234",
+ "host": "10.1.8.9",
+ "task_to_execute": "SAP_BASIS_SSL_CHECK"
+ })
+
+ with patch.object(self.module, 'Connection') as conn:
+ conn.return_value = ''
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+ self.assertEqual(result.exception.args[0]['msg'], 'The task list does not exsist.')
+
+ def test_success(self):
+ """test execute task list success"""
+
+ set_module_args({
+ "conn_username": "DDIC",
+ "conn_password": "Test1234",
+ "host": "10.1.8.9",
+ "task_to_execute": "SAP_BASIS_SSL_CHECK"
+ })
+ with patch.object(self.module, 'xml_to_dict') as XML:
+ XML.return_value = {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully',
+ 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO',
+ 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X',
+ 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None, 'ACTION_CONFIRM': None,
+ 'ACTION_MAINTAIN': None}}]}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ sap_task_list_execute.main()
+ self.assertEqual(result.exception.args[0]['out'], {'item': [{'TASK': {'CHECK_STATUS_DESCR': 'Check successfully',
+ 'STATUS_DESCR': 'Executed successfully', 'TASKNAME': 'CL_STCT_CHECK_SEC_CRYPTO',
+ 'LNR': '1', 'DESCRIPTION': 'Check SAP Cryptographic Library', 'DOCU_EXIST': 'X',
+ 'LOG_EXIST': 'X', 'ACTION_SKIP': None, 'ACTION_UNSKIP': None,
+ 'ACTION_CONFIRM': None, 'ACTION_MAINTAIN': None}}]})
+
+ def test_success_no_log(self):
+ """test execute task list success without logs"""
+
+ set_module_args({
+ "conn_username": "DDIC",
+ "conn_password": "Test1234",
+ "host": "10.1.8.9",
+ "task_to_execute": "SAP_BASIS_SSL_CHECK"
+ })
+ with patch.object(self.module, 'xml_to_dict') as XML:
+ XML.return_value = "No logs available."
+ with self.assertRaises(AnsibleExitJson) as result:
+ sap_task_list_execute.main()
+ self.assertEqual(result.exception.args[0]['out'], 'No logs available.')
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_sapcar_extract.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_sapcar_extract.py
new file mode 100644
index 000000000..bec9cf886
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_sapcar_extract.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Rainer Leber (@rainerleber) <rainerleber@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import sapcar_extract
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+
+
+def get_bin_path(*args, **kwargs):
+ """Function to return path of SAPCAR"""
+ return "/tmp/sapcar"
+
+
+class TestSapcarExtract(ModuleTestCase):
+ """Main class for testing sapcar_extract module."""
+
+ def setUp(self):
+ """Setup."""
+        super(TestSapcarExtract, self).setUp()
+ self.module = sapcar_extract
+ self.mock_get_bin_path = patch.object(basic.AnsibleModule, 'get_bin_path', get_bin_path)
+ self.mock_get_bin_path.start()
+ self.addCleanup(self.mock_get_bin_path.stop) # ensure that the patching is 'undone'
+
+ def tearDown(self):
+ """Teardown."""
+        super(TestSapcarExtract, self).tearDown()
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing."""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_sapcar_extract(self):
+ """Check that result is changed."""
+ set_module_args({
+ 'path': "/tmp/HANA_CLIENT_REV2_00_053_00_LINUX_X86_64.SAR",
+ 'dest': "/tmp/test2",
+ 'binary_path': "/tmp/sapcar"
+ })
+ with patch.object(basic.AnsibleModule, 'run_command') as run_command:
+ run_command.return_value = 0, '', '' # successful execution, no output
+ with self.assertRaises(AnsibleExitJson) as result:
+ sapcar_extract.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+ self.assertEqual(run_command.call_count, 1)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_compute_private_network.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_compute_private_network.py
new file mode 100644
index 000000000..df6fd91a4
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_compute_private_network.py
@@ -0,0 +1,180 @@
+# Copyright (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import pytest
+
+
+from ansible_collections.community.general.plugins.modules import scaleway_compute_private_network
+from ansible_collections.community.general.plugins.module_utils.scaleway import Scaleway, Response
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+
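+# Each helper below builds a canned Scaleway API reply; Response(None, info)
+# mimics the (response, info) pair returned by the HTTP layer.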
+def response_without_nics():
+ info = {"status": 200,
+ "body": '{ "private_nics": []}'
+ }
+ return Response(None, info)
+
+
+def response_with_nics():
+ info = {"status": 200,
+ "body": ('{ "private_nics": [{'
+ '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"mac_address": "02:00:00:00:12:23",'
+ '"state": "available",'
+ '"creation_date": "2022-03-30T06:25:28.155973+00:00",'
+ '"modification_date": "2022-03-30T06:25:28.155973+00:00",'
+ '"zone": "fr-par-1"'
+ '}]}'
+ )
+ }
+ return Response(None, info)
+
+
+def response_when_add_nics():
+ info = {"status": 200,
+ "body": ('{ "private_nics": {'
+ '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"private_network_id": "b589b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"server_id": "c004b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"mac_address": "02:00:00:00:12:23",'
+ '"state": "available",'
+ '"creation_date": "2022-03-30T06:25:28.155973+00:00",'
+ '"modification_date": "2022-03-30T06:25:28.155973+00:00",'
+ '"zone": "fr-par-1"'
+ '}}'
+ )
+ }
+ return Response(None, info)
+
+
+def response_remove_nics():
+ info = {"status": 200}
+ return Response(None, info)
+
+
+def test_scaleway_private_network_without_arguments(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ scaleway_compute_private_network.main()
+ out, err = capfd.readouterr()
+
+ assert not err
+ assert json.loads(out)['failed']
+
+
+def test_scaleway_add_nic(capfd):
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
+ cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
+ url = 'servers/' + cid + '/private_nics'
+
+ set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "state": "present",
+ "region": "par1",
+ "compute_id": cid,
+ "private_network_id": pnid
+ })
+
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_without_nics()
+ with patch.object(Scaleway, 'post') as mock_scw_post:
+ mock_scw_post.return_value = response_when_add_nics()
+ with pytest.raises(SystemExit) as results:
+ scaleway_compute_private_network.main()
+ mock_scw_post.assert_any_call(path=url, data={"private_network_id": pnid})
+ mock_scw_get.assert_any_call(url)
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+ assert not err
+ assert json.loads(out)['changed']
+
+
+def test_scaleway_add_existing_nic(capfd):
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
+ cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
+ url = 'servers/' + cid + '/private_nics'
+
+ set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "state": "present",
+ "region": "par1",
+ "compute_id": cid,
+ "private_network_id": pnid
+ })
+
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_nics()
+ with pytest.raises(SystemExit) as results:
+ scaleway_compute_private_network.main()
+ mock_scw_get.assert_any_call(url)
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+ assert not err
+ assert not json.loads(out)['changed']
+
+
+def test_scaleway_remove_existing_nic(capfd):
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
+ cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
+ nicid = 'c123b4cd-ef5g-678h-90i1-jk2345678l90'
+ url = 'servers/' + cid + '/private_nics'
+ urlremove = 'servers/' + cid + '/private_nics/' + nicid
+
+ set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "state": "absent",
+ "region": "par1",
+ "compute_id": cid,
+ "private_network_id": pnid
+ })
+
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_nics()
+ with patch.object(Scaleway, 'delete') as mock_scw_delete:
+ mock_scw_delete.return_value = response_remove_nics()
+ with pytest.raises(SystemExit) as results:
+ scaleway_compute_private_network.main()
+ mock_scw_delete.assert_any_call(urlremove)
+ mock_scw_get.assert_any_call(url)
+
+ out, err = capfd.readouterr()
+
+ del os.environ['SCW_API_TOKEN']
+ assert not err
+ assert json.loads(out)['changed']
+
+
+def test_scaleway_remove_absent_nic(capfd):
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ pnid = 'b589b4cd-ef5g-678h-90i1-jk2345678l90'
+ cid = 'c004b4cd-ef5g-678h-90i1-jk2345678l90'
+ url = 'servers/' + cid + '/private_nics'
+
+ set_module_args({"project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "state": "absent",
+ "region": "par1",
+ "compute_id": cid,
+ "private_network_id": pnid
+ })
+
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_without_nics()
+ with pytest.raises(SystemExit) as results:
+ scaleway_compute_private_network.main()
+ mock_scw_get.assert_any_call(url)
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+ assert not err
+ assert not json.loads(out)['changed']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_private_network.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_private_network.py
new file mode 100644
index 000000000..21805d3db
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_scaleway_private_network.py
@@ -0,0 +1,198 @@
+# Copyright (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import pytest
+
+
+from ansible_collections.community.general.plugins.modules import scaleway_private_network
+from ansible_collections.community.general.plugins.module_utils.scaleway import Scaleway, Response
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+
+
+def response_with_zero_network():
+ info = {"status": 200,
+ "body": '{ "private_networks": [], "total_count": 0}'
+ }
+ return Response(None, info)
+
+
+def response_with_new_network():
+ info = {"status": 200,
+ "body": ('{ "private_networks": [{'
+ '"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"name": "new_network_name",'
+ '"tags": ["tag1"]'
+ '}], "total_count": 1}'
+ )
+ }
+ return Response(None, info)
+
+
+def response_create_new():
+ info = {"status": 200,
+ "body": ('{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"name": "anoter_network",'
+ '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"zone": "fr-par-2",'
+ '"tags": ["tag1"],'
+ '"created_at": "2019-04-18T15:27:24.177854Z",'
+ '"updated_at": "2019-04-18T15:27:24.177854Z"}'
+ )
+ }
+ return Response(None, info)
+
+
+def response_create_new_newtag():
+ info = {"status": 200,
+ "body": ('{"id": "c123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"name": "anoter_network",'
+ '"organization_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"project_id": "a123b4cd-ef5g-678h-90i1-jk2345678l90",'
+ '"zone": "fr-par-2",'
+ '"tags": ["newtag"],'
+ '"created_at": "2019-04-18T15:27:24.177854Z",'
+ '"updated_at": "2020-01-18T15:27:24.177854Z"}'
+ )
+ }
+ return Response(None, info)
+
+
+def response_delete():
+ info = {"status": 204}
+ return Response(None, info)
+
+
+def test_scaleway_private_network_without_arguments(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ out, err = capfd.readouterr()
+
+ assert not err
+ assert json.loads(out)['failed']
+
+
+def test_scaleway_create_pn(capfd):
+ set_module_args({"state": "present",
+ "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "region": "par2",
+ "name": "new_network_name",
+ "tags": ["tag1"]
+ })
+
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_zero_network()
+ with patch.object(Scaleway, 'post') as mock_scw_post:
+ mock_scw_post.return_value = response_create_new()
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ mock_scw_post.assert_any_call(path='private-networks/', data={'name': 'new_network_name',
+ 'project_id': 'a123b4cd-ef5g-678h-90i1-jk2345678l90',
+ 'tags': ['tag1']})
+ mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10})
+
+ out, err = capfd.readouterr()
+    del os.environ['SCW_API_TOKEN']
+
+    assert not err
+    assert json.loads(out)['changed']
+
+
+def test_scaleway_existing_pn(capfd):
+ set_module_args({"state": "present",
+ "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "region": "par2",
+ "name": "new_network_name",
+ "tags": ["tag1"]
+ })
+
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_new_network()
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10})
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+
+ assert not err
+ assert not json.loads(out)['changed']
+
+
+def test_scaleway_add_tag_pn(capfd):
+ set_module_args({"state": "present",
+ "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "region": "par2",
+ "name": "new_network_name",
+ "tags": ["newtag"]
+ })
+
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_new_network()
+ with patch.object(Scaleway, 'patch') as mock_scw_patch:
+ mock_scw_patch.return_value = response_create_new_newtag()
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ mock_scw_patch.assert_any_call(path='private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90', data={'name': 'new_network_name', 'tags': ['newtag']})
+ mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10})
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+
+ assert not err
+ assert json.loads(out)['changed']
+
+
+def test_scaleway_remove_pn(capfd):
+ set_module_args({"state": "absent",
+ "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "region": "par2",
+ "name": "new_network_name",
+ "tags": ["newtag"]
+ })
+
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_new_network()
+ with patch.object(Scaleway, 'delete') as mock_scw_delete:
+ mock_scw_delete.return_value = response_delete()
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ mock_scw_delete.assert_any_call('private-networks/c123b4cd-ef5g-678h-90i1-jk2345678l90')
+ mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10})
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+
+ assert not err
+ assert json.loads(out)['changed']
+
+
+def test_scaleway_absent_pn_not_exists(capfd):
+ set_module_args({"state": "absent",
+ "project": "a123b4cd-ef5g-678h-90i1-jk2345678l90",
+ "region": "par2",
+ "name": "new_network_name",
+ "tags": ["newtag"]
+ })
+
+ os.environ['SCW_API_TOKEN'] = 'notrealtoken'
+ with patch.object(Scaleway, 'get') as mock_scw_get:
+ mock_scw_get.return_value = response_with_zero_network()
+ with pytest.raises(SystemExit) as results:
+ scaleway_private_network.main()
+ mock_scw_get.assert_any_call('private-networks', params={'name': 'new_network_name', 'order_by': 'name_asc', 'page': 1, 'page_size': 10})
+
+ out, err = capfd.readouterr()
+ del os.environ['SCW_API_TOKEN']
+
+ assert not err
+ assert not json.loads(out)['changed']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_slack.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_slack.py
new file mode 100644
index 000000000..ab4405baa
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_slack.py
@@ -0,0 +1,203 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+from ansible_collections.community.general.tests.unit.compat.mock import Mock, patch
+from ansible_collections.community.general.plugins.modules import slack
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class TestSlackModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestSlackModule, self).setUp()
+ self.module = slack
+
+ def tearDown(self):
+ super(TestSlackModule, self).tearDown()
+
+ @pytest.fixture
+ def fetch_url_mock(self, mocker):
+        return mocker.patch('ansible_collections.community.general.plugins.modules.slack.fetch_url')
+
+ def test_without_required_parameters(self):
+ """Failure must occurs when all parameters are missing"""
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ self.module.main()
+
+ def test_invalid_old_token(self):
+ """Failure if there is an old style token"""
+ set_module_args({
+ 'token': 'test',
+ })
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_successful_message(self):
+ """tests sending a message. This is example 1 from the docs"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['text'] == "test"
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ def test_failed_message(self):
+ """tests failing to send a message"""
+
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 404, 'msg': 'test'})
+ with self.assertRaises(AnsibleFailJson):
+ self.module.main()
+
+ def test_message_with_thread(self):
+ """tests sending a message with a thread"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'thread_id': '100.00'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['text'] == "test"
+ assert call_data['thread_ts'] == '100.00'
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ # https://github.com/ansible-collections/community.general/issues/1097
+ def test_ts_in_message_does_not_cause_edit(self):
+ set_module_args({
+ 'token': 'xoxa-123456789abcdef',
+ 'msg': 'test with ts'
+ })
+
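+        # An xoxa-style API token (rather than a webhook token) should make
+        # the module post via the Web API endpoint asserted below.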
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ mock_response = Mock()
+ mock_response.read.return_value = '{"fake":"data"}'
+ fetch_url_mock.return_value = (mock_response, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+        self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.postMessage")
+
+ def test_edit_message(self):
+ set_module_args({
+ 'token': 'xoxa-123456789abcdef',
+ 'msg': 'test2',
+ 'message_id': '12345'
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ mock_response = Mock()
+ mock_response.read.return_value = '{"messages":[{"ts":"12345","msg":"test1"}]}'
+ fetch_url_mock.side_effect = [
+ (mock_response, {"status": 200}),
+ (mock_response, {"status": 200}),
+ ]
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 2)
+        self.assertEqual(fetch_url_mock.call_args[1]['url'], "https://slack.com/api/chat.update")
+        call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+        self.assertEqual(call_data['ts'], "12345")
+
+ def test_message_with_blocks(self):
+ """tests sending a message with blocks"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'blocks': [{
+ 'type': 'section',
+ 'text': {
+ 'type': 'mrkdwn',
+ 'text': '*test*'
+ },
+ 'accessory': {
+ 'type': 'image',
+ 'image_url': 'https://docs.ansible.com/favicon.ico',
+ 'alt_text': 'test'
+ }
+ }, {
+ 'type': 'section',
+ 'text': {
+ 'type': 'plain_text',
+ 'text': 'test',
+ 'emoji': True
+ }
+ }]
+ })
+
+ with patch.object(slack, "fetch_url") as fetch_url_mock:
+ fetch_url_mock.return_value = (None, {"status": 200})
+ with self.assertRaises(AnsibleExitJson):
+ self.module.main()
+
+        self.assertEqual(fetch_url_mock.call_count, 1)
+ call_data = json.loads(fetch_url_mock.call_args[1]['data'])
+ assert call_data['username'] == "Ansible"
+ assert call_data['blocks'][1]['text']['text'] == "test"
+ assert fetch_url_mock.call_args[1]['url'] == "https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
+
+ def test_message_with_invalid_color(self):
+ """tests sending invalid color value to module"""
+ set_module_args({
+ 'token': 'XXXX/YYYY/ZZZZ',
+ 'msg': 'test',
+ 'color': 'aa',
+ })
+ with self.assertRaises(AnsibleFailJson) as exec_info:
+ self.module.main()
+
+ msg = "Color value specified should be either one of" \
+ " ['normal', 'good', 'warning', 'danger'] or any valid" \
+ " hex value with length 3 or 6."
+ assert exec_info.exception.args[0]['msg'] == msg
+
+
+color_test = [
+ ('#111111', True),
+ ('#00aabb', True),
+ ('#abc', True),
+ ('#gghhjj', False),
+ ('#ghj', False),
+ ('#a', False),
+ ('#aaaaaaaa', False),
+ ('', False),
+ ('aaaa', False),
+ ('$00aabb', False),
+ ('$00a', False),
+]
+
+
+@pytest.mark.parametrize("color_value, ret_status", color_test)
+def test_is_valid_hex_color(color_value, ret_status):
+ generated_value = slack.is_valid_hex_color(color_value)
+ assert generated_value == ret_status
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_solaris_zone.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_solaris_zone.py
new file mode 100644
index 000000000..20b550875
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_solaris_zone.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2020 Justin Bronn <jbronn@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import platform
+
+import pytest
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules import (
+ solaris_zone
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ set_module_args,
+)
+
+
+ZONEADM = "/usr/sbin/zoneadm"
+
+
+def mocker_zone_set(mocker, rc=0, out="", err="", zone_exists=False, zone_status=None):
+ """
+ Configure common mocker object for Solaris Zone tests
+ """
+ exists = mocker.patch.object(solaris_zone.Zone, "exists")
+ exists.return_value = zone_exists
+ get_bin_path = mocker.patch.object(AnsibleModule, "get_bin_path")
+ get_bin_path.return_value = ZONEADM
+ run_command = mocker.patch.object(AnsibleModule, "run_command")
+ run_command.return_value = (rc, out, err)
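+    # Pretend to be Solaris 11 so the module's platform and release checks pass.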
+ platform_release = mocker.patch.object(platform, "release")
+ platform_release.return_value = "5.11"
+ platform_system = mocker.patch.object(platform, "system")
+ platform_system.return_value = "SunOS"
+ if zone_status is not None:
+ status = mocker.patch.object(solaris_zone.Zone, "status")
+ status.return_value = zone_status
+
+
+@pytest.fixture
+def mocked_zone_create(mocker):
+ mocker_zone_set(mocker)
+
+
+@pytest.fixture
+def mocked_zone_delete(mocker):
+ mocker_zone_set(mocker, zone_exists=True, zone_status="running")
+
+
+def test_zone_create(mocked_zone_create, capfd):
+ """
+ test zone creation
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "installed",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_delete(mocked_zone_delete, capfd):
+ """
+ test zone deletion
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "absent",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_create_invalid_names(mocked_zone_create, capfd):
+ """
+ test zone creation with invalid names
+ """
+ # 1. Invalid character ('!').
+ # 2. Zone name > 64 characters.
+ # 3. Zone name beginning with non-alphanumeric character.
+ for invalid_name in ('foo!bar', 'z' * 65, '_zone'):
+ set_module_args(
+ {
+ "name": invalid_name,
+ "state": "installed",
+ "path": "/zones/" + invalid_name,
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get("failed")
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ss_3par_cpg.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ss_3par_cpg.py
new file mode 100644
index 000000000..8e935b8de
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ss_3par_cpg.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2018, Hewlett Packard Enterprise Development LP
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import mock
+import sys
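+# Stub the HPE 3PAR SDK modules so the module under test can be imported
+# without the real hpe3par_sdk/hpe3parclient packages.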
+sys.modules['hpe3par_sdk'] = mock.Mock()
+sys.modules['hpe3par_sdk.client'] = mock.Mock()
+sys.modules['hpe3parclient'] = mock.Mock()
+sys.modules['hpe3parclient.exceptions'] = mock.Mock()
+from ansible_collections.community.general.plugins.modules import ss_3par_cpg
+from ansible_collections.community.general.plugins.module_utils.storage.hpe3par import hpe3par
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg')
+def test_module_args(mock_create_cpg, mock_module, mock_client):
+ """
+ hpe3par CPG - test module arguments
+ """
+
+ PARAMS_FOR_PRESENT = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': 'test_domain',
+ 'growth_increment': 32768,
+ 'growth_increment_unit': 'MiB',
+ 'growth_limit': 32768,
+ 'growth_limit_unit': 'MiB',
+ 'growth_warning': 32768,
+ 'growth_warning_unit': 'MiB',
+ 'raid_type': 'R6',
+ 'set_size': 8,
+ 'high_availability': 'MAG',
+ 'disk_type': 'FC',
+ 'state': 'present',
+ 'secure': False
+ }
+ mock_module.params = PARAMS_FOR_PRESENT
+ mock_module.return_value = mock_module
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_create_cpg.return_value = (True, True, "Created CPG successfully.")
+ ss_3par_cpg.main()
+ mock_module.assert_called_with(
+ argument_spec=hpe3par.cpg_argument_spec(),
+ required_together=[['raid_type', 'set_size']])
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.create_cpg')
+def test_main_exit_functionality_present_success_without_issue_attr_dict(mock_create_cpg, mock_module, mock_client):
+ """
+    hpe3par CPG - create success check
+ """
+ PARAMS_FOR_PRESENT = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_name': '3PAR',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': 'test_domain',
+ 'growth_increment': 32768,
+ 'growth_increment_unit': 'MiB',
+ 'growth_limit': 32768,
+ 'growth_limit_unit': 'MiB',
+ 'growth_warning': 32768,
+ 'growth_warning_unit': 'MiB',
+ 'raid_type': 'R6',
+ 'set_size': 8,
+ 'high_availability': 'MAG',
+ 'disk_type': 'FC',
+ 'state': 'present',
+ 'secure': False
+ }
+    # This creates an instance of the AnsibleModule mock.
+ mock_module.params = PARAMS_FOR_PRESENT
+ mock_module.return_value = mock_module
+ instance = mock_module.return_value
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_create_cpg.return_value = (
+ True, True, "Created CPG successfully.")
+ ss_3par_cpg.main()
+ # AnsibleModule.exit_json should be called
+ instance.exit_json.assert_called_with(
+ changed=True, msg="Created CPG successfully.")
+ # AnsibleModule.fail_json should not be called
+ assert instance.fail_json.call_count == 0
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.AnsibleModule')
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.delete_cpg')
+def test_main_exit_functionality_absent_success_without_issue_attr_dict(mock_delete_cpg, mock_module, mock_client):
+ """
+ hpe3par CPG - delete CPG success check
+ """
+ PARAMS_FOR_DELETE = {
+ 'storage_system_ip': '192.168.0.1',
+ 'storage_system_name': '3PAR',
+ 'storage_system_username': 'USER',
+ 'storage_system_password': 'PASS',
+ 'cpg_name': 'test_cpg',
+ 'domain': None,
+ 'growth_increment': None,
+ 'growth_increment_unit': None,
+ 'growth_limit': None,
+ 'growth_limit_unit': None,
+ 'growth_warning': None,
+ 'growth_warning_unit': None,
+ 'raid_type': None,
+ 'set_size': None,
+ 'high_availability': None,
+ 'disk_type': None,
+ 'state': 'absent',
+ 'secure': False
+ }
+ # This creates an instance of the AnsibleModule mock.
+ mock_module.params = PARAMS_FOR_DELETE
+ mock_module.return_value = mock_module
+ instance = mock_module.return_value
+ mock_delete_cpg.return_value = (
+ True, True, "Deleted CPG test_cpg successfully.")
+ mock_client.HPE3ParClient.login.return_value = True
+ ss_3par_cpg.main()
+ # AnsibleModule.exit_json should be called
+ instance.exit_json.assert_called_with(
+ changed=True, msg="Deleted CPG test_cpg successfully.")
+ # AnsibleModule.fail_json should not be called
+ assert instance.fail_json.call_count == 0
+
+
+def test_convert_to_binary_multiple():
+ assert hpe3par.convert_to_binary_multiple(None) == -1
+ assert hpe3par.convert_to_binary_multiple('-1.0 MiB') == -1
+ assert hpe3par.convert_to_binary_multiple('-1.0GiB') == -1
+ assert hpe3par.convert_to_binary_multiple('1.0 MiB') == 1
+ assert hpe3par.convert_to_binary_multiple('1.5GiB') == 1.5 * 1024
+ assert hpe3par.convert_to_binary_multiple('1.5 TiB') == 1.5 * 1024 * 1024
+ assert hpe3par.convert_to_binary_multiple(' 1.5 TiB ') == 1.5 * 1024 * 1024
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+def test_validate_set_size(mock_client):
+ mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]},
+ 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]},
+ 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]},
+ 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]}
+ }
+ raid_type = 'R0'
+ set_size = 1
+ assert ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+ set_size = 2
+ assert not ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+ raid_type = None
+ assert not ss_3par_cpg.validate_set_size(raid_type, set_size)
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+def test_cpg_ldlayout_map(mock_client):
+ mock_client.HPE3ParClient.PORT = 1
+ mock_client.HPE3ParClient.RAID_MAP = {'R0': {'raid_value': 1, 'set_sizes': [1]},
+ 'R1': {'raid_value': 2, 'set_sizes': [2, 3, 4]},
+ 'R5': {'raid_value': 3, 'set_sizes': [3, 4, 5, 6, 7, 8, 9]},
+ 'R6': {'raid_value': 4, 'set_sizes': [6, 8, 10, 12, 16]}
+ }
+ ldlayout_dict = {'RAIDType': 'R6', 'HA': 'PORT'}
+ assert ss_3par_cpg.cpg_ldlayout_map(ldlayout_dict) == {
+ 'RAIDType': 4, 'HA': 1}
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+def test_create_cpg(mock_client):
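+ # Replace the module-level helpers with mocks. Note that these assignments
+ # persist for the remainder of the test run rather than being patch-scoped.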
+ ss_3par_cpg.validate_set_size = mock.Mock(return_value=True)
+ ss_3par_cpg.cpg_ldlayout_map = mock.Mock(
+ return_value={'RAIDType': 4, 'HA': 1})
+
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_client.HPE3ParClient.cpgExists.return_value = False
+ mock_client.HPE3ParClient.FC = 1
+ mock_client.HPE3ParClient.createCPG.return_value = True
+
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768 MiB',
+ '32768 MiB',
+ '32768 MiB',
+ 'R6',
+ 8,
+ 'MAG',
+ 'FC'
+ ) == (True, True, "Created CPG %s successfully." % 'test_cpg')
+
+ mock_client.HPE3ParClient.cpgExists.return_value = True
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768.0 MiB',
+ '32768.0 MiB',
+ '32768.0 MiB',
+ 'R6',
+ 8,
+ 'MAG',
+ 'FC'
+ ) == (True, False, 'CPG already present')
+
+ ss_3par_cpg.validate_set_size = mock.Mock(return_value=False)
+ assert ss_3par_cpg.create_cpg(mock_client.HPE3ParClient,
+ 'test_cpg',
+ 'test_domain',
+ '32768.0 MiB',
+ '32768 MiB',
+ '32768.0 MiB',
+ 'R6',
+ 3,
+ 'MAG',
+ 'FC'
+ ) == (False, False, 'Set size 3 not part of RAID set R6')
+
+
+@mock.patch('ansible_collections.community.general.plugins.modules.ss_3par_cpg.client')
+def test_delete_cpg(mock_client):
+ mock_client.HPE3ParClient.login.return_value = True
+ mock_client.HPE3ParClient.cpgExists.return_value = True
+ mock_client.HPE3ParClient.FC = 1
+ mock_client.HPE3ParClient.deleteCPG.return_value = True
+
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ 'test_cpg'
+ ) == (True, True, "Deleted CPG %s successfully." % 'test_cpg')
+
+ mock_client.HPE3ParClient.cpgExists.return_value = False
+
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ 'test_cpg'
+ ) == (True, False, "CPG does not exist")
+ assert ss_3par_cpg.delete_cpg(mock_client.HPE3ParClient,
+ None
+ ) == (True, False, "CPG does not exist")
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_statsd.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_statsd.py
new file mode 100644
index 000000000..7d458c5eb
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_statsd.py
@@ -0,0 +1,101 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.plugins.modules import statsd
+from ansible_collections.community.general.tests.unit.compat.mock import patch, MagicMock
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
+
+
+class FakeStatsD(MagicMock):
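+ """Minimal no-op stand-in for a statsd client, exposing the calls the module makes."""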
+
+ def incr(self, *args, **kwargs):
+ pass
+
+ def gauge(self, *args, **kwargs):
+ pass
+
+ def close(self, *args, **kwargs):
+ pass
+
+
+class TestStatsDModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestStatsDModule, self).setUp()
+ self._has_statsd = statsd.HAS_STATSD
+ statsd.HAS_STATSD = True
+ self.module = statsd
+
+ def tearDown(self):
+ # Restore the module-level flag so the override does not leak into other tests.
+ statsd.HAS_STATSD = self._has_statsd
+ super(TestStatsDModule, self).tearDown()
+
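+ # Helpers returning patchers for the module's UDP and TCP client factory functions.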
+ def patch_udp_statsd_client(self, **kwargs):
+ return patch('ansible_collections.community.general.plugins.modules.statsd.udp_statsd_client', autospec=True, **kwargs)
+
+ def patch_tcp_statsd_client(self, **kwargs):
+ return patch('ansible_collections.community.general.plugins.modules.statsd.tcp_statsd_client', autospec=True, **kwargs)
+
+ def test_udp_without_parameters(self):
+ """Test udp without parameters"""
+ with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args({})
+ self.module.main()
+
+ def test_tcp_without_parameters(self):
+ """Test tcp without parameters"""
+ with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleFailJson) as result:
+ set_module_args({})
+ self.module.main()
+
+ def test_udp_with_parameters(self):
+ """Test udp with parameters"""
+ with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'metric': 'my_counter',
+ 'metric_type': 'counter',
+ 'value': 1,
+ })
+ self.module.main()
+ self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD')
+ self.assertEqual(result.exception.args[0]['changed'], True)
+ with self.patch_udp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'metric': 'my_gauge',
+ 'metric_type': 'gauge',
+ 'value': 3,
+ })
+ self.module.main()
+ self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD')
+ self.assertEqual(result.exception.args[0]['changed'], True)
+
+ def test_tcp_with_parameters(self):
+ """Test tcp with parameters"""
+ with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'protocol': 'tcp',
+ 'metric': 'my_counter',
+ 'metric_type': 'counter',
+ 'value': 1,
+ })
+ self.module.main()
+ self.assertEqual(result.exception.args[0]['msg'], 'Sent counter my_counter -> 1 to StatsD')
+ self.assertEqual(result.exception.args[0]['changed'], True)
+ with self.patch_tcp_statsd_client(side_effect=FakeStatsD) as fake_statsd:
+ with self.assertRaises(AnsibleExitJson) as result:
+ set_module_args({
+ 'protocol': 'tcp',
+ 'metric': 'my_gauge',
+ 'metric_type': 'gauge',
+ 'value': 3,
+ })
+ self.module.main()
+ self.assertEqual(result.exception.args[0]['msg'], 'Sent gauge my_gauge -> 3 (delta=False) to StatsD')
+ self.assertEqual(result.exception.args[0]['changed'], True)
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_sysupgrade.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_sysupgrade.py
new file mode 100644
index 000000000..77d1f1cd0
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_sysupgrade.py
@@ -0,0 +1,69 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils import basic
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
+from ansible_collections.community.general.plugins.modules import sysupgrade
+
+
+class TestSysupgradeModule(ModuleTestCase):
+
+ def setUp(self):
+ super(TestSysupgradeModule, self).setUp()
+ self.module = sysupgrade
+ self.mock_get_bin_path = patch('ansible.module_utils.basic.AnsibleModule.get_bin_path')
+ self.get_bin_path = self.mock_get_bin_path.start()
+
+ def tearDown(self):
+ super(TestSysupgradeModule, self).tearDown()
+ self.mock_get_bin_path.stop()
+
+ def test_upgrade_success(self):
+ """ Upgrade was successful """
+
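+ # Canned transcript of a successful OpenBSD sysupgrade run.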
+ rc = 0
+ stdout = """
+ SHA256.sig 100% |*************************************| 2141 00:00
+ Signature Verified
+ INSTALL.amd64 100% |************************************| 43512 00:00
+ base67.tgz 100% |*************************************| 238 MB 02:16
+ bsd 100% |*************************************| 18117 KB 00:24
+ bsd.mp 100% |*************************************| 18195 KB 00:17
+ bsd.rd 100% |*************************************| 10109 KB 00:14
+ comp67.tgz 100% |*************************************| 74451 KB 00:53
+ game67.tgz 100% |*************************************| 2745 KB 00:03
+ man67.tgz 100% |*************************************| 7464 KB 00:04
+ xbase67.tgz 100% |*************************************| 22912 KB 00:30
+ xfont67.tgz 100% |*************************************| 39342 KB 00:28
+ xserv67.tgz 100% |*************************************| 16767 KB 00:24
+ xshare67.tgz 100% |*************************************| 4499 KB 00:06
+ Verifying sets.
+ Fetching updated firmware.
+ Will upgrade on next reboot
+ """
+ stderr = ""
+
+ with patch.object(basic.AnsibleModule, "run_command") as run_command:
+ run_command.return_value = (rc, stdout, stderr)
+ with self.assertRaises(AnsibleExitJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_upgrade_failed(self):
+ """ Upgrade failed """
+
+ rc = 1
+ stdout = ""
+ stderr = "sysupgrade: need root privileges"
+
+ with patch.object(basic.AnsibleModule, "run_command") as run_command_mock:
+ run_command_mock.return_value = (rc, stdout, stderr)
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.module.main()
+ self.assertTrue(result.exception.args[0]['failed'])
+ self.assertIn('need root', result.exception.args[0]['msg'])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_terraform.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_terraform.py
new file mode 100644
index 000000000..f6a0593fd
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_terraform.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2019, Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible_collections.community.general.plugins.modules import terraform
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args
+
+
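+# With no arguments the module must fail and name the missing project_path option.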
+def test_terraform_without_argument(capfd):
+ set_module_args({})
+ with pytest.raises(SystemExit) as results:
+ terraform.main()
+
+ out, err = capfd.readouterr()
+ assert not err
+ output = json.loads(out)
+ assert output['failed']
+ assert 'project_path' in output['msg']
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_ufw.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_ufw.py
new file mode 100644
index 000000000..da8f0f2c8
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_ufw.py
@@ -0,0 +1,477 @@
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+import ansible_collections.community.general.plugins.modules.ufw as module
+
+import json
+
+
+# mock ufw messages
+
+ufw_version_35 = """ufw 0.35\nCopyright 2008-2015 Canonical Ltd.\n"""
+
+ufw_verbose_header = """Status: active
+Logging: on (low)
+Default: deny (incoming), allow (outgoing), deny (routed)
+New profiles: skip
+
+To Action From
+-- ------ ----"""
+
+
+ufw_status_verbose_with_port_7000 = ufw_verbose_header + """
+7000/tcp ALLOW IN Anywhere
+7000/tcp (v6) ALLOW IN Anywhere (v6)
+"""
+
+user_rules_with_port_7000 = """### tuple ### allow tcp 7000 0.0.0.0/0 any 0.0.0.0/0 in
+### tuple ### allow tcp 7000 ::/0 any ::/0 in
+"""
+
+user_rules_with_ipv6 = """### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in
+### tuple ### allow udp 5353 ::/0 any ff02::fb in
+"""
+
+ufw_status_verbose_with_ipv6 = ufw_verbose_header + """
+5353/udp ALLOW IN 224.0.0.251
+5353/udp ALLOW IN ff02::fb
+"""
+
+ufw_status_verbose_nothing = ufw_verbose_header
+
+skipping_adding_existing_rules = "Skipping adding existing rule\nSkipping adding existing rule (v6)\n"
+
+grep_config_cli = "grep -h '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules "
+grep_config_cli += "/var/lib/ufw/user.rules /var/lib/ufw/user6.rules"
+
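+# Map each ufw CLI invocation the module is expected to run to its canned output.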
+dry_mode_cmd_with_port_7000 = {
+ "ufw status verbose": ufw_status_verbose_with_port_7000,
+ "ufw --version": ufw_version_35,
+ "ufw --dry-run allow from any to any port 7000 proto tcp": skippg_adding_existing_rules,
+ "ufw --dry-run insert 1 allow from any to any port 7000 proto tcp": skippg_adding_existing_rules,
+ "ufw --dry-run delete allow from any to any port 7000 proto tcp": "",
+ "ufw --dry-run delete allow from any to any port 7001 proto tcp": user_rules_with_port_7000,
+ "ufw --dry-run route allow in on foo out on bar from 1.1.1.1 port 7000 to 8.8.8.8 port 7001 proto tcp": "",
+ "ufw --dry-run allow in on foo from any to any port 7003 proto tcp": "",
+ "ufw --dry-run allow in on foo from 1.1.1.1 port 7002 to 8.8.8.8 port 7003 proto tcp": "",
+ "ufw --dry-run allow out on foo from any to any port 7004 proto tcp": "",
+ "ufw --dry-run allow out on foo from 1.1.1.1 port 7003 to 8.8.8.8 port 7004 proto tcp": "",
+ grep_config_cli: user_rules_with_port_7000
+}
+
+# setup configuration :
+# ufw reset
+# ufw enable
+# ufw allow proto udp to any port 5353 from 224.0.0.251
+# ufw allow proto udp to any port 5353 from ff02::fb
+dry_mode_cmd_with_ipv6 = {
+ "ufw status verbose": ufw_status_verbose_with_ipv6,
+ "ufw --version": ufw_version_35,
+ # Output of: sudo ufw --dry-run delete allow in from ff02::fb port 5353 proto udp | grep -E "^### tuple"
+ "ufw --dry-run delete allow from ff02::fb to any port 5353 proto udp": "### tuple ### allow udp any ::/0 5353 ff02::fb in",
+ grep_config_cli: user_rules_with_ipv6,
+ "ufw --dry-run allow from ff02::fb to any port 5353 proto udp": skippg_adding_existing_rules,
+ "ufw --dry-run allow from 224.0.0.252 to any port 5353 proto udp": """### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in
+### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.252 in
+""",
+ "ufw --dry-run allow from 10.0.0.0/24 to any port 1577 proto udp": "### tuple ### allow udp 1577 0.0.0.0/0 any 10.0.0.0/24 in"
+}
+
+dry_mode_cmd_nothing = {
+ "ufw status verbose": ufw_status_verbose_nothing,
+ "ufw --version": ufw_version_35,
+ grep_config_cli: "",
+ "ufw --dry-run allow from any to :: port 23": "### tuple ### allow any 23 :: any ::/0 in"
+}
+
+
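+# Fake run_command implementations: return (rc, stdout, stderr) looked up in the tables above.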
+def do_nothing_func_nothing(*args, **kwarg):
+ return 0, dry_mode_cmd_nothing[args[0]], ""
+
+
+def do_nothing_func_ipv6(*args, **kwarg):
+ return 0, dry_mode_cmd_with_ipv6[args[0]], ""
+
+
+def do_nothing_func_port_7000(*args, **kwarg):
+ return 0, dry_mode_cmd_with_port_7000[args[0]], ""
+
+
+def set_module_args(args):
+ """prepare arguments so that they will be picked up during module creation"""
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ """Exception class to be raised by module.exit_json and caught by the test case"""
+ pass
+
+
+class AnsibleFailJson(Exception):
+ """Exception class to be raised by module.fail_json and caught by the test case"""
+ pass
+
+
+def exit_json(*args, **kwargs):
+ """function to patch over exit_json; package return data into an exception"""
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ """function to patch over fail_json; package return data into an exception"""
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+class TestUFW(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_filter_line_that_contains_ipv4(self):
+ reg = module.compile_ipv4_regexp()
+
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 ::/0 any ff02::fb in") is None)
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in") is not None)
+
+ self.assertTrue(reg.match("ff02::fb") is None)
+ self.assertTrue(reg.match("224.0.0.251") is not None)
+ self.assertTrue(reg.match("10.0.0.0/8") is not None)
+ self.assertTrue(reg.match("somethingElse") is None)
+ self.assertTrue(reg.match("::") is None)
+ self.assertTrue(reg.match("any") is None)
+
+ def test_filter_line_that_contains_ipv6(self):
+ reg = module.compile_ipv6_regexp()
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 ::/0 any ff02::fb in") is not None)
+ self.assertTrue(reg.search("### tuple ### allow udp 5353 0.0.0.0/0 any 224.0.0.251 in") is None)
+ self.assertTrue(reg.search("### tuple ### allow any 23 :: any ::/0 in") is not None)
+ self.assertTrue(reg.match("ff02::fb") is not None)
+ self.assertTrue(reg.match("224.0.0.251") is None)
+ self.assertTrue(reg.match("::") is not None)
+
+ def test_check_mode_add_rules(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertFalse(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_insert_rules(self):
+ set_module_args({
+ 'insert': '1',
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertFalse(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_detailed_route(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'route': 'yes',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'to_ip': '8.8.8.8',
+ 'from_port': '7000',
+ 'to_port': '7001',
+ '_ansible_check_mode': True
+ })
+
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_ambiguous_route(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'route': 'yes',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ 'direction': 'in',
+ 'interface': 'baz',
+ '_ansible_check_mode': True
+ })
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.__getResult(do_nothing_func_port_7000)
+
+ exc = result.exception.args[0]
+ self.assertTrue(exc['failed'])
+ self.assertIn('mutually exclusive', exc['msg'])
+
+ def test_check_mode_add_interface_in(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7003',
+ 'interface_in': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_interface_out(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'interface_out': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_non_route_interface_both(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'interface_in': 'foo',
+ 'interface_out': 'bar',
+ '_ansible_check_mode': True
+ })
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ self.__getResult(do_nothing_func_port_7000)
+
+ exc = result.exception.args[0]
+ self.assertTrue(exc['failed'])
+ self.assertIn('combine', exc['msg'])
+
+ def test_check_mode_add_direction_in(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7003',
+ 'direction': 'in',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_in_with_ip(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'from_port': '7002',
+ 'to_ip': '8.8.8.8',
+ 'to_port': '7003',
+ 'direction': 'in',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_out(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7004',
+ 'direction': 'out',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_add_direction_out_with_ip(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'from_ip': '1.1.1.1',
+ 'from_port': '7003',
+ 'to_ip': '8.8.8.8',
+ 'to_port': '7004',
+ 'direction': 'out',
+ 'interface': 'foo',
+ '_ansible_check_mode': True
+ })
+ result = self.__getResult(do_nothing_func_port_7000)
+ self.assertTrue(result.exception.args[0]['changed'])
+
+ def test_check_mode_delete_existing_rules(self):
+
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_check_mode_delete_existing_insert_rules(self):
+
+ set_module_args({
+ 'insert': '1',
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7000',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_check_mode_delete_not_existing_rules(self):
+
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7001',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_check_mode_delete_not_existing_insert_rules(self):
+
+ set_module_args({
+ 'insert': '1',
+ 'rule': 'allow',
+ 'proto': 'tcp',
+ 'port': '7001',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_enable_mode(self):
+ set_module_args({
+ 'state': 'enabled',
+ '_ansible_check_mode': True
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_disable_mode(self):
+ set_module_args({
+ 'state': 'disabled',
+ '_ansible_check_mode': True
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_logging_off(self):
+ set_module_args({
+ 'logging': 'off',
+ '_ansible_check_mode': True
+ })
+
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_logging_on(self):
+ set_module_args({
+ 'logging': 'on',
+ '_ansible_check_mode': True
+ })
+
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_default_changed(self):
+ set_module_args({
+ 'default': 'allow',
+ "direction": "incoming",
+ '_ansible_check_mode': True
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_default_not_changed(self):
+ set_module_args({
+ 'default': 'deny',
+ "direction": "incoming",
+ '_ansible_check_mode': True
+ })
+ self.assertFalse(self.__getResult(do_nothing_func_port_7000).exception.args[0]['changed'])
+
+ def test_ipv6_remove(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': 'ff02::fb',
+ 'delete': 'yes',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv6_add_existing(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': 'ff02::fb',
+ '_ansible_check_mode': True,
+ })
+ self.assertFalse(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_add_not_existing_ipv4_submask(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '1577',
+ 'from': '10.0.0.0/24',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv4_add_with_existing_ipv6(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'proto': 'udp',
+ 'port': '5353',
+ 'from': '224.0.0.252',
+ '_ansible_check_mode': True,
+ })
+ self.assertTrue(self.__getResult(do_nothing_func_ipv6).exception.args[0]['changed'])
+
+ def test_ipv6_add_from_nothing(self):
+ set_module_args({
+ 'rule': 'allow',
+ 'port': '23',
+ 'to': '::',
+ '_ansible_check_mode': True,
+ })
+ result = self.__getResult(do_nothing_func_nothing).exception.args[0]
+ self.assertTrue(result['changed'])
+
+ def __getResult(self, cmd_fun):
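+ """Run module.main() with run_command mocked by cmd_fun and return the captured exit context."""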
+ with patch.object(basic.AnsibleModule, 'run_command') as mock_run_command:
+ mock_run_command.side_effect = cmd_fun
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+ return result
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_command.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_command.py
new file mode 100644
index 000000000..332b976f7
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_command.py
@@ -0,0 +1,911 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import shutil
+import uuid
+import tarfile
+import tempfile
+import os
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils import basic
+import ansible_collections.community.general.plugins.modules.wdc_redfish_command as module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+
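+# Canned Redfish responses shared by the mock request functions below.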
+MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = {
+ "ret": True,
+ "data": {
+ }
+}
+
+MOCK_GET_ENCLOSURE_RESPONSE_SINGLE_TENANT = {
+ "ret": True,
+ "data": {
+ "SerialNumber": "12345"
+ }
+}
+
+MOCK_GET_ENCLOSURE_RESPONSE_MULTI_TENANT = {
+ "ret": True,
+ "data": {
+ "SerialNumber": "12345-A"
+ }
+}
+
+MOCK_URL_ERROR = {
+ "ret": False,
+ "msg": "This is a mock URL error",
+ "status": 500
+}
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE = {
+ "ret": True,
+ "data": {
+ "UpdateService": {
+ "@odata.id": "/UpdateService"
+ },
+ "Chassis": {
+ "@odata.id": "/Chassis"
+ }
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_CHASSIS = {
+ "ret": True,
+ "data": {
+ "Members": [
+ {
+ "@odata.id": "/redfish/v1/Chassis/Enclosure"
+ }
+ ]
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_CHASSIS_ENCLOSURE = {
+ "ret": True,
+ "data": {
+ "Id": "Enclosure",
+ "IndicatorLED": "Off",
+ "Actions": {
+ "Oem": {
+ "WDC": {
+ "#Chassis.Locate": {
+ "target": "/Chassis.Locate"
+ },
+ "#Chassis.PowerMode": {
+ "target": "/redfish/v1/Chassis/Enclosure/Actions/Chassis.PowerMode",
+ }
+ }
+ }
+ },
+ "Oem": {
+ "WDC": {
+ "PowerMode": "Normal"
+ }
+ }
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_AND_FW_ACTIVATE = {
+ "ret": True,
+ "data": {
+ "Actions": {
+ "#UpdateService.SimpleUpdate": {
+ "target": "mocked value"
+ },
+ "Oem": {
+ "WDC": {
+ "#UpdateService.FWActivate": {
+ "title": "Activate the downloaded firmware.",
+ "target": "/redfish/v1/UpdateService/Actions/UpdateService.FWActivate"
+ }
+ }
+ }
+ }
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = {
+ "ret": True,
+ "data": {
+ "Actions": {}
+ }
+}
+
+MOCK_GET_IOM_A_MULTI_TENANT = {
+ "ret": True,
+ "data": {
+ "Id": "IOModuleAFRU"
+ }
+}
+
+MOCK_GET_IOM_B_MULTI_TENANT = {
+ "ret": True,
+ "data": {
+ "error": {
+ "message": "IOM Module B cannot be read"
+ }
+ }
+}
+
+
+MOCK_READY_FOR_FW_UPDATE = {
+ "ret": True,
+ "entries": {
+ "Description": "Ready for FW update",
+ "StatusCode": 0
+ }
+}
+
+MOCK_FW_UPDATE_IN_PROGRESS = {
+ "ret": True,
+ "entries": {
+ "Description": "FW update in progress",
+ "StatusCode": 1
+ }
+}
+
+MOCK_WAITING_FOR_ACTIVATION = {
+ "ret": True,
+ "entries": {
+ "Description": "FW update completed. Waiting for activation.",
+ "StatusCode": 2
+ }
+}
+
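+# Status sequence a successful firmware update walks through, in order.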
+MOCK_SIMPLE_UPDATE_STATUS_LIST = [
+ MOCK_READY_FOR_FW_UPDATE,
+ MOCK_FW_UPDATE_IN_PROGRESS,
+ MOCK_WAITING_FOR_ACTIVATION
+]
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+def get_exception_message(ansible_exit_json):
+ """From an AnsibleExitJson exception, get the message string."""
+ return ansible_exit_json.exception.args[0]["msg"]
+
+
+def is_changed(ansible_exit_json):
+ """From an AnsibleExitJson exception, return the value of the changed flag"""
+ return ansible_exit_json.exception.args[0]["changed"]
+
+
+def mock_simple_update(*args, **kwargs):
+ return {
+ "ret": True
+ }
+
+
+def mocked_url_response(*args, **kwargs):
+ """Mock to just return a generic string."""
+ return "/mockedUrl"
+
+
+def mock_update_url(*args, **kwargs):
+ """Mock of the update url"""
+ return "/UpdateService"
+
+
+def mock_fw_activate_url(*args, **kwargs):
+ """Mock of the FW Activate URL"""
+ return "/UpdateService.FWActivate"
+
+
+def empty_return(*args, **kwargs):
+ """Mock to just return an empty successful return."""
+ return {"ret": True}
+
+
+def mock_get_simple_update_status_ready_for_fw_update(*args, **kwargs):
+ """Mock to return simple update status Ready for FW update"""
+ return MOCK_READY_FOR_FW_UPDATE
+
+
+def mock_get_request_enclosure_single_tenant(*args, **kwargs):
+ """Mock for get_request for single-tenant enclosure."""
+ if args[1].endswith("/redfish/v1") or args[1].endswith("/redfish/v1/"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE
+ elif args[1].endswith("/mockedUrl"):
+ return MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE
+ elif args[1].endswith("Chassis/Enclosure"):
+ return MOCK_GET_ENCLOSURE_RESPONSE_SINGLE_TENANT
+ elif args[1].endswith("/UpdateService"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_AND_FW_ACTIVATE
+ else:
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_get_request_enclosure_multi_tenant(*args, **kwargs):
+ """Mock for get_request with multi-tenant enclosure."""
+ if args[1].endswith("/redfish/v1") or args[1].endswith("/redfish/v1/"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE
+ elif args[1].endswith("/mockedUrl"):
+ return MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE
+ elif args[1].endswith("Chassis/Enclosure"):
+ return MOCK_GET_ENCLOSURE_RESPONSE_MULTI_TENANT
+ elif args[1].endswith("/UpdateService"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_AND_FW_ACTIVATE
+ elif args[1].endswith("/IOModuleAFRU"):
+ return MOCK_GET_IOM_A_MULTI_TENANT
+ elif args[1].endswith("/IOModuleBFRU"):
+ return MOCK_GET_IOM_B_MULTI_TENANT
+ else:
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_get_request(*args, **kwargs):
+ """Mock for get_request for simple resource tests."""
+ if args[1].endswith("/redfish/v1") or args[1].endswith("/redfish/v1/"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE
+ elif args[1].endswith("/Chassis"):
+ return MOCK_SUCCESSFUL_RESPONSE_CHASSIS
+ elif args[1].endswith("Chassis/Enclosure"):
+ return MOCK_SUCCESSFUL_RESPONSE_CHASSIS_ENCLOSURE
+ else:
+ raise RuntimeError("Illegal call to get_request in test: " + args[1])
+
+
+def mock_post_request(*args, **kwargs):
+ """Mock post_request with successful response."""
+ valid_endpoints = [
+ "/UpdateService.FWActivate",
+ "/Chassis.Locate",
+ "/Chassis.PowerMode",
+ ]
+ for endpoint in valid_endpoints:
+ if args[1].endswith(endpoint):
+ return {
+ "ret": True,
+ "data": ACTION_WAS_SUCCESSFUL_MESSAGE
+ }
+ raise RuntimeError("Illegal POST call to: " + args[1])
+
+
+def mock_get_firmware_inventory_version_1_2_3(*args, **kwargs):
+ return {
+ "ret": True,
+ "entries": [
+ {
+ "Id": "IOModuleA_OOBM",
+ "Version": "1.2.3"
+ },
+ {
+ "Id": "IOModuleB_OOBM",
+ "Version": "1.2.3"
+ }
+ ]
+ }
+
+
+ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION = "Unable to extract bundle version or multi-tenant status from update image tarfile"
+ACTION_WAS_SUCCESSFUL_MESSAGE = "Action was successful"
+
+
+class TestWdcRedfishCommand(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+ self.tempdir = tempfile.mkdtemp()
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ module.main()
+
+ def test_module_fail_when_unknown_category(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'unknown',
+ 'command': 'FWActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': [],
+ })
+ module.main()
+
+ def test_module_fail_when_unknown_command(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'unknown',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': [],
+ })
+ module.main()
+
+ def test_module_chassis_power_mode_low(self):
+ """Test setting chassis power mode to low (happy path)."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'PowerModeLow',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_id': 'Enclosure',
+ 'baseuri': 'example.com'
+ }
+ set_module_args(module_args)
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_module_chassis_power_mode_normal_when_already_normal(self):
+ """Test setting chassis power mode to normal when it already is. Verify we get changed=False."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'PowerModeNormal',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_id': 'Enclosure',
+ 'baseuri': 'example.com'
+ }
+ set_module_args(module_args)
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_module_chassis_power_mode_invalid_command(self):
+ """Test that we get an error when issuing an invalid PowerMode command."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'PowerModeExtraHigh',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_id': 'Enclosure',
+ 'baseuri': 'example.com'
+ }
+ set_module_args(module_args)
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ module.main()
+ expected_error_message = "Invalid Command 'PowerModeExtraHigh'"
+ self.assertIn(expected_error_message,
+ get_exception_message(ansible_fail_json))
+
+ def test_module_enclosure_led_indicator_on(self):
+ """Test turning on a valid LED indicator (in this case we use the Enclosure resource)."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOn',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ "resource_id": "Enclosure",
+ "baseuri": "example.com"
+ }
+ set_module_args(module_args)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_module_invalid_resource_led_indicator_on(self):
+ """Test turning LED on for an invalid resource id."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOn',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ "resource_id": "Disk99",
+ "baseuri": "example.com"
+ }
+ set_module_args(module_args)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleFailJson) as ansible_fail_json:
+ module.main()
+ expected_error_message = "Chassis resource Disk99 not found"
+ self.assertEqual(expected_error_message,
+ get_exception_message(ansible_fail_json))
+
+ def test_module_enclosure_led_off_already_off(self):
+ """Test turning LED indicator off when it's already off. Confirm changed is False and no POST occurs."""
+ module_args = {
+ 'category': 'Chassis',
+ 'command': 'IndicatorLedOff',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ "resource_id": "Enclosure",
+ "baseuri": "example.com"
+ }
+ set_module_args(module_args)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_request=mock_get_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(ansible_exit_json))
+ self.assertFalse(is_changed(ansible_exit_json))
+
+ def test_module_fw_activate_first_iom_unavailable(self):
+ """Test that if the first IOM is not available, the 2nd one is used."""
+ ioms = [
+ "bad.example.com",
+ "good.example.com"
+ ]
+ module_args = {
+ 'category': 'Update',
+ 'command': 'FWActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ioms
+ }
+ set_module_args(module_args)
+
+ def mock_get_request(*args, **kwargs):
+ """Mock for get_request that will fail on the 'bad' IOM."""
+ if "bad.example.com" in args[1]:
+ return MOCK_URL_ERROR
+ else:
+ return mock_get_request_enclosure_single_tenant(*args, **kwargs)
+
+ with patch.multiple(module.WdcRedfishUtils,
+ _firmware_activate_uri=mock_fw_activate_url,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as cm:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(cm))
+
+ def test_module_fw_activate_pass(self):
+ """Test the FW Activate command in a passing scenario."""
+ # Run the same test twice -- once specifying ioms, and once specifying baseuri.
+ # Both should work the same way.
+ uri_specifiers = [
+ {
+ "ioms": ["example1.example.com"]
+ },
+ {
+ "baseuri": "example1.example.com"
+ }
+ ]
+ for uri_specifier in uri_specifiers:
+ module_args = {
+ 'category': 'Update',
+ 'command': 'FWActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ }
+ module_args.update(uri_specifier)
+ set_module_args(module_args)
+
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ _firmware_activate_uri=mock_fw_activate_url,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_single_tenant,
+ post_request=mock_post_request):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE,
+ get_exception_message(ansible_exit_json))
+ self.assertTrue(is_changed(ansible_exit_json))
+
+ def test_module_fw_activate_service_does_not_support_fw_activate(self):
+ """Test FW Activate when it is not supported."""
+ expected_error_message = "Service does not support FWActivate"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'FWActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"]
+ })
+
+ def mock_update_uri_response(*args, **kwargs):
+ return {
+ "ret": True,
+ "data": {} # No Actions
+ }
+
+ with patch.multiple(module.WdcRedfishUtils,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_update_uri_response):
+ with self.assertRaises(AnsibleFailJson) as cm:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(cm))
+
+ def test_module_update_and_activate_image_uri_not_http(self):
+ """Test Update and Activate when URI is not http(s)"""
+ expected_error_message = "Bundle URI must be HTTP or HTTPS"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "ftp://example.com/image"
+ })
+ with patch.multiple(module.WdcRedfishUtils,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return):
+ with self.assertRaises(AnsibleFailJson) as cm:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(cm))
+
+ def test_module_update_and_activate_target_not_ready_for_fw_update(self):
+ """Test Update and Activate when target is not in the correct state."""
+ mock_status_code = 999
+ mock_status_description = "mock status description"
+ expected_error_message = "Target is not ready for FW update. Current status: {0} ({1})".format(
+ mock_status_code,
+ mock_status_description
+ )
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image"
+ })
+ with patch.object(module.WdcRedfishUtils, "get_simple_update_status") as mock_get_simple_update_status:
+ mock_get_simple_update_status.return_value = {
+ "ret": True,
+ "entries": {
+ "StatusCode": mock_status_code,
+ "Description": mock_status_description
+ }
+ }
+
+ with patch.multiple(module.WdcRedfishUtils,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return):
+ with self.assertRaises(AnsibleFailJson) as cm:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(cm))
+
+ def test_module_update_and_activate_bundle_not_a_tarfile(self):
+ """Test Update and Activate when bundle is not a tarfile"""
+ mock_filename = os.path.abspath(__file__)
+ expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = mock_filename
+ with patch.multiple(module.WdcRedfishUtils,
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return):
+ with self.assertRaises(AnsibleFailJson) as cm:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(cm))
+
+ def test_module_update_and_activate_bundle_contains_no_firmware_version(self):
+ """Test Update and Activate when bundle contains no firmware version"""
+ expected_error_message = ERROR_MESSAGE_UNABLE_TO_EXTRACT_BUNDLE_VERSION
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = "empty_tarfile{0}.tar".format(uuid.uuid4())
+ empty_tarfile = tarfile.open(os.path.join(self.tempdir, tar_name), "w")
+ empty_tarfile.close()
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return):
+ with self.assertRaises(AnsibleFailJson) as cm:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(cm))
+
+ def test_module_update_and_activate_version_already_installed(self):
+ """Test Update and Activate when the bundle version is already installed"""
+ mock_firmware_version = "1.2.3"
+ expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=False)
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3,
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_single_tenant):
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(result))
+ self.assertFalse(is_changed(result))
+
+ def test_module_update_and_activate_version_already_installed_multi_tenant(self):
+ """Test Update and Activate on multi-tenant when version is already installed"""
+ mock_firmware_version = "1.2.3"
+ expected_error_message = ACTION_WAS_SUCCESSFUL_MESSAGE
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=True)
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3,
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_multi_tenant):
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(result))
+ self.assertFalse(is_changed(result))
+
+ def test_module_update_and_activate_pass(self):
+ """Test Update and Activate (happy path)"""
+ mock_firmware_version = "1.2.2"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=False)
+
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils",
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3,
+ simple_update=mock_simple_update,
+ _simple_update_status_uri=mocked_url_response,
+ # _find_updateservice_resource=empty_return,
+ # _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_single_tenant,
+ post_request=mock_post_request):
+
+ with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status"
+ ) as mock_get_simple_update_status:
+ mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertTrue(is_changed(ansible_exit_json))
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json))
+
+ def test_module_update_and_activate_pass_multi_tenant(self):
+ """Test Update and Activate with multi-tenant (happy path)"""
+ mock_firmware_version = "1.2.2"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=True)
+
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3,
+ simple_update=mock_simple_update,
+ _simple_update_status_uri=mocked_url_response,
+ # _find_updateservice_resource=empty_return,
+ # _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_multi_tenant,
+ post_request=mock_post_request):
+ with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_simple_update_status"
+ ) as mock_get_simple_update_status:
+ mock_get_simple_update_status.side_effect = MOCK_SIMPLE_UPDATE_STATUS_LIST
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ self.assertTrue(is_changed(ansible_exit_json))
+ self.assertEqual(ACTION_WAS_SUCCESSFUL_MESSAGE, get_exception_message(ansible_exit_json))
+
+ def test_module_fw_update_multi_tenant_firmware_single_tenant_enclosure(self):
+ """Test Update and Activate using multi-tenant bundle on single-tenant enclosure"""
+ mock_firmware_version = "1.1.1"
+ expected_error_message = "Enclosure multi-tenant is False but bundle multi-tenant is True"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=True)
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3,
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_single_tenant):
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(result))
+
+ def test_module_fw_update_single_tenant_firmware_multi_tenant_enclosure(self):
+ """Test Update and Activate using single-tenant bundle on multi-tenant enclosure"""
+ mock_firmware_version = "1.1.1"
+ expected_error_message = "Enclosure multi-tenant is True but bundle multi-tenant is False"
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'UpdateAndActivate',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ 'update_image_uri': "http://example.com/image",
+ "update_creds": {
+ "username": "image_user",
+ "password": "image_password"
+ }
+ })
+
+ tar_name = self.generate_temp_bundlefile(mock_firmware_version=mock_firmware_version,
+ is_multi_tenant=False)
+ with patch('ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.fetch_file') as mock_fetch_file:
+ mock_fetch_file.return_value = os.path.join(self.tempdir, tar_name)
+ with patch.multiple(module.WdcRedfishUtils,
+ get_firmware_inventory=mock_get_firmware_inventory_version_1_2_3(),
+ get_simple_update_status=mock_get_simple_update_status_ready_for_fw_update,
+ _firmware_activate_uri=mocked_url_response,
+ _update_uri=mock_update_url,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_get_request_enclosure_multi_tenant):
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+ self.assertEqual(expected_error_message,
+ get_exception_message(result))
+
+ def generate_temp_bundlefile(self,
+ mock_firmware_version,
+ is_multi_tenant):
+ """Generate a temporary fake bundle file.
+
+ :param str mock_firmware_version: The simulated firmware version for the bundle.
+ :param bool is_multi_tenant: Is the simulated bundle multi-tenant?
+
+ This can be used for a mock FW update.
+ """
+        tar_name = "tarfile{0}.tar".format(uuid.uuid4())
+
+        # Create an empty placeholder package file named for the firmware version.
+        package_filename = "oobm-{0}.pkg".format(mock_firmware_version)
+        package_filename_path = os.path.join(self.tempdir, package_filename)
+        with open(package_filename_path, "w"):
+            pass
+
+        # Write a fake firmware binary: multi-tenant bundles are marked with
+        # 0x80 bytes, single-tenant bundles with 0xFF bytes.
+        bin_filename = "firmware.bin"
+        bin_filename_path = os.path.join(self.tempdir, bin_filename)
+        byte_to_write = b'\x80' if is_multi_tenant else b'\xFF'
+        with open(bin_filename_path, "wb") as bin_file:
+            bin_file.write(byte_to_write * 12)
+
+        # Bundle the package and firmware files into the tarball once each.
+        with tarfile.open(os.path.join(self.tempdir, tar_name), "w") as bundle_tarfile:
+            for filename in [package_filename, bin_filename]:
+                bundle_tarfile.add(os.path.join(self.tempdir, filename), arcname=filename)
+        return tar_name
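+
+    # Assumed module-side behaviour (not asserted here): the bundle parser in
+    # wdc_redfish_utils is expected to read firmware.bin from the tarball and
+    # treat the 0x80 marker byte as multi-tenant, roughly:
+    #
+    #     with tarfile.open(bundle_path) as tf:
+    #         marker = tf.extractfile("firmware.bin").read(12)
+    #     is_multi_tenant = bool(marker[0] & 0x80)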
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_info.py
new file mode 100644
index 000000000..e1dfb4a27
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_wdc_redfish_info.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils import basic
+import ansible_collections.community.general.plugins.modules.wdc_redfish_info as module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_ACTIONS = {
+ "ret": True,
+ "data": {
+ "Actions": {}
+ }
+}
+
+MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE = {
+ "ret": True,
+ "data": {
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE = {
+ "ret": True,
+ "data": {
+ "UpdateService": {
+ "@odata.id": "/UpdateService"
+ }
+ }
+}
+
+MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_BUT_NO_FW_ACTIVATE = {
+ "ret": True,
+ "data": {
+ "Actions": {
+ "#UpdateService.SimpleUpdate": {
+ "target": "mocked value"
+ },
+ "Oem": {
+ "WDC": {} # No #UpdateService.FWActivate
+ }
+ }
+ }
+}
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+def get_redfish_facts(ansible_exit_json):
+ """From an AnsibleExitJson exception, get the redfish facts dict."""
+ return ansible_exit_json.exception.args[0]["redfish_facts"]
+
+
+def get_exception_message(ansible_exit_json):
+ """From an AnsibleExitJson exception, get the message string."""
+ return ansible_exit_json.exception.args[0]["msg"]
+
+
+class TestWdcRedfishInfo(unittest.TestCase):
+
+ def setUp(self):
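+        # Patch exit_json/fail_json so that module termination raises
+        # AnsibleExitJson/AnsibleFailJson, making results assertable instead of
+        # exiting the test process.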
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ module.main()
+
+ def test_module_fail_when_unknown_category(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'unknown',
+ 'command': 'SimpleUpdateStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': [],
+ })
+ module.main()
+
+ def test_module_fail_when_unknown_command(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'unknown',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': [],
+ })
+ module.main()
+
+ def test_module_simple_update_status_pass(self):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'SimpleUpdateStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ })
+
+ def mock_simple_update_status(*args, **kwargs):
+ return {
+ "ret": True,
+ "data": {
+ "Description": "Ready for FW update",
+ "ErrorCode": 0,
+ "EstimatedRemainingMinutes": 0,
+ "StatusCode": 0
+ }
+ }
+
+ def mocked_string_response(*args, **kwargs):
+ return "mockedUrl"
+
+ def empty_return(*args, **kwargs):
+ return {"ret": True}
+
+ with patch.multiple(module.WdcRedfishUtils,
+ _simple_update_status_uri=mocked_string_response,
+ _find_updateservice_resource=empty_return,
+ _find_updateservice_additional_uris=empty_return,
+ get_request=mock_simple_update_status):
+ with self.assertRaises(AnsibleExitJson) as ansible_exit_json:
+ module.main()
+ redfish_facts = get_redfish_facts(ansible_exit_json)
+ self.assertEqual(mock_simple_update_status()["data"],
+ redfish_facts["simple_update_status"]["entries"])
+
+ def test_module_simple_update_status_updateservice_resource_not_found(self):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'SimpleUpdateStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ })
+ with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ "ret": True,
+ "data": {} # Missing UpdateService property
+ }
+ with self.assertRaises(AnsibleFailJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual("UpdateService resource not found",
+ get_exception_message(ansible_exit_json))
+
+ def test_module_simple_update_status_service_does_not_support_simple_update(self):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'SimpleUpdateStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ })
+
+ def mock_get_request_function(uri):
+ mock_url_string = "mockURL"
+ if mock_url_string in uri:
+ return {
+ "ret": True,
+ "data": {
+ "Actions": { # No #UpdateService.SimpleUpdate
+ }
+ }
+ }
+ else:
+ return {
+ "ret": True,
+ "data": mock_url_string
+ }
+
+ with patch.object(module.WdcRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.side_effect = mock_get_request_function
+ with self.assertRaises(AnsibleFailJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual("UpdateService resource not found",
+ get_exception_message(ansible_exit_json))
+
+ def test_module_simple_update_status_service_does_not_support_fw_activate(self):
+ set_module_args({
+ 'category': 'Update',
+ 'command': 'SimpleUpdateStatus',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'ioms': ["example1.example.com"],
+ })
+
+ def mock_get_request_function(uri):
+ if uri.endswith("/redfish/v1") or uri.endswith("/redfish/v1/"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_UPDATE_SERVICE_RESOURCE
+ elif uri.endswith("/mockedUrl"):
+ return MOCK_SUCCESSFUL_HTTP_EMPTY_RESPONSE
+ elif uri.endswith("/UpdateService"):
+ return MOCK_SUCCESSFUL_RESPONSE_WITH_SIMPLE_UPDATE_BUT_NO_FW_ACTIVATE
+ else:
+ raise RuntimeError("Illegal call to get_request in test: " + uri)
+
+ with patch("ansible_collections.community.general.plugins.module_utils.wdc_redfish_utils.WdcRedfishUtils.get_request") as mock_get_request:
+ mock_get_request.side_effect = mock_get_request_function
+ with self.assertRaises(AnsibleFailJson) as ansible_exit_json:
+ module.main()
+ self.assertEqual("Service does not support FWActivate",
+ get_exception_message(ansible_exit_json))
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_xcc_redfish_command.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_xcc_redfish_command.py
new file mode 100644
index 000000000..c3902a2f3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_xcc_redfish_command.py
@@ -0,0 +1,629 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible.module_utils import basic
+import ansible_collections.community.general.plugins.modules.xcc_redfish_command as module
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import AnsibleExitJson, AnsibleFailJson
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import set_module_args, exit_json, fail_json
+
+
+def get_bin_path(self, arg, required=False):
+ """Mock AnsibleModule.get_bin_path"""
+ return arg
+
+
+class TestXCCRedfishCommand(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module_helper = patch.multiple(basic.AnsibleModule,
+ exit_json=exit_json,
+ fail_json=fail_json,
+ get_bin_path=get_bin_path)
+ self.mock_module_helper.start()
+ self.addCleanup(self.mock_module_helper.stop)
+
+ def test_module_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({})
+ module.main()
+
+ def test_module_fail_when_unknown_category(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'unknown',
+ 'command': 'VirtualMediaEject',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+ module.main()
+
+ def test_module_fail_when_unknown_command(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'Manager',
+ 'command': 'unknown',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+ module.main()
+
+ def test_module_command_VirtualMediaInsert_pass(self):
+ set_module_args({
+ 'category': 'Manager',
+ 'command': 'VirtualMediaInsert',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'timeout': 30,
+ 'virtual_media': {
+ 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso",
+ 'media_types': ['CD'],
+ 'inserted': True,
+ 'write_protected': True,
+ 'transfer_protocol_type': 'NFS'
+ }
+ })
+ with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource:
+ mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+ with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource:
+ mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+
+ with patch.object(module.XCCRedfishUtils, 'virtual_media_insert') as mock_virtual_media_insert:
+ mock_virtual_media_insert.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_VirtualMediaEject_pass(self):
+ set_module_args({
+ 'category': 'Manager',
+ 'command': 'VirtualMediaEject',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'timeout': 30,
+ 'virtual_media': {
+ 'image_url': "nfs://10.245.52.18:/home/nfs/bootable-sr635-20210111-autorun.iso",
+ }
+ })
+ with patch.object(module.XCCRedfishUtils, '_find_systems_resource') as mock__find_systems_resource:
+ mock__find_systems_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+ with patch.object(module.XCCRedfishUtils, '_find_managers_resource') as mock__find_managers_resource:
+ mock__find_managers_resource.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+
+ with patch.object(module.XCCRedfishUtils, 'virtual_media_eject') as mock_virtual_media_eject:
+ mock_virtual_media_eject.return_value = {'ret': True, 'changed': True, 'msg': 'success'}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_VirtualMediaEject_fail_when_required_args_missing(self):
+ with self.assertRaises(AnsibleFailJson):
+ set_module_args({
+ 'category': 'Manager',
+ 'command': 'VirtualMediaEject',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+ module.main()
+
+ def test_module_command_GetResource_fail_when_required_args_missing(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_GetResource_fail_when_get_return_false(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': False, 'msg': '404 error'}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_GetResource_pass(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_GetCollectionResource_fail_when_required_args_missing(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetCollectionResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_GetCollectionResource_fail_when_get_return_false(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetCollectionResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': False, 'msg': '404 error'}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+    def test_module_command_GetCollectionResource_fail_when_get_not_collection(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetCollectionResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_GetCollectionResource_pass_when_get_empty_collection(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetCollectionResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'Members': [], 'Members@odata.count': 0}}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_GetCollectionResource_pass_when_get_collection(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'GetCollectionResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'Members': [{'@odata.id': '/redfish/v1/testuri/1'}], 'Members@odata.count': 1}}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_PatchResource_fail_when_required_args_missing(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PatchResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
+
+ with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
+ mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PatchResource_fail_when_required_args_missing_no_requestbody(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PatchResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
+
+ with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
+ mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PatchResource_fail_when_noexisting_property_in_requestbody(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PatchResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ 'request_body': {'teststr': 'yyyy', 'otherkey': 'unknownkey'}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
+
+ with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
+ mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx'}}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PatchResource_fail_when_get_return_false(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PatchResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ 'request_body': {'teststr': 'yyyy'}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
+
+ with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
+ mock_patch_request.return_value = {'ret': False, 'msg': '500 internal error'}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PatchResource_pass(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PatchResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ 'request_body': {'teststr': 'yyyy'}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': True, 'data': {'teststr': 'xxxx', '@odata.etag': '27f6eb13fa1c28a2711'}}
+
+ with patch.object(module.XCCRedfishUtils, 'patch_request') as mock_patch_request:
+ mock_patch_request.return_value = {'ret': True, 'data': {'teststr': 'yyyy', '@odata.etag': '322e0d45d9572723c98'}}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_required_args_missing(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ })
+
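+        # The mocked Bios resource below advertises two POST actions
+        # (ChangePassword and ResetBios); the PostResource command is expected
+        # to validate resource_uri and request_body against this structure.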
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_invalid_resourceuri(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/testuri',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_no_requestbody(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_requestbody_mismatch_with_data_from_actioninfo_uri(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
+ 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Parameters': [],
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_get_return_false(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword',
+ 'request_body': {'PasswordName': 'UefiAdminPassword', 'NewPassword': 'PASSW0RD=='}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {'ret': False, 'msg': '404 error'}
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_fail_when_post_return_false(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios',
+ 'request_body': {}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': False, 'msg': '500 internal error'}
+
+ with self.assertRaises(AnsibleFailJson) as result:
+ module.main()
+
+ def test_module_command_PostResource_pass(self):
+ set_module_args({
+ 'category': 'Raw',
+ 'command': 'PostResource',
+ 'baseuri': '10.245.39.251',
+ 'username': 'USERID',
+ 'password': 'PASSW0RD=21',
+ 'resource_uri': '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios',
+ 'request_body': {}
+ })
+
+ with patch.object(module.XCCRedfishUtils, 'get_request') as mock_get_request:
+ mock_get_request.return_value = {
+ 'ret': True,
+ 'data': {
+ 'Actions': {
+ '#Bios.ChangePassword': {
+ '@Redfish.ActionInfo': "/redfish/v1/Systems/1/Bios/ChangePasswordActionInfo",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ChangePassword",
+ 'title': "ChangePassword",
+ 'PasswordName@Redfish.AllowableValues': [
+ "UefiAdminPassword",
+ "UefiPowerOnPassword"
+ ]
+ },
+ '#Bios.ResetBios': {
+ 'title': "ResetBios",
+ 'target': "/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios"
+ }
+ },
+ }
+ }
+
+ with patch.object(module.XCCRedfishUtils, 'post_request') as mock_post_request:
+ mock_post_request.return_value = {'ret': True, 'msg': 'post success'}
+
+ with self.assertRaises(AnsibleExitJson) as result:
+ module.main()
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_info.py
new file mode 100644
index 000000000..6eb22a767
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_info.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+import pytest
+
+from .xenserver_common import fake_xenapi_ref
+from .xenserver_conftest import XenAPI, xenserver_guest_info # noqa: F401, pylint: disable=unused-import
+
+pytestmark = pytest.mark.usefixtures('patch_ansible_module')
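+# The patch_ansible_module fixture (applied above to every test) injects each
+# params dict as ANSIBLE_MODULE_ARGS, so the module parses it like real task
+# arguments.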
+
+
+testcase_module_params = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "uuid": "somevmuuid",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "uuid": "somevmuuid",
+ },
+ ],
+ "ids": [
+ "name",
+ "uuid",
+ "name+uuid",
+ ],
+}
+
+
+@pytest.mark.parametrize('patch_ansible_module', testcase_module_params['params'], ids=testcase_module_params['ids'], indirect=True)
+def test_xenserver_guest_info(mocker, capfd, XenAPI, xenserver_guest_info):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.get_object_ref', return_value=None)
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_params', return_value=None)
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_info.gather_vm_facts', return_value=fake_vm_facts)
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_info.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ assert result['instance'] == fake_vm_facts
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py
new file mode 100644
index 000000000..74b21fcf3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_xenserver_guest_powerstate.py
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pytest
+
+from .xenserver_common import fake_xenapi_ref
+from .xenserver_conftest import fake_ansible_module, XenAPI, xenserver_guest_powerstate # noqa: F401, pylint: disable=unused-import
+
+
+testcase_set_powerstate = {
+ "params": [
+ (False, "someoldstate"),
+ (True, "somenewstate"),
+ ],
+ "ids": [
+ "state-same",
+ "state-changed",
+ ],
+}
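+# Each "params" entry above is the (state_changed, new_power_state) tuple that
+# the mocked set_vm_power_state returns; the test asserts the changed flag and
+# the resulting power state are both reflected by the module.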
+
+testcase_module_params_state_present = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "present",
+ },
+ ],
+ "ids": [
+ "present-implicit",
+ "present-explicit",
+ ],
+}
+
+testcase_module_params_state_other = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-on",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-off",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "restarted",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "shutdown-guest",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "reboot-guest",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "suspended",
+ },
+ ],
+ "ids": [
+ "powered-on",
+ "powered-off",
+ "restarted",
+ "shutdown-guest",
+ "reboot-guest",
+ "suspended",
+ ],
+}
+
+testcase_module_params_wait = {
+ "params": [
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "present",
+ "wait_for_ip_address": "yes",
+ },
+ {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "name": "somevmname",
+ "state": "powered-on",
+ "wait_for_ip_address": "yes",
+ },
+ ],
+ "ids": [
+ "wait-present",
+ "wait-other",
+ ],
+}
+
+
+@pytest.mark.parametrize('power_state', testcase_set_powerstate['params'], ids=testcase_set_powerstate['ids'])
+def test_xenserver_guest_powerstate_set_power_state(mocker, fake_ansible_module, XenAPI, xenserver_guest_powerstate, power_state):
+ """Tests power state change handling."""
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params',
+ return_value={"power_state": "Someoldstate"})
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=power_state)
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ fake_ansible_module.params.update({
+ "name": "somename",
+ "uuid": "someuuid",
+ "state_change_timeout": 1,
+ })
+
+ vm = xenserver_guest_powerstate.XenServerVM(fake_ansible_module)
+ state_changed = vm.set_power_state(None)
+
+ mocked_set_vm_power_state.assert_called_once_with(fake_ansible_module, fake_xenapi_ref('VM'), None, 1)
+ assert state_changed == power_state[0]
+ assert vm.vm_params['power_state'] == power_state[1].capitalize()
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_state_present['params'],
+ ids=testcase_module_params_state_present['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_present(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output when state is set to present.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts',
+ return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_set_vm_power_state.assert_not_called()
+ mocked_wait_for_vm_ip_address.assert_not_called()
+ assert result['changed'] is False
+ assert result['instance'] == fake_vm_facts
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_state_other['params'],
+ ids=testcase_module_params_state_other['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_other(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output when state is set to other value than
+ present.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_set_vm_power_state.assert_called_once()
+ mocked_wait_for_vm_ip_address.assert_not_called()
+ assert result['changed'] is True
+ assert result['instance'] == fake_vm_facts
+
+
+@pytest.mark.parametrize('patch_ansible_module',
+ testcase_module_params_wait['params'],
+ ids=testcase_module_params_wait['ids'],
+ indirect=True)
+def test_xenserver_guest_powerstate_wait(mocker, patch_ansible_module, capfd, XenAPI, xenserver_guest_powerstate):
+ """
+ Tests regular module invocation including parsing and propagation of
+ module params and module output when wait_for_ip_address option is used.
+ """
+ fake_vm_facts = {"fake-vm-fact": True}
+
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.get_object_ref',
+ return_value=fake_xenapi_ref('VM'))
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_params', return_value={})
+ mocker.patch('ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.gather_vm_facts', return_value=fake_vm_facts)
+ mocked_set_vm_power_state = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.set_vm_power_state',
+ return_value=(True, "somenewstate"))
+ mocked_wait_for_vm_ip_address = mocker.patch(
+ 'ansible_collections.community.general.plugins.modules.xenserver_guest_powerstate.wait_for_vm_ip_address',
+ return_value={})
+
+ mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
+
+ mocked_returns = {
+ "pool.get_all.return_value": [fake_xenapi_ref('pool')],
+ "pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
+ }
+
+ mocked_xenapi.configure_mock(**mocked_returns)
+
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.xenserver.get_xenserver_version', return_value=[7, 2, 0])
+
+ with pytest.raises(SystemExit):
+ xenserver_guest_powerstate.main()
+
+ out, err = capfd.readouterr()
+ result = json.loads(out)
+
+ mocked_wait_for_vm_ip_address.assert_called_once()
+ assert result['instance'] == fake_vm_facts
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf.py
new file mode 100644
index 000000000..c979fd8d2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf.py
@@ -0,0 +1,312 @@
+# -*- coding: utf-8 -*-
+# Author: Alexei Znamensky (russoz@gmail.com)
+# Largely adapted from test_redhat_subscription by
+# Jiri Hnidek (jhnidek@redhat.com)
+#
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# Copyright (c) Jiri Hnidek (jhnidek@redhat.com)
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import xfconf
+
+import pytest
+
+TESTED_MODULE = xfconf.__name__
+
+
+@pytest.fixture
+def patch_xfconf(mocker):
+ """
+    Function used for mocking some parts of the xfconf module
+ """
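+    # get_bin_path is mocked so the module can resolve xfconf-query without the
+    # binary being installed on the test machine.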
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path',
+ return_value='/testbin/xfconf-query')
+
+
+@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_without_required_parameters(capfd, patch_xfconf):
+ """
+    Failure must occur when all parameters are missing
+ """
+ with pytest.raises(SystemExit):
+ xfconf.main()
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results['failed']
+ assert 'missing required arguments' in results['msg']
+
+
+TEST_CASES = [
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/inactive_opacity',
+ 'state': 'present',
+ 'value_type': 'int',
+ 'value': 90,
+ },
+ {
+ 'id': 'test_property_set_property',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '100\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity',
+ '--create', '--type', 'int', '--set', '90'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': '100',
+ 'value_type': 'int',
+ 'value': '90',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/inactive_opacity',
+ 'state': 'present',
+ 'value_type': 'int',
+ 'value': 90,
+ },
+ {
+ 'id': 'test_property_set_property_same_value',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '90\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity',
+ '--create', '--type', 'int', '--set', '90'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': '90',
+ 'value_type': 'int',
+ 'value': '90',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfce4-session',
+ 'property': '/general/SaveOnExit',
+ 'state': 'present',
+ 'value_type': 'bool',
+ 'value': False,
+ },
+ {
+ 'id': 'test_property_set_property_bool_false',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfce4-session', '--property', '/general/SaveOnExit'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'true\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfce4-session', '--property', '/general/SaveOnExit',
+ '--create', '--type', 'bool', '--set', 'false'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'false\n', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': 'true',
+ 'value_type': 'bool',
+ 'value': 'False',
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'present',
+ 'value_type': 'string',
+ 'value': ['A', 'B', 'C'],
+ },
+ {
+ 'id': 'test_property_set_array',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B',
+ '--type', 'string', '--set', 'C'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': ['Main', 'Work', 'Tmp'],
+ 'value_type': ['str', 'str', 'str'],
+ 'value': ['A', 'B', 'C'],
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'present',
+ 'value_type': 'string',
+ 'value': ['A', 'B', 'C'],
+ },
+ {
+ 'id': 'test_property_set_array_to_same_value',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--create', '--force-array', '--type', 'string', '--set', 'A', '--type', 'string', '--set', 'B',
+ '--type', 'string', '--set', 'C'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': False,
+ 'previous_value': ['A', 'B', 'C'],
+ 'value_type': ['str', 'str', 'str'],
+ 'value': ['A', 'B', 'C'],
+ },
+ ],
+ [
+ {
+ 'channel': 'xfwm4',
+ 'property': '/general/workspace_names',
+ 'state': 'absent',
+ },
+ {
+ 'id': 'test_property_reset_value',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nA\nB\nC\n', '',),
+ ),
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names',
+ '--reset'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': False},
+ # Mock of returned code, stdout and stderr
+ (0, '', '',),
+ ),
+ ],
+ 'changed': True,
+ 'previous_value': ['A', 'B', 'C'],
+ 'value_type': None,
+ 'value': None,
+ },
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_xfconf(mocker, capfd, patch_xfconf, testcase):
+ """
+    Run unit tests for test cases listed in TEST_CASES
+ """
+
+    # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible.module_utils.basic.AnsibleModule.run_command',
+ side_effect=call_results)
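+    # side_effect serves call_results in order: each run_command invocation
+    # consumes the next mocked (rc, stdout, stderr) tuple.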
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ xfconf.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % testcase)
+ print("results =\n%s" % results)
+
+ assert 'changed' in results
+ assert results['changed'] == testcase['changed']
+
+ for test_result in ('channel', 'property'):
+ assert test_result in results, "'{0}' not found in {1}".format(test_result, results)
+ assert results[test_result] == results['invocation']['module_args'][test_result], \
+ "'{0}': '{1}' != '{2}'".format(test_result, results[test_result], results['invocation']['module_args'][test_result])
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
+
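+    # The module result reports the command line and output of the final
+    # run_command invocation, so compare against the last mocked call.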
+ expected_cmd, dummy, expected_res = testcase['run_command.calls'][-1]
+ assert results['cmd'] == expected_cmd
+ assert results['stdout'] == expected_res[1]
+ assert results['stderr'] == expected_res[2]
+
+ for conditional_test_result in ('msg', 'value', 'previous_value'):
+ if conditional_test_result in testcase:
+ assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results)
+ assert results[conditional_test_result] == testcase[conditional_test_result], \
+ "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result])
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf_info.py b/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf_info.py
new file mode 100644
index 000000000..dfcd4f33a
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/test_xfconf_info.py
@@ -0,0 +1,172 @@
+# Copyright (c) Alexei Znamensky (russoz@gmail.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.plugins.modules import xfconf_info
+
+import pytest
+
+TESTED_MODULE = xfconf_info.__name__
+
+
+@pytest.fixture
+def patch_xfconf_info(mocker):
+ """
+    Function used for mocking some parts of the xfconf_info module
+ """
+ mocker.patch('ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.get_bin_path',
+ return_value='/testbin/xfconf-query')
+
+
+TEST_CASES = [
+ [
+ {'channel': 'xfwm4', 'property': '/general/inactive_opacity'},
+ {
+ 'id': 'test_simple_property_get',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/inactive_opacity'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ # Mock of returned code, stdout and stderr
+ (0, '100\n', '',),
+ ),
+ ],
+ 'is_array': False,
+ 'value': '100',
+ }
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/i_dont_exist'},
+ {
+ 'id': 'test_simple_property_get_nonexistent',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/i_dont_exist'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ # Mock of returned code, stdout and stderr
+ (1, '', 'Property "/general/i_dont_exist" does not exist on channel "xfwm4".\n',),
+ ),
+ ],
+ 'is_array': False,
+ }
+ ],
+ [
+ {'property': '/general/i_dont_exist'},
+ {
+ 'id': 'test_property_no_channel',
+ 'run_command.calls': [],
+ }
+ ],
+ [
+ {'channel': 'xfwm4', 'property': '/general/workspace_names'},
+ {
+ 'id': 'test_property_get_array',
+ 'run_command.calls': [
+ (
+                    # The following command call will be asserted
+ ['/testbin/xfconf-query', '--channel', 'xfwm4', '--property', '/general/workspace_names'],
+ # Was return code checked?
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ # Mocked return code, stdout and stderr
+ (0, 'Value is an array with 3 items:\n\nMain\nWork\nTmp\n', '',),
+ ),
+ ],
+ 'is_array': True,
+ 'value_array': ['Main', 'Work', 'Tmp'],
+ },
+ ],
+ [
+ {},
+ {
+ 'id': 'get_channels',
+ 'run_command.calls': [
+ (
+ # The following command call will be asserted
+ ['/testbin/xfconf-query', '--list'],
+ # Expected keyword arguments (note check_rc=True)
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ # Mocked return code, stdout and stderr
+ (0, 'Channels:\n a\n b\n c\n', '',),
+ ),
+ ],
+ 'is_array': False,
+ 'channels': ['a', 'b', 'c'],
+ },
+ ],
+ [
+ {'channel': 'xfwm4'},
+ {
+ 'id': 'get_properties',
+ 'run_command.calls': [
+ (
+ # The following command call will be asserted
+ ['/testbin/xfconf-query', '--list', '--channel', 'xfwm4'],
+ # Expected keyword arguments (note check_rc=True)
+ {'environ_update': {'LANGUAGE': 'C', 'LC_ALL': 'C'}, 'check_rc': True},
+ # Mocked return code, stdout and stderr
+ (0, '/general/wrap_cycle\n/general/wrap_layout\n/general/wrap_resistance\n/general/wrap_windows\n'
+ '/general/wrap_workspaces\n/general/zoom_desktop\n', '',),
+ ),
+ ],
+ 'is_array': False,
+ 'properties': [
+ '/general/wrap_cycle',
+ '/general/wrap_layout',
+ '/general/wrap_resistance',
+ '/general/wrap_windows',
+ '/general/wrap_workspaces',
+ '/general/zoom_desktop',
+ ],
+ },
+ ],
+]
+TEST_CASES_IDS = [item[1]['id'] for item in TEST_CASES]
+
+
+@pytest.mark.parametrize('patch_ansible_module, testcase',
+ TEST_CASES,
+ ids=TEST_CASES_IDS,
+ indirect=['patch_ansible_module'])
+@pytest.mark.usefixtures('patch_ansible_module')
+def test_xfconf_info(mocker, capfd, patch_xfconf_info, testcase):
+ """
+ Run unit tests for test cases listed in TEST_CASES
+ """
+
+ # First, mock the function used for running commands
+ call_results = [item[2] for item in testcase['run_command.calls']]
+ mock_run_command = mocker.patch(
+ 'ansible_collections.community.general.plugins.module_utils.mh.module_helper.AnsibleModule.run_command',
+ side_effect=call_results)
+
+ # Try to run test case
+ with pytest.raises(SystemExit):
+ xfconf_info.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ print("testcase =\n%s" % testcase)
+ print("results =\n%s" % results)
+
+ for conditional_test_result in ('value_array', 'value', 'is_array', 'properties', 'channels'):
+ if conditional_test_result in testcase:
+ assert conditional_test_result in results, "'{0}' not found in {1}".format(conditional_test_result, results)
+ assert results[conditional_test_result] == testcase[conditional_test_result], \
+ "'{0}': '{1}' != '{2}'".format(conditional_test_result, results[conditional_test_result], testcase[conditional_test_result])
+
+ assert mock_run_command.call_count == len(testcase['run_command.calls'])
+ if mock_run_command.call_count:
+ call_args_list = [(item[0][0], item[1]) for item in mock_run_command.call_args_list]
+ expected_call_args_list = [(item[0], item[1]) for item in testcase['run_command.calls']]
+ print("call args list =\n%s" % call_args_list)
+ print("expected args list =\n%s" % expected_call_args_list)
+ assert call_args_list == expected_call_args_list
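
The test driver above replays each case's canned (rc, stdout, stderr) tuples through a mocked AnsibleModule.run_command via side_effect, then checks both the module's JSON output and the exact command invocations. A minimal, self-contained sketch of that side_effect technique (the command names below are hypothetical, not xfconf-specific):

    from unittest.mock import MagicMock

    # Each call to the mock returns the next canned (rc, stdout, stderr) tuple,
    # mirroring how the tests above replay 'run_command.calls'.
    run_command = MagicMock(side_effect=[(0, '100\n', ''), (1, '', 'no such property\n')])
    assert run_command(['some-query', '--get'], check_rc=True) == (0, '100\n', '')
    assert run_command(['some-query', '--get'], check_rc=False) == (1, '', 'no such property\n')
    assert run_command.call_count == 2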
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/utils.py b/ansible_collections/community/general/tests/unit/plugins/modules/utils.py
new file mode 100644
index 000000000..1f7f14722
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/utils.py
@@ -0,0 +1,54 @@
+# Copyright (c) Ansible project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible_collections.community.general.tests.unit.compat import unittest
+from ansible_collections.community.general.tests.unit.compat.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+
+def set_module_args(args):
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class ModuleTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
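
The helpers above let a unit test inject module parameters and turn exit_json/fail_json into catchable exceptions. A hedged usage sketch, assuming the helpers are imported from the utils module just added (the empty argument_spec is purely illustrative):

    from ansible.module_utils import basic
    from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
        AnsibleExitJson, ModuleTestCase, set_module_args)


    class TestHarness(ModuleTestCase):

        def test_exit_json_is_catchable(self):
            set_module_args({})  # only the injected '_ansible_*' defaults are present
            module = basic.AnsibleModule(argument_spec={})
            with self.assertRaises(AnsibleExitJson) as ctx:
                module.exit_json(msg='done')  # patched in ModuleTestCase.setUp()
            result = ctx.exception.args[0]
            self.assertEqual(result['msg'], 'done')
            self.assertFalse(result['changed'])  # exit_json() defaults 'changed' to False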
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_common.py b/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_common.py
new file mode 100644
index 000000000..d3ebb484d
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_common.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def fake_xenapi_ref(xenapi_class):
+ return "OpaqueRef:fake-xenapi-%s-ref" % xenapi_class
diff --git a/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_conftest.py b/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_conftest.py
new file mode 100644
index 000000000..f003be8b2
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/plugins/modules/xenserver_conftest.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import sys
+import importlib
+import pytest
+
+from .FakeAnsibleModule import FakeAnsibleModule
+
+
+@pytest.fixture
+def fake_ansible_module(request):
+ """Returns fake AnsibleModule with fake module params."""
+ if hasattr(request, 'param'):
+ return FakeAnsibleModule(request.param)
+ else:
+ params = {
+ "hostname": "somehost",
+ "username": "someuser",
+ "password": "somepwd",
+ "validate_certs": True,
+ }
+
+ return FakeAnsibleModule(params)
+
+
+@pytest.fixture(autouse=True)
+def XenAPI():
+ """Imports and returns fake XenAPI module."""
+
+ # Import of fake XenAPI module is wrapped by fixture so that it does not
+ # affect other unit tests which could potentially also use XenAPI module.
+
+ # First we use importlib.import_module() to import the module and assign
+ # it to a local symbol.
+ fake_xenapi = importlib.import_module('ansible_collections.community.general.tests.unit.plugins.modules.FakeXenAPI')
+
+ # Now we populate the Python module cache with the imported fake module using
+ # the original module name (XenAPI). That way, any 'import XenAPI' statement
+ # will just load the already imported fake module from the cache.
+ sys.modules['XenAPI'] = fake_xenapi
+
+ return fake_xenapi
+
+
+@pytest.fixture
+def xenserver_guest_info(XenAPI):
+ """Imports and returns xenserver_guest_info module."""
+
+ # Since we are wrapping the fake XenAPI module inside a fixture, all modules
+ # that depend on it have to be imported inside a test function. To make
+ # this easier to handle and to remove some code repetition, we wrap the import
+ # of the xenserver_guest_info module in a fixture.
+ from ansible_collections.community.general.plugins.modules import xenserver_guest_info
+
+ return xenserver_guest_info
+
+
+@pytest.fixture
+def xenserver_guest_powerstate(XenAPI):
+ """Imports and returns xenserver_guest_powerstate module."""
+
+ # Since we are wrapping the fake XenAPI module inside a fixture, all modules
+ # that depend on it have to be imported inside a test function. To make
+ # this easier to handle and to remove some code repetition, we wrap the import
+ # of the xenserver_guest_powerstate module in a fixture.
+ from ansible_collections.community.general.plugins.modules import xenserver_guest_powerstate
+
+ return xenserver_guest_powerstate
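
The XenAPI fixture above relies on the standard sys.modules cache-injection technique: whatever object is registered under a module's name is what later import statements receive. A generic, self-contained sketch of the same trick (the Session attribute is a hypothetical stand-in):

    import sys
    import types

    # Register a fake module under the name that production code would import.
    fake = types.ModuleType('XenAPI')
    fake.Session = lambda url: ('fake-session', url)
    sys.modules['XenAPI'] = fake

    import XenAPI  # served from the module cache, no real XenAPI installation needed
    assert XenAPI.Session('http://xenhost') == ('fake-session', 'http://xenhost')

Because the fixture is marked autouse=True, every test in the file sees the fake before any dependent module import runs.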
diff --git a/ansible_collections/community/general/tests/unit/requirements.txt b/ansible_collections/community/general/tests/unit/requirements.txt
new file mode 100644
index 000000000..0aa7c1fc9
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/requirements.txt
@@ -0,0 +1,46 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unittest2 ; python_version < '2.7'
+importlib ; python_version < '2.7'
+
+# requirement for the memcached cache plugin
+python-memcached
+
+# requirement for the redis cache plugin
+redis
+
+# requirement for the linode module
+linode-python # APIv3
+linode_api4 ; python_version > '2.6' # APIv4
+
+# requirement for the gitlab and github modules
+python-gitlab
+PyGithub
+httmock
+
+# requirement for maven_artifact module
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+lxml ; python_version >= '2.7'
+semantic_version
+
+# requirement for datadog_downtime module
+datadog-api-client >= 1.0.0b3 ; python_version >= '3.6'
+
+# requirement for dnsimple module
+dnsimple >= 2 ; python_version >= '3.6'
+dataclasses ; python_version == '3.6'
+
+# requirement for the opentelemetry callback plugin
+# WARNING: these libraries rely on Protobuf for Python, which regularly fails to install.
+# That's why they are disabled for now.
+# opentelemetry-api ; python_version >= '3.6' and python_version < '3.10'
+# opentelemetry-exporter-otlp ; python_version >= '3.6' and python_version < '3.10'
+# opentelemetry-sdk ; python_version >= '3.6' and python_version < '3.10'
+
+# requirement for the elastic callback plugin
+elastic-apm ; python_version >= '3.6'
+
+# requirements for scaleway modules
+passlib[argon2]
\ No newline at end of file
diff --git a/ansible_collections/community/general/tests/unit/requirements.yml b/ansible_collections/community/general/tests/unit/requirements.yml
new file mode 100644
index 000000000..586a6a1b3
--- /dev/null
+++ b/ansible_collections/community/general/tests/unit/requirements.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+collections:
+- community.internal_test_tools
diff --git a/ansible_collections/community/general/tests/utils/constraints.txt b/ansible_collections/community/general/tests/utils/constraints.txt
new file mode 100644
index 000000000..4fb5276e2
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/constraints.txt
@@ -0,0 +1,59 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+certifi < 2022.5.18 ; python_version < '3.5' # certifi 2022.5.18 requires Python 3.5 or later
+coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+cryptography >= 3.0, < 3.4 ; python_version < '3.6' and python_version >= '2.7' # cryptography 3.4 drops support for python 2.7
+cryptography >= 3.3, < 3.4 ; python_version >= '2.7' and python_version < '3.9' # FIXME: the upper limit is needed for RHEL8.2, CentOS 8, Ubuntu 18.04, and OpenSuSE 15
+deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+pywinrm >= 0.3.0 # message encryption support
+sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
+sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
+pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
+pycrypto >= 2.6 # Need features found in 2.6 and greater
+ncclient >= 0.5.2 # Need features added in 0.5.2 and greater
+idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+python-nomad < 2.0.0 ; python_version <= '3.7' # python-nomad 2.0.0 needs Python 3.7+
+pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
+pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
+pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests < 2.28 ; python_version >= '2.7' and python_version < '3.7' # requests 2.28.0 drops support for python 3.6 and before
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+voluptuous >= 0.11.0 # Schema recursion via Self
+openshift >= 0.6.2, < 0.9.0 # merge_type support
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+pyopenssl < 22.0.0 ; python_version >= '2.7' and python_version < '3.6' # pyOpenSSL 22.0.0 and later require python 3.6 or later
+pyfmg == 0.6.1 # newer versions do not pass current unit tests
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later
+pyone == 1.1.9 # newer versions do not pass current integration tests
+boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support
+botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support
+botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca
+setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later
+cffi >= 1.14.2, != 1.14.3 # 1.14.3 was yanked, but older versions of pip will still install it
+redis == 2.10.6 ; python_version < '2.7'
+redis < 4.0.0 ; python_version >= '2.7' and python_version < '3.6'
+redis ; python_version >= '3.6'
+pycdlib < 1.13.0 ; python_version < '3' # 1.13.0 does not work with Python 2, while not declaring that
+python-daemon <= 2.3.0 ; python_version < '3'
+bcrypt < 4.0.0 # TEMP: restrict to < 4.0.0 since installing 4.0.0 fails on RHEL 8
diff --git a/ansible_collections/community/general/tests/utils/shippable/aix.sh b/ansible_collections/community/general/tests/utils/shippable/aix.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/aix.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/alpine.sh b/ansible_collections/community/general/tests/utils/shippable/alpine.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/alpine.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/fedora.sh b/ansible_collections/community/general/tests/utils/shippable/fedora.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/fedora.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/freebsd.sh b/ansible_collections/community/general/tests/utils/shippable/freebsd.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/freebsd.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/generic.sh b/ansible_collections/community/general/tests/utils/shippable/generic.sh
new file mode 100755
index 000000000..5fd1fb55a
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/generic.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+python="${args[1]}"
+group="${args[2]}"
+
+target="azp/generic/${group}/"
+
+stage="${S:-prod}"
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote-terminate always --remote-stage "${stage}" \
+ --docker --python "${python}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/linux-community.sh b/ansible_collections/community/general/tests/utils/shippable/linux-community.sh
new file mode 100755
index 000000000..48d0d8687
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/linux-community.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+python="${args[2]}"
+
+if [ "${#args[@]}" -gt 3 ]; then
+ target="azp/posix/${args[3]}/"
+else
+ target="azp/posix/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "quay.io/ansible-community/test-image:${image}" --python "${python}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/linux.sh b/ansible_collections/community/general/tests/utils/shippable/linux.sh
new file mode 100755
index 000000000..6e1e2350b
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/linux.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "${image}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/macos.sh b/ansible_collections/community/general/tests/utils/shippable/macos.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/macos.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/osx.sh b/ansible_collections/community/general/tests/utils/shippable/osx.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/osx.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/remote.sh b/ansible_collections/community/general/tests/utils/shippable/remote.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/remote.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/rhel.sh b/ansible_collections/community/general/tests/utils/shippable/rhel.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/rhel.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/sanity.sh b/ansible_collections/community/general/tests/utils/shippable/sanity.sh
new file mode 100755
index 000000000..5b88a2677
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/sanity.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [ "${BASE_BRANCH:-}" ]; then
+ base_branch="origin/${BASE_BRANCH}"
+else
+ base_branch=""
+fi
+
+if [ "${group}" == "extra" ]; then
+ ../internal_test_tools/tools/run.py --color --bot --junit
+ exit
+fi
+
+case "${group}" in
+ 1) options=(--skip-test pylint --skip-test ansible-doc --skip-test validate-modules) ;;
+ 2) options=(--test ansible-doc --test validate-modules) ;;
+ 3) options=(--test pylint plugins/modules/) ;;
+ 4) options=(--test pylint --exclude plugins/modules/) ;;
+esac
+
+# allow collection migration sanity tests for groups 3 and 4 to pass without updating this script during migration
+network_path="lib/ansible/modules/network/"
+
+if [ -d "${network_path}" ]; then
+ if [ "${group}" -eq 3 ]; then
+ options+=(--exclude "${network_path}")
+ elif [ "${group}" -eq 4 ]; then
+ options+=("${network_path}")
+ fi
+fi
+
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ --docker --base-branch "${base_branch}" \
+ "${options[@]}" --allow-disabled
diff --git a/ansible_collections/community/general/tests/utils/shippable/shippable.sh b/ansible_collections/community/general/tests/utils/shippable/shippable.sh
new file mode 100755
index 000000000..e98680438
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/shippable.sh
@@ -0,0 +1,232 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+ansible_version="${args[0]}"
+script="${args[1]}"
+
+function join {
+ local IFS="$1";
+ shift;
+ echo "$*";
+}
+
+# Ensure we can write other collections to this dir
+sudo chown "$(whoami)" "${PWD}/../../"
+
+test="$(join / "${args[@]:1}")"
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do
+ docker rm -f "${container}" || true # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+ ls -la /home/shippable/cache/
+fi
+
+command -v python
+python -V
+
+function retry
+{
+ # shellcheck disable=SC2034
+ for repetition in 1 2 3; do
+ set +e
+ "$@"
+ result=$?
+ set -e
+ if [ ${result} == 0 ]; then
+ return ${result}
+ fi
+ echo "@* -> ${result}"
+ done
+ echo "Command '@*' failed 3 times!"
+ exit 255
+}
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+if [ "${ansible_version}" == "devel" ]; then
+ retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+else
+ retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check
+fi
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then
+ export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible"
+ SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
+ TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/general"
+ mkdir -p "${TEST_DIR}"
+ cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+ cd "${TEST_DIR}"
+else
+ export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../"
+fi
+
+if [ "${test}" == "sanity/extra" ]; then
+ retry pip install junit-xml --disable-pip-version-check
+fi
+
+# START: HACK install dependencies
+if [ "${script}" != "sanity" ] || [ "${test}" == "sanity/extra" ]; then
+ # Nothing further should be added to this list.
+ # This is to prevent modules or plugins in this collection having a runtime dependency on other collections.
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools"
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker"
+ # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
+ # retry ansible-galaxy -vvv collection install community.internal_test_tools
+fi
+
+if [ "${script}" != "sanity" ] && [ "${script}" != "units" ] && [ "${test}" != "sanity/extra" ]; then
+ # To prevent Python dependencies on other collections only install other collections for integration tests
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix"
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
+ # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
+ # retry ansible-galaxy -vvv collection install ansible.posix
+ # retry ansible-galaxy -vvv collection install community.crypto
+fi
+
+# END: HACK
+
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+ COVERAGE=yes
+ COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+ # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+ export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+ # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+ export COVERAGE="--coverage"
+else
+ # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+ export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+ # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+ export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+ # disable change detection triggered by having 'ci_complete' in the latest commit message
+ export CHANGED=""
+else
+ # enable change detection (default behavior)
+ export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+ # run unstable tests which are targeted by focused changes on PRs
+ export UNSTABLE="--allow-unstable-changed"
+else
+ # do not run unstable tests outside PRs
+ export UNSTABLE=""
+fi
+
+# remove empty core/extras module directories from PRs created prior to the repo-merge
+find plugins -type d -empty -print -delete
+
+function cleanup
+{
+ # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then
+ stub="--stub"
+ # trigger coverage reporting for stubs even if no other coverage data exists
+ mkdir -p tests/output/coverage/
+ else
+ stub=""
+ fi
+
+ if [ -d tests/output/coverage/ ]; then
+ if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+ process_coverage='yes' # process existing coverage files
+ elif [ "${stub}" ]; then
+ process_coverage='yes' # process coverage when stubs are enabled
+ else
+ process_coverage=''
+ fi
+
+ if [ "${process_coverage}" ]; then
+ # use python 3.7 for coverage to avoid running out of memory during coverage xml processing
+ # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job
+ virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+ set +ux
+ . ~/ansible-venv/bin/activate
+ set -ux
+
+ # shellcheck disable=SC2086
+ ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+ cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+ if [ "${ansible_version}" != "2.9" ]; then
+ # analyze and capture code coverage aggregated by integration test target
+ ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+ fi
+
+ # upload coverage report to codecov.io only when using complete on-demand coverage
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+ for file in tests/output/reports/coverage=*.xml; do
+ flags="${file##*/coverage=}"
+ flags="${flags%-powershell.xml}"
+ flags="${flags%.xml}"
+ # remove numbered component from stub files when converting to tags
+ flags="${flags//stub-[0-9]*/stub}"
+ flags="${flags//=/,}"
+ flags="${flags//[^a-zA-Z0-9_,]/_}"
+
+ bash <(curl -s https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh) \
+ -f "${file}" \
+ -F "${flags}" \
+ -n "${test}" \
+ -t 20636cf5-4d6a-4b9a-8d2d-6f22ebbaa752 \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+ done
+ fi
+ fi
+ fi
+
+ if [ -d tests/output/junit/ ]; then
+ cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/data/ ]; then
+ cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/bot/ ]; then
+ cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+}
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=60
+else
+ timeout=50
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi
+"tests/utils/shippable/${script}.sh" "${test}" "${ansible_version}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/ubuntu.sh b/ansible_collections/community/general/tests/utils/shippable/ubuntu.sh
new file mode 100755
index 000000000..84c1ebbe0
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/ubuntu.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/posix/${args[2]}/"
+else
+ target="azp/posix/"
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8 ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}"
diff --git a/ansible_collections/community/general/tests/utils/shippable/units.sh b/ansible_collections/community/general/tests/utils/shippable/units.sh
new file mode 100755
index 000000000..f591ec25a
--- /dev/null
+++ b/ansible_collections/community/general/tests/utils/shippable/units.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+version="${args[1]}"
+group="${args[2]}"
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=90
+else
+ timeout=30
+fi
+
+group1=()
+
+case "${group}" in
+ 1) options=("${group1[@]:+${group1[@]}}") ;;
+esac
+
+ansible-test env --timeout "${timeout}" --color -v
+
+if [ "$2" == "2.9" ]; then
+ # 1.5.0+ will not install for Python 3.6+ in the 2.9 setting (due to `enum` being installed)
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version >= '3.6'" >> tests/unit/requirements.txt
+fi
+
+if [ "$2" == "2.10" ]; then
+ sed -i -E 's/^redis($| .*)/redis < 4.1.0/g' tests/unit/requirements.txt
+ sed -i -E 's/^python-gitlab($| .*)/python-gitlab < 2.10.1 ; python_version >= '\'3.6\''/g' tests/unit/requirements.txt
+ echo "python-gitlab ; python_version < '3.6'" >> tests/unit/requirements.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default --python "${version}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ "${options[@]:+${options[@]}}" \